Repository: Kuaishou-OneRec/OpenOneRec
Branch: main
Commit: 1f4a99ea4708
Files: 1960
Total size: 12.5 MB

Directory structure:
gitextract_yy4wnsy3/ ├── .gitignore ├── README.md ├── benchmarks/ │ ├── LICENSE │ ├── README.md │ ├── api/ │ │ ├── README.md │ │ ├── __init__.py │ │ ├── base.py │ │ ├── claude.py │ │ ├── config/ │ │ │ └── llm_config.json │ │ ├── deepseek.py │ │ ├── example.py │ │ └── gemini.py │ ├── benchmark/ │ │ ├── __init__.py │ │ ├── base_generator.py │ │ ├── benchmark.py │ │ ├── checkpoint_utils.py │ │ ├── console.py │ │ ├── generation_runner.py │ │ ├── gpu_utils.py │ │ └── tasks/ │ │ ├── __init__.py │ │ ├── tasks.py │ │ └── v1_0/ │ │ ├── __init__.py │ │ ├── base_evaluator.py │ │ ├── base_loader.py │ │ ├── item_understand/ │ │ │ ├── __init__.py │ │ │ ├── config.py │ │ │ ├── evaluator.py │ │ │ └── utils.py │ │ ├── label_pred/ │ │ │ ├── __init__.py │ │ │ ├── config.py │ │ │ ├── evaluator.py │ │ │ └── utils.py │ │ ├── mfu_evaluator.py │ │ ├── qwen3.jinja2 │ │ ├── qwen3_soft_switch.jinja2 │ │ ├── rec_reason/ │ │ │ ├── __init__.py │ │ │ ├── config.py │ │ │ ├── evaluator.py │ │ │ └── utils.py │ │ ├── recommendation/ │ │ │ ├── __init__.py │ │ │ ├── config.py │ │ │ ├── evaluator.py │ │ │ ├── utils.py │ │ │ └── utils_by_pid.py │ │ └── registry.py │ ├── eval_script.sh │ ├── pyproject.toml │ ├── requirements.txt │ └── scripts/ │ ├── __init__.py │ ├── eval_dev_results.py │ ├── init_ray.sh │ ├── init_ray_cluster.sh │ └── ray-vllm/ │ ├── evaluate.py │ └── utils/ │ ├── __init__.py │ ├── arguments.py │ └── generator.py ├── data/ │ ├── README.md │ ├── general_text/ │ │ ├── pretrain.csv │ │ └── sft.csv │ ├── onerec_data/ │ │ ├── README.md │ │ ├── pretrain/ │ │ │ ├── item_understand.py │ │ │ ├── user_profile.py │ │ │ └── video_rec.py │ │ ├── run.sh │ │ └── sft/ │ │ ├── ad_rec.py │ │ ├── interactive_rec.py │ │ ├── item_understand.py │ │ ├── label_cond_rec.py │ │ ├── label_pred.py │ │ ├── product_rec.py │ │ ├── rec_reason.py │ │ └── video_rec.py │ ├── prepare_distillation.sh │ ├── prepare_pretrain.sh │ ├── prepare_rl.sh │ ├── prepare_sft.sh │ └── scripts/ │ ├── parquet_unicode_fix.py │ ├── sample_data.py │ ├── split_data.py │ └── train_test_split.py ├── pretrain/ │ ├── .gitignore │ ├── README.md │ ├── examples/ │ │ ├── dataset_config/ │ │ │ ├── pretrain.json │ │ │ └── sft.json │ │ ├── posttrain_sft.sh │ │ ├── pretrain_stg1.sh │ │ └── pretrain_stg2.sh │ ├── onerec_llm/ │ │ ├── __init__.py │ │ ├── data/ │ │ │ ├── __init__.py │ │ │ ├── dataloaders.py │ │ │ ├── local_shuffle_buffer.py │ │ │ └── qwen3_dataset.py │ │ ├── losses/ │ │ │ ├── __init__.py │ │ │ └── ce.py │ │ ├── models/ │ │ │ └── qwen3/ │ │ │ ├── __init__.py │ │ │ ├── configuration_qwen3.py │ │ │ ├── modeling_qwen3.py │ │ │ └── modular_qwen3.py │ │ ├── training/ │ │ │ ├── __init__.py │ │ │ ├── activations.py │ │ │ ├── checkpoint.py │ │ │ ├── common.py │ │ │ ├── distributed.py │ │ │ ├── gradients.py │ │ │ └── lr_schedulers.py │ │ └── utils/ │ │ ├── __init__.py │ │ ├── common.py │ │ ├── data_utils.py │ │ ├── distributed.py │ │ ├── ds_utils.py │ │ ├── mfu_stats.py │ │ ├── time_tracker.py │ │ └── worker_utils.py │ ├── recipes/ │ │ └── train_qwen3.py │ ├── scripts/ │ │ ├── convert_checkpoint_to_hf.sh │ │ ├── expand_qwen3_vocab.sh │ │ ├── killall.sh │ │ ├── numa_runner.sh │ │ ├── test_cases_example.json │ │ └── test_hf_model.sh │ ├── set_env.sh │ ├── tests/ │ │ └── test_qwen3_dataset_file_distribution.py │ └── tools/ │ ├── model_converter/ │ │ ├── convert_checkpoint_to_hf.py │ │ └── expand_qwen3_vocab.py │ └── model_test/ │ └── test_hf_model.py ├── 
tokenizer/ │ ├── README.md │ ├── infer_res_kmeans.py │ ├── res_kmeans.py │ └── train_res_kmeans.py ├── verl_distillation/ │ ├── LICENSE │ ├── README.md │ ├── README_ORIGINAL.md │ ├── deploy_env.sh │ ├── docker/ │ │ ├── Apptainerfile.rocm │ │ ├── Dockerfile.extention.awsefa │ │ ├── Dockerfile.ngc.vllm │ │ ├── Dockerfile.ngc.vllm0.8 │ │ ├── Dockerfile.ngc.vllm0.8.sagemaker │ │ ├── Dockerfile.rocm │ │ ├── Dockerfile.rocm7 │ │ ├── Dockerfile.rocm_verl-0.3.0.post1 │ │ ├── Dockerfile.rocm_verl-0.4.1 │ │ ├── Dockerfile.sglang │ │ ├── Dockerfile.vemlp.vllm.te │ │ ├── Dockerfile.vllm.sglang.megatron.deepseek │ │ ├── README.md │ │ ├── ascend/ │ │ │ ├── Dockerfile.ascend_8.2.rc1_a2 │ │ │ └── Dockerfile.ascend_8.2.rc1_a3 │ │ ├── verl0.4-cu124-torch2.6-fa2.7.4/ │ │ │ ├── Dockerfile.app.sglang.vllm.mcore0.12 │ │ │ ├── Dockerfile.app.sglang.vllm.mcore0.12.deepep │ │ │ ├── Dockerfile.app.sglang.vllm.mcore0.13.preview │ │ │ ├── Dockerfile.app.vllm.mcore0.12 │ │ │ ├── Dockerfile.app.vllm.mcore0.12.deepep │ │ │ ├── Dockerfile.app.vllm.mcore0.13.preview │ │ │ ├── Dockerfile.base │ │ │ └── README.md │ │ ├── verl0.5-cu126-torch2.7-fa2.7.4/ │ │ │ ├── Dockerfile.app.sglang0.4.10.post2.mcore0.13 │ │ │ ├── Dockerfile.app.sglang0.4.9.post6.mcore0.13 │ │ │ ├── Dockerfile.app.vllm.mcore0.13 │ │ │ ├── Dockerfile.app.vllm.mcore0.15 │ │ │ ├── Dockerfile.base.torch2.7.1 │ │ │ └── README.md │ │ ├── verl0.5-cu126-torch2.7.1-fa2.8.0/ │ │ │ ├── Dockerfile.app.sglang.mcore0.12 │ │ │ ├── Dockerfile.app.sglang.mcore0.13.preview │ │ │ ├── Dockerfile.base │ │ │ └── README.md │ │ ├── verl0.5-preview-cu128-torch2.7.1-fa2.8.0/ │ │ │ ├── Dockerfile.app.sglang.megatron │ │ │ ├── Dockerfile.base │ │ │ └── README.md │ │ └── verl0.6-cu128-torch2.8.0-fa2.7.4/ │ │ ├── Dockerfile.app.sglang │ │ ├── Dockerfile.base │ │ └── Dockerfile.vllm011.mcore_gpt-oss │ ├── docs/ │ │ ├── Makefile │ │ ├── README.md │ │ ├── README_vllm0.7.md │ │ ├── README_vllm0.8.md │ │ ├── _static/ │ │ │ ├── custom.css │ │ │ └── js/ │ │ │ ├── resizable-sidebar.js │ │ │ └── runllm-widget.js │ │ ├── advance/ │ │ │ ├── agent_loop.rst │ │ │ ├── attention_implementation.rst │ │ │ ├── checkpoint.rst │ │ │ ├── dpo_extension.rst │ │ │ ├── fsdp_extension.rst │ │ │ ├── fully_async.md │ │ │ ├── megatron_extension.rst │ │ │ ├── one_step_off.md │ │ │ ├── placement.rst │ │ │ ├── ppo_lora.rst │ │ │ ├── reward_loop.rst │ │ │ ├── rollout_is.md │ │ │ ├── rollout_skip.rst │ │ │ ├── rollout_trace.rst │ │ │ └── rope.rst │ │ ├── algo/ │ │ │ ├── baseline.md │ │ │ ├── collabllm.md │ │ │ ├── dapo.md │ │ │ ├── entropy.md │ │ │ ├── gpg.md │ │ │ ├── grpo.md │ │ │ ├── opo.md │ │ │ ├── ppo.md │ │ │ ├── spin.md │ │ │ └── sppo.md │ │ ├── amd_tutorial/ │ │ │ ├── amd_build_dockerfile_page.rst │ │ │ └── amd_vllm_page.rst │ │ ├── api/ │ │ │ ├── data.rst │ │ │ ├── single_controller.rst │ │ │ ├── trainer.rst │ │ │ └── utils.rst │ │ ├── ascend_tutorial/ │ │ │ ├── ascend_profiling_en.rst │ │ │ ├── ascend_profiling_zh.rst │ │ │ ├── ascend_quick_start.rst │ │ │ ├── ascend_sglang_quick_start.rst │ │ │ └── dockerfile_build_guidance.rst │ │ ├── conf.py │ │ ├── data/ │ │ │ └── transfer_queue.md │ │ ├── examples/ │ │ │ ├── config.rst │ │ │ ├── gsm8k_example.rst │ │ │ ├── multi_modal_example.rst │ │ │ ├── ppo_code_architecture.rst │ │ │ ├── sandbox_fusion_example.rst │ │ │ └── skypilot_examples.rst │ │ ├── faq/ │ │ │ └── faq.rst │ │ ├── hybrid_flow.rst │ │ ├── index.rst │ │ ├── perf/ │ │ │ ├── best_practices.rst │ │ │ ├── device_tuning.rst │ │ │ ├── dpsk.md │ │ │ ├── nsight_profiling.md │ │ │ ├── perf_tuning.rst │ │ │ 
└── verl_profiler_system.md │ │ ├── preparation/ │ │ │ ├── prepare_data.rst │ │ │ └── reward_function.rst │ │ ├── requirements-docs.txt │ │ ├── sglang_multiturn/ │ │ │ ├── interaction_system.rst │ │ │ ├── multiturn.rst │ │ │ ├── sandbox_fusion.rst │ │ │ └── search_tool_example.rst │ │ ├── single_controller.rst │ │ ├── start/ │ │ │ ├── agentic_rl.rst │ │ │ ├── install.rst │ │ │ ├── more_resources.rst │ │ │ ├── multinode.rst │ │ │ ├── quickstart.rst │ │ │ └── ray_debug_tutorial.rst │ │ └── workers/ │ │ ├── fsdp_workers.rst │ │ ├── megatron_workers.rst │ │ ├── model_engine.rst │ │ ├── ray_trainer.rst │ │ └── sglang_worker.rst │ ├── examples/ │ │ ├── data_preprocess/ │ │ │ ├── aime2024_multiturn_w_tool.py │ │ │ ├── dapo_multiturn_w_tool.py │ │ │ ├── full_hh_rlhf.py │ │ │ ├── geo3k.py │ │ │ ├── geo3k_multiturn_w_tool.py │ │ │ ├── gsm8k.py │ │ │ ├── gsm8k_multiturn_sft.py │ │ │ ├── gsm8k_multiturn_w_interaction.py │ │ │ ├── gsm8k_multiturn_w_tool.py │ │ │ ├── gsm8k_tool_agent_loop.py │ │ │ ├── hellaswag.py │ │ │ ├── math_dataset.py │ │ │ ├── multiturn.py │ │ │ └── preprocess_search_r1_dataset.py │ │ ├── generation/ │ │ │ ├── run_deepseek7b_mutli_node.sh │ │ │ └── run_deepseek_v2_lite_math.sh │ │ ├── gmpo_trainer/ │ │ │ ├── README.md │ │ │ ├── run_qwen2_5-7b_math.sh │ │ │ ├── test_dapo_7b_math.sh │ │ │ └── test_dapo_qwen3_30b_math.sh │ │ ├── gpg_trainer/ │ │ │ ├── gpg.md │ │ │ ├── run_qwen2-7b_math.sh │ │ │ └── run_qwen2-7b_math_megatron.sh │ │ ├── grpo_trainer/ │ │ │ ├── README.md │ │ │ ├── run_deepseek671b_math_megatron_80gb.sh │ │ │ ├── run_deepseek671b_math_megatron_96gb.sh │ │ │ ├── run_deepseek7b_llm.sh │ │ │ ├── run_deepseek7b_llm_math.sh │ │ │ ├── run_deepseek7b_llm_math_megatron.sh │ │ │ ├── run_deepseek7b_llm_seq_balance.sh │ │ │ ├── run_glm41v_9b.sh │ │ │ ├── run_gptoss_20b.sh │ │ │ ├── run_minicpmo2_6.sh │ │ │ ├── run_mistral13b_skyworkrm_hhrlhf.sh │ │ │ ├── run_moonlight16b_math_megatron.sh │ │ │ ├── run_qwen2-7b.sh │ │ │ ├── run_qwen2-7b_math.sh │ │ │ ├── run_qwen2-7b_math_megatron.sh │ │ │ ├── run_qwen2-7b_seq_balance.sh │ │ │ ├── run_qwen2-7b_seq_balance_math_megatron.sh │ │ │ ├── run_qwen2-7b_sgl_megatron.sh │ │ │ ├── run_qwen2_5-3b_gsm8k_grpo_lora.sh │ │ │ ├── run_qwen2_5-3b_gsm8k_grpo_lora_from_adapter.sh │ │ │ ├── run_qwen2_5-7b_math_megatron_diff_tp.sh │ │ │ ├── run_qwen2_5_32b_grpo_npu.sh │ │ │ ├── run_qwen2_5_7b_grpo_discrete_prof_npu.sh │ │ │ ├── run_qwen2_5_7b_grpo_e2e_prof_npu.sh │ │ │ ├── run_qwen2_5_7b_grpo_npu.sh │ │ │ ├── run_qwen2_5_vl-7b-megatron.sh │ │ │ ├── run_qwen2_5_vl-7b-sglang.sh │ │ │ ├── run_qwen2_5_vl-7b.sh │ │ │ ├── run_qwen2_5_vl-7b_freeze_vision.sh │ │ │ ├── run_qwen2_5_vl-7b_lora.sh │ │ │ ├── run_qwen2_5_vl-7b_seq_balance.sh │ │ │ ├── run_qwen2_5_vl_32b_npu.sh │ │ │ ├── run_qwen2_5_vl_3b_npu.sh │ │ │ ├── run_qwen2_5_vl_7b_npu.sh │ │ │ ├── run_qwen3-235b_megatron_96gb.sh │ │ │ ├── run_qwen3-32b_npu.sh │ │ │ ├── run_qwen3-8b.sh │ │ │ ├── run_qwen3-8b_npu.sh │ │ │ ├── run_qwen3_8b_grpo_sglang_1k_spmd_npu.sh │ │ │ ├── run_qwen3_8b_grpo_sglang_32k_spmd_npu.sh │ │ │ ├── run_qwen3_vl-235b-megatron.sh │ │ │ ├── run_qwen3_vl-30b-megatron.sh │ │ │ ├── run_qwen3_vl-8b-megatron.sh │ │ │ ├── run_qwen3moe-30b_megatron_96gb.sh │ │ │ └── run_seed_oss_36b.sh │ │ ├── ppo_trainer/ │ │ │ ├── README.md │ │ │ ├── run_deepseek7b_llm.sh │ │ │ ├── run_deepseek7b_llm_modelscope.sh │ │ │ ├── run_deepseek7b_llm_pfppo.sh │ │ │ ├── run_deepseek7b_llm_sandbox_fusion.sh │ │ │ ├── run_deepseek7b_llm_sp2.sh │ │ │ ├── run_deepseek_full_hh_rlhf.sh │ │ │ ├── 
run_deepseek_math_gsm8k_megatron.sh │ │ │ ├── run_deepseek_math_gsm8k_megatron_nsys.sh │ │ │ ├── run_gemma.sh │ │ │ ├── run_moonlight16b_a3b_gsm8k_megatron.sh │ │ │ ├── run_qwen1.5_moe_a2.7b-gsm8k_megatron.sh │ │ │ ├── run_qwen2-7b_math_gsm8k_megatron.sh │ │ │ ├── run_qwen2-7b_rm.sh │ │ │ ├── run_qwen2-7b_rm_seq_balance.sh │ │ │ ├── run_qwen2-7b_rm_seq_balance_fused_kernels.sh │ │ │ ├── run_qwen2-7b_rm_seq_balance_nsys.sh │ │ │ ├── run_qwen2-7b_seq_balance.sh │ │ │ ├── run_qwen2-7b_sglang_seq_balance.sh │ │ │ ├── run_qwen2.5-32b.sh │ │ │ └── run_qwen3-8b_npu.sh │ │ ├── ray/ │ │ │ └── tutorial.ipynb │ │ ├── reinforce_plus_plus_trainer/ │ │ │ ├── run_qwen2-7b_math_rf.sh │ │ │ └── run_qwen2-7b_math_rf_baseline.sh │ │ ├── remax_trainer/ │ │ │ ├── run_qwen2.5-3b_seq_balance.sh │ │ │ └── run_qwen2.5-7b_seq_balance.sh │ │ ├── rloo_trainer/ │ │ │ └── run_qwen2-7b.sh │ │ ├── rollout_importance_sampling/ │ │ │ ├── README.md │ │ │ └── run_with_rollout_is.sh │ │ ├── sft/ │ │ │ ├── gsm8k/ │ │ │ │ ├── run_deepseek_6b7.sh │ │ │ │ ├── run_gemma_2b.sh │ │ │ │ ├── run_gemma_7b.sh │ │ │ │ ├── run_qwen3_8b_sft_peft_sp2_npu.sh │ │ │ │ ├── run_qwen_05_peft.sh │ │ │ │ ├── run_qwen_05_sp2.sh │ │ │ │ ├── run_qwen_05_sp2_liger.sh │ │ │ │ └── run_seed_oss_36b_sft.sh │ │ │ └── multiturn/ │ │ │ └── run_qwen_05_sp2.sh │ │ ├── sglang_multiturn/ │ │ │ ├── README.md │ │ │ ├── config/ │ │ │ │ ├── geo3k_multiturn_grpo.yaml │ │ │ │ ├── geo3k_multiturn_megatron_grpo.yaml │ │ │ │ ├── gsm8k_multiturn_grpo.yaml │ │ │ │ ├── gsm8k_multiturn_grpo_server.yaml │ │ │ │ ├── gsm8k_multiturn_grpo_w_interaction.yaml │ │ │ │ ├── gsm8k_multiturn_megatron_grpo.yaml │ │ │ │ ├── interaction_config/ │ │ │ │ │ └── gsm8k_interaction_config.yaml │ │ │ │ ├── retool_multiturn_grpo.yaml │ │ │ │ ├── search_multiturn_grpo.yaml │ │ │ │ ├── search_multiturn_grpo_one_step_off.yaml │ │ │ │ └── tool_config/ │ │ │ │ ├── geo3k_tool_config.yaml │ │ │ │ ├── gsm8k_tool_config.yaml │ │ │ │ ├── mcp_server.json │ │ │ │ ├── mcp_tool_config.yaml │ │ │ │ ├── sandbox_fusion_tool_config.yaml │ │ │ │ └── search_tool_config.yaml │ │ │ ├── geo3k/ │ │ │ │ ├── run_qwen2.5-3b_geo3k_multiturn.sh │ │ │ │ ├── run_qwen2.5-3b_geo3k_multiturn_4xgpu.sh │ │ │ │ └── run_qwen2.5-3b_megatron_geo3k_multiturn.sh │ │ │ ├── run_qwen0.5b_gsm8k_multiturn_curriculum.sh │ │ │ ├── run_qwen2.5-0.5b_gsm8k_multiturn_w_interaction.sh │ │ │ ├── run_qwen2.5-3b_gsm8k_multiturn.sh │ │ │ ├── run_qwen2.5-3b_gsm8k_multiturn_4xgpu.sh │ │ │ ├── run_qwen2.5-3b_gsm8k_multiturn_4xgpu_server.sh │ │ │ ├── run_qwen2.5-3b_gsm8k_multiturn_server.sh │ │ │ ├── run_qwen2.5-3b_gsm8k_multiturn_vllm_fsdp.sh │ │ │ ├── run_qwen2.5-3b_gsm8k_tool_agent_mlflow.sh │ │ │ ├── run_qwen2.5-3b_megatron_gsm8k_multiturn.sh │ │ │ ├── run_qwen3-4b_gsm8k_multiturn.sh │ │ │ ├── run_qwen3_4b_dapo_multiturn.sh │ │ │ └── search_r1_like/ │ │ │ ├── local_dense_retriever/ │ │ │ │ ├── download.py │ │ │ │ └── retrieval_server.py │ │ │ └── run_qwen2.5-3b_instruct_search_multiturn.sh │ │ ├── skypilot/ │ │ │ ├── README.md │ │ │ ├── verl-grpo.yaml │ │ │ ├── verl-multiturn-tools.yaml │ │ │ └── verl-ppo.yaml │ │ ├── slurm/ │ │ │ └── ray_on_slurm.slurm │ │ ├── split_placement/ │ │ │ ├── README.md │ │ │ ├── config/ │ │ │ │ └── ppo_trainer_split.yaml │ │ │ ├── main_ppo_split.py │ │ │ ├── run_deepseek7b_llm.sh │ │ │ └── split_monkey_patch.py │ │ ├── tuning/ │ │ │ ├── 0.5b/ │ │ │ │ └── qwen2-0.5b_grpo-lora_1_h100_fsdp_vllm.sh │ │ │ ├── 1.5b/ │ │ │ │ └── qwen2-1.5b_grpo-lora_1_h100_fsdp_vllm.sh │ │ │ ├── 14b/ │ │ │ │ ├── 
qwen2-14b_grpo-lora_2_h100_fsdp_vllm.sh │ │ │ │ └── qwen2_14b_grpo_4_h800_fsdp_vllm.sh │ │ │ ├── 32b/ │ │ │ │ ├── qwen2-32b_grpo-lora_4_h100_fsdp_vllm.sh │ │ │ │ └── qwen2_32B_grpo_8_h20_megatron_vllm.sh │ │ │ ├── 3b/ │ │ │ │ └── qwen2-3b_grpo-lora_1_h100_fsdp_vllm.sh │ │ │ ├── 70b/ │ │ │ │ ├── qwen2-70b_grpo_32_h20_fsdp_vllm.sh │ │ │ │ ├── qwen2-70b_grpo_32_h800_fsdp_vllm.sh │ │ │ │ └── qwen2-72b_grpo-lora_8_h100_fsdp_vllm.sh │ │ │ └── 7b/ │ │ │ ├── qwen2-7b_grpo-lora_1_h100_fsdp_vllm.sh │ │ │ └── qwen2-7b_grpo_2_h800_fsdp_vllm.sh │ │ └── tutorial/ │ │ └── agent_loop_get_started/ │ │ ├── agent_loop_tutorial.ipynb │ │ └── sandbox.py │ ├── init_ray.sh │ ├── init_ray_cluster.sh │ ├── pyproject.toml │ ├── recipe/ │ │ ├── README.md │ │ ├── __init__.py │ │ ├── char_count/ │ │ │ ├── README.md │ │ │ ├── create_dataset.py │ │ │ ├── reward_function.py │ │ │ ├── train_grpo.sh │ │ │ └── train_sft.sh │ │ ├── collabllm/ │ │ │ ├── README.md │ │ │ ├── collabllm_agent_loop.py │ │ │ ├── collabllm_interation.py │ │ │ ├── config/ │ │ │ │ ├── agent.yaml │ │ │ │ └── collabllm_interaction_config.yaml │ │ │ ├── metrics/ │ │ │ │ ├── accuracy.py │ │ │ │ ├── bleu_score.py │ │ │ │ ├── interactivity.py │ │ │ │ ├── pass_rate.py │ │ │ │ └── token_amount.py │ │ │ ├── process_dataset.py │ │ │ ├── reward_function.py │ │ │ ├── train_rl_collabllm.sh │ │ │ ├── train_sft_collabllm.sh │ │ │ └── utils.py │ │ ├── dapo/ │ │ │ ├── README.md │ │ │ ├── config/ │ │ │ │ ├── dapo_megatron_trainer.yaml │ │ │ │ └── dapo_trainer.yaml │ │ │ ├── dapo_ray_trainer.py │ │ │ ├── main_dapo.py │ │ │ ├── prepare_dapo_data.sh │ │ │ ├── run_dapo_early_qwen2.5_32b.sh │ │ │ ├── run_dapo_qwen2.5_32b.sh │ │ │ ├── run_dapo_qwen2.5_32b_npu.sh │ │ │ ├── run_dapo_qwen2.5_32b_rollout_is.sh │ │ │ ├── run_dapo_qwen2.5_7b_npu.sh │ │ │ ├── run_dapo_qwen3_14b_base_npu.sh │ │ │ ├── run_dapo_qwen3_8b_base_npu.sh │ │ │ ├── run_dapo_qwen3_moe_30b_base_fsdp_npu.sh │ │ │ ├── run_dapo_qwen3_moe_30b_megatron_npu.sh │ │ │ ├── run_dapo_wo_ds_qwen2.5_32b.sh │ │ │ ├── runtime_env.yaml │ │ │ ├── test_dapo_7b.sh │ │ │ ├── test_dapo_7b_math.sh │ │ │ ├── test_dapo_7b_math_lora.sh │ │ │ ├── test_dapo_7b_math_megatron.sh │ │ │ ├── test_dapo_dspk_671b_megatron_96gb.sh │ │ │ ├── test_dapo_glm_air_megatron.sh │ │ │ ├── test_dapo_qwen3_30b_math.sh │ │ │ └── test_dapo_qwen3_30b_math_single_node.sh │ │ ├── deepeyes/ │ │ │ ├── README.md │ │ │ ├── configs/ │ │ │ │ ├── deepeyes_multiturn_grpo.yaml │ │ │ │ └── image_zoom_in_tool_config.yaml │ │ │ ├── deepeyes.py │ │ │ └── run_deepeyes_grpo.sh │ │ ├── entropy/ │ │ │ ├── 32b_clip_cov.sh │ │ │ ├── 32b_kl_cov.sh │ │ │ ├── 32b_kl_cov_mininbsz.sh │ │ │ ├── 7b_clip_cov.sh │ │ │ ├── 7b_kl_cov.sh │ │ │ ├── README.md │ │ │ ├── config/ │ │ │ │ └── entropy_trainer.yaml │ │ │ ├── entropy_ray_trainer.py │ │ │ ├── main_entropy.py │ │ │ ├── reward.py │ │ │ └── reward_score/ │ │ │ ├── __init__.py │ │ │ └── entropy_math/ │ │ │ ├── __init__.py │ │ │ ├── grader.py │ │ │ └── math_normalize.py │ │ ├── fapo/ │ │ │ ├── README.md │ │ │ ├── config/ │ │ │ │ └── rm_config.yaml │ │ │ ├── prepare_fapo_data.py │ │ │ ├── reward_fn_genrm.py │ │ │ ├── reward_fn_reasoning.py │ │ │ ├── reward_fn_reasoning_remote.py │ │ │ ├── run_baseline_32b.sh │ │ │ ├── run_baseline_7b.sh │ │ │ ├── run_fapo_32b.sh │ │ │ ├── run_fapo_32b_remote.sh │ │ │ ├── run_fapo_7b.sh │ │ │ ├── run_fapo_7b_remote.sh │ │ │ ├── run_fapo_genrm_train.sh │ │ │ └── runtime_env.yaml │ │ ├── fully_async_policy/ │ │ │ ├── README.md │ │ │ ├── README_zh.md │ │ │ ├── agent_loop/ │ │ │ │ ├── __init__.py │ │ │ │ ├── 
agent_loop.py │ │ │ │ └── partial_single_turn_agent_loop.py │ │ │ ├── config/ │ │ │ │ ├── fully_async_ppo_megatron_trainer.yaml │ │ │ │ └── fully_async_ppo_trainer.yaml │ │ │ ├── detach_utils.py │ │ │ ├── fsdp2_utils.py │ │ │ ├── fsdp_workers.py │ │ │ ├── fully_async_main.py │ │ │ ├── fully_async_rollouter.py │ │ │ ├── fully_async_trainer.py │ │ │ ├── megatron_worker.py │ │ │ ├── message_queue.py │ │ │ ├── param_sync.py │ │ │ ├── ray_trainer.py │ │ │ ├── shell/ │ │ │ │ ├── dapo_7b_math_fsdp2_16_16.sh │ │ │ │ ├── dapo_7b_math_fsdp2_32_32.sh │ │ │ │ ├── dapo_7b_math_fsdp2_4_12.sh │ │ │ │ ├── dapo_7b_math_fsdp2_4_4.sh │ │ │ │ ├── dapo_7b_math_fsdp2_64_64.sh │ │ │ │ ├── dapo_7b_math_fsdp2_64_64_mis.sh │ │ │ │ ├── dapo_7b_math_fsdp2_8_8.sh │ │ │ │ ├── geo3k_qwen25vl_7b_megatron_4_4.sh │ │ │ │ └── runtime_env.yaml │ │ │ ├── unittest/ │ │ │ │ └── simple_streaming_demo.py │ │ │ └── vllm_rollout/ │ │ │ ├── __init__.py │ │ │ └── vllm_async_server.py │ │ ├── genrm_remote/ │ │ │ ├── README.md │ │ │ ├── reward_function.py │ │ │ └── run_genrm_remote.sh │ │ ├── gspo/ │ │ │ ├── test_gspo_3b_math.sh │ │ │ ├── test_gspo_3b_math_slurm.sh │ │ │ └── test_gspo_qwen30b_a3b_ep.sh │ │ ├── infigui-g1/ │ │ │ ├── README.md │ │ │ ├── reward_fn.py │ │ │ ├── run_3b.sh │ │ │ └── run_7b.sh │ │ ├── langgraph_agent/ │ │ │ ├── __init__.py │ │ │ ├── chat_model.py │ │ │ ├── example/ │ │ │ │ ├── README.md │ │ │ │ ├── agent.yaml │ │ │ │ ├── create_dataset.py │ │ │ │ ├── math_expression.py │ │ │ │ ├── run_gpt_oss_20b_bf16.sh │ │ │ │ └── run_qwen2.5_3b.sh │ │ │ ├── react_agent_loop.py │ │ │ └── test_react_agent_loop.py │ │ ├── minicpmo/ │ │ │ └── rl_dataset.py │ │ ├── one_step_off_policy/ │ │ │ ├── README.md │ │ │ ├── config/ │ │ │ │ ├── one_step_off_ppo_megatron_trainer.yaml │ │ │ │ └── one_step_off_ppo_trainer.yaml │ │ │ ├── dapo_7b_math_fsdp2_4_12.sh │ │ │ ├── dapo_7b_math_fsdp2_colocate.sh │ │ │ ├── dapo_7b_math_fsdp2_sglang_4_12.sh │ │ │ ├── dapo_7b_math_fsdp2_sglang_colocate.sh │ │ │ ├── dapo_7b_math_megatron_4_12.sh │ │ │ ├── dapo_7b_math_megatron_colocate.sh │ │ │ ├── distributed_util.py │ │ │ ├── fsdp_workers.py │ │ │ ├── grpo_0.6b_gsm8k_fsdp2_2_6.sh │ │ │ ├── grpo_0.6b_gsm8k_fsdp2_sglang_2_6.sh │ │ │ ├── grpo_3b_gsm8k_fsdp2_2_6.sh │ │ │ ├── main_ppo.py │ │ │ ├── megatron_workers.py │ │ │ ├── ray_trainer.py │ │ │ ├── sglang_sharding_manager.py │ │ │ ├── utils.py │ │ │ └── vllm_sharding_manager.py │ │ ├── onpolicy_distill/ │ │ │ ├── __init__.py │ │ │ ├── config/ │ │ │ │ └── onpolicy_distill_trainer.yaml │ │ │ ├── main_onpolicy_distill.py │ │ │ ├── onpolicy_distill_trainer.py │ │ │ └── run_qwen3_distill.sh │ │ ├── open_math_reasoning/ │ │ │ ├── README.md │ │ │ ├── compute_score.py │ │ │ ├── prepare_eval_dataset.py │ │ │ ├── prepare_nvidia-OpenMathReasoning_sft.py │ │ │ ├── run_eval.sh │ │ │ ├── run_generation.sh │ │ │ └── run_sft_qwen3_8b.sh │ │ ├── prime/ │ │ │ ├── __init__.py │ │ │ ├── config/ │ │ │ │ └── prime_trainer.yaml │ │ │ ├── main_prime.py │ │ │ ├── prime_core_algos.py │ │ │ ├── prime_dp_rm.py │ │ │ ├── prime_fsdp_workers.py │ │ │ ├── prime_ray_trainer.py │ │ │ ├── run_prime_qwen.sh │ │ │ └── run_prime_qwen_code.sh │ │ ├── r1/ │ │ │ ├── README.md │ │ │ ├── __init__.py │ │ │ ├── config/ │ │ │ │ └── evaluation.yaml │ │ │ ├── data_process.py │ │ │ ├── main_eval.py │ │ │ ├── reward_score.py │ │ │ ├── run_r1_distill_qwen.sh │ │ │ └── tasks/ │ │ │ ├── __init__.py │ │ │ ├── gpqa.py │ │ │ ├── livecodebench.py │ │ │ └── math_reward.py │ │ ├── retool/ │ │ │ ├── README.md │ │ │ ├── retool.py │ │ │ ├── 
retool_sft_preprocess.py │ │ │ ├── run_gpt_oss_ppo.sh │ │ │ ├── run_qwen2-32b_dapo.sh │ │ │ ├── run_qwen2-32b_ppo.sh │ │ │ ├── run_qwen2-32b_sft.sh │ │ │ ├── run_qwen2_7b_dapo.sh │ │ │ ├── run_qwen2_7b_sft.sh │ │ │ ├── run_qwen2_7b_sft_npu.sh │ │ │ └── sandbox_fusion_tool_config.yaml │ │ ├── spin/ │ │ │ ├── README.md │ │ │ ├── config/ │ │ │ │ └── spin_trainer.yaml │ │ │ ├── core_algos.py │ │ │ ├── dp_actor.py │ │ │ ├── fsdp_workers.py │ │ │ ├── main_spin.py │ │ │ ├── run_spin.sh │ │ │ ├── spin_trainer.py │ │ │ └── utils.py │ │ ├── sppo/ │ │ │ ├── README.md │ │ │ ├── __init__.py │ │ │ ├── config/ │ │ │ │ └── sppo_trainer.yaml │ │ │ ├── config.py │ │ │ ├── dp_actor.py │ │ │ ├── main_sppo.py │ │ │ ├── run_qwen2.5-7b_rm.sh │ │ │ ├── sppo_ray_trainer.py │ │ │ └── sppo_worker.py │ │ └── transfer_queue/ │ │ ├── agent_loop.py │ │ ├── config/ │ │ │ └── transfer_queue_ppo_trainer.yaml │ │ ├── main_ppo.py │ │ ├── ray_trainer.py │ │ └── run_qwen3-8b_transferqueue_npu.sh │ ├── requirements-cuda.txt │ ├── requirements-npu.txt │ ├── requirements.txt │ ├── requirements_sglang.txt │ ├── requirements_transferqueue.txt │ ├── scripts/ │ │ ├── __init__.py │ │ ├── converter_hf_to_mcore.py │ │ ├── diagnose.py │ │ ├── generate_trainer_config.sh │ │ ├── init_random_model.py │ │ ├── install_vllm_sglang_mcore.sh │ │ ├── legacy_model_merger.py │ │ ├── print_cfg.py │ │ └── rollout_viewer.py │ ├── setup.py │ ├── tests/ │ │ ├── README.md │ │ ├── __init__.py │ │ ├── experimental/ │ │ │ ├── agent_loop/ │ │ │ │ ├── agent_utils.py │ │ │ │ ├── qwen_vl_tool_chat_template.jinja2 │ │ │ │ ├── test_agent_loop_reward.py │ │ │ │ ├── test_agent_loop_reward_model.py │ │ │ │ ├── test_basic_agent_loop.py │ │ │ │ ├── test_gpt_oss_tool_parser.py │ │ │ │ ├── test_multi_modal.py │ │ │ │ └── test_standalone_rollout.py │ │ │ └── reward/ │ │ │ ├── reward_fn.py │ │ │ ├── test_agent_loop_reward_manager.py │ │ │ └── test_reward_model.py │ │ ├── interactions/ │ │ │ ├── __init__.py │ │ │ ├── test_gsm8k_interaction.py │ │ │ └── test_interaction_registry.py │ │ ├── kill_github_tests.sh │ │ ├── models/ │ │ │ ├── test_engine.py │ │ │ ├── test_transformer.py │ │ │ └── test_transformers_ulysses.py │ │ ├── single_controller/ │ │ │ ├── __init__.py │ │ │ ├── base/ │ │ │ │ └── test_decorator.py │ │ │ ├── check_worker_alive/ │ │ │ │ └── main.py │ │ │ ├── detached_worker/ │ │ │ │ ├── README.md │ │ │ │ ├── client.py │ │ │ │ ├── run.sh │ │ │ │ └── server.py │ │ │ ├── test_auto_padding_on_cpu.py │ │ │ ├── test_colocated_workers.py │ │ │ ├── test_colocated_workers_fused.py │ │ │ ├── test_data_transfer.py │ │ │ ├── test_decorator_on_cpu.py │ │ │ ├── test_device_mesh_register.py │ │ │ ├── test_driverfunc_to_worker.py │ │ │ ├── test_fused_workers_on_cpu.py │ │ │ ├── test_high_level_scheduling_api.py │ │ │ ├── test_nested_worker.py │ │ │ ├── test_ray_collectives.py │ │ │ ├── test_ray_local_envs_on_cpu.py │ │ │ ├── test_ray_utils_on_cpu.py │ │ │ ├── test_rvdz.py │ │ │ ├── test_worker_group_basics.py │ │ │ └── test_worker_group_torch.py │ │ ├── special_distributed/ │ │ │ ├── README.md │ │ │ ├── run_all.sh │ │ │ ├── test_fsdp_ckpt.py │ │ │ ├── test_mcore_config_converter.py │ │ │ └── test_tensor_dict.py │ │ ├── special_e2e/ │ │ │ ├── README.md │ │ │ ├── __init__.py │ │ │ ├── check_custom_rwd_fn.py │ │ │ ├── check_results.py │ │ │ ├── envs/ │ │ │ │ ├── __init__.py │ │ │ │ └── digit_completion/ │ │ │ │ ├── __init__.py │ │ │ │ ├── task.py │ │ │ │ └── tokenizer.py │ │ │ ├── generation/ │ │ │ │ ├── run_gen_qwen05.sh │ │ │ │ └── run_gen_qwen05_server.sh │ │ │ ├── ppo_trainer/ 
│ │ │ │ ├── expert_parallel/ │ │ │ │ │ └── qwen2moe_minimal.json │ │ │ │ ├── run_function_reward.sh │ │ │ │ ├── run_model_reward.sh │ │ │ │ ├── run_single_gpu.sh │ │ │ │ └── run_single_gpu_with_engine.sh │ │ │ ├── run_dapo.sh │ │ │ ├── run_fully_async_policy.sh │ │ │ ├── run_genrm_remote.sh │ │ │ ├── run_geo3k_fsdp_sgl_multiturn_w_tool.sh │ │ │ ├── run_grpo_lora_with_merge.sh │ │ │ ├── run_gsm8k_fsdp_sgl_multiturn_sf_tool.sh │ │ │ ├── run_gsm8k_fsdp_sgl_multiturn_w_tool.sh │ │ │ ├── run_one_step_off_policy.sh │ │ │ ├── run_ppo_trainer_megatron.sh │ │ │ ├── run_prime.sh │ │ │ ├── run_r1_distill_qwen_aime24_eval.sh │ │ │ ├── run_spin.sh │ │ │ ├── run_sppo.sh │ │ │ ├── run_test.sh │ │ │ └── sft/ │ │ │ ├── compare_sft_engine_results.py │ │ │ ├── run_sft.sh │ │ │ ├── run_sft_engine_gsm8k.sh │ │ │ ├── test_sft_engine_all.sh │ │ │ └── test_sp_loss_match.py │ │ ├── special_npu/ │ │ │ ├── run_qwen2_5_05b_dapo.sh │ │ │ ├── run_qwen2_5_05b_grpo.sh │ │ │ ├── run_qwen2_5_05b_grpo_mindspeed.sh │ │ │ ├── run_qwen2_5_05b_sft_peft_sp2.sh │ │ │ ├── run_qwen2_5_vl_3b_npu.sh │ │ │ └── run_qwen3_06b_ppo.sh │ │ ├── special_sanity/ │ │ │ ├── check_api_docs.py │ │ │ ├── check_dataproto_usage.py │ │ │ ├── check_device_api_usage.py │ │ │ ├── check_docs_time_info.py │ │ │ ├── check_docstrings.py │ │ │ ├── check_license.py │ │ │ ├── check_pr_description.py │ │ │ ├── check_pr_title.py │ │ │ ├── test_config_docs.py │ │ │ ├── test_import.py │ │ │ ├── type_coverage_check.py │ │ │ ├── validate_imported_docs.py │ │ │ └── validate_structure.py │ │ ├── special_standalone/ │ │ │ ├── README.md │ │ │ └── test_memory_buffers.py │ │ ├── test_base_config_on_cpu.py │ │ ├── test_protocol_on_cpu.py │ │ ├── test_protocol_v2_on_cpu.py │ │ ├── trainer/ │ │ │ ├── __init__.py │ │ │ ├── config/ │ │ │ │ ├── __init__.py │ │ │ │ ├── legacy_ppo_megatron_trainer.yaml │ │ │ │ ├── legacy_ppo_trainer.yaml │ │ │ │ ├── test_algo_config_on_cpu.py │ │ │ │ └── test_legacy_config_on_cpu.py │ │ │ └── ppo/ │ │ │ ├── __init__.py │ │ │ ├── test_core_algos_on_cpu.py │ │ │ ├── test_metric_utils_on_cpu.py │ │ │ ├── test_rollout_is.py │ │ │ └── test_rollout_is_integration.py │ │ ├── utils/ │ │ │ ├── _test_module.py │ │ │ ├── dataset/ │ │ │ │ ├── test_create_rl_sampler_on_cpu.py │ │ │ │ ├── test_multiturn_sft_dataset_on_cpu.py │ │ │ │ ├── test_rl_collate_fn_on_cpu.py │ │ │ │ ├── test_rl_dataset_on_cpu.py │ │ │ │ └── test_sft_dataset_on_cpu.py │ │ │ ├── debug/ │ │ │ │ └── test_metrics.py │ │ │ ├── megatron/ │ │ │ │ └── test_pipeline_parallel.py │ │ │ ├── reward_score/ │ │ │ │ ├── reward_score/ │ │ │ │ │ └── test_sandbox_fusion_on_cpu.py │ │ │ │ └── test_sandbox_on_cpu.py │ │ │ ├── test_activation_offload.py │ │ │ ├── test_config_on_cpu.py │ │ │ ├── test_flops_counter.py │ │ │ ├── test_fs_on_cpu.py │ │ │ ├── test_groupwise.py │ │ │ ├── test_import_utils_on_cpu.py │ │ │ ├── test_linear_cross_entropy.py │ │ │ ├── test_mlflow_key_sanitization.py │ │ │ ├── test_model_on_cpu.py │ │ │ ├── test_nvtx_profile.py │ │ │ ├── test_rollout_skip_on_cpu.py │ │ │ ├── test_rollout_trace_on_cpu.py │ │ │ ├── test_seqlen_balancing.py │ │ │ ├── test_special_linear_cross_entropy_tp.py │ │ │ ├── test_special_mstx_profile.py │ │ │ ├── test_temp_env_on_cpu.py │ │ │ ├── test_timeout_decorator_cpu.py │ │ │ └── test_torch_functional.py │ │ └── workers/ │ │ ├── actor/ │ │ │ └── test_special_dp_actor.py │ │ ├── config/ │ │ │ ├── test_actor_config_on_cpu.py │ │ │ ├── test_critic_config_on_cpu.py │ │ │ ├── test_engine_config_on_cpu.py │ │ │ └── test_optim_config_on_cpu.py │ │ ├── critic/ │ │ │ 
└── test_special_dp_critic.py │ │ ├── reward_manager/ │ │ │ └── test_registry_on_cpu.py │ │ ├── rollout/ │ │ │ ├── perf/ │ │ │ │ └── vllm_async_rollout.py │ │ │ ├── resource/ │ │ │ │ └── tool_configs/ │ │ │ │ ├── mcp_server.json │ │ │ │ ├── mcp_tool_config │ │ │ │ ├── sandbox_fusion_tool_config │ │ │ │ └── search_tool_config │ │ │ ├── rollout_sglang/ │ │ │ │ └── test_http_server_engine.py │ │ │ ├── rollout_vllm/ │ │ │ │ ├── run_fsdp_vllm.py │ │ │ │ ├── test_vllm_model_rope_scaling.py │ │ │ │ └── test_vllm_spmd.py │ │ │ ├── test_hf_rollout.py │ │ │ ├── test_sglang_async_rollout_mcp_tools.py │ │ │ ├── test_sglang_async_rollout_multimodal_delta.py │ │ │ ├── test_sglang_async_rollout_search_tools.py │ │ │ ├── test_sglang_async_rollout_sf_tools.py │ │ │ ├── test_sglang_async_rollout_w_interaction.py │ │ │ ├── test_sglang_async_rollout_w_tools.py │ │ │ ├── test_sglang_async_rollout_w_tools_token_out.py │ │ │ ├── test_sglang_multi_interaction.py │ │ │ ├── test_sglang_rollout_sharding_manager.py │ │ │ ├── test_sglang_spmd.py │ │ │ └── utils_sglang.py │ │ ├── test_fsdp_attn_implementation.py │ │ └── test_fsdp_workers.py │ └── verl/ │ ├── __init__.py │ ├── base_config.py │ ├── experimental/ │ │ ├── __init__.py │ │ ├── agent_loop/ │ │ │ ├── __init__.py │ │ │ ├── agent_loop.py │ │ │ ├── single_turn_agent_loop.py │ │ │ ├── tool_agent_loop.py │ │ │ ├── tool_parser.py │ │ │ └── utils.py │ │ ├── dataset/ │ │ │ ├── __init__.py │ │ │ └── sampler.py │ │ ├── dynamic_dataset/ │ │ │ ├── __init__.py │ │ │ └── dynamicgen_dataset.py │ │ └── reward/ │ │ ├── __init__.py │ │ ├── reward_loop/ │ │ │ ├── __init__.py │ │ │ ├── base.py │ │ │ ├── dapo.py │ │ │ ├── naive.py │ │ │ └── registry.py │ │ ├── reward_manager.py │ │ ├── reward_model.py │ │ └── router/ │ │ ├── naive_router.py │ │ └── sglang_router.py │ ├── interactions/ │ │ ├── __init__.py │ │ ├── base.py │ │ ├── gsm8k_interaction.py │ │ ├── utils/ │ │ │ ├── __init__.py │ │ │ └── interaction_registry.py │ │ └── weather_interaction.py │ ├── model_merger/ │ │ ├── __init__.py │ │ ├── __main__.py │ │ ├── base_model_merger.py │ │ ├── fsdp_model_merger.py │ │ └── megatron_model_merger.py │ ├── models/ │ │ ├── README.md │ │ ├── __init__.py │ │ ├── llama/ │ │ │ ├── __init__.py │ │ │ └── megatron/ │ │ │ ├── __init__.py │ │ │ ├── checkpoint_utils/ │ │ │ │ ├── __init__.py │ │ │ │ ├── llama_loader.py │ │ │ │ ├── llama_loader_depracated.py │ │ │ │ └── llama_saver.py │ │ │ ├── layers/ │ │ │ │ ├── __init__.py │ │ │ │ ├── parallel_attention.py │ │ │ │ ├── parallel_decoder.py │ │ │ │ ├── parallel_linear.py │ │ │ │ ├── parallel_mlp.py │ │ │ │ └── parallel_rmsnorm.py │ │ │ └── modeling_llama_megatron.py │ │ ├── mcore/ │ │ │ ├── __init__.py │ │ │ ├── config_converter.py │ │ │ ├── loader.py │ │ │ ├── mbridge.py │ │ │ ├── model_forward.py │ │ │ ├── model_forward_1f1b_overlap.py │ │ │ ├── model_forward_fused.py │ │ │ ├── model_initializer.py │ │ │ ├── patch_v012.py │ │ │ ├── qwen2_5_vl/ │ │ │ │ ├── __init__.py │ │ │ │ ├── attention.py │ │ │ │ ├── model.py │ │ │ │ ├── rope_utils.py │ │ │ │ ├── vision_config.py │ │ │ │ ├── vision_model.py │ │ │ │ └── vision_transformer_block.py │ │ │ ├── readme.md │ │ │ ├── registry.py │ │ │ ├── saver.py │ │ │ ├── util.py │ │ │ └── weight_converter.py │ │ ├── qwen2/ │ │ │ ├── __init__.py │ │ │ └── megatron/ │ │ │ ├── __init__.py │ │ │ ├── checkpoint_utils/ │ │ │ │ ├── __init__.py │ │ │ │ ├── qwen2_loader.py │ │ │ │ ├── qwen2_loader_depracated.py │ │ │ │ └── qwen2_saver.py │ │ │ ├── layers/ │ │ │ │ ├── __init__.py │ │ │ │ ├── parallel_attention.py │ │ │ │ 
├── parallel_decoder.py │ │ │ │ ├── parallel_linear.py │ │ │ │ ├── parallel_mlp.py │ │ │ │ └── parallel_rmsnorm.py │ │ │ └── modeling_qwen2_megatron.py │ │ ├── registry.py │ │ ├── transformers/ │ │ │ ├── __init__.py │ │ │ ├── apertus.py │ │ │ ├── dense_common.py │ │ │ ├── glm4v.py │ │ │ ├── kimi_vl.py │ │ │ ├── llama.py │ │ │ ├── monkey_patch.py │ │ │ ├── npu_patch.py │ │ │ ├── qwen2.py │ │ │ ├── qwen2_vl.py │ │ │ └── qwen3_vl.py │ │ └── weight_loader_registry.py │ ├── protocol.py │ ├── py.typed │ ├── single_controller/ │ │ ├── __init__.py │ │ ├── base/ │ │ │ ├── __init__.py │ │ │ ├── decorator.py │ │ │ ├── worker.py │ │ │ └── worker_group.py │ │ └── ray/ │ │ ├── __init__.py │ │ └── base.py │ ├── third_party/ │ │ ├── __init__.py │ │ ├── sglang/ │ │ │ ├── __init__.py │ │ │ └── parallel_state.py │ │ ├── torch/ │ │ │ ├── __init__.py │ │ │ └── distributed/ │ │ │ ├── __init__.py │ │ │ ├── _state_dict_utils.py │ │ │ └── checkpoint/ │ │ │ ├── __init__.py │ │ │ └── state_dict.py │ │ └── vllm/ │ │ └── __init__.py │ ├── tools/ │ │ ├── __init__.py │ │ ├── base_tool.py │ │ ├── geo3k_tool.py │ │ ├── gsm8k_tool.py │ │ ├── image_zoom_in_tool.py │ │ ├── mcp_base_tool.py │ │ ├── mcp_search_tool.py │ │ ├── sandbox_fusion_tools.py │ │ ├── schemas.py │ │ ├── search_tool.py │ │ └── utils/ │ │ ├── __init__.py │ │ ├── mcp_clients/ │ │ │ ├── McpClientManager.py │ │ │ └── utils.py │ │ ├── search_r1_like_utils.py │ │ └── tool_registry.py │ ├── trainer/ │ │ ├── __init__.py │ │ ├── config/ │ │ │ ├── __init__.py │ │ │ ├── _generated_ppo_megatron_trainer.yaml │ │ │ ├── _generated_ppo_trainer.yaml │ │ │ ├── actor/ │ │ │ │ ├── actor.yaml │ │ │ │ ├── dp_actor.yaml │ │ │ │ └── megatron_actor.yaml │ │ │ ├── algorithm.py │ │ │ ├── config.py │ │ │ ├── critic/ │ │ │ │ ├── critic.yaml │ │ │ │ ├── dp_critic.yaml │ │ │ │ └── megatron_critic.yaml │ │ │ ├── data/ │ │ │ │ └── legacy_data.yaml │ │ │ ├── engine/ │ │ │ │ ├── fsdp.yaml │ │ │ │ └── megatron.yaml │ │ │ ├── evaluation.yaml │ │ │ ├── generation.yaml │ │ │ ├── model/ │ │ │ │ └── hf_model.yaml │ │ │ ├── npu_profile/ │ │ │ │ └── npu_profile.yaml │ │ │ ├── optim/ │ │ │ │ ├── fsdp.yaml │ │ │ │ └── megatron.yaml │ │ │ ├── ppo_megatron_trainer.yaml │ │ │ ├── ppo_trainer.yaml │ │ │ ├── ref/ │ │ │ │ ├── dp_ref.yaml │ │ │ │ ├── megatron_ref.yaml │ │ │ │ └── ref.yaml │ │ │ ├── reward_model/ │ │ │ │ ├── dp_reward_model.yaml │ │ │ │ ├── megatron_reward_model.yaml │ │ │ │ └── reward_model.yaml │ │ │ ├── rollout/ │ │ │ │ └── rollout.yaml │ │ │ ├── sft_trainer.yaml │ │ │ └── sft_trainer_engine.yaml │ │ ├── constants_ppo.py │ │ ├── fsdp_sft_trainer.py │ │ ├── main_eval.py │ │ ├── main_generation.py │ │ ├── main_generation_server.py │ │ ├── main_ppo.py │ │ ├── ppo/ │ │ │ ├── __init__.py │ │ │ ├── core_algos.py │ │ │ ├── metric_utils.py │ │ │ ├── mismatch_helper.py │ │ │ ├── ray_trainer.py │ │ │ ├── reward.py │ │ │ └── utils.py │ │ ├── runtime_env.yaml │ │ └── sft_trainer.py │ ├── utils/ │ │ ├── __init__.py │ │ ├── activation_offload.py │ │ ├── attention_utils.py │ │ ├── checkpoint/ │ │ │ ├── __init__.py │ │ │ ├── checkpoint_handler.py │ │ │ ├── checkpoint_manager.py │ │ │ ├── fsdp_checkpoint_manager.py │ │ │ └── megatron_checkpoint_manager.py │ │ ├── config.py │ │ ├── dataset/ │ │ │ ├── README.md │ │ │ ├── __init__.py │ │ │ ├── dataset_utils.py │ │ │ ├── multiturn_sft_dataset.py │ │ │ ├── onerec_dataset.py │ │ │ ├── rl_dataset.py │ │ │ ├── rm_dataset.py │ │ │ ├── sft_dataset.py │ │ │ └── vision_utils.py │ │ ├── debug/ │ │ │ ├── __init__.py │ │ │ ├── metrics.py │ │ │ ├── performance.py │ │ │ 
└── trajectory_tracker.py │ │ ├── device.py │ │ ├── distributed.py │ │ ├── experimental/ │ │ │ ├── __init__.py │ │ │ └── torch_functional.py │ │ ├── flops_counter.py │ │ ├── fs.py │ │ ├── fsdp_utils.py │ │ ├── groupwise.py │ │ ├── hdfs_io.py │ │ ├── import_utils.py │ │ ├── kernel/ │ │ │ ├── __init__.py │ │ │ ├── kernels.py │ │ │ └── linear_cross_entropy.py │ │ ├── logger/ │ │ │ ├── __init__.py │ │ │ └── aggregate_logger.py │ │ ├── logging_utils.py │ │ ├── megatron/ │ │ │ ├── __init__.py │ │ │ ├── dist_checkpointing.py │ │ │ ├── memory.py │ │ │ ├── optimizer.py │ │ │ ├── pipeline_parallel.py │ │ │ ├── sequence_parallel.py │ │ │ └── tensor_parallel.py │ │ ├── megatron_utils.py │ │ ├── memory_buffer.py │ │ ├── memory_utils.py │ │ ├── metric/ │ │ │ ├── __init__.py │ │ │ └── utils.py │ │ ├── model.py │ │ ├── net_utils.py │ │ ├── npu_utils.py │ │ ├── profiler/ │ │ │ ├── __init__.py │ │ │ ├── config.py │ │ │ ├── empty_annotations.py │ │ │ ├── mstx_profile.py │ │ │ ├── nvtx_profile.py │ │ │ ├── performance.py │ │ │ └── profile.py │ │ ├── py_functional.py │ │ ├── ray_utils.py │ │ ├── rendezvous/ │ │ │ ├── __init__.py │ │ │ └── ray_backend.py │ │ ├── reward_score/ │ │ │ ├── __init__.py │ │ │ ├── geo3k.py │ │ │ ├── gsm8k.py │ │ │ ├── math_batch.py │ │ │ ├── math_dapo.py │ │ │ ├── math_reward.py │ │ │ ├── math_verify.py │ │ │ ├── prime_code/ │ │ │ │ ├── README.md │ │ │ │ ├── __init__.py │ │ │ │ ├── testing_util.py │ │ │ │ └── utils.py │ │ │ ├── prime_math/ │ │ │ │ ├── __init__.py │ │ │ │ ├── grader.py │ │ │ │ └── math_normalize.py │ │ │ ├── sandbox_fusion/ │ │ │ │ ├── __init__.py │ │ │ │ └── utils.py │ │ │ └── search_r1_like_qa_em.py │ │ ├── rollout_skip.py │ │ ├── rollout_trace.py │ │ ├── seqlen_balancing.py │ │ ├── tensordict_utils.py │ │ ├── tokenizer.py │ │ ├── torch_dtypes.py │ │ ├── torch_functional.py │ │ ├── tracking.py │ │ ├── transferqueue_utils.py │ │ ├── transformers_compat.py │ │ ├── ulysses.py │ │ └── vllm/ │ │ ├── __init__.py │ │ ├── patch.py │ │ └── utils.py │ ├── version/ │ │ └── version │ └── workers/ │ ├── __init__.py │ ├── actor/ │ │ ├── __init__.py │ │ ├── base.py │ │ ├── dp_actor.py │ │ └── megatron_actor.py │ ├── config/ │ │ ├── __init__.py │ │ ├── actor.py │ │ ├── critic.py │ │ ├── engine.py │ │ ├── model.py │ │ ├── optimizer.py │ │ ├── reward_model.py │ │ └── rollout.py │ ├── critic/ │ │ ├── __init__.py │ │ ├── base.py │ │ ├── dp_critic.py │ │ └── megatron_critic.py │ ├── engine/ │ │ ├── __init__.py │ │ ├── base.py │ │ ├── fsdp/ │ │ │ ├── __init__.py │ │ │ ├── transformer_impl.py │ │ │ └── utils.py │ │ ├── megatron/ │ │ │ ├── __init__.py │ │ │ ├── transformer_impl.py │ │ │ └── utils.py │ │ ├── mindspeed/ │ │ │ ├── __init__.py │ │ │ └── transformer_impl.py │ │ └── utils.py │ ├── fsdp_workers.py │ ├── megatron_workers.py │ ├── reward_manager/ │ │ ├── __init__.py │ │ ├── abstract.py │ │ ├── batch.py │ │ ├── dapo.py │ │ ├── naive.py │ │ ├── prime.py │ │ └── registry.py │ ├── reward_model/ │ │ ├── __init__.py │ │ ├── base.py │ │ └── megatron/ │ │ ├── __init__.py │ │ └── reward_model.py │ ├── roles/ │ │ ├── __init__.py │ │ ├── actor.py │ │ ├── critic.py │ │ ├── hybrid_engine.py │ │ └── utils/ │ │ ├── __init__.py │ │ ├── losses.py │ │ └── padding.py │ ├── rollout/ │ │ ├── __init__.py │ │ ├── base.py │ │ ├── hf_rollout.py │ │ ├── naive/ │ │ │ ├── __init__.py │ │ │ └── naive_rollout.py │ │ ├── replica.py │ │ ├── schemas.py │ │ ├── sglang_rollout/ │ │ │ ├── __init__.py │ │ │ ├── async_sglang_server.py │ │ │ ├── http_server_engine.py │ │ │ ├── sglang_rollout.py │ │ │ └── utils.py │ │ ├── 
tokenizer.py │ │ ├── utils.py │ │ └── vllm_rollout/ │ │ ├── __init__.py │ │ ├── utils.py │ │ ├── vllm_async_server.py │ │ └── vllm_rollout_spmd.py │ └── sharding_manager/ │ ├── __init__.py │ ├── base.py │ ├── fsdp_sglang.py │ ├── fsdp_ulysses.py │ ├── fsdp_vllm.py │ ├── megatron_sglang.py │ └── megatron_vllm.py └── verl_rl/ ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── README_ORIGINAL.md ├── deploy_env.sh ├── docker/ │ ├── Apptainerfile.rocm │ ├── Dockerfile.extention.awsefa │ ├── Dockerfile.ngc.vllm │ ├── Dockerfile.ngc.vllm0.8 │ ├── Dockerfile.ngc.vllm0.8.sagemaker │ ├── Dockerfile.rocm │ ├── Dockerfile.rocm_verl-0.3.0.post1 │ ├── Dockerfile.rocm_verl-0.4.1 │ ├── Dockerfile.sglang │ ├── Dockerfile.vemlp.vllm.te │ ├── Dockerfile.vllm.sglang.megatron.deepseek │ ├── README.md │ ├── verl0.4-cu124-torch2.6-fa2.7.4/ │ │ ├── Dockerfile.app.sglang.vllm.mcore0.12 │ │ ├── Dockerfile.app.sglang.vllm.mcore0.12.deepep │ │ ├── Dockerfile.app.sglang.vllm.mcore0.13.preview │ │ ├── Dockerfile.app.vllm.mcore0.12 │ │ ├── Dockerfile.app.vllm.mcore0.12.deepep │ │ ├── Dockerfile.app.vllm.mcore0.13.preview │ │ ├── Dockerfile.base │ │ └── README.md │ ├── verl0.5-cu126-torch2.7-fa2.7.4/ │ │ ├── Dockerfile.app.sglang.mcore0.12 │ │ ├── Dockerfile.app.vllm.mcore0.12 │ │ ├── Dockerfile.base.torch2.7.0 │ │ ├── Dockerfile.base.torch2.7.1 │ │ └── README.md │ ├── verl0.5-cu126-torch2.7.1-fa2.8.0/ │ │ ├── Dockerfile.app.sglang.mcore0.12 │ │ ├── Dockerfile.app.sglang.mcore0.13.preview │ │ ├── Dockerfile.base │ │ └── README.md │ └── verl0.5-preview-cu128-torch2.7.1-fa2.8.0/ │ ├── Dockerfile.app.sglang.megatron │ ├── Dockerfile.base │ └── README.md ├── docs/ │ ├── Makefile │ ├── README.md │ ├── README_vllm0.7.md │ ├── README_vllm0.8.md │ ├── _static/ │ │ └── js/ │ │ └── runllm-widget.js │ ├── advance/ │ │ ├── agent_loop.rst │ │ ├── checkpoint.rst │ │ ├── dpo_extension.rst │ │ ├── fsdp_extension.rst │ │ ├── megatron_extension.rst │ │ ├── one_step_off.md │ │ ├── placement.rst │ │ ├── ppo_lora.rst │ │ ├── rollout_trace.rst │ │ └── rope.rst │ ├── algo/ │ │ ├── baseline.md │ │ ├── dapo.md │ │ ├── entropy.md │ │ ├── gpg.md │ │ ├── grpo.md │ │ ├── opo.md │ │ ├── ppo.md │ │ ├── spin.md │ │ └── sppo.md │ ├── amd_tutorial/ │ │ ├── amd_build_dockerfile_page.rst │ │ └── amd_vllm_page.rst │ ├── api/ │ │ ├── data.rst │ │ ├── single_controller.rst │ │ ├── trainer.rst │ │ └── utils.rst │ ├── ascend_tutorial/ │ │ ├── ascend_profiling.rst │ │ ├── ascend_profiling_en.rst │ │ └── ascend_quick_start.rst │ ├── conf.py │ ├── examples/ │ │ ├── config.rst │ │ ├── gsm8k_example.rst │ │ ├── multi_modal_example.rst │ │ ├── ppo_code_architecture.rst │ │ └── sandbox_fusion_example.rst │ ├── faq/ │ │ └── faq.rst │ ├── hybrid_flow.rst │ ├── index.rst │ ├── perf/ │ │ ├── device_tuning.rst │ │ ├── dpsk.md │ │ ├── nsight_profiling.md │ │ └── perf_tuning.rst │ ├── preparation/ │ │ ├── prepare_data.rst │ │ └── reward_function.rst │ ├── requirements-docs.txt │ ├── sglang_multiturn/ │ │ ├── interaction_system.rst │ │ ├── multiturn.rst │ │ ├── sandbox_fusion.rst │ │ └── search_tool_example.rst │ ├── single_controller.rst │ ├── start/ │ │ ├── agentic_rl.rst │ │ ├── install.rst │ │ ├── more_resources.rst │ │ ├── multinode.rst │ │ ├── quickstart.rst │ │ └── ray_debug_tutorial.rst │ └── workers/ │ ├── fsdp_workers.rst │ ├── megatron_workers.rst │ ├── ray_trainer.rst │ └── sglang_worker.rst ├── examples/ │ ├── data_preprocess/ │ │ ├── aime2024_multiturn_w_tool.py │ │ ├── dapo_multiturn_w_tool.py │ │ ├── full_hh_rlhf.py │ │ ├── geo3k.py │ │ ├── 
geo3k_multiturn_w_tool.py │ │ ├── gsm8k.py │ │ ├── gsm8k_multiturn_w_interaction.py │ │ ├── gsm8k_multiturn_w_tool.py │ │ ├── gsm8k_tool_agent_loop.py │ │ ├── hellaswag.py │ │ ├── math_dataset.py │ │ ├── multiturn.py │ │ └── preprocess_search_r1_dataset.py │ ├── generation/ │ │ ├── run_deepseek7b_mutli_node.sh │ │ └── run_deepseek_v2_lite_math.sh │ ├── gpg_trainer/ │ │ ├── gpg.md │ │ ├── run_qwen2-7b_math.sh │ │ └── run_qwen2-7b_math_megatron.sh │ ├── grpo_trainer/ │ │ ├── README.md │ │ ├── run_deepseek671b_math_megatron.sh │ │ ├── run_deepseek7b_llm.sh │ │ ├── run_deepseek7b_llm_math.sh │ │ ├── run_deepseek7b_llm_math_megatron.sh │ │ ├── run_deepseek7b_llm_seq_balance.sh │ │ ├── run_minicpmo2_6.sh │ │ ├── run_moonlight16b_math_megatron.sh │ │ ├── run_qwen2-7b.sh │ │ ├── run_qwen2-7b_math.sh │ │ ├── run_qwen2-7b_math_megatron.sh │ │ ├── run_qwen2-7b_seq_balance.sh │ │ ├── run_qwen2-7b_seq_balance_math_megatron.sh │ │ ├── run_qwen2-7b_sgl_megatron.sh │ │ ├── run_qwen2_5-3b_gsm8k_grpo_lora.sh │ │ ├── run_qwen2_5-7b_math_megatron_diff_tp.sh │ │ ├── run_qwen2_5_32b_grpo_npu.sh │ │ ├── run_qwen2_5_7b_grpo_discrete_prof_npu.sh │ │ ├── run_qwen2_5_7b_grpo_e2e_prof_npu.sh │ │ ├── run_qwen2_5_7b_grpo_npu.sh │ │ ├── run_qwen2_5_vl-7b-megatron.sh │ │ ├── run_qwen2_5_vl-7b.sh │ │ ├── run_qwen2_5_vl-7b_lora.sh │ │ ├── run_qwen2_5_vl-7b_seq_balance.sh │ │ ├── run_qwen2_5_vl_32b_npu.sh │ │ ├── run_qwen2_5_vl_3b_npu.sh │ │ ├── run_qwen2_5_vl_7b_npu.sh │ │ ├── run_qwen3-236b_megatron.sh │ │ ├── run_qwen3-8b.sh │ │ └── run_qwen3moe-30b_megatron.sh │ ├── ppo_trainer/ │ │ ├── README.md │ │ ├── run_deepseek7b_llm.sh │ │ ├── run_deepseek7b_llm_modelscope.sh │ │ ├── run_deepseek7b_llm_pfppo.sh │ │ ├── run_deepseek7b_llm_sandbox_fusion.sh │ │ ├── run_deepseek7b_llm_sp2.sh │ │ ├── run_deepseek_full_hh_rlhf.sh │ │ ├── run_deepseek_math_gsm8k_megatron.sh │ │ ├── run_deepseek_math_gsm8k_megatron_nsys.sh │ │ ├── run_gemma.sh │ │ ├── run_moonlight16b_a3b_gsm8k_megatron.sh │ │ ├── run_qwen1.5_moe_a2.7b-gsm8k_megatron.sh │ │ ├── run_qwen2-7b_math_gsm8k_megatron.sh │ │ ├── run_qwen2-7b_rm.sh │ │ ├── run_qwen2-7b_rm_seq_balance.sh │ │ ├── run_qwen2-7b_rm_seq_balance_fused_kernels.sh │ │ ├── run_qwen2-7b_rm_seq_balance_nsys.sh │ │ ├── run_qwen2-7b_seq_balance.sh │ │ ├── run_qwen2-7b_sglang_seq_balance.sh │ │ └── run_qwen2.5-32b.sh │ ├── ray/ │ │ └── tutorial.ipynb │ ├── reinforce_plus_plus_trainer/ │ │ ├── run_qwen2-7b_math_rf.sh │ │ └── run_qwen2-7b_math_rf_baseline.sh │ ├── remax_trainer/ │ │ ├── run_qwen2.5-3b_seq_balance.sh │ │ └── run_qwen2.5-7b_seq_balance.sh │ ├── rloo_trainer/ │ │ └── run_qwen2-7b.sh │ ├── sft/ │ │ ├── gsm8k/ │ │ │ ├── run_deepseek_6b7.sh │ │ │ ├── run_gemma_2b.sh │ │ │ ├── run_gemma_7b.sh │ │ │ ├── run_qwen2_5_05b_sft_peft_sp2_npu.sh │ │ │ ├── run_qwen_05_peft.sh │ │ │ ├── run_qwen_05_sp2.sh │ │ │ └── run_qwen_05_sp2_liger.sh │ │ └── multiturn/ │ │ └── run_qwen_05_sp2.sh │ ├── sglang_multiturn/ │ │ ├── README.md │ │ ├── config/ │ │ │ ├── geo3k_multiturn_grpo.yaml │ │ │ ├── geo3k_multiturn_megatron_grpo.yaml │ │ │ ├── gsm8k_multiturn_grpo.yaml │ │ │ ├── gsm8k_multiturn_grpo_w_interaction.yaml │ │ │ ├── gsm8k_multiturn_megatron_grpo.yaml │ │ │ ├── interaction_config/ │ │ │ │ └── gsm8k_interaction_config.yaml │ │ │ ├── retool_multiturn_grpo.yaml │ │ │ ├── search_multiturn_grpo.yaml │ │ │ └── tool_config/ │ │ │ ├── geo3k_tool_config.yaml │ │ │ ├── gsm8k_tool_config.yaml │ │ │ ├── mcp_server.json │ │ │ ├── mcp_tool_config.yaml │ │ │ ├── sandbox_fusion_tool_config.yaml │ │ │ └── search_tool_config.yaml │ 
│ ├── geo3k/ │ │ │ ├── run_qwen2.5-3b_geo3k_multiturn.sh │ │ │ ├── run_qwen2.5-3b_geo3k_multiturn_4xgpu.sh │ │ │ └── run_qwen2.5-3b_megatron_geo3k_multiturn.sh │ │ ├── run_qwen0.5b_gsm8k_multiturn_curriculum.sh │ │ ├── run_qwen2.5-0.5b_gsm8k_multiturn_w_interaction.sh │ │ ├── run_qwen2.5-3b_gsm8k_multiturn.sh │ │ ├── run_qwen2.5-3b_gsm8k_multiturn_4xgpu.sh │ │ ├── run_qwen2.5-3b_gsm8k_tool_agent_mlflow.sh │ │ ├── run_qwen2.5-3b_megatron_gsm8k_multiturn.sh │ │ ├── run_qwen3-4b_gsm8k_multiturn.sh │ │ └── search_r1_like/ │ │ ├── local_dense_retriever/ │ │ │ ├── download.py │ │ │ └── retrieval_server.py │ │ └── run_qwen2.5-3b_instruct_search_multiturn.sh │ ├── slurm/ │ │ └── ray_on_slurm.slurm │ ├── split_placement/ │ │ ├── README.md │ │ ├── config/ │ │ │ └── ppo_trainer_split.yaml │ │ ├── main_ppo_split.py │ │ ├── run_deepseek7b_llm.sh │ │ └── split_monkey_patch.py │ └── tuning/ │ ├── 0.5b/ │ │ └── qwen2-0.5b_grpo-lora_1_h100_fsdp_vllm.sh │ ├── 1.5b/ │ │ └── qwen2-1.5b_grpo-lora_1_h100_fsdp_vllm.sh │ ├── 14b/ │ │ ├── qwen2-14b_grpo-lora_2_h100_fsdp_vllm.sh │ │ └── qwen2_14b_grpo_4_h800_fsdp_vllm.sh │ ├── 32b/ │ │ ├── qwen2-32b_grpo-lora_4_h100_fsdp_vllm.sh │ │ └── qwen2_32B_grpo_8_h20_megatron_vllm.sh │ ├── 3b/ │ │ └── qwen2-3b_grpo-lora_1_h100_fsdp_vllm.sh │ ├── 70b/ │ │ ├── qwen2-70b_grpo_32_h20_fsdp_vllm.sh │ │ ├── qwen2-70b_grpo_32_h800_fsdp_vllm.sh │ │ └── qwen2-72b_grpo-lora_8_h100_fsdp_vllm.sh │ └── 7b/ │ ├── qwen2-7b_grpo-lora_1_h100_fsdp_vllm.sh │ └── qwen2-7b_grpo_2_h800_fsdp_vllm.sh ├── init_ray.sh ├── init_ray_cluster.sh ├── pyproject.toml ├── recipe/ │ ├── README.md │ ├── char_count/ │ │ ├── README.md │ │ ├── create_dataset.py │ │ ├── reward_function.py │ │ ├── train_grpo.sh │ │ └── train_sft.sh │ ├── dapo/ │ │ ├── README.md │ │ ├── config/ │ │ │ └── dapo_trainer.yaml │ │ ├── dapo_ray_trainer.py │ │ ├── main_dapo.py │ │ ├── prepare_dapo_data.sh │ │ ├── run_dapo_early_qwen2.5_32b.sh │ │ ├── run_dapo_qwen2.5_32b.sh │ │ ├── run_dapo_wo_ds_qwen2.5_32b.sh │ │ ├── runtime_env.yaml │ │ ├── test_dapo_7b.sh │ │ ├── test_dapo_7b_math.sh │ │ ├── test_dapo_7b_math_lora.sh │ │ ├── test_dapo_7b_math_megatron.sh │ │ ├── test_dapo_dspk_671b_megatron.sh │ │ ├── test_dapo_qwen3_30b_math.sh │ │ └── test_dapo_qwen3_30b_math_single_node.sh │ ├── entropy/ │ │ ├── 32b_clip_cov.sh │ │ ├── 32b_kl_cov.sh │ │ ├── 32b_kl_cov_mininbsz.sh │ │ ├── 7b_clip_cov.sh │ │ ├── 7b_kl_cov.sh │ │ ├── README.md │ │ ├── config/ │ │ │ └── entropy_trainer.yaml │ │ ├── entropy_ray_trainer.py │ │ ├── main_entropy.py │ │ ├── reward.py │ │ └── reward_score/ │ │ ├── __init__.py │ │ └── entropy_math/ │ │ ├── __init__.py │ │ ├── grader.py │ │ └── math_normalize.py │ ├── genrm_remote/ │ │ ├── README.md │ │ ├── reward_function.py │ │ └── run_genrm_remote.sh │ ├── langgraph_agent/ │ │ ├── __init__.py │ │ ├── chat_model.py │ │ ├── example/ │ │ │ ├── README.md │ │ │ ├── agent.yaml │ │ │ ├── create_dataset.py │ │ │ ├── math_expression.py │ │ │ └── run_qwen2.5_3b.sh │ │ ├── react_agent_loop.py │ │ └── test_react_agent_loop.py │ ├── minicpmo/ │ │ └── rl_dataset.py │ ├── one_step_off_policy/ │ │ ├── README.md │ │ ├── config/ │ │ │ ├── one_step_off_ppo_megatron_trainer.yaml │ │ │ └── one_step_off_ppo_trainer.yaml │ │ ├── dapo_7b_math_fsdp2_4_12.sh │ │ ├── dapo_7b_math_fsdp2_colocate.sh │ │ ├── dapo_7b_math_megatron_4_12.sh │ │ ├── dapo_7b_math_megatron_colocate.sh │ │ ├── fsdp_workers.py │ │ ├── grpo_0.6b_gsm8k_fsdp2_2_6.sh │ │ ├── grpo_3b_gsm8k_fsdp2_2_6.sh │ │ ├── main_ppo.py │ │ ├── megatron_workers.py │ │ ├── ray_trainer.py │ │ └── 
vllm_sharding_manager.py │ ├── onerec/ │ │ ├── main_onerec_ppo.py │ │ ├── onerec_fsdp_workers.py │ │ ├── onerec_ray_trainer.py │ │ ├── onerec_recipe.py │ │ ├── onerec_vllm_rollout.py │ │ └── run_grpo.sh │ ├── prime/ │ │ ├── __init__.py │ │ ├── config/ │ │ │ └── prime_trainer.yaml │ │ ├── main_prime.py │ │ ├── prime_core_algos.py │ │ ├── prime_dp_rm.py │ │ ├── prime_fsdp_workers.py │ │ ├── prime_ray_trainer.py │ │ ├── run_prime_qwen.sh │ │ └── run_prime_qwen_code.sh │ ├── r1/ │ │ ├── README.md │ │ ├── __init__.py │ │ ├── config/ │ │ │ └── evaluation.yaml │ │ ├── data_process.py │ │ ├── main_eval.py │ │ ├── reward_score.py │ │ ├── run_r1_distill_qwen.sh │ │ └── tasks/ │ │ ├── __init__.py │ │ ├── gpqa.py │ │ ├── livecodebench.py │ │ └── math.py │ ├── retool/ │ │ ├── retool.py │ │ ├── retool_multi_turn_sft_preprocess.py │ │ ├── retool_sft_preprocess.py │ │ ├── run_qwen2-32b_sft.sh │ │ ├── run_qwen2.5_32b_sp8.sh │ │ ├── run_qwen2.5_7b_sp4.sh │ │ ├── run_qwen3_4b_sp4.sh │ │ └── sandbox_fusion_tool_config.yaml │ ├── spin/ │ │ ├── README.md │ │ ├── config/ │ │ │ └── spin_trainer.yaml │ │ ├── core_algos.py │ │ ├── dp_actor.py │ │ ├── fsdp_workers.py │ │ ├── main_spin.py │ │ ├── run_spin.sh │ │ └── spin_trainer.py │ └── sppo/ │ ├── README.md │ ├── __init__.py │ ├── config/ │ │ └── sppo_trainer.yaml │ ├── dp_actor.py │ ├── main_sppo.py │ ├── run_qwen2.5-7b_rm.sh │ ├── sppo_ray_trainer.py │ └── sppo_worker.py ├── requirements-npu.txt ├── requirements.txt ├── requirements_sglang.txt ├── scripts/ │ ├── __init__.py │ ├── converter_hf_to_mcore.py │ ├── diagnose.py │ ├── generate_trainer_config.sh │ ├── init_random_model.py │ ├── install_vllm_sglang_mcore.sh │ ├── legacy_model_merger.py │ ├── print_cfg.py │ └── rollout_viewer.py ├── setup.py ├── tests/ │ ├── README.md │ ├── __init__.py │ ├── experimental/ │ │ └── agent_loop/ │ │ ├── agent_utils.py │ │ └── test_basic_agent_loop.py │ ├── interactions/ │ │ ├── __init__.py │ │ ├── test_gsm8k_interaction.py │ │ └── test_interaction_registry.py │ ├── kill_github_tests.sh │ ├── models/ │ │ ├── test_transformer.py │ │ └── test_transformers_ulysses.py │ ├── single_controller/ │ │ ├── __init__.py │ │ ├── base/ │ │ │ └── test_decorator.py │ │ ├── check_worker_alive/ │ │ │ └── main.py │ │ ├── detached_worker/ │ │ │ ├── README.md │ │ │ ├── client.py │ │ │ ├── run.sh │ │ │ └── server.py │ │ ├── test_auto_padding_on_cpu.py │ │ ├── test_colocated_workers.py │ │ ├── test_colocated_workers_fused.py │ │ ├── test_data_transfer.py │ │ ├── test_decorator_on_cpu.py │ │ ├── test_driverfunc_to_worker.py │ │ ├── test_fused_workers_on_cpu.py │ │ ├── test_high_level_scheduling_api.py │ │ ├── test_ray_collectives.py │ │ ├── test_ray_local_envs_on_cpu.py │ │ ├── test_ray_utils_on_cpu.py │ │ ├── test_rvdz.py │ │ ├── test_worker_group_basics.py │ │ └── test_worker_group_torch.py │ ├── special_distributed/ │ │ ├── README.md │ │ ├── run_all.sh │ │ ├── test_fsdp_ckpt.py │ │ └── test_tensor_dict.py │ ├── special_e2e/ │ │ ├── README.md │ │ ├── __init__.py │ │ ├── check_custom_rwd_fn.py │ │ ├── check_results.py │ │ ├── envs/ │ │ │ ├── __init__.py │ │ │ └── digit_completion/ │ │ │ ├── __init__.py │ │ │ ├── task.py │ │ │ └── tokenizer.py │ │ ├── generation/ │ │ │ └── run_gen_qwen05.sh │ │ ├── ppo_trainer/ │ │ │ ├── expert_parallel/ │ │ │ │ └── qwen2moe_minimal.json │ │ │ ├── run_function_reward.sh │ │ │ ├── run_model_reward.sh │ │ │ ├── run_single_gpu.sh │ │ │ └── run_single_gpu_with_engine.sh │ │ ├── run_dapo.sh │ │ ├── run_genrm_remote.sh │ │ ├── run_geo3k_fsdp_sgl_multiturn_w_tool.sh │ │ ├── 
run_grpo_lora_with_merge.sh │ │ ├── run_gsm8k_fsdp_sgl_multiturn_sf_tool.sh │ │ ├── run_gsm8k_fsdp_sgl_multiturn_w_tool.sh │ │ ├── run_one_step_off_policy.sh │ │ ├── run_ppo_trainer_megatron.sh │ │ ├── run_prime.sh │ │ ├── run_r1_distill_qwen_aime24_eval.sh │ │ ├── run_spin.sh │ │ ├── run_sppo.sh │ │ ├── run_test.sh │ │ └── sft/ │ │ ├── run_sft.sh │ │ └── test_sp_loss_match.py │ ├── special_npu/ │ │ ├── run_qwen2_5_05b_dapo.sh │ │ ├── run_qwen2_5_05b_grpo.sh │ │ ├── run_qwen2_5_05b_sft_peft_sp2.sh │ │ └── run_qwen2_5_vl_3b_npu.sh │ ├── special_sanity/ │ │ ├── check_api_docs.py │ │ ├── check_device_api_usage.py │ │ ├── check_docs_time_info.py │ │ ├── check_docstrings.py │ │ ├── check_license.py │ │ ├── check_pr_description.py │ │ ├── check_pr_title.py │ │ ├── test_config_docs.py │ │ ├── test_import.py │ │ ├── type_coverage_check.py │ │ ├── validate_imported_docs.py │ │ └── validate_structure.py │ ├── special_standalone/ │ │ ├── README.md │ │ └── test_memory_buffers.py │ ├── test_base_config_on_cpu.py │ ├── test_protocol_on_cpu.py │ ├── tools/ │ │ └── test_base_tool_on_cpu.py │ ├── trainer/ │ │ ├── __init__.py │ │ ├── config/ │ │ │ ├── __init__.py │ │ │ ├── legacy_ppo_megatron_trainer.yaml │ │ │ ├── legacy_ppo_trainer.yaml │ │ │ ├── test_algo_config_on_cpu.py │ │ │ ├── test_critic_config_on_cpu.py │ │ │ └── test_legacy_config_on_cpu.py │ │ └── ppo/ │ │ ├── __init__.py │ │ ├── test_core_algos_on_cpu.py │ │ └── test_metric_utils_on_cpu.py │ ├── utils/ │ │ ├── _test_module.py │ │ ├── dataset/ │ │ │ ├── test_create_rl_sampler_on_cpu.py │ │ │ ├── test_multiturn_sft_dataset_on_cpu.py │ │ │ ├── test_rl_dataset_on_cpu.py │ │ │ └── test_sft_dataset_on_cpu.py │ │ ├── megatron/ │ │ │ └── test_pipeline_parallel.py │ │ ├── reward_score/ │ │ │ ├── reward_score/ │ │ │ │ └── test_sandbox_fusion_on_cpu.py │ │ │ └── test_sandbox_on_cpu.py │ │ ├── test_activation_offload.py │ │ ├── test_config_on_cpu.py │ │ ├── test_flops_counter.py │ │ ├── test_fs_on_cpu.py │ │ ├── test_import_utils_on_cpu.py │ │ ├── test_linear_cross_entropy.py │ │ ├── test_linear_cross_entropy_tp.py │ │ ├── test_model_on_cpu.py │ │ ├── test_nvtx_profile.py │ │ ├── test_rollout_trace_on_cpu.py │ │ ├── test_seqlen_balancing.py │ │ ├── test_temp_env_on_cpu.py │ │ ├── test_timeout_decorator_cpu.py │ │ └── test_torch_functional.py │ └── workers/ │ ├── reward_manager/ │ │ └── test_registry_on_cpu.py │ └── rollout/ │ ├── async_rollout_utils.py │ ├── perf/ │ │ └── vllm_async_rollout.py │ ├── resource/ │ │ └── tool_configs/ │ │ ├── mcp_server.json │ │ ├── mcp_tool_config │ │ ├── sandbox_fusion_tool_config │ │ └── search_tool_config │ ├── rollout_vllm/ │ │ ├── run_fsdp_vllm.py │ │ ├── test_vllm_chat_scheduler.py │ │ ├── test_vllm_model_rope_scaling.py │ │ └── test_vllm_spmd.py │ ├── test_async_sglang_server_on_cpu.py │ ├── test_custom_completion_callback.py │ ├── test_hf_rollout.py │ ├── test_sglang_async_rollout_mcp_tools.py │ ├── test_sglang_async_rollout_multimodal_delta.py │ ├── test_sglang_async_rollout_search_tools.py │ ├── test_sglang_async_rollout_sf_tools.py │ ├── test_sglang_async_rollout_w_interaction.py │ ├── test_sglang_async_rollout_w_tools.py │ ├── test_sglang_multi_interaction.py │ ├── test_sglang_rollout_sharding_manager.py │ ├── test_sglang_spmd.py │ └── utils_sglang.py └── verl/ ├── __init__.py ├── base_config.py ├── experimental/ │ ├── __init__.py │ ├── agent_loop/ │ │ ├── __init__.py │ │ ├── agent_loop.py │ │ ├── single_turn_agent_loop.py │ │ ├── tool_agent_loop.py │ │ └── tool_parser.py │ ├── dataset/ │ │ ├── __init__.py │ │ └── 
sampler.py │ └── dynamic_dataset/ │ ├── __init__.py │ └── dynamicgen_dataset.py ├── interactions/ │ ├── __init__.py │ ├── base.py │ ├── gsm8k_interaction.py │ └── utils/ │ ├── __init__.py │ └── interaction_registry.py ├── model_merger/ │ ├── __init__.py │ ├── __main__.py │ ├── base_model_merger.py │ ├── fsdp_model_merger.py │ └── megatron_model_merger.py ├── models/ │ ├── README.md │ ├── __init__.py │ ├── llama/ │ │ ├── __init__.py │ │ └── megatron/ │ │ ├── __init__.py │ │ ├── checkpoint_utils/ │ │ │ ├── __init__.py │ │ │ ├── llama_loader.py │ │ │ ├── llama_loader_depracated.py │ │ │ └── llama_saver.py │ │ ├── layers/ │ │ │ ├── __init__.py │ │ │ ├── parallel_attention.py │ │ │ ├── parallel_decoder.py │ │ │ ├── parallel_linear.py │ │ │ ├── parallel_mlp.py │ │ │ └── parallel_rmsnorm.py │ │ └── modeling_llama_megatron.py │ ├── mcore/ │ │ ├── __init__.py │ │ ├── config_converter.py │ │ ├── loader.py │ │ ├── mbridge.py │ │ ├── model_forward.py │ │ ├── model_forward_fused.py │ │ ├── model_initializer.py │ │ ├── patch_v012.py │ │ ├── qwen2_5_vl/ │ │ │ ├── __init__.py │ │ │ ├── attention.py │ │ │ ├── model.py │ │ │ ├── rope_utils.py │ │ │ ├── vision_config.py │ │ │ ├── vision_model.py │ │ │ └── vision_transformer_block.py │ │ ├── readme.md │ │ ├── registry.py │ │ ├── saver.py │ │ ├── util.py │ │ └── weight_converter.py │ ├── qwen2/ │ │ ├── __init__.py │ │ └── megatron/ │ │ ├── __init__.py │ │ ├── checkpoint_utils/ │ │ │ ├── __init__.py │ │ │ ├── qwen2_loader.py │ │ │ ├── qwen2_loader_depracated.py │ │ │ └── qwen2_saver.py │ │ ├── layers/ │ │ │ ├── __init__.py │ │ │ ├── parallel_attention.py │ │ │ ├── parallel_decoder.py │ │ │ ├── parallel_linear.py │ │ │ ├── parallel_mlp.py │ │ │ └── parallel_rmsnorm.py │ │ └── modeling_qwen2_megatron.py │ ├── registry.py │ ├── transformers/ │ │ ├── __init__.py │ │ ├── dense_common.py │ │ ├── kimi_vl.py │ │ ├── llama.py │ │ ├── monkey_patch.py │ │ ├── npu_patch.py │ │ ├── qwen2.py │ │ ├── qwen2_5_vl.py │ │ └── qwen2_vl.py │ └── weight_loader_registry.py ├── protocol.py ├── py.typed ├── single_controller/ │ ├── __init__.py │ ├── base/ │ │ ├── __init__.py │ │ ├── decorator.py │ │ ├── megatron/ │ │ │ ├── __init__.py │ │ │ ├── worker.py │ │ │ └── worker_group.py │ │ ├── register_center/ │ │ │ ├── __init__.py │ │ │ └── ray.py │ │ ├── worker.py │ │ └── worker_group.py │ └── ray/ │ ├── __init__.py │ ├── base.py │ └── megatron.py ├── third_party/ │ ├── __init__.py │ ├── sglang/ │ │ ├── __init__.py │ │ └── parallel_state.py │ ├── torch/ │ │ ├── __init__.py │ │ └── distributed/ │ │ ├── __init__.py │ │ ├── _state_dict_utils.py │ │ └── checkpoint/ │ │ ├── __init__.py │ │ └── state_dict.py │ └── vllm/ │ └── __init__.py ├── tools/ │ ├── __init__.py │ ├── base_tool.py │ ├── geo3k_tool.py │ ├── gsm8k_tool.py │ ├── mcp_base_tool.py │ ├── mcp_search_tool.py │ ├── sandbox_fusion_tools.py │ ├── schemas.py │ ├── search_tool.py │ └── utils/ │ ├── __init__.py │ ├── mcp_clients/ │ │ ├── McpClientManager.py │ │ └── utils.py │ ├── search_r1_like_utils.py │ └── tool_registry.py ├── trainer/ │ ├── __init__.py │ ├── config/ │ │ ├── __init__.py │ │ ├── _generated_ppo_megatron_trainer.yaml │ │ ├── _generated_ppo_trainer.yaml │ │ ├── actor/ │ │ │ ├── actor.yaml │ │ │ ├── dp_actor.yaml │ │ │ └── megatron_actor.yaml │ │ ├── algorithm.py │ │ ├── config.py │ │ ├── critic/ │ │ │ ├── critic.yaml │ │ │ ├── dp_critic.yaml │ │ │ └── megatron_critic.yaml │ │ ├── data/ │ │ │ └── legacy_data.yaml │ │ ├── evaluation.yaml │ │ ├── generation.yaml │ │ ├── npu_profile/ │ │ │ └── npu_profile.yaml │ │ ├── 
ppo_megatron_trainer.yaml │ │ ├── ppo_trainer.yaml │ │ ├── ref/ │ │ │ ├── dp_ref.yaml │ │ │ ├── megatron_ref.yaml │ │ │ └── ref.yaml │ │ ├── reward_model/ │ │ │ ├── dp_reward_model.yaml │ │ │ ├── megatron_reward_model.yaml │ │ │ └── reward_model.yaml │ │ ├── rollout/ │ │ │ └── rollout.yaml │ │ └── sft_trainer.yaml │ ├── constants_ppo.py │ ├── fsdp_sft_trainer.py │ ├── main_eval.py │ ├── main_generation.py │ ├── main_ppo.py │ ├── ppo/ │ │ ├── __init__.py │ │ ├── core_algos.py │ │ ├── metric_utils.py │ │ ├── ray_trainer.py │ │ └── reward.py │ └── runtime_env.yaml ├── utils/ │ ├── __init__.py │ ├── activation_offload.py │ ├── checkpoint/ │ │ ├── __init__.py │ │ ├── checkpoint_manager.py │ │ ├── fsdp_checkpoint_manager.py │ │ └── megatron_checkpoint_manager.py │ ├── config.py │ ├── dataset/ │ │ ├── README.md │ │ ├── __init__.py │ │ ├── multiturn_sft_dataset.py │ │ ├── rl_dataset.py │ │ ├── rm_dataset.py │ │ ├── sft_dataset.py │ │ └── vision_utils.py │ ├── debug/ │ │ ├── __init__.py │ │ ├── performance.py │ │ └── trajectory_tracker.py │ ├── device.py │ ├── distributed.py │ ├── experimental/ │ │ ├── __init__.py │ │ └── torch_functional.py │ ├── flops_counter.py │ ├── fs.py │ ├── fsdp_utils.py │ ├── hdfs_io.py │ ├── import_utils.py │ ├── kernel/ │ │ ├── __init__.py │ │ ├── kernels.py │ │ └── linear_cross_entropy.py │ ├── logger/ │ │ ├── __init__.py │ │ └── aggregate_logger.py │ ├── logging_utils.py │ ├── megatron/ │ │ ├── __init__.py │ │ ├── dist_checkpointing.py │ │ ├── memory.py │ │ ├── optimizer.py │ │ ├── pipeline_parallel.py │ │ ├── sequence_parallel.py │ │ └── tensor_parallel.py │ ├── megatron_utils.py │ ├── memory_buffer.py │ ├── metric/ │ │ ├── __init__.py │ │ └── utils.py │ ├── model.py │ ├── net_utils.py │ ├── profiler/ │ │ ├── __init__.py │ │ ├── config.py │ │ ├── empty_annotations.py │ │ ├── mstx_profile.py │ │ ├── nvtx_profile.py │ │ ├── performance.py │ │ └── profile.py │ ├── py_functional.py │ ├── ray_utils.py │ ├── rendezvous/ │ │ ├── __init__.py │ │ └── ray_backend.py │ ├── reward_score/ │ │ ├── __init__.py │ │ ├── geo3k.py │ │ ├── gsm8k.py │ │ ├── math.py │ │ ├── math_batch.py │ │ ├── math_dapo.py │ │ ├── math_verify.py │ │ ├── prime_code/ │ │ │ ├── README.md │ │ │ ├── __init__.py │ │ │ ├── testing_util.py │ │ │ └── utils.py │ │ ├── prime_math/ │ │ │ ├── __init__.py │ │ │ ├── grader.py │ │ │ └── math_normalize.py │ │ ├── sandbox_fusion/ │ │ │ ├── __init__.py │ │ │ └── utils.py │ │ └── search_r1_like_qa_em.py │ ├── rollout_trace.py │ ├── seqlen_balancing.py │ ├── tokenizer.py │ ├── torch_dtypes.py │ ├── torch_functional.py │ ├── tracking.py │ ├── ulysses.py │ └── vllm_utils.py ├── version/ │ └── version └── workers/ ├── __init__.py ├── actor/ │ ├── __init__.py │ ├── base.py │ ├── dp_actor.py │ └── megatron_actor.py ├── critic/ │ ├── __init__.py │ ├── base.py │ ├── dp_critic.py │ └── megatron_critic.py ├── engine/ │ ├── __init__.py │ ├── base.py │ ├── fsdp/ │ │ ├── __init__.py │ │ ├── engine_impl.py │ │ └── utils.py │ └── megatron/ │ ├── __init__.py │ └── engine_impl.py ├── fsdp_workers.py ├── megatron_workers.py ├── reward_manager/ │ ├── __init__.py │ ├── batch.py │ ├── dapo.py │ ├── naive.py │ ├── prime.py │ └── registry.py ├── reward_model/ │ ├── __init__.py │ ├── base.py │ └── megatron/ │ ├── __init__.py │ └── reward_model.py ├── roles/ │ ├── __init__.py │ ├── actor.py │ └── critic.py ├── rollout/ │ ├── __init__.py │ ├── async_server.py │ ├── base.py │ ├── chat_scheduler.py │ ├── hf_rollout.py │ ├── naive/ │ │ ├── __init__.py │ │ └── naive_rollout.py │ ├── schemas.py │ ├── 
sglang_rollout/ │ │ ├── __init__.py │ │ ├── async_sglang_server.py │ │ ├── sglang_rollout.py │ │ └── utils.py │ ├── tokenizer.py │ └── vllm_rollout/ │ ├── __init__.py │ ├── vllm_async_server.py │ └── vllm_rollout_spmd.py └── sharding_manager/ ├── __init__.py ├── base.py ├── fsdp_sglang.py ├── fsdp_ulysses.py ├── fsdp_vllm.py ├── megatron_sglang.py └── megatron_vllm.py ================================================ FILE CONTENTS ================================================ ================================================ FILE: .gitignore ================================================ # IDE .idea/ .vscode/ .claude/ .gemini/ *.swp *.swo *~ # OS .DS_Store Thumbs.db # Python __pycache__/ *.py[cod] *$py.class *.so .Python build/ develop-eggs/ dist/ downloads/ eggs/ .eggs/ lib/ lib64/ parts/ sdist/ var/ wheels/ *.egg-info/ .installed.cfg *.egg # Virtual environments .venv/ venv/ ENV/ env/ # Logs *.log logs/ tmp_ray/ # Jupyter .ipynb_checkpoints/ # Testing .pytest_cache/ .coverage htmlcov/ .tox/ .nox/ # ML/DL wandb/ mlruns/ *.ckpt *.pt *.pth *.bin *.safetensors output/ checkpoints/ ckpt/ # Data # *.parquet # *.csv # *.json # *.jsonl # Ray ray_results/ ================================================ FILE: README.md ================================================

OpenOneRec

An Open Foundation Model and Benchmark to Accelerate Generative Recommendation

Hugging Face GitHub Code Paper License


## 📖 Introduction **OpenOneRec** is an open-source framework designed to bridge the gap between traditional recommendation systems and Large Language Models (LLMs). While Generative Recommendation has shown promise, existing models often struggle with isolated data silos and a lack of reasoning capabilities. To address this, we introduce a unified framework that comprises: * **RecIF-Bench**: The first holistic Recommendation Instruction-Following Benchmark, containing **100M interactions** from 200k users across heterogeneous domains (Short Video, Ads, Product). * **OneRec-Foundation Models**: A family of models (1.7B & 8B) built on the Qwen3 backbone. The series includes **Standard** versions trained on our open-source dataset and **Pro** versions enhanced with a hundred-billion-token industrial corpus from Kuaishou. * **Full-Stack Pipeline**: We open-source our comprehensive training pipeline, including data processing, co-pretraining, and post-training, to ensure full reproducibility and facilitate scaling law research in recommendation. ## 🔥 News * **[2026.1.1]** 📑 **The technical report** has been released. * **[2026.1.1]** 🎉 **OneRec-Foundation** models (1.7B, 8B) are now available on Hugging Face! * **[2026.1.1]** 🚀 **RecIF-Bench** dataset and evaluation scripts are open-sourced. ## 📊 RecIF-Bench We propose **RecIF-Bench** to rigorously assess the synergy between instruction following and domain-specific recommendation. It organizes 8 distinct tasks into a four-layer capability hierarchy: * **Layer 0: Semantic Alignment** (Item Understanding) * **Layer 1: Fundamental Prediction** (Short Video Rec, Ad Rec, Product Rec, Label Prediction) * **Layer 2: Instruction Following** (Interactive Rec, Label-Conditional Rec) * **Layer 3: Reasoning** (Recommendation Explanation) The benchmark aggregates data from three domains: **Short Video** (Content), **Ads** (Commercial), and **Product** (E-commerce). ## 🤖 Model Zoo The OpenOneRec-Foundation series is built upon the Qwen architecture, enhanced with **Itemic Tokens** for modality alignment and trained via a multi-stage protocol. | Model | Backbone | Parameters | Description | Link | | :--- | :--- | :--- | :--- | :--- | | **OneRec-1.7B** | Qwen3-1.7B | 1.7B | Standard version trained on open-source data (~33B tokens) | [HuggingFace](https://huggingface.co/OpenOneRec/OneRec-1.7B) | | **OneRec-8B** | Qwen3-8B | 8B | Standard version trained on open-source data (~33B tokens) | [HuggingFace](https://huggingface.co/OpenOneRec/OneRec-8B) | | **OneRec-1.7B-Pro** | Qwen3-1.7B | 1.7B | Scaled-up version with expanded datasets (~130B tokens) | [HuggingFace](https://huggingface.co/OpenOneRec/OneRec-1.7B-pro) | | **OneRec-8B-Pro** | Qwen3-8B | 8B | Scaled-up version with expanded datasets (~130B tokens) | [HuggingFace](https://huggingface.co/OpenOneRec/OneRec-8B-pro) | ## 🏗️ Method & Architecture OpenOneRec reframes recommendation as a general-purpose sequence modeling paradigm. ### 1. Items as Tokens To bridge the modality gap, we treat items as a distinct modality using **Itemic Tokens** derived from hierarchical vector quantization. This allows the LLM to process interaction history as a cohesive context sequence. ### 2. Training Pipeline Our framework utilizes the following recipe: * **Pre-Training**: Integrates collaborative signals via Itemic-Text Alignment and Full-Parameter Co-Pretraining. * **Post-Training**: * *Stage 1*: Multi-task Supervised Fine-tuning for basic instruction following. 
* *Stage 2*: On-policy Distillation to restore general reasoning performance. * *Stage 3*: Reinforcement Learning to enhance recommendation capabilities.
Figure: The Overall Framework of OpenOneRec.
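To make the itemic-token formulation concrete, the sketch below shows how an interaction history might be serialized into a prompt. The `<|sid_begin|>`/`<|sid_end|>` delimiters are the ones used in the Quick Start example further down; the per-level SID token format and the three-level code depth are illustrative assumptions, not the released vocabulary.

```python
# Illustrative sketch only: the inner "<|sid_l{level}_{code}|>" format and the
# 3-level code depth are hypothetical; <|sid_begin|>/<|sid_end|> are real delimiters.

def item_to_tokens(sid_codes):
    """Render one item's hierarchical VQ codes as an itemic-token span."""
    inner = "".join(f"<|sid_l{level}_{code}|>" for level, code in enumerate(sid_codes))
    return f"<|sid_begin|>{inner}<|sid_end|>"

def history_to_prompt(history, instruction):
    """Serialize an interaction history plus an instruction into one prompt."""
    items = ", ".join(item_to_tokens(codes) for codes in history)
    return f"The user recently watched: {items}. {instruction}"

# Two items, each quantized into a 3-level semantic ID.
print(history_to_prompt([[12, 7, 301], [5, 88, 14]],
                        "Recommend the next video as an itemic token."))
```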
## 📈 Performance ### Results on RecIF-Bench OpenOneRec-Foundation achieves **State-of-the-Art (SOTA)** results across RecIF-Bench tasks, significantly outperforming baselines like LC-Rec and TIGER. | Task | Metric | SASRec | TIGER | LC-Rec | OneRec-1.7B | OneRec-8B | OneRec-1.7B-Pro | **OneRec-8B-Pro** | | :--- | :--- | :--- | :--- | :--- | :--- | :--- | :--- | :--- | | **Short Video Rec** | Recall@32 | 0.0119 | 0.0132 | 0.0180 | 0.0272 | 0.0355 | 0.0274 | **0.0369** | | **Ad Rec** | Recall@32 | 0.0293 | 0.0581 | 0.0723 | 0.0707 | 0.0877 | 0.0735 | **0.0964** | | **Product Rec** | Recall@32 | 0.0175 | 0.0283 | 0.0416 | 0.0360 | 0.0470 | 0.0405 | **0.0538** | | **Label-Cond. Rec** | Recall@32 | 0.0140 | 0.0123 | 0.0170 | 0.0184 | 0.0228 | 0.0182 | **0.0235** | | **Label Pred.** | AUC | 0.6244 | 0.6675 | 0.6139 | 0.6184 | 0.6615 | 0.6071 | **0.6912** | | **Interactive Rec** | Recall@32 | -- | -- | 0.2394 | 0.1941 | 0.3032 | 0.2024 | **0.3458** | | **Item Und.** | LLM Score | -- | -- | 0.2517 | 0.3175 | 0.3202 | 0.3133 | **0.3209** | | **Rec. Explanation** | LLM Score | -- | -- | 3.9350 | 3.3540 | 3.6774 | 3.5060 | **4.0381** |
Figure: Holistic Performance Overview of OpenOneRec.
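For reference, the Recall@32 rows above follow the standard Recall@K definition. The snippet below is a minimal, self-contained sketch of that metric, not the benchmark's actual evaluator (which lives under `benchmarks/benchmark/tasks/v1_0/`); the field names are illustrative.

```python
# Standard Recall@K over ranked candidate lists (illustrative, not the evaluator code).

def recall_at_k(samples, k=32):
    """Fraction of samples whose ground-truth item appears in the top-k candidates."""
    hits = sum(1 for s in samples if s["target"] in s["candidates"][:k])
    return hits / len(samples)

samples = [
    {"target": "item_42", "candidates": ["item_7", "item_42", "item_9"]},
    {"target": "item_13", "candidates": ["item_1", "item_2", "item_3"]},
]
print(recall_at_k(samples, k=32))  # 0.5
```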
### Cross-Domain Transferability

On the **Amazon Benchmark** (10 datasets), OpenOneRec demonstrates exceptional zero-shot/few-shot transfer capabilities, achieving an average **26.8% improvement** in Recall@10 over the second-best method.

| Domain | SASRec | TIGER | LC-Rec | **Ours** |
| :--- | :--- | :--- | :--- | :--- |
| Baby | 0.0381 | 0.0318 | 0.0344 | **0.0513** |
| Beauty | 0.0639 | 0.0628 | 0.0764 | **0.0924** |
| Cell Phones | 0.0782 | 0.0786 | 0.0883 | **0.1036** |
| Grocery | 0.0789 | 0.0691 | 0.0790 | **0.1029** |
| Health | 0.0506 | 0.0534 | 0.0616 | **0.0768** |
| Home | 0.0212 | 0.0216 | 0.0293 | **0.0390** |
| Pet Supplies | 0.0607 | 0.0542 | 0.0612 | **0.0834** |
| Sports | 0.0389 | 0.0331 | 0.0418 | **0.0547** |
| Tools | 0.0437 | 0.0344 | 0.0438 | **0.0593** |
| Toys | 0.0658 | 0.0527 | 0.0549 | **0.0953** |

*Metric: Recall@10. "Ours" refers to OneRec-Foundation with the text-augmented itemic-token strategy. For implementation details, please refer to [GRLM](https://github.com/ZY0025/GRLM).*

## 🚀 Quick Start

*Code release and detailed usage instructions are coming soon.*

Currently, you can load our models using `transformers>=4.51.0`:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "OpenOneRec/OneRec-8B"

# load the tokenizer and the model
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

# prepare the model input
# case - prompt with itemic tokens
# (English: "Here is a video: <|sid_begin|><|sid_end|>; summarize what this video is about.")
prompt = "这是一个视频:<|sid_begin|><|sid_end|>,帮我总结一下这个视频讲述了什么内容"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,
    enable_thinking=True  # Switches between thinking and non-thinking modes. Default is True.
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

# conduct text completion
# Note: In our experience, default decoding settings may be unstable for small models.
# For 1.7B, we suggest: top_p=0.95, top_k=20, temperature=0.75 (values in the 0.6 to 0.8 range work well)
generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=32768
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist()

# parsing thinking content
try:
    # rindex: find the last occurrence of token 151668 (</think>)
    index = len(output_ids) - output_ids[::-1].index(151668)
except ValueError:
    index = 0
thinking_content = tokenizer.decode(output_ids[:index], skip_special_tokens=True).strip("\n")
content = tokenizer.decode(output_ids[index:], skip_special_tokens=True).strip("\n")

print("thinking content:", thinking_content)
print("content:", content)
```

## 🛣️ Roadmap / Under Development

We are actively working on the following features:

- [ ] **General-domain data**: scripts to fetch and preprocess public general-domain corpora used in `data/general_text`.
- [ ] **Reproducible environments**: training pipeline Docker/Apptainer images for easier end-to-end reproduction.
- [ ] **One-click reproduction**: further code cleanup and streamlined training recipes for an end-to-end “run from scratch” experience.
- [ ] **Docs & tutorials**: improved documentation, tutorials, and best-practice guides.
- [ ] **Unified VeRL integration**: consolidate RL and distillation codepaths into a single, consistent VeRL-based implementation.
- [ ] **More model sizes**: support additional pretraining scales and configurations beyond current checkpoints.

Contributions are welcome! Please refer to the detailed documentation in each module.
## 📜 Citation If you find our work helpful, please cite our technical report: ```bibtex @misc{OpenOneRec, title={OpenOneRec Technical Report}, author={Guorui Zhou and Honghui Bao and Jiaming Huang and Jiaxin Deng and Jinghao Zhang and Junda She and Kuo Cai and Lejian Ren and Lu Ren and Qiang Luo and Qianqian Wang and Qigen Hu and Rongzhou Zhang and Ruiming Tang and Shiyao Wang and Wuchao Li and Xiangyu Wu and Xinchen Luo and Xingmei Wang and Yifei Hu and Yunfan Wu and Zhanyu Liu and Zhiyang Zhang and Zixing Zhang and Bo Chen and Bin Wen and Chaoyi Ma and Chengru Song and Chenglong Chu and Defu Lian and Fan Yang and Feng Jiang and Hongtao Cheng and Huanjie Wang and Kun Gai and Pengfei Zheng and Qiang Wang and Rui Huang and Siyang Mao and Tingting Gao and Wei Yuan and Yan Wang and Yang Zhou and Yi Su and Zexuan Cheng and Zhixin Ling and Ziming Li}, year={2025}, eprint={2512.24762}, archivePrefix={arXiv}, primaryClass={cs.IR} } ``` ## 🛡️ License The code in this repository is licensed under the Apache 2.0 License. The model weights are subject to their specific license agreements. ## 🙏 Acknowledgements OpenOneRec is built upon and inspired by the open-source ecosystem. We would like to thank: - **Qwen3**: for providing the base architecture and model initialization that OpenOneRec builds upon. - **General-domain data sources**: for the public corpora referenced in [`data/general_text`](https://github.com/Kuaishou-OneRec/OpenOneRec/tree/main/data/general_text) used for mixed-domain training. - **VeRL & PyTorch distributed training**: for the training infrastructure and scalable primitives (e.g., **FSDP**) used in post-training and large-scale runs. We sincerely thank these projects for their outstanding work. ================================================ FILE: benchmarks/LICENSE ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [2025] [OneRec Team] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: benchmarks/README.md ================================================ # Benchmark ## Quick Start ### Step 1: Install Dependencies ```bash cd benchmarks conda create -n benchmark python=3.10 conda activate benchmark pip install uv uv pip install torch==2.5.1 transformers==4.52.0 vllm==0.7.3 pip install -r requirements.txt pip install -e . --no-deps --no-build-isolation ``` ### Step 2: Start Ray Cluster (Optional) ```bash # Initialize multi-node multi-GPU environment # Skip this step if using single-node multi-GPU setup bash scripts/init_ray_cluster.sh ``` ### Step 3: Configure LLM API Edit `api/config/llm_config.json` to fill in your Gemini configuration: ```json { "gemini": { "project": "", "location": "", "credentials_path": "", ... } } ``` **Note**: Only `project`, `location`, and `credentials_path` need to be configured. 
Test the configuration:

```python
from api import get_client_from_config

# Create client
client = get_client_from_config("gemini")

# Generate text
response = client.generate("Tell me a joke")
print(response)
```

### Step 4: Run Evaluation

```bash
export BENCHMARK_BASE_DIR="."
export BENCHMARK_DATA_DIR="../raw_data/onerec_data/benchmark_data"
export DATA_VERSION="v1.0"
bash eval_script.sh
```

**Parameters**:

| Parameter | Description | Example |
|-----------|-------------|---------|
| model_path | Path to the model to evaluate | `model_output/sft/global_step10/converted` |
| result_name | Name identifier for output directory | `sft_nonthink` |
| enable_thinking | `true` or `false` | `false` |

**Examples**:

```bash
# Without thinking mode
bash eval_script.sh \
    /path/to/model \
    model_nonthink \
    false

# With thinking mode
bash eval_script.sh \
    /path/to/model \
    model_think \
    true
```

For debugging purposes, you can add `--sample_size 10` to each Python command in `eval_script.sh` to run evaluation on a smaller subset of data.

### Step 5: View Results

After evaluation completes, results are saved in:

```
./results/v1.0/results_<result_name>/
```

Log files are located at:

```
./auto_eval_logs/v1.0/<result_name>.log
```

---

## Evaluation Tasks

| Task Name | Source | Samples | Description |
|-----------|--------|---------|-------------|
| ad | Kuaishou Internal | 27,677 | Predict next clicked advertisement |
| product | Kuaishou Internal | 27,910 | Predict next clicked product |
| interactive | Kuaishou Internal | 1,000 | Predict next interacted video |
| video | Kuaishou Internal | 38,781 | Next video prediction |
| label_cond | Kuaishou Internal | 34,891 | Predict next video given specified consumption behavior |
| label_pred | Kuaishou Internal | 346,190 | Predict user engagement with video content |
| item_understand | Kuaishou Internal | 500 | Video SID to Caption generation task |
| rec_reason | Kuaishou Internal | 470 | Recommendation reason inference |

================================================ FILE: benchmarks/api/README.md ================================================

# Unified LLM API Wrapper

This is a unified LLM API wrapper library that provides a clean and elegant interface for calling different large language models.
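For example, once configured (see Quick Start below), every client exposes the same single-call and concurrent batch interfaces. The sketch below follows the `batch_generate` result schema documented in `api/base.py`; the prompts are placeholders.

```python
from api import get_client_from_config

client = get_client_from_config("gemini")

# Concurrent batch generation; each result dict carries
# "prompt", "result" (or "error"), and "success", per the base-class docstring.
results = client.batch_generate(
    prompts=["What is CTR?", "Define Recall@K."],
    max_workers=2,
    show_progress=True,
)
for r in results:
    print(r["prompt"], "->", r["result"] if r["success"] else r["error"])
```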
## Supported Models - **Claude** - Anthropic Claude models - **Gemini** - Google Vertex AI Gemini models - **DeepSeek** - DeepSeek models via Baidu Qianfan platform ## Model Pricing Comparison - Claude: https://claude.com/pricing - Gemini: https://ai.google.dev/gemini-api/docs/pricing - DeepSeek: https://api-docs.deepseek.com/quick_start/pricing ## Quick Start ### Installation ```bash pip install openai google-cloud-aiplatform anthropic tqdm ``` ### Using Configuration File First, edit `api/config/llm_config.json` to fill in your configuration: Then use the following code to test: ```python from api import get_client_from_config # Create client client = get_client_from_config("gemini") # Generate text response = client.generate("Tell me a joke") print(response) ``` ================================================ FILE: benchmarks/api/__init__.py ================================================ """ Unified LLM API Wrapper Supports convenient calling of Gemini, DeepSeek, and Claude models """ import json from pathlib import Path from typing import List, Dict, Any, Optional from .base import BaseLLMClient from .gemini import GeminiClient from .deepseek import DeepSeekClient from .claude import ClaudeClient # Model mapping MODEL_CLASSES = { "gemini": GeminiClient, "deepseek": DeepSeekClient, "claude": ClaudeClient, } def load_config(config_path: str = None) -> Dict[str, Any]: """ Load configuration from JSON file Args: config_path: Configuration file path, defaults to api/config/llm_config.json Returns: dict: Configuration dictionary Raises: FileNotFoundError: Configuration file does not exist json.JSONDecodeError: Configuration file format error """ if config_path is None: current_dir = Path(__file__).parent config_path = current_dir / "config" / "llm_config.json" config_path = Path(config_path) if not config_path.exists(): raise FileNotFoundError(f"Configuration file does not exist: {config_path}") with open(config_path, 'r', encoding='utf-8') as f: return json.load(f) def get_client(model: str, **config) -> BaseLLMClient: """ Factory function: Create LLM client instance Args: model: Model name ("gemini" or "deepseek") **config: Model-specific configuration parameters Returns: BaseLLMClient: Client instance Raises: ValueError: Unsupported model type Example: >>> client = get_client("gemini", ... project="your-project", ... location="us-central1") >>> result = client.generate("Tell me a joke") """ model = model.lower() if model not in MODEL_CLASSES: raise ValueError( f"Unsupported model: {model}. " f"Supported models: {', '.join(MODEL_CLASSES.keys())}" ) client_class = MODEL_CLASSES[model] return client_class(**config) def get_client_from_config( model: str, config_path: Optional[str] = None ) -> BaseLLMClient: """ Create LLM client from configuration file Args: model: Model name ("gemini" or "deepseek") config_path: Configuration file path, defaults to api/config/llm_config.json Returns: BaseLLMClient: Client instance Raises: ValueError: Model configuration not found in configuration file Example: >>> client = get_client_from_config("gemini") >>> result = client.generate("Tell me a joke") """ config = load_config(config_path) model = model.lower() if model not in config: raise ValueError( f"Model '{model}' configuration not found in configuration file. 
" f"Available models: {', '.join(config.keys())}" ) model_config = config[model] return get_client(model, **model_config) def batch_generate( prompts: List[str], model: str, max_workers: int = 5, show_progress: bool = True, config_path: Optional[str] = None, **config ) -> List[Dict[str, Any]]: """ Batch generate text (with concurrent support) Args: prompts: List of prompts model: Model name ("gemini" or "deepseek") max_workers: Maximum number of concurrent threads, default 5 show_progress: Whether to show progress bar, default True config_path: Configuration file path (if provided, use configuration file first) **config: Model configuration parameters (if not using configuration file) Returns: List[Dict]: List of results, each element contains: - prompt: Original prompt - result: Generated text (on success) - error: Error message (on failure) - success: Whether successful Example: >>> # Using configuration file >>> results = batch_generate( ... prompts=["Question 1", "Question 2", "Question 3"], ... model="gemini", ... max_workers=3 ... ) >>> # Direct configuration >>> results = batch_generate( ... prompts=["Question 1", "Question 2"], ... model="deepseek", ... api_key="your-key", ... appid="your-appid" ... ) """ if config_path: client = get_client_from_config(model, config_path) else: client = get_client(model, **config) return client.batch_generate( prompts=prompts, max_workers=max_workers, show_progress=show_progress ) # Export all public interfaces __all__ = [ # Classes "BaseLLMClient", "GeminiClient", "DeepSeekClient", "ClaudeClient", # Functions "get_client", "get_client_from_config", "batch_generate", "load_config", ] ================================================ FILE: benchmarks/api/base.py ================================================ """ Base LLM Client Definition Provides unified interface specification with retry mechanism and batch processing """ from abc import ABC, abstractmethod from typing import Optional, Dict, Any, List import time import random from concurrent.futures import ThreadPoolExecutor, as_completed class BaseLLMClient(ABC): """ Base class for LLM clients, defining unified interface All concrete LLM clients (Gemini, DeepSeek, etc.) 
should inherit from this class Provides unified retry mechanism and batch processing capabilities """ def __init__(self, **config): """ Initialize client Args: **config: Model-specific configuration parameters """ self.config = config self.max_retries = config.get("max_retries", 3) self.retry_delay = config.get("retry_delay", 2) self._setup() @abstractmethod def _setup(self): """Setup client (subclasses implement specific initialization logic)""" pass @abstractmethod def _call_api( self, prompt: str, temperature: Optional[float] = None, max_tokens: Optional[int] = None, **kwargs ) -> str: """ Call API to generate text (subclasses implement specific API call logic) Args: prompt: Input prompt temperature: Temperature parameter max_tokens: Maximum number of tokens to generate **kwargs: Other model-specific parameters Returns: Generated text content Raises: Exception: Raised when API call fails """ pass def _is_retryable_error(self, error_msg: str) -> bool: """ Determine if error is retryable Args: error_msg: Error message Returns: bool: Whether the error is retryable """ retryable_keywords = [ '503', '429', '500', 'timeout', 'timed out', 'deadline', 'unavailable', 'failed to connect', 'connection', 'rate limit', 'overload' ] return any(keyword in error_msg.lower() for keyword in retryable_keywords) def _generate_with_retry( self, prompt: str, temperature: Optional[float] = None, max_tokens: Optional[int] = None, **kwargs ) -> str: """ Generation method with retry mechanism (template method) Args: prompt: Input prompt temperature: Temperature parameter max_tokens: Maximum number of tokens to generate **kwargs: Other parameters Returns: str: Generated text content Raises: Exception: Raised when API call fails """ if not prompt or not prompt.strip(): raise ValueError("prompt cannot be empty") last_error = None for attempt in range(self.max_retries): try: if attempt > 0: delay = self.retry_delay * (2 ** (attempt - 1)) jitter = random.uniform(0, delay * 0.3) time.sleep(delay + jitter) return self._call_api(prompt, temperature, max_tokens, **kwargs) except Exception as e: last_error = e error_msg = str(e) is_retryable = self._is_retryable_error(error_msg) if attempt == self.max_retries - 1 or not is_retryable: raise Exception(f"{self.__class__.__name__} API call failed: {error_msg}") print(f"{self.__class__.__name__} API call failed " f"(attempt {attempt + 1}/{self.max_retries}), " f"will retry in {self.retry_delay} seconds: {error_msg[:100]}") raise Exception(f"Maximum retry attempts reached ({self.max_retries}): {last_error}") def generate( self, prompt: str, temperature: Optional[float] = None, max_tokens: Optional[int] = None, **kwargs ) -> str: """ Generate text content (public interface) Args: prompt: Input prompt temperature: Temperature parameter (controls randomness) max_tokens: Maximum number of tokens to generate **kwargs: Other model-specific parameters Returns: str: Generated text content Raises: ValueError: Parameter error Exception: API call failed """ return self._generate_with_retry(prompt, temperature, max_tokens, **kwargs) def batch_generate( self, prompts: List[str], max_workers: int = 5, show_progress: bool = True, **kwargs ) -> List[Dict[str, Any]]: """ Batch generate text (with concurrent support) Args: prompts: List of prompts max_workers: Maximum number of concurrent threads, default 5 show_progress: Whether to show progress bar, default True **kwargs: Other parameters to pass to generate Returns: List[Dict]: List of results, each element contains: - prompt: Original prompt 
- result: Generated text (on success) - error: Error message (on failure) - success: Whether successful """ try: from tqdm import tqdm has_tqdm = True except ImportError: has_tqdm = False if show_progress: print("Warning: tqdm not installed, cannot show progress bar") def process_prompt(prompt: str, index: int) -> Dict[str, Any]: try: result = self.generate(prompt, **kwargs) return { "index": index, "prompt": prompt, "result": result, "success": True } except Exception as e: return { "index": index, "prompt": prompt, "error": str(e), "success": False } with ThreadPoolExecutor(max_workers=max_workers) as executor: future_to_index = { executor.submit(process_prompt, prompt, i): i for i, prompt in enumerate(prompts) } if show_progress and has_tqdm: progress = tqdm( as_completed(future_to_index), total=len(prompts), desc=f"Generating ({self.__class__.__name__})" ) else: progress = as_completed(future_to_index) temp_results = [] for future in progress: try: result = future.result() temp_results.append(result) except Exception as e: index = future_to_index[future] temp_results.append({ "index": index, "prompt": prompts[index], "error": f"Task execution failed: {str(e)}", "success": False }) results = sorted(temp_results, key=lambda x: x["index"]) for r in results: r.pop("index", None) return results def __repr__(self) -> str: return f"{self.__class__.__name__}(config={self.config})" ================================================ FILE: benchmarks/api/claude.py ================================================ """ Claude API Client Implementation Based on Anthropic official SDK """ from typing import Optional from anthropic import Anthropic from .base import BaseLLMClient class ClaudeClient(BaseLLMClient): """ Claude API Client Example: >>> client = ClaudeClient( ... api_key="your-api-key", ... model_name="claude-sonnet-4-20250514" ... 
) >>> response = client.generate("Tell me a joke") """ def _setup(self): """Initialize Claude client""" self.api_key = self.config.get("api_key") self.model_name = self.config.get("model_name", "claude-sonnet-4-20250514") self.base_url = self.config.get("base_url") self.default_max_tokens = self.config.get("max_new_tokens", 1024) self.default_temperature = self.config.get("temperature", 1.0) if not self.api_key: raise ValueError("api_key is a required parameter") client_kwargs = {"api_key": self.api_key} if self.base_url: client_kwargs["base_url"] = self.base_url self.client = Anthropic(**client_kwargs) def _call_api( self, prompt: str, temperature: Optional[float] = None, max_tokens: Optional[int] = None, **kwargs ) -> str: """ Call Claude API to generate text Args: prompt: Input prompt temperature: Temperature parameter (0.0-1.0), default 1.0 max_tokens: Maximum number of tokens to generate, default 1024 **kwargs: Other Claude-specific parameters, such as: - system: System prompt - top_p: Nucleus sampling parameter - top_k: Top-k sampling parameter Returns: str: Generated text content Raises: Exception: Raised when API call fails """ if temperature is None: temperature = self.default_temperature if max_tokens is None: max_tokens = self.default_max_tokens system = kwargs.pop("system", None) request_params = { "model": self.model_name, "messages": [{"role": "user", "content": prompt}], "max_tokens": max_tokens, } if temperature is not None: request_params["temperature"] = temperature if system: request_params["system"] = system for key in ["top_p", "top_k", "stop_sequences"]: if key in kwargs: request_params[key] = kwargs.pop(key) response = self.client.messages.create(**request_params) if response and response.content: text_blocks = [ block.text for block in response.content if hasattr(block, 'text') ] if text_blocks: return "".join(text_blocks) else: raise Exception("API returned empty response") else: raise Exception("API returned invalid response") ================================================ FILE: benchmarks/api/config/llm_config.json ================================================ { "gemini": { "project": "", "location": "", "model_name": "gemini-2.5-flash-lite", "credentials_path": "", "max_new_tokens": 10000, "temperature": 0.01, "max_retries": 3, "retry_delay": 2 }, "deepseek": { "api_key": "", "base_url": "", "model_name": "deepseek-r1", "appid": "", "max_new_tokens": 10000, "temperature": 0.01, "max_retries": 3, "retry_delay": 2 }, "claude": { "api_key": "", "base_url": "", "model_name": "", "max_new_tokens": 10000, "temperature": 0.01, "max_retries": 3, "retry_delay": 2 } } ================================================ FILE: benchmarks/api/deepseek.py ================================================ """ DeepSeek API Client Implementation Call DeepSeek model through Baidu Qianfan platform """ from typing import Optional from openai import OpenAI from .base import BaseLLMClient class DeepSeekClient(BaseLLMClient): """ DeepSeek API Client (through Baidu Qianfan platform) Example: >>> client = DeepSeekClient( ... api_key="your-api-key", ... base_url="https://qianfan.baidubce.com/v2", ... model_name="deepseek-r1", ... appid="your-appid" ... 
) >>> response = client.generate("Tell me a joke") """ def _setup(self): """Initialize DeepSeek client""" self.api_key = self.config.get("api_key") self.base_url = self.config.get("base_url", "https://qianfan.baidubce.com/v2") self.model_name = self.config.get("model_name", "deepseek-r1") self.appid = self.config.get("appid") self.default_max_tokens = self.config.get("max_new_tokens", 300) self.default_temperature = self.config.get("temperature", 0.7) if not self.api_key: raise ValueError("api_key is a required parameter") if not self.appid: raise ValueError("appid is a required parameter") self.client = OpenAI( api_key=self.api_key, base_url=self.base_url, default_headers={"appid": self.appid} ) def _call_api( self, prompt: str, temperature: Optional[float] = None, max_tokens: Optional[int] = None, **kwargs ) -> str: """ Call DeepSeek API to generate text Args: prompt: Input prompt temperature: Temperature parameter (0.0-2.0), default from config or 0.7 max_tokens: Maximum number of tokens to generate, default from config or 300 **kwargs: Other DeepSeek-specific parameters Returns: str: Generated text content Raises: Exception: Raised when API call fails """ if temperature is None: temperature = self.default_temperature if max_tokens is None: max_tokens = self.default_max_tokens request_params = { "model": self.model_name, "messages": [{"role": "user", "content": prompt}], "temperature": temperature, "max_tokens": max_tokens, "stream": False } request_params.update(kwargs) response = self.client.chat.completions.create(**request_params) if response and response.choices: content = response.choices[0].message.content if content: return content else: raise Exception("API returned empty response") else: raise Exception("API returned invalid response") ================================================ FILE: benchmarks/api/example.py ================================================ """ LLM API Usage Examples Demonstrates various calling methods and use cases """ # ============================================================================ # Example 1: Using Configuration File (Simplest) # ============================================================================ def example1_use_config(): """Load and use from configuration file""" from api import get_client_from_config print("=" * 60) print("Example 1: Using Configuration File") print("=" * 60) # Create client from configuration file client = get_client_from_config("gemini") # Generate text response = client.generate("Explain what AI is in one sentence") print(f"Answer: {response}\n") # ============================================================================ # Example 2: Direct Parameters # ============================================================================ def example2_direct_params(): """Pass configuration parameters directly""" from api import get_client print("=" * 60) print("Example 2: Direct Parameters") print("=" * 60) # Gemini gemini_client = get_client( "gemini", project="your-project", location="us-central1", model_name="gemini-2.5-pro", credentials_path="path/to/credentials.json" ) # DeepSeek deepseek_client = get_client( "deepseek", api_key="your-api-key", appid="your-appid", base_url="https://qianfan.baidubce.com/v2" ) # Usage response = gemini_client.generate("Hello") print(f"Gemini: {response}\n") # ============================================================================ # Example 3: Batch Generation (Concurrent) # ============================================================================ def 
example3_batch_generate(): """Batch text generation with concurrent support""" from api import get_client_from_config print("=" * 60) print("Example 3: Batch Generation (Concurrent)") print("=" * 60) prompts = [ "What is machine learning?", "Explain deep learning", "Principles of neural networks", "What is natural language processing?", "Applications of computer vision" ] # Use client instance's batch_generate method (recommended) client = get_client_from_config("gemini") results = client.batch_generate( prompts=prompts, max_workers=3, # 3 concurrent threads show_progress=True # Show progress bar ) # Process results for i, item in enumerate(results, 1): print(f"\nQuestion {i}: {item['prompt']}") if item['success']: print(f"Answer: {item['result'][:100]}...") else: print(f"Error: {item['error']}") # ============================================================================ # Example 4: Custom Generation Parameters # ============================================================================ def example4_custom_params(): """Custom generation parameters""" from api import get_client_from_config print("=" * 60) print("Example 4: Custom Generation Parameters") print("=" * 60) client = get_client_from_config("deepseek") # Creative generation (high temperature) creative = client.generate( "Write a poem about spring", temperature=0.9, max_tokens=200 ) print(f"Creative output:\n{creative}\n") # Precise generation (low temperature) precise = client.generate( "What is 1+1?", temperature=0.1, max_tokens=50 ) print(f"Precise output:\n{precise}\n") # ============================================================================ # Example 5: Error Handling # ============================================================================ def example5_error_handling(): """Demonstrate error handling""" from api import get_client_from_config print("=" * 60) print("Example 5: Error Handling") print("=" * 60) try: client = get_client_from_config("gemini") # Normal call response = client.generate("Hello") print(f"Success: {response}") # Empty prompt (will raise ValueError) response = client.generate("") except ValueError as e: print(f"Parameter error: {e}") except Exception as e: print(f"API call failed: {e}") # ============================================================================ # Example 6: Switch Models # ============================================================================ def example6_switch_models(): """Switch between different models""" from api import get_client_from_config print("=" * 60) print("Example 6: Switch Models") print("=" * 60) question = "What is quantum computing?" for model_name in ["gemini", "deepseek"]: try: client = get_client_from_config(model_name) response = client.generate(question) print(f"\n{model_name.upper()}'s answer:") print(response[:150] + "...") except Exception as e: print(f"\n{model_name} call failed: {e}") # ============================================================================ # Example 7: Real Application - User Profile Generation # ============================================================================ def example7_user_portrait(): """Real application: Generate user profile based on user behavior""" from api import get_client_from_config print("=" * 60) print("Example 7: User Profile Generation") print("=" * 60) # User behavior data user_behavior = """ User's recently watched videos: 1. Machine Learning Tutorial 2. Python Programming Tips 3. Deep Learning Practical Projects 4. Data Analysis Case Studies 5. 
Latest AI Trends """ prompt = f"""Based on the following user behavior data, generate a concise user profile: {user_behavior} Requirements: 1. Summarize user's areas of interest 2. Infer user's skill level 3. Provide 3-5 precise tags """ client = get_client_from_config("gemini") portrait = client.generate(prompt, temperature=0.5) print("User Profile:") print(portrait) # ============================================================================ # Example 8: Direct Import of Classes # ============================================================================ def example8_direct_import(): """Import client classes directly""" from api import GeminiClient, DeepSeekClient print("=" * 60) print("Example 8: Direct Import of Client Classes") print("=" * 60) # Direct instantiation gemini = GeminiClient( project="your-project", location="us-central1" ) deepseek = DeepSeekClient( api_key="your-key", appid="your-appid" ) print("Clients created successfully") print(f"Gemini client: {gemini}") print(f"DeepSeek client: {deepseek}") # ============================================================================ # Main Function # ============================================================================ def main(): """Run all examples""" examples = [ ("Using Configuration File", example1_use_config), ("Direct Parameters", example2_direct_params), ("Batch Generation", example3_batch_generate), ("Custom Parameters", example4_custom_params), ("Error Handling", example5_error_handling), ("Switch Models", example6_switch_models), ("User Profile Generation", example7_user_portrait), ("Direct Import Classes", example8_direct_import), ] print("\n" + "=" * 60) print("LLM API Usage Examples") print("=" * 60) print("\nAvailable examples:") for i, (name, _) in enumerate(examples, 1): print(f"{i}. {name}") print("\nNote: Please ensure api/config/llm_config.json is configured before running") print("\n" + "=" * 60 + "\n") # Uncomment the lines below to run specific examples # example1_use_config() # example2_direct_params() # example3_batch_generate() # example4_custom_params() # example5_error_handling() # example6_switch_models() # example7_user_portrait() # example8_direct_import() if __name__ == "__main__": main() ================================================ FILE: benchmarks/api/gemini.py ================================================ """ Gemini API Client Implementation Based on Google Vertex AI's Gemini model """ import os from typing import Optional from vertexai.generative_models import GenerativeModel import vertexai from .base import BaseLLMClient class GeminiClient(BaseLLMClient): """ Gemini API Client Example: >>> client = GeminiClient( ... project="your-project", ... location="us-central1", ... model_name="gemini-2.5-pro", ... credentials_path="path/to/credentials.json" ... 
) >>> response = client.generate("Tell me a joke") """ def _setup(self): """Initialize Gemini client""" self.project = self.config.get("project") self.location = self.config.get("location") self.model_name = self.config.get("model_name", "gemini-2.5-pro") credentials_path = self.config.get("credentials_path") self.default_max_tokens = self.config.get("max_new_tokens") self.default_temperature = self.config.get("temperature") if credentials_path: os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = credentials_path if not self.project or not self.location: raise ValueError("project and location are required parameters") vertexai.init(project=self.project, location=self.location) self.model = GenerativeModel(self.model_name) def _call_api( self, prompt: str, temperature: Optional[float] = None, max_tokens: Optional[int] = None, **kwargs ) -> str: """ Call Gemini API to generate text Args: prompt: Input prompt temperature: Temperature parameter (0.0-1.0) max_tokens: Maximum number of tokens to generate **kwargs: Other Gemini-specific parameters Returns: str: Generated text content Raises: Exception: Raised when API call fails """ if temperature is None: temperature = self.default_temperature if max_tokens is None: max_tokens = self.default_max_tokens generation_config = {} if temperature is not None: generation_config["temperature"] = temperature if max_tokens is not None: generation_config["max_output_tokens"] = max_tokens if generation_config: response = self.model.generate_content( prompt, generation_config=generation_config ) else: response = self.model.generate_content(prompt) if response and response.text: return response.text else: raise Exception("API returned empty response") ================================================ FILE: benchmarks/benchmark/__init__.py ================================================ from benchmark.benchmark import Benchmark from benchmark.base_generator import Generator from benchmark.generation_runner import GenerationRunner __version__ = "0.1.0" __all__ = [ "Benchmark", "Generator", "GenerationRunner", ] ================================================ FILE: benchmarks/benchmark/base_generator.py ================================================ import os from abc import ABC, abstractmethod from typing import Dict, List, Any, Optional from collections import defaultdict from benchmark.console import * # Global configuration: tasks that should disable optimizations (long prompts may cause issues) # Used by vLLM-based generators to control chunked_prefill and prefix_caching DISABLE_OPTIMIZATIONS_FOR_TASKS = ["rec_reason", "interactive"] class Generator(ABC): """ Abstract base class for generation models All generation models should inherit from this class. Subclasses must implement _generate_standard() to support the generate() method. """ def __init__( self, **kwargs ): """ Args: num_return_sequences: Number of candidates to generate per prompt max_new_tokens: Maximum number of tokens to generate **kwargs: Other generation parameters """ pass def __str__(self) -> str: """ Return model name (for directory naming, remove path separators) This method is shared across all generator implementations. Subclasses must set self.model_name for this method to work. 
Returns: str: Model name """ return os.path.basename(self.model_name.rstrip('/')) def generate( self, prompts: Dict[str, str], **kwargs ) -> tuple: """ Batch text generation Supports two-stage generation for recommendation tasks: - Stage 1: Generate thinking content with top_p/top_k sampling (if thinking enabled) - Stage 2: Generate SID sequences with beam search and prompt_token This method is shared across all generator implementations to reduce code duplication. Subclasses must implement _generate_standard() for this method to work. Args: prompts: {sample_id: prompt_text} **kwargs: Optional generation parameters (will override initialization parameters) Returns: Tuple of two dicts: - First dict: {sample_id: [generated_text_1, generated_text_2, ...]} - Second dict: {sample_id: [cum_logprob_1, cum_logprob_2, ...]} (only for beam search) """ prompt_token = kwargs.get("prompt_token", None) enable_thinking = kwargs.get("enable_thinking", False) max_new_thinking_tokens = kwargs.get("max_new_thinking_tokens", None) target_tokens = kwargs.get("target_tokens", None) # Check if this is a classification task (has target_tokens parameter) is_classification = target_tokens is not None # Generation logic based on task type: # A: has max_new_thinking_tokens + has prompt_token (recommendation tasks) # B: has max_new_thinking_tokens + no prompt_token (caption tasks) # C: no max_new_thinking_tokens (standard tasks) # D: classification task + no think # E: classification task + think if is_classification: # Classification task scenarios (D & E) if enable_thinking: # E: Classification with thinking console.print( f"Two-stage classification with thinking enabled: thinking (max_new_thinking_tokens={max_new_thinking_tokens}) + logprobs extraction for {target_tokens}", style=warning_style, ) return self._generate_two_stage_classification_with_thinking(prompts, **kwargs) else: # D: Classification without thinking console.print( f"Classification task: extracting logprobs for tokens {target_tokens}", style=warning_style, ) # Remove target_tokens from kwargs to avoid passing it twice kwargs_classification = kwargs.copy() kwargs_classification.pop("target_tokens", None) results, _, mfu_stats = self.extract_token_logprobs(prompts, target_tokens, **kwargs_classification) self.mfu_stats = mfu_stats return results, {} elif max_new_thinking_tokens: if enable_thinking: # A & B with thinking: two-stage generation console.print( f"Two-stage generation enabled: thinking (max_new_thinking_tokens={max_new_thinking_tokens}) + prompt_token ({prompt_token})", style=warning_style, ) return self._generate_two_stage_with_thinking(prompts, **kwargs) else: # A & B without thinking if prompt_token: # A without thinking: single-stage with prompt_token (beam search) console.print( f"Single-stage generation with prompt_token ({prompt_token})", style=warning_style, ) prompts_with_token = { sample_id: prompt + prompt_token for sample_id, prompt in prompts.items() } results, logprobs, mfu_stats = self._generate_standard(prompts_with_token, **kwargs) self.mfu_stats = mfu_stats return results, logprobs else: # B without thinking: single-stage sampling console.print( f"Warning: max_new_thinking_tokens={max_new_thinking_tokens} is set but " f"enable_thinking=False and prompt_token=None. 
    def get_hardware_info(self) -> Dict[str, Any]:
        """
        Get GPU hardware information for MFU calculation

        Default implementation that works for all generators.
        Handles both single-machine and Ray-based multi-machine setups.

        Returns:
            Dictionary containing:
            - gpu_model: str, GPU model name
            - gpu_count: int, total number of GPUs used
            - gpu_tflops: float, theoretical peak TFLOPS for BF16/FP16
            - tensor_parallel_size: int, tensor parallelism size
            - gpu_memory_total_gb: float, total GPU memory in GB
        """
        from benchmark.gpu_utils import get_gpu_info
        gpu_info = get_gpu_info()
        # Calculate total GPU count
        tensor_parallel_size = getattr(self, 'tensor_parallel_size', 1)
        # For Ray-based generators, multiply by number of workers
        if hasattr(self, 'workers') and self.workers:
            num_workers = len(self.workers)
            total_gpus = num_workers * tensor_parallel_size
        else:
            # For single-machine generators
            total_gpus = tensor_parallel_size
        gpu_info["gpu_count"] = total_gpus
        gpu_info["tensor_parallel_size"] = tensor_parallel_size
        # Add worker info for Ray-based generators
        if hasattr(self, 'workers'):
            gpu_info["num_workers"] = len(self.workers) if self.workers else 0
        return gpu_info
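    # --- Illustrative return value (added note; values are hypothetical) ---
    # For a Ray deployment with 2 workers and tensor_parallel_size=4,
    # get_hardware_info() would report something like:
    #
    #   {
    #       "gpu_model": "A100-SXM4-80GB",
    #       "gpu_tflops": 312.0,
    #       "gpu_memory_total_gb": 80.0,
    #       "gpu_count": 8,              # 2 workers x 4-way tensor parallel
    #       "tensor_parallel_size": 4,
    #       "num_workers": 2,
    #   }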
    def _generate_two_stage_with_thinking(
        self,
        prompts: Dict[str, str],
        **kwargs
    ) -> tuple:
        """
        Two-stage generation with thinking mode

        Stage 1: Generate thinking content with top_p/top_k sampling until </think>
        Stage 2: Continue generation (with prompt_token if provided, beam search or sampling)

        This method is shared across all generator implementations to reduce code
        duplication. Subclasses must implement _generate_standard() for this method to work.

        Args:
            prompts: {sample_id: prompt_text}
            **kwargs: Optional generation parameters

        Returns:
            Tuple of two dicts:
            - First dict: {sample_id: [generated_text_1, generated_text_2, ...]}
            - Second dict: {sample_id: [cum_logprob_1, cum_logprob_2, ...]} (only for beam search)
        """
        prompt_token = kwargs.get("prompt_token", None)
        console.print(
            "Stage 1/2: Generating thinking content with top_p/top_k sampling...",
            style=warning_style,
        )
        # Stage 1: Build kwargs for thinking generation (remove beam search, add stop)
        kwargs_stage1 = kwargs.copy()
        kwargs_stage1.pop("num_beams", None)  # Remove beam search to force sampling mode
        kwargs_stage1["stop"] = ["</think>"]  # Stop at </think> tag
        # Use num_return_thinking_sequences for stage 1 if specified
        num_return_thinking = kwargs.get("num_return_thinking_sequences", 1)
        kwargs_stage1["num_return_sequences"] = num_return_thinking
        # Use max_new_thinking_tokens for stage 1 if specified
        max_new_thinking_tokens = kwargs.get("max_new_thinking_tokens", 1000)
        kwargs_stage1["max_new_tokens"] = max_new_thinking_tokens
        # Call _generate_standard for stage 1 (ignoring logprobs as they're not used)
        stage1_results, _, stage1_mfu_stats = self._generate_standard(prompts, **kwargs_stage1)
        # Prepare prompts for stage 2 by appending thinking + </think> + prompt_token
        # Each sample will have multiple thinking candidates
        stage2_prompts = {}
        sample_to_thinking_count = {}  # Track how many thinking candidates each sample has
        for sample_id, thinking_list in stage1_results.items():
            # Use ALL thinking candidates (not just the first one)
            sample_to_thinking_count[sample_id] = len(thinking_list)
            for idx, thinking_text in enumerate(thinking_list):
                # Create unique ID for each thinking candidate
                thinking_sample_id = f"{sample_id}_thinking_{idx}"
                # Append </think> + prompt_token (if provided)
                # If model didn't generate </think>, treat entire output as thinking
                if prompt_token:
                    full_thinking = thinking_text + "</think>\n" + prompt_token
                else:
                    full_thinking = thinking_text + "</think>\n"
                stage2_prompt = prompts[sample_id] + full_thinking
                stage2_prompts[thinking_sample_id] = stage2_prompt
        # Stage 2: Determine generation mode based on num_beams
        kwargs_stage2 = kwargs.copy()
        original_num_sequences = kwargs.get("num_return_sequences", 1)
        original_num_beams = kwargs.get("num_beams", None)
        # Determine if stage 2 uses beam search or sampling
        use_beam_search_stage2 = original_num_beams is not None
        if use_beam_search_stage2:
            # Beam search mode: num_beams is directly used per thinking candidate
            beams_per_thinking = original_num_beams
            # Validate configuration: total sequences should match
            if original_num_sequences != beams_per_thinking * num_return_thinking:
                raise ValueError(
                    f"Configuration error: num_return_sequences ({original_num_sequences}) must equal "
                    f"num_beams ({beams_per_thinking}) * num_return_thinking_sequences ({num_return_thinking}) = "
                    f"{beams_per_thinking * num_return_thinking}. "
                    f"Please adjust your parameters accordingly."
                )
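            # (Added note) Example of the constraint above: with
            # num_return_thinking_sequences=2 and num_beams=4, a caller must set
            # num_return_sequences=8 so that every beam of every thinking
            # candidate is kept.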
            kwargs_stage2["num_return_sequences"] = beams_per_thinking
            kwargs_stage2["num_beams"] = beams_per_thinking
            console.print(
                f"Stage 2/2: Generating sequences with beam search for {len(stage2_prompts)} thinking candidates...",
                style=warning_style,
            )
            console.print(
                f"Each thinking candidate will use beam_width={beams_per_thinking}, return {beams_per_thinking} sequences "
                f"({num_return_thinking} thinking × {beams_per_thinking} = {num_return_thinking * beams_per_thinking} total per sample)",
                style=warning_style,
            )
        else:
            # Sampling mode: each thinking generates 1 result
            kwargs_stage2["num_return_sequences"] = 1
            kwargs_stage2.pop("num_beams", None)  # Remove num_beams to use sampling
            console.print(
                f"Stage 2/2: Generating sequences with sampling for {len(stage2_prompts)} thinking candidates...",
                style=warning_style,
            )
            console.print(
                f"Each thinking candidate will generate 1 sequence "
                f"({num_return_thinking} thinking × 1 = {num_return_thinking} total per sample)",
                style=warning_style,
            )
        # Call _generate_standard for stage 2
        stage2_results, stage2_logprobs, stage2_mfu_stats = self._generate_standard(stage2_prompts, **kwargs_stage2)
        # Merge mfu_stats from both stages
        self.mfu_stats = {}
        for sample_id, stats in stage1_mfu_stats.items():
            self.mfu_stats[sample_id] = {
                "input_tokens": stats["input_tokens"].copy(),
                "output_tokens": stats["output_tokens"].copy(),
                "times": stats["times"].copy()
            }
        # Group stage2 stats by original_id first
        stage2_by_original = defaultdict(lambda: {"input_tokens": [], "output_tokens": [], "times": []})
        for thinking_id, stats in stage2_mfu_stats.items():
            original_id = thinking_id.rsplit("_thinking_", 1)[0]
            stage2_by_original[original_id]["input_tokens"].extend(stats["input_tokens"])
            stage2_by_original[original_id]["output_tokens"].extend(stats["output_tokens"])
            stage2_by_original[original_id]["times"].extend(stats["times"])
        # Aggregate: sum tokens, max time
        for original_id, stats in stage2_by_original.items():
            self.mfu_stats[original_id]["input_tokens"].append(sum(stats["input_tokens"]))
            self.mfu_stats[original_id]["output_tokens"].append(sum(stats["output_tokens"]))
            self.mfu_stats[original_id]["times"].append(max(stats["times"]))
        # Merge results back by original sample_id
        # Combine thinking + prompt_token + SID into final generation
        final_results = defaultdict(list)
        final_logprobs = defaultdict(list)
        for thinking_sample_id, sid_sequences in stage2_results.items():
            # Extract original sample_id and thinking index
            # Format: "sampleID_thinking_N"
            parts = thinking_sample_id.rsplit("_thinking_", 1)
            original_sample_id = parts[0]
            thinking_idx = int(parts[1])
            # Get the corresponding thinking text from stage 1
            thinking_text = stage1_results[original_sample_id][thinking_idx]
            # Combine thinking + prompt_token + SID for each sequence
            for sid_seq in sid_sequences:
                # Format: thinking_text</think>\n<|sid_begin|>sid_sequence
                combined = f"{thinking_text}</think>\n{prompt_token or ''}{sid_seq}"
                final_results[original_sample_id].append(combined)
            # Also merge logprobs if available (from stage 2 beam search)
            if thinking_sample_id in stage2_logprobs:
                final_logprobs[original_sample_id].extend(stage2_logprobs[thinking_sample_id])
        return (dict(final_results), dict(final_logprobs))
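    # --- Illustrative fan-out (added note; strings are hypothetical) ---
    # With num_return_thinking_sequences=2, num_beams=4, prompt_token="<|sid_begin|>":
    #
    #   stage 1:  "u1"            -> ["thought A", "thought B"]
    #   stage 2:  "u1_thinking_0" -> prompt + "thought A</think>\n<|sid_begin|>"  (4 beams)
    #             "u1_thinking_1" -> prompt + "thought B</think>\n<|sid_begin|>"  (4 beams)
    #   merged:   "u1"            -> 8 candidates "thought X</think>\n<|sid_begin|>..."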
    def _generate_two_stage_classification_with_thinking(
        self,
        prompts: Dict[str, str],
        **kwargs
    ) -> tuple:
        """
        Two-stage generation for classification tasks with thinking mode

        Stage 1: Generate thinking content with top_p/top_k sampling until </think>
        Stage 2: Extract logprobs for target tokens for each thinking candidate

        This method is shared across all generator implementations to reduce code
        duplication. Subclasses must implement _generate_standard() and
        extract_token_logprobs() for this method to work.

        Args:
            prompts: {sample_id: prompt_text}
            **kwargs: Optional generation parameters

        Returns:
            Tuple of two dicts:
            - First dict: {sample_id: ["thinking_1</think>\n{'是': 0.8, '否': 0.2}", ...]}
            - Second dict: {} (empty, no logprobs for classification)
        """
        # target_tokens is guaranteed to be in kwargs (checked in generate() method)
        target_tokens = kwargs["target_tokens"]
        console.print(
            "Stage 1/2: Generating thinking content with top_p/top_k sampling...",
            style=warning_style,
        )
        # Stage 1: Build kwargs for thinking generation (remove beam search, add stop)
        kwargs_stage1 = kwargs.copy()
        kwargs_stage1.pop("num_beams", None)  # Remove beam search to force sampling mode
        kwargs_stage1.pop("target_tokens", None)  # Remove target_tokens for stage 1
        kwargs_stage1["stop"] = ["</think>"]  # Stop at </think> tag
        # Use num_return_thinking_sequences for stage 1 if specified
        num_return_thinking = kwargs.get("num_return_thinking_sequences", 1)
        kwargs_stage1["num_return_sequences"] = num_return_thinking
        # Use max_new_thinking_tokens for stage 1 if specified
        max_new_thinking_tokens = kwargs.get("max_new_thinking_tokens", 1000)
        kwargs_stage1["max_new_tokens"] = max_new_thinking_tokens
        # Call _generate_standard for stage 1 (ignoring logprobs as they're not used)
        stage1_results, _, stage1_mfu_stats = self._generate_standard(prompts, **kwargs_stage1)
        # Prepare prompts for stage 2 by appending thinking + </think>
        # Each sample will have multiple thinking candidates
        stage2_prompts = {}
        sample_to_thinking_count = {}  # Track how many thinking candidates each sample has
        for sample_id, thinking_list in stage1_results.items():
            # Use ALL thinking candidates (not just the first one)
            sample_to_thinking_count[sample_id] = len(thinking_list)
            for idx, thinking_text in enumerate(thinking_list):
                # Create unique ID for each thinking candidate
                thinking_sample_id = f"{sample_id}_thinking_{idx}"
                # Append </think> to complete the thinking tag
                full_thinking = thinking_text + "</think>\n"
                stage2_prompt = prompts[sample_id] + full_thinking
                stage2_prompts[thinking_sample_id] = stage2_prompt
        console.print(
            f"Stage 2/2: Extracting logprobs for {len(stage2_prompts)} thinking candidates...",
            style=warning_style,
        )
        console.print(
            f"Each thinking candidate will extract logprobs for tokens {target_tokens} "
            f"({num_return_thinking} thinking total per sample)",
            style=warning_style,
        )
        # Build kwargs for stage 2 (remove target_tokens to avoid duplication)
        kwargs_stage2 = kwargs.copy()
        kwargs_stage2.pop("target_tokens", None)
        # Call extract_token_logprobs for stage 2
        stage2_probs, _, stage2_mfu_stats = self.extract_token_logprobs(stage2_prompts, target_tokens, **kwargs_stage2)
        # Merge mfu_stats from both stages
        self.mfu_stats = {}
        for sample_id, stats in stage1_mfu_stats.items():
            self.mfu_stats[sample_id] = {
                "input_tokens": stats["input_tokens"].copy(),
                "output_tokens": stats["output_tokens"].copy(),
                "times": stats["times"].copy()
            }
        # Group stage2 stats by original_id first
        stage2_by_original = defaultdict(lambda: {"input_tokens": [], "output_tokens": [], "times": []})
        for thinking_id, stats in stage2_mfu_stats.items():
            original_id = thinking_id.rsplit("_thinking_", 1)[0]
            stage2_by_original[original_id]["input_tokens"].extend(stats["input_tokens"])
            stage2_by_original[original_id]["output_tokens"].extend(stats["output_tokens"])
            stage2_by_original[original_id]["times"].extend(stats["times"])
        # Aggregate: sum tokens, max time
        for original_id,
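# --- Illustrative classification output (added note; values are hypothetical) ---
# For target_tokens=["是", "否"] and one thinking candidate per sample, the first
# returned dict maps each sample to its thinking text followed by a JSON dict of
# token probabilities:
#
#   {"u1": ['The user mostly watches cooking videos...</think>\n{"是": 0.8, "否": 0.2}']}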
stats in stage2_by_original.items():
            self.mfu_stats[original_id]["input_tokens"].append(sum(stats["input_tokens"]))
            self.mfu_stats[original_id]["output_tokens"].append(sum(stats["output_tokens"]))
            self.mfu_stats[original_id]["times"].append(max(stats["times"]))
        # Merge results back by original sample_id
        # Combine thinking + probabilities into final generation
        final_results = defaultdict(list)
        for thinking_sample_id, json_str_list in stage2_probs.items():
            # Extract original sample_id and thinking index
            # Format: "sampleID_thinking_N"
            parts = thinking_sample_id.rsplit("_thinking_", 1)
            original_sample_id = parts[0]
            thinking_idx = int(parts[1])
            # Get the corresponding thinking text from stage 1
            thinking_text = stage1_results[original_sample_id][thinking_idx]
            # Extract JSON string from list (extract_token_logprobs returns [json_str])
            json_str = json_str_list[0]
            # Combine thinking + probabilities (json_str is already formatted)
            # Format: "thinking_text</think>\n{\"是\": 0.8, \"否\": 0.2}"
            combined = f"{thinking_text}</think>\n{json_str}"
            final_results[original_sample_id].append(combined)
        return (dict(final_results), {})


class HfTransformersMixin:
    """
    Mixin for HuggingFace Transformers functionality

    Provides common parameter building logic for HuggingFace Transformers generate() API.
    This mixin can be combined with Generator or RayMixin to create HuggingFace-based generators.
    """

    def _build_sampling_params(self, **kwargs) -> tuple:
        """
        Build HuggingFace sampling/generation parameters

        Args:
            **kwargs: Optional parameters to override default values

        Returns:
            Tuple of (gen_kwargs dict, stop_sequences list)
        """
        n = kwargs.get("num_return_sequences")
        max_tokens = kwargs.get("max_new_tokens")
        num_beams = kwargs.get("num_beams", None)
        use_beam_search = num_beams is not None
        stop_sequences = kwargs.get("stop", [])
        if use_beam_search:
            # Beam search mode
            if n and n > num_beams:
                raise ValueError(
                    f"num_return_sequences ({n}) cannot be greater than num_beams ({num_beams}). "
                    f"Beam search can only return at most {num_beams} sequences. "
                    f"Please set num_return_sequences <= num_beams or increase num_beams."
                )
            gen_kwargs = {
                "num_beams": num_beams,
                "num_return_sequences": n if n else num_beams,
                "max_new_tokens": max_tokens,
                "do_sample": False,
                "output_scores": True,
                "return_dict_in_generate": True,
            }
            if "repetition_penalty" in kwargs:
                gen_kwargs["repetition_penalty"] = kwargs["repetition_penalty"]
        else:
            # Sampling mode
            gen_kwargs = {
                "num_return_sequences": n,
                "max_new_tokens": max_tokens,
                "temperature": kwargs.get("temperature", 0.7),
                "top_p": kwargs.get("top_p", 0.9),
                "top_k": kwargs.get("top_k", -1),
                "repetition_penalty": kwargs.get("repetition_penalty", 1.0),
                "presence_penalty": kwargs.get("presence_penalty", 0.0),
                "frequency_penalty": kwargs.get("frequency_penalty", 0.0),
                "do_sample": kwargs.get("do_sample", True),
            }
        return gen_kwargs, stop_sequences


class VllmMixin:
    """
    Mixin for vLLM functionality

    Provides common parameter building logic for vLLM generate() API.
    This mixin can be combined with Generator or RayMixin to create vLLM-based generators.
""" def _build_sampling_params(self, **kwargs): """ Build vLLM sampling parameters Args: **kwargs: Optional parameters to override default values Returns: SamplingParams or BeamSearchParams object """ from vllm import SamplingParams from vllm.sampling_params import BeamSearchParams temperature = kwargs.get("temperature", 0.7) top_p = kwargs.get("top_p", 0.9) top_k = kwargs.get("top_k", -1) repetition_penalty = kwargs.get("repetition_penalty", 1.0) presence_penalty = kwargs.get("presence_penalty", 0.0) frequency_penalty = kwargs.get("frequency_penalty", 0.0) max_tokens = kwargs.get("max_new_tokens") n = kwargs.get("num_return_sequences", 1) stop = kwargs.get("stop", None) num_beams = kwargs.get("num_beams", None) use_beam_search = num_beams is not None if use_beam_search: # Beam search: set beam_width to max(num_beams, n) actual_beam_width = max(num_beams, n) params = BeamSearchParams( beam_width=actual_beam_width, max_tokens=max_tokens, ) else: # Sampling mode params = SamplingParams( n=n, temperature=temperature, top_p=top_p, top_k=top_k, repetition_penalty=repetition_penalty, presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, max_tokens=max_tokens, stop=stop, ) return params def _should_enable_optimizations(self) -> bool: """ Determine whether to enable optimizations based on task types and force flags This method is primarily used by vLLM-based generators to control chunked_prefill and prefix_caching optimizations. Returns: True if should enable optimizations, False otherwise """ # Priority 1: Force flags if self.force_enable_optimizations: return True if self.force_disable_optimizations: return False # Priority 2: Check if any task in task_types requires disabling optimizations if hasattr(self, 'task_types') and self.task_types: for task_type in self.task_types: if task_type in DISABLE_OPTIMIZATIONS_FOR_TASKS: return False # Default: enable optimizations return True class RayMixin: """ Mixin for Ray distributed computing functionality Provides Ray cluster management, GPU allocation, and resource cleanup for distributed generators. This is a mixin class designed to be combined with other generator classes using multiple inheritance. """ def _initialize_ray_cluster(self): """Initialize Ray cluster connection""" import ray if ray.is_initialized(): console.print( " ✓ Ray already initialized", style=success_style, ) return console.print( " Initializing Ray cluster connection...", style=subhead_style_2, ) # Determine connection mode if self.ray_address == "local": # Local mode (single machine) ray.init(ignore_reinit_error=True) console.print( " ✓ Ray initialized in local mode", style=success_style, ) elif self.ray_address == "auto": # Auto-detect mode try: ray.init(address="auto", ignore_reinit_error=True) console.print( " ✓ Ray connected to existing cluster (auto-detected)", style=success_style, ) except Exception: # Fallback to local mode console.print( " [yellow]No existing cluster found, initializing local mode...[/yellow]", style=warning_style, ) ray.init(ignore_reinit_error=True) console.print( " ✓ Ray initialized in local mode", style=success_style, ) else: # Specific address ray.init(address=self.ray_address, ignore_reinit_error=True) console.print( f" ✓ Ray connected to cluster at {self.ray_address}", style=success_style, ) def _determine_gpu_ids_from_cluster(self) -> List[Dict[str, Any]]: """ Determine GPU resources from Ray cluster Returns: List of GPU info dicts: [{"node_id": str, "gpu_index": int}, ...] 
""" import ray # Get all nodes in cluster nodes = ray.nodes() # Collect GPU information from all nodes gpu_list = [] for node in nodes: if not node['Alive']: continue node_id = node['NodeID'] node_resources = node.get('Resources', {}) # Count GPUs on this node num_gpus_on_node = int(node_resources.get('GPU', 0)) if num_gpus_on_node > 0: # Add GPU entries for this node for gpu_idx in range(num_gpus_on_node): gpu_list.append({ "node_id": node_id, "node_ip": node.get('NodeManagerAddress', 'unknown'), "gpu_index": gpu_idx, "global_index": len(gpu_list) # Global GPU index across cluster }) if not gpu_list: raise RuntimeError("No GPUs detected in Ray cluster") # Apply user filters if specified if self.gpu_ids is not None: # In cluster mode, gpu_ids refers to global indices filtered_list = [] for idx in self.gpu_ids: if idx < len(gpu_list): filtered_list.append(gpu_list[idx]) else: console.print( f" [yellow]Warning:[/yellow] GPU index {idx} out of range (max: {len(gpu_list)-1}), skipping", style=warning_style, ) gpu_list = filtered_list elif self.num_gpus is not None: # Limit to first num_gpus if self.num_gpus < len(gpu_list): gpu_list = gpu_list[:self.num_gpus] elif self.num_gpus > len(gpu_list): console.print( f" [yellow]Warning:[/yellow] Requested {self.num_gpus} GPUs, but only {len(gpu_list)} available in cluster", style=warning_style, ) return gpu_list def _group_gpus_for_workers( self, gpu_list: List[Dict[str, Any]], tensor_parallel_size: int ) -> tuple: """ Group GPUs for workers, ensuring same-node constraint for tensor parallelism Args: gpu_list: List of GPU info dicts tensor_parallel_size: Number of GPUs per worker Returns: (worker_gpu_groups, worker_node_assignments) - worker_gpu_groups: List of GPU index lists for each worker - worker_node_assignments: List of node IDs for each worker """ if len(gpu_list) % tensor_parallel_size != 0: raise ValueError( f"Number of GPUs ({len(gpu_list)}) must be divisible by tensor_parallel_size ({tensor_parallel_size})" ) num_workers = len(gpu_list) // tensor_parallel_size worker_gpu_groups = [] worker_node_assignments = [] if tensor_parallel_size == 1: # Simple case: one GPU per worker for gpu_info in gpu_list: worker_gpu_groups.append([gpu_info["gpu_index"]]) worker_node_assignments.append(gpu_info["node_id"]) else: # Complex case: multiple GPUs per worker # Need to ensure all GPUs in a group are on the same node if not self.allow_cross_node_tensor_parallel: # Group by node first node_to_gpus = {} for gpu_info in gpu_list: node_id = gpu_info["node_id"] if node_id not in node_to_gpus: node_to_gpus[node_id] = [] node_to_gpus[node_id].append(gpu_info) # Create workers from each node for node_id, node_gpus in node_to_gpus.items(): # Group GPUs on this node for i in range(0, len(node_gpus), tensor_parallel_size): if i + tensor_parallel_size <= len(node_gpus): gpu_group = [gpu["gpu_index"] for gpu in node_gpus[i:i+tensor_parallel_size]] worker_gpu_groups.append(gpu_group) worker_node_assignments.append(node_id) if len(worker_gpu_groups) != num_workers: raise ValueError( f"Cannot create {num_workers} workers with tensor_parallel_size={tensor_parallel_size} " f"while ensuring same-node constraint. Got {len(worker_gpu_groups)} workers instead. " f"Try setting --allow_cross_node_tensor_parallel or adjust tensor_parallel_size." ) else: # Allow cross-node tensor parallel (not recommended) console.print( " [yellow]Warning: Cross-node tensor parallelism enabled. 
This may cause performance degradation.[/yellow]", style=warning_style, ) for i in range(num_workers): start_idx = i * tensor_parallel_size end_idx = start_idx + tensor_parallel_size gpu_group = [gpu_list[j]["gpu_index"] for j in range(start_idx, end_idx)] worker_gpu_groups.append(gpu_group) # Use first GPU's node as primary node worker_node_assignments.append(gpu_list[start_idx]["node_id"]) return worker_gpu_groups, worker_node_assignments def _display_cluster_info(self, gpu_list: List[Dict[str, Any]], num_workers: int): """Display cluster and GPU information""" import ray # Get cluster info nodes = ray.nodes() alive_nodes = [n for n in nodes if n['Alive']] console.print( f" Cluster nodes: [green]{len(alive_nodes)}[/green]", style=subhead_style_2, ) # Group GPUs by node node_gpu_count = {} for gpu_info in gpu_list: node_ip = gpu_info["node_ip"] node_gpu_count[node_ip] = node_gpu_count.get(node_ip, 0) + 1 for node_ip, count in node_gpu_count.items(): console.print( f" - Node {node_ip}: {count} GPU(s)", style=subhead_style_2, ) console.print( f" Total GPUs: [green]{len(gpu_list)}[/green]", style=subhead_style_2, ) console.print( f" Tensor Parallel Size: [green]{self.tensor_parallel_size}[/green]", style=subhead_style_2, ) console.print( f" Worker count: [green]{num_workers}[/green]", style=subhead_style_2, ) # Display worker assignments console.print( f" Worker GPU assignments:", style=subhead_style_2, ) for i, (gpu_group, node_id) in enumerate(zip(self.worker_gpu_groups, self.worker_node_assignments)): # Find node IP for this node_id node_ip = "unknown" for gpu_info in gpu_list: if gpu_info["node_id"] == node_id: node_ip = gpu_info["node_ip"] break console.print( f" - Worker {i}: GPUs {gpu_group} on node {node_ip}", style=subhead_style_2, ) def cleanup(self): """ Explicitly cleanup resources and release GPU memory Called after generation tasks complete to release GPU memory occupied by Ray Workers. This is useful for avoiding OOM errors during subsequent metric calculations. """ import ray console.print( "\nReleasing Ray Workers and resources...", style=warning_style, ) try: # 1. Cleanup all Workers if hasattr(self, 'workers') and self.workers: for i, worker in enumerate(self.workers): try: ray.kill(worker) console.print( f" ✓ Worker {i} terminated", style=success_style, ) except Exception as e: console.print( f" ⚠ Worker {i} cleanup failed: {e}", style=err_style, ) self.workers = [] # 2. 
Shut down Ray (optional) if ray.is_initialized(): console.print( " Shutting down Ray...", style=subhead_style_2, ) ray.shutdown() console.print( " ✓ Ray shut down", style=subhead_style_2, ) console.print( "✓ Resource cleanup completed\n", style=success_style, ) except Exception as e: console.print( f"✗ Cleanup process error: {e}", style=err_style, ) ================================================ FILE: benchmarks/benchmark/benchmark.py ================================================ import os import json from typing import Any, Dict, List, Optional, Tuple, Union from pathlib import Path from datetime import datetime from benchmark.console import * from benchmark.generation_runner import GenerationRunner from benchmark.base_generator import Generator from benchmark.tasks import ( BenchmarkTable, LATEST_BENCHMARK_VERSION, check_benchmark_version, check_task_types, check_splits, ) from benchmark.tasks.v1_0.registry import get_loader, get_evaluator, get_task_config class DataLoaderWrapper: """Wrapper for unified data loading interface""" def __init__(self, model_path: str, benchmark_version: str, data_dir: str, enable_thinking: Optional[bool] = None): self.model_path = model_path self._tokenizer = self._create_tokenizer(model_path) if model_path else None self.benchmark_version = benchmark_version self.data_dir = data_dir self.enable_thinking = enable_thinking self._loader_cache = {} def _create_tokenizer(self, model_path: str): """Create tokenizer from model path""" try: from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained( model_path, trust_remote_code=True ) console.print(f"[green]Tokenizer loaded from: {model_path}[/green]") return tokenizer except Exception as e: raise RuntimeError(f"Failed to load tokenizer from {model_path}: {e}") def load_data(self, task_name: str, split: str = "test", sample_size: Optional[Any] = None): """Load data using new loader system""" if task_name not in self._loader_cache: self._loader_cache[task_name] = get_loader( task_name=task_name, data_dir=self.data_dir, tokenizer=self._tokenizer, enable_thinking=self.enable_thinking, ) loader = self._loader_cache[task_name] return loader.load_data(split=split, sample_size=sample_size) class Benchmark: """ Benchmark Generation Task Evaluation Framework Usage Example: from benchmark import Benchmark from your_generator import YourGenerator benchmark = Benchmark( data_dir="./data" ) generator = YourGenerator("your-model-path") benchmark.run( generator=generator, output_dir="./results" ) """ def __init__( self, model_path: Optional[str] = None, task_types: Optional[List[str]] = None, splits: Optional[List[str]] = None, data_dir: Optional[str] = None, enable_thinking: Optional[bool] = None, ): """Initialize evaluation framework""" self.benchmark_version = LATEST_BENCHMARK_VERSION self.data_dir = data_dir self.task_types = check_task_types(task_types, self.benchmark_version) self.splits = check_splits(splits, self.benchmark_version) self.data_loader = DataLoaderWrapper( model_path=model_path, benchmark_version=self.benchmark_version, data_dir=data_dir, enable_thinking=enable_thinking, ) @staticmethod def print_benchmark_table(): """Print all available benchmark versions and tasks""" for benchmark_version in BenchmarkTable: console.print( head_print(f"Benchmark Dataset Version: {benchmark_version}"), style=head_style, justify="center", ) task_types_list = list(BenchmarkTable[benchmark_version].keys()) total_task_types = len(task_types_list) for task_idx, task_type in enumerate(task_types_list, 
start=1): console.print( f"\nTask Type [{task_idx}/{total_task_types}]: {task_type}\n", style=subhead_style, justify="center" ) task_config = BenchmarkTable[benchmark_version][task_type] console.print( f"Dataset Name: {task_config.get('name', task_type)}", style=row_style, justify="center", ) console.print( f"Source: {task_config.get('source', 'N/A')}", style=row_style, justify="center", ) console.print( f"Splits: {task_config.get('splits', [])}", style=row_style, justify="center", ) console.print( f"Sample Size: {task_config.get('sample_size', 'N/A')}", style=row_style, justify="center", ) console.print( f"Description: {task_config.get('description', 'N/A')}", style=row_style, justify="center", ) @staticmethod def check_generator(generator): """Verify that generator implements required methods""" required_methods = ["__str__", "generate"] for method in required_methods: if not hasattr(generator, method): raise ValueError(f"Generator should have `{method}` method.") if method != "__str__" and not callable(getattr(generator, method, None)): raise ValueError(f"Generator.{method} should be callable.") def run( self, generator: Generator, output_dir: str = "./results", overwrite: bool = False, **kwargs ): """Run benchmark evaluation""" self.check_generator(generator) console.print(f"\n\nStarting generation\n\n", style=head_style, justify="center") generation_runner = GenerationRunner(self.data_loader, overwrite=overwrite) total_tasks = 0 completed_tasks = 0 task_table = BenchmarkTable[self.benchmark_version] for task_name in self.task_types: if task_name not in task_table: continue task_config = task_table[task_name] available_splits = task_config.get("splits", ["test"]) for split in self.splits: if split in available_splits: total_tasks += 1 for task_name in self.task_types: if task_name not in task_table: console.print(f"Task does not exist: {task_name}") continue task_config = task_table[task_name] available_splits = task_config.get("splits", ["test"]) # Iterate through all splits for split in self.splits: if split not in available_splits: console.print(f"Split does not exist: {split} (task: {task_name})") continue # Determine displayed sample size sample_size_param = kwargs.get('sample_size') if sample_size_param is not None: if sample_size_param == "full": display_sample_size = task_config.get('size', 'N/A') else: display_sample_size = int(sample_size_param) else: display_sample_size = task_config.get('sample_size', 'N/A') console.print( f"\nTask [{completed_tasks + 1}/{total_tasks}]: {task_name} | Split: {split} | Sample Size: {display_sample_size}\n", style=subhead_style, justify="center", ) try: task_gen_config = task_config.get("generation_config", {}) prompt_config = task_config.get("prompt_config", {}) # Merge generation parameters (priority: user input > task config > Generator init parameters) # Filter out None values from kwargs to avoid overwriting task config valid_kwargs = {k: v for k, v in kwargs.items() if v is not None} merged_kwargs = {**task_gen_config, **prompt_config, **valid_kwargs} print(f"[{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}]") # Execute generation (without computing metrics) generation_runner( task_name=task_name, split=split, results_save_dir=output_dir, generator=generator, **merged_kwargs ) completed_tasks += 1 except Exception as e: import traceback console.print(f"✗ Task failed: {task_name}/{split}", style=err_style) console.print(f"✗ Error type: {type(e).__name__}", style=err_style) console.print(f"✗ Error message: {str(e)}", style=err_style) 
console.print("✗ Full stack trace:", style=err_style) console.print(traceback.format_exc(), style=dim_style) console.print(f"Total tasks: {total_tasks}") console.print(f"Completed tasks: {completed_tasks}") console.print(f"Failed tasks: {total_tasks - completed_tasks}") console.print(f"Results saved to: {output_dir}") @staticmethod def _evaluate_single_task( task_name: str, task_dir: str, generation_file: str, split: str, data_dir: str, overwrite: bool, valid_kwargs: Dict[str, Any], cached_metrics: Optional[Dict[str, Any]] = None ) -> Tuple[Optional[Dict[str, Any]], Optional[Dict[str, Any]], Optional[Dict[str, Any]]]: """Evaluate a single task split""" # Read generation results with open(generation_file, 'r', encoding='utf-8') as f: gen_data = json.load(f) if "samples" not in gen_data: raise ValueError("Generation result file missing 'samples' field (new format).") samples = gen_data["samples"] # Load task configuration if task_name not in BenchmarkTable[LATEST_BENCHMARK_VERSION]: console.print(f"⚠ Warning: Task '{task_name}' not found in BenchmarkTable[{LATEST_BENCHMARK_VERSION}], skipping...", style=warning_style) return None, None, None try: evaluator_class = get_evaluator(task_name=task_name) task_config = get_task_config(task_name=task_name) task_config['evaluation_config'].update(valid_kwargs) evaluator = evaluator_class( samples=samples, task_name=task_name, predictions_dir=task_dir, debug=True, # Enable debug mode for detailed info task_config=task_config, data_dir=data_dir, overwrite=overwrite, cached_metrics=cached_metrics ) console.print(f"Using {evaluator_class.__name__} for {task_name}") metrics, per_sample_metrics = evaluator.evaluate() # Compute MFU metrics if hardware info and token stats are available try: from benchmark.tasks.v1_0.mfu_evaluator import compute_mfu_from_generation_data mfu_metrics = compute_mfu_from_generation_data(gen_data) if mfu_metrics: metrics.update(mfu_metrics) # Display MFU for each stage if "mfu" in mfu_metrics: mfu_list = mfu_metrics["mfu"] if len(mfu_list) == 1: console.print(f"✓ MFU: {mfu_list[0]:.2%}", style=success_style) else: mfu_values = [f"Stage{i+1}: {mfu:.2%}" for i, mfu in enumerate(mfu_list)] console.print(f"✓ MFU (multi-stage): {', '.join(mfu_values)}", style=success_style) except Exception as e: console.print(f"⚠ Warning: MFU calculation failed: {e}", style=warning_style) # Update samples with per-sample metrics for sample_id, sample_metrics in per_sample_metrics.items(): if sample_id in samples: samples[sample_id].update(sample_metrics) # Write updated data back to generation result file gen_data["samples"] = samples with open(generation_file, 'w', encoding='utf-8') as f: json.dump(gen_data, f, indent=2, ensure_ascii=False) console.print(f"Updated sample metrics to: {generation_file}") return gen_data, metrics, samples except Exception as e: console.print(f"✗ Error evaluating {task_name}: {e}", style=err_style) console.print(f"Skipping task {task_name}", style=warning_style) return None, None, None @staticmethod def _create_debug_file(generation_file: str, gen_data: Dict[str, Any], samples: Dict[str, Any], overwrite: bool = False) -> None: """Create debug file with first 100 samples""" debug_file = f"{generation_file}.debug" if overwrite or not os.path.exists(debug_file): sorted_ids = sorted(samples.keys()) debug_sample_ids = sorted_ids[:100] debug_samples = {id: samples[id] for id in debug_sample_ids} debug_data = { "model_name": gen_data.get("model_name", ""), "task_name": gen_data.get("task_name", ""), "split": 
gen_data.get("split", ""), "total_time": gen_data.get("total_time", 0), "avg_time_per_sample": gen_data.get("avg_time_per_sample", 0), "samples": debug_samples, } with open(debug_file, 'w', encoding='utf-8') as f: json.dump(debug_data, f, indent=2, ensure_ascii=False) console.print(f"Created debug file: {debug_file}") @staticmethod def _calculate_model_total_time(model_results: Dict[str, Any]) -> float: """Calculate total time for all tasks of a model""" model_total_time = 0 for task_name, task_results in model_results.items(): if task_name.startswith("_"): continue for split, split_metrics in task_results.items(): model_total_time += split_metrics.get("total_time", 0) return model_total_time @staticmethod def _save_results_as_json(eval_results: Dict[str, Any], output_path: str) -> None: """Save evaluation results as JSON""" os.makedirs(os.path.dirname(output_path) if os.path.dirname(output_path) else ".", exist_ok=True) with open(output_path, 'w', encoding='utf-8') as f: json.dump(eval_results, f, indent=2, ensure_ascii=False) console.print(f"\n\n✓ Results Saved to {output_path}\n\n", style=success_style, justify="center") @staticmethod def _load_existing_results(output_path: str, task_types: List[str] = None) -> dict: """Load existing evaluation results from JSON file for incremental update""" eval_results = {} if os.path.exists(output_path) and output_path.endswith('.json'): try: with open(output_path, 'r', encoding='utf-8') as f: eval_results = json.load(f) console.print(f"✓ Loaded existing results from {output_path}", style=success_style, justify="center") if task_types is not None: console.print(f" Will update only specified tasks: {', '.join(task_types)}", style=success_style, justify="center") except Exception as e: console.print(f"⚠ Warning: Failed to load existing results: {e}", style=err_style, justify="center") console.print(f" Starting with empty results", style=err_style, justify="center") eval_results = {} return eval_results @staticmethod def evaluate_dev( generation_results_dir: str, output_path: str = "./eval_results.json", data_dir: str = None, overwrite: bool = False, task_types: List[str] = None, **kwargs ): """Batch evaluate generated results and generate report""" valid_kwargs = {k: v for k, v in kwargs.items() if v is not None} console.print(f"\n\nMetric Calculation\n", style=head_style, justify="center") console.print(f"Result Directory: {generation_results_dir}\n\n", style=head_style, justify="center") if not os.path.exists(generation_results_dir): console.print(f"✗ Error: Result Directory Not Found: {generation_results_dir}", style=err_style, justify="center") return eval_results = Benchmark._load_existing_results(output_path, task_types) for model_name in os.listdir(generation_results_dir): model_dir = os.path.join(generation_results_dir, model_name) if not os.path.isdir(model_dir): continue if model_name not in eval_results: eval_results[model_name] = {} all_tasks = [t for t in os.listdir(model_dir) if os.path.isdir(os.path.join(model_dir, t))] if task_types is not None: all_tasks = [t for t in all_tasks if t in task_types] total_tasks_count = len(all_tasks) for task_idx, task_name in enumerate(all_tasks, start=1): task_dir = os.path.join(model_dir, task_name) console.print(f"\nTask [{task_idx}/{total_tasks_count}]: {task_name}\n", style=subhead_style, justify="center") if task_name not in eval_results[model_name]: eval_results[model_name][task_name] = {} for filename in os.listdir(task_dir): if not filename.endswith('_generated.json'): continue split = 
filename.replace('_generated.json', '') generation_file = os.path.join(task_dir, filename) cached_metrics = eval_results.get(model_name, {}).get(task_name, {}).get(split, {}) # Evaluate single task gen_data, metrics, samples = Benchmark._evaluate_single_task( task_name=task_name, task_dir=task_dir, generation_file=generation_file, split=split, data_dir=data_dir, overwrite=overwrite, valid_kwargs=valid_kwargs, cached_metrics=cached_metrics ) if gen_data is None: continue Benchmark._create_debug_file(generation_file, gen_data, samples, overwrite) eval_results[model_name][task_name][split] = { **metrics, "total_time": gen_data.get("total_time", 0), "avg_time_per_sample": gen_data.get("avg_time_per_sample", 0), } model_total_time = Benchmark._calculate_model_total_time(eval_results[model_name]) eval_results[model_name]["_total_time"] = model_total_time console.print(f"\n✓ Total time: {model_total_time:.2f}s ({model_total_time/60:.2f}min)\n", style=success_style) Benchmark._save_results_as_json(eval_results, output_path) ================================================ FILE: benchmarks/benchmark/checkpoint_utils.py ================================================ """ PT format model checkpoint loading tool Supports loading PyTorch model checkpoints in non-safetensor format """ import torch import hashlib from pathlib import Path from typing import Dict, Optional, Tuple, List from difflib import SequenceMatcher from benchmark.console import console def match_checkpoint_keys_to_model( checkpoint_keys: List[str], model_keys: List[str], similarity_threshold: float = 0.8 ) -> Dict[str, str]: """ Intelligently match checkpoint key names to model key names Args: checkpoint_keys: List of key names in checkpoint model_keys: List of key names in model similarity_threshold: Similarity threshold Returns: Mapping dictionary {checkpoint_key: model_key} """ mapping = {} for ckpt_key in checkpoint_keys: # Try exact match first if ckpt_key in model_keys: mapping[ckpt_key] = ckpt_key continue # Try matching by removing "model." prefix if ckpt_key.startswith("model."): clean_key = ckpt_key[6:] # Remove "model." if clean_key in model_keys: mapping[ckpt_key] = clean_key continue # Try matching by adding "model." 
prefix prefixed_key = f"model.{ckpt_key}" if prefixed_key in model_keys: mapping[ckpt_key] = prefixed_key continue # Use similarity matching best_match = None best_score = 0.0 for model_key in model_keys: score = SequenceMatcher(None, ckpt_key, model_key).ratio() if score > best_score and score >= similarity_threshold: best_score = score best_match = model_key if best_match: mapping[ckpt_key] = best_match console.print(f"Similarity match: {ckpt_key} -> {best_match} (score: {best_score:.2f})") return mapping def check_embedding_weight_sharing( state_dict: Dict[str, torch.Tensor], verbose: bool = True ) -> Tuple[bool, Optional[str], Optional[str]]: """ Check if embed_tokens and lm_head weights are shared Args: state_dict: Model state dictionary verbose: Whether to print detailed information Returns: (is_shared, embed_key, lm_head_key) """ # Find embed_tokens and lm_head keys embed_key = None lm_head_key = None for key in state_dict.keys(): if "embed_tokens.weight" in key: embed_key = key elif "lm_head.weight" in key: lm_head_key = key if not embed_key or not lm_head_key: if verbose: console.print(f"Complete weight pair not found: embed_tokens={embed_key}, lm_head={lm_head_key}") return False, embed_key, lm_head_key embed_tensor = state_dict[embed_key] lm_head_tensor = state_dict[lm_head_key] if verbose: console.print(f"embed_tokens.weight shape: {embed_tensor.shape}") console.print(f"lm_head.weight shape: {lm_head_tensor.shape}") # Check if completely identical is_shared = torch.equal(embed_tensor, lm_head_tensor) if verbose: if is_shared: console.print("✓ embed_tokens and lm_head weights are identical (shared weights)") else: console.print("✗ embed_tokens and lm_head weights are different") # Calculate difference statistics diff = (embed_tensor != lm_head_tensor).sum().item() total = embed_tensor.numel() console.print(f" Different elements: {diff}/{total} ({diff/total*100:.2f}%)") return is_shared, embed_key, lm_head_key def handle_weight_tying( state_dict: Dict[str, torch.Tensor], model_keys: List[str], new_state_dict: Dict[str, str] ) -> Dict[str, torch.Tensor]: """ Handle weight tying situations In some models, embed_tokens and lm_head weights are tied Args: state_dict: Original state dictionary model_keys: List of model key names new_state_dict: Already mapped new state dictionary Returns: Updated state dictionary """ # Scenario 1: checkpoint has embed_tokens but no lm_head if any("embed_tokens.weight" in k for k in state_dict.keys()): embed_key = next((k for k in state_dict.keys() if "embed_tokens.weight" in k), None) # Check if lm_head is missing in new_state_dict has_lm_head = any("lm_head.weight" in k for k in new_state_dict.keys()) if not has_lm_head and embed_key: # Try to find lm_head key in model lm_head_candidates = ["lm_head.weight", "model.lm_head.weight"] for candidate in lm_head_candidates: if candidate in model_keys: new_state_dict[candidate] = state_dict[embed_key] console.print(f"✓ Weight tying: using {embed_key} to initialize {candidate}") break # Scenario 2: checkpoint has lm_head but no embed_tokens if any("lm_head.weight" in k for k in state_dict.keys()): lm_head_key = next((k for k in state_dict.keys() if "lm_head.weight" in k), None) # Check if embed_tokens is missing in new_state_dict has_embed = any("embed_tokens.weight" in k for k in new_state_dict.keys()) if not has_embed and lm_head_key: # Try to find embed_tokens key in model embed_candidates = ["embed_tokens.weight", "model.embed_tokens.weight"] for candidate in embed_candidates: if candidate in model_keys: 
new_state_dict[candidate] = state_dict[lm_head_key] console.print(f"✓ Weight tying: using {lm_head_key} to initialize {candidate}") break return new_state_dict def load_weights_from_pt( model: torch.nn.Module, checkpoint_path: str, device: str = "cpu", strict: bool = False, check_weight_sharing: bool = True, handle_weight_tying_flag: bool = True ) -> Tuple[List[str], List[str]]: """ Load PT format checkpoint into model Args: model: Target model checkpoint_path: Checkpoint file path device: Loading device strict: Whether to load strictly (requires all keys to match) check_weight_sharing: Whether to check weight sharing handle_weight_tying_flag: Whether to handle weight tying Returns: (missing_keys, unexpected_keys) Missing keys and unexpected keys """ console.print(f"Loading checkpoint: {checkpoint_path}") # 1. Load checkpoint try: state_dict = torch.load(checkpoint_path, map_location=device) except Exception as e: console.print(f"Failed to load checkpoint: {e}") raise # 2. Extract model state dictionary if 'model_state_dict' in state_dict: console.print("Detected 'model_state_dict' key, extracting nested state dictionary") state_dict = state_dict['model_state_dict'] elif 'state_dict' in state_dict: console.print("Detected 'state_dict' key, extracting nested state dictionary") state_dict = state_dict['state_dict'] checkpoint_keys = list(state_dict.keys()) model_keys = list(model.state_dict().keys()) console.print(f"Checkpoint key count: {len(checkpoint_keys)}") console.print(f"Model key count: {len(model_keys)}") if check_weight_sharing: check_embedding_weight_sharing(state_dict, verbose=True) console.print("Starting to match checkpoint key names to model key names...") key_mapping = match_checkpoint_keys_to_model(checkpoint_keys, model_keys) matched_count = len(key_mapping) console.print(f"Successfully matched: {matched_count}/{len(checkpoint_keys)} keys") new_state_dict = {} skipped_keys = [] for ckpt_key in checkpoint_keys: target_key = key_mapping.get(ckpt_key) if target_key is None: skipped_keys.append(ckpt_key) continue new_state_dict[target_key] = state_dict[ckpt_key] if skipped_keys: console.print(f"Skipped {len(skipped_keys)} unmatched keys") if len(skipped_keys) <= 10: console.print(f"Skipped keys: {skipped_keys}") if handle_weight_tying_flag: new_state_dict = handle_weight_tying(state_dict, model_keys, new_state_dict) console.print("Loading state dictionary into model...") missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=strict) if missing_keys: console.print(f"Missing keys ({len(missing_keys)}): {missing_keys[:10]}{'...' if len(missing_keys) > 10 else ''}") else: console.print("✓ No missing keys") if unexpected_keys: console.print(f"Unexpected keys ({len(unexpected_keys)}): {unexpected_keys[:10]}{'...' if len(unexpected_keys) > 10 else ''}") else: console.print("✓ No unexpected keys") console.print(f"✓ Checkpoint loading completed") return missing_keys, unexpected_keys def build_model_from_pt( config_path: str, checkpoint_path: str, device: str = "cuda", torch_dtype: Optional[torch.dtype] = None, trust_remote_code: bool = True ) -> torch.nn.Module: """ Create model from config and load PT checkpoint This is the unified function used by both HfTransformersGenerator and RayHfTransformersGenerator. 
Args: config_path: Model configuration path checkpoint_path: PT checkpoint path device: Target device torch_dtype: Data type trust_remote_code: Whether to trust remote code Returns: Model with checkpoint loaded """ from transformers import AutoConfig, AutoModelForCausalLM config = AutoConfig.from_pretrained( config_path, trust_remote_code=trust_remote_code ) model = AutoModelForCausalLM.from_config( config, trust_remote_code=trust_remote_code ) if torch_dtype is not None: model = model.to(torch_dtype) if device != 'cpu': model = model.to(device) target_load_device = device if device != 'cpu' else 'cpu' load_weights_from_pt( model=model, checkpoint_path=checkpoint_path, device=target_load_device, strict=False, check_weight_sharing=True, handle_weight_tying_flag=True ) return model def build_model_from_hf( model_name_or_path: str, device: str = "cuda", torch_dtype: Optional[torch.dtype] = None, trust_remote_code: bool = True, use_device_map: bool = True ) -> torch.nn.Module: """ Load pretrained model from HuggingFace This is the unified function used by both HfTransformersGenerator and RayHfTransformersGenerator. Args: model_name_or_path: Model name or path device: Target device torch_dtype: Data type trust_remote_code: Whether to trust remote code use_device_map: Whether to use device_map="auto" for multi-GPU Returns: Loaded model """ from transformers import AutoModelForCausalLM should_use_device_map = use_device_map and device != "cpu" and "cuda" in device model = AutoModelForCausalLM.from_pretrained( model_name_or_path, torch_dtype=torch_dtype, device_map="auto" if should_use_device_map else None, trust_remote_code=trust_remote_code ) if not should_use_device_map: model = model.to(device) return model def export_pt_to_safetensor( config_path: str, checkpoint_path: str, output_dir: Optional[str] = None, trust_remote_code: bool = True, use_cache: bool = True ) -> str: """ Convert PT checkpoint to HuggingFace format for vLLM compatibility Args: config_path: Model configuration path (HuggingFace model path or local config) checkpoint_path: PT checkpoint path output_dir: Output directory for converted model (optional, will use /tmp if not specified) trust_remote_code: Whether to trust remote code use_cache: Whether to use cached conversion (skip if already converted) Returns: Path to converted HuggingFace format model """ from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig hash_input = f"{config_path}_{checkpoint_path}".encode('utf-8') hash_suffix = hashlib.md5(hash_input).hexdigest()[:16] if output_dir is None: output_dir = f"/tmp/hf_checkpoint_{hash_suffix}" temp_model_path = Path(output_dir) / "converted_model" if use_cache and temp_model_path.exists(): has_config = (temp_model_path / "config.json").exists() has_weights = ( (temp_model_path / "model.safetensors").exists() or (temp_model_path / "pytorch_model.bin").exists() or any(temp_model_path.glob("*.safetensors")) or any(temp_model_path.glob("pytorch_model*.bin")) ) if has_config and has_weights: console.print( f"✓ Found converted model, skipping conversion", ) console.print( f" Converted model path: {temp_model_path}", ) return str(temp_model_path) # Create output directory temp_model_path.mkdir(parents=True, exist_ok=True) console.print(f" Output directory: {temp_model_path}") try: # 1. Load configuration console.print(" [1/4] Loading model configuration...") config = AutoConfig.from_pretrained( config_path, trust_remote_code=trust_remote_code ) # 2. 
Create model from config console.print(" [2/4] Initializing model...") model = AutoModelForCausalLM.from_config( config, trust_remote_code=trust_remote_code ) # 3. Load checkpoint console.print(" [3/4] Loading PT checkpoint...") load_weights_from_pt( model=model, checkpoint_path=checkpoint_path, device='cpu', strict=False, check_weight_sharing=True, handle_weight_tying_flag=True ) # 4. Save as HuggingFace format console.print(" [4/4] Saving as HuggingFace format...") model.save_pretrained(temp_model_path, safe_serialization=True) # Save tokenizer tokenizer = AutoTokenizer.from_pretrained( config_path, trust_remote_code=trust_remote_code ) tokenizer.save_pretrained(temp_model_path) console.print(f"✓ Model conversion completed: {temp_model_path}") return str(temp_model_path) except Exception as e: console.print(f"✗ Conversion failed: {e}") # Clean up on failure import shutil if temp_model_path.exists(): shutil.rmtree(temp_model_path) raise ================================================ FILE: benchmarks/benchmark/console.py ================================================ from rich.console import Console from pyfiglet import Figlet console = Console() err_style = "bold red" warning_style = "bold yellow" success_style = "green" dim_style = "dim" # benchmark dataset f = Figlet(font='digital') head_print = lambda x : f.renderText(x) head_style = "bold white on blue" subhead_style = "bold black on bright_blue" row_style = "black on bright_white" # Generator styles head_style_2 = "bold white on magenta" subhead_style_2 = "white" ================================================ FILE: benchmarks/benchmark/generation_runner.py ================================================ """ Generation Runner Responsible for: 1. Loading test data via data loader 2. Calling Generator to produce model outputs 3. Saving generation results to JSON files Note: Does NOT compute evaluation metrics (handled by task-specific evaluators) """ import json import os import time from typing import Dict, List, Optional, Any from pathlib import Path from benchmark.console import * from benchmark.base_generator import Generator from benchmark.tasks.v1_0.base_loader import BaseLoader class GenerationRunner: """ Generation task runner Orchestrates the generation phase of evaluation: - Loads test data via data loader - Calls generator to produce model outputs - Saves generation results to disk Evaluation metrics are computed separately by task-specific evaluators. """ def __init__( self, data_loader: BaseLoader, overwrite: bool = False ): """ Args: data_loader: Data loader (any object with load_data method) overwrite: Whether to overwrite existing results """ self.data_loader = data_loader self.overwrite = overwrite self.benchmark_version = data_loader.benchmark_version def __call__( self, task_name: str, split: str, results_save_dir: str, generator: Generator, **kwargs ) -> None: """ Execute generation pipeline This method is responsible for generation and saving only, NOT for computing evaluation metrics. 
Args: task_name: Task name split: Dataset split results_save_dir: Results save directory generator: Generator instance **kwargs: Generation parameters Returns: None """ model_name = str(generator) results_dir = os.path.join( results_save_dir, model_name, task_name ) os.makedirs(results_dir, exist_ok=True) generation_file = os.path.join(results_dir, f"{split}_generated.json") # Check if generation results already exist if os.path.exists(generation_file) and not self.overwrite: console.print(f"Generation results already exist, skipping: {generation_file}") console.print("To regenerate, please set overwrite=True") return None start_time = time.time() # Extract sample_size parameter (don't pass to generator) sample_size_param = kwargs.pop('sample_size', None) # 1. Load data test_data = self.data_loader.load_data(task_name=task_name, split=split, sample_size=sample_size_param) # 2. Extract prompts and references prompts = {id: data["prompt"] for id, data in test_data.items()} references = {id: data["ground_truth"] for id, data in test_data.items()} # 3. Generate text (unified entry point) # All tasks now go through the unified generate() method # For classification tasks, target_tokens is already in kwargs from generation_config generations, logprobs = generator.generate(prompts, **kwargs) end_time = time.time() total_time = end_time - start_time num_samples = len(test_data) avg_time_per_sample = total_time / num_samples if num_samples > 0 else 0 console.print(f"Total time: {total_time:.2f}s, Average per sample: {avg_time_per_sample:.4f}s") # 4. Collect hardware info and MFU statistics (for MFU calculation) console.print("[MFU DEBUG] Starting MFU data collection...") hardware_info = None mfu_stats = None try: # Check if generator has get_hardware_info method if not hasattr(generator, 'get_hardware_info'): console.print("[MFU ERROR] generator does NOT have get_hardware_info() method!") console.print(f"[MFU ERROR] Generator type: {type(generator)}") console.print(f"[MFU ERROR] Generator class: {generator.__class__.__name__}") else: hardware_info = generator.get_hardware_info() if hardware_info: console.print(f"[MFU DEBUG] GPU Model: {hardware_info.get('gpu_model')}") console.print(f"[MFU DEBUG] GPU Count: {hardware_info.get('gpu_count')}") console.print(f"[MFU DEBUG] GPU TFLOPs: {hardware_info.get('gpu_tflops')}") else: console.print("[MFU WARNING] hardware_info is None!") # Check if generator has mfu_stats attribute if not hasattr(generator, 'mfu_stats'): console.print("[MFU WARNING] generator does NOT have 'mfu_stats' attribute!") else: mfu_stats = getattr(generator, 'mfu_stats', None) if mfu_stats: console.print(f"[MFU DEBUG] mfu_stats sample count: {len(mfu_stats)}") if len(mfu_stats) > 0: first_key = list(mfu_stats.keys())[0] first_stats = mfu_stats[first_key] console.print(f"[MFU DEBUG] First sample: {first_key}") console.print(f"[MFU DEBUG] input_tokens: {first_stats.get('input_tokens', 'MISSING')}") console.print(f"[MFU DEBUG] output_tokens: {first_stats.get('output_tokens', 'MISSING')}") console.print(f"[MFU DEBUG] times: {first_stats.get('times', 'MISSING')}") else: console.print("[MFU WARNING] mfu_stats is None!") except Exception as e: console.print(f"Warning: Failed to collect hardware info or MFU stats: {e}", style=warning_style) num_params_value = getattr(generator, 'num_params', None) console.print(f"[MFU DEBUG] num_params value: {num_params_value}") # 5. 
Save generation results
        self.save_generations(
            model_name=model_name,
            task_name=task_name,
            split=split,
            generations=generations,
            references=references,
            logprobs=logprobs,
            test_data=test_data,
            output_path=generation_file,
            total_time=total_time,
            avg_time_per_sample=avg_time_per_sample,
            hardware_info=hardware_info,
            mfu_stats=mfu_stats,
            num_params=getattr(generator, 'num_params', None),
        )
        console.print(f"Generation results saved to: {generation_file}")
        return None

    @staticmethod
    def save_generations(
        model_name: str,
        task_name: str,
        split: str,
        generations: Dict[str, List[str]],
        references: Dict[str, str],
        logprobs: Dict[str, List[float]],
        test_data: Dict[str, Dict[str, Any]],
        output_path: str,
        total_time: float,
        avg_time_per_sample: float,
        hardware_info: Optional[Dict[str, Any]] = None,
        mfu_stats: Optional[Dict[str, Dict[str, List[int]]]] = None,
        num_params: Optional[float] = None,
    ):
        """
        Save generation results (excluding evaluation metrics)

        Result format:
        {
            "model_name": "...",
            "task_name": "...",
            "split": "...",
            "total_time": "...",
            "avg_time_per_sample": "...",
            "samples": {
                "<sample_id>": {
                    "prompt": "...",
                    "generations": ["...", "..."],
                    "ground_truth": "...",
                    "metadata": {...}  # Contains metadata from original data
                },
                ...
            }
        }
        """
        # Check if this is a classification task (label_pred)
        is_classification_task = task_name == "label_pred"
        samples: Dict[str, Any] = {}
        for id, gens in generations.items():
            sample_data = {
                "prompt": test_data.get(id, {}).get("prompt", ""),
                "generations": gens,
                "ground_truth": references.get(id, ""),
            }
            if id in logprobs and logprobs[id]:
                sample_data["logprobs"] = logprobs[id]
            # Add MFU statistics for this sample (for MFU calculation)
            if mfu_stats and id in mfu_stats:
                sample_data["input_tokens"] = mfu_stats[id].get("input_tokens", [])
                sample_data["output_tokens"] = mfu_stats[id].get("output_tokens", [])
                sample_data["times"] = mfu_stats[id].get("times", [])
            if is_classification_task and id in test_data:
                metadata = test_data[id].get("metadata", {})
                if "uid" in metadata:
                    sample_data["user_id"] = metadata["uid"]
            if id in test_data and "metadata" in test_data[id]:
                sample_data["metadata"] = test_data[id]["metadata"]
            samples[id] = sample_data
        data = {
            "model_name": model_name,
            "task_name": task_name,
            "split": split,
            "total_time": total_time,
            "avg_time_per_sample": avg_time_per_sample,
            "samples": samples,
        }
        # Add hardware info and token statistics (for MFU calculation)
        if hardware_info:
            data["hardware_info"] = hardware_info
        else:
            console.print("[MFU DEBUG] ❌ Skipping hardware_info (None or empty)")
        if num_params:
            data["num_params"] = num_params
        else:
            console.print("[MFU DEBUG] ❌ Skipping num_params (None or 0)")
        # Save mfu_stats_aggregate for multi-stage MFU calculation
        # Compute aggregate statistics from per-sample mfu_stats
        if mfu_stats:
            # Determine number of stages from first sample
            num_stages = 0
            for sample_stats in mfu_stats.values():
                num_stages = len(sample_stats.get("input_tokens", []))
                console.print(f"[MFU DEBUG] Determined num_stages: {num_stages}")
                break
            # New structure: dict with lists instead of array of dicts
            data["mfu_stats_aggregate"] = {
                "total_input_tokens": [],
                "total_output_tokens": [],
                "total_time": []
            }
            for stage_idx in range(num_stages):
                total_input_tokens = 0
                total_output_tokens = 0
                # Aggregate token stats across all samples for this stage
                for sample_stats in mfu_stats.values():
                    input_tokens_list = sample_stats.get("input_tokens", [])
                    output_tokens_list = sample_stats.get("output_tokens", [])
                    if stage_idx < len(input_tokens_list):
                        total_input_tokens +=
input_tokens_list[stage_idx] if stage_idx < len(output_tokens_list): total_output_tokens += output_tokens_list[stage_idx] # Calculate stage time as max across all samples # Ray workers run in parallel, so stage time = slowest worker time stage_times = [] for sample_stats in mfu_stats.values(): times_list = sample_stats.get("times", []) if stage_idx < len(times_list): stage_times.append(times_list[stage_idx]) # Use max time if available, otherwise 0.0 stage_time = max(stage_times) if stage_times else 0.0 data["mfu_stats_aggregate"]["total_input_tokens"].append(total_input_tokens) data["mfu_stats_aggregate"]["total_output_tokens"].append(total_output_tokens) data["mfu_stats_aggregate"]["total_time"].append(stage_time) else: console.print("[MFU DEBUG] ❌ Skipping mfu_stats processing (None or empty)") os.makedirs(os.path.dirname(output_path), exist_ok=True) with open(output_path, 'w', encoding='utf-8') as f: json.dump(data, f, indent=2, ensure_ascii=False) ================================================ FILE: benchmarks/benchmark/gpu_utils.py ================================================ """ GPU hardware detection and FLOPS calculation utilities for MFU computation. """ from typing import Dict, Any, Optional from benchmark.console import console # GPU theoretical peak FLOPS (TFLOPS) for BF16/FP16 # Source: Official vendor specifications GPU_TFLOPS_MAP = { # NVIDIA A100 series "A100-SXM4-40GB": 312.0, "A100-SXM4-80GB": 312.0, "A100-PCIE-40GB": 312.0, "A100-PCIE-80GB": 312.0, # NVIDIA A800 series (China-specific A100 variant) "A800-SXM4-80GB": 312.0, "A800": 312.0, # NVIDIA H100 series "H100-SXM5-80GB": 989.0, "H100-PCIE-80GB": 756.0, "H100": 989.0, # NVIDIA V100 series "V100-SXM2-16GB": 125.0, "V100-SXM2-32GB": 125.0, "V100-PCIE-16GB": 112.0, "V100-PCIE-32GB": 112.0, # NVIDIA A40 "A40": 149.7, # NVIDIA A30 "A30": 165.0, # NVIDIA A10 "A10": 125.0, # NVIDIA RTX series "RTX 4090": 82.6, "RTX 4080": 48.7, "RTX 3090": 35.6, "RTX 3080": 29.8, } def _normalize_gpu_name(gpu_name: str) -> str: """ Normalize GPU name for lookup in TFLOPS map. Args: gpu_name: Raw GPU name from torch.cuda Returns: Normalized GPU name """ gpu_name = gpu_name.strip() # Try exact match first if gpu_name in GPU_TFLOPS_MAP: return gpu_name # Try fuzzy matching gpu_name_upper = gpu_name.upper() # Match A100 variants if "A100" in gpu_name_upper: if "80GB" in gpu_name_upper or "80G" in gpu_name_upper: return "A100-SXM4-80GB" else: return "A100-SXM4-40GB" # Match A800 if "A800" in gpu_name_upper: return "A800" # Match H100 variants if "H100" in gpu_name_upper: if "PCIE" in gpu_name_upper or "PCIe" in gpu_name_upper: return "H100-PCIE-80GB" else: return "H100-SXM5-80GB" # Match V100 variants if "V100" in gpu_name_upper: if "32GB" in gpu_name_upper or "32G" in gpu_name_upper: return "V100-SXM2-32GB" else: return "V100-SXM2-16GB" # Match other GPUs for known_gpu in GPU_TFLOPS_MAP.keys(): if known_gpu.upper() in gpu_name_upper: return known_gpu return gpu_name def get_gpu_tflops(gpu_name: str) -> Optional[float]: """ Get theoretical peak TFLOPS for a given GPU model. Args: gpu_name: GPU model name Returns: TFLOPS value for BF16/FP16, or None if unknown """ normalized_name = _normalize_gpu_name(gpu_name) return GPU_TFLOPS_MAP.get(normalized_name) def get_gpu_info() -> Dict[str, Any]: """ Detect GPU hardware information using PyTorch. 
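    The returned ``gpu_tflops`` value feeds the downstream MFU computation. As a
    rough sketch (assuming the common 2 * num_params FLOPs-per-token decoding
    approximation; the benchmark's MFU evaluator may use a more detailed
    formula, and ``num_params``, ``total_tokens``, ``total_time`` here are
    hypothetical values taken from a generation run):

        info = get_gpu_info()
        peak_flops = info["gpu_count"] * info["gpu_tflops"] * 1e12   # FLOPs/s
        achieved_flops = 2 * num_params * total_tokens / total_time  # FLOPs/s
        mfu = achieved_flops / peak_flops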
Returns: Dictionary containing: - gpu_available: bool, whether GPU is available - gpu_count: int, number of GPUs - gpu_model: str, GPU model name - gpu_memory_total_gb: float, total GPU memory in GB - gpu_tflops: float, theoretical peak TFLOPS for BF16/FP16 """ try: import torch except ImportError: console.print("PyTorch not available, cannot detect GPU info") return { "gpu_available": False, "gpu_count": 0, "gpu_model": "unknown", "gpu_memory_total_gb": 0.0, "gpu_tflops": None, } if not torch.cuda.is_available(): console.print("CUDA not available") return { "gpu_available": False, "gpu_count": 0, "gpu_model": "unknown", "gpu_memory_total_gb": 0.0, "gpu_tflops": None, } gpu_count = torch.cuda.device_count() # Get properties of the first GPU (assume homogeneous cluster) gpu_props = torch.cuda.get_device_properties(0) gpu_model = gpu_props.name gpu_memory_total_gb = gpu_props.total_memory / (1024 ** 3) # Convert bytes to GB # Get TFLOPS gpu_tflops = get_gpu_tflops(gpu_model) if gpu_tflops is None: console.print( f"Unknown GPU model '{gpu_model}', cannot determine TFLOPS. " f"Please add it to GPU_TFLOPS_MAP in gpu_utils.py" ) gpu_info = { "gpu_available": True, "gpu_count": gpu_count, "gpu_model": gpu_model, "gpu_memory_total_gb": round(gpu_memory_total_gb, 2), "gpu_tflops": gpu_tflops, } console.print(f"Detected GPU: {gpu_model} x {gpu_count}, {gpu_tflops} TFLOPS (BF16/FP16)") return gpu_info ================================================ FILE: benchmarks/benchmark/tasks/__init__.py ================================================ """ Tasks definition for Benchmark """ from .tasks import ( BenchmarkTable, check_benchmark_version, check_task_types, check_splits, LATEST_BENCHMARK_VERSION, ) __all__ = [ "BenchmarkTable", "check_benchmark_version", "check_task_types", "check_splits", "LATEST_BENCHMARK_VERSION", ] ================================================ FILE: benchmarks/benchmark/tasks/tasks.py ================================================ """ Task table and utility functions for Benchmark """ from typing import List, Optional, Tuple from benchmark.tasks.v1_0.registry import TaskTable as TaskTable_v1_0 LATEST_BENCHMARK_VERSION = "v1.0" BenchmarkTable = { "v1.0": TaskTable_v1_0, } def get_available_benchmark_versions() -> List[str]: """Get all available benchmark versions""" return sorted(list(BenchmarkTable.keys())) def get_available_task_types(benchmark_version: str = LATEST_BENCHMARK_VERSION) -> List[str]: """Get all task types for the specified version""" task_table = BenchmarkTable[benchmark_version] return sorted(list(task_table.keys())) def get_available_domains(benchmark_version: str = LATEST_BENCHMARK_VERSION) -> List[str]: """Get all domains for the specified version""" domains = set() for task_table in BenchmarkTable[benchmark_version].values(): for domain in task_table.keys(): domains.add(domain) return sorted(list(domains)) def get_available_languages(benchmark_version: str = LATEST_BENCHMARK_VERSION) -> List[str]: """Get all languages for the specified version""" languages = set() for task_table in BenchmarkTable[benchmark_version].values(): for task in task_table.values(): for lang in task.keys(): languages.add(lang) return sorted(list(languages)) def check_benchmark_version(benchmark_version: Optional[str]) -> str: """ Validate if benchmark version is valid Args: benchmark_version: Version to validate, returns latest version if None Returns: str: Valid benchmark version Raises: ValueError: If version is invalid """ if benchmark_version is None: benchmark_version = 
LATEST_BENCHMARK_VERSION else: available_benchmark_versions = get_available_benchmark_versions() if benchmark_version not in available_benchmark_versions: raise ValueError( f"Invalid benchmark version: {benchmark_version}. Available versions: {', '.join(available_benchmark_versions)}" ) return benchmark_version def check_task_types( task_types: Optional[List[str]], benchmark_version: str = LATEST_BENCHMARK_VERSION, ) -> List[str]: """ Validate if task types are valid Args: task_types: List of task types to validate, returns all task types if None benchmark_version: Benchmark version Returns: List[str]: Valid task types list Raises: ValueError: If task type is invalid """ available_task_types = get_available_task_types(benchmark_version) if task_types is None: task_types = available_task_types else: if isinstance(task_types, str): task_types = [task_types] task_types = sorted(list(set(task_types))) task_types = [task_type.lower() for task_type in task_types] for task_type in task_types: if task_type not in available_task_types: raise ValueError( f"{benchmark_version} | Invalid task type: {task_type}. Available task types: {', '.join(available_task_types)}" ) return task_types def check_splits( splits: Optional[List[str]], benchmark_version: str = LATEST_BENCHMARK_VERSION, ) -> List[str]: """ Validate if dataset splits are valid Args: splits: List of splits to validate, returns all splits if None benchmark_version: Benchmark version Returns: List[str]: Valid splits list Raises: ValueError: If split is invalid """ # Only allow test split available_splits = ["test"] if splits is None: splits = available_splits else: if isinstance(splits, str): splits = [splits] splits = sorted(list(set(splits))) splits = [split.lower() for split in splits] for split in splits: if split not in available_splits: raise ValueError( f"{benchmark_version} | Invalid split: {split}. Available splits: {', '.join(available_splits)}" ) return splits ================================================ FILE: benchmarks/benchmark/tasks/v1_0/__init__.py ================================================ """ v1.0 Version Task Definitions """ from .registry import TaskTable __all__ = ["TaskTable"] ================================================ FILE: benchmarks/benchmark/tasks/v1_0/base_evaluator.py ================================================ """ Base Evaluator for all task evaluators Provides common interface for evaluation logic. 
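A minimal subclass sketch (hypothetical accuracy-style task, illustrative only;
real evaluators live under tasks/v1_0/<task>/evaluator.py):

    class MyTaskEval(BaseEval):
        @property
        def required_metrics(self):
            return ["accuracy"]

        def _compute_metrics_from_scratch(self):
            # Compare the first generation against the ground truth per sample
            per_sample = {
                sid: {"correct": s["generations"][0] == s["ground_truth"]}
                for sid, s in self.samples.items() if s.get("generations")
            }
            acc = sum(m["correct"] for m in per_sample.values()) / max(len(per_sample), 1)
            return {"accuracy": acc, "num_samples": len(per_sample)}, per_sample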
""" import json import os from abc import ABC, abstractmethod from typing import Dict, Any, Tuple, Optional, List from benchmark.console import console, success_style class BaseEval(ABC): """Base class for all task evaluators""" def __init__( self, samples: Dict[str, Dict[str, Any]], task_name: Optional[str] = None, predictions_dir: Optional[str] = None, debug: bool = False, task_config: Optional[Dict[str, Any]] = None, data_dir: Optional[str] = None, overwrite: bool = False, cached_metrics: Optional[Dict[str, Any]] = None ): """ Initialize base evaluator Args: samples: Dictionary of samples from test_generated.json Format: { sample_id: { "prompt": "...", "generations": ["..."], "ground_truth": "...", "metadata": {...} } } task_name: Task name (e.g., "math_500") predictions_dir: Directory to save debug files (optional) debug: Whether to save debug information task_config: Task configuration dictionary (optional) data_dir: Data directory path (optional) overwrite: Whether to overwrite existing metrics and recompute from scratch cached_metrics: Existing overall metrics from eval_results (optional) """ self.samples = samples self.task_name = task_name self.predictions_dir = predictions_dir self.debug = debug self.task_config = task_config or {} self.data_dir = data_dir self.overwrite = overwrite self.cached_metrics = cached_metrics or {} def evaluate(self) -> Tuple[Dict[str, Any], Dict[str, Dict[str, Any]]]: """ Evaluate the samples and return metrics This method provides a simplified two-level caching-aware evaluation flow: 1. If overwrite=True, always recompute from scratch 2. If cached overall metrics exist in eval_results, return them with empty per_sample_metrics 3. Otherwise, compute from scratch Subclasses should override: - required_metrics property: Return list of overall metric names - _compute_metrics_from_scratch(): Compute all metrics from scratch Returns: Tuple of (metrics, per_sample_metrics) """ # If overwrite=True, always recompute from scratch if self.overwrite: console.print("[cyan]Overwrite=True, recomputing all metrics from scratch...[/cyan]") return self._compute_metrics_from_scratch() # If cached overall metrics exist, use them if self._has_all_required_metrics(): console.print("[cyan]Using existing overall metrics from eval_results...[/cyan]") # Return cached metrics with empty per_sample_metrics (not needed when using cache) return self.cached_metrics, {} # Otherwise, compute from scratch console.print("[cyan]Computing metrics from scratch...[/cyan]") return self._compute_metrics_from_scratch() def _all_samples_have_keys(self, required_keys: List[str]) -> bool: """Check if all samples have required keys""" for sample in self.samples.values(): for key in required_keys: if key not in sample: return False return True @property def required_metrics(self) -> Optional[List[str]]: """Define required overall metric keys""" return None def _has_all_required_metrics(self) -> bool: """Check if cached_metrics contains all required keys (override for custom logic)""" if self.required_metrics is not None: return all(key in self.cached_metrics for key in self.required_metrics) return False def _compute_metrics_from_scratch(self) -> Tuple[Dict[str, Any], Dict[str, Dict[str, Any]]]: """Compute metrics from scratch (override in subclasses)""" raise NotImplementedError("Subclasses must implement _compute_metrics_from_scratch()") def _save_debug_json( self, debug_info: Dict[str, Any], filename: str = "debug.json" ) -> Optional[str]: """Save debug information to JSON file""" if not 
self.predictions_dir: return None debug_filename = os.path.join(self.predictions_dir, filename) os.makedirs(os.path.dirname(debug_filename), exist_ok=True) with open(debug_filename, 'w', encoding='utf-8') as f: json.dump(debug_info, f, indent=2, ensure_ascii=False) console.print(f"✓ Debug information saved to: {debug_filename}", style=success_style) return debug_filename ================================================ FILE: benchmarks/benchmark/tasks/v1_0/base_loader.py ================================================ """ Base Loader for all task data loaders Provides common functionality for data loading, sampling, and file path resolution. """ import os import json import pandas as pd from typing import Dict, Any, Optional from abc import ABC from benchmark.console import * class BaseLoader(ABC): """Base class for all task data loaders""" def __init__( self, task_config: Dict[str, Any], data_dir: Optional[str] = None, tokenizer: Optional[Any] = None, enable_thinking: Optional[bool] = None, ): """Initialize base loader""" self.task_config = task_config self.data_dir = data_dir self.tokenizer = tokenizer self.enable_thinking = enable_thinking self.task_name = task_config.get("name", "unknown") # Validate tokenizer is provided for messages-based format if self.tokenizer is None: raise ValueError( f"{self.task_name} requires tokenizer for messages-based format. " f"Please provide model_path when initializing Benchmark.\n" f"Example: Benchmark(task_types=['{self.task_name}'], model_path='your-model-path')" ) def load_data(self, split: str = "test", sample_size: Optional[Any] = None) -> Dict[str, Dict[str, Any]]: """ Load data for the task in messages-based format Args: split: Dataset split (default "test") sample_size: Override sample size (can be int, "full", or None to use task config) Returns: Dictionary mapping sample_id to sample data: { sample_id: { "prompt": "formatted prompt from apply_chat_template", "ground_truth": "answer", "metadata": { "row_index": idx, "messages": [...] } } } """ # Determine effective sample size if sample_size is not None: if sample_size == "full": effective_sample_size = self.task_config.get("size") else: effective_sample_size = int(sample_size) else: effective_sample_size = self.task_config.get("sample_size") full_size = self.task_config.get("size") # Try to load cached sample dataframe df = None if effective_sample_size is not None and full_size is not None and effective_sample_size < full_size: df = self._load_sample_dataframe(split, effective_sample_size) # If no cache, load and sample original data if df is None: df = self._load_dataframe(split) # Perform sampling if needed if effective_sample_size is not None and effective_sample_size < len(df): df = self._sample_data(df, effective_sample_size) # Save sampled data if full_size is not None and effective_sample_size < full_size: self._save_sample_data(df, split, effective_sample_size) if 'messages' not in df.columns: raise ValueError( f"{self.task_name} requires 'messages' column in data file. " f"Found columns: {list(df.columns)}\n" f"Please ensure your data is in messages-based format." ) if 'metadata' not in df.columns: raise ValueError( f"{self.task_name} requires 'metadata' column in data file. " f"Found columns: {list(df.columns)}\n" f"Please ensure your data is in messages-based format." 
) console.print(f"[green]Processing {self.task_name} data in messages-based format[/green]") result = self._process_dataframe(df) return result @staticmethod def _is_empty_value(value) -> bool: """Check if a value is None, NaN, or empty""" if value is None: return True if isinstance(value, float): try: return pd.isna(value) except (ValueError, TypeError): return False if isinstance(value, str): return len(value.strip()) == 0 try: if hasattr(value, '__len__'): return len(value) == 0 except (ValueError, TypeError): pass return False @staticmethod def _convert_messages_format(messages: list) -> list: """ Convert message format. {"role": "user", "content": [{"type": "text", "text": "..."}]} -> {"role": "user", "content": "..."} """ converted = [] for msg in messages: content = msg.get("content") if isinstance(content, list): # Extract text from content list text_parts = [] for item in content: if isinstance(item, dict) and item.get("type") == "text": text_parts.append(item.get("text", "")) converted.append({ "role": msg.get("role"), "content": "".join(text_parts) }) else: # Already in old format converted.append(msg) return converted def _load_custom_chat_template(self): """Load custom chat template based on configuration""" if not self.tokenizer: return prompt_config = self.task_config.get("prompt_config", {}) custom_template = prompt_config.get("custom_chat_template") template_path = os.path.join( os.path.dirname(__file__), custom_template ) if not os.path.exists(template_path): raise FileNotFoundError(f"✗ Custom chat template not found: {template_path}") with open(template_path, "r", encoding="utf-8") as f: self.tokenizer.chat_template = f.read() console.print(f"✓ Loaded custom chat template: {custom_template}", style=success_style) def _get_data_file_path(self, split: str) -> str: """Get data file path for the given split""" if self.data_dir: base_dir = self.data_dir else: base_dir = "./data" filename = f"{self.task_name}_{split}.parquet" possible_paths = [ os.path.join(base_dir, self.task_name, filename), ] for file_path in possible_paths: if os.path.exists(file_path): return file_path return possible_paths[0] def _get_sample_data_file_path(self, split: str, sample_size: int) -> str: """Get sample data file path""" if self.data_dir: base_dir = self.data_dir else: base_dir = "./data" possible_paths = [ os.path.join(base_dir, self.task_name, f"{self.task_name}_{split}_sample_{sample_size}.parquet"), os.path.join(base_dir, f"{self.task_name}_{split}_sample_{sample_size}.parquet"), ] for path in possible_paths: if os.path.exists(path): return path return possible_paths[0] def _load_dataframe(self, split: str) -> pd.DataFrame: """Load DataFrame from data file""" data_file = self._get_data_file_path(split) if not os.path.exists(data_file): raise FileNotFoundError(f"Data file not found: {data_file}") console.print(f"Loading data file: {data_file}") if data_file.endswith('.parquet'): df = pd.read_parquet(data_file) else: raise ValueError(f"Unsupported file format: {data_file}") return df def _sample_data(self, df: pd.DataFrame, sample_size: int) -> pd.DataFrame: """Sample data from DataFrame""" if sample_size >= len(df): return df console.print(f"Sampling {sample_size} samples (total: {len(df)})") return df.head(sample_size) def _save_sample_data( self, df: pd.DataFrame, split: str, sample_size: int ): """Save sample data in parquet format""" sample_file = self._get_sample_data_file_path(split, sample_size) sample_dir = os.path.dirname(sample_file) if sample_dir: os.makedirs(sample_dir, 
exist_ok=True) df.to_parquet(sample_file, index=False) console.print(f"Sample data saved to: {sample_file}") def _load_sample_dataframe(self, split: str, sample_size: int) -> Optional[pd.DataFrame]: """Load sample dataframe from cache if exists""" sample_file = self._get_sample_data_file_path(split, sample_size) if not os.path.exists(sample_file): return None console.print(f"Loading sample data from cache: {sample_file}") df = pd.read_parquet(sample_file) return df def _process_dataframe(self, df: pd.DataFrame) -> Dict[str, Dict[str, Any]]: """Process DataFrame and convert to model input format""" self._load_custom_chat_template() result = {} prompt_config = self.task_config.get("prompt_config", {}) # Command-line parameter has higher priority than config if self.enable_thinking is not None: enable_thinking = self.enable_thinking else: enable_thinking = prompt_config.get("enable_thinking", False) console.print(f"[cyan]Auto Thinking: {'✓ Enabled' if enable_thinking else '✗ Disabled'}[/cyan]") for idx, row in df.iterrows(): sample_id = str(idx) messages = row.get('messages') if self._is_empty_value(messages): console.print(f"Sample {sample_id}: messages is empty, skipping") continue if isinstance(messages, str): try: messages = json.loads(messages) except Exception: console.print(f"Sample {sample_id}: failed to parse messages, skipping") continue messages = self._convert_messages_format(messages) try: formatted_prompt = self.tokenizer.apply_chat_template( messages, tokenize=False, add_generation_prompt=True, enable_thinking=enable_thinking, ) except Exception as e: console.print(f"Sample {sample_id}: failed to apply chat template: {e}, skipping") continue metadata_raw = row.get('metadata') if self._is_empty_value(metadata_raw): console.print(f"Sample {sample_id}: metadata is empty, skipping") continue if isinstance(metadata_raw, str): try: metadata_dict = json.loads(metadata_raw) except Exception: console.print(f"Sample {sample_id}: failed to parse metadata, skipping") continue elif isinstance(metadata_raw, dict): metadata_dict = metadata_raw else: console.print(f"Sample {sample_id}: invalid metadata format, skipping") continue answer = metadata_dict.get('answer') if self._is_empty_value(answer): console.print(f"Sample {sample_id}: answer is empty in metadata, skipping") continue ground_truth_str = str(answer).strip() result_item = { "prompt": formatted_prompt, "ground_truth": ground_truth_str, "metadata": self._make_metadata_serializable(idx, metadata_dict) } result[sample_id] = result_item console.print(f"[green]Loaded {len(result)} samples for {self.task_name}[/green]") return result def _make_metadata_serializable( self, idx: Any, metadata_dict: dict, ) -> dict: """Convert metadata to JSON-serializable format""" del metadata_dict["answer"] metadata = { "row_index": int(idx) if hasattr(idx, '__int__') else str(idx), **metadata_dict, } return metadata ================================================ FILE: benchmarks/benchmark/tasks/v1_0/item_understand/__init__.py ================================================ """ Item Understand Task Module """ from .config import ITEM_UNDERSTAND_CONFIG from .evaluator import ItemUnderstandEvaluator from . 
import utils __all__ = [ "ITEM_UNDERSTAND_CONFIG", "ItemUnderstandEvaluator", "utils", ] ================================================ FILE: benchmarks/benchmark/tasks/v1_0/item_understand/config.py ================================================ """ Item Understand Task Configuration """ # Item Understand Task Configuration ITEM_UNDERSTAND_CONFIG = { "name": "item_understand", "source": "Kuaishou Internal", "splits": ["test"], "size": 500, "sample_size": 500, "description": "Video SID to Caption generation task", "data_fields": { "messages_field": "messages", "metadata_field": "metadata", }, "prompt_config": { "enable_thinking": False, # Enable thinking mode for apply_chat_template "custom_chat_template": "qwen3_soft_switch.jinja2", # Custom jinja2 template (file in v1_0 directory) }, # Generation parameter configuration "generation_config": { "num_return_sequences": 1, "max_new_tokens": 128, "temperature": 0.01, "top_p": 0.95, "repetition_penalty": 1.0, "do_sample": False, "num_return_thinking_sequences": 1, "max_new_thinking_tokens": 1000, }, "evaluation_config": { "metrics": ["macro_wip_double_weighted_f1", "micro_wip_double_weighted_f1"], "bertscore_model_type": "bert-base-chinese", "bertscore_num_layers": 9, "bertscore_lang": "zh", # WIP (Weighted Information Points) evaluation config "wip_enabled": True, # Whether to enable WIP evaluation "wip_judge_model": "gemini", # Judge LLM type: gemini/deepseek/claude "wip_max_workers": 1, # Concurrent workers for LLM calls "wip_core_threshold": 5, # Core threshold for importance score (1-5) "wip_max_samples": 500, # Max samples to evaluate (None for all) } } ================================================ FILE: benchmarks/benchmark/tasks/v1_0/item_understand/evaluator.py ================================================ """ Item Understand Evaluator Evaluates model predictions on Item Understand task using WIP (LLM-as-Judge). 
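The WIP metrics are F1 scores computed over extracted "information points". As
a worked example (illustrative numbers only): if the judge matches 3 model WIPs
to GT WIPs (TP=3), leaves 1 model WIP unmatched (FP=1) and 2 GT WIPs unmatched
(FN=2), the unweighted per-sample F1 is 2*3 / (2*3 + 1 + 2) = 6/9 ≈ 0.667;
macro scores average these per-sample F1s across samples.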
""" import os from typing import Dict, Any, Tuple, List from benchmark.console import console from benchmark.tasks.v1_0.base_evaluator import BaseEval class ItemUnderstandEvaluator(BaseEval): """Item Understand task evaluator""" @property def required_metrics(self) -> List[str]: """Define required overall metrics for Item Understand evaluation""" return ["macro_wip_double_weighted_f1"] def _compute_metrics_from_scratch(self) -> Tuple[Dict[str, Any], Dict[str, Dict[str, Any]]]: """ Compute all metrics from scratch Returns: Tuple of (metrics, per_sample_metrics) """ total_samples = len(self.samples) # Prepare data for evaluation sample_ids = list(self.samples.keys()) predictions = [] references = [] for sample_id in sample_ids: sample = self.samples[sample_id] # Get ground truth ground_truth = sample.get("ground_truth", "") references.append(ground_truth) # Get model prediction (first generation) generations = sample.get("generations", []) if not generations: prediction = "" else: prediction = generations[0] predictions.append(prediction) # Get evaluation config eval_config = self.task_config.get("evaluation_config", {}) # Build per-sample metrics per_sample_metrics = {} for sample_id in sample_ids: per_sample_metrics[sample_id] = {} # Build overall metrics metrics = { "num_samples": total_samples } # WIP Evaluation (if enabled) wip_enabled = eval_config.get("wip_enabled", False) if wip_enabled: console.print("[cyan]WIP evaluation enabled, starting LLM-as-Judge evaluation...[/cyan]") wip_metrics, wip_per_sample = self._evaluate_wip( sample_ids=sample_ids, predictions=predictions, references=references, eval_config=eval_config ) # Merge WIP metrics into overall metrics metrics.update(wip_metrics) # Merge WIP per-sample metrics for sample_id in sample_ids: if sample_id in wip_per_sample: per_sample_metrics[sample_id].update(wip_per_sample[sample_id]) # Save debug information if requested if self.debug and self.predictions_dir: self._save_debug_info(metrics, per_sample_metrics, predictions, references) return metrics, per_sample_metrics def _evaluate_wip( self, sample_ids: list, predictions: list, references: list, eval_config: Dict[str, Any] ) -> Tuple[Dict[str, Any], Dict[str, Dict[str, Any]]]: """ Perform WIP (Weighted Information Points) evaluation using LLM-as-Judge. 
Args: sample_ids: List of sample IDs predictions: List of prediction texts references: List of reference texts eval_config: Evaluation configuration Returns: Tuple of (wip_metrics, wip_per_sample_metrics) """ try: from api import get_client_from_config from benchmark.tasks.v1_0.item_understand.utils import evaluate_wip except ImportError as e: console.print(f"[red]Failed to import WIP evaluation modules: {e}[/red]") return {}, {} # Get WIP config wip_judge_model = eval_config.get("wip_judge_model", "deepseek") wip_max_workers = eval_config.get("wip_max_workers", 5) wip_max_samples = eval_config.get("wip_max_samples", 100) wip_core_threshold = eval_config.get("wip_core_threshold", 5) wip_gt_cache_dir = os.path.join(self.data_dir, self.task_name) # Use data_dir / task_name as GT cache directory # Use BERTScore config from evaluation_config (not separate wip config) bertscore_model = eval_config.get("bertscore_model_type", "bert-base-chinese") bertscore_num_layers = eval_config.get("bertscore_num_layers", 9) # Create LLM client try: llm_client = get_client_from_config(wip_judge_model) console.print(f"[green]Using {wip_judge_model} as WIP judge[/green]") except Exception as e: console.print(f"[red]Failed to create LLM client for WIP evaluation: {e}[/red]") return {}, {} # Prepare data as dicts predictions_dict = {id: pred for id, pred in zip(sample_ids, predictions)} references_dict = {id: ref for id, ref in zip(sample_ids, references)} # Get model name for cache file naming # Try to extract from llm_client config model_name = getattr(llm_client, 'model_name', wip_judge_model) # Run WIP evaluation try: wip_metrics, wip_per_sample = evaluate_wip( predictions=predictions_dict, references=references_dict, llm_client=llm_client, max_workers=wip_max_workers, max_samples=wip_max_samples, gt_cache_dir=wip_gt_cache_dir, model_name=model_name, save_dir=self.predictions_dir, bertscore_model=bertscore_model, bertscore_num_layers=bertscore_num_layers, core_threshold=wip_core_threshold, ) console.print(f"[green]WIP evaluation completed: {wip_metrics.get('wip_num_samples', 0)} samples evaluated[/green]") return wip_metrics, wip_per_sample except Exception as e: console.print(f"[red]WIP evaluation failed: {e}[/red]") import traceback traceback.print_exc() return {}, {} def _save_debug_info( self, metrics: Dict[str, Any], per_sample_metrics: Dict[str, Dict[str, Any]], predictions: list, references: list ): """ Save detailed debug information to file Args: metrics: Overall metrics per_sample_metrics: Per-sample metrics predictions: List of predictions references: List of references """ # Prepare debug info debug_info = { "overall_metrics": metrics, "per_sample_metrics": per_sample_metrics, "sample_count": len(predictions), } # Add some examples sample_ids = list(self.samples.keys()) debug_info["examples"] = [] for i in range(min(10, len(sample_ids))): sample_id = sample_ids[i] debug_info["examples"].append({ "sample_id": sample_id, "prediction": predictions[i], "reference": references[i], "wip_unweighted_f1": per_sample_metrics[sample_id].get("wip_unweighted_f1"), "wip_unweighted_core_f1": per_sample_metrics[sample_id].get("wip_unweighted_core_f1"), "wip_importance_weighted_f1": per_sample_metrics[sample_id].get("wip_importance_weighted_f1"), "wip_importance_weighted_core_f1": per_sample_metrics[sample_id].get("wip_importance_weighted_core_f1"), "wip_double_weighted_f1": per_sample_metrics[sample_id].get("wip_double_weighted_f1"), "wip_double_weighted_core_f1": 
per_sample_metrics[sample_id].get("wip_double_weighted_core_f1"), }) # Save to file using base class method self._save_debug_json(debug_info, filename="debug.json") # Print summary statistics console.print(f"Total samples: {metrics['num_samples']}") # Print WIP metrics if available if metrics.get('macro_wip_unweighted_f1') is not None: console.print(f"Macro WIP Unweighted F1: {metrics['macro_wip_unweighted_f1']:.4f}") if metrics.get('macro_wip_double_weighted_f1') is not None: console.print(f"Macro WIP Double-weighted F1: {metrics['macro_wip_double_weighted_f1']:.4f}") ================================================ FILE: benchmarks/benchmark/tasks/v1_0/item_understand/utils.py ================================================ import json import os import re from typing import Dict, List, Any, Optional, Tuple from concurrent.futures import ThreadPoolExecutor, as_completed from pathlib import Path import pandas as pd from tqdm import tqdm from benchmark.console import console WIP_EXTRACTION_PROMPT = """你是一位顶级的【信息抽取专家】,擅长从非结构化的文本中解析出结构化的信息。 ### 你的核心任务 你的任务是分析我提供的描述性文字,并将其分解为结构化的【原子化且唯一】的"信息点"列表。 ### 输出结构 对于列表中的每一个信息点,你必须提供: 1. **info_point**: 一个简洁的、陈述事实的短语。 2. **importance_score**: 一个 [1, 5] 之间的【整数】,代表该信息点的重要性。 --- ### 关键原则 (必须遵守) 1. **原子性 (Atomic):** 每个 `info_point` 应只包含一个独立的事实。 * (好): `{{"info_point": "女孩在吃饭", "importance_score": 4}}` * (差): `{{"info_point": "女孩在吃饭,妈妈在旁边看", "importance_score": 4}}` 2. **唯一性 (Unique):** 确保你提取的每个 `info_point` 都是**概念上唯一**的。 3. **合并 (Consolidate):** 如果原始文本中的多个短语描述的是【同一个核心思想】,你【必须】将它们合并成一个单一的、最具代表性的 `info_point`。 * (例如): 如果文本说 "活动环境是温馨的" 和 "视频色彩营造温馨氛围",你应该只提取一个,如:`{{"info_point": "视频氛围温馨", "importance_score": 5}}`。 * **不要创建重复或语义高度重叠的条目。** --- ### 打分指南 (1-5分制) * **5分 (绝对核心):** 视频的"灵魂"。如果缺少这个点,整个摘要就毫无意义。(例如:"如何制作煎蛋卷"、"XX游戏的评测") * **4分 (关键信息):** 视频的"骨架"。关键的事件、步骤或场景。(例如:"打散三个鸡蛋"、"使用了不粘锅"、"游戏画面评测") * **3分 (重要细节):** 视频的"肉"。支撑骨架的具体、重要的细节。(例如:"加入了盐和胡椒"、"用中火加热黄油"、"角色动作流畅") * **2分 (补充细节):** 补充性的上下文或次要信息。(例如:"煎蛋卷折叠了三次"、"背景音乐很好听") * **1分 (琐碎信息):** 琐碎的、风格化的或背景性的描述。(例如:"主持人穿着蓝色围裙"、"视频光线很好") --- ### 格式与示例 你的输出必须是【纯粹的 JSON 格式】,可以被 `json.loads` 直接解析。JSON应包含一个 "wips" 键,其值为一个列表。如果文本中没有可提取的信息点,请返回 `{{"wips": []}}`。 **[示例输入]** 这是一段关于如何制作法式煎蛋卷的教程视频。主持人首先将三个鸡蛋打入碗中,并加入了盐和一小撮胡椒进行搅拌。视频强调了使用中火和不粘锅的重要性。接着,她在锅中融化了一块黄油,然后倒入蛋液。在烹饪过程中,她不断晃动平底锅,并将边缘的蛋液推向中心。最后,她将煎蛋卷折叠成三折,盛入盘中。整个过程非常快速。 **[示例输出]** ```json {{ "wips": [ {{ "info_point": "教程:如何制作法式煎蛋卷", "importance_score": 5 }}, {{ "info_point": "使用三个鸡蛋,加盐和胡椒搅拌", "importance_score": 3 }}, {{ "info_point": "强调使用中火", "importance_score": 4 }}, {{ "info_point": "使用不粘锅和黄油", "importance_score": 4 }}, {{ "info_point": "晃动锅并将蛋液边缘推向中心", "importance_score": 3 }}, {{ "info_point": "煎蛋卷被折叠成三折", "importance_score": 2 }}, {{ "info_point": "烹饪过程快速", "importance_score": 1 }} ] }} ``` 现在,请开始分析我提供的描述性文字: {} 你的输出结果 (请严格按照上述要求返回一个格式规整的 JSON,可以被 json.loads 直接解析。请不要在 JSON 数据前后添加任何额外的解释性文字或代码块标记): """ WIP_MATCHING_PROMPT = """你是一位极其严谨的**语义匹配专家**。你的任务是精确地对比两组关于同一个视频摘要的结构化信息点 (WIPs),并找出它们之间的匹配关系。 **背景信息:** - **Ground Truth WIPs (GT列表)**: 这是视频摘要的"事实标准",代表视频中真实存在的所有核心信息。每个点都有一个 [1-5] 的重要性分数 (`importance_score`)。 - **Model-Generated WIPs (模型列表)**: 这是由一个AI模型生成的摘要信息点,代表它"声称"在视频中看到的内容。每个点也有一个 [1-5] 的重要性分数。 **你的核心任务:** 对比这两个列表,并输出一个包含三类结果的JSON对象: 1. **`matches`**: 一个匹配对的列表。对于"模型列表"中的每一个项,如果在"GT列表"中找到了一个**语义上非常相似**的对应项,就将它们配对。 2. **`unmatched_model_wips` (幻觉)**: "模型列表"中,那些在"GT列表"里找不到任何合理对应项的条目。这些代表了模型的**幻觉 (False Positives)**。 3. **`unmatched_gt_wips` (漏报)**: "GT列表"中,那些没有被"模型列表"中任何条目匹配到的条目。这些代表了模型的**漏报 (False Negatives)**。 **至关重要的匹配规则:** 1. **语义核心**: 匹配的核心是 `info_point` 的语义。 2. 
**部分匹配**: 如果两个 `info_point` 语义上"部分重叠"但"不完全相同",你【也应该】将它们匹配。 * (例如): GT的 `"一场激烈精彩的篮球比赛"` 和 Gen的 `"球员在打篮球"` 应该被【匹配】(因为核心"篮球"匹配上了)。 * (例如): GT的 `"评测《魔龙巢穴:暗影崛起》"` 和 Gen的 `"评测《魔龙巢穴:冰封王座》"` 应该被【匹配】(因为核心"《魔龙巢穴》评测"匹配上了)。 3. **一对一匹配**: 找出最佳的匹配组合。 --- **[输出结构示例]** **[输入]** - GT列表: `[ {{"info_point": "节气是秋分", "importance_score": 5}}, {{"info_point": "农民在收割稻谷", "importance_score": 4}} ]` - 模型列表: `[ {{"info_point": "这是一个关于秋分的视频", "importance_score": 4}}, {{"info_point": "狗在田里跑", "importance_score": 1}} ]` **[你的输出]** ```json {{ "matches": [ {{ "model_wip": {{"info_point": "这是一个关于秋分的视频", "importance_score": 4}}, "gt_wip": {{"info_point": "节气是秋分", "importance_score": 5}} }} ], "unmatched_model_wips": [ {{ "info_point": "狗在田里跑", "importance_score": 1 }} ], "unmatched_gt_wips": [ {{ "info_point": "农民在收割稻谷", "importance_score": 4 }} ] }} ``` 现在,请开始你的匹配工作: [Ground Truth WIPs (GT列表)] {} [Model-Generated WIPs (模型列表)] {} 你的匹配结果 (请严格按照上述要求返回一个格式规整的 JSON,可以被 json.loads 直接解析。请不要在 JSON 数据前后添加任何额外的解释性文字或代码块标记): """ def extract_json_from_response(response: str) -> Optional[Dict]: """ Extract JSON from LLM response (simplified version for well-behaved LLMs). """ if not response: return None try: response = response.rstrip('```').lstrip('```json') return json.loads(response.strip()) except json.JSONDecodeError: print(response) return None def extract_wips_single( text: str, llm_client ) -> Tuple[Optional[List[Dict]], Optional[str]]: """ Extract WIPs from a single text using LLM. Args: text: Input text to extract WIPs from llm_client: LLM client instance (with built-in retry mechanism) Returns: Tuple of (wips_list, error_message) - wips_list: List of WIP dicts if successful, None if failed - error_message: Error message if failed, None if successful """ prompt = WIP_EXTRACTION_PROMPT.format(text) try: response = llm_client.generate(prompt) result = extract_json_from_response(response) if result is not None and "wips" in result: return result["wips"], None return None, "Failed to parse JSON from response" except Exception as e: return None, f"API error: {str(e)}" def extract_wips_batch( texts: Dict[str, str], llm_client, max_workers: int = 5, desc: str = "Extracting WIPs" ) -> Tuple[Dict[str, List[Dict]], Dict[str, str]]: """ Extract WIPs from multiple texts in parallel. 
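    Illustrative usage (``client`` is a hypothetical LLM client obtained via
    api.get_client_from_config; the input text is an arbitrary example):

        results, errors = extract_wips_batch(
            {"0": "一段关于制作煎蛋卷的教程视频……"}, client, max_workers=5
        )
        # results["0"] -> [{"info_point": "...", "importance_score": 5}, ...]
        # errors["0"]  -> error message string, only present on failure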
Args: texts: Dict of {sample_id: text} llm_client: LLM client instance (with built-in retry mechanism) max_workers: Number of concurrent workers desc: Progress bar description Returns: Tuple of (results, errors): - results: Dict of {sample_id: wips_list} - errors: Dict of {sample_id: error_message} """ results = {} errors = {} def process_single(sample_id: str, text: str): wips, error = extract_wips_single(text, llm_client) return sample_id, wips, error with ThreadPoolExecutor(max_workers=max_workers) as executor: futures = { executor.submit(process_single, sid, text): sid for sid, text in texts.items() } for future in tqdm(as_completed(futures), total=len(futures), desc=desc): sample_id, wips, error = future.result() if wips: results[sample_id] = wips if error: errors[sample_id] = error # Statistics: count valid (non-empty) extraction results total_attempted = len(texts) total_parsed = len(results) valid_results = sum(1 for wips in results.values() if wips) # Count non-empty lists console.print(f"[cyan]{desc} statistics: {total_attempted} attempted, {total_parsed} parsed, {valid_results} valid (non-empty)[/cyan]") return results, errors def match_wips_single( gt_wips: List[Dict], model_wips: List[Dict], llm_client ) -> Tuple[Optional[Dict], Optional[str]]: """ Match WIPs between ground truth and model generation. Args: gt_wips: Ground truth WIPs list model_wips: Model-generated WIPs list llm_client: LLM client instance (with built-in retry mechanism) Returns: Tuple of (match_result, error_message) """ gt_str = json.dumps(gt_wips, ensure_ascii=False, indent=2) model_str = json.dumps(model_wips, ensure_ascii=False, indent=2) prompt = WIP_MATCHING_PROMPT.format(gt_str, model_str) try: response = llm_client.generate(prompt) result = extract_json_from_response(response) if result is not None and all(k in result for k in ["matches", "unmatched_model_wips", "unmatched_gt_wips"]): return result, None return None, "Failed to parse match JSON from response" except Exception as e: return None, f"API error: {str(e)}" def match_wips_batch( gt_wips_dict: Dict[str, List[Dict]], model_wips_dict: Dict[str, List[Dict]], llm_client, max_workers: int = 5 ) -> Tuple[Dict[str, Dict], Dict[str, str]]: """ Match WIPs for multiple samples in parallel. 
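    Each successful match_result follows the judge's output schema from
    WIP_MATCHING_PROMPT, e.g.:

        {
            "matches": [{"model_wip": {...}, "gt_wip": {...}}],
            "unmatched_model_wips": [...],   # hallucinations (false positives)
            "unmatched_gt_wips": [...]       # misses (false negatives)
        }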
Args: gt_wips_dict: Dict of {sample_id: gt_wips_list} model_wips_dict: Dict of {sample_id: model_wips_list} llm_client: LLM client instance (with built-in retry mechanism) max_workers: Number of concurrent workers Returns: Tuple of (results, errors) """ results = {} errors = {} # Only match samples that have both GT and model WIPs (and both are non-empty) common_ids = { id for id in (set(gt_wips_dict.keys()) & set(model_wips_dict.keys())) if gt_wips_dict[id] and model_wips_dict[id] } def process_single(sample_id: str): gt_wips = gt_wips_dict[sample_id] model_wips = model_wips_dict[sample_id] match_result, error = match_wips_single(gt_wips, model_wips, llm_client) return sample_id, match_result, error with ThreadPoolExecutor(max_workers=max_workers) as executor: futures = { executor.submit(process_single, sid): sid for sid in common_ids } for future in tqdm(as_completed(futures), total=len(futures), desc="Matching WIPs"): sample_id, match_result, error = future.result() if match_result is not None: results[sample_id] = match_result if error is not None: errors[sample_id] = error # Statistics: count valid (non-empty) match results total_attempted = len(common_ids) total_parsed = len(results) valid_results = 0 for sample_id, match_result in results.items(): # Check if result is not empty (has at least one non-empty field) if match_result: matches = match_result.get("matches", []) unmatched_model = match_result.get("unmatched_model_wips", []) unmatched_gt = match_result.get("unmatched_gt_wips", []) # Consider valid if result has any content if matches or unmatched_model or unmatched_gt: valid_results += 1 console.print(f"[cyan]Matching statistics: {total_attempted} attempted, {total_parsed} parsed, {valid_results} valid (non-empty)[/cyan]") return results, errors def get_wip_score_int(wip: Optional[Dict]) -> int: """Get importance score from WIP, defaulting to 1.""" if not wip: return 1 return wip.get("importance_score", 1) def calculate_unweighted_metrics(match_results: Dict[str, Dict], core_threshold: int = 5) -> Dict[str, Any]: """ Calculate unweighted metrics (count-based) with macro and per-sample versions. 
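    Worked example (illustrative): a sample with 4 matches, 2 unmatched model
    WIPs, and 1 unmatched GT WIP has TP=4, FP=2, FN=1, so its unweighted
    per-sample F1 is 2*4 / (2*4 + 2 + 1) = 8/11 ≈ 0.727; the macro metric
    averages such per-sample F1s across all samples.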
Args: match_results: Dict of {sample_id: match_result} core_threshold: Threshold for core WIPs (importance_score >= threshold) Returns: Dict with macro F1, core versions, and per-sample F1s (unweighted) """ if not match_results: return {} # Per-sample metrics (for macro calculation) per_sample = {} for sample_id, result in match_results.items(): if not result: per_sample[sample_id] = {"overall_f1": 0.0, "core_f1": 0.0} continue # Sample-level counts sample_tp = len(result.get("matches", [])) sample_fp = len(result.get("unmatched_model_wips", [])) sample_fn = len(result.get("unmatched_gt_wips", [])) sample_core_tp = 0 sample_core_fp = 0 sample_core_fn = 0 # Core: count only WIPs with importance_score >= threshold for match in result.get("matches", []): gt_wip = match.get("gt_wip", {}) if get_wip_score_int(gt_wip) >= core_threshold: sample_core_tp += 1 for fp_wip in result.get("unmatched_model_wips", []): if get_wip_score_int(fp_wip) >= core_threshold: sample_core_fp += 1 for fn_wip in result.get("unmatched_gt_wips", []): if get_wip_score_int(fn_wip) >= core_threshold: sample_core_fn += 1 # Calculate per-sample F1s sample_overall_f1 = 2 * sample_tp / (2 * sample_tp + sample_fp + sample_fn) if (2 * sample_tp + sample_fp + sample_fn) > 0 else 0.0 sample_core_f1 = 2 * sample_core_tp / (2 * sample_core_tp + sample_core_fp + sample_core_fn) if (2 * sample_core_tp + sample_core_fp + sample_core_fn) > 0 else 0.0 per_sample[sample_id] = { "overall_f1": sample_overall_f1, "core_f1": sample_core_f1, } # Calculate macro F1 (average of per-sample F1s) valid_samples = [v for v in per_sample.values() if v] macro_f1 = sum(s["overall_f1"] for s in valid_samples) / len(valid_samples) if valid_samples else 0.0 macro_core_f1 = sum(s["core_f1"] for s in valid_samples) / len(valid_samples) if valid_samples else 0.0 return { "macro_wip_unweighted_f1": macro_f1, "macro_wip_unweighted_core_f1": macro_core_f1, "per_sample": per_sample, } def calculate_importance_weighted_metrics( match_results: Dict[str, Dict], core_threshold: int = 5 ) -> Dict[str, Any]: """ Calculate importance-weighted metrics (weighted by importance_score only) with macro and per-sample versions. 
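    Worked example (illustrative): matches whose GT importance scores are
    {5, 3} give TP=8; an unmatched model WIP with score 2 gives FP=2; an
    unmatched GT WIP with score 4 gives FN=4. The importance-weighted
    per-sample F1 is then 2*8 / (2*8 + 2 + 4) = 16/22 ≈ 0.727.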
Args: match_results: Dict of {sample_id: match_result} core_threshold: Threshold for core WIPs (importance_score >= threshold) Returns: Dict with macro F1, core versions, and per-sample F1s (importance-weighted) """ if not match_results: return {} # Per-sample metrics (for macro calculation) per_sample = {} for sample_id, result in match_results.items(): if not result: per_sample[sample_id] = {"overall_f1": 0.0, "core_f1": 0.0} continue # Sample-level metrics sample_tp, sample_fp, sample_fn = 0.0, 0.0, 0.0 sample_core_tp, sample_core_fp, sample_core_fn = 0.0, 0.0, 0.0 # TP from matches (use GT score) for match in result.get("matches", []): gt_wip = match.get("gt_wip") gt_score = get_wip_score_int(gt_wip) sample_tp += gt_score if gt_score >= core_threshold: sample_core_tp += gt_score # FP from unmatched model WIPs for fp_wip in result.get("unmatched_model_wips", []): fp_score = get_wip_score_int(fp_wip) sample_fp += fp_score if fp_score >= core_threshold: sample_core_fp += fp_score # FN from unmatched GT WIPs for fn_wip in result.get("unmatched_gt_wips", []): fn_score = get_wip_score_int(fn_wip) sample_fn += fn_score if fn_score >= core_threshold: sample_core_fn += fn_score # Calculate per-sample F1s sample_overall_f1 = 2 * sample_tp / (2 * sample_tp + sample_fp + sample_fn) if (2 * sample_tp + sample_fp + sample_fn) > 0 else 0.0 sample_core_f1 = 2 * sample_core_tp / (2 * sample_core_tp + sample_core_fp + sample_core_fn) if (2 * sample_core_tp + sample_core_fp + sample_core_fn) > 0 else 0.0 per_sample[sample_id] = { "overall_f1": sample_overall_f1, "core_f1": sample_core_f1, } # Calculate macro F1 (average of per-sample F1s) valid_samples = [v for v in per_sample.values() if v] macro_f1 = sum(s["overall_f1"] for s in valid_samples) / len(valid_samples) if valid_samples else 0.0 macro_core_f1 = sum(s["core_f1"] for s in valid_samples) / len(valid_samples) if valid_samples else 0.0 return { "macro_wip_importance_weighted_f1": macro_f1, "macro_wip_importance_weighted_core_f1": macro_core_f1, "per_sample": per_sample, } def calculate_double_weighted_metrics( match_results: Dict[str, Dict], core_threshold: int = 5, ) -> Dict[str, Any]: """ Calculate double-weighted metrics using V6.2 logic (importance_score × match_quality) with macro and per-sample versions. NOTE: This function now uses pre-computed match_quality from match results (no BERTScore computation here). 
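    Worked example using the V6.2 formulas listed below (illustrative): a match
    with gt_score=5, model_score=4 and match_quality=0.8 contributes TP=4.0,
    FN=1.0, FP=0.8; an additional unmatched GT WIP with score 3 adds FN=3.0.
    The per-sample F1 is then 2*4.0 / (2*4.0 + 0.8 + 4.0) = 8/12.8 = 0.625.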
V6.2 Logic: - For matched pairs: - TP = gt_score × match_quality - FN = gt_score × (1 - match_quality) - FP = model_score × (1 - match_quality) - For unmatched GT WIPs: FN += gt_score (complete miss) - For unmatched model WIPs: FP += model_score (complete hallucination) Args: match_results: Dict of {sample_id: match_result} (with pre-computed match_quality) core_threshold: Threshold for core WIPs (importance_score >= threshold) Returns: Dict with macro F1, core versions, and per-sample F1s (double-weighted) """ if not match_results: return {} # Per-sample metrics (for macro calculation) per_sample = {} for sample_id, result in match_results.items(): if not result: per_sample[sample_id] = {"overall_f1": 0.0, "core_f1": 0.0} continue # Sample-level metrics sample_tp, sample_fp, sample_fn = 0.0, 0.0, 0.0 sample_core_tp, sample_core_fp, sample_core_fn = 0.0, 0.0, 0.0 # Process matched pairs using pre-computed match_quality matches = result.get("matches", []) for match in matches: gt_wip = match.get("gt_wip", {}) model_wip = match.get("model_wip", {}) match_quality = match.get("match_quality") # Skip if match_quality not computed if match_quality is None: continue gt_score = get_wip_score_int(gt_wip) model_score = get_wip_score_int(model_wip) # V6.2 formulas for all WIPs tp_contrib = gt_score * match_quality fn_contrib = gt_score * (1 - match_quality) fp_contrib = model_score * (1 - match_quality) sample_tp += tp_contrib sample_fn += fn_contrib sample_fp += fp_contrib # Core: V6.2 formulas only for WIPs with importance_score >= threshold if gt_score >= core_threshold: sample_core_tp += tp_contrib sample_core_fn += fn_contrib if model_score >= core_threshold: sample_core_fp += fp_contrib # Complete misses (unmatched GT WIPs) for fn_wip in result.get("unmatched_gt_wips", []): fn_score = get_wip_score_int(fn_wip) sample_fn += fn_score if fn_score >= core_threshold: sample_core_fn += fn_score # Complete hallucinations (unmatched model WIPs) for fp_wip in result.get("unmatched_model_wips", []): fp_score = get_wip_score_int(fp_wip) sample_fp += fp_score if fp_score >= core_threshold: sample_core_fp += fp_score # Calculate per-sample F1s sample_overall_f1 = 2 * sample_tp / (2 * sample_tp + sample_fp + sample_fn) if (2 * sample_tp + sample_fp + sample_fn) > 0 else 0.0 sample_core_f1 = 2 * sample_core_tp / (2 * sample_core_tp + sample_core_fp + sample_core_fn) if (2 * sample_core_tp + sample_core_fp + sample_core_fn) > 0 else 0.0 per_sample[sample_id] = { "overall_f1": sample_overall_f1, "core_f1": sample_core_f1, } # Calculate macro F1 (average of per-sample F1s) valid_samples = [v for v in per_sample.values() if v] macro_f1 = sum(s["overall_f1"] for s in valid_samples) / len(valid_samples) if valid_samples else 0.0 macro_core_f1 = sum(s["core_f1"] for s in valid_samples) / len(valid_samples) if valid_samples else 0.0 return { "macro_wip_double_weighted_f1": macro_f1, "macro_wip_double_weighted_core_f1": macro_core_f1, "per_sample": per_sample, } def save_wip_detailed_results( save_dir: str, sample_ids: List[str], gt_wips_dict: Dict[str, List[Dict]], model_wips_dict: Dict[str, List[Dict]], match_results_dict: Dict[str, Dict], per_sample_f1s_dict: Dict[str, Dict], predictions_dict: Dict[str, str], references_dict: Dict[str, str], filename: str = "wip_results.json" ): """ Save detailed WIP evaluation results to file. 
This saves: - prediction: Model prediction text for each sample - reference: Ground truth reference text for each sample - gt_wips: Ground truth information points for each sample - model_wips: Model-generated information points for each sample - match_result: Matching results (matches, unmatched_gt_wips, unmatched_model_wips) - per_sample_f1s: 6 F1 scores for each sample (3 types × 2 versions) Args: save_dir: Directory to save the file sample_ids: List of sample IDs gt_wips_dict: Dict of {sample_id: gt_wips_list} model_wips_dict: Dict of {sample_id: model_wips_list} match_results_dict: Dict of {sample_id: match_result} per_sample_f1s_dict: Dict of {sample_id: f1_scores_dict} with 6 F1 scores predictions_dict: Dict of {sample_id: prediction_text} references_dict: Dict of {sample_id: reference_text} filename: Output filename """ os.makedirs(save_dir, exist_ok=True) save_path = os.path.join(save_dir, filename) detailed_results = {} for sample_id in sample_ids: detailed_results[sample_id] = { "prediction": predictions_dict.get(sample_id, ""), "reference": references_dict.get(sample_id, ""), "gt_wips": gt_wips_dict.get(sample_id, []), "model_wips": model_wips_dict.get(sample_id, []), "match_result": match_results_dict.get(sample_id, {}), "f1_scores": per_sample_f1s_dict.get(sample_id, {}), } with open(save_path, 'w', encoding='utf-8') as f: json.dump(detailed_results, f, ensure_ascii=False, indent=2) console.print(f"[green]WIP detailed results saved to {save_path}[/green]") def get_gt_cache_path(cache_dir: str, model_name: str) -> str: """Get the path for GT WIPs cache file.""" return os.path.join(cache_dir, f"test_gt_wip_{model_name}.parquet") def load_wip_results_cache(cache_path: str) -> Optional[Dict[str, Any]]: """ Load previously saved WIP results (model_wips and match_results). Args: cache_path: Path to wip_{model_name}.json file Returns: Dict with "model_wips" and "match_results" or None if not found """ if not os.path.exists(cache_path): return None try: with open(cache_path, 'r', encoding='utf-8') as f: data = json.load(f) # Extract model_wips and match_results from saved file model_wips = {} match_results = {} for sample_id, sample_data in data.items(): # Load model_wips except it's an empty list if "model_wips" in sample_data and sample_data["model_wips"]: model_wips[sample_id] = sample_data["model_wips"] # Load match_result only if it's not empty (empty dict means no matching was done) if "match_result" in sample_data and sample_data["match_result"]: match_results[sample_id] = sample_data["match_result"] return { "model_wips": model_wips, "match_results": match_results, } except Exception as e: console.print(f"[yellow]Failed to load WIP results cache: {e}[/yellow]") return None def load_gt_wips_cache(cache_path: str) -> Optional[Dict[str, List[Dict]]]: """ Load GT WIPs from cache file. Args: cache_path: Path to parquet cache file Returns: Dict of {sample_id: wips_list} or None if not found """ if not os.path.exists(cache_path): return None try: df = pd.read_parquet(cache_path) result = {} for _, row in df.iterrows(): sample_id = str(row["sample_id"]) wips = row["wips"] if isinstance(wips, str): wips = json.loads(wips) if wips: result[sample_id] = wips console.print(f"[green]Loaded GT WIPs cache from {cache_path} ({len(result)} samples)[/green]") return result except Exception as e: console.print(f"[yellow]Failed to load GT cache: {e}[/yellow]") return None def save_gt_wips_cache(gt_wips: Dict[str, List[Dict]], cache_path: str): """ Save GT WIPs to cache file. 
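    Round-trip sketch (the cache path is derived by get_gt_cache_path; the
    directory and judge name shown are hypothetical):

        path = get_gt_cache_path("./data/item_understand", "gemini")
        save_gt_wips_cache({"0": [{"info_point": "...", "importance_score": 5}]}, path)
        gt_wips = load_gt_wips_cache(path)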
Args:
        gt_wips: Dict of {sample_id: wips_list}
        cache_path: Path to save parquet file
    """
    os.makedirs(os.path.dirname(cache_path), exist_ok=True)
    data = []
    for sample_id, wips in gt_wips.items():
        data.append({
            "sample_id": sample_id,
            "wips": json.dumps(wips, ensure_ascii=False)
        })
    df = pd.DataFrame(data)
    df.to_parquet(cache_path, index=False)
    console.print(f"[green]Saved GT WIPs cache to {cache_path} ({len(gt_wips)} samples)[/green]")


def _load_or_extract_gt_wips(
    sample_ids: List[str],
    references: Dict[str, str],
    llm_client,
    max_workers: int,
    gt_cache_dir: Optional[str],
    model_name: str,
) -> Dict[str, List[Dict]]:
    """Load GT WIPs from cache or extract if missing."""
    gt_wips = None
    if gt_cache_dir:
        cache_path = get_gt_cache_path(gt_cache_dir, model_name)
        full_gt_cache = load_gt_wips_cache(cache_path)
        if full_gt_cache is not None:
            gt_wips = {id: full_gt_cache[id] for id in sample_ids if id in full_gt_cache}
            missing_ids = set(sample_ids) - set(gt_wips.keys())
            if missing_ids:
                console.print(f"[yellow]Missing {len(missing_ids)} samples in GT cache, extracting...[/yellow]")
                missing_refs = {id: references[id] for id in missing_ids}
                new_gt_wips, gt_errors = extract_wips_batch(
                    missing_refs, llm_client, max_workers, "Extracting GT WIPs"
                )
                gt_wips.update(new_gt_wips)
                full_gt_cache.update(new_gt_wips)
                save_gt_wips_cache(full_gt_cache, cache_path)
                if gt_errors:
                    console.print(f"[red]GT extraction errors: {len(gt_errors)} samples[/red]")
    if gt_wips is None:
        console.print("[cyan]Extracting GT WIPs...[/cyan]")
        gt_wips, gt_errors = extract_wips_batch(
            references, llm_client, max_workers, "Extracting GT WIPs"
        )
        if gt_cache_dir:
            cache_path = get_gt_cache_path(gt_cache_dir, model_name)
            save_gt_wips_cache(gt_wips, cache_path)
        if gt_errors:
            console.print(f"[red]GT extraction errors: {len(gt_errors)} samples[/red]")
    return gt_wips


# Extract text after the last </think> tag if present
def extract_after_think(text: str) -> str:
    """Extract text after the last </think> tag."""
    if '</think>' in text:
        return text.split('</think>')[-1].strip()
    return text


def _load_or_extract_model_wips(
    sample_ids: List[str],
    predictions: Dict[str, str],
    gt_wips: Dict[str, List[Dict]],
    llm_client,
    max_workers: int,
    save_dir: Optional[str],
    model_name: str,
) -> Dict[str, List[Dict]]:
    """Load Model WIPs from cache or extract if missing (incremental)."""
    model_wips = {}
    if save_dir:
        wip_cache_path = os.path.join(save_dir, f"wip_{model_name}.json")
        cached_data = load_wip_results_cache(wip_cache_path)
        if cached_data:
            model_wips = cached_data.get("model_wips", {})
            console.print(f"[green]Loaded {len(model_wips)} cached model_wips[/green]")
    missing_model_ids = set(sample_ids) - set(model_wips.keys())
    missing_model_ids = {id for id in missing_model_ids if id in gt_wips}
    if missing_model_ids:
        console.print(f"[cyan]Extracting Model WIPs for {len(missing_model_ids)} missing samples...[/cyan]")
        missing_predictions = {id: extract_after_think(predictions[id]) for id in missing_model_ids}
        new_model_wips, model_errors = extract_wips_batch(
            missing_predictions, llm_client, max_workers, "Extracting Model WIPs"
        )
        model_wips.update(new_model_wips)
        if model_errors:
            console.print(f"[red]Model extraction errors: {len(model_errors)} samples[/red]")
    else:
        console.print(f"[green]All {len(sample_ids)} samples already have Model WIPs (from cache)[/green]")
    return model_wips


def _load_or_match_wips(
    sample_ids: List[str],
    gt_wips: Dict[str, List[Dict]],
    model_wips: Dict[str, List[Dict]],
    llm_client,
    max_workers: int,
    save_dir: Optional[str],
    model_name: str,
) -> Dict[str, Dict]:
    """Load match results
from cache or match if missing (incremental).""" match_results = {} if save_dir: wip_cache_path = os.path.join(save_dir, f"wip_{model_name}.json") cached_data = load_wip_results_cache(wip_cache_path) if cached_data: match_results = cached_data.get("match_results", {}) console.print(f"[green]Loaded {len(match_results)} cached match_results[/green]") missing_match_ids = set(sample_ids) - set(match_results.keys()) # Only match if both gt_wips and model_wips exist and are non-empty missing_match_ids = { id for id in missing_match_ids if id in gt_wips and id in model_wips and gt_wips[id] and model_wips[id] } if missing_match_ids: console.print(f"[cyan]Matching WIPs for {len(missing_match_ids)} missing samples...[/cyan]") missing_gt_wips = {id: gt_wips[id] for id in missing_match_ids} missing_model_wips = {id: model_wips[id] for id in missing_match_ids} new_match_results, match_errors = match_wips_batch( missing_gt_wips, missing_model_wips, llm_client, max_workers ) match_results.update(new_match_results) if match_errors: console.print(f"[red]Matching errors: {len(match_errors)} samples[/red]") else: console.print(f"[green]All {len(sample_ids)} samples already have match results (from cache)[/green]") return match_results def _compute_bertscore_incremental( sample_ids: List[str], match_results: Dict[str, Dict], bertscore_model: str, bertscore_num_layers: int, ) -> None: """Compute BERTScore for matches that don't have it yet (incremental, in-place update).""" import torch from bert_score import BERTScorer console.print("[cyan]Computing BERTScore for matched pairs...[/cyan]") all_gt_texts = [] all_model_texts = [] sample_match_indices = [] for sample_id in sample_ids: if sample_id in match_results: matches = match_results[sample_id].get("matches", []) for match_idx, match in enumerate(matches): if match.get("match_quality") is not None: continue gt_wip = match.get("gt_wip") model_wip = match.get("model_wip") # Skip if either wip is None or not a dict if not gt_wip or not isinstance(gt_wip, dict) or not model_wip or not isinstance(model_wip, dict): continue gt_text = gt_wip.get("info_point", "") model_text = model_wip.get("info_point", "") if gt_text and model_text: batch_idx = len(all_gt_texts) all_gt_texts.append(gt_text) all_model_texts.append(model_text) sample_match_indices.append((sample_id, match_idx, batch_idx)) if all_gt_texts and all_model_texts: console.print(f"[cyan]Computing BERTScore for {len(all_gt_texts)} new matched pairs...[/cyan]") device = "cuda" if torch.cuda.is_available() else "cpu" scorer = BERTScorer( model_type=bertscore_model, num_layers=bertscore_num_layers, device=device, lang="zh", rescale_with_baseline=False, ) try: P, R, F1 = scorer.score(all_model_texts, all_gt_texts) match_qualities = F1.tolist() for sample_id, match_idx, batch_idx in sample_match_indices: match_results[sample_id]["matches"][match_idx]["match_quality"] = match_qualities[batch_idx] console.print(f"[green]Computed BERTScore for {len(match_qualities)} matched pairs[/green]") except Exception as e: console.print(f"[red]BERTScore computation failed: {e}[/red]") for sample_id, match_idx, _ in sample_match_indices: match_results[sample_id]["matches"][match_idx]["match_quality"] = None else: console.print(f"[green]All matches already have BERTScore (from cache)[/green]") def evaluate_wip( predictions: Dict[str, str], references: Dict[str, str], llm_client, max_workers: int = 5, max_samples: Optional[int] = None, gt_cache_dir: Optional[str] = None, model_name: str = "unknown", save_dir: Optional[str] = 
None, bertscore_model: str = "bert-base-chinese", bertscore_num_layers: int = 9, core_threshold: int = 5, ) -> Tuple[Dict[str, Any], Dict[str, Dict[str, Any]]]: """ Main WIP evaluation function with six types of F1 metrics (3 types × 2 versions). Computes: 1. Unweighted F1 (count-based) - overall and core 2. Importance-weighted F1 (weighted by importance_score only) - overall and core 3. Double-weighted F1 (V6.2 logic: importance_score × match_quality) - overall and core Args: predictions: Dict of {sample_id: prediction_text} references: Dict of {sample_id: reference_text} llm_client: LLM client instance for judge (with built-in retry mechanism) max_workers: Number of concurrent workers max_samples: Maximum samples to evaluate (None for all) gt_cache_dir: Directory for GT WIPs cache model_name: Model name for cache file naming save_dir: Directory to save results bertscore_model: BERT model for computing match quality bertscore_num_layers: Number of layers for BERTScore core_threshold: Threshold for core WIPs (importance_score >= threshold) Returns: Tuple of (metrics, per_sample_metrics): - metrics: Dict with 6 F1 scores and sample count (flattened) - per_sample_metrics: Dict of {sample_id: {6 F1 scores}} """ # Select samples (sorted by sample_id for consistency) all_sample_ids = sorted(set(predictions.keys()) & set(references.keys())) if max_samples is not None and max_samples < len(all_sample_ids): sample_ids = all_sample_ids[:max_samples] console.print(f"[cyan]Selected {len(sample_ids)} samples for WIP evaluation (sorted by sample_id)[/cyan]") else: sample_ids = all_sample_ids console.print(f"[cyan]Evaluating all {len(sample_ids)} samples for WIP[/cyan]") selected_predictions = {id: predictions[id] for id in sample_ids} selected_references = {id: references[id] for id in sample_ids} # Step 1: Load/Extract GT WIPs gt_wips = _load_or_extract_gt_wips( sample_ids, selected_references, llm_client, max_workers, gt_cache_dir, model_name ) # Step 2: Load/Extract Model WIPs (incremental) model_wips = _load_or_extract_model_wips( sample_ids, selected_predictions, gt_wips, llm_client, max_workers, save_dir, model_name ) # Step 3: Load/Match WIPs (incremental) match_results = _load_or_match_wips( sample_ids, gt_wips, model_wips, llm_client, max_workers, save_dir, model_name ) # Step 3.5: Compute BERTScore (incremental) _compute_bertscore_incremental(sample_ids, match_results, bertscore_model, bertscore_num_layers) # Step 4: Calculate three types of metrics (each with overall, core, and per-sample) console.print("[cyan]Computing all metrics (unweighted, importance-weighted, double-weighted)...[/cyan]") # 4.1: Unweighted metrics unweighted_metrics = calculate_unweighted_metrics( match_results, core_threshold=core_threshold ) # 4.2: Importance-weighted metrics importance_metrics = calculate_importance_weighted_metrics( match_results, core_threshold=core_threshold ) # 4.3: Double-weighted metrics (using pre-computed BERTScore) double_metrics = calculate_double_weighted_metrics( match_results, core_threshold=core_threshold ) # Flattened overall metrics (6 F1 scores: 3 types × 2 versions) metrics = { # Macro F1 (average of per-sample F1s) "macro_wip_unweighted_f1": unweighted_metrics.get("macro_wip_unweighted_f1", 0.0), "macro_wip_unweighted_core_f1": unweighted_metrics.get("macro_wip_unweighted_core_f1", 0.0), "macro_wip_importance_weighted_f1": importance_metrics.get("macro_wip_importance_weighted_f1", 0.0), "macro_wip_importance_weighted_core_f1": 
importance_metrics.get("macro_wip_importance_weighted_core_f1", 0.0), "macro_wip_double_weighted_f1": double_metrics.get("macro_wip_double_weighted_f1", 0.0), "macro_wip_double_weighted_core_f1": double_metrics.get("macro_wip_double_weighted_core_f1", 0.0), "wip_num_samples": len(match_results), } # Merge per-sample metrics from all three types (6 F1 scores per sample, same as before since per-sample is used for macro) per_sample_metrics = {} for sample_id in sample_ids: unweighted_per_sample = unweighted_metrics.get("per_sample", {}).get(sample_id, {"overall_f1": 0.0, "core_f1": 0.0}) importance_per_sample = importance_metrics.get("per_sample", {}).get(sample_id, {"overall_f1": 0.0, "core_f1": 0.0}) double_per_sample = double_metrics.get("per_sample", {}).get(sample_id, {"overall_f1": 0.0, "core_f1": 0.0}) per_sample_metrics[sample_id] = { "wip_unweighted_f1": unweighted_per_sample["overall_f1"], "wip_unweighted_core_f1": unweighted_per_sample["core_f1"], "wip_importance_weighted_f1": importance_per_sample["overall_f1"], "wip_importance_weighted_core_f1": importance_per_sample["core_f1"], "wip_double_weighted_f1": double_per_sample["overall_f1"], "wip_double_weighted_core_f1": double_per_sample["core_f1"], } # Step 5: Save detailed results to file if save_dir: console.print("[cyan]Saving WIP detailed results...[/cyan]") save_wip_detailed_results( save_dir=save_dir, sample_ids=sample_ids, gt_wips_dict=gt_wips, model_wips_dict=model_wips, match_results_dict=match_results, per_sample_f1s_dict=per_sample_metrics, predictions_dict=selected_predictions, references_dict=selected_references, filename=f"wip_{model_name}.json" ) console.print(f"[green]WIP evaluation completed: {metrics['wip_num_samples']} samples[/green]") console.print(f"[green] Macro Unweighted F1: {metrics['macro_wip_unweighted_f1']:.4f} (Core: {metrics['macro_wip_unweighted_core_f1']:.4f})[/green]") console.print(f"[green] Macro Importance-weighted F1: {metrics['macro_wip_importance_weighted_f1']:.4f} (Core: {metrics['macro_wip_importance_weighted_core_f1']:.4f})[/green]") console.print(f"[green] Macro Double-weighted F1: {metrics['macro_wip_double_weighted_f1']:.4f} (Core: {metrics['macro_wip_double_weighted_core_f1']:.4f})[/green]") return metrics, per_sample_metrics ================================================ FILE: benchmarks/benchmark/tasks/v1_0/label_pred/__init__.py ================================================ """ Label Prediction Task Module Classification task for predicting user engagement with video content. Uses logprobs-based classification with AUC and wuAUC metrics. """ from .config import LABEL_PRED_CONFIG from .evaluator import LabelPredEvaluator from . import utils __all__ = [ "LABEL_PRED_CONFIG", "LabelPredEvaluator", "utils", ] ================================================ FILE: benchmarks/benchmark/tasks/v1_0/label_pred/config.py ================================================ """ Label Prediction Task Configuration This is a classification task for predicting user engagement with video content. Uses logprobs-based classification with AUC and wuAUC metrics. 
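Illustrative sketch (assumed numbers, not part of the pipeline) of how the
"target_tokens" / "logprobs" settings below turn next-token log-probabilities
into a classification score: the probability mass on "是" vs "否" is
renormalized into P(positive).

    >>> import math
    >>> top_logprobs = {"是": -0.4, "否": -1.2, "the": -5.0}  # hypothetical top-k slice
    >>> p_yes, p_no = math.exp(top_logprobs["是"]), math.exp(top_logprobs["否"])
    >>> round(p_yes / (p_yes + p_no), 3)
    0.69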
""" # Label Pred Task Configuration LABEL_PRED_CONFIG = { "name": "label_pred", "source": "Kuaishou Internal", "splits": ["test"], "size": 346190, "sample_size": 346190, "description": "Predict user engagement with video content (yes/no classification)", "data_fields": { "messages_field": "messages", "metadata_field": "metadata", }, "prompt_config": { "enable_thinking": False, # Enable thinking mode for apply_chat_template "custom_chat_template": "qwen3_soft_switch.jinja2", # Custom jinja2 template (file in v1_0 directory) }, "generation_config": { "max_new_tokens": 1, "temperature": 1, "top_p": 1, "top_k": -1, "do_sample": True, "num_return_sequences": 1, "return_logprobs": True, # Need to return logprobs for probability extraction "logprobs": 10000, # Return top-10 logprobs to ensure "是" and "否" are included "target_tokens": ["是", "否"], # Target tokens for logprobs extraction (classification) "max_new_thinking_tokens": 1000 }, "evaluation_config": { "metrics": ["auc"], }, "task_type": "logprobs_classification", # Special task type } ================================================ FILE: benchmarks/benchmark/tasks/v1_0/label_pred/evaluator.py ================================================ """ Label Prediction Task Evaluator Evaluator for label_pred classification task. Computes AUC metric from logprobs-based predictions. """ from typing import Dict, Any, Tuple, List from benchmark.console import console from benchmark.tasks.v1_0.base_evaluator import BaseEval from benchmark.tasks.v1_0.label_pred.utils import ( extract_label_from_answer, extract_probability_from_logprobs, calculate_auc, get_debug_info, ) class LabelPredEvaluator(BaseEval): """ Label prediction task evaluator This is a classification task for predicting user engagement. Uses logprobs-based predictions to compute AUC metric. Metrics: - AUC: Area Under ROC Curve """ @property def required_metrics(self) -> List[str]: """Define required overall metrics for label prediction evaluation""" return ["auc"] def _compute_metrics_from_scratch(self) -> Tuple[Dict[str, Any], Dict[str, Dict[str, Any]]]: """ Compute all evaluation metrics from scratch Extracts probabilities from logprobs and computes AUC metric. Also stores per-sample metrics back into self.samples for caching. Returns: Tuple of (metrics, per_sample_metrics): - metrics: Overall metrics including auc, etc. 
- per_sample_metrics: Per-sample evaluation results """ total_samples = len(self.samples) # Extract predictions and labels predictions = {} # {sample_id: probability} labels = {} # {sample_id: 0 or 1} # Per-sample metrics per_sample_metrics = {} # Debug information collection debug_info = { "correct_predictions": [], "incorrect_predictions": [], "invalid_samples": [], } for sample_id, sample in self.samples.items(): # Get ground truth answer ground_truth = sample.get("ground_truth", "") # Extract label from ground truth label = extract_label_from_answer(ground_truth) if label == -1: # Invalid label console.print(f"[yellow]Sample {sample_id}: unrecognized answer '{ground_truth}'[/yellow]") if self.debug: debug_info["invalid_samples"].append({ "sample_id": sample_id, "ground_truth": ground_truth, "reason": "unrecognized_label" }) continue labels[sample_id] = label # Get model prediction (logprobs dictionary) # For label_pred, generations contains {token: probability} dict generations = sample.get("generations", {}) # Variables to store probability extraction results predicted_prob = 0.5 parsed_probs = None normalized_probs = None if not generations: # No generation - log as invalid sample console.print(f"[yellow]Sample {sample_id}: no generation found[/yellow]") if self.debug: debug_info["invalid_samples"].append({ "sample_id": sample_id, "ground_truth": ground_truth, "reason": "no_generation" }) # Skip this sample - don't include in predictions continue else: try: # Extract probability for positive class ("是") # Now returns dict with parsed_probs, normalized_probs, and score prob_result = extract_probability_from_logprobs( generations, positive_token="是", negative_token="否", sample_id=sample_id ) predicted_prob = prob_result["score"] parsed_probs = prob_result["parsed_probs"] normalized_probs = prob_result["normalized_probs"] except ValueError as e: # Parsing failed - log detailed error and skip sample console.print(f"[red]Sample {sample_id}: {str(e)}[/red]") if self.debug: debug_info["invalid_samples"].append({ "sample_id": sample_id, "ground_truth": ground_truth, "reason": "parsing_error", "error": str(e) }) # Skip this sample - don't include in predictions continue predictions[sample_id] = predicted_prob # Store per-sample metrics (both in return dict and in self.samples for caching) sample_metrics = { "label": label, "predicted_prob": predicted_prob, } per_sample_metrics[sample_id] = sample_metrics # Cache metrics in self.samples for future use, including debug info self.samples[sample_id]["label"] = label self.samples[sample_id]["predicted_prob"] = predicted_prob # Add new debug fields to sample for tracking self.samples[sample_id]["y_true"] = label self.samples[sample_id]["y_score"] = predicted_prob if parsed_probs is not None: self.samples[sample_id]["parsed_probs"] = parsed_probs if normalized_probs is not None: self.samples[sample_id]["normalized_probs"] = normalized_probs # Debug information collection if self.debug: debug_item = get_debug_info( sample_id=sample_id, logprobs_dict=parsed_probs, predicted_prob=predicted_prob, ground_truth=ground_truth, label=label, ) # Determine if prediction is correct # Correct: (predicted_prob > 0.5 and label = 1) OR (predicted_prob <= 0.5 and label = 0) is_correct = (predicted_prob > 0.5 and label == 1) or (predicted_prob <= 0.5 and label == 0) if is_correct: debug_info["correct_predictions"].append(debug_item) else: debug_info["incorrect_predictions"].append(debug_item) # Calculate AUC auc = calculate_auc(predictions, labels) # Prepare overall 
metrics
        metrics = {
            "auc": auc,
            "total_samples": total_samples,
            "valid_samples": len(labels),
            "invalid_samples": len(debug_info["invalid_samples"]) if self.debug else 0,
        }

        # Save debug information if requested
        if self.debug and self.predictions_dir:
            self._save_debug_info(debug_info, metrics)

        return metrics, per_sample_metrics

    def _save_debug_info(
        self,
        debug_info: Dict[str, Any],
        metrics: Dict[str, Any],
    ):
        """
        Save detailed debug information to file

        Args:
            debug_info: Debug information dictionary
            metrics: Overall metrics
        """
        # Add statistics to debug_info
        debug_info["statistics"] = {
            "total_samples": metrics["total_samples"],
            "valid_samples": metrics["valid_samples"],
            "correct_predictions_count": len(debug_info["correct_predictions"]),
            "incorrect_predictions_count": len(debug_info["incorrect_predictions"]),
            "invalid_samples_count": len(debug_info["invalid_samples"]),
        }

        # Add metrics
        debug_info["metrics"] = metrics

        # Save debug info to file using base class method
        self._save_debug_json(debug_info, filename="debug.json")

        console.print(f"Total samples: {metrics['total_samples']}")
        console.print(f"Valid samples: {metrics['valid_samples']}")
        console.print(f"Correct predictions: {len(debug_info['correct_predictions'])}")
        console.print(f"Incorrect predictions: {len(debug_info['incorrect_predictions'])}")

        # Calculate and display accuracy if we have valid predictions
        total_predictions = len(debug_info['correct_predictions']) + len(debug_info['incorrect_predictions'])
        if total_predictions > 0:
            accuracy = len(debug_info['correct_predictions']) / total_predictions * 100
            console.print(f"Accuracy: {accuracy:.2f}%")
        console.print(f"Invalid samples: {len(debug_info['invalid_samples'])}")

        # Print metrics
        console.print("\n[bold]Metrics:[/bold]")
        console.print(f"  AUC: {metrics['auc']:.4f}")

        # Show some invalid sample examples
        if debug_info["invalid_samples"]:
            console.print(f"\n[yellow]Invalid sample examples (first 3):[/yellow]")
            for i, item in enumerate(debug_info["invalid_samples"][:3]):
                console.print(f"  Example {i+1}:")
                console.print(f"    Sample ID: {item['sample_id']}")
                console.print(f"    Reason: {item['reason']}")
                console.print(f"    Ground truth: {item['ground_truth']}")
                console.print()


================================================
FILE: benchmarks/benchmark/tasks/v1_0/label_pred/utils.py
================================================
"""
Label Prediction Task Utilities

Functions for label extraction, probability processing, and AUC/wuAUC computation.
"""
import json
import numpy as np
from typing import Dict, Tuple, Any, List
from sklearn.metrics import roc_auc_score

from benchmark.console import console


def extract_label_from_answer(answer: str) -> int:
    """
    Extract binary label from answer string

    Args:
        answer: Answer string (e.g., "是<|im_end|>" or "否<|im_end|>")

    Returns:
        1 if positive ("是"), 0 if negative ("否"), -1 if unrecognized

    Examples:
        >>> extract_label_from_answer("是<|im_end|>")
        1
        >>> extract_label_from_answer("否")
        0
    """
    if "是" in answer:
        return 1
    elif "否" in answer:
        return 0
    else:
        return -1


def extract_probability_from_logprobs(
    generations: List[str],
    positive_token: str = "是",
    negative_token: str = "否",
    sample_id: str = None
) -> Dict[str, Any]:
    """
    Extract the probability of the positive class from a list of generations,
    each containing reasoning text followed by JSON probabilities.
    The two parsed probabilities are renormalized so that they sum to 1.
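    For example (illustrative numbers): parsed probabilities {"是": 0.6, "否": 0.2}
    renormalize to {"是": 0.75, "否": 0.25}, so the returned score is 0.75.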
    Args:
        generations: List of strings, each containing reasoning text followed by JSON probabilities
        positive_token: Token representing positive class (default "是")
        negative_token: Token representing negative class (default "否")
        sample_id: Optional sample ID for error messages

    Returns:
        Dictionary containing:
        - parsed_probs: Original parsed probabilities before normalization
        - normalized_probs: Renormalized probabilities (sum to 1)
        - score: Final positive class probability (after normalization)

    Raises:
        ValueError: If JSON parsing fails or required tokens are missing

    Examples:
        >>> generations = ['...</think>\\n{"是": 0.7, "否": 0.3}']
        >>> result = extract_probability_from_logprobs(generations)
        >>> result['score']
        0.7
        >>> result['normalized_probs']
        {'是': 0.7, '否': 0.3}
    """
    parsed_list = []
    normalized_list = []
    scores = []

    for idx, generation in enumerate(generations):
        # Extract JSON part: check for </think> tag first
        if "</think>" in generation:
            # Extract content after </think>
            json_str = generation.split("</think>")[-1].strip()
        else:
            # No </think> tag, try to parse the entire string
            json_str = generation.strip()

        # Parse JSON and extract probability
        try:
            probs_dict = json.loads(json_str)

            # Validate that it's a dict and contains required tokens
            if not isinstance(probs_dict, dict):
                raise ValueError(f"Parsed JSON is not a dictionary: {type(probs_dict)}")
            if positive_token not in probs_dict:
                raise ValueError(f"Positive token '{positive_token}' not found in probabilities: {probs_dict}")
            if negative_token not in probs_dict:
                raise ValueError(f"Negative token '{negative_token}' not found in probabilities: {probs_dict}")

            # Extract probabilities
            p_pos = float(probs_dict[positive_token])
            p_neg = float(probs_dict[negative_token])

            # Renormalize so the two probabilities sum to 1
            total = p_pos + p_neg
            if total <= 0:
                raise ValueError(f"Sum of probabilities is non-positive: {total}")
            p_pos_normalized = p_pos / total
            p_neg_normalized = p_neg / total

            # Store results
            parsed_list.append({positive_token: p_pos, negative_token: p_neg})
            normalized_list.append({positive_token: p_pos_normalized, negative_token: p_neg_normalized})
            scores.append(p_pos_normalized)

        except (json.JSONDecodeError, TypeError, AttributeError, ValueError, KeyError) as e:
            # Raise detailed exception
            error_msg = "Failed to parse generation"
            if sample_id:
                error_msg += f" for sample_id '{sample_id}'"
            error_msg += f" at index {idx}:\n"
            error_msg += f" Generation: {generation[:200]}..."
if len(generation) > 200 else f" Generation: {generation}\n" error_msg += f"\n Error: {str(e)}" raise ValueError(error_msg) # If no valid probabilities were found (empty list), raise error if not scores: error_msg = "No valid probabilities found in generations" if sample_id: error_msg += f" for sample_id '{sample_id}'" raise ValueError(error_msg) # Average across all valid elements (usually just one element) if len(scores) == 1: return { "parsed_probs": parsed_list[0], "normalized_probs": normalized_list[0], "score": scores[0] } else: # Average the probabilities for each token avg_parsed = { positive_token: sum(p[positive_token] for p in parsed_list) / len(parsed_list), negative_token: sum(p[negative_token] for p in parsed_list) / len(parsed_list) } avg_normalized = { positive_token: sum(p[positive_token] for p in normalized_list) / len(normalized_list), negative_token: sum(p[negative_token] for p in normalized_list) / len(normalized_list) } avg_score = sum(scores) / len(scores) return { "parsed_probs": avg_parsed, "normalized_probs": avg_normalized, "score": avg_score } def calculate_auc( predictions: Dict[str, float], labels: Dict[str, int] ) -> float: """ Calculate AUC (Area Under ROC Curve) using sklearn Args: predictions: Predicted probabilities, format: {sample_id: probability} labels: Ground truth labels (0 or 1), format: {sample_id: label} Returns: AUC value (float between 0 and 1) """ if not predictions or not labels: console.print("[red]✗ Predictions or labels are empty[/red]") return 0.0 # Align predictions and labels sample_ids = sorted(set(predictions.keys()) & set(labels.keys())) if len(sample_ids) == 0: console.print("[red]✗ No overlapping samples between predictions and labels[/red]") return 0.0 y_true = np.array([labels[id] for id in sample_ids]) y_scores = np.array([predictions[id] for id in sample_ids]) # Check if we have both positive and negative samples if len(np.unique(y_true)) < 2: console.print("[yellow]⚠ Only one class present in labels, AUC is not defined[/yellow]") return 0.5 try: # Calculate AUC using sklearn auc = roc_auc_score(y_true, y_scores) return float(auc) except ValueError as e: console.print(f"[red]✗ Error calculating AUC: {e}[/red]") return 0.5 def get_debug_info( sample_id: str, logprobs_dict: Dict[str, float], predicted_prob: float, ground_truth: str, label: int, user_id: str = "" ) -> Dict[str, Any]: """ Prepare debug information for a sample Args: sample_id: Sample ID logprobs_dict: Dictionary of token probabilities predicted_prob: Predicted probability for positive class ground_truth: Ground truth answer string label: Ground truth label (0 or 1) user_id: User ID (optional) Returns: Debug information dictionary """ debug_item = { "sample_id": sample_id, "ground_truth": ground_truth, "label": label, "predicted_prob": predicted_prob, "logprobs": logprobs_dict, } if user_id: debug_item["user_id"] = user_id return debug_item ================================================ FILE: benchmarks/benchmark/tasks/v1_0/mfu_evaluator.py ================================================ """ MFU (Model FLOPs Utilization) Evaluator Computes MFU metric based on: - Model parameters - Token statistics - GPU hardware information - Generation time MFU = (num_params × 2 × total_tokens) / (gpu_flops × gpu_count × time_s) """ from typing import Dict, Any, Optional from benchmark.console import console, warning_style, dim_style def compute_mfu( num_params: float, total_tokens: int, gpu_tflops: float, gpu_count: int, time_seconds: float, ) -> float: """ Compute Model FLOPs 
Utilization (MFU) Formula: MFU = (num_params × 2 × total_tokens) / (gpu_flops × gpu_count × time_s) Args: num_params: Number of model parameters total_tokens: Total tokens processed (input + output) gpu_tflops: GPU theoretical peak TFLOPS (for BF16/FP16) gpu_count: Number of GPUs used time_seconds: Total time in seconds Returns: MFU value (0-1, typically 0.01-0.5 for inference) """ if time_seconds <= 0: console.print("⚠ Time is zero or negative, cannot compute MFU", style=warning_style) return 0.0 if gpu_tflops is None or gpu_tflops <= 0: console.print("⚠ GPU TFLOPS is not available, cannot compute MFU", style=warning_style) return 0.0 if num_params is None or num_params <= 0: console.print("⚠ Model parameters not specified, cannot compute MFU", style=warning_style) return 0.0 # Convert TFLOPS to FLOPS gpu_flops = gpu_tflops * 1e12 # Compute total FLOPs required # For inference: FLOPs ≈ 2 × num_params × num_tokens total_flops = num_params * 2 * total_tokens # Compute theoretical peak FLOPs available theoretical_flops = gpu_flops * gpu_count * time_seconds # MFU = actual FLOPs / theoretical FLOPs mfu = total_flops / theoretical_flops return mfu def compute_mfu_from_generation_data(gen_data: Dict[str, Any]) -> Optional[Dict[str, Any]]: """ Compute MFU metrics from generation result data Args: gen_data: Generation result data from JSON file, containing: - num_params: Model parameters - mfu_stats_aggregate: MFU statistics aggregate (dict with lists) - hardware_info: GPU hardware info - total_time: Total generation time Returns: Dictionary containing MFU metrics, or None if cannot compute For multi-stage generation, returns MFU_stages as a list """ # Extract required fields num_params = gen_data.get("num_params") mfu_stats_aggregate = gen_data.get("mfu_stats_aggregate", {}) hardware_info = gen_data.get("hardware_info", {}) total_time = gen_data.get("total_time", 0) # Validate required data if not num_params: console.print("[DEBUG] MFU: Model parameters not available in generation data, skipping MFU calculation", style=dim_style) return None if not mfu_stats_aggregate or len(mfu_stats_aggregate.get("total_time", [])) == 0: console.print("[DEBUG] MFU: MFU statistics not available in generation data, skipping MFU calculation", style=dim_style) return None if not hardware_info: console.print("[DEBUG] MFU: Hardware info not available in generation data, skipping MFU calculation", style=dim_style) return None # Extract hardware info gpu_tflops = hardware_info.get("gpu_tflops") gpu_count = hardware_info.get("gpu_count", 1) gpu_model = hardware_info.get("gpu_model", "unknown") if gpu_tflops is None: console.print(f"⚠ GPU TFLOPS not available for {gpu_model}, cannot compute MFU", style=warning_style) return None # Extract lists from aggregate stats total_input_tokens_list = mfu_stats_aggregate.get("total_input_tokens", []) total_output_tokens_list = mfu_stats_aggregate.get("total_output_tokens", []) total_time_list = mfu_stats_aggregate.get("total_time", []) # Validate list lengths are consistent if not (len(total_input_tokens_list) == len(total_output_tokens_list) == len(total_time_list)): console.print( f"⚠ Inconsistent list lengths in mfu_stats_aggregate: " f"input_tokens={len(total_input_tokens_list)}, " f"output_tokens={len(total_output_tokens_list)}, " f"times={len(total_time_list)}", style=warning_style ) return None num_stages = len(total_time_list) # Compute MFU for each stage mfu_list = [] for stage_idx in range(num_stages): stage_num = stage_idx + 1 total_input_tokens = 
total_input_tokens_list[stage_idx] if stage_idx < len(total_input_tokens_list) else 0
        total_output_tokens = total_output_tokens_list[stage_idx] if stage_idx < len(total_output_tokens_list) else 0
        stage_time = total_time_list[stage_idx]
        total_tokens = total_input_tokens + total_output_tokens

        if total_tokens == 0:
            console.print(f"⚠ Stage {stage_num}: Total tokens is zero, skipping", style=warning_style)
            return None
        if stage_time <= 0:
            console.print(f"⚠ Stage {stage_num}: Stage time is zero or negative, skipping", style=warning_style)
            return None

        # Compute MFU for this stage using per-stage time
        mfu = compute_mfu(
            num_params=num_params,
            total_tokens=total_tokens,
            gpu_tflops=gpu_tflops,
            gpu_count=gpu_count,
            time_seconds=stage_time,
        )
        mfu_list.append(round(mfu, 6))

    if len(mfu_list) == 0:
        console.print("⚠ No valid stages for MFU calculation", style=warning_style)
        return None

    # Create metrics with symmetric list structure
    mfu_metrics = {
        "mfu": mfu_list,
        "gpu_model": gpu_model,
        "gpu_count": gpu_count,
        "num_params": num_params,
        "total_input_tokens": total_input_tokens_list,
        "total_output_tokens": total_output_tokens_list,
        "stage_time": total_time_list,
    }
    return mfu_metrics


================================================
FILE: benchmarks/benchmark/tasks/v1_0/qwen3.jinja2
================================================
{%- if tools %}
    {{- '<|im_start|>system\n' }}
    {%- if messages[0].role == 'system' %}
        {{- messages[0].content + '\n\n' }}
    {%- endif %}
    {{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
    {%- for tool in tools %}
        {{- "\n" }}
        {{- tool | tojson }}
    {%- endfor %}
    {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
{%- else %}
    {%- if messages[0].role == 'system' %}
        {{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}
    {%- endif %}
{%- endif %}
{%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
{%- for message in messages[::-1] %}
    {%- set index = (messages|length - 1) - loop.index0 %}
    {%- if ns.multi_step_tool and message.role == "user" and message.content is string and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}
        {%- set ns.multi_step_tool = false %}
        {%- set ns.last_query_index = index %}
    {%- endif %}
{%- endfor %}
{%- for message in messages %}
    {%- if message.content is string %}
        {%- set content = message.content %}
    {%- else %}
        {%- set content = '' %}
    {%- endif %}
    {%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
        {{- '<|im_start|>' + message.role + '\n' + content + '<|im_end|>' + '\n' }}
    {%- elif message.role == "assistant" %}
        {%- set reasoning_content = '' %}
        {%- if message.reasoning_content is string %}
            {%- set reasoning_content = message.reasoning_content %}
        {%- else %}
            {%- if '</think>' in content %}
                {%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
                {%- set content = content.split('</think>')[-1].lstrip('\n') %}
            {%- endif %}
        {%- endif %}
        {%- if loop.index0 > ns.last_query_index %}
            {%- if loop.last or (not loop.last and reasoning_content) %}
                {{- '<|im_start|>' + message.role + '\n<think>\n' + reasoning_content.strip('\n') + '\n</think>\n\n' + content.lstrip('\n') }}
            {%- else %}
                {{- '<|im_start|>' + message.role + '\n' + content }}
            {%- endif %}
        {%- else %}
            {{- '<|im_start|>' + message.role + '\n' + content }}
        {%- endif %}
        {%- if message.tool_calls %}
            {%- for tool_call in message.tool_calls %}
                {%- if (loop.first and content) or (not loop.first) %}
                    {{- '\n' }}
                {%- endif %}
                {%- if tool_call.function %}
                    {%- set tool_call = tool_call.function %}
                {%- endif %}
                {{- '<tool_call>\n{"name": "' }}
                {{- tool_call.name }}
                {{- '", "arguments": ' }}
                {%- if tool_call.arguments is string %}
                    {{- tool_call.arguments }}
                {%- else %}
                    {{- tool_call.arguments | tojson }}
                {%- endif %}
                {{- '}\n</tool_call>' }}
            {%- endfor %}
        {%- endif %}
        {{- '<|im_end|>\n' }}
    {%- elif message.role == "tool" %}
        {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
            {{- '<|im_start|>user' }}
        {%- endif %}
        {{- '\n<tool_response>\n' }}
        {{- content }}
        {{- '\n</tool_response>' }}
        {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
            {{- '<|im_end|>\n' }}
        {%- endif %}
    {%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
    {{- '<|im_start|>assistant\n' }}
    {%- if enable_thinking is defined and enable_thinking is false %}
        {{- '<think>\n\n</think>\n\n' }}
    {%- endif %}
{%- endif %}


================================================
FILE: benchmarks/benchmark/tasks/v1_0/qwen3_soft_switch.jinja2
================================================
{%- if tools %}
    {{- '<|im_start|>system\n' }}
    {%- if messages[0].role == 'system' %}
        {{- messages[0].content + '\n\n' }}
    {%- endif %}
    {{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
    {%- for tool in tools %}
        {{- "\n" }}
        {{- tool | tojson }}
    {%- endfor %}
    {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
{%- else %}
    {%- if messages[0].role == 'system' %}
        {{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}
    {%- endif %}
{%- endif %}
{%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
{%- for message in messages[::-1] %}
    {%- set index = (messages|length - 1) - loop.index0 %}
    {%- if ns.multi_step_tool and message.role == "user" and message.content is string and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}
        {%- set ns.multi_step_tool = false %}
        {%- set ns.last_query_index = index %}
    {%- endif %}
{%- endfor %}
{%- for message in messages %}
    {%- if message.content is string %}
        {%- set content = message.content %}
    {%- else %}
        {%- set content = '' %}
    {%- endif %}
    {%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
        {%- set suffix = '' %}
        {%- if message.role == "user" and loop.index0 == ns.last_query_index %}
            {%- if enable_thinking is defined and enable_thinking is false %}
                {%- set suffix = '/no_think' %}
            {%- else %}
                {%- set suffix = '/think' %}
            {%- endif %}
        {%- endif %}
        {{- '<|im_start|>' + message.role + '\n' + content + suffix + '<|im_end|>' + '\n' }}
    {%- elif message.role == "assistant" %}
        {%- set reasoning_content = '' %}
        {%- if message.reasoning_content is string %}
            {%- set reasoning_content = message.reasoning_content %}
        {%- else %}
            {%- if '</think>' in content %}
                {%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
                {%- set content = content.split('</think>')[-1].lstrip('\n') %}
            {%- endif %}
        {%- endif %}
        {%- if loop.index0 > ns.last_query_index %}
            {%- if loop.last or (not loop.last and reasoning_content) %}
                {{- '<|im_start|>' + message.role + '\n<think>\n' + reasoning_content.strip('\n') + '\n</think>\n\n' + content.lstrip('\n') }}
            {%- else %}
                {{- '<|im_start|>' + message.role + '\n' + content }}
            {%- endif %}
        {%- else %}
            {{- '<|im_start|>' + message.role + '\n' + content }}
        {%- endif %}
        {%- if message.tool_calls %}
            {%- for tool_call in message.tool_calls %}
                {%- if (loop.first and content) or (not loop.first) %}
                    {{- '\n' }}
                {%- endif %}
                {%- if tool_call.function %}
                    {%- set tool_call = tool_call.function %}
                {%- endif %}
                {{- '<tool_call>\n{"name": "' }}
                {{- tool_call.name }}
                {{- '", "arguments": ' }}
                {%- if tool_call.arguments is string %}
                    {{- tool_call.arguments }}
                {%- else %}
                    {{- tool_call.arguments | tojson }}
                {%- endif %}
                {{- '}\n</tool_call>' }}
            {%- endfor %}
        {%- endif %}
        {{- '<|im_end|>\n' }}
    {%- elif message.role == "tool" %}
        {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
            {{- '<|im_start|>user' }}
        {%- endif %}
        {{- '\n<tool_response>\n' }}
        {{- content }}
        {{- '\n</tool_response>' }}
        {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
            {{- '<|im_end|>\n' }}
        {%- endif %}
    {%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
    {{- '<|im_start|>assistant\n' }}
    {%- if enable_thinking is defined and enable_thinking is false %}
        {{- '<think>\n\n</think>\n\n' }}
    {%- endif %}
{%- endif %}


================================================
FILE: benchmarks/benchmark/tasks/v1_0/rec_reason/__init__.py
================================================
"""
Recommendation Reason Task Module
"""
from .config import REC_REASON_CONFIG
from .evaluator import RecoReasonEvaluator
from . import utils

__all__ = [
    "REC_REASON_CONFIG",
    "RecoReasonEvaluator",
    "utils",
]


================================================
FILE: benchmarks/benchmark/tasks/v1_0/rec_reason/config.py
================================================
"""
Recommendation Reason Task Configuration
"""

# Recommendation Reason Task Configuration
REC_REASON_CONFIG = {
    "name": "rec_reason",
    "source": "Kuaishou Internal",
    "splits": ["test"],
    "size": 470,
    "sample_size": 470,
    "description": "Recommendation reason inference",
    "data_fields": {
        "messages_field": "messages",
        "metadata_field": "metadata",
    },
    "prompt_config": {
        "enable_thinking": True,  # Enable thinking mode for apply_chat_template
        "custom_chat_template": "qwen3_soft_switch.jinja2",  # Custom jinja2 template (file in v1_0 directory)
    },
    "generation_config": {
        "num_return_sequences": 1,
        "max_new_tokens": 2000,
        "temperature": 0.01,
        "top_p": 0.95,
        "repetition_penalty": 1.1,
        "do_sample": False,
        "num_return_thinking_sequences": 1,
        "max_new_thinking_tokens": 10000,
    },
    "evaluation_config": {
        "metrics": ["avg_score"],
        # LLM multi-dimensional evaluation config
        "llm_eval_enabled": True,  # Whether to enable LLM evaluation
        "llm_judge_model": "gemini",  # Judge LLM type: gemini/deepseek/claude
        "llm_max_workers": 1,  # Concurrent workers for LLM calls
        "llm_max_samples": 470,  # Max samples to evaluate (None for all)
    }
}


================================================
FILE: benchmarks/benchmark/tasks/v1_0/rec_reason/evaluator.py
================================================
"""
Recommendation Reason Evaluator

Evaluates model predictions on Recommendation Reason task
using LLM-based multi-dimensional evaluation.
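The judge is expected to reply with a bare JSON verdict, which is parsed by
utils.extract_json_from_response; an illustrative reply:

    {"llm_score": 4, "llm_reason": "核心逻辑一致,内容方向正确"}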
""" import os from typing import Dict, Any, Tuple, List from benchmark.console import console from benchmark.tasks.v1_0.base_evaluator import BaseEval from benchmark.tasks.v1_0.rec_reason.utils import extract_after_think, evaluate_reasoning class RecoReasonEvaluator(BaseEval): """Recommendation Reason task evaluator""" @property def required_metrics(self) -> List[str]: """Define required overall metrics for Recommendation Reason evaluation""" return ["llm_score"] def _compute_metrics_from_scratch(self) -> Tuple[Dict[str, Any], Dict[str, Dict[str, Any]]]: """ Compute all metrics from scratch Returns: Tuple of (metrics, per_sample_metrics) """ total_samples = len(self.samples) # Prepare data for evaluation sample_ids = list(self.samples.keys()) predictions = [] references = [] for sample_id in sample_ids: sample = self.samples[sample_id] # Get ground truth ground_truth = sample.get("ground_truth", "") references.append(ground_truth) # Get model prediction (first generation) generations = sample.get("generations", []) if not generations: prediction = "" else: # Extract text after tag if present prediction = extract_after_think(generations[0]) predictions.append(prediction) # Get evaluation config eval_config = self.task_config.get("evaluation_config", {}) # Build per-sample metrics per_sample_metrics = {} for sample_id in sample_ids: per_sample_metrics[sample_id] = {} # Build overall metrics metrics = { "num_samples": total_samples, } # LLM Evaluation (if enabled) llm_eval_enabled = eval_config.get("llm_eval_enabled", False) if llm_eval_enabled: console.print("[cyan]LLM evaluation enabled, starting multi-dimensional evaluation...[/cyan]") llm_metrics, llm_per_sample = self._evaluate_reasoning( sample_ids=sample_ids, predictions=predictions, references=references, eval_config=eval_config ) # Merge LLM metrics into overall metrics metrics.update(llm_metrics) # Merge LLM per-sample metrics for sample_id in sample_ids: if sample_id in llm_per_sample: per_sample_metrics[sample_id].update(llm_per_sample[sample_id]) # Save debug information if requested if self.debug and self.predictions_dir: self._save_debug_info(metrics, per_sample_metrics, predictions, references) return metrics, per_sample_metrics def _evaluate_reasoning( self, sample_ids: list, predictions: list, references: list, eval_config: Dict[str, Any] ) -> Tuple[Dict[str, Any], Dict[str, Dict[str, Any]]]: """ Perform LLM-based multi-dimensional evaluation. 
Args: sample_ids: List of sample IDs predictions: List of prediction texts references: List of reference texts eval_config: Evaluation configuration Returns: Tuple of (llm_metrics, llm_per_sample_metrics) """ try: from api import get_client_from_config except ImportError as e: console.print(f"[red]Failed to import LLM evaluation modules: {e}[/red]") return {}, {} # Get LLM eval config llm_judge_model = eval_config.get("llm_judge_model", "gemini") llm_max_workers = eval_config.get("llm_max_workers", 3) llm_max_samples = eval_config.get("llm_max_samples", 300) # Create LLM client try: llm_client = get_client_from_config(llm_judge_model) console.print(f"[green]Using {llm_judge_model} as LLM judge[/green]") except Exception as e: console.print(f"[red]Failed to create LLM client for evaluation: {e}[/red]") return {}, {} # Prepare data as dicts predictions_dict = {id: pred for id, pred in zip(sample_ids, predictions)} references_dict = {id: ref for id, ref in zip(sample_ids, references)} # Get model name for cache file naming model_name = getattr(llm_client, 'model_name', llm_judge_model) # Run LLM evaluation try: llm_metrics, llm_per_sample = evaluate_reasoning( predictions=predictions_dict, references=references_dict, llm_client=llm_client, max_workers=llm_max_workers, max_samples=llm_max_samples, model_name=model_name, save_dir=self.predictions_dir, ) console.print(f"[green]LLM evaluation completed: {llm_metrics.get('llm_eval_num_samples', 0)} samples evaluated[/green]") return llm_metrics, llm_per_sample except Exception as e: console.print(f"[red]LLM evaluation failed: {e}[/red]") import traceback traceback.print_exc() return {}, {} def _save_debug_info( self, metrics: Dict[str, Any], per_sample_metrics: Dict[str, Dict[str, Any]], predictions: list, references: list ): """ Save detailed debug information to file Args: metrics: Overall metrics per_sample_metrics: Per-sample metrics predictions: List of predictions references: List of references """ # Prepare debug info debug_info = { "overall_metrics": metrics, "per_sample_metrics": per_sample_metrics, "sample_count": len(predictions), } # Add some examples sample_ids = list(self.samples.keys()) debug_info["examples"] = [] for i in range(min(10, len(sample_ids))): sample_id = sample_ids[i] debug_info["examples"].append({ "sample_id": sample_id, "prediction": predictions[i][:500] + "..." if len(predictions[i]) > 500 else predictions[i], "reference": references[i][:500] + "..." if len(references[i]) > 500 else references[i], "llm_score": per_sample_metrics[sample_id].get("llm_score"), "llm_reason": per_sample_metrics[sample_id].get("llm_reason"), }) # Save to file using base class method self._save_debug_json(debug_info, filename="debug.json") # Print summary statistics console.print(f"Total samples: {metrics['num_samples']}") # Print LLM eval metrics if available if metrics.get('llm_score') is not None: console.print(f"LLM Eval Score: {metrics['llm_score']:.4f}") ================================================ FILE: benchmarks/benchmark/tasks/v1_0/rec_reason/utils.py ================================================ """ Recommendation Reason LLM Evaluation Utilities Provides functions for extracting refined reasoning and multi-dimensional LLM evaluation. 
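Example of the refined-reasoning extraction (illustrative input):

    >>> extract_refined_reasoning("完整分析略。精炼推理:用户偏好美食类视频")
    '用户偏好美食类视频'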
""" import json import os import re from typing import Dict, List, Optional, Tuple, Any from concurrent.futures import ThreadPoolExecutor, as_completed import pandas as pd from tqdm import tqdm from benchmark.console import console EVALUATION_PROMPT = """你是一位专业的推荐系统评估专家。你的任务是评估一个AI模型生成的"推荐理由"与"标准答案"的匹配程度。 ### 评估任务 请对模型生成的推荐理由进行综合评分(1-5分)。 **核心评估原则:** 请严格按照以下步骤进行思考和评估: 1. **核心要素提取**: - 从【标准答案】中提取:推荐的核心动机(用户为什么看)+ 推荐的内容类型(看的是什么)。 - 从【模型生成】中提取:推荐的核心动机 + 推荐的内容类型。 2. **噪音过滤(关键步骤)**: - 忽略具体的措辞差异(同义词替换)。 - **忽略与推荐逻辑无关的用户画像细节**(例如:具体的年龄数字、与推理逻辑和视频内容无关的兴趣等)。 3. **匹配度分析**: - 对比核心动机:是否抓住了相同的推荐的核心动机? - 对比内容方向:推荐的视频类别/主题是否一致? 4. **评分**:基于评分标准给出最终得分。 **评分标准:** - 5分:核心逻辑与内容方向完全一致。即使表达方式不同,但语义内核完全相同。 - 4分:核心逻辑正确,内容方向正确。可能遗漏了标准答案中极次要的补充信息,或包含了无伤大雅的冗余信息。 - 3分:大方向(如视频类型)正确,但对“用户为什么喜欢”的归因不够准确,或遗漏了关键的转化动机。 - 2分:推荐逻辑有明显误读,或者推荐的内容类型与标准答案有偏差(例如:把“学习教程”理解成了“娱乐搞笑”)。 - 1分:逻辑和内容完全错误,或生成了风马牛不相及的内容。 ### 输入 **[标准答案]** {} **[模型生成]** {} ### 输出格式 你的输出必须是【纯粹的 JSON 格式】,可以被 `json.loads` 直接解析。 ```json {{ "llm_score": <1-5的整数>, "llm_reason": "<简短的打分理由,不超过50字>" }} ``` 你的评估结果 (请严格按照上述要求返回一个格式规整的 JSON,可以被 json.loads 直接解析。请不要在 JSON 数据前后添加任何额外的解释性文字或代码块标记): """ def extract_refined_reasoning(text: str) -> str: """ Extract the refined reasoning section from the full text. Finds the last occurrence of "精炼推理" and extracts the text after it. Args: text: Full text containing the reasoning Returns: Extracted refined reasoning text, or original text if pattern not found """ if not text: return "" # Find the last occurrence of "精炼推理" keyword = "精炼推理" last_pos = text.rfind(keyword) if last_pos != -1: # Extract text after "精炼推理" after_keyword = text[last_pos + len(keyword):] # Remove leading punctuation, whitespace, and markdown symbols after_keyword = re.sub(r'^[\s\*#::\n]+', '', after_keyword) if after_keyword.strip(): return after_keyword.strip() # If "精炼推理" not found, return original text return text.strip() def extract_after_think(text: str) -> str: """Extract text after the last tag if present.""" if '' in text: return text.split('')[-1].strip() return text def extract_json_from_response(response: str) -> Optional[Dict]: """ Extract JSON from LLM response. Args: response: LLM response text Returns: Parsed JSON dict or None if parsing fails """ if not response: return None try: response = response.strip() # Remove markdown code blocks if present if response.startswith('```json'): response = response[7:] elif response.startswith('```'): response = response[3:] if response.endswith('```'): response = response[:-3] return json.loads(response.strip()) except json.JSONDecodeError: # Try to find JSON object in the response match = re.search(r'\{[^{}]*\}', response, re.DOTALL) if match: try: return json.loads(match.group()) except json.JSONDecodeError: pass console.print(f"[yellow]Failed to parse JSON: {response[:200]}...[/yellow]") return None def evaluate_single( gt_reasoning: str, model_reasoning: str, llm_client ) -> Tuple[Optional[Dict], Optional[str]]: """ Evaluate a single sample using LLM. 
Args: gt_reasoning: Ground truth refined reasoning model_reasoning: Model-generated refined reasoning llm_client: LLM client instance Returns: Tuple of (evaluation_result, error_message) """ prompt = EVALUATION_PROMPT.format(gt_reasoning, model_reasoning) try: response = llm_client.generate(prompt) result = extract_json_from_response(response) if result is not None and "llm_score" in result: # Ensure score is in valid range score = result["llm_score"] if not isinstance(score, (int, float)) or score < 1 or score > 5: result["llm_score"] = 3 # Default to middle score if invalid return result, None return None, f"Failed to parse JSON or missing 'llm_score': {response[:100]}" except Exception as e: return None, f"API error: {str(e)}" def evaluate_batch( gt_reasonings: Dict[str, str], model_reasonings: Dict[str, str], llm_client, max_workers: int = 5, desc: str = "Evaluating reasoning" ) -> Tuple[Dict[str, Dict], Dict[str, str]]: """ Evaluate multiple samples in parallel. Args: gt_reasonings: Dict of {sample_id: gt_reasoning} model_reasonings: Dict of {sample_id: model_reasoning} llm_client: LLM client instance max_workers: Number of concurrent workers desc: Progress bar description Returns: Tuple of (results, errors) """ results = {} errors = {} # Only evaluate samples that have both GT and model reasoning common_ids = set(gt_reasonings.keys()) & set(model_reasonings.keys()) common_ids = {id for id in common_ids if gt_reasonings[id] and model_reasonings[id]} def process_single(sample_id: str): gt = gt_reasonings[sample_id] model = model_reasonings[sample_id] result, error = evaluate_single(gt, model, llm_client) return sample_id, result, error with ThreadPoolExecutor(max_workers=max_workers) as executor: futures = { executor.submit(process_single, sid): sid for sid in common_ids } for future in tqdm(as_completed(futures), total=len(futures), desc=desc): sample_id, result, error = future.result() if result is not None: results[sample_id] = result if error is not None: errors[sample_id] = error # Statistics total_attempted = len(common_ids) total_success = len(results) console.print(f"[cyan]{desc} statistics: {total_attempted} attempted, {total_success} successful[/cyan]") return results, errors def calculate_metrics(eval_results: Dict[str, Dict]) -> Dict[str, Any]: """ Calculate micro and macro metrics from evaluation results. Args: eval_results: Dict of {sample_id: evaluation_result} Returns: Dict with micro/macro scores """ if not eval_results: return {} # Collect scores scores = [] for sample_id, result in eval_results.items(): if "llm_score" in result: score = result["llm_score"] if isinstance(score, (int, float)) and 1 <= score <= 5: scores.append(score) metrics = {} if scores: # micro and macro are the same for single score avg_score = sum(scores) / len(scores) metrics["micro_llm_score"] = avg_score metrics["macro_llm_score"] = avg_score metrics["llm_score"] = avg_score metrics["llm_eval_num_samples"] = len(eval_results) return metrics def get_per_sample_metrics(eval_results: Dict[str, Dict]) -> Dict[str, Dict[str, Any]]: """ Extract per-sample metrics from evaluation results. 
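    Example (illustrative):

        >>> get_per_sample_metrics({"s1": {"llm_score": 4, "llm_reason": "ok"}})
        {'s1': {'llm_score': 4, 'llm_reason': 'ok'}}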
Args: eval_results: Dict of {sample_id: evaluation_result} Returns: Dict of {sample_id: {llm_score, llm_reason}} """ per_sample = {} for sample_id, result in eval_results.items(): sample_metrics = {} if "llm_score" in result: sample_metrics["llm_score"] = result["llm_score"] if "llm_reason" in result: sample_metrics["llm_reason"] = result["llm_reason"] per_sample[sample_id] = sample_metrics return per_sample def get_cache_path(save_dir: str, model_name: str) -> str: """Get the path for evaluation results cache file.""" return os.path.join(save_dir, f"llm_eval_{model_name}.json") def load_eval_cache(cache_path: str) -> Optional[Dict[str, Dict]]: """ Load evaluation results from cache. Args: cache_path: Path to cache file Returns: Dict of {sample_id: evaluation_result} or None if not found """ if not os.path.exists(cache_path): return None try: with open(cache_path, 'r', encoding='utf-8') as f: data = json.load(f) # Extract evaluation results eval_results = {} for sample_id, sample_data in data.items(): if "eval_result" in sample_data and sample_data["eval_result"]: eval_results[sample_id] = sample_data["eval_result"] console.print(f"[green]Loaded {len(eval_results)} cached evaluation results[/green]") return eval_results except Exception as e: console.print(f"[yellow]Failed to load evaluation cache: {e}[/yellow]") return None def save_eval_results( save_dir: str, sample_ids: List[str], gt_reasonings: Dict[str, str], model_reasonings: Dict[str, str], eval_results: Dict[str, Dict], model_name: str ): """ Save evaluation results to file. Args: save_dir: Directory to save the file sample_ids: List of sample IDs gt_reasonings: Dict of {sample_id: gt_reasoning} model_reasonings: Dict of {sample_id: model_reasoning} eval_results: Dict of {sample_id: evaluation_result} model_name: Model name for filename """ os.makedirs(save_dir, exist_ok=True) save_path = get_cache_path(save_dir, model_name) detailed_results = {} for sample_id in sample_ids: detailed_results[sample_id] = { "gt_reasoning": gt_reasonings.get(sample_id, ""), "model_reasoning": model_reasonings.get(sample_id, ""), "eval_result": eval_results.get(sample_id, {}), } with open(save_path, 'w', encoding='utf-8') as f: json.dump(detailed_results, f, ensure_ascii=False, indent=2) console.print(f"[green]Evaluation results saved to {save_path}[/green]") def evaluate_reasoning( predictions: Dict[str, str], references: Dict[str, str], llm_client, max_workers: int = 5, max_samples: Optional[int] = None, model_name: str = "unknown", save_dir: Optional[str] = None, ) -> Tuple[Dict[str, Any], Dict[str, Dict[str, Any]]]: """ Main evaluation function for recommendation reasoning. 
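    Typical usage (illustrative sketch; get_client_from_config is the factory
    RecoReasonEvaluator itself uses, and the save_dir path is hypothetical):

        from api import get_client_from_config

        client = get_client_from_config("gemini")
        metrics, per_sample = evaluate_reasoning(
            predictions, references, client,
            max_workers=4, save_dir="results/rec_reason",
        )
        print(metrics["llm_score"])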
Args: predictions: Dict of {sample_id: prediction_text} references: Dict of {sample_id: reference_text} llm_client: LLM client instance for evaluation max_workers: Number of concurrent workers max_samples: Maximum samples to evaluate (None for all) model_name: Model name for cache file naming save_dir: Directory to save results Returns: Tuple of (metrics, per_sample_metrics) """ # Select samples all_sample_ids = sorted(set(predictions.keys()) & set(references.keys())) if max_samples is not None and max_samples < len(all_sample_ids): sample_ids = all_sample_ids[:max_samples] console.print(f"[cyan]Selected {len(sample_ids)} samples for LLM evaluation[/cyan]") else: sample_ids = all_sample_ids console.print(f"[cyan]Evaluating all {len(sample_ids)} samples[/cyan]") # Extract refined reasoning from both GT and model outputs console.print("[cyan]Extracting refined reasoning...[/cyan]") gt_reasonings = {} model_reasonings = {} for sample_id in sample_ids: # Extract from reference (GT) gt_text = references.get(sample_id, "") gt_reasonings[sample_id] = extract_refined_reasoning(gt_text) # Extract from prediction (model output) pred_text = predictions.get(sample_id, "") pred_text = extract_after_think(pred_text) # Remove tags first model_reasonings[sample_id] = extract_refined_reasoning(pred_text) # Load cached results if available eval_results = {} if save_dir: cache_path = get_cache_path(save_dir, model_name) cached_results = load_eval_cache(cache_path) if cached_results: eval_results = {k: v for k, v in cached_results.items() if k in sample_ids} # Find samples that need evaluation missing_ids = set(sample_ids) - set(eval_results.keys()) missing_ids = { id for id in missing_ids if gt_reasonings.get(id) and model_reasonings.get(id) } if missing_ids: console.print(f"[cyan]Evaluating {len(missing_ids)} samples with LLM...[/cyan]") missing_gt = {id: gt_reasonings[id] for id in missing_ids} missing_model = {id: model_reasonings[id] for id in missing_ids} new_results, errors = evaluate_batch( missing_gt, missing_model, llm_client, max_workers ) eval_results.update(new_results) if errors: console.print(f"[red]Evaluation errors: {len(errors)} samples[/red]") else: console.print(f"[green]All {len(sample_ids)} samples already evaluated (from cache)[/green]") # Calculate metrics metrics = calculate_metrics(eval_results) per_sample_metrics = get_per_sample_metrics(eval_results) # Save results if save_dir: save_eval_results( save_dir, sample_ids, gt_reasonings, model_reasonings, eval_results, model_name ) # Print summary console.print(f"[green]LLM evaluation completed: {metrics.get('llm_eval_num_samples', 0)} samples[/green]") console.print(f"[green] LLM Score: {metrics.get('llm_score', 0):.4f}[/green]") return metrics, per_sample_metrics ================================================ FILE: benchmarks/benchmark/tasks/v1_0/recommendation/__init__.py ================================================ """ Recommendation Task Module Universal module for all recommendation tasks including: - label_cond: Predict next video given specified consumption behavior - video: Next video prediction - product: Predict next clicked product - ad: Predict next clicked advertisement """ from .config import ( LABEL_COND_CONFIG, VIDEO_CONFIG, PRODUCT_CONFIG, AD_CONFIG, INTERACTIVE_CONFIG, RECOMMENDATION_PROMPT_CONFIG, RECOMMENDATION_TASK_CONFIGS, RECOMMENDATION_GENERATION_CONFIG, RECOMMENDATION_EVALUATION_CONFIG, ) from .evaluator import RecommendationEvaluator from . 
import utils __all__ = [ # Configs "LABEL_COND_CONFIG", "VIDEO_CONFIG", "PRODUCT_CONFIG", "AD_CONFIG", "INTERACTIVE_CONFIG", "RECOMMENDATION_PROMPT_CONFIG", "RECOMMENDATION_TASK_CONFIGS", "RECOMMENDATION_GENERATION_CONFIG", "RECOMMENDATION_EVALUATION_CONFIG", # Classes "RecommendationEvaluator", # Utils module "utils", ] ================================================ FILE: benchmarks/benchmark/tasks/v1_0/recommendation/config.py ================================================ """ Recommendation Task Configurations This module contains configurations for all recommendation tasks including: - label_cond: Predict next video given specified consumption behavior - video: Next video prediction - product: Predict next clicked product - ad: Predict next clicked advertisement """ # Common prompt config for recommendation tasks RECOMMENDATION_PROMPT_CONFIG = { "enable_thinking": False, "custom_chat_template": "qwen3_soft_switch.jinja2", } # Common generation config for recommendation tasks RECOMMENDATION_GENERATION_CONFIG = { "num_return_sequences": 128, "max_new_tokens": 3, "temperature": 0.6, "top_p": 0.95, "top_k": 50, "presence_penalty": 0, "frequency_penalty": 0, "prompt_token": "<|sid_begin|>", # Token to append for two-stage generation "max_new_thinking_tokens": 1000, "num_return_thinking_sequences": 8, # Number of thinking candidates to generate in stage 1 "num_beams": 16, } # Common evaluation config for recommendation tasks RECOMMENDATION_EVALUATION_CONFIG = { "metrics": ["pass@k", "position1_pass@k", "recall@k"], "k_values": [1, 32], "select_k": "first_k", # Strategy for selecting k predictions: 'first_k' or 'top_k_by_logprobs' # PID-based evaluation settings "evaluation_mode": "both", # Evaluation mode: 'sid', 'pid', or 'both' "sid_to_pid_strategy": "most_popular_after_downsampling", # Strategy for SID->PID conversion: 'most_popular_originally', 'most_popular_after_downsampling', or 'random' } # Label Cond Task Configuration LABEL_COND_CONFIG = { "name": "label_cond", "source": "Kuaishou Internal", "splits": ["test"], "size": 34891, "sample_size": 34891, "description": "Predict next video given specified consumption behavior", "data_fields": { "messages_field": "messages", "metadata_field": "metadata", }, "prompt_config": RECOMMENDATION_PROMPT_CONFIG.copy(), "generation_config": RECOMMENDATION_GENERATION_CONFIG.copy(), "evaluation_config": RECOMMENDATION_EVALUATION_CONFIG.copy(), } # Video Task Configuration VIDEO_CONFIG = { "name": "video", "source": "Kuaishou Internal", "splits": ["test"], "size": 38781, "sample_size": 38781, "description": "Next video prediction", "data_fields": { "messages_field": "messages", "metadata_field": "metadata", }, "prompt_config": RECOMMENDATION_PROMPT_CONFIG.copy(), "generation_config": RECOMMENDATION_GENERATION_CONFIG.copy(), "evaluation_config": RECOMMENDATION_EVALUATION_CONFIG.copy(), } # Product Task Configuration PRODUCT_CONFIG = { "name": "product", "source": "Kuaishou Internal", "splits": ["test"], "size": 27910, "sample_size": 27910, "description": "Predict next clicked product", "data_fields": { "messages_field": "messages", "metadata_field": "metadata", }, "prompt_config": RECOMMENDATION_PROMPT_CONFIG.copy(), "generation_config": RECOMMENDATION_GENERATION_CONFIG.copy(), "evaluation_config": RECOMMENDATION_EVALUATION_CONFIG.copy(), } # Ad Task Configuration AD_CONFIG = { "name": "ad", "source": "Kuaishou Internal", "splits": ["test"], "size": 27677, "sample_size": 27677, "description": "Predict next clicked advertisement", "data_fields": {
"messages_field": "messages", "metadata_field": "metadata", }, "prompt_config": RECOMMENDATION_PROMPT_CONFIG.copy(), "generation_config": RECOMMENDATION_GENERATION_CONFIG.copy(), "evaluation_config": RECOMMENDATION_EVALUATION_CONFIG.copy(), } # Interactive Task Configuration INTERACTIVE_CONFIG = { "name": "interactive", "source": "Kuaishou Internal", "splits": ["test"], "size": 1000, "sample_size": 1000, "description": "Predict next interacted video", "data_fields": { "messages_field": "messages", "metadata_field": "metadata", }, "prompt_config": RECOMMENDATION_PROMPT_CONFIG.copy(), "generation_config": RECOMMENDATION_GENERATION_CONFIG.copy(), "evaluation_config": RECOMMENDATION_EVALUATION_CONFIG.copy(), } # Task configuration mapping RECOMMENDATION_TASK_CONFIGS = { "label_cond": LABEL_COND_CONFIG, "video": VIDEO_CONFIG, "product": PRODUCT_CONFIG, "ad": AD_CONFIG, "interactive": INTERACTIVE_CONFIG, } ================================================ FILE: benchmarks/benchmark/tasks/v1_0/recommendation/evaluator.py ================================================ """ Recommendation Task Evaluator Universal evaluator for all recommendation tasks. Computes Pass@k and Position1_Pass@k metrics. """ import json from typing import Dict, Any, Tuple, List from benchmark.console import console, warning_style from benchmark.tasks.v1_0.base_evaluator import BaseEval from benchmark.tasks.v1_0.recommendation import utils as utils_sid from benchmark.tasks.v1_0.recommendation import utils_by_pid as utils_pid class RecommendationEvaluator(BaseEval): """ Universal evaluator for recommendation tasks Supports: - label_cond: Predict next video given specified consumption behavior - video: Next video prediction - product: Predict next clicked product - ad: Predict next clicked advertisement Metrics: - Pass@k: Check if any of top-k predictions match any ground truth SID - Position1_Pass@k: Check if any of top-k predictions match the first ground truth SID """ @property def required_metrics(self) -> List[str]: """Define required overall metrics for Recommendation evaluation""" k_values = self.task_config.get("evaluation_config", {}).get("k_values", [128]) evaluation_mode = self.task_config.get("evaluation_config", {}).get("evaluation_mode", "sid") metrics = [] if evaluation_mode in ("sid", "both"): for k in k_values: metrics.extend([f"pass@{k}", f"position1_pass@{k}", f"recall@{k}"]) if evaluation_mode in ("pid", "both"): for k in k_values: metrics.extend([f"pid_pass@{k}", f"pid_position1_pass@{k}", f"pid_recall@{k}"]) return metrics def _select_generations_by_strategy( self, generations: List[str], logprobs: List[float], strategy: str ) -> List[str]: """ Select and reorder generations based on the specified strategy Args: generations: List of generation strings logprobs: List of cumulative logprobs for each generation strategy: Selection strategy ('first_k' or 'top_k_by_logprobs') Returns: Reordered list of generations Raises: ValueError: If strategy is 'top_k_by_logprobs' but logprobs data is invalid """ if strategy == "first_k": # Keep original order return generations elif strategy == "top_k_by_logprobs": # Validate logprobs data if not logprobs: raise ValueError( f"Strategy 'top_k_by_logprobs' requires logprobs data, but logprobs is empty. " f"Please ensure the generation was run with logprobs enabled." ) if len(logprobs) != len(generations): raise ValueError( f"Strategy 'top_k_by_logprobs' requires logprobs length to match generations length. 
" f"Got logprobs length {len(logprobs)}, generations length {len(generations)}." ) # Sort generations by logprobs in descending order (higher logprob = better) paired = list(zip(generations, logprobs)) paired_sorted = sorted(paired, key=lambda x: x[1], reverse=True) # Deduplicate while preserving order (keep first occurrence with highest logprob) seen = set() unique_generations = [] for gen, _ in paired_sorted: if gen not in seen: seen.add(gen) unique_generations.append(gen) return unique_generations else: raise ValueError( f"Unknown selection strategy: '{strategy}'. " f"Supported strategies: 'first_k', 'top_k_by_logprobs'" ) def _evaluate_single_mode( self, k_values: List[int], evaluation_mode: str, select_k_strategy: str, code_to_pid: Dict[int, List[Tuple[int, float]]] = None, sid_to_pid_strategy: str = "most_popular" ) -> Tuple[Dict[str, int], Dict[str, int], Dict[str, float], Dict[str, Dict[str, Any]], Dict[str, List[Dict[str, Any]]]]: """ Evaluate samples using a single mode (SID or PID) Args: k_values: List of k values to compute evaluation_mode: Either 'sid' or 'pid' select_k_strategy: Selection strategy for generations code_to_pid: PID mapping dictionary (required for 'pid' mode) sid_to_pid_strategy: Strategy for SID->PID conversion ("most_popular" or "random") Returns: Tuple of (pass_counts, position1_pass_counts, recall_sums, per_sample_metrics, debug_info_lists) """ # Select utils module based on mode if evaluation_mode == "sid": utils = utils_sid elif evaluation_mode == "pid": if code_to_pid is None: raise ValueError("code_to_pid is required for PID evaluation mode") utils = utils_pid else: raise ValueError(f"Invalid evaluation_mode: {evaluation_mode}") # Initialize counters pass_at_k_counts = {k: 0 for k in k_values} position1_pass_at_k_counts = {k: 0 for k in k_values} recall_at_k_sums = {k: 0.0 for k in k_values} # Per-sample metrics collection per_sample_metrics = {} # Debug information collection debug_info = { "passed_samples": [], "failed_samples": [], "no_generation_samples": [], } # Helper function to create failed metrics def create_failed_metrics(): """Create metrics dict for failed samples (all False/0.0)""" metrics = {} for k in k_values: metrics[f"pass@{k}"] = False metrics[f"position1_pass@{k}"] = False metrics[f"recall@{k}"] = 0.0 return metrics for sample_id, sample in self.samples.items(): # Get model predictions generations = sample.get("generations", []) logprobs = sample.get("logprobs", []) if not generations: # No generation, treat as failure per_sample_metrics[sample_id] = create_failed_metrics() if self.debug: debug_info["no_generation_samples"].append({ "sample_id": sample_id, "ground_truth": sample.get("ground_truth", "") if evaluation_mode == "sid" else sample.get("metadata", {}).get("answer_pid", []), }) continue # Get ground truth based on mode if evaluation_mode == "sid": ground_truth = sample.get("ground_truth", "") ground_truth_ids = utils.extract_ids_from_answer(ground_truth) first_ground_truth_id = utils.extract_first_id_from_answer(ground_truth) else: # pid mode # Try answer_pid first, fallback to answer_iid if not available ground_truth_pids = sample.get("metadata", {}).get("answer_pid") if ground_truth_pids is None: ground_truth_pids = sample.get("metadata", {}).get("answer_iid", []) if isinstance(ground_truth_pids, str): ground_truth_pids = json.loads(ground_truth_pids) ground_truth_ids = utils.extract_ids_from_answer(ground_truth_pids) first_ground_truth_id = utils.extract_first_id_from_answer(ground_truth_pids) if not ground_truth_ids: 
console.print(f"Sample {sample_id}: no valid ID found in ground truth ({evaluation_mode} mode)", style=warning_style) per_sample_metrics[sample_id] = create_failed_metrics() continue # Apply selection strategy to reorder generations selected_generations = self._select_generations_by_strategy( generations=generations, logprobs=logprobs, strategy=select_k_strategy ) # Extract predicted IDs from selected generations if evaluation_mode == "sid": predicted_ids = [utils.extract_id_from_generation(gen) for gen in selected_generations] else: # pid mode predicted_ids = [utils.extract_id_from_generation(gen, code_to_pid, sid_to_pid_strategy) for gen in selected_generations] # Compute metrics for each k sample_pass_results = {} sample_position1_pass_results = {} sample_recall_results = {} for k in k_values: # Compute Pass@k pass_result = utils.compute_pass_at_k(predicted_ids, ground_truth_ids, k) sample_pass_results[f"pass@{k}"] = pass_result if pass_result: pass_at_k_counts[k] += 1 # Compute Position1_Pass@k position1_pass_result = utils.compute_position1_pass_at_k( predicted_ids, first_ground_truth_id, k ) sample_position1_pass_results[f"position1_pass@{k}"] = position1_pass_result if position1_pass_result: position1_pass_at_k_counts[k] += 1 # Compute Recall@k recall_result = utils.compute_recall_at_k(predicted_ids, ground_truth_ids, k) sample_recall_results[f"recall@{k}"] = recall_result recall_at_k_sums[k] += recall_result # Store per-sample metrics sample_metrics = { **sample_pass_results, **sample_position1_pass_results, **sample_recall_results } # For PID mode, save pid_generations (convert None/invalid to -1) if evaluation_mode == "pid": pid_generations = [pid if pid is not None else -1 for pid in predicted_ids] sample_metrics["generations"] = pid_generations per_sample_metrics[sample_id] = sample_metrics # Debug information collection if self.debug: metadata = sample.get("metadata", {}) raw_prompt = metadata.get("raw_prompt", "") if evaluation_mode == "sid": debug_item = utils.get_debug_info( sample_id=sample_id, generations=generations, ground_truth=sample.get("ground_truth", ""), pass_results=sample_pass_results, position1_pass_results=sample_position1_pass_results, raw_prompt=raw_prompt, ) else: # pid mode answer_pid = metadata.get("answer_pid", metadata.get("answer_iid", [])) if isinstance(answer_pid, str): answer_pid = json.loads(answer_pid) debug_item = utils.get_debug_info( sample_id=sample_id, generations=generations, ground_truth=answer_pid, pass_results=sample_pass_results, position1_pass_results=sample_position1_pass_results, code_to_pid=code_to_pid, strategy=sid_to_pid_strategy, raw_prompt=raw_prompt, ) # Check if any pass@k is True if any(sample_pass_results.values()): debug_info["passed_samples"].append(debug_item) else: debug_info["failed_samples"].append(debug_item) return pass_at_k_counts, position1_pass_at_k_counts, recall_at_k_sums, per_sample_metrics, debug_info def _calculate_metrics_from_counts( self, pass_counts: Dict[int, int], position1_pass_counts: Dict[int, int], recall_sums: Dict[int, float], total_samples: int, k_values: List[int], prefix: str = "" ) -> Dict[str, float]: """ Calculate metrics from counts Args: pass_counts: Pass@k counts for each k position1_pass_counts: Position1_Pass@k counts for each k recall_sums: Recall@k sums for each k total_samples: Total number of samples k_values: List of k values prefix: Prefix for metric names (e.g., "pid_") Returns: Dictionary of calculated metrics """ metrics = {} for k in k_values: metrics[f"{prefix}pass@{k}"] = 
pass_counts[k] / total_samples if total_samples > 0 else 0.0 metrics[f"{prefix}position1_pass@{k}"] = position1_pass_counts[k] / total_samples if total_samples > 0 else 0.0 metrics[f"{prefix}recall@{k}"] = recall_sums[k] / total_samples if total_samples > 0 else 0.0 return metrics def _compute_metrics_from_scratch(self) -> Tuple[Dict[str, Any], Dict[str, Dict[str, Any]]]: """ Compute all evaluation metrics from scratch Returns: Tuple of (metrics, per_sample_metrics) """ total_samples = len(self.samples) # Get configuration evaluation_config = self.task_config.get('evaluation_config', {}) k_values = evaluation_config.get("k_values", [128]) select_k_strategy = evaluation_config.get('select_k', 'first_k') evaluation_mode = evaluation_config.get('evaluation_mode', 'both') sid_to_pid_strategy = evaluation_config.get('sid_to_pid_strategy', 'most_popular_after_downsampling') # Load PID mapping if needed code_to_pid = None if evaluation_mode in ("pid", "both"): from pathlib import Path task_name = self.task_config.get("name", "") if task_name == "product": mapping_filename = "sid2iid.json" else: mapping_filename = "sid2pid.json" pid_mapping_path = str(Path(self.data_dir) / mapping_filename) console.print(f"[cyan]Loading PID mapping from {pid_mapping_path}...[/cyan]") code_to_pid = utils_pid.load_pid_mapping(pid_mapping_path) # Define evaluation modes to run # Format: (mode_name, metric_prefix, debug_filename, log_message) modes_config = { "sid": [("sid", "", "debug.json", "Evaluating using SID mode...")], "pid": [("pid", "pid_", "debug_pid.json", "Evaluating using PID mode...")], "both": [ ("sid", "", "debug_sid.json", " Running SID evaluation..."), ("pid", "pid_", "debug_pid.json", " Running PID evaluation...") ] } if evaluation_mode not in modes_config: raise ValueError(f"Invalid evaluation_mode: '{evaluation_mode}'. 
Must be 'sid', 'pid', or 'both'") if evaluation_mode == "both": console.print("[cyan]Evaluating using both SID and PID modes...[/cyan]") # Initialize metrics metrics = {"total_samples": total_samples} per_sample_metrics = {} all_debug_info = {} # Run evaluation for each configured mode for mode_name, metric_prefix, debug_filename, log_message in modes_config[evaluation_mode]: console.print(f"[cyan]{log_message}[/cyan]") # Run evaluation pass_counts, position1_pass_counts, recall_sums, mode_per_sample_metrics, debug_info = self._evaluate_single_mode( k_values=k_values, evaluation_mode=mode_name, select_k_strategy=select_k_strategy, code_to_pid=code_to_pid if mode_name == "pid" else None, sid_to_pid_strategy=sid_to_pid_strategy if mode_name == "pid" else "most_popular" ) # Calculate and add metrics mode_metrics = self._calculate_metrics_from_counts( pass_counts, position1_pass_counts, recall_sums, total_samples, k_values, metric_prefix ) metrics.update(mode_metrics) # Merge per-sample metrics with appropriate prefix for sample_id, sample_metric in mode_per_sample_metrics.items(): if sample_id not in per_sample_metrics: per_sample_metrics[sample_id] = {} # Add metrics with prefix (for PID mode) or without (for SID mode) if metric_prefix: # PID mode: add prefix to metric names for metric_name, metric_value in sample_metric.items(): prefixed_name = f"{metric_prefix}{metric_name}" per_sample_metrics[sample_id][prefixed_name] = metric_value else: # SID mode: no prefix per_sample_metrics[sample_id].update(sample_metric) # Store debug info for later saving if self.debug and self.predictions_dir: all_debug_info[mode_name] = (debug_info, debug_filename, mode_metrics) # Save debug info if self.debug and self.predictions_dir: for mode_name, (debug_info, debug_filename, mode_metrics) in all_debug_info.items(): # For single mode, include all metrics; for both mode, filter by prefix if evaluation_mode == "both": prefix = "pid_" if mode_name == "pid" else "" filtered_metrics = { k: v for k, v in mode_metrics.items() if k == "total_samples" or k.startswith(prefix) } filtered_metrics["total_samples"] = total_samples else: filtered_metrics = dict(metrics) self._save_debug_info(debug_info, filtered_metrics, debug_filename) # Record configuration metrics["select_k_strategy"] = select_k_strategy metrics["evaluation_mode"] = evaluation_mode if evaluation_mode in ("pid", "both"): metrics["sid_to_pid_strategy"] = sid_to_pid_strategy return metrics, per_sample_metrics def _save_debug_info(self, debug_info: Dict[str, Any], metrics: Dict[str, Any], debug_filename: str = None): """ Save detailed debug information to file Args: debug_info: Debug information dictionary metrics: Overall metrics debug_filename: Optional custom filename (absolute path or relative to predictions_dir) """ # Add statistics to debug_info debug_info["statistics"] = { "total_samples": metrics.get("total_samples", 0), "passed_samples_count": len(debug_info.get("passed_samples", [])), "failed_samples_count": len(debug_info.get("failed_samples", [])), "no_generation_samples_count": len(debug_info.get("no_generation_samples", [])), } # Add metrics debug_info["metrics"] = metrics # Use default filename if not specified if debug_filename is None: debug_filename = "debug.json" # Save debug info to file using base class method self._save_debug_json(debug_info, filename=debug_filename) console.print(f"Total samples: {metrics['total_samples']}") console.print(f"Passed samples: {len(debug_info['passed_samples'])}") console.print(f"Failed samples: 
{len(debug_info['failed_samples'])}") console.print(f"No generation samples: {len(debug_info['no_generation_samples'])}") # Print metrics console.print("\n[bold]Metrics:[/bold]") for metric_name, metric_value in metrics.items(): if metric_name != "total_samples": console.print(f" {metric_name}: {metric_value}") # Show some failed examples if debug_info["failed_samples"]: console.print(f"\n[yellow]Failed sample examples (first 3):[/yellow]") for i, item in enumerate(debug_info["failed_samples"][:3]): console.print(f" Example {i+1}:") console.print(f" Sample ID: {item['sample_id']}") # Handle both SID and PID modes if 'ground_truth_sids' in item: console.print(f" Ground truth SIDs: {item['ground_truth_sids']}") elif 'ground_truth_pids' in item: console.print(f" Ground truth PIDs: {item['ground_truth_pids']}") console.print(f" Top 5 generations: {item['top_10_generations'][:5]}") console.print() ================================================ FILE: benchmarks/benchmark/tasks/v1_0/recommendation/utils.py ================================================ """ Recommendation Task Utilities Functions for SID extraction and recommendation metrics computation. """ from typing import Set, Dict, List, Any def extract_ids_from_answer(answer: str) -> list[str]: """Extract all SIDs from answer field, preserving original order. Returns a deduplicated list that keeps the first occurrence order. >>> extract_ids_from_answer("<|sid_begin|>123<|sid_end|><|sid_begin|>456<|sid_end|>") ['123', '456'] """ seen: set[str] = set() correct_answers: list[str] = [] for part in answer.split('<|sid_begin|>'): if '<|sid_end|>' in part: sid = part.split('<|sid_end|>')[0].strip() if sid and sid not in seen: correct_answers.append(sid) seen.add(sid) return correct_answers def extract_first_id_from_answer(answer: str) -> str: """ Extract the first SID from answer field Args: answer: String containing multiple <|sid_begin|>...<|sid_end|> patterns Returns: The first extracted SID, or empty string if none found Examples: >>> extract_first_id_from_answer("<|sid_begin|>123<|sid_end|><|sid_begin|>456<|sid_end|>") '123' """ for part in answer.split('<|sid_begin|>'): if '<|sid_end|>' in part: sid = part.split('<|sid_end|>')[0].strip() if sid: return sid return "" def extract_id_from_generation(generation: str) -> str: """ Extract SID from model generation The generation may contain: - SID directly: "123" - Wrapped in tags: "<|sid_begin|>123<|sid_end|>" - With thinking: "<think>...</think>\\n<|sid_begin|>123" (two-stage generation) Args: generation: Model generation string Returns: Extracted SID, or the stripped generation if no pattern found Examples: >>> extract_id_from_generation("<|sid_begin|>123<|sid_end|>") '123' >>> extract_id_from_generation("123") '123' >>> extract_id_from_generation("<think>reasoning</think>\\n<|sid_begin|>123") '123' """ generation = generation.strip() # If generation contains </think>, only process content after it if '</think>' in generation: generation = generation.split('</think>')[-1].strip() # Try to extract from <|sid_begin|>...<|sid_end|> pattern if '<|sid_begin|>' in generation: for part in generation.split('<|sid_begin|>'): if '<|sid_end|>' in part: sid = part.split('<|sid_end|>')[0].strip() if sid: return sid elif part.strip(): # No end marker, take the content after begin marker return part.strip() # Otherwise, return the stripped generation return generation def compute_pass_at_k( predicted_sids: List[str], ground_truth_sids: Set[str], k: int ) -> bool: """ Compute Pass@k for a single sample Pass@k definition: - Take the first k candidate SIDs from
predictions - If any of these k SIDs appears in the ground truth SIDs, return True Args: predicted_sids: List of predicted SIDs (already extracted from generations) ground_truth_sids: Set of ground truth SIDs k: Number of top predictions to consider Returns: True if any of the top-k predictions match ground truth, False otherwise """ if not predicted_sids or not ground_truth_sids: return False # Take first k predicted SIDs top_k_sids = predicted_sids[:k] # Check if any matches ground truth for sid in top_k_sids: if sid in ground_truth_sids: return True return False def compute_position1_pass_at_k( predicted_sids: List[str], first_ground_truth_sid: str, k: int ) -> bool: """ Compute Position1_Pass@k for a single sample Position1_Pass@k definition: - Take the first k candidate SIDs from predictions - Only consider the first SID in the ground truth - If any of these k SIDs matches the first ground truth, return True Args: predicted_sids: List of predicted SIDs (already extracted from generations) first_ground_truth_sid: The first ground truth SID k: Number of top predictions to consider Returns: True if any of the top-k predictions match the first ground truth, False otherwise """ if not predicted_sids or not first_ground_truth_sid: return False # Take first k predicted SIDs top_k_sids = predicted_sids[:k] # Check if any matches the first ground truth for sid in top_k_sids: if sid == first_ground_truth_sid: return True return False def compute_recall_at_k( predicted_sids: List[str], ground_truth_sids: Set[str], k: int ) -> float: """ Compute Recall@k for a single sample Recall@k definition: - Take the first k candidate SIDs from predictions - Count how many unique ground truth SIDs are hit by these k SIDs - Return the ratio: hit_count / total_ground_truth_count Args: predicted_sids: List of predicted SIDs (already extracted from generations) ground_truth_sids: Set of ground truth SIDs k: Number of top predictions to consider Returns: Recall@k score (0.0 to 1.0) Examples: >>> predicted_sids = ["123", "456", "999", "888"] >>> ground_truth_sids = {"123", "456", "789"} >>> compute_recall_at_k(predicted_sids, ground_truth_sids, k=2) 0.6667 # Hit 2 out of 3 ground truth SIDs >>> compute_recall_at_k(predicted_sids, ground_truth_sids, k=4) 0.6667 # Still hit only 2, since 789 is not in top-4 """ if not predicted_sids or not ground_truth_sids: return 0.0 # Take first k predicted SIDs top_k_sids = predicted_sids[:k] # Convert to set and filter out empty strings predicted_sids_set = set(sid for sid in top_k_sids if sid) # Count how many ground truth SIDs are hit hit_count = len(predicted_sids_set & ground_truth_sids) # Set intersection # Calculate recall recall = hit_count / len(ground_truth_sids) return recall def get_unique_generations( generations: List[str], max_count: int, logprobs: List[float] = None, exclude_sids: Set[str] = None, sources: List[str] = None ): """ Get first N unique SIDs from generations, optionally sorted by logprobs This function extracts unique SIDs, optionally sorting by logprobs first. Useful for merging results from multiple generation runs. Args: generations: List of model generation strings (may contain <|sid_begin|>...<|sid_end|> or ...) max_count: Maximum number of unique SIDs to return logprobs: Optional list of log probabilities (same length as generations). If provided, sorts by logprobs (descending) before extracting unique SIDs exclude_sids: Optional set of SIDs to exclude from results sources: Optional list of source labels (same length as generations). 
If provided, returns tuple (sids, sources) Returns: List of unique SIDs (up to max_count), sorted by logprobs if provided, otherwise in generation order If sources provided, returns tuple (List[str], List[str]) of (unique_sids, corresponding_sources) Examples: >>> gens = ["<|sid_begin|>123<|sid_end|>", "456", "...\\n123", "789", "456", "999"] >>> get_unique_generations(gens, max_count=3) ['123', '456', '789'] >>> get_unique_generations(gens, max_count=3, logprobs=[-0.5, -1.2, -0.8, -0.3, -1.5, -2.0]) ['789', '123', '456'] # Sorted by logprobs first >>> get_unique_generations(gens, max_count=3, exclude_sids={'456', '789'}) ['123', '999'] # Excluded '456' and '789' >>> get_unique_generations(gens, max_count=3, sources=['a', 'b', 'a', 'c', 'b', 'd']) (['123', '456', '789'], ['a', 'b', 'c']) """ # Track sources if provided track_sources = sources is not None and len(sources) == len(generations) # If logprobs provided, sort generations by logprobs (descending) if logprobs is not None and len(logprobs) == len(generations): # Create tuples and sort by logprob (descending) if track_sources: gen_data = list(zip(generations, logprobs, sources)) gen_data.sort(key=lambda x: x[1], reverse=True) sorted_generations = [gen for gen, _, _ in gen_data] sorted_sources = [src for _, _, src in gen_data] else: gen_logprob_pairs = list(zip(generations, logprobs)) gen_logprob_pairs.sort(key=lambda x: x[1], reverse=True) sorted_generations = [gen for gen, _ in gen_logprob_pairs] sorted_sources = None else: sorted_generations = generations sorted_sources = sources if track_sources else None seen = set() unique_sids = [] unique_sources = [] if track_sources else None exclude = exclude_sids or set() for i, gen in enumerate(sorted_generations): # Skip empty strings if not gen or not gen.strip(): continue # Extract SID from generation text sid = extract_id_from_generation(gen) # Skip if SID is empty, already seen, or in exclude list if not sid or sid in seen or sid in exclude: continue unique_sids.append(sid) seen.add(sid) if track_sources: unique_sources.append(sorted_sources[i]) # Stop if we've collected enough unique SIDs if len(unique_sids) >= max_count: break if track_sources: return unique_sids, unique_sources return unique_sids def get_debug_info( sample_id: str, generations: List[str], ground_truth: str, pass_results: Dict[str, bool], position1_pass_results: Dict[str, bool], raw_prompt: str = "" ) -> Dict[str, Any]: """ Prepare debug information for a sample Args: sample_id: Sample ID generations: List of generated SIDs ground_truth: Ground truth answer string pass_results: Pass@k results for this sample position1_pass_results: Position1_Pass@k results for this sample raw_prompt: Raw prompt (optional) Returns: Debug information dictionary """ ground_truth_sids = extract_ids_from_answer(ground_truth) first_ground_truth_sid = extract_first_id_from_answer(ground_truth) # Extract top-k generated IDs top_k_sids = [extract_id_from_generation(gen) for gen in generations[:10]] # Show top-10 debug_item = { "sample_id": sample_id, "ground_truth_sids": list(ground_truth_sids), "first_ground_truth_sid": first_ground_truth_sid, "top_10_generations": top_k_sids, "pass_results": pass_results, "position1_pass_results": position1_pass_results, } if raw_prompt: debug_item["raw_prompt_snippet"] = raw_prompt[:200] + "..." 
if len(raw_prompt) > 200 else raw_prompt return debug_item ================================================ FILE: benchmarks/benchmark/tasks/v1_0/recommendation/utils_by_pid.py ================================================ """ Recommendation Task Utilities (PID-based) Functions for PID extraction and recommendation metrics computation using PIDs. """ import re import json import random from typing import Set, Dict, List, Any, Tuple, Optional from pathlib import Path from collections import Counter # Encoding constants for (code1, code2, code3) -> single int # Each code is in range [0, 8192), needs 13 bits CODE_MULTIPLIER_1 = 8192 * 8192 # 67108864 CODE_MULTIPLIER_2 = 8192 def load_pid_mapping(mapping_path: str) -> Dict[int, List[Dict[str, int]]]: """ Load SID to PID mapping from JSON file Args: mapping_path: Path to the JSON file containing SID to PID mapping Returns: Dictionary mapping encoded SID (int) to list of PID info dictionaries Format: {encoded_sid: [{"pid": pid1, "count": count1, "count_after_downsample": count2}, ...]} PIDs are sorted by original count in descending order """ mapping_path = Path(mapping_path) if not mapping_path.exists(): raise FileNotFoundError(f"PID mapping file not found: {mapping_path}") with open(mapping_path, 'r') as f: sid_to_pid_json = json.load(f) # Convert string keys back to integers code_to_pid = {int(k): v for k, v in sid_to_pid_json.items()} print(f"[INFO] Loaded {len(code_to_pid)} SID to PID mappings from {mapping_path}") return code_to_pid def encode_sid(c1: int, c2: int, c3: int) -> int: """ Encode (code1, code2, code3) into a single integer key Args: c1, c2, c3: SID code components Returns: Encoded integer key """ return c1 * CODE_MULTIPLIER_1 + c2 * CODE_MULTIPLIER_2 + c3 def extract_sid_codes_from_text(text: str) -> Optional[Tuple[int, int, int]]: """ Extract SID codes from text using regex pattern Args: text: Input text containing SID patterns like <|sid_begin|>...<|sid_end|> Returns: Tuple (a, b, c) representing extracted SID codes, or None if not found Expects exactly one SID in the text """ # Matches the three numeric code tokens between the SID markers (the <s_...> token spelling is an assumption) pattern = r'<\|sid_begin\|><s_(\d+)><s_(\d+)><s_(\d+)><\|sid_end\|>' matches = re.findall(pattern, text) if not matches: return None if len(matches) > 1: # Log warning but use first match print(f"[WARNING] Expected 1 SID code, got {len(matches)}, using first") return (int(matches[0][0]), int(matches[0][1]), int(matches[0][2])) def _get_id_from_info(info: Dict[str, int]) -> int: """ Extract ID from info dict, supporting both 'pid' and 'iid' keys. Args: info: Dictionary containing either 'pid' or 'iid' key Returns: The ID value (int) """ return info.get("pid", info.get("iid", 0)) def apply_sid_to_pid_strategy(pid_info_list: List[Dict[str, int]], strategy: str) -> int: """ Apply strategy to select a single PID from a list Args: pid_info_list: List of PID info dictionaries Format: [{"pid": pid1, "count": count1, "count_after_downsample": count2}, ...] or [{"iid": iid1, "count": count1, "count_after_downsample": count2}, ...]
for product strategy: One of "most_popular_originally", "most_popular_after_downsampling", or "random" Returns: Selected PID/IID (int), or 0 if list is empty Strategies: - "most_popular_originally": Return the PID with highest original count (already sorted) - "most_popular_after_downsampling": Return the PID with highest downsampled count (random if tie) - "random": Randomly select one PID from the list """ if not pid_info_list: return 0 if strategy == "most_popular_originally": # Return the first PID/IID (highest original count, already sorted) return _get_id_from_info(pid_info_list[0]) elif strategy == "most_popular_after_downsampling": # Find max downsampled count max_count = max(info["count_after_downsample"] for info in pid_info_list) # Get all PIDs/IIDs with max downsampled count max_pids = [_get_id_from_info(info) for info in pid_info_list if info["count_after_downsample"] == max_count] # Randomly select one if there are ties return random.choice(max_pids) elif strategy == "random": # Randomly select a PID/IID return random.choice([_get_id_from_info(info) for info in pid_info_list]) else: raise ValueError(f"Unknown strategy: {strategy}. Must be 'most_popular_originally', 'most_popular_after_downsampling', or 'random'") def extract_ids_from_answer(answer: list[int]) -> list[int]: """Extract all PIDs from answer field, preserving original order. Returns a deduplicated list that keeps the first occurrence order. >>> extract_ids_from_answer([123, 456, 123, 789]) [123, 456, 789] """ seen: set[int] = set() correct_answers: list[int] = [] for pid in answer: if pid != 0 and pid not in seen: correct_answers.append(pid) seen.add(pid) return correct_answers def extract_first_id_from_answer(answer: List[int]) -> int: """ Extract the first PID from answer field Examples: >>> extract_first_id_from_answer([123, 456, 789]) 123 """ valid_pids = [pid for pid in answer if pid != 0] return valid_pids[0] if valid_pids else 0 def extract_id_from_generation( generation: str, code_to_pid: Dict[int, List[Dict[str, int]]], strategy: str = "most_popular_originally" ) -> int: """ Extract PID from model generation The generation may contain: - SID wrapped in tags: "<|sid_begin|>...<|sid_end|>" - With thinking: "<think>...</think>\\n<|sid_begin|>..."
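Exactly one SID pattern is expected per generation; if several are present, extract_sid_codes_from_text warns and uses the first match, and a SID with no mapping entry resolves to PID 0.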
Args: generation: Model generation string (contains exactly one SID) code_to_pid: Mapping dictionary {encoded_sid: [{"pid": pid, "count": ..., "count_after_downsample": ...}, ...]} strategy: Strategy for selecting PID ("most_popular_originally", "most_popular_after_downsampling", or "random") Returns: Extracted PID (int), or 0 if not found Examples: >>> extract_id_from_generation("<|sid_begin|>...<|sid_end|>", code_to_pid) 12345 # Assuming this SID maps to PID 12345 """ generation = generation.strip() # If generation contains </think>, only process content after it if '</think>' in generation: generation = generation.split('</think>')[-1].strip() # Extract SID codes from the generation (should be exactly one) sid_codes = extract_sid_codes_from_text(generation) if sid_codes is None: return 0 # Encode SID and look up PID list encoded = encode_sid(*sid_codes) pid_freq_list = code_to_pid.get(encoded, []) # Apply strategy to select PID return apply_sid_to_pid_strategy(pid_freq_list, strategy) def compute_pass_at_k( predicted_ids: List[int], ground_truth_ids: Set[int], k: int ) -> bool: """ Compute Pass@k for a single sample using PIDs Pass@k definition: - Take the first k candidate PIDs from predictions - If any of these k PIDs appears in the ground truth PIDs, return True Args: predicted_ids: List of predicted PIDs (already extracted from generations) ground_truth_ids: Set of ground truth PIDs k: Number of top predictions to consider Returns: True if any of the top-k predictions match ground truth, False otherwise """ if not predicted_ids or not ground_truth_ids: return False # Take first k predicted PIDs top_k_ids = predicted_ids[:k] # Check if any matches ground truth for pid in top_k_ids: if pid != 0 and pid in ground_truth_ids: return True return False def compute_position1_pass_at_k( predicted_ids: List[int], first_ground_truth_id: int, k: int ) -> bool: """ Compute Position1_Pass@k for a single sample using PIDs Position1_Pass@k definition: - Take the first k candidate PIDs from predictions - Only consider the first PID in the ground truth - If any of these k PIDs matches the first ground truth, return True Args: predicted_ids: List of predicted PIDs (already extracted from generations) first_ground_truth_id: The first ground truth PID k: Number of top predictions to consider Returns: True if any of the top-k predictions match the first ground truth, False otherwise """ if not predicted_ids or not first_ground_truth_id or first_ground_truth_id == 0: return False # Take first k predicted PIDs top_k_ids = predicted_ids[:k] # Check if any matches the first ground truth for pid in top_k_ids: if pid != 0 and pid == first_ground_truth_id: return True return False def compute_recall_at_k( predicted_ids: List[int], ground_truth_ids: Set[int], k: int ) -> float: """ Compute Recall@k for a single sample using PIDs Recall@k definition: - Take the first k candidate PIDs from predictions - Count how many unique ground truth PIDs are hit by these k PIDs - Return the ratio: hit_count / total_ground_truth_count Args: predicted_ids: List of predicted PIDs (already extracted from generations) ground_truth_ids: Set of ground truth PIDs k: Number of top predictions to consider Returns: Recall@k score (0.0 to 1.0) Examples: >>> predicted_ids = [123, 456, 999, 888] >>> ground_truth_ids = {123, 456, 789} >>> compute_recall_at_k(predicted_ids, ground_truth_ids, k=2) 0.6667 # Hit 2 out of 3 ground truth PIDs """ if not predicted_ids or not ground_truth_ids: return 0.0 # Take first k predicted PIDs top_k_ids = predicted_ids[:k] # Convert
to set and filter out zeros predicted_ids_set = set(pid for pid in top_k_ids if pid != 0) # Count how many ground truth PIDs are hit hit_count = len(predicted_ids_set & ground_truth_ids) # Set intersection # Calculate recall recall = hit_count / len(ground_truth_ids) return recall def get_unique_generations( generations: List[str], max_count: int, code_to_pid: Dict[int, List[Dict[str, int]]], strategy: str = "most_popular_originally", logprobs: List[float] = None, exclude_ids: Set[int] = None, sources: List[str] = None ): """ Get first N unique PIDs from generations, optionally sorted by logprobs This function extracts unique PIDs, optionally sorting by logprobs first. Useful for merging results from multiple generation runs. Args: generations: List of model generation strings containing SID patterns max_count: Maximum number of unique PIDs to return code_to_pid: Mapping dictionary {encoded_sid: [{"pid": pid, "count": ..., "count_after_downsample": ...}, ...]} strategy: Strategy for selecting PID ("most_popular_originally", "most_popular_after_downsampling", or "random") logprobs: Optional list of log probabilities (same length as generations) exclude_ids: Optional set of PIDs to exclude from results sources: Optional list of source labels (same length as generations) Returns: List of unique PIDs (up to max_count), sorted by logprobs if provided If sources provided, returns tuple (List[int], List[str]) of (unique_pids, corresponding_sources) """ # Track sources if provided track_sources = sources is not None and len(sources) == len(generations) # If logprobs provided, sort generations by logprobs (descending) if logprobs is not None and len(logprobs) == len(generations): # Create tuples and sort by logprob (descending) if track_sources: gen_data = list(zip(generations, logprobs, sources)) gen_data.sort(key=lambda x: x[1], reverse=True) sorted_generations = [gen for gen, _, _ in gen_data] sorted_sources = [src for _, _, src in gen_data] else: gen_logprob_pairs = list(zip(generations, logprobs)) gen_logprob_pairs.sort(key=lambda x: x[1], reverse=True) sorted_generations = [gen for gen, _ in gen_logprob_pairs] sorted_sources = None else: sorted_generations = generations sorted_sources = sources if track_sources else None seen = set() unique_pids = [] unique_sources = [] if track_sources else None exclude = exclude_ids or set() for i, gen in enumerate(sorted_generations): # Skip empty strings if not gen or not gen.strip(): continue # Extract PID from generation text pid = extract_id_from_generation(gen, code_to_pid, strategy) # Skip if PID is 0 (not found), already seen, or in exclude list if pid == 0 or pid in seen or pid in exclude: continue unique_pids.append(pid) seen.add(pid) if track_sources: unique_sources.append(sorted_sources[i]) # Stop if we've collected enough unique PIDs if len(unique_pids) >= max_count: break if track_sources: return unique_pids, unique_sources return unique_pids def get_debug_info( sample_id: str, generations: List[str], ground_truth: List[int], pass_results: Dict[str, bool], position1_pass_results: Dict[str, bool], code_to_pid: Dict[int, List[Dict[str, int]]], strategy: str = "most_popular_originally", raw_prompt: str = "" ) -> Dict[str, Any]: """ Prepare debug information for a sample (PID-based) Args: sample_id: Sample ID generations: List of generated SIDs ground_truth: Ground truth answer string pass_results: Pass@k results for this sample position1_pass_results: Position1_Pass@k results for this sample code_to_pid: Mapping dictionary {encoded_sid: [{"pid": 
pid, "count": ..., "count_after_downsample": ...}, ...]} strategy: Strategy for selecting PID ("most_popular_originally", "most_popular_after_downsampling", or "random") raw_prompt: Raw prompt (optional) Returns: Debug information dictionary """ ground_truth_ids = extract_ids_from_answer(ground_truth) first_ground_truth_id = extract_first_id_from_answer(ground_truth) # Extract top-k generated PIDs top_k_ids = [extract_id_from_generation(gen, code_to_pid, strategy) for gen in generations[:10]] debug_item = { "sample_id": sample_id, "ground_truth_pids": list(ground_truth_ids), "first_ground_truth_pid": first_ground_truth_id, "top_10_generations": top_k_ids, "pass_results": pass_results, "position1_pass_results": position1_pass_results, } if raw_prompt: debug_item["raw_prompt_snippet"] = raw_prompt[:200] + "..." if len(raw_prompt) > 200 else raw_prompt return debug_item ================================================ FILE: benchmarks/benchmark/tasks/v1_0/registry.py ================================================ """ Task Registry - Unified Task Registration This module consolidates: - loader_factory.py - evaluator_factory.py - tasks.py Purpose: Each task is defined in ONE place only, avoiding duplication across multiple files. """ from dataclasses import dataclass from typing import Type, Dict, Any, Optional # ===== Import all configs ===== from .label_pred.config import LABEL_PRED_CONFIG from .item_understand.config import ITEM_UNDERSTAND_CONFIG from .rec_reason.config import REC_REASON_CONFIG from .recommendation.config import ( LABEL_COND_CONFIG, VIDEO_CONFIG, PRODUCT_CONFIG, AD_CONFIG, INTERACTIVE_CONFIG, ) # ===== Import base loader ===== from .base_loader import BaseLoader # ===== Import all evaluators ===== from .label_pred.evaluator import LabelPredEvaluator from .item_understand.evaluator import ItemUnderstandEvaluator from .rec_reason.evaluator import RecoReasonEvaluator from .recommendation.evaluator import RecommendationEvaluator @dataclass class TaskRegistration: """Task registration information""" name: str config: Dict[str, Any] evaluator_class: Type category: str # "general", "recommendation", "caption" # ======================================== # Unified Task Registry # ======================================== TASK_REGISTRY: Dict[str, TaskRegistration] = { "label_cond": TaskRegistration( name="label_cond", config=LABEL_COND_CONFIG, evaluator_class=RecommendationEvaluator, category="recommendation" ), "video": TaskRegistration( name="video", config=VIDEO_CONFIG, evaluator_class=RecommendationEvaluator, category="recommendation" ), "product": TaskRegistration( name="product", config=PRODUCT_CONFIG, evaluator_class=RecommendationEvaluator, category="recommendation" ), "ad": TaskRegistration( name="ad", config=AD_CONFIG, evaluator_class=RecommendationEvaluator, category="recommendation" ), "interactive": TaskRegistration( name="interactive", config=INTERACTIVE_CONFIG, evaluator_class=RecommendationEvaluator, category="recommendation" ), "label_pred": TaskRegistration( name="label_pred", config=LABEL_PRED_CONFIG, evaluator_class=LabelPredEvaluator, category="recommendation" ), "item_understand": TaskRegistration( name="item_understand", config=ITEM_UNDERSTAND_CONFIG, evaluator_class=ItemUnderstandEvaluator, category="caption" ), "rec_reason": TaskRegistration( name="rec_reason", config=REC_REASON_CONFIG, evaluator_class=RecoReasonEvaluator, category="caption" ), } # ======================================== # Factory Functions # ======================================== def 
get_loader(task_name: str, data_dir: str, tokenizer: Optional[Any] = None, enable_thinking: Optional[bool] = None): """ Get loader instance for a task Replaces loader_factory.get_loader() Args: task_name: Name of the task data_dir: Data directory path tokenizer: Tokenizer instance (optional, required for message-based formats) enable_thinking: Enable thinking mode (optional, overrides task config if set) Returns: Loader instance Raises: ValueError: If task_name is not registered """ if task_name not in TASK_REGISTRY: available_tasks = ", ".join(TASK_REGISTRY.keys()) raise ValueError( f"Unknown task: {task_name}. " f"Available tasks: {available_tasks}" ) reg = TASK_REGISTRY[task_name] # Create loader instance with aligned parameters return BaseLoader( task_config=reg.config, data_dir=data_dir, tokenizer=tokenizer, enable_thinking=enable_thinking ) def get_evaluator(task_name: str): """ Get evaluator class for a task Replaces evaluator_factory.get_evaluator() Args: task_name: Name of the task Returns: Evaluator class (not instance) Raises: ValueError: If task_name is not registered """ if task_name not in TASK_REGISTRY: available_tasks = ", ".join(TASK_REGISTRY.keys()) raise ValueError( f"Unknown task: {task_name}. " f"Available tasks: {available_tasks}" ) return TASK_REGISTRY[task_name].evaluator_class def get_task_config(task_name: str) -> Dict[str, Any]: """ Get task configuration Args: task_name: Name of the task Returns: Task configuration dictionary Raises: ValueError: If task_name is not registered """ if task_name not in TASK_REGISTRY: available_tasks = ", ".join(TASK_REGISTRY.keys()) raise ValueError( f"Unknown task: {task_name}. " f"Available tasks: {available_tasks}" ) return TASK_REGISTRY[task_name].config def get_all_tasks() -> list: """ Get list of all registered task names Returns: List of task names """ return list(TASK_REGISTRY.keys()) def get_tasks_by_category(category: str) -> list: """ Get tasks filtered by category Args: category: Category name ("general", "recommendation", "caption") Returns: List of task names in the specified category """ return [ name for name, reg in TASK_REGISTRY.items() if reg.category == category ] # ======================================== # Backward Compatibility # ======================================== # Replaces tasks.py - TaskTable TaskTable = {name: reg.config for name, reg in TASK_REGISTRY.items()} ================================================ FILE: benchmarks/eval_script.sh ================================================ #!/bin/bash # Read configuration from environment variables (set by eval_script.py) # Fallback to hardcoded paths if not set; defaults are resolved before the derived paths below use them BENCHMARK_BASE_DIR="${BENCHMARK_BASE_DIR:-/home/user/benchmark}" DATA_VERSION="${DATA_VERSION:-v1.0}" BENCHMARK_DATA_DIR="${BENCHMARK_DATA_DIR:-${BENCHMARK_BASE_DIR}/data_${DATA_VERSION}}" DATA_DIR="$BENCHMARK_DATA_DIR" # Set common variables MODEL_PATH=$1 VERSION="${VERSION:-v1.0}" BASE_OUTPUT_DIR="${BENCHMARK_BASE_DIR}/results/${VERSION}/results_${2}" BASE_LOG_NAME="${BENCHMARK_BASE_DIR}/auto_eval_logs/${VERSION}/$2" ENABLE_THINKING=$3 # Create output directory and log directory mkdir -p "$(dirname "${BASE_LOG_NAME}")" mkdir -p "$BASE_OUTPUT_DIR" # Write debug info to log file { echo "========== Task Configuration ==========" echo "DATA_DIR: $DATA_DIR" echo "Enable Thinking: $ENABLE_THINKING" echo "========================================" } >> "${BASE_LOG_NAME}.log" # Build thinking arguments
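# When ENABLE_THINKING is "true", THINKING_ARGS expands to --enable_thinking and is appended to every evaluate.py call below; any other value leaves it empty, so the flag is simply omitted.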
THINKING_ARGS="" if [ "$ENABLE_THINKING" = "true" ]; then THINKING_ARGS="--enable_thinking" fi echo "Thinking args: $THINKING_ARGS" echo "Running all tasks" # Task: rec_reason python3 -u scripts/ray-vllm/evaluate.py \ --task_types rec_reason \ --gpu_memory_utilization 0.9 \ --model_path "$MODEL_PATH" \ --data_dir "$DATA_DIR" \ --output_dir "${BASE_OUTPUT_DIR}" \ --dtype bfloat16 \ --worker_batch_size 5 \ --overwrite \ $THINKING_ARGS >> "${BASE_LOG_NAME}.log" 2>&1 # Task: item_understand python3 -u scripts/ray-vllm/evaluate.py \ --task_types item_understand \ --gpu_memory_utilization 0.8 \ --model_path "$MODEL_PATH" \ --data_dir "$DATA_DIR" \ --output_dir "${BASE_OUTPUT_DIR}" \ --dtype bfloat16 \ --worker_batch_size 250 \ --overwrite \ $THINKING_ARGS >> "${BASE_LOG_NAME}.log" 2>&1 # Task: ad python3 -u scripts/ray-vllm/evaluate.py \ --task_types ad \ --gpu_memory_utilization 0.8 \ --model_path "$MODEL_PATH" \ --data_dir "$DATA_DIR" \ --output_dir "${BASE_OUTPUT_DIR}" \ --dtype bfloat16 \ --worker_batch_size 1875 \ --overwrite \ --num_beams 32 --num_return_sequences 32 --num_return_thinking_sequences 1 \ $THINKING_ARGS >> "${BASE_LOG_NAME}.log" 2>&1 # Task: product python3 -u scripts/ray-vllm/evaluate.py \ --task_types product \ --gpu_memory_utilization 0.8 \ --model_path "$MODEL_PATH" \ --data_dir "$DATA_DIR" \ --output_dir "${BASE_OUTPUT_DIR}" \ --dtype bfloat16 \ --worker_batch_size 1875 \ --overwrite \ --num_beams 32 --num_return_sequences 32 --num_return_thinking_sequences 1 \ $THINKING_ARGS >> "${BASE_LOG_NAME}.log" 2>&1 # Task: label_cond python3 -u scripts/ray-vllm/evaluate.py \ --task_types label_cond \ --gpu_memory_utilization 0.8 \ --model_path "$MODEL_PATH" \ --data_dir "$DATA_DIR" \ --output_dir "${BASE_OUTPUT_DIR}" \ --dtype bfloat16 \ --worker_batch_size 1875 \ --overwrite \ --num_beams 32 --num_return_sequences 32 --num_return_thinking_sequences 1 \ $THINKING_ARGS >> "${BASE_LOG_NAME}.log" 2>&1 # Task: video python3 -u scripts/ray-vllm/evaluate.py \ --task_types video \ --gpu_memory_utilization 0.8 \ --model_path "$MODEL_PATH" \ --data_dir "$DATA_DIR" \ --output_dir "${BASE_OUTPUT_DIR}" \ --dtype bfloat16 \ --worker_batch_size 1875 \ --overwrite \ --num_beams 32 --num_return_sequences 32 --num_return_thinking_sequences 1 \ $THINKING_ARGS >> "${BASE_LOG_NAME}.log" 2>&1 # Task: interactive python3 -u scripts/ray-vllm/evaluate.py \ --task_types interactive \ --gpu_memory_utilization 0.8 \ --model_path "$MODEL_PATH" \ --data_dir "$DATA_DIR" \ --output_dir "${BASE_OUTPUT_DIR}" \ --dtype bfloat16 \ --worker_batch_size 250 \ --overwrite \ --num_beams 32 --num_return_sequences 32 --num_return_thinking_sequences 1 \ $THINKING_ARGS >> "${BASE_LOG_NAME}.log" 2>&1 # Task: label_pred python3 -u scripts/ray-vllm/evaluate.py \ --task_types label_pred \ --gpu_memory_utilization 0.8 \ --model_path "$MODEL_PATH" \ --data_dir "$DATA_DIR" \ --output_dir "${BASE_OUTPUT_DIR}" \ --dtype bfloat16 \ --worker_batch_size 3200 \ --max_logprobs 10000 \ --overwrite \ $THINKING_ARGS >> "${BASE_LOG_NAME}.log" 2>&1 echo "All tasks completed successfully" ================================================ FILE: benchmarks/pyproject.toml ================================================ [build-system] requires = ["setuptools>=45", "wheel"] build-backend = "setuptools.build_meta" [project] name = "onerec-benchamrk" version = "0.1.0" description = "OneRec Benchmark" readme = "README.md" requires-python = ">=3.10" license = {text = "Apache License 2.0"} # Core dependencies - pinned to specific versions from pip list 
dependencies = [ "torch==2.5.1", "transformers==4.52.0", "ray==2.43.0", "vllm==0.7.3", "gradio==4.44.1", "datasets==3.6.0", "safetensors==0.5.3", "numpy==1.26.4", "peft==0.15.2", "accelerate==1.8.1", "bert_score", "pyfiglet", "pylatexenc", "scikit-learn", "vertexai", "openai", "anthropic" ] [tool.setuptools] packages = ["benchmark", "api", "scripts"] [tool.setuptools.package-data] "*" = ["*.json", "*.yaml", "*.yml"] ================================================ FILE: benchmarks/requirements.txt ================================================ absl-py==2.1.0 accelerate==1.8.1 aiodns==3.6.1 aiohappyeyeballs==2.6.1 aiohttp==3.11.14 aiohttp-cors==0.8.0 aiosignal==1.3.2 airportsdata==20250224 annotated-types==0.7.0 anthropic==0.75.0 antlr4-python3-runtime==4.13.2 anyio==4.9.0 APScheduler==3.11.1 astor==0.8.1 asttokens==3.0.0 async-timeout==5.0.1 attrs==25.3.0 av==14.0.1 bert-score==0.3.13 blake3==1.0.4 blinker==1.4 boto3==1.35.97 botocore==1.35.98 braceexpand==0.1.7 build==1.2.2.post1 cachetools==4.2.4 cchardet==2.1.7 certifi==2021.10.8 cffi==2.0.0 charset-normalizer==2.0.12 cityhash==0.2.4.post11 click==8.1.8 cloudpickle==3.1.1 colorful==0.5.6 compressed-tensors==0.9.1 contourpy==1.3.1 cramjam==2.10.0 cryptography==3.4.8 cupy-cuda12x==13.4.1 cycler==0.12.1 datasets==3.6.0 decorator==5.1.1 decord==0.6.0 deepspeed==0.16.2 depyf==0.18.0 dill==0.3.8 diskcache==5.6.3 distlib==0.3.9 distro==1.7.0 dnspython==2.7.0 docstring_parser==0.17.0 einops==0.8.0 email_validator==2.2.0 exceptiongroup==1.2.2 executing==2.1.0 fastapi==0.115.11 fastapi-cli==0.0.7 fastparquet==2024.2.0 fastrlock==0.8.3 filelock==3.18.0 fonttools==4.55.3 frozenlist==1.5.0 fsspec==2024.2.0 func-timeout==4.3.5 gguf==0.10.0 h11==0.14.0 hf-xet==1.2.1 hiredis==2.4.0 hjson==3.1.0 httpcore==1.0.7 httplib2==0.20.2 httptools==0.6.4 httpx==0.28.1 huggingface-hub==0.36.0 idna==3.3 importlib-metadata==4.6.4 iniconfig==2.1.0 interegular==0.3.3 ipython==8.30.0 jedi==0.19.2 jeepney==0.7.1 Jinja2==3.1.6 jiter==0.9.0 jmespath==1.0.1 joblib==1.5.2 jsonschema==4.23.0 jsonschema-specifications==2024.10.1 kazoo==2.10.0 keyring==23.5.0 kiwisolver==1.4.7 lark==1.2.2 latex2sympy2_extended==1.10.2 launchpadlib==1.10.16 lazr.restfulclient==0.14.4 lazr.uri==1.0.6 llvmlite==0.43.0 lm-format-enforcer==0.10.11 lxml==4.9.4 lz4==3.1.10 Markdown==3.7 markdown-it-py==3.0.0 MarkupSafe==3.0.2 math-verify==0.8.0 matplotlib==3.10.0 matplotlib-inline==0.1.7 mdurl==0.1.2 mistral_common==1.5.4 more-itertools==8.10.0 mpi4py==4.1.1 mpmath==1.3.0 msgpack==1.1.0 msgspec==0.19.0 multidict==6.2.0 multiprocess==0.70.16 nest-asyncio==1.6.0 networkx==3.2.1 ninja==1.11.1.3 nltk==3.9.2 numba==0.60.0 numpy==1.26.4 nvidia-cublas-cu11==11.11.3.6 nvidia-cuda-cupti-cu11==11.8.87 nvidia-cuda-nvrtc-cu11==11.8.89 nvidia-cuda-runtime-cu11==11.8.89 nvidia-cudnn-cu11==9.1.0.70 nvidia-cufft-cu11==10.9.0.58 nvidia-curand-cu11==10.3.0.86 nvidia-cusolver-cu11==11.4.1.48 nvidia-cusparse-cu11==11.7.5.86 nvidia-ml-py==13.590.44 nvidia-nccl-cu11==2.21.5 nvidia-nvtx-cu11==11.8.86 oauthlib==3.2.0 openai==1.67.0 opencensus==0.11.4 opencensus-context==0.1.3 opencv-python-headless==4.11.0.86 outlines==0.1.11 outlines_core==0.1.26 packaging==24.2 pandas==2.2.3 parso==0.8.4 partial-json-parser==0.2.1.1.post5 peft==0.15.2 pexpect==4.9.0 pillow==11.0.0 platformdirs==4.3.7 pluggy==1.5.0 prettytable==2.5.0 prometheus-fastapi-instrumentator==7.1.0 prometheus_client==0.21.1 prompt_toolkit==3.0.48 propcache==0.3.0 proto-plus==1.26.1 psutil==7.1.3 ptyprocess==0.7.0 pure_eval==0.2.3 py-cpuinfo==9.0.0 py-spy==0.4.0 
pyarrow==18.1.0 pyasn1==0.6.1 pyasn1_modules==0.4.1 pybind11==2.13.6 pycares==4.11.0 pycountry==24.6.1 pycparser==2.23 pycryptodome==3.23.0 pydantic==2.10.4 pydantic_core==2.27.2 pyfiglet==1.0.4 Pygments==2.18.0 PyJWT==2.3.0 pylatexenc==2.10 pynvml==13.0.1 pyparsing==2.4.7 pyproject_hooks==1.2.0 pysmhasher==0.2.5 pytest==8.3.5 python-dateutil==2.9.0.post0 python-dotenv==1.0.1 python-multipart==0.0.20 python-snappy==0.6.1 pytz==2021.3 pytz-deprecation-shim==0.1.0.post0 PyYAML==6.0.2 pyzmq==26.3.0 qwen-vl-utils==0.0.8 ray==2.43.0 redis==4.6.0 referencing==0.36.2 regex==2024.11.6 rich==13.9.4 rich-toolkit==0.13.2 rouge-score==0.1.2 rpds-py==0.23.1 rsa==4.9 s3transfer==0.10.4 safetensors==0.5.3 scikit-learn==1.7.2 scipy==1.15.3 SecretStorage==3.3.1 sentencepiece==0.2.0 setuptools-scm==9.2.2 shapely==2.1.2 shellingham==1.5.4 six==1.16.0 smart-open==7.1.0 sniffio==1.3.1 sqlparse==0.4.4 ssh-import-id==5.11 stack-data==0.6.3 starlette==0.46.1 sympy==1.13.1 tensorboard==2.18.0 tensorboard-data-server==0.7.2 threadpoolctl==3.6.0 tiktoken==0.9.0 timm==1.0.15 tokenizers==0.21.1 tomli==2.2.1 torchao==0.11.0 torchdata==0.10.1 tornado==6.5.4 tqdm==4.67.1 traitlets==5.14.3 transformers==4.52.0 triton==3.1.0 typer==0.15.2 typing_extensions==4.12.2 tzdata==2024.2 tzlocal==4.3.1 unpaddedbase64==2.1.0 urllib3==1.26.8 uvicorn==0.34.0 uvloop==0.21.0 vertexai==1.71.1 virtualenv==20.29.3 wadllib==1.3.6 watchfiles==1.0.4 wcwidth==0.2.13 webdataset==0.2.100 websockets==15.0.1 Werkzeug==3.1.3 wrapt==1.17.2 xformers==0.0.28.post3 xgrammar==0.1.11 xmltodict==0.12.0 xxhash==3.6.0 yarl==1.18.3 zipp==1.0.0 ================================================ FILE: benchmarks/scripts/__init__.py ================================================ ================================================ FILE: benchmarks/scripts/eval_dev_results.py ================================================ import argparse from benchmark import Benchmark def get_args(): parser = argparse.ArgumentParser() parser.add_argument( "--output_dir", type=str, required=True, help="The directory where the generation results are saved." ) parser.add_argument( "--data_dir", type=str, default=None ) parser.add_argument( "--overwrite", action="store_true", help="Whether to overwrite existing metrics and recompute from scratch" ) parser.add_argument( "--task_types", type=str, nargs='+', default=None, help="Task name list (e.g., item_understand rec_reason). If not specified, all tasks will be evaluated." 
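# e.g. --task_types video ad rec_reason (any task names registered in TASK_REGISTRY)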
) return parser.parse_args() def main(): args = get_args() eval_results_path = f"{args.output_dir}/eval_results.json" Benchmark.evaluate_dev( generation_results_dir=args.output_dir, output_path=eval_results_path, data_dir=args.data_dir, overwrite=args.overwrite, task_types=args.task_types ) if __name__ == "__main__": main() ================================================ FILE: benchmarks/scripts/init_ray.sh ================================================ #!/bin/bash # Single Node Ray Initialization Script # Usage: bash init_ray.sh # HEAD_NODE_IP: IP address of the head node # PORT: Ray port (default: 6379) # RANK: Node rank (0 for head, >0 for workers) set -e # Parse arguments HEAD_NODE_IP=${1:-"127.0.0.1"} PORT=${2:-6379} RANK=${3:-0} # Configuration NUM_CPUS=${NUM_CPUS:-""} NUM_GPUS=${NUM_GPUS:-""} OBJECT_STORE_MEMORY=${OBJECT_STORE_MEMORY:-""} CONDA_ENV_NAME=${CONDA_ENV_NAME:-"benchmark"} # Colors GREEN='\033[0;32m' YELLOW='\033[1;33m' NC='\033[0m' log_info() { echo -e "${GREEN}[INFO]${NC} $(hostname): $1" } log_warn() { echo -e "${YELLOW}[WARN]${NC} $(hostname): $1" } # Activate conda environment if [ -f "/root/anaconda3/etc/profile.d/conda.sh" ]; then source "/root/anaconda3/etc/profile.d/conda.sh" elif [ -f "$HOME/anaconda3/etc/profile.d/conda.sh" ]; then source "$HOME/anaconda3/etc/profile.d/conda.sh" elif [ -f "$HOME/miniconda3/etc/profile.d/conda.sh" ]; then source "$HOME/miniconda3/etc/profile.d/conda.sh" fi if command -v conda &> /dev/null; then conda activate ${CONDA_ENV_NAME} 2>/dev/null || log_warn "Could not activate conda env: ${CONDA_ENV_NAME}" fi # Build ray start command options RAY_OPTS="" if [ -n "${NUM_CPUS}" ]; then RAY_OPTS="${RAY_OPTS} --num-cpus=${NUM_CPUS}" fi if [ -n "${NUM_GPUS}" ]; then RAY_OPTS="${RAY_OPTS} --num-gpus=${NUM_GPUS}" fi if [ -n "${OBJECT_STORE_MEMORY}" ]; then RAY_OPTS="${RAY_OPTS} --object-store-memory=${OBJECT_STORE_MEMORY}" fi # Stop existing Ray instance ray stop --force 2>/dev/null || true sleep 2 # Start Ray if [ "${RANK}" -eq 0 ]; then log_info "Starting Ray HEAD node on port ${PORT}..." ray start --head --port=${PORT} ${RAY_OPTS} else log_info "Starting Ray WORKER node, connecting to ${HEAD_NODE_IP}:${PORT}..." ray start --address=${HEAD_NODE_IP}:${PORT} ${RAY_OPTS} fi sleep 3 # Check status log_info "Ray node started. Checking status..." 
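# Note (assumption about ray CLI behavior): under 'set -e', a failing 'ray status'
# (e.g., the node did not actually come up) aborts the script at this point.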
ray status ================================================ FILE: benchmarks/scripts/init_ray_cluster.sh ================================================ #!/bin/bash # Multi-node Ray Cluster Initialization Script # Usage: bash init_ray_cluster.sh [--stop] # --stop: Stop Ray on all nodes instead of starting set -e SCRIPT_DIR=$(cd $(dirname $0); pwd) PROJECT_DIR=${SCRIPT_DIR} # Configuration PORT=${RAY_PORT:-6379} HOSTFILE=${HOSTFILE:-"/etc/mpi/hostfile"} CONDA_ENV_NAME=${CONDA_ENV_NAME:-"benchmark"} LOG_DIR="${PROJECT_DIR}/logs/ray" # Colors RED='\033[0;31m' GREEN='\033[0;32m' YELLOW='\033[1;33m' NC='\033[0m' log_info() { echo -e "${GREEN}[INFO]${NC} $1" } log_warn() { echo -e "${YELLOW}[WARN]${NC} $1" } log_error() { echo -e "${RED}[ERROR]${NC} $1" } # Generate conda initialization command that works with both anaconda and miniconda get_conda_init_cmd() { cat << 'EOF' for conda_sh in /root/miniconda3/etc/profile.d/conda.sh \ /root/anaconda3/etc/profile.d/conda.sh \ $HOME/miniconda3/etc/profile.d/conda.sh \ $HOME/anaconda3/etc/profile.d/conda.sh \ /opt/conda/etc/profile.d/conda.sh; do [ -f "$conda_sh" ] && source "$conda_sh" && break done EOF } # Function to stop Ray on all nodes stop_cluster() { log_info "Stopping Ray on all nodes..." if [ ! -f "${HOSTFILE}" ]; then log_warn "Hostfile not found, stopping local Ray only" ray stop --force 2>/dev/null || true return fi ALL_NODES=$(awk '!a[$1]++ {print $1}' ${HOSTFILE}) for node in ${ALL_NODES}; do log_info "Stopping Ray on ${node}..." ssh -n ${node} "$(get_conda_init_cmd) && conda activate ${CONDA_ENV_NAME} && ray stop --force" 2>/dev/null & done wait log_info "Ray stopped on all nodes" } # Function to start Ray cluster start_cluster() { # Check hostfile if [ ! -f "${HOSTFILE}" ]; then log_error "Hostfile not found: ${HOSTFILE}" log_info "Please create a hostfile with one IP per line" log_info "Example:" echo " 192.168.1.100" echo " 192.168.1.101" echo " 192.168.1.102" exit 1 fi # Get head node (first line) HEAD_NODE=$(awk 'NR==1 {print $1}' ${HOSTFILE}) ALL_NODES=$(awk '!a[$1]++ {print $1}' ${HOSTFILE}) log_info "Head node: ${HEAD_NODE}" log_info "Ray port: ${PORT}" log_info "Conda env: ${CONDA_ENV_NAME}" echo "" log_info "Nodes in cluster:" echo "${ALL_NODES}" echo "" # Create log directory mkdir -p "${LOG_DIR}" # Stop existing Ray instances first log_info "Stopping any existing Ray instances..." stop_cluster sleep 3 # Start head node first (synchronously) log_info "Starting Ray HEAD on ${HEAD_NODE}..." ssh -n ${HEAD_NODE} "CONDA_ENV_NAME=${CONDA_ENV_NAME} bash ${SCRIPT_DIR}/init_ray.sh ${HEAD_NODE} ${PORT} 0" \ > "${LOG_DIR}/ray_${HEAD_NODE}.log" 2>&1 if [ $? -ne 0 ]; then log_error "Failed to start Ray HEAD. Check ${LOG_DIR}/ray_${HEAD_NODE}.log" exit 1 fi log_info "Ray HEAD started successfully" # Wait for head to be ready sleep 5 # Start worker nodes (asynchronously) rank=1 for node in ${ALL_NODES}; do if [ "${node}" == "${HEAD_NODE}" ]; then continue fi log_info "Starting Ray WORKER on ${node} (rank ${rank})..." ssh -n ${node} "CONDA_ENV_NAME=${CONDA_ENV_NAME} bash ${SCRIPT_DIR}/init_ray.sh ${HEAD_NODE} ${PORT} ${rank}" \ > "${LOG_DIR}/ray_${node}.log" 2>&1 & rank=$((rank + 1)) done # Wait for all workers log_info "Waiting for all workers to join..." wait sleep 3 # Check cluster status echo "" log_info "Ray cluster initialization complete!" 
log_info "Logs saved to: ${LOG_DIR}/" echo "" log_info "Cluster status:" ssh -n ${HEAD_NODE} "$(get_conda_init_cmd) && conda activate ${CONDA_ENV_NAME} && ray status" } # Main case "${1}" in --stop) stop_cluster ;; *) start_cluster ;; esac ================================================ FILE: benchmarks/scripts/ray-vllm/evaluate.py ================================================ from transformers import HfArgumentParser import torch from benchmark import Benchmark from benchmark.console import * from utils.generator import RayVllmGenerator from utils.arguments import ( ModelConfig, InfrastructureConfig, InferenceConfig, GenerationConfig, PromptConfig, BenchmarkConfig ) def main(): parser = HfArgumentParser([ ModelConfig, InfrastructureConfig, InferenceConfig, GenerationConfig, PromptConfig, BenchmarkConfig ]) model_config, infra_config, inference_config, generation_config, prompt_config, benchmark_config = \ parser.parse_args_into_dataclasses() # 1. Initialize Benchmark benchmark = Benchmark( model_path=model_config.model_path, task_types=benchmark_config.task_types, splits=benchmark_config.splits, data_dir=benchmark_config.data_dir, enable_thinking=prompt_config.enable_thinking, ) # Benchmark.print_benchmark_table() # 2. Initialize Ray + vLLM generator (Multi-Node Support) generator = RayVllmGenerator( model_name_or_path=model_config.model_path, checkpoint_path=model_config.checkpoint_path, trust_remote_code=model_config.trust_remote_code, dtype=model_config.dtype, max_model_len=model_config.max_model_len, max_logprobs=model_config.max_logprobs, gpu_memory_utilization=infra_config.gpu_memory_utilization, tensor_parallel_size=infra_config.tensor_parallel_size, ray_address=infra_config.ray_address, # Ray cluster address allow_cross_node_tensor_parallel=infra_config.allow_cross_node_tensor_parallel, # Cross-node TP num_gpus=infra_config.num_gpus, gpu_ids=infra_config.gpu_ids, force_enable_optimizations=inference_config.force_enable_optimizations, force_disable_optimizations=inference_config.force_disable_optimizations, worker_batch_size=inference_config.worker_batch_size, task_types=benchmark_config.task_types ) # 3. Generate text benchmark.run( generator=generator, output_dir=benchmark_config.output_dir, overwrite=benchmark_config.overwrite, # Generation parameters enable_thinking=prompt_config.enable_thinking, num_beams=generation_config.num_beams, num_return_sequences=generation_config.num_return_sequences, temperature=generation_config.temperature, top_p=generation_config.top_p, top_k=generation_config.top_k, presence_penalty=generation_config.presence_penalty, num_return_thinking_sequences=generation_config.num_return_thinking_sequences, sample_size=benchmark_config.sample_size, ) # 4. Release GPU memory occupied by vLLM console.print("\nReleasing vLLM GPU memory...", style=warning_style) generator.cleanup() del generator import gc gc.collect() # Clear CUDA cache if torch.cuda.is_available(): torch.cuda.empty_cache() torch.cuda.synchronize() console.print("✓ GPU memory release completed\n", style=success_style) # 5. 
Calculate evaluation metrics eval_results_path = f"{benchmark_config.output_dir}/eval_results.json" Benchmark.evaluate_dev( generation_results_dir=benchmark_config.output_dir, output_path=eval_results_path, data_dir=benchmark_config.data_dir, overwrite=benchmark_config.overwrite, task_types=benchmark_config.task_types ) if __name__ == "__main__": main() ================================================ FILE: benchmarks/scripts/ray-vllm/utils/__init__.py ================================================ # Ray-vLLM Utils ================================================ FILE: benchmarks/scripts/ray-vllm/utils/arguments.py ================================================ from dataclasses import dataclass, field from typing import Optional, List @dataclass class ModelConfig: """Model loading and initialization parameters""" model_path: str = field( metadata={"help": "Model path or HuggingFace model name (e.g., Qwen/Qwen2-7B)", "required": True} ) checkpoint_path: Optional[str] = field( default=None, metadata={"help": "PT checkpoint path (optional, for loading .pt format models, will auto-convert to HuggingFace format)"} ) dtype: str = field( default='bfloat16', metadata={"help": "Model data type: auto, half, float16, bfloat16, float, float32"} ) max_model_len: Optional[int] = field( default=None, metadata={"help": "Maximum model length (optional, for limiting context length)"} ) trust_remote_code: bool = field( default=True, metadata={"help": "Whether to trust remote code"} ) max_logprobs: int = field( default=384, metadata={"help": "Maximum number of log probabilities to return (for beam search and logprob extraction)"} ) @dataclass class InfrastructureConfig: """Hardware and distributed computing configuration""" # GPU allocation num_gpus: Optional[int] = field( default=None, metadata={"help": "Number of GPUs to use (default uses all visible GPUs)"} ) gpu_ids: Optional[List[int]] = field( default=None, metadata={"help": "List of GPU IDs to use (e.g., [0,2,4], default uses all visible GPUs)"} ) gpu_memory_utilization: float = field( default=0.5, metadata={"help": "GPU memory utilization (0-1, recommended 0.8)"} ) # Parallelism tensor_parallel_size: int = field( default=1, metadata={"help": "Tensor parallel size (default 1, single GPU per worker)"} ) allow_cross_node_tensor_parallel: bool = field( default=False, metadata={"help": "Allow tensor parallelism across different nodes (not recommended due to network latency)"} ) # Ray cluster ray_address: Optional[str] = field( default="auto", metadata={"help": "Ray cluster address: 'auto' (auto-detect), 'local' (single machine), or 'ray://head_ip:10001' (specific cluster address)"} ) @dataclass class InferenceConfig: """Inference execution and optimization parameters""" # vLLM optimizations (chunked_prefill, prefix_caching) force_enable_optimizations: bool = field( default=False, metadata={"help": "Force enable chunked_prefill and prefix_caching for all tasks (overrides task-specific settings)"} ) force_disable_optimizations: bool = field( default=False, metadata={"help": "Force disable chunked_prefill and prefix_caching for all tasks (overrides task-specific settings)"} ) # Batch processing worker_batch_size: int = field( default=4, metadata={"help": "Batch size for each worker to process prompts (reduce this if KV cache is insufficient)"} ) @dataclass class GenerationConfig: """Text generation parameters (sampling, beam search)""" # Beam search num_beams: Optional[int] = field( default=None, metadata={"help": "Number of beams for beam search"} ) # 
Sampling num_return_sequences: Optional[int] = field( default=None, metadata={"help": "Number of sequences to return"} ) temperature: Optional[float] = field( default=None, metadata={"help": "Sampling temperature"} ) top_p: Optional[float] = field( default=None, metadata={"help": "Top-p (nucleus) sampling probability"} ) top_k: Optional[int] = field( default=None, metadata={"help": "Top-k sampling"} ) presence_penalty: Optional[float] = field( default=None, metadata={"help": "Presence penalty for sampling (-2.0 to 2.0, positive values penalize new tokens based on whether they appear in the text so far)"} ) # Two-stage generation (thinking mode) num_return_thinking_sequences: Optional[int] = field( default=None, metadata={"help": "Number of thinking candidates to generate in stage 1"} ) @dataclass class PromptConfig: """Prompt formatting and template parameters""" # Thinking mode (affects both template and generation) enable_thinking: bool = field( default=False, metadata={"help": "Enable thinking mode for apply_chat_template (overrides task config if set)"} ) @dataclass class BenchmarkConfig: """Benchmark execution and evaluation parameters""" # Task selection task_types: Optional[List[str]] = field( default=None, metadata={"help": "Task name list (e.g., item_understand rec_reason)"} ) sample_size: Optional[str] = field( default=None, metadata={"help": "Sample size for evaluation (e.g., 'full' for all data, or a number like '100')"} ) splits: List[str] = field( default_factory=lambda: ['test'], metadata={"help": "Dataset split list"} ) # Data I/O data_dir: str = field( default='./data', metadata={"help": "Data directory path"} ) output_dir: str = field( default='./results', metadata={"help": "Output directory for results"} ) overwrite: bool = field( default=False, metadata={"help": "Whether to overwrite existing results"} ) ================================================ FILE: benchmarks/scripts/ray-vllm/utils/generator.py ================================================ import os import ray import math import json from typing import Dict, List, Any, Optional from vllm import LLM, SamplingParams from vllm.sampling_params import BeamSearchParams from benchmark.base_generator import Generator, RayMixin, VllmMixin, DISABLE_OPTIMIZATIONS_FOR_TASKS from benchmark.checkpoint_utils import export_pt_to_safetensor from benchmark.console import * class VllmWorker: """ vLLM Worker that can use one or more GPUs Each Worker is responsible for: - Loading one vLLM model instance (potentially across multiple GPUs with tensor parallelism) - Processing inference tasks assigned to it - Returning generation results """ def __init__( self, worker_id: int, model_path: str, gpu_ids: List[int], gpu_memory_utilization: float = 0.9, trust_remote_code: bool = True, dtype: str = "auto", max_model_len: Optional[int] = None, tensor_parallel_size: int = 1, enable_optimizations: bool = True, **kwargs ): """ Args: worker_id: Worker ID model_path: Model path (converted HuggingFace format) gpu_ids: List of GPU IDs assigned to this worker gpu_memory_utilization: GPU memory utilization trust_remote_code: Whether to trust remote code dtype: Data type max_model_len: Maximum model length tensor_parallel_size: Tensor parallel size (must match len(gpu_ids)) enable_optimizations: Whether to enable chunked_prefill and prefix_caching **kwargs: Other vLLM parameters """ self.worker_id = worker_id self.gpu_ids = gpu_ids # Set environment variable so current process only sees specified GPUs os.environ["CUDA_VISIBLE_DEVICES"] = 
",".join(map(str, gpu_ids)) opt_status = "optimized" if enable_optimizations else "standard" gpu_str = ",".join(map(str, gpu_ids)) print(f" [Worker {worker_id}] Initializing ({opt_status})... (GPU {gpu_str}, TP={tensor_parallel_size})") # Initialize vLLM vllm_kwargs = { "model": model_path, "tensor_parallel_size": tensor_parallel_size, "gpu_memory_utilization": gpu_memory_utilization, "trust_remote_code": trust_remote_code, "dtype": dtype, "enable_chunked_prefill": enable_optimizations, "enable_prefix_caching": enable_optimizations, "max_logprobs": kwargs.get("max_logprobs", 384), # Support beam search, need large enough logprobs } if max_model_len is not None: vllm_kwargs["max_model_len"] = max_model_len vllm_kwargs.update(kwargs) try: self.llm = LLM(**vllm_kwargs) self.tokenizer = self.llm.get_tokenizer() print(f" [Worker {worker_id}] ✓ Initialized successfully (GPU {gpu_str}, TP={tensor_parallel_size})") except Exception as e: print(f" [Worker {worker_id}] ✗ Initialization failed: {e}") raise def get_model_parameters(self) -> Optional[float]: """ Get model parameter count from the worker's vLLM instance Returns: float: Total number of parameters, or None if unable to count """ try: model_executor = self.llm.llm_engine.model_executor if hasattr(model_executor, 'driver_worker'): model = model_executor.driver_worker.model_runner.model else: model = model_executor.model # Count parameters total_params = sum(p.numel() for p in model.parameters()) return float(total_params) except Exception as e: print(f" [Worker {self.worker_id}] Warning: Failed to count parameters: {e}") return None def generate_batch( self, prompts: Dict[str, str], sampling_params: Dict[str, Any], worker_batch_size: int = 8 ) -> tuple: """ Batch text generation (internal batch processing to avoid vLLM scheduler issues) Args: prompts: {sample_id: prompt_text} sampling_params: Sampling parameter dictionary worker_batch_size: Worker internal batch size (default 8) Returns: Tuple of three dicts: - First dict: {sample_id: [generated_text_1, generated_text_2, ...]} - Second dict: {sample_id: [cum_logprob_1, cum_logprob_2, ...]} (only for beam search) - Third dict: {sample_id: {"input_tokens": [int], "output_tokens": [int], "times": [float]}} (lists for multi-stage support) """ import time stage_start_time = time.time() if not prompts: return ({}, {}, {}) # Determine whether to use BeamSearchParams or SamplingParams based on parameters if sampling_params.get("use_beam_search", False): # Beam search mode params_dict = { "beam_width": sampling_params.get("beam_width", 1), "max_tokens": sampling_params.get("max_tokens", 128), } sp = BeamSearchParams(**params_dict) else: # Sampling mode - remove parameters not belonging to SamplingParams # stop parameter is already included in the dict comprehension params_dict = {k: v for k, v in sampling_params.items() if k not in ["use_beam_search", "beam_width", "return_logprobs"]} # If return_logprobs is enabled, add logprobs parameter if sampling_params.get("return_logprobs", False): params_dict["logprobs"] = 1 # Enable logprobs for cumulative calculation sp = SamplingParams(**params_dict) # Prepare input sample_ids = list(prompts.keys()) prompt_texts = list(prompts.values()) # Batch processing to avoid vLLM scheduler issues all_results = {} all_logprobs = {} # Store cum_logprobs for beam search all_mfu_stats = {} # Store MFU statistics for MFU calculation num_batches = (len(sample_ids) + worker_batch_size - 1) // worker_batch_size for batch_idx in range(num_batches): start_idx = batch_idx * 
worker_batch_size end_idx = min(start_idx + worker_batch_size, len(sample_ids)) batch_sample_ids = sample_ids[start_idx:end_idx] batch_prompt_texts = prompt_texts[start_idx:end_idx] try: # If using beam search, need to record each prompt's length batch_prompt_lengths = [] if isinstance(sp, BeamSearchParams): for text in batch_prompt_texts: prompt_tokens = self.tokenizer.encode(text, add_special_tokens=True) batch_prompt_lengths.append(len(prompt_tokens)) # Choose different generation method based on parameter type if isinstance(sp, BeamSearchParams): # Beam search needs to use beam_search method # beam_search input format is [{"prompt": "text"}] batch_prompt_dicts = [{"prompt": text} for text in batch_prompt_texts] batch_outputs = self.llm.beam_search(batch_prompt_dicts, sp) else: # Sampling mode uses generate method batch_outputs = self.llm.generate(batch_prompt_texts, sp) # Organize results for idx, (sample_id, output) in enumerate(zip(batch_sample_ids, batch_outputs)): if isinstance(sp, BeamSearchParams): # Beam search returns complete token IDs (including prompt), need to remove prompt part before decoding prompt_length = batch_prompt_lengths[idx] generated_texts = [ self.tokenizer.decode(seq.tokens[prompt_length:], skip_special_tokens=True) for seq in output.sequences ] # Extract cum_logprob for each sequence cum_logprobs = [seq.cum_logprob for seq in output.sequences] all_results[sample_id] = generated_texts all_logprobs[sample_id] = cum_logprobs # Collect MFU stats for beam search input_tokens = prompt_length output_tokens_list = [len(seq.tokens) - prompt_length for seq in output.sequences] all_mfu_stats[sample_id] = { "input_tokens": [input_tokens], "output_tokens": [sum(output_tokens_list)] } else: generated_texts = [out.text for out in output.outputs] all_results[sample_id] = generated_texts # If return_logprobs is enabled, calculate cumulative logprobs if sampling_params.get("return_logprobs", False): cum_logprobs = [] for out in output.outputs: # Calculate cumulative logprob by summing all token logprobs cum_logprob = 0.0 if out.logprobs and out.token_ids: # Iterate through each position and get the logprob of the actual generated token for i, token_logprobs in enumerate(out.logprobs): if token_logprobs and i < len(out.token_ids): # Get the actual token ID that was generated at this position actual_token_id = out.token_ids[i] # Look up the logprob for this specific token if actual_token_id in token_logprobs: cum_logprob += token_logprobs[actual_token_id].logprob cum_logprobs.append(cum_logprob) all_logprobs[sample_id] = cum_logprobs # Collect MFU stats for sampling mode prompt_text = batch_prompt_texts[idx] input_tokens = len(self.tokenizer.encode(prompt_text, add_special_tokens=True)) output_tokens_list = [len(out.token_ids) for out in output.outputs] all_mfu_stats[sample_id] = { "input_tokens": [input_tokens], "output_tokens": [sum(output_tokens_list)] } except Exception as e: # When a single batch fails, return empty string and print detailed error import traceback print(f"\n[Worker {self.worker_id}] Batch {batch_idx}/{num_batches} generation failed:") print(f" Error type: {type(e).__name__}") print(f" Error message: {str(e)}") print(f" Batch size: {len(batch_sample_ids)}") if batch_prompt_texts: prompt_lens = [len(self.tokenizer.encode(t, add_special_tokens=True)) for t in batch_prompt_texts] print(f" Prompt token length range: min={min(prompt_lens)}, max={max(prompt_lens)}, avg={sum(prompt_lens)/len(prompt_lens):.1f}") print(f" Full stack trace:\n{traceback.format_exc()}") 
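# Fallback path: register every failed sample with empty candidate strings so that
# sample_id alignment is preserved downstream; n (or beam_width) determines how many
# placeholder candidates each sample needs.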
num_return = sampling_params.get("n", 1) if sampling_params.get("use_beam_search", False): num_return = sampling_params.get("beam_width", 1) for sample_id in batch_sample_ids: all_results[sample_id] = [""] * num_return # If beam search, also set empty logprobs if sampling_params.get("use_beam_search", False): all_logprobs[sample_id] = [0.0] * num_return # Don't include failed samples in MFU stats (they would have times=[0.0] which breaks MFU calculation) # Calculate stage time stage_elapsed_time = time.time() - stage_start_time # Add time to all samples (same time for all samples in this worker) for sample_id in all_mfu_stats: all_mfu_stats[sample_id]["times"] = [stage_elapsed_time] return (all_results, all_logprobs, all_mfu_stats) def extract_token_logprobs_batch( self, prompts: Dict[str, str], target_tokens: List[str], sampling_params: Dict[str, Any], worker_batch_size: int = 8 ) -> tuple: """ Extract logprobs for specific target tokens Args: prompts: {sample_id: prompt_text} target_tokens: List of target tokens (e.g., ["是", "否"]) sampling_params: Sampling parameter dictionary worker_batch_size: Worker internal batch size Returns: Tuple of two dicts: - First dict: {sample_id: [json_string]} where json_string is formatted probabilities - Second dict: {sample_id: {"input_tokens": [int], "output_tokens": [int], "times": [float]}} """ import time stage_start_time = time.time() if not prompts: return ({}, {}) # Get token IDs for target tokens target_token_ids = {} for token in target_tokens: token_ids = self.tokenizer.encode(token, add_special_tokens=False) if len(token_ids) == 1: target_token_ids[token] = token_ids[0] else: print(f" [Worker {self.worker_id}] Warning: Token '{token}' is encoded as multiple tokens: {token_ids}") # For multi-token case, we only use the first token for now target_token_ids[token] = token_ids[0] # Build sampling parameters with logprobs enabled params_dict = { "n": sampling_params.get("n", 1), "max_tokens": sampling_params.get("max_tokens", 1), "temperature": sampling_params.get("temperature", 1.0), "top_p": sampling_params.get("top_p", 1.0), "top_k": sampling_params.get("top_k", -1), "repetition_penalty": sampling_params.get("repetition_penalty", 1.0), "presence_penalty": sampling_params.get("presence_penalty", 0.0), "frequency_penalty": sampling_params.get("frequency_penalty", 0.0), "logprobs": sampling_params.get("logprobs", 10), } sp = SamplingParams(**params_dict) # Prepare input sample_ids = list(prompts.keys()) prompt_texts = list(prompts.values()) # Batch processing all_results = {} all_mfu_stats = {} num_batches = (len(sample_ids) + worker_batch_size - 1) // worker_batch_size for batch_idx in range(num_batches): start_idx = batch_idx * worker_batch_size end_idx = min(start_idx + worker_batch_size, len(sample_ids)) batch_sample_ids = sample_ids[start_idx:end_idx] batch_prompt_texts = prompt_texts[start_idx:end_idx] try: # Generate with logprobs batch_outputs = self.llm.generate(batch_prompt_texts, sp) # Extract logprobs for target tokens for idx, (sample_id, output) in enumerate(zip(batch_sample_ids, batch_outputs)): token_probs = {} # Get logprobs from the first generated token if output.outputs and len(output.outputs) > 0: first_output = output.outputs[0] if first_output.logprobs and len(first_output.logprobs) > 0: # Get logprobs dict for the first token first_token_logprobs = first_output.logprobs[0] # Extract probabilities for target tokens for token, token_id in target_token_ids.items(): if token_id in first_token_logprobs: logprob = 
first_token_logprobs[token_id].logprob prob = math.exp(logprob) token_probs[token] = prob else: # Token not in top-k, assign very small probability token_probs[token] = 1e-10 all_results[sample_id] = [json.dumps(token_probs, ensure_ascii=False)] prompt_text = batch_prompt_texts[idx] input_tokens = len(self.tokenizer.encode(prompt_text, add_special_tokens=True)) # Classification only generates 1 token output_tokens = 1 all_mfu_stats[sample_id] = { "input_tokens": [input_tokens], "output_tokens": [output_tokens] } except Exception as e: import traceback print(f"\n[Worker {self.worker_id}] Batch {batch_idx}/{num_batches} logprobs extraction failed:") print(f" Error: {str(e)}") print(f" Full stack trace:\n{traceback.format_exc()}") for sample_id in batch_sample_ids: token_probs = {token: 0.0 for token in target_tokens} all_results[sample_id] = [json.dumps(token_probs, ensure_ascii=False)] # Don't include failed samples in MFU stats stage_elapsed_time = time.time() - stage_start_time for sample_id in all_mfu_stats: all_mfu_stats[sample_id]["times"] = [stage_elapsed_time] return (all_results, all_mfu_stats) class RayVllmGenerator(RayMixin, VllmMixin, Generator): """ Ray-based Multi-GPU vLLM Generator (Data Parallel) """ def __init__( self, model_name_or_path: str, checkpoint_path: Optional[str] = None, num_return_sequences: int = 2, max_new_tokens: int = 128, temperature: float = 0.7, top_p: float = 0.9, top_k: int = -1, repetition_penalty: float = 1.0, presence_penalty: float = 0.0, frequency_penalty: float = 0.0, do_sample: bool = True, gpu_memory_utilization: float = 0.9, trust_remote_code: bool = True, dtype: str = "auto", max_model_len: Optional[int] = None, max_logprobs: int = 384, tensor_parallel_size: int = 1, num_gpus: Optional[int] = None, gpu_ids: Optional[List[int]] = None, task_types: Optional[List[str]] = None, force_enable_optimizations: bool = False, force_disable_optimizations: bool = False, worker_batch_size: int = 4, ray_address: Optional[str] = "auto", allow_cross_node_tensor_parallel: bool = False, **kwargs ): """ Args: model_name_or_path: Model name or path checkpoint_path: PT checkpoint path (optional) num_return_sequences: Number of candidate sequences per prompt max_new_tokens: Maximum number of tokens to generate temperature: Sampling temperature top_p: Nucleus sampling parameter top_k: Top-k sampling parameter repetition_penalty: Repetition penalty presence_penalty: Presence penalty (penalizes tokens that appeared in the text) frequency_penalty: Frequency penalty (penalizes tokens based on frequency) do_sample: Whether to sample gpu_memory_utilization: GPU memory utilization trust_remote_code: Whether to trust remote code dtype: Model data type max_model_len: Maximum model length max_logprobs: Maximum number of log probabilities to return (for beam search and logprob extraction) tensor_parallel_size: Tensor parallel size (default 1, single GPU per worker) num_gpus: Number of GPUs to use (default uses all cluster GPUs) gpu_ids: List of GPU IDs to use (only for single-node mode) task_types: List of task types to evaluate (for auto optimization control) force_enable_optimizations: Force enable optimizations for all tasks force_disable_optimizations: Force disable optimizations for all tasks worker_batch_size: Batch size for each worker (reduce if KV cache is insufficient) ray_address: Ray cluster address ('auto', 'local', or specific address) allow_cross_node_tensor_parallel: Allow tensor parallel across nodes (not recommended) **kwargs: Other parameters """ 
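# Shared sampling defaults are forwarded to the Generator base class;
# the Ray/vLLM-specific configuration is kept on self below.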
super().__init__( num_return_sequences=num_return_sequences, max_new_tokens=max_new_tokens, temperature=temperature, top_p=top_p, top_k=top_k, repetition_penalty=repetition_penalty, presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, do_sample=do_sample, **kwargs ) self.model_name = model_name_or_path self.checkpoint_path = checkpoint_path self.gpu_memory_utilization = gpu_memory_utilization self.trust_remote_code = trust_remote_code self.dtype = dtype self.max_model_len = max_model_len self.tensor_parallel_size = tensor_parallel_size self.worker_batch_size = worker_batch_size self.task_types = task_types or [] self.force_enable_optimizations = force_enable_optimizations self.force_disable_optimizations = force_disable_optimizations self.ray_address = ray_address self.allow_cross_node_tensor_parallel = allow_cross_node_tensor_parallel self.num_gpus = num_gpus self.gpu_ids = gpu_ids console.print( "\nLoading Model\n", style=head_style_2, justify="center", ) console.print( f" Using Ray + vLLM (Multi-Node) to load model: [cyan]{model_name_or_path}[/cyan]", style=subhead_style_2, ) # 1. Initialize Ray cluster connection self._initialize_ray_cluster() # 2. Determine GPUs to use (from cluster) all_gpu_ids = self._determine_gpu_ids_from_cluster() # 3. Group GPUs for workers (ensuring same-node constraint if needed) self.worker_gpu_groups, self.worker_node_assignments = self._group_gpus_for_workers( all_gpu_ids, tensor_parallel_size ) num_workers = len(self.worker_gpu_groups) # Display cluster and GPU information self._display_cluster_info(all_gpu_ids, num_workers) # 4. Handle PT checkpoint (main process converts) if checkpoint_path: console.print( f" checkpoint: [yellow]{checkpoint_path}[/yellow]", style=subhead_style_2, ) console.print( " [yellow]Converting PT checkpoint to HuggingFace format in main process...[/yellow]", style=subhead_style_2, ) model_path = export_pt_to_safetensor( config_path=model_name_or_path, checkpoint_path=checkpoint_path, trust_remote_code=trust_remote_code ) else: model_path = model_name_or_path # 5. 
Create Workers console.print( f" Creating {num_workers} vLLM Workers...", style=subhead_style_2, ) self.workers = [] vllm_kwargs = { "gpu_memory_utilization": gpu_memory_utilization, "trust_remote_code": trust_remote_code, "dtype": dtype, "tensor_parallel_size": tensor_parallel_size, "max_logprobs": max_logprobs, } if max_model_len is not None: vllm_kwargs["max_model_len"] = max_model_len vllm_kwargs.update(kwargs) # Determine whether to enable optimizations (default behavior, can be overridden by force flags) # Check if any task in task_types requires disabling optimizations enable_optimizations = self._should_enable_optimizations() # Create Ray remote class with dynamic GPU count and scheduling strategy # Use scheduling_strategy to place workers on specific nodes for i, (gpu_group, node_id) in enumerate(zip(self.worker_gpu_groups, self.worker_node_assignments)): # Create worker with node placement constraint VllmWorkerRemote = ray.remote(num_gpus=tensor_parallel_size)(VllmWorker) # If we have node assignment, use scheduling strategy if node_id is not None: from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy scheduling_strategy = NodeAffinitySchedulingStrategy( node_id=node_id, soft=False # Hard constraint: must be on this node ) worker = VllmWorkerRemote.options( scheduling_strategy=scheduling_strategy ).remote( worker_id=i, model_path=model_path, gpu_ids=gpu_group, enable_optimizations=enable_optimizations, **vllm_kwargs ) else: # No node constraint, let Ray decide worker = VllmWorkerRemote.remote( worker_id=i, model_path=model_path, gpu_ids=gpu_group, enable_optimizations=enable_optimizations, **vllm_kwargs ) self.workers.append(worker) # Wait for all Workers to initialize console.print( " Waiting for all Workers to initialize...\n\n", style=subhead_style_2, ) ray.get([worker.generate_batch.remote({}, {}) for worker in self.workers]) console.print( f"✓ All Workers initialized successfully\n", style=success_style, ) # Print optimization configuration summary if force_enable_optimizations: console.print( "⚙️ Optimization mode: FORCED ENABLED (chunked_prefill & prefix_caching enabled for all tasks)", style=warning_style, ) elif force_disable_optimizations: console.print( "⚙️ Optimization mode: FORCED DISABLED (chunked_prefill & prefix_caching disabled for all tasks)", style=warning_style, ) self.num_params = self._count_model_parameters() def _count_model_parameters(self) -> Optional[float]: """ Override VllmMixin._count_model_parameters() for Ray-based generators. In Ray-based architecture, vLLM instances are in worker processes. Query the first worker to get model parameter count. Returns: float or None: Total number of parameters """ tensor_parallel_size = getattr(self, 'tensor_parallel_size', 1) if tensor_parallel_size > 1: console.print( f"Warning: Tensor parallel (size={tensor_parallel_size}) detected. 
" f"Skipping parameter count (would only count local shard).", style=warning_style, ) return None # Query the first worker to get parameter count try: import ray num_params = ray.get(self.workers[0].get_model_parameters.remote()) console.print( f"✓ Model parameters: {num_params / 1e9:.2f}B\n", style=success_style, ) return num_params except Exception as e: console.print( f"Warning: Failed to get parameter count from worker: {e}", style=warning_style, ) return None def _generate_standard( self, prompts: Dict[str, str], **kwargs ) -> tuple: """ Standard single-stage generation (round-robin assignment to multiple Workers) Args: prompts: {sample_id: prompt_text} **kwargs: Optional generation parameters, including: - worker_batch_size: Worker internal batch size, for avoiding vLLM scheduler issues (default 16) - return_logprobs: Whether to return cumulative logprobs for sampling mode (default False) Returns: Tuple of three dicts: - First dict: {sample_id: [generated_text_1, generated_text_2, ...]} - Second dict: {sample_id: [cum_logprob_1, cum_logprob_2, ...]} (for beam search or when return_logprobs=True) - Third dict: {sample_id: {"input_tokens": [int], "output_tokens": [int], "times": [float]}} (lists for multi-stage support) """ # Auto-enable return_logprobs if prompt_token is used (for recommendation tasks) has_prompt_token = bool(kwargs.get("prompt_token", None)) # Build sampling parameters using mixin method sampling_params_obj = self._build_sampling_params(**kwargs) # Convert to dict for passing to workers if hasattr(sampling_params_obj, 'beam_width'): # BeamSearchParams use_beam_search = True sampling_params = { "use_beam_search": True, "beam_width": sampling_params_obj.beam_width, "max_tokens": sampling_params_obj.max_tokens, } else: # SamplingParams use_beam_search = False sampling_params = { "n": sampling_params_obj.n, "max_tokens": sampling_params_obj.max_tokens, "temperature": sampling_params_obj.temperature, "top_p": sampling_params_obj.top_p, "top_k": sampling_params_obj.top_k, "repetition_penalty": sampling_params_obj.repetition_penalty, "presence_penalty": sampling_params_obj.presence_penalty, "frequency_penalty": sampling_params_obj.frequency_penalty, "return_logprobs": kwargs.get("return_logprobs", has_prompt_token), } # Add stop parameter if specified if sampling_params_obj.stop: sampling_params["stop"] = sampling_params_obj.stop console.print( f"Starting generation...", style=subhead_style_2, ) if use_beam_search: console.print( f"Sampling parameters (beam search): beam_width={sampling_params['beam_width']}, " f"max_tokens={sampling_params['max_tokens']}", style=subhead_style_2, ) else: console.print( f"Sampling parameters: n={sampling_params['n']}, max_tokens={sampling_params['max_tokens']}, " f"temperature={sampling_params['temperature']}, top_p={sampling_params['top_p']}, top_k={sampling_params['top_k']}, " f"repetition_penalty={sampling_params['repetition_penalty']}, " f"presence_penalty={sampling_params['presence_penalty']}, " f"frequency_penalty={sampling_params['frequency_penalty']}, " f"return_logprobs={sampling_params['return_logprobs']}", style=subhead_style_2, ) # Round-robin assign tasks to Workers sample_ids = list(prompts.keys()) num_workers = len(self.workers) worker_tasks = [dict() for _ in range(num_workers)] for i, sample_id in enumerate(sample_ids): worker_idx = i % num_workers worker_tasks[worker_idx][sample_id] = prompts[sample_id] console.print( f"Task distribution: {[len(task) for task in worker_tasks]}", style=subhead_style_2, ) console.print( 
f"Worker batch size: {self.worker_batch_size}", style=subhead_style_2, ) # Execute in parallel futures = [] for i, (worker, task) in enumerate(zip(self.workers, worker_tasks)): if task: # Only submit non-empty tasks future = worker.generate_batch.remote(task, sampling_params, self.worker_batch_size) futures.append(future) # Collect results worker_results = ray.get(futures) # Merge results (each worker_result is a tuple of (texts_dict, logprobs_dict, mfu_stats_dict)) results = {} logprobs = {} mfu_stats = {} for worker_result in worker_results: texts_dict, logprobs_dict, mfu_stats_dict = worker_result results.update(texts_dict) logprobs.update(logprobs_dict) mfu_stats.update(mfu_stats_dict) console.print( f"✓ Generation completed", style=success_style, ) return (results, logprobs, mfu_stats) def extract_token_logprobs( self, prompts: Dict[str, str], target_tokens: List[str], **kwargs ) -> tuple: """ Extract logprobs for specific target tokens (round-robin assignment to multiple Workers) Args: prompts: {sample_id: prompt_text} target_tokens: List of target tokens to extract probabilities for (e.g., ["是", "否"]) **kwargs: Optional parameters including generation config Returns: Tuple of three dicts: - First dict: {sample_id: [json_string]} where json_string is formatted probabilities - Second dict: {} (empty, no beam search logprobs for classification) - Third dict: {sample_id: {"input_tokens": [int], "output_tokens": [int], "times": [float]}} """ console.print( f"Extracting logprobs for tokens: {target_tokens}", style=subhead_style_2, ) console.print( f"Worker batch size: {self.worker_batch_size}", style=subhead_style_2, ) if not prompts: return ({}, {}, {}) # Build sampling parameters sampling_params = { "n": kwargs.get("num_return_sequences", 1), "max_tokens": kwargs.get("max_new_tokens", 1), "temperature": kwargs.get("temperature", 1.0), "top_p": kwargs.get("top_p", 1.0), "top_k": kwargs.get("top_k", -1), "repetition_penalty": kwargs.get("repetition_penalty", 1.0), "presence_penalty": kwargs.get("presence_penalty", 0.0), "frequency_penalty": kwargs.get("frequency_penalty", 0.0), "logprobs": kwargs.get("logprobs", 10), } console.print( f"Sampling parameters: n={sampling_params['n']}, max_tokens={sampling_params['max_tokens']}, " f"temperature={sampling_params['temperature']}, top_p={sampling_params['top_p']}, " f"top_k={sampling_params['top_k']}, repetition_penalty={sampling_params['repetition_penalty']}, " f"presence_penalty={sampling_params['presence_penalty']}, frequency_penalty={sampling_params['frequency_penalty']}, " f"logprobs={sampling_params['logprobs']}", style=subhead_style_2, ) # Round-robin assign tasks to Workers sample_ids = list(prompts.keys()) num_workers = len(self.workers) worker_tasks = [dict() for _ in range(num_workers)] for i, sample_id in enumerate(sample_ids): worker_idx = i % num_workers worker_tasks[worker_idx][sample_id] = prompts[sample_id] console.print( f"Task distribution: {[len(task) for task in worker_tasks]}", style=subhead_style_2, ) # Get Worker internal batch size worker_batch_size = self.worker_batch_size # Execute in parallel futures = [] for worker, task in zip(self.workers, worker_tasks): if task: # Only submit non-empty tasks future = worker.extract_token_logprobs_batch.remote( task, target_tokens, sampling_params, worker_batch_size ) futures.append(future) # Collect results worker_results = ray.get(futures) # Merge results (each worker_result is a tuple of (probs_dict, mfu_stats_dict)) results = {} mfu_stats = {} for worker_result in 
worker_results: probs_dict, mfu_stats_dict = worker_result results.update(probs_dict) mfu_stats.update(mfu_stats_dict) console.print( f"✓ Logprobs extraction completed", style=success_style, ) return (results, {}, mfu_stats) ================================================ FILE: data/README.md ================================================ # Dataset Documentation This directory contains data processing scripts and dataset format specifications for the OpenOneRec project. ## Table of Contents - [Quick Start](#quick-start) - Get started quickly with dataset download and processing - [Directory Structure](#directory-structure) - [Dataset Format Specification](#dataset-format-specification) - [Notes](#notes) ## Directory Structure - **general_text/**: General text data used in training, including pretraining and SFT datasets for mathematics, code, reasoning, and other domains - **onerec_data/**: Recommendation scenario data and corresponding processing scripts that convert raw recommendation data into LLM pretraining and SFT training formats ### General Text Data (general_text) The general text data directory contains information about the main general text datasets used in the project. The `pretrain.csv` and `sft.csv` files list all HuggingFace dataset URLs and their corresponding sample counts. For easier reproduction, we have also released our processed datasets on HuggingFace: - [Pretraining Dataset on HuggingFace](https://huggingface.co/datasets/OpenOneRec/OpenOneRec-General-Pretrain) - [SFT Dataset on HuggingFace](https://huggingface.co/datasets/OpenOneRec/OpenOneRec-General-SFT) > **NOTE**: The processed data on HuggingFace currently does not include some datasets (Nemotron_CC_Math_v1, Nemotron_Pretraining_Code_v1, Nemotron_CC_v2). We will provide a data processing script later to facilitate reproduction. ### OneRec Business Data (onerec_data) The OneRec business data directory contains data processing scripts for recommendation systems, converting raw data into LLM pretraining and SFT training formats. It includes data processing scripts for various recommendation scenarios such as video recommendation, user profiling, interactive recommendation, label prediction, and cross-domain recommendation. - [OpenOneRec Dataset on HuggingFace](https://huggingface.co/datasets/OpenOneRec/OpenOneRec-RecIF) ## Dataset Format Specification To standardize data processing, we use a unified Parquet data format. 
Each Parquet file contains the following fields:

### Field Description

| Field | Type | Required | Default | Description | Requirements |
|-------|------|----------|---------|-------------|--------------|
| uuid | str | Yes | Auto-generated UUID | Unique identifier | Must be a valid UUID format and unique within the same dataset |
| source | str | Yes | - | Data source identifier | Cannot be an empty string |
| metadata | str | No | "{}" | JSON-formatted metadata dictionary | Must be a valid JSON dictionary string |
| images | str | No | "{}" | (Deprecated) This project only trains on text; this field is not used | - |
| videos | str | No | "{}" | (Deprecated) This project only trains on text; this field is not used | - |
| messages | str | No | None | JSON-formatted message list for conversation-format data | Must be a valid JSON array; each message must have role and content fields |
| segments | str | No | None | JSON-formatted segment list for segmented data | Must be a valid JSON array; each segment must have a type field |
| image | str | No | None | (Deprecated) This project only trains on text; this field is not used | - |
| video | str | No | None | (Deprecated) This project only trains on text; this field is not used | - |
| text | str | No | None | Text content | No special requirements |
| label | str | No | None | Label information; if `image`, `video`, or `text` exists, it is the corresponding label | No special requirements |

### Data Format Examples

The data format supports two main types:

- **Segments Format**: For regular text data, using the `segments` field to store text segment lists
- **Chat Format**: For conversation data, using the `messages` field to store conversation message lists

**Chat Format Data (Conversation Data):**

| Field | Value |
|-------|-------|
| uuid | 550e8400-e29b-41d4-a716-446655440001 |
| source | conversation_dataset |
| metadata | '{}' |
| images | '{}' |
| videos | '{}' |
| messages | '[{"role": "user", "content": [{"type": "text", "text": "What is machine learning?"}]}, {"role": "assistant", "content": [{"type": "text", "text": "Machine learning is a subset of artificial intelligence."}]}]' |

**Segments Format Data (Regular Text):**

| Field | Value |
|-------|-------|
| uuid | 550e8400-e29b-41d4-a716-446655440002 |
| source | document_dataset |
| metadata | '{}' |
| images | '{}' |
| videos | '{}' |
| segments | '[{"type": "text", "text": "Introduction paragraph..."}, {"type": "text", "text": "Main content..."}]' |

### Field Validation Rules

| Validation Item | Rule Description |
|-----------------|------------------|
| JSON Field Validation | metadata must be a valid JSON dictionary string; the deprecated images and videos fields should be set to "{}" |
| Message Format Validation | The messages field (if present) must contain a valid message list; each message must have role and content fields |
| Role Validation | A message role must be one of user, assistant, or system |
| Content Type Validation | The type in message content must be text (this project only trains on text; image and video types are not supported) |
| Segment Format Validation | The segments field (if present) must contain a valid segment list; each segment must have a type field whose value should be "text" |

### File Size Recommendations

For efficient DataLoader data loading, it is recommended that each Parquet file contain approximately **1000 samples**. If the data volume is large, you can use sharding to split the data into multiple files.
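For illustration, a sharding pass consistent with these recommendations could look like the following minimal sketch (assuming pandas and an in-memory DataFrame `df`; the repository's `data/scripts/split_data.py` likely implements the real logic):

```python
import math

import pandas as pd


def shard_parquet(df: pd.DataFrame, out_dir: str, max_rows: int = 1000) -> None:
    """Write df as shards of at most max_rows rows, named part-XXXXX-of-XXXXX.parquet."""
    num_shards = math.ceil(len(df) / max_rows)
    for i in range(num_shards):
        shard = df.iloc[i * max_rows : (i + 1) * max_rows]
        shard.to_parquet(
            f"{out_dir}/part-{i:05d}-of-{num_shards:05d}.parquet", index=False
        )
```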
The recommended file naming format is:

```
part-00000-of-00010.parquet
part-00001-of-00010.parquet
...
part-00009-of-00010.parquet
```

## Quick Start

### 1. Download Datasets

First, download the corresponding datasets from HuggingFace:

- [Pretraining General Text Dataset](https://huggingface.co/datasets/OpenOneRec/OpenOneRec-General-Pretrain)
- [SFT General Text Dataset](https://huggingface.co/datasets/OpenOneRec/OpenOneRec-General-SFT)
- [OneRec Recommendation Dataset](https://huggingface.co/datasets/OpenOneRec/OpenOneRec-RecIF)

You can download the datasets using the following commands (run from the **project root directory**):

```bash
pip install huggingface_hub
export HF_TOKEN=

hf download OpenOneRec/OpenOneRec-General-Pretrain \
  --repo-type dataset \
  --token $HF_TOKEN \
  --local-dir ./raw_data/general_text/pretrain

hf download OpenOneRec/OpenOneRec-General-SFT \
  --repo-type dataset \
  --token $HF_TOKEN \
  --local-dir ./raw_data/general_text/sft

hf download OpenOneRec/OpenOneRec-RecIF \
  --repo-type dataset \
  --token $HF_TOKEN \
  --local-dir ./raw_data/onerec_data
```

### 2. Process Recommendation Data

Run:

```bash
cd data/onerec_data
bash run.sh
```

### 3. Pretraining Data Sharding

The generated data can be processed by calling the prepare scripts. Edit `prepare_pretrain.sh` or `prepare_sft.sh` and modify the following configuration:

```bash
GENERAL_TEXT_PATH="data/general_text"   # General text data path
REC_DATA_PATH="data/onerec_data/output" # Recommendation data output path
OUTPUT_DIR="./output/split_data"        # Final output path
MAX_ROWS=1000                           # Number of samples per file
```

Then run:

```bash
# Process pretraining data
bash prepare_pretrain.sh

# Process SFT data
bash prepare_sft.sh
```

### 4. Distillation Data Processing

Data processing for on-policy distillation. Edit `prepare_distillation.sh` and modify the following configuration:

```bash
INPUT_PATH="data/general_text"                       # General text data path
OUTPUT_FILE="./output/onpolicy_distillation.parquet" # Output file path
NUM_SAMPLES=200000                                   # Number of samples to sample
SEED=42                                              # Random seed
```

Then run:

```bash
bash prepare_distillation.sh
```

### 5. RL Data Processing

Data processing for reinforcement learning (RL) training. This merges multiple RL task datasets and splits them into training and test sets.
Edit `prepare_rl.sh` and modify the following configuration:

```bash
REC_DATA_PATH="data/onerec_data" # OneRec dataset path
OUTPUT_DIR="./output/rl_data"    # Output directory path
TEST_SIZE=1000                   # Number of test samples per subtask
SEED=42                          # Random seed
```

The script processes the following 5 RL task datasets:

- `sft_video_rec.parquet` - Video recommendation task
- `sft_ad_rec.parquet` - Ad recommendation task
- `sft_product_rec.parquet` - Product recommendation task
- `sft_interactive_rec.parquet` - Interactive recommendation task
- `sft_label_cond_rec.parquet` - Label-conditioned recommendation task

Then run:

```bash
bash prepare_rl.sh
```

Output:

- `./output/rl_data/train.parquet` - Training set (the remaining data after merging all tasks)
- `./output/rl_data/test.parquet` - Test set (1000 samples randomly sampled from the merged data)

## Notes

* All scripts only process `split=0` (training set) data by default

================================================
FILE: data/general_text/pretrain.csv
================================================
dataname,sample_num,huggingface_repo
Nemotron_CC_Math_v1,15440682,https://huggingface.co/datasets/nvidia/Nemotron-CC-Math-v1
Nemotron_Pretraining_Code_v1,5329298,https://huggingface.co/datasets/nvidia/Nemotron-Pretraining-Code-v1
Nemotron_CC_v2,2306412,https://huggingface.co/datasets/nvidia/Nemotron-CC-v2
reasoning_v1_20m,1666229,https://huggingface.co/datasets/glaiveai/reasoning-v1-20m
OpenMathReasoning,477179,https://huggingface.co/datasets/nvidia/OpenMathReasoning
NuminaMath-QwQ-CoT-5M,324270,https://huggingface.co/datasets/PrimeIntellect/NuminaMath-QwQ-CoT-5M
OpenCodeReasoning,109292,https://huggingface.co/datasets/nvidia/OpenCodeReasoning
KodCode_V1_SFT_R1,39211,https://huggingface.co/datasets/KodCode/KodCode-V1-SFT-R1
Chinese-Reasoning-Distil-Data,30000,https://huggingface.co/datasets/Mxode/Chinese-Reasoning-Distil-Data
medical-o1-reasoning-SFT,7000,https://huggingface.co/datasets/FreedomIntelligence/medical-o1-reasoning-SFT
Bespoke-Stratos-17k,2000,https://huggingface.co/datasets/bespokelabs/Bespoke-Stratos-17k

================================================
FILE: data/general_text/sft.csv
================================================
dataname,sample_num,huggingface_repo
OpenMathReasoning,510163,https://huggingface.co/datasets/nvidia/OpenMathReasoning
R1-Distill-SFT,502818,https://huggingface.co/datasets/ServiceNow-AI/R1-Distill-SFT
Infinity_Instruct,446773,https://huggingface.co/datasets/BAAI/Infinity-Instruct
OpenCoderReasoning,437768,https://huggingface.co/datasets/nvidia/OpenCodeReasoning
Chinese-Reasoning-Distil-Data,179037,https://huggingface.co/datasets/Mxode/Chinese-Reasoning-Distil-Data
Reasoning_Multi_subject_RLVR,172108,https://huggingface.co/datasets/punwaiw/multi-subject-rlvr-final-reasoning-traces
Reasoning_KodCode_V1_SFT_R1,163908,https://huggingface.co/datasets/KodCode/KodCode-V1-SFT-R1
DeepMath103K,92886,https://huggingface.co/datasets/zwhe99/DeepMath-103K
medical-o1-reasoning-SFT,50245,https://huggingface.co/datasets/FreedomIntelligence/medical-o1-reasoning-SFT

================================================
FILE: data/onerec_data/README.md
================================================
# OneRec Data Processing Scripts

This directory contains data processing scripts for the OneRec project, converting raw data into LLM pretraining and SFT training formats.
## Directory Structure

```
data/
├── pretrain/              # Pretrain data processing scripts
│   ├── video_rec.py       # Video recommendation pretrain
│   ├── user_profile.py    # User profile pretrain
│   └── item_understand.py # Item understanding alignment pretrain
├── sft/                   # SFT data processing scripts
│   ├── video_rec.py       # Video recommendation
│   ├── interactive_rec.py # Interactive recommendation
│   ├── label_cond_rec.py  # Label-conditioned recommendation
│   ├── label_pred.py      # Label prediction (binary classification)
│   ├── ad_rec.py          # Ad recommendation (cross-domain)
│   ├── product_rec.py     # Product recommendation (cross-domain)
│   ├── item_understand.py # Item understanding
│   └── rec_reason.py      # Recommendation reasoning
├── run.sh                 # Main execution script
└── README.md
```

## Quick Start

### 1. Configure Input Paths

Edit `run.sh` to set the following paths:

```bash
INPUT_METADATA="path/to/onerec_bench_release.parquet"
PID2SID_MAPPING="path/to/video_ad_pid2sid.parquet"
PRODUCT_PID2SID_MAPPING="path/to/product_pid2sid.parquet"
CAPTION_INPUT="path/to/pid2caption.parquet"
OUTPUT_BASE_DIR="./output"
```

### 2. Select Tasks to Run

Uncomment the tasks you want to run in `run.sh`:

```bash
# Pretrain tasks
RUN_PRETRAIN_VIDEO_REC=1
RUN_PRETRAIN_USER_PROFILE=1
RUN_PRETRAIN_SID2CAPTION=1

# SFT tasks
RUN_SFT_VIDEO_REC=1
RUN_SFT_INTERACTIVE_REC=1
# ...
```

### 3. Run

```bash
cd data
bash run.sh
```

## Task Descriptions

### Pretrain Tasks

| Task | Script | Description |
|------|--------|-------------|
| video_rec | `pretrain/video_rec.py` | Concatenate the user history SID sequence with the target SID sequence for sequence-modeling pretraining |
| user_profile | `pretrain/user_profile.py` | Use the `inter_user_profile_with_sid` field as pretrain text |
| item_understand | `pretrain/item_understand.py` | Build item-understanding alignment data using various template formats |

### SFT Tasks

| Task | Script | Description |
|------|--------|-------------|
| video_rec | `sft/video_rec.py` | Predict the next video based on user browsing history |
| interactive_rec | `sft/interactive_rec.py` | Recommend content based on user profile and search keywords |
| label_cond_rec | `sft/label_cond_rec.py` | Predict items by interaction type (like/follow/forward/etc.) |
| label_pred | `sft/label_pred.py` | Binary classification: predict whether the user will watch a video for long |
| ad_rec | `sft/ad_rec.py` | Cross-domain: predict ad clicks based on video and ad history |
| product_rec | `sft/product_rec.py` | Cross-domain: predict product clicks based on video and product history |
| item_understand | `sft/item_understand.py` | Generate a video description from its SID |
| rec_reason | `sft/rec_reason.py` | Generate recommendation reasoning: analyze user interests and explain recommendations |

## Output Format

### Pretrain Format

```json
{
  "source": "RecIF_VideoRec_Pretrain",
  "uuid": "xxx",
  "segments": [{"type": "text", "text": "..."}],
  "metadata": {"uid": 123}
}
```

### SFT Format

```json
{
  "source": "RecIF_VideoRec",
  "uuid": "xxx",
  "messages": [
    {"role": "system", "content": [{"type": "text", "text": "..."}]},
    {"role": "user", "content": [{"type": "text", "text": "..."}]},
    {"role": "assistant", "content": [{"type": "text", "text": "..."}]}
  ],
  "metadata": {"uid": 123}
}
```

## SID Format

All scripts use a unified SID format:

```
<|sid_begin|><|sid_end|>
```

Where `c0`, `c1`, `c2` are triplet codes obtained from the `pid2sid` mapping table.
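For reference, the conversion the scripts perform is roughly the following minimal sketch (`load_pid2sid` is a hypothetical helper; `sid_format` stands in for each script's `SID_FORMAT` template string, which is filled with the `c0`/`c1`/`c2` codes):

```python
import pandas as pd


def load_pid2sid(path: str) -> dict:
    """Map pid -> (c0, c1, c2) triplet code, read from the mapping parquet."""
    df = pd.read_parquet(path)
    return dict(zip(df["pid"], df["sid"]))


def pid_to_sid(pid: int, pid2sid: dict, sid_format: str) -> str:
    """Render one pid as a SID string; unmapped pids yield an empty string."""
    if pid not in pid2sid:
        return ""
    code = pid2sid[pid]
    return sid_format.format(c0=code[0], c1=code[1], c2=code[2])
```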
## Dependencies - pandas - numpy - tqdm ## Running Individual Scripts Each script can also be run independently: ```bash # Example: Run video_rec SFT task python sft/video_rec.py \ --input /path/to/metadata.parquet \ --pid2sid /path/to/pid2sid.parquet \ --output_dir ./output \ --seed 42 ``` ## Notes 1. All scripts process only `split=0` (training set) data by default 2. Output files are named as `{task_type}_{task_name}.parquet` 3. Cross-domain tasks (product_rec) require additional pid2sid mapping files ================================================ FILE: data/onerec_data/pretrain/item_understand.py ================================================ """ Item Understand Pretrain Task Input: caption parquet (pid, dense_caption) + pid2sid parquet Output: LLM Pretrain format parquet (segments) Task: Build pretrain data with SID and caption using various templates. """ import pandas as pd import argparse import json import uuid import random from pathlib import Path from tqdm import tqdm # ============== Configuration ============== SID_FORMAT = '<|sid_begin|><|sid_end|>' # Pretrain format templates PRETRAIN_TEMPLATES = [ # Format 1: JSON format lambda sid, caption: json.dumps({"视频ID": sid, "视频内容": caption}, ensure_ascii=False), # Format 2: Display format lambda sid, caption: f"视频{sid} 展示了以下内容:{caption}", # Format 3: Full description format lambda sid, caption: f"视频{sid} 的内容完整描述如下:{caption}", ] # ============== Core Functions ============== def pid_to_sid(pid, pid2sid: dict) -> str: """Convert a single pid to SID string.""" if pid not in pid2sid: return "" code = pid2sid[pid] return SID_FORMAT.format(c0=code[0], c1=code[1], c2=code[2]) def build_segments(sid: str, caption: str) -> str: """Build segments format JSON string for pretrain.""" template = random.choice(PRETRAIN_TEMPLATES) text = template(sid, caption) segments = [{"type": "text", "text": text}] return json.dumps(segments, ensure_ascii=False) def process_row(row, pid2sid: dict) -> dict: """Process a single row of data.""" pid = row['pid'] dense_caption = row['dense_caption'] # Check data validity if dense_caption is None or (isinstance(dense_caption, float) and pd.isna(dense_caption)): return None if not dense_caption: return None # Convert pid to SID sid = pid_to_sid(pid, pid2sid) if not sid: return None return { 'source': 'RecIF_ItemUnderstand_Pretrain', 'uuid': str(uuid.uuid4()), 'segments': build_segments(sid, dense_caption), 'metadata': json.dumps({'pid': int(pid), 'sid': sid}, ensure_ascii=False) } # ============== Main Function ============== def main(): parser = argparse.ArgumentParser(description="Item Understand Pretrain Data Processing") parser.add_argument('--input', type=str, required=True, help='Input caption parquet path') parser.add_argument('--pid2sid', type=str, required=True, help='pid2sid mapping parquet path') parser.add_argument('--output_dir', type=str, required=True, help='Output directory') parser.add_argument('--seed', type=int, default=42, help='Random seed') args = parser.parse_args() random.seed(args.seed) output_dir = Path(args.output_dir) output_dir.mkdir(parents=True, exist_ok=True) # 1. Load pid2sid mapping print(f"Loading pid2sid from {args.pid2sid}...") df_pid2sid = pd.read_parquet(args.pid2sid) pid2sid = dict(zip(df_pid2sid['pid'], df_pid2sid['sid'])) print(f" Loaded {len(pid2sid):,} mappings") # 2. Load caption data print(f"Loading caption data from {args.input}...") df = pd.read_parquet(args.input) print(f" Loaded {len(df):,} rows") # 3. 
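# --- Editor's illustration (not part of the original script) ---
# Given sid = "<SID token block for some pid>" and caption = "一只猫在弹钢琴"
# ("a cat playing the piano"), the three PRETRAIN_TEMPLATES above render as:
#   format 1: {"视频ID": "<SID>", "视频内容": "一只猫在弹钢琴"}
#   format 2: 视频<SID> 展示了以下内容:一只猫在弹钢琴
#   format 3: 视频<SID> 的内容完整描述如下:一只猫在弹钢琴
# build_segments() picks one template at random and wraps the result as
# [{"type": "text", "text": ...}], serialized to JSON for the 'segments' column.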
Process data print("Processing...") results = [] for _, row in tqdm(df.iterrows(), total=len(df)): result = process_row(row, pid2sid) if result: results.append(result) # 4. Save results df_output = pd.DataFrame(results) output_path = output_dir / 'train.parquet' df_output.to_parquet(output_path, index=False) print(f"Saved: {output_path} ({len(df_output):,} rows)") print("Done!") if __name__ == "__main__": main() ================================================ FILE: data/onerec_data/pretrain/user_profile.py ================================================ """ User Profile Pretrain Task Input: metadata parquet Output: LLM Pretrain format parquet (segments) Task: Directly use inter_user_profile_with_sid as pretrain text. """ import pandas as pd import argparse import json import uuid from pathlib import Path from tqdm import tqdm def process_row(row) -> dict: """Process a single row of data.""" user_profile = row.get('inter_user_profile_with_sid') # Check data validity if user_profile is None or (isinstance(user_profile, float) and pd.isna(user_profile)): return None if not user_profile or not isinstance(user_profile, str): return None segments = [{"type": "text", "text": user_profile}] return { 'source': 'RecIF_UserProfile_Pretrain', 'uuid': str(uuid.uuid4()), 'segments': json.dumps(segments, ensure_ascii=False), 'metadata': json.dumps({'uid': int(row['uid'])}, ensure_ascii=False) } def main(): parser = argparse.ArgumentParser(description="User Profile Pretrain Data Processing") parser.add_argument('--input', type=str, required=True, help='Input metadata parquet path') parser.add_argument('--output_dir', type=str, required=True, help='Output directory') args = parser.parse_args() output_dir = Path(args.output_dir) output_dir.mkdir(parents=True, exist_ok=True) # Load metadata print(f"Loading metadata from {args.input}...") df = pd.read_parquet(args.input) print(f" Loaded {len(df):,} rows") # Process data (train only, split=0) print("Processing...") results = [] for _, row in tqdm(df.iterrows(), total=len(df)): if row['split'] != 0: continue result = process_row(row) if result: results.append(result) # Save results df_train = pd.DataFrame(results) train_path = output_dir / 'train.parquet' df_train.to_parquet(train_path, index=False) print(f"Saved: {train_path} ({len(df_train):,} rows)") print("Done!") if __name__ == "__main__": main() ================================================ FILE: data/onerec_data/pretrain/video_rec.py ================================================ """ Video Recommendation Pretrain Task Input: metadata parquet + pid2sid parquet Output: LLM Pretrain format parquet (segments instead of messages) Task: Directly concatenate history SIDs and target SIDs without prompts. 
""" import pandas as pd import argparse import json import uuid from pathlib import Path from tqdm import tqdm # ============== Configuration ============== SID_FORMAT = '<|sid_begin|><|sid_end|>' HIST_MAX_LEN = 512 TARGET_MAX_LEN = 10 # ============== Core Functions ============== def pids_to_sids(pids, pid2sid: dict) -> str: """Convert a list of pids to SID string.""" if pids is None or (isinstance(pids, float) and pd.isna(pids)): return "" sids = [] for pid in pids: if pid in pid2sid: code = pid2sid[pid] sid = SID_FORMAT.format(c0=code[0], c1=code[1], c2=code[2]) sids.append(sid) return ''.join(sids) def build_segments(hist_sids: str, target_sids: str) -> str: """Build segments format JSON string for pretrain.""" text = f"{hist_sids}{target_sids}" segments = [{"type": "text", "text": text}] return json.dumps(segments, ensure_ascii=False) def process_row(row, pid2sid: dict) -> dict: """Process a single row of data.""" hist_pids = row['hist_video_pid'] target_pids = row['target_video_pid'] # Check data validity if hist_pids is None or (isinstance(hist_pids, float) and pd.isna(hist_pids)): return None if target_pids is None or (isinstance(target_pids, float) and pd.isna(target_pids)): return None # Truncate and convert to SID hist_sids = pids_to_sids(hist_pids[-HIST_MAX_LEN:], pid2sid) target_sids = pids_to_sids(target_pids[:TARGET_MAX_LEN], pid2sid) if not hist_sids or not target_sids: return None return { 'source': 'RecIF_VideoRec_Pretrain', 'uuid': str(uuid.uuid4()), 'segments': build_segments(hist_sids, target_sids), 'metadata': json.dumps({'uid': int(row['uid'])}, ensure_ascii=False) } # ============== Main Function ============== def main(): parser = argparse.ArgumentParser(description="Video Recommendation Pretrain Data Processing") parser.add_argument('--input', type=str, required=True, help='Input metadata parquet path') parser.add_argument('--pid2sid', type=str, required=True, help='pid2sid mapping parquet path') parser.add_argument('--output_dir', type=str, required=True, help='Output directory') args = parser.parse_args() output_dir = Path(args.output_dir) output_dir.mkdir(parents=True, exist_ok=True) # 1. Load pid2sid mapping print(f"Loading pid2sid from {args.pid2sid}...") df_pid2sid = pd.read_parquet(args.pid2sid) pid2sid = dict(zip(df_pid2sid['pid'], df_pid2sid['sid'])) print(f" Loaded {len(pid2sid):,} mappings") # 2. Load metadata print(f"Loading metadata from {args.input}...") df = pd.read_parquet(args.input) print(f" Loaded {len(df):,} rows") # 3. Process data (train only, split=0) print("Processing...") results = [] for _, row in tqdm(df.iterrows(), total=len(df)): if row['split'] != 0: continue result = process_row(row, pid2sid) if result: results.append(result) # 4. 
Save results df_train = pd.DataFrame(results) train_path = output_dir / 'train.parquet' df_train.to_parquet(train_path, index=False) print(f"Saved: {train_path} ({len(df_train):,} rows)") print("Done!") if __name__ == "__main__": main() ================================================ FILE: data/onerec_data/run.sh ================================================ #!/bin/bash # RecIF Data Processing Script # Generate all pretrain and SFT data set -e # ============== Task Selection ============== # Comment out tasks you don't want to run # Pretrain tasks RUN_PRETRAIN_VIDEO_REC=1 RUN_PRETRAIN_USER_PROFILE=1 RUN_PRETRAIN_ITEM_UNDERSTAND=1 # SFT tasks RUN_SFT_VIDEO_REC=1 RUN_SFT_INTERACTIVE_REC=1 RUN_SFT_LABEL_COND_REC=1 RUN_SFT_LABEL_PRED=1 RUN_SFT_AD_REC=1 RUN_SFT_PRODUCT_REC=1 RUN_SFT_ITEM_UNDERSTAND=1 RUN_SFT_REC_REASON=1 # ============== Configuration ============== SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" INPUT_METADATA="../../raw_data/onerec_data/onerec_bench_release.parquet" PID2SID_MAPPING="../../raw_data/onerec_data/video_ad_pid2sid.parquet" PRODUCT_PID2SID_MAPPING="../../raw_data/onerec_data/product_pid2sid.parquet" CAPTION_INPUT="../../raw_data/onerec_data/pid2caption.parquet" OUTPUT_BASE_DIR="../../output" SEED=42 # ============== Helper Function ============== run_task() { local task_type=$1 local task_name=$2 local script_path=$3 shift 3 local extra_args="$@" local output_file="${OUTPUT_BASE_DIR}/${task_type}_${task_name}.parquet" local temp_dir=$(mktemp -d) echo " Output: ${output_file}" python3 "${script_path}" --output_dir "${temp_dir}" ${extra_args} if [ -f "${temp_dir}/train.parquet" ]; then mv "${temp_dir}/train.parquet" "${output_file}" fi rm -rf "${temp_dir}" } # ============== Main ============== echo "========================================" echo "RecIF Data Processing" echo "========================================" echo "Metadata: ${INPUT_METADATA}" echo "PID2SID: ${PID2SID_MAPPING}" echo "Caption: ${CAPTION_INPUT}" echo "Output: ${OUTPUT_BASE_DIR}" echo "" mkdir -p "${OUTPUT_BASE_DIR}" # ============== Pretrain Tasks ============== echo "========================================" echo "Pretrain Tasks" echo "========================================" if [ "${RUN_PRETRAIN_VIDEO_REC}" = "1" ]; then echo "[pretrain] video_rec..." run_task "pretrain" "video_rec" "${SCRIPT_DIR}/pretrain/video_rec.py" \ --input "${INPUT_METADATA}" --pid2sid "${PID2SID_MAPPING}" fi if [ "${RUN_PRETRAIN_USER_PROFILE}" = "1" ]; then echo "[pretrain] user_profile..." run_task "pretrain" "user_profile" "${SCRIPT_DIR}/pretrain/user_profile.py" \ --input "${INPUT_METADATA}" fi if [ "${RUN_PRETRAIN_ITEM_UNDERSTAND}" = "1" ]; then echo "[pretrain] item_understand..." run_task "pretrain" "item_understand" "${SCRIPT_DIR}/pretrain/item_understand.py" \ --input "${CAPTION_INPUT}" --pid2sid "${PID2SID_MAPPING}" --seed ${SEED} fi # ============== SFT Tasks ============== echo "" echo "========================================" echo "SFT Tasks" echo "========================================" if [ "${RUN_SFT_VIDEO_REC}" = "1" ]; then echo "[sft] video_rec..." run_task "sft" "video_rec" "${SCRIPT_DIR}/sft/video_rec.py" \ --input "${INPUT_METADATA}" --pid2sid "${PID2SID_MAPPING}" --seed ${SEED} fi if [ "${RUN_SFT_INTERACTIVE_REC}" = "1" ]; then echo "[sft] interactive_rec..." 
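  # (Editor's note) run_task usage: run_task <task_type> <task_name> <script> [extra args...]
  # It runs the script into a temp dir, then renames the resulting train.parquet to
  # ${OUTPUT_BASE_DIR}/<task_type>_<task_name>.parquet -- here, sft_interactive_rec.parquet.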
run_task "sft" "interactive_rec" "${SCRIPT_DIR}/sft/interactive_rec.py" \ --input "${INPUT_METADATA}" --pid2sid "${PID2SID_MAPPING}" --seed ${SEED} fi if [ "${RUN_SFT_LABEL_COND_REC}" = "1" ]; then echo "[sft] label_cond_rec..." run_task "sft" "label_cond_rec" "${SCRIPT_DIR}/sft/label_cond_rec.py" \ --input "${INPUT_METADATA}" --pid2sid "${PID2SID_MAPPING}" --seed ${SEED} fi if [ "${RUN_SFT_LABEL_PRED}" = "1" ]; then echo "[sft] label_pred..." run_task "sft" "label_pred" "${SCRIPT_DIR}/sft/label_pred.py" \ --input "${INPUT_METADATA}" --pid2sid "${PID2SID_MAPPING}" --seed ${SEED} fi if [ "${RUN_SFT_AD_REC}" = "1" ]; then echo "[sft] ad_rec..." run_task "sft" "ad_rec" "${SCRIPT_DIR}/sft/ad_rec.py" \ --input "${INPUT_METADATA}" --pid2sid "${PID2SID_MAPPING}" --seed ${SEED} fi if [ "${RUN_SFT_PRODUCT_REC}" = "1" ]; then echo "[sft] product_rec..." run_task "sft" "product_rec" "${SCRIPT_DIR}/sft/product_rec.py" \ --input "${INPUT_METADATA}" --pid2sid "${PID2SID_MAPPING}" \ --product_pid2sid "${PRODUCT_PID2SID_MAPPING}" --seed ${SEED} fi if [ "${RUN_SFT_ITEM_UNDERSTAND}" = "1" ]; then echo "[sft] item_understand..." run_task "sft" "item_understand" "${SCRIPT_DIR}/sft/item_understand.py" \ --input "${CAPTION_INPUT}" --pid2sid "${PID2SID_MAPPING}" --seed ${SEED} fi if [ "${RUN_SFT_REC_REASON}" = "1" ]; then echo "[sft] rec_reason..." run_task "sft" "rec_reason" "${SCRIPT_DIR}/sft/rec_reason.py" \ --input "${INPUT_METADATA}" fi # ============== Summary ============== echo "" echo "========================================" echo "Summary" echo "========================================" ls -lh "${OUTPUT_BASE_DIR}"/*.parquet 2>/dev/null || echo "No parquet files found" echo "" echo "Done!" ================================================ FILE: data/onerec_data/sft/ad_rec.py ================================================ """ Ad Recommendation Task (Cross-domain) Input: metadata parquet + pid2sid parquet Output: LLM SFT training format parquet Task: Predict ad videos the user will click based on video watch history and ad click history. 
""" import pandas as pd import argparse import json import uuid import random from pathlib import Path from tqdm import tqdm # ============== Configuration ============== SID_FORMAT = '<|sid_begin|><|sid_end|>' VIDEO_HIST_MAX_LEN = 100 AD_HIST_MAX_LEN = 200 TARGET_MAX_LEN = 10 # System prompts (Chinese) SYSTEM_PROMPTS = [ "你是一个智能广告推荐助手,能够根据用户的视频观看历史和广告点击行为,预测用户接下来可能点击的广告视频。", "你是一个广告点击预测专家,擅长分析用户的观看习惯和广告点击偏好,预测用户的广告兴趣。", "你是一个个性化广告推荐系统,能够基于用户的视频观看历史和广告点击记录,预测用户未来可能点击的广告。", "你是一个用户行为分析助手,专注于理解用户的内容偏好和广告兴趣,推荐相关广告视频。", "你是一个广告推荐引擎,通过学习用户的视频观看和广告点击历史,预测用户对广告的兴趣。", ] # Video watch history prompts (Chinese) VIDEO_WATCH_PROMPTS = [ "用户观看过的视频:", "用户浏览过的视频内容:", "用户长时间观看的视频:", "用户感兴趣的视频:", ] # Ad click history prompts (Chinese) AD_CLICK_PROMPTS = [ "用户点击过的广告视频:", "用户浏览过的广告视频:", "用户感兴趣的广告视频:", "用户历史广告点击记录:", ] # Task prompts (Chinese) TASK_PROMPTS = [ "请根据用户的观看和广告点击历史,预测用户接下来可能点击的广告视频。", "基于以上记录,推荐用户可能感兴趣并点击的广告视频。", "分析用户的行为偏好,预测用户下一步会点击哪些广告视频。", "根据用户的视频观看和广告点击习惯,推荐用户可能点击的广告视频。", "请推荐用户接下来可能感兴趣并点击的广告视频。", ] # ============== Core Functions ============== def pids_to_sids(pids, pid2sid: dict) -> str: """Convert a list of pids to SID string.""" if pids is None or (isinstance(pids, float) and pd.isna(pids)): return "" sids = [] for pid in pids: if pid in pid2sid: code = pid2sid[pid] sid = SID_FORMAT.format(c0=code[0], c1=code[1], c2=code[2]) sids.append(sid) return ''.join(sids) def build_messages(user_content: str, task_prompt: str, answer: str) -> str: """Build messages format JSON string.""" system_prompt = random.choice(SYSTEM_PROMPTS) messages = [ {"role": "system", "content": [{"type": "text", "text": system_prompt}]}, {"role": "user", "content": [{"type": "text", "text": user_content + "\n" + task_prompt}]}, {"role": "assistant", "content": [{"type": "text", "text": answer}]} ] return json.dumps(messages, ensure_ascii=False) def process_row(row, pid2sid: dict) -> dict: """Process a single row of data.""" hist_ad_pids = row['hist_ad_pid'] target_ad_pids = row['target_ad_pid'] # Check data validity if target_ad_pids is None or (isinstance(target_ad_pids, float) and pd.isna(target_ad_pids)): return None if len(target_ad_pids) == 0: return None # Build user content parts user_content_parts = [] # 1. Process video watch history (long-view videos) hist_longview_video_list = row['hist_longview_video_list'] if hist_longview_video_list is not None and not (isinstance(hist_longview_video_list, float) and pd.isna(hist_longview_video_list)): if len(hist_longview_video_list) > 0: # Keep the most recent videos (rightmost in the list) video_sids = pids_to_sids(hist_longview_video_list[-VIDEO_HIST_MAX_LEN:], pid2sid) if video_sids: video_prompt = random.choice(VIDEO_WATCH_PROMPTS) user_content_parts.append(f"{video_prompt}{video_sids}") # 2. Process ad click history if hist_ad_pids is not None and not (isinstance(hist_ad_pids, float) and pd.isna(hist_ad_pids)): if len(hist_ad_pids) > 0: # Keep the most recent ads (rightmost in the list) ad_sids = pids_to_sids(hist_ad_pids[-AD_HIST_MAX_LEN:], pid2sid) if ad_sids: ad_prompt = random.choice(AD_CLICK_PROMPTS) user_content_parts.append(f"{ad_prompt}{ad_sids}") # Need at least one type of history if not user_content_parts: return None # 3. 
Process target ad videos answer = pids_to_sids(target_ad_pids[:TARGET_MAX_LEN], pid2sid) if not answer: return None # Build final messages user_content = "\n".join(user_content_parts) task_prompt = random.choice(TASK_PROMPTS) return { 'source': 'RecIF_AdRec', 'uuid': str(uuid.uuid4()), 'messages': build_messages(user_content, task_prompt, answer), 'metadata': json.dumps({'uid': int(row['uid'])}, ensure_ascii=False) } # ============== Main Function ============== def main(): parser = argparse.ArgumentParser(description="Ad Recommendation Task Data Processing") parser.add_argument('--input', type=str, required=True, help='Input metadata parquet path') parser.add_argument('--pid2sid', type=str, required=True, help='pid2sid mapping parquet path') parser.add_argument('--output_dir', type=str, required=True, help='Output directory') parser.add_argument('--seed', type=int, default=42, help='Random seed') args = parser.parse_args() random.seed(args.seed) output_dir = Path(args.output_dir) output_dir.mkdir(parents=True, exist_ok=True) # 1. Load pid2sid mapping print(f"Loading pid2sid from {args.pid2sid}...") df_pid2sid = pd.read_parquet(args.pid2sid) pid2sid = dict(zip(df_pid2sid['pid'], df_pid2sid['sid'])) print(f" Loaded {len(pid2sid):,} mappings") # 2. Load metadata print(f"Loading metadata from {args.input}...") df = pd.read_parquet(args.input) print(f" Loaded {len(df):,} rows") # 3. Process data (train only, split=0) print("Processing...") results = [] for _, row in tqdm(df.iterrows(), total=len(df)): if row['split'] != 0: continue result = process_row(row, pid2sid) if result: results.append(result) # 4. Save results df_train = pd.DataFrame(results) train_path = output_dir / 'train.parquet' df_train.to_parquet(train_path, index=False) print(f"Saved: {train_path} ({len(df_train):,} rows)") print("Done!") if __name__ == "__main__": main() ================================================ FILE: data/onerec_data/sft/interactive_rec.py ================================================ """ Interactive Recommendation Task Input: metadata parquet + pid2sid parquet Output: LLM SFT training format parquet Task: Given user profile (inter_user_profile_with_sid) and search keyword, predict items the user will interact with. 
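Example (illustrative): a row whose inter_keyword_to_items is {"美食": [pid1, pid2]}
("美食" = food) yields one sample whose user turn embeds the profile plus that keyword,
and whose assistant turn is the concatenated SID string for pid1 and pid2; each keyword
in the dict becomes its own sample.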
""" import pandas as pd import json import uuid import random import argparse from pathlib import Path from tqdm import tqdm # ============== Configuration ============== SID_FORMAT = '<|sid_begin|><|sid_end|>' TARGET_MAX_LEN = 10 # System prompts (Chinese) SYSTEM_PROMPTS = [ "你是一个智能推荐助手,能够根据用户的兴趣画像和当前对话需求,精准推荐用户可能感兴趣的内容。", "你是一个个性化推荐专家,擅长理解用户画像和对话意图,提供精准的内容推荐。", "你是一个对话式推荐系统,能够基于用户的兴趣特征和搜索意图,推荐最相关的内容。", "你是一个交互式内容推荐引擎,专注于理解用户画像和对话上下文,提供个性化推荐。", "你是一个智能内容顾问,通过分析用户兴趣和对话关键词,推荐符合需求的内容。", "你是一位资深的推荐算法专家,精通用户画像分析和个性化匹配,能够为每位用户提供量身定制的内容推荐。", "你是一个具备深度学习能力的推荐引擎,可以准确捕捉用户兴趣点,并结合实时需求给出最优推荐方案。", "你是一个智能化的内容匹配系统,擅长从海量信息中筛选出与用户画像和查询意图高度契合的内容。", "你是一个AI驱动的推荐助理,能够综合分析用户的历史偏好和当前需求,提供精准且多元化的内容推荐。", "你是一个智慧型推荐顾问,通过理解用户的兴趣图谱和语义意图,实现千人千面的个性化推荐。", ] # User prompts (Chinese) USER_PROMPTS = [ "用户画像:\n{user_profile}\n\n用户查询:{keyword}\n\n请推荐相关内容。", "用户兴趣:\n{user_profile}\n\n搜索关键词:{keyword}\n\n请根据用户需求推荐内容。", "用户特征:\n{user_profile}\n\n当前需求:{keyword}\n\n请提供个性化推荐。", "【用户画像】\n{user_profile}\n\n【用户输入】\n{keyword}\n\n基于以上信息,推荐合适的内容。", "用户的兴趣偏好:\n{user_profile}\n\n用户正在寻找:{keyword}\n\n请推荐最相关的内容。", "这是用户的兴趣画像:\n{user_profile}\n\n用户现在想了解关于\"{keyword}\"的内容,能帮忙推荐一些吗?", "用户平时喜欢:\n{user_profile}\n\n现在用户搜索了\"{keyword}\",请根据用户的兴趣推荐相关内容。", "根据用户画像显示,用户兴趣如下:\n{user_profile}\n\n用户刚刚输入了\"{keyword}\",麻烦推荐一些合适的内容。", "用户的兴趣领域包括:\n{user_profile}\n\n用户正在查找\"{keyword}\"相关的内容,请给出推荐。", "用户的个人画像如下:\n{user_profile}\n\n用户搜索了\"{keyword}\"这个关键词,请推荐一些相关的内容。", ] # ============== Core Functions ============== def pids_to_sids(pids, pid2sid: dict) -> str: """Convert a list of pids to SID string.""" if pids is None or (isinstance(pids, float) and pd.isna(pids)): return "" sids = [] for pid in pids: if pid in pid2sid: code = pid2sid[pid] sid = SID_FORMAT.format(c0=code[0], c1=code[1], c2=code[2]) sids.append(sid) return ''.join(sids) def build_messages(user_profile: str, keyword: str, answer: str) -> str: """Build messages format JSON string.""" system_prompt = random.choice(SYSTEM_PROMPTS) user_prompt = random.choice(USER_PROMPTS).format(user_profile=user_profile, keyword=keyword) messages = [ {"role": "system", "content": [{"type": "text", "text": system_prompt}]}, {"role": "user", "content": [{"type": "text", "text": user_prompt}]}, {"role": "assistant", "content": [{"type": "text", "text": answer}]} ] return json.dumps(messages, ensure_ascii=False) def process_row(row, pid2sid: dict) -> list: """Process a single row of data. 
Returns a list of results (one per keyword).""" user_profile = row.get('inter_user_profile_with_sid') inter_keyword_to_items = row['inter_keyword_to_items'] # Check user profile validity if user_profile is None or (isinstance(user_profile, float) and pd.isna(user_profile)): return [] if not user_profile or not isinstance(user_profile, str): return [] # Check keyword_to_items validity if inter_keyword_to_items is None or (isinstance(inter_keyword_to_items, float) and pd.isna(inter_keyword_to_items)): return [] # Parse JSON string if needed if isinstance(inter_keyword_to_items, str): try: inter_keyword_to_items = json.loads(inter_keyword_to_items) except json.JSONDecodeError: return [] if not isinstance(inter_keyword_to_items, dict) or len(inter_keyword_to_items) == 0: return [] results = [] for keyword, item_ids in inter_keyword_to_items.items(): if not keyword or not item_ids: continue # Convert target items to SIDs answer = pids_to_sids(item_ids[:TARGET_MAX_LEN], pid2sid) if not answer: continue result = { 'source': 'RecIF_InteractiveRec', 'uuid': str(uuid.uuid4()), 'messages': build_messages(user_profile, keyword, answer), 'metadata': json.dumps({'uid': int(row['uid']), 'keyword': keyword}, ensure_ascii=False) } results.append(result) return results # ============== Main Function ============== def main(): parser = argparse.ArgumentParser(description="Interactive Recommendation Task Data Processing") parser.add_argument('--input', type=str, required=True, help='Input metadata parquet path') parser.add_argument('--pid2sid', type=str, required=True, help='pid2sid mapping parquet path') parser.add_argument('--output_dir', type=str, required=True, help='Output directory') parser.add_argument('--seed', type=int, default=42, help='Random seed') args = parser.parse_args() random.seed(args.seed) output_dir = Path(args.output_dir) output_dir.mkdir(parents=True, exist_ok=True) # 1. Load pid2sid mapping print(f"Loading pid2sid from {args.pid2sid}...") df_pid2sid = pd.read_parquet(args.pid2sid) pid2sid = dict(zip(df_pid2sid['pid'], df_pid2sid['sid'])) print(f" Loaded {len(pid2sid):,} mappings") # 2. Load metadata print(f"Loading metadata from {args.input}...") df = pd.read_parquet(args.input) print(f" Loaded {len(df):,} rows") # 3. Process data (train only, split=0) print("Processing...") results = [] for _, row in tqdm(df.iterrows(), total=len(df)): if row['split'] != 0: continue row_results = process_row(row, pid2sid) for result in row_results: results.append(result) # 4. Save results df_train = pd.DataFrame(results) train_path = output_dir / 'train.parquet' df_train.to_parquet(train_path, index=False) print(f"Saved: {train_path} ({len(df_train):,} rows)") print("Done!") if __name__ == "__main__": main() ================================================ FILE: data/onerec_data/sft/item_understand.py ================================================ """ Item Understand Task Input: caption parquet (pid, dense_caption) + pid2sid parquet Output: LLM SFT training format parquet Task: Given a video SID, generate its description/caption. 
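Example (illustrative): the user turn might be "请描述 <SID> 的内容" ("describe the
content of <SID>"), and the assistant turn is that pid's dense_caption verbatim.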
""" import pandas as pd import argparse import json import uuid import random from pathlib import Path from tqdm import tqdm # ============== Configuration ============== SID_FORMAT = '<|sid_begin|><|sid_end|>' # System prompts (Chinese) SYSTEM_PROMPTS = [ "你是一名视频描述生成器,请根据下面的视频token生成视频描述。", "你是一个专业的视频内容分析助手,能够理解视频token并生成准确的描述。", "你是一位视频理解专家,擅长将视频token转换为详细的文字描述。", "作为视频内容解析助手,你需要根据视频token提供精准的内容描述。", "你是一个智能视频解说员,可以根据视频token创建生动的描述。", "你具备理解视频token并生成高质量描述的能力。", "你是视频内容描述专家,能够将视频token转化为易懂的文字说明。", "作为AI视频分析助手,你可以根据视频token生成详细准确的描述。", ] # User prompts (Chinese) USER_PROMPTS = [ "请描述 {sid} 的内容", "这段视频 {sid} 展示了什么?", "请解释 {sid} 中的内容", "能否说明 {sid} 里发生了什么?", "请分析 {sid} 的具体内容", "{sid} 这个视频讲的是什么?", "请详细描述 {sid}", "告诉我 {sid} 的内容是什么", "请为 {sid} 生成描述", "{sid} 包含哪些内容?", "请说明视频 {sid} 的主要内容", "描述一下 {sid} 中展现的场景", "{sid} 这段内容是关于什么的?", "请解读 {sid} 的视频内容", "能描述下 {sid} 吗?", "{sid} 里面有什么?", "请对 {sid} 进行内容说明", "这个 {sid} 是什么内容?", "分析 {sid} 并给出描述", "请阐述 {sid} 的内容细节", ] # ============== Core Functions ============== def pid_to_sid(pid, pid2sid: dict) -> str: """Convert a single pid to SID string.""" if pid not in pid2sid: return "" code = pid2sid[pid] return SID_FORMAT.format(c0=code[0], c1=code[1], c2=code[2]) def build_messages(sid: str, caption: str) -> str: """Build messages format JSON string.""" system_prompt = random.choice(SYSTEM_PROMPTS) user_prompt = random.choice(USER_PROMPTS).format(sid=sid) messages = [ {"role": "system", "content": [{"type": "text", "text": system_prompt}]}, {"role": "user", "content": [{"type": "text", "text": user_prompt}]}, {"role": "assistant", "content": [{"type": "text", "text": caption}]} ] return json.dumps(messages, ensure_ascii=False) def process_row(row, pid2sid: dict) -> dict: """Process a single row of data.""" pid = row['pid'] dense_caption = row['dense_caption'] # Check data validity if dense_caption is None or (isinstance(dense_caption, float) and pd.isna(dense_caption)): return None if not dense_caption: return None # Convert pid to SID sid = pid_to_sid(pid, pid2sid) if not sid: return None return { 'source': 'RecIF_ItemUnderstand', 'uuid': str(uuid.uuid4()), 'messages': build_messages(sid, dense_caption), 'metadata': json.dumps({'pid': int(pid), 'sid': sid}, ensure_ascii=False) } # ============== Main Function ============== def main(): parser = argparse.ArgumentParser(description="Item Understand Task Data Processing") parser.add_argument('--input', type=str, required=True, help='Input caption parquet path') parser.add_argument('--pid2sid', type=str, required=True, help='pid2sid mapping parquet path') parser.add_argument('--output_dir', type=str, required=True, help='Output directory') parser.add_argument('--seed', type=int, default=42, help='Random seed') args = parser.parse_args() random.seed(args.seed) output_dir = Path(args.output_dir) output_dir.mkdir(parents=True, exist_ok=True) # 1. Load pid2sid mapping print(f"Loading pid2sid from {args.pid2sid}...") df_pid2sid = pd.read_parquet(args.pid2sid) pid2sid = dict(zip(df_pid2sid['pid'], df_pid2sid['sid'])) print(f" Loaded {len(pid2sid):,} mappings") # 2. Load caption data print(f"Loading caption data from {args.input}...") df = pd.read_parquet(args.input) print(f" Loaded {len(df):,} rows") # 3. Process data print("Processing...") results = [] for _, row in tqdm(df.iterrows(), total=len(df)): result = process_row(row, pid2sid) if result: results.append(result) # 4. 
Save results df_output = pd.DataFrame(results) output_path = output_dir / 'train.parquet' df_output.to_parquet(output_path, index=False) print(f"Saved: {output_path} ({len(df_output):,} rows)") print("Done!") if __name__ == "__main__": main() ================================================ FILE: data/onerec_data/sft/label_cond_rec.py ================================================ """ Label Conditional Recommendation Task Input: metadata parquet + pid2sid parquet Output: LLM SFT training format parquet Task: Predict items that users will interact with under specific behavior types (longview/like/follow/forward/not_interested). """ import pandas as pd import numpy as np import argparse import json import uuid import random from pathlib import Path from tqdm import tqdm # ============== Configuration ============== SID_FORMAT = '<|sid_begin|><|sid_end|>' INTERACTION_MAX_LEN = 10 # Max items per interaction type # Interaction types INTERACTION_TYPES = ["longview", "like", "follow", "forward", "not_interested"] # System prompts (Chinese) SYSTEM_PROMPTS = [ "你是一个智能推荐助手,能够根据用户对不同内容的互动行为,精准推荐用户可能感兴趣的下一个内容。", "你是一个内容推荐专家,擅长分析用户的互动模式,预测用户的内容偏好。", "你是一个个性化推荐系统,能够基于用户的历史互动行为,预测用户未来可能产生的互动。", "你是一个用户行为分析助手,专注于理解用户的兴趣偏好,并推荐相关内容。", ] # Interaction type descriptions (Chinese) INTERACTION_PROMPTS = { "longview": ["用户长时观看过以下内容:", "用户完整观看过的内容:", "用户深度浏览过以下内容:"], "like": ["用户点赞过以下内容:", "用户喜欢的内容:", "获得用户点赞的内容:"], "follow": ["用户关注过以下内容的作者:", "用户关注了这些内容的创作者:"], "forward": ["用户转发过以下内容:", "用户分享过的内容:", "用户向他人推荐的内容:"], "not_interested": ["用户表示不感兴趣的内容:", "用户标记为不感兴趣的内容:"], } # Task prompts for each interaction type (Chinese) TASK_PROMPTS = { "longview": ["请根据用户的互动行为,推荐用户可能会长时观看的内容。", "基于以上互动记录,预测用户会完整观看的内容。"], "like": ["请根据用户的互动行为,推荐用户可能会点赞的内容。", "基于用户的互动偏好,预测用户会给哪些内容点赞。"], "follow": ["请根据用户的互动行为,推荐用户可能会关注其作者的内容。", "基于用户的关注偏好,预测用户会关注哪些内容的创作者。"], "forward": ["请根据用户的互动行为,推荐用户可能会转发的内容。", "基于用户的分享习惯,预测用户会转发的内容。"], "not_interested": ["请根据用户的互动行为,预测用户可能会表示不感兴趣的内容。", "基于用户的偏好,预测用户可能会标记为不感兴趣的内容。"], } # ============== Core Functions ============== def pids_to_sids(pids, pid2sid: dict) -> str: """Convert a list of pids to SID string.""" if pids is None or (isinstance(pids, float) and pd.isna(pids)): return "" sids = [] for pid in pids: if pid in pid2sid: code = pid2sid[pid] sid = SID_FORMAT.format(c0=code[0], c1=code[1], c2=code[2]) sids.append(sid) return ''.join(sids) def build_messages(user_content: str, task_prompt: str, answer: str) -> str: """Build messages format JSON string.""" system_prompt = random.choice(SYSTEM_PROMPTS) messages = [ {"role": "system", "content": [{"type": "text", "text": system_prompt}]}, {"role": "user", "content": [{"type": "text", "text": user_content + "\n" + task_prompt}]}, {"role": "assistant", "content": [{"type": "text", "text": answer}]} ] return json.dumps(messages, ensure_ascii=False) def process_row(row, pid2sid: dict) -> dict: """Process a single row of data.""" hist_pids = row['hist_video_pid'] target_pids = row['target_video_pid'] # Check data validity if hist_pids is None or (isinstance(hist_pids, float) and pd.isna(hist_pids)): return None if target_pids is None or (isinstance(target_pids, float) and pd.isna(target_pids)): return None # Build user interaction history description user_content_parts = [] for interaction in INTERACTION_TYPES: hist_col = f'hist_video_{interaction}' if hist_col not in row or row[hist_col] is None: continue mask = row[hist_col] if isinstance(mask, float) and pd.isna(mask): continue # Filter pids with interaction based on mask if len(mask) == 
len(hist_pids): mask_array = np.array(mask) pids_array = np.array(hist_pids) interaction_pids = pids_array[mask_array == 1].tolist() interaction_pids = interaction_pids[-INTERACTION_MAX_LEN:] if interaction_pids: sids = pids_to_sids(interaction_pids, pid2sid) if sids: prompt = random.choice(INTERACTION_PROMPTS[interaction]) user_content_parts.append(f"{prompt}{sids}") if not user_content_parts: return None # Randomly select a target interaction type with data available_targets = [] for interaction in INTERACTION_TYPES: target_col = f'target_video_{interaction}' if target_col not in row or row[target_col] is None: continue target_mask = row[target_col] if isinstance(target_mask, float) and pd.isna(target_mask): continue if len(target_mask) == len(target_pids) and sum(1 for x in target_mask if x == 1) > 0: available_targets.append(interaction) if not available_targets: return None # Randomly select an interaction type as target selected_interaction = random.choice(available_targets) target_col = f'target_video_{selected_interaction}' target_mask = row[target_col] # Filter target_pids target_mask_array = np.array(target_mask) target_pids_array = np.array(target_pids) filtered_target_pids = target_pids_array[target_mask_array == 1].tolist() filtered_target_pids = filtered_target_pids[:INTERACTION_MAX_LEN] # Convert to SID answer = pids_to_sids(filtered_target_pids, pid2sid) if not answer: return None # Build final messages user_content = "\n".join(user_content_parts) task_prompt = random.choice(TASK_PROMPTS[selected_interaction]) return { 'source': 'RecIF_LabelCondRec', 'uuid': str(uuid.uuid4()), 'messages': build_messages(user_content, task_prompt, answer), 'metadata': json.dumps({'uid': int(row['uid']), 'target_interaction': selected_interaction}, ensure_ascii=False) } # ============== Main Function ============== def main(): parser = argparse.ArgumentParser(description="Label Conditional Recommendation Task Data Processing") parser.add_argument('--input', type=str, required=True, help='Input metadata parquet path') parser.add_argument('--pid2sid', type=str, required=True, help='pid2sid mapping parquet path') parser.add_argument('--output_dir', type=str, required=True, help='Output directory') parser.add_argument('--seed', type=int, default=42, help='Random seed') args = parser.parse_args() random.seed(args.seed) output_dir = Path(args.output_dir) output_dir.mkdir(parents=True, exist_ok=True) # 1. Load pid2sid mapping print(f"Loading pid2sid from {args.pid2sid}...") df_pid2sid = pd.read_parquet(args.pid2sid) pid2sid = dict(zip(df_pid2sid['pid'], df_pid2sid['sid'])) print(f" Loaded {len(pid2sid):,} mappings") # 2. Load metadata print(f"Loading metadata from {args.input}...") df = pd.read_parquet(args.input) print(f" Loaded {len(df):,} rows") # 3. Process data (train only, split=0) print("Processing...") results = [] for _, row in tqdm(df.iterrows(), total=len(df)): if row['split'] != 0: continue result = process_row(row, pid2sid) if result: results.append(result) # 4. 
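# --- Editor's illustration (not part of the original script) ---
# process_row() in brief: for each interaction type, a 0/1 mask over hist_video_pid
# selects that type's items (e.g. mask [1, 0, 1] over pids [a, b, c] keeps [a, c]);
# one interaction type with a non-empty target mask is then sampled at random, and
# its masked target pids (first INTERACTION_MAX_LEN) become the assistant answer.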
Save results df_train = pd.DataFrame(results) train_path = output_dir / 'train.parquet' df_train.to_parquet(train_path, index=False) print(f"Saved: {train_path} ({len(df_train):,} rows)") print("Done!") if __name__ == "__main__": main() ================================================ FILE: data/onerec_data/sft/label_pred.py ================================================ """ Label Prediction Task (Point-wise Classification) Input: metadata parquet + pid2sid parquet Output: LLM SFT training format parquet Task: Predict whether a user will "longview" (watch for a long time) a candidate video. Binary classification: "是" (yes) or "否" (no). """ import pandas as pd import numpy as np import argparse import json import uuid import random from pathlib import Path from tqdm import tqdm # ============== Configuration ============== SID_FORMAT = '<|sid_begin|><|sid_end|>' INTERACTION_MAX_LEN = 10 TARGET_MAX_LEN = 10 # Interaction types INTERACTION_TYPES = ["longview", "like", "follow", "forward", "not_interested"] # System prompts (Chinese) SYSTEM_PROMPTS = [ "你是一个内容推荐专家,擅长分析用户的互动模式,预测用户的内容偏好。", "你是一个个性化推荐系统,能够基于用户的历史互动行为,预测用户未来可能产生的互动。", "你是一个用户行为分析助手,专注于理解用户的兴趣偏好,并推荐相关内容。", "你是一个内容推荐引擎,通过学习用户的互动历史,预测用户对新内容的反应。", ] # Interaction type descriptions (Chinese) INTERACTION_PROMPTS = { "longview": ["用户长时观看过以下内容:", "用户完整观看过的内容:", "用户深度浏览过以下内容:"], "like": ["用户点赞过以下内容:", "用户喜欢的内容:", "获得用户点赞的内容:"], "follow": ["用户关注过以下内容的作者:", "用户关注了这些内容的创作者:"], "forward": ["用户转发过以下内容:", "用户分享过的内容:", "用户向他人推荐的内容:"], "not_interested": ["用户表示不感兴趣的内容:", "用户标记为不感兴趣的内容:"], } # Classification question prompts (Chinese) CLASSIFICATION_QUESTIONS = [ "请判断用户是否会长时观看视频{candidate_sid}?", "用户会完整观看视频{candidate_sid}吗?", "预测用户是否会深度观看视频{candidate_sid}。", "视频{candidate_sid}能够吸引用户长时间观看吗?", "用户会花时间仔细观看视频{candidate_sid}吗?", ] # Classification answers POSITIVE_ANSWER = "是" NEGATIVE_ANSWER = "否" # ============== Core Functions ============== def pids_to_sids(pids, pid2sid: dict) -> str: """Convert a list of pids to SID string.""" if pids is None or (isinstance(pids, float) and pd.isna(pids)): return "" sids = [] for pid in pids: if pid in pid2sid: code = pid2sid[pid] sid = SID_FORMAT.format(c0=code[0], c1=code[1], c2=code[2]) sids.append(sid) return ''.join(sids) def pid_to_sid(pid, pid2sid: dict) -> str: """Convert a single pid to SID string.""" if pid in pid2sid: code = pid2sid[pid] return SID_FORMAT.format(c0=code[0], c1=code[1], c2=code[2]) return "" def build_messages(user_content: str, question: str, answer: str) -> str: """Build messages format JSON string.""" system_prompt = random.choice(SYSTEM_PROMPTS) messages = [ {"role": "system", "content": [{"type": "text", "text": system_prompt}]}, {"role": "user", "content": [{"type": "text", "text": user_content + "\n" + question}]}, {"role": "assistant", "content": [{"type": "text", "text": answer}]} ] return json.dumps(messages, ensure_ascii=False) def process_row(row, pid2sid: dict) -> list: """Process a single row of data. 
Returns a list of results (one per candidate video).""" hist_pids = row['hist_video_pid'] target_pids = row['target_video_pid'] # Check data validity if hist_pids is None or (isinstance(hist_pids, float) and pd.isna(hist_pids)): return [] if target_pids is None or (isinstance(target_pids, float) and pd.isna(target_pids)): return [] if len(target_pids) == 0: return [] # Build user interaction history description user_content_parts = [] for interaction in INTERACTION_TYPES: hist_col = f'hist_video_{interaction}' if hist_col not in row or row[hist_col] is None: continue mask = row[hist_col] if isinstance(mask, float) and pd.isna(mask): continue # Filter pids with interaction based on mask if len(mask) == len(hist_pids): mask_array = np.array(mask) pids_array = np.array(hist_pids) interaction_pids = pids_array[mask_array == 1].tolist() interaction_pids = interaction_pids[-INTERACTION_MAX_LEN:] if interaction_pids: sids = pids_to_sids(interaction_pids, pid2sid) if sids: prompt = random.choice(INTERACTION_PROMPTS[interaction]) user_content_parts.append(f"{prompt}{sids}") if not user_content_parts: return [] # Get target longview mask target_longview_col = 'target_video_longview' if target_longview_col not in row or row[target_longview_col] is None: return [] target_longview_mask = row[target_longview_col] if isinstance(target_longview_mask, float) and pd.isna(target_longview_mask): return [] if len(target_longview_mask) != len(target_pids): return [] # Limit target candidates limited_target_pids = target_pids[:TARGET_MAX_LEN] limited_longview_mask = target_longview_mask[:TARGET_MAX_LEN] # Build user content user_content = "\n".join(user_content_parts) # Generate one sample per candidate video results = [] for candidate_pid, label in zip(limited_target_pids, limited_longview_mask): label = int(label) # Convert candidate pid to SID candidate_sid = pid_to_sid(candidate_pid, pid2sid) if not candidate_sid: continue # Build question with candidate SID question = random.choice(CLASSIFICATION_QUESTIONS).format(candidate_sid=candidate_sid) # Determine answer based on label answer = POSITIVE_ANSWER if label == 1 else NEGATIVE_ANSWER result = { 'source': 'RecIF_LabelPred', 'uuid': str(uuid.uuid4()), 'messages': build_messages(user_content, question, answer), 'metadata': json.dumps({ 'uid': int(row['uid']), 'label': label }, ensure_ascii=False) } results.append(result) return results # ============== Main Function ============== def main(): parser = argparse.ArgumentParser(description="Label Prediction Task Data Processing") parser.add_argument('--input', type=str, required=True, help='Input metadata parquet path') parser.add_argument('--pid2sid', type=str, required=True, help='pid2sid mapping parquet path') parser.add_argument('--output_dir', type=str, required=True, help='Output directory') parser.add_argument('--seed', type=int, default=42, help='Random seed') args = parser.parse_args() random.seed(args.seed) output_dir = Path(args.output_dir) output_dir.mkdir(parents=True, exist_ok=True) # 1. Load pid2sid mapping print(f"Loading pid2sid from {args.pid2sid}...") df_pid2sid = pd.read_parquet(args.pid2sid) pid2sid = dict(zip(df_pid2sid['pid'], df_pid2sid['sid'])) print(f" Loaded {len(pid2sid):,} mappings") # 2. Load metadata print(f"Loading metadata from {args.input}...") df = pd.read_parquet(args.input) print(f" Loaded {len(df):,} rows") # 3. 
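# --- Editor's note (not part of the original script) ---
# Unlike the generative tasks, label_pred is point-wise: each of the first
# TARGET_MAX_LEN (=10) candidate videos becomes its own 是/否 ("yes"/"no")
# classification sample, so a single user row can emit up to 10 examples.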
Process data (train only, split=0) print("Processing...") results = [] positive_count, negative_count = 0, 0 for _, row in tqdm(df.iterrows(), total=len(df)): if row['split'] != 0: continue row_results = process_row(row, pid2sid) for result in row_results: metadata = json.loads(result['metadata']) label = metadata['label'] results.append(result) if label == 1: positive_count += 1 else: negative_count += 1 # 4. Save results df_train = pd.DataFrame(results) train_path = output_dir / 'train.parquet' df_train.to_parquet(train_path, index=False) print(f"Saved: {train_path} ({len(df_train):,} rows, pos={positive_count:,}, neg={negative_count:,})") print("Done!") if __name__ == "__main__": main() ================================================ FILE: data/onerec_data/sft/product_rec.py ================================================ """ Product Recommendation Task (Cross-domain) Input: metadata parquet + video_pid2sid parquet + product_pid2sid parquet Output: LLM SFT training format parquet Task: Predict product the user will click based on video watch history and product click history. Note: Video and product use different pid2sid mappings (different domains). """ import pandas as pd import argparse import json import uuid import random from pathlib import Path from tqdm import tqdm # ============== Configuration ============== SID_FORMAT = '<|sid_begin|><|sid_end|>' VIDEO_HIST_MAX_LEN = 100 PRODUCT_HIST_MAX_LEN = 100 TARGET_MAX_LEN = 10 # System prompts (Chinese) SYSTEM_PROMPTS = [ "你是一个智能跨域推荐助手,能够根据用户观看的视频内容和历史购物行为,预测用户接下来可能点击的商品。", "你是一个跨域推荐专家,擅长分析用户的观看习惯和购物偏好,预测用户的商品兴趣。", "你是一个个性化推荐系统,能够基于用户的视频观看历史和购物记录,预测用户未来可能购买的商品。", "你是一个用户行为分析助手,专注于理解用户的内容偏好和购物兴趣,推荐相关商品。", "你是一个跨域推荐引擎,通过学习用户的视频观看和购物历史,预测用户对商品的兴趣。", ] # Video watch history prompts (Chinese) VIDEO_WATCH_PROMPTS = [ "用户观看过的视频:", "用户浏览过的视频内容:", "用户长时间观看的视频:", "用户感兴趣的视频:", ] # Product click history prompts (Chinese) PRODUCT_CLICK_PROMPTS = [ "用户点击过的商品:", "用户浏览过的商品:", "用户感兴趣的商品:", "用户历史购物记录:", ] # Task prompts (Chinese) TASK_PROMPTS = [ "请根据用户的观看和购物历史,预测用户接下来可能点击的商品。", "基于以上记录,推荐用户可能感兴趣并点击的商品。", "分析用户的行为偏好,预测用户下一步会点击哪些商品。", "根据用户的视频观看和购物习惯,推荐用户可能点击的商品。", "请推荐用户接下来可能感兴趣并点击的商品。", ] # ============== Core Functions ============== def pids_to_sids(pids, pid2sid: dict) -> str: """Convert a list of pids to SID string.""" if pids is None or (isinstance(pids, float) and pd.isna(pids)): return "" sids = [] for pid in pids: if pid in pid2sid: code = pid2sid[pid] sid = SID_FORMAT.format(c0=code[0], c1=code[1], c2=code[2]) sids.append(sid) return ''.join(sids) def build_messages(user_content: str, task_prompt: str, answer: str) -> str: """Build messages format JSON string.""" system_prompt = random.choice(SYSTEM_PROMPTS) messages = [ {"role": "system", "content": [{"type": "text", "text": system_prompt}]}, {"role": "user", "content": [{"type": "text", "text": user_content + "\n" + task_prompt}]}, {"role": "assistant", "content": [{"type": "text", "text": answer}]} ] return json.dumps(messages, ensure_ascii=False) def process_row(row, video_pid2sid: dict, product_pid2sid: dict) -> dict: """Process a single row of data.""" hist_product_pids = row['hist_goods_pid'] target_product_pids = row['target_goods_pid'] # Check data validity if target_product_pids is None or (isinstance(target_product_pids, float) and pd.isna(target_product_pids)): return None if len(target_product_pids) == 0: return None # Build user content parts user_content_parts = [] # 1. 
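# --- Editor's note (not part of the original script) ---
# Cross-domain detail: the video watch history below is encoded with video_pid2sid,
# while the product click history and the target products use product_pid2sid;
# the two domains have separate SID code books.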
Process video watch history (long-view videos, use video_pid2sid) hist_longview_video_list = row['hist_longview_video_list'] if hist_longview_video_list is not None and not (isinstance(hist_longview_video_list, float) and pd.isna(hist_longview_video_list)): if len(hist_longview_video_list) > 0: # Keep the most recent videos (rightmost in the list) video_sids = pids_to_sids(hist_longview_video_list[-VIDEO_HIST_MAX_LEN:], video_pid2sid) if video_sids: video_prompt = random.choice(VIDEO_WATCH_PROMPTS) user_content_parts.append(f"{video_prompt}{video_sids}") # 2. Process product click history (use product_pid2sid) if hist_product_pids is not None and not (isinstance(hist_product_pids, float) and pd.isna(hist_product_pids)): if len(hist_product_pids) > 0: # Keep the most recent products (rightmost in the list) product_sids = pids_to_sids(hist_product_pids[-PRODUCT_HIST_MAX_LEN:], product_pid2sid) if product_sids: product_prompt = random.choice(PRODUCT_CLICK_PROMPTS) user_content_parts.append(f"{product_prompt}{product_sids}") # Need at least one type of history if not user_content_parts: return None # 3. Process target product (use product_pid2sid) answer = pids_to_sids(target_product_pids[:TARGET_MAX_LEN], product_pid2sid) if not answer: return None # Build final messages user_content = "\n".join(user_content_parts) task_prompt = random.choice(TASK_PROMPTS) return { 'source': 'RecIF_ProductRec', 'uuid': str(uuid.uuid4()), 'messages': build_messages(user_content, task_prompt, answer), 'metadata': json.dumps({'uid': int(row['uid'])}, ensure_ascii=False) } # ============== Main Function ============== def main(): parser = argparse.ArgumentParser(description="Product Recommendation Task Data Processing") parser.add_argument('--input', type=str, required=True, help='Input metadata parquet path') parser.add_argument('--pid2sid', type=str, required=True, help='Video pid2sid mapping parquet path') parser.add_argument('--product_pid2sid', type=str, required=True, help='Product pid2sid mapping parquet path') parser.add_argument('--output_dir', type=str, required=True, help='Output directory') parser.add_argument('--seed', type=int, default=42, help='Random seed') args = parser.parse_args() random.seed(args.seed) output_dir = Path(args.output_dir) output_dir.mkdir(parents=True, exist_ok=True) # 1. Load video pid2sid mapping print(f"Loading video pid2sid from {args.pid2sid}...") df_video_pid2sid = pd.read_parquet(args.pid2sid) video_pid2sid = dict(zip(df_video_pid2sid['pid'], df_video_pid2sid['sid'])) print(f" Loaded {len(video_pid2sid):,} video mappings") # 2. Load product pid2sid mapping print(f"Loading product pid2sid from {args.product_pid2sid}...") df_product_pid2sid = pd.read_parquet(args.product_pid2sid) product_pid2sid = dict(zip(df_product_pid2sid['pid'], df_product_pid2sid['sid'])) print(f" Loaded {len(product_pid2sid):,} product mappings") # 3. Load metadata print(f"Loading metadata from {args.input}...") df = pd.read_parquet(args.input) print(f" Loaded {len(df):,} rows") # 4. Process data (train only, split=0) print("Processing...") results = [] for _, row in tqdm(df.iterrows(), total=len(df)): if row['split'] != 0: continue result = process_row(row, video_pid2sid, product_pid2sid) if result: results.append(result) # 5. 
Save results df_train = pd.DataFrame(results) train_path = output_dir / 'train.parquet' df_train.to_parquet(train_path, index=False) print(f"Saved: {train_path} ({len(df_train):,} rows)") print("Done!") if __name__ == "__main__": main() ================================================ FILE: data/onerec_data/sft/rec_reason.py ================================================ """ Recommendation Reasoning Task Input: rec_reason parquet (user_profile_with_sid, gsu_caption, target_caption, cot, etc.) Output: LLM SFT training format parquet Task: Given user profile, watch history captions, and target video caption, generate reasoning for why the user would click the target video. """ import pandas as pd import argparse import json import uuid from pathlib import Path from tqdm import tqdm # ============== Configuration ============== USER_PROMPT_TEMPLATE = """{user_profile} [历史观看视频内容] {gsu_caption} [用户点击下一个视频内容] {target_video_caption} 请在思考的时候分析总结用户兴趣,重点根据用户观看视频内容进行推理,给出下一个点击的理由及视频的基本内容,下一个点击视频需要与给定的一致,注意虽然给出了下一个点击的视频但应该体现出推理得到而不是直接知道的。 最后再用一段话输出精炼的推理过程。 生成格式严格按照两大部分,标题分别是:预测分析;精炼推理。 """ # ============== Core Functions ============== def build_messages(user_prompt: str, answer: str) -> str: """Build messages format JSON string.""" messages = [ {"role": "user", "content": [{"type": "text", "text": user_prompt}]}, {"role": "assistant", "content": [{"type": "text", "text": answer}]} ] return json.dumps(messages, ensure_ascii=False) def is_valid_str(val) -> bool: """Check if value is a valid non-empty string.""" if val is None: return False if isinstance(val, float) and pd.isna(val): return False if isinstance(val, str) and val.strip(): return True return False def process_row(row) -> dict: """Process a single row of data.""" user_profile = row.get('inter_user_profile_with_sid') gsu_caption = row.get('reco_gsu_caption') target_caption = row.get('reco_target_caption') answer = row.get('reco_cot') # Check data validity if not is_valid_str(user_profile): return None if not is_valid_str(target_caption): return None if not is_valid_str(answer): return None gsu_caption = str(gsu_caption) # Build user prompt user_prompt = USER_PROMPT_TEMPLATE.format( user_profile=user_profile, gsu_caption=gsu_caption, target_video_caption=target_caption ) metadata = { 'uid': int(row['uid']) if 'uid' in row else None, 'target_pid': int(row['target_pid']) if 'target_pid' in row else None, } return { 'source': 'RecIF_RecoReason', 'uuid': str(uuid.uuid4()), 'messages': build_messages(user_prompt, answer), 'metadata': json.dumps(metadata, ensure_ascii=False) } # ============== Main Function ============== def main(): parser = argparse.ArgumentParser(description="Recommendation Reasoning Task Data Processing") parser.add_argument('--input', type=str, required=True, help='Input rec_reason parquet path') parser.add_argument('--output_dir', type=str, required=True, help='Output directory') args = parser.parse_args() output_dir = Path(args.output_dir) output_dir.mkdir(parents=True, exist_ok=True) # Load data print(f"Loading data from {args.input}...") df = pd.read_parquet(args.input) print(f" Loaded {len(df):,} rows") # Process data (train only, split=0) print("Processing...") results = [] for _, row in tqdm(df.iterrows(), total=len(df)): if row.get('split', 0) != 0: continue result = process_row(row) if result: results.append(result) # Save results df_train = pd.DataFrame(results) train_path = output_dir / 'train.parquet' df_train.to_parquet(train_path, index=False) print(f"Saved: {train_path} ({len(df_train):,} rows)") 
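# --- Editor's note (not part of the original script) ---
# The reco_cot answers kept above are expected to follow the two-part format the
# prompt template requests: a 预测分析 (prediction analysis) section followed by a
# 精炼推理 (condensed reasoning) section.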
print("Done!") if __name__ == "__main__": main() ================================================ FILE: data/onerec_data/sft/video_rec.py ================================================ """ Video Recommendation Task Input: metadata parquet + pid2sid parquet Output: LLM SFT training format parquet """ import pandas as pd import numpy as np import argparse import json import uuid import random from pathlib import Path from tqdm import tqdm # ============== Configuration ============== SID_FORMAT = '<|sid_begin|><|sid_end|>' HIST_MAX_LEN = 512 TARGET_MAX_LEN = 10 # System prompts (Chinese) SYSTEM_PROMPTS = [ "你是一个智能推荐助手,能够根据用户的浏览历史预测用户可能感兴趣的下一个内容。", "你是一名内容推荐专家,擅长分析用户浏览行为并预测用户偏好。", "作为推荐系统助手,你需要根据用户历史浏览记录推荐合适的内容。", "你具备理解用户浏览模式并生成个性化推荐的能力。", "你是一个专业的内容推荐助手,能够根据用户过往浏览记录推荐相关内容。", ] # User prompts (Chinese) USER_PROMPTS = [ "根据以下用户浏览记录,请预测用户接下来可能观看的内容:\n{query}", "用户浏览了以下内容:\n{query}\n请预测用户的下一个观看意向。", "以下是用户的浏览历史:\n{query}\n请推荐用户可能感兴趣的下一个内容。", "用户历史浏览记录如下:\n{query}\n分析并预测用户接下来会观看什么内容。", "{query}\n根据上述浏览记录,推测用户的下一个观看目标。", ] # ============== Core Functions ============== def pids_to_sids(pids, pid2sid: dict) -> str: """Convert a list of pids to SID string.""" if pids is None or (isinstance(pids, float) and pd.isna(pids)): return "" sids = [] for pid in pids: if pid in pid2sid: code = pid2sid[pid] sid = SID_FORMAT.format(c0=code[0], c1=code[1], c2=code[2]) sids.append(sid) return ''.join(sids) def build_messages(query: str, answer: str) -> str: """Build messages format JSON string.""" system_prompt = random.choice(SYSTEM_PROMPTS) user_prompt = random.choice(USER_PROMPTS).format(query=query) messages = [ {"role": "system", "content": [{"type": "text", "text": system_prompt}]}, {"role": "user", "content": [{"type": "text", "text": user_prompt}]}, {"role": "assistant", "content": [{"type": "text", "text": answer}]} ] return json.dumps(messages, ensure_ascii=False) def process_row(row, pid2sid: dict) -> dict: """Process a single row of data.""" hist_pids = row['hist_video_pid'] target_pids = row['target_video_pid'] # Check data validity if hist_pids is None or (isinstance(hist_pids, float) and pd.isna(hist_pids)): return None if target_pids is None or (isinstance(target_pids, float) and pd.isna(target_pids)): return None # Truncate and convert to SID (keep most recent history) query = pids_to_sids(hist_pids[-HIST_MAX_LEN:], pid2sid) answer = pids_to_sids(target_pids[:TARGET_MAX_LEN], pid2sid) if not query or not answer: return None return { 'source': 'RecIF_VideoRec', 'uuid': str(uuid.uuid4()), 'messages': build_messages(query, answer), 'metadata': json.dumps({'uid': int(row['uid'])}, ensure_ascii=False) } # ============== Main Function ============== def main(): parser = argparse.ArgumentParser(description="Video Recommendation Task Data Processing") parser.add_argument('--input', type=str, required=True, help='Input metadata parquet path') parser.add_argument('--pid2sid', type=str, required=True, help='pid2sid mapping parquet path') parser.add_argument('--output_dir', type=str, required=True, help='Output directory') parser.add_argument('--seed', type=int, default=42, help='Random seed') args = parser.parse_args() random.seed(args.seed) output_dir = Path(args.output_dir) output_dir.mkdir(parents=True, exist_ok=True) # 1. Load pid2sid mapping print(f"Loading pid2sid from {args.pid2sid}...") df_pid2sid = pd.read_parquet(args.pid2sid) pid2sid = dict(zip(df_pid2sid['pid'], df_pid2sid['sid'])) print(f" Loaded {len(pid2sid):,} mappings") # 2. 
Load metadata print(f"Loading metadata from {args.input}...") df = pd.read_parquet(args.input) print(f" Loaded {len(df):,} rows") # 3. Process data (train only, split=0) print("Processing...") results = [] for _, row in tqdm(df.iterrows(), total=len(df)): if row['split'] != 0: continue result = process_row(row, pid2sid) if result: results.append(result) # 4. Save results df_train = pd.DataFrame(results) train_path = output_dir / 'train.parquet' df_train.to_parquet(train_path, index=False) print(f"Saved: {train_path} ({len(df_train):,} rows)") print("Done!") if __name__ == "__main__": main() ================================================ FILE: data/prepare_distillation.sh ================================================ #!/bin/bash # Data sampling script: Sample specified number of samples from general dataset for on-policy distillation set -e # Configuration INPUT_PATH="../raw_data/general_text/sft" OUTPUT_FILE="../output/onpolicy_distillation.parquet" TEMP_FILE="../output/onpolicy_distillation_temp.parquet" NUM_SAMPLES=200000 SEED=42 ENGINE="pyarrow" # Check if paths exist SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" if [ ! -e "${INPUT_PATH}" ]; then echo "Error: Input path does not exist: ${INPUT_PATH}" exit 1 fi # Step 1: Sample data echo "Step 1: Sampling data..." python3 "${SCRIPT_DIR}/scripts/sample_data.py" \ --input "${INPUT_PATH}" \ --output "${TEMP_FILE}" \ --num_samples "${NUM_SAMPLES}" \ --seed "${SEED}" \ --engine "${ENGINE}" # Step 2: Fix unicode encoding echo "" echo "Step 2: Fixing unicode encoding..." python3 "${SCRIPT_DIR}/scripts/parquet_unicode_fix.py" \ --input "${TEMP_FILE}" \ --output "${OUTPUT_FILE}" \ --engine "${ENGINE}" # Clean up temporary files if [ -f "${TEMP_FILE}" ]; then rm "${TEMP_FILE}" echo "Temporary files cleaned up" fi echo "" echo "Processing completed! Output file: ${OUTPUT_FILE}" ================================================ FILE: data/prepare_pretrain.sh ================================================ #!/bin/bash # Data splitting script: Merge general text and recommendation data, then split by every 1000 samples set -e # Configuration # Both general and onerec use datasets starting with pretrain GENERAL_TEXT_PATH="../raw_data/general_text/pretrain" REC_DATA_PATH="../raw_data/onerec_data" OUTPUT_DIR="../output/split_data_pretrain" MAX_ROWS=1000 ENGINE="pyarrow" # Check if paths exist SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" if [ ! -e "${GENERAL_TEXT_PATH}" ]; then echo "Error: General text path does not exist: ${GENERAL_TEXT_PATH}" exit 1 fi if [ ! 
-e "${REC_DATA_PATH}" ]; then echo "Error: Recommendation data path does not exist: ${REC_DATA_PATH}" exit 1 fi # Execute python3 "${SCRIPT_DIR}/scripts/split_data.py" \ --general_text_path "${GENERAL_TEXT_PATH}" \ --rec_data_path "${REC_DATA_PATH}" \ --output_dir "${OUTPUT_DIR}" \ --max_rows "${MAX_ROWS}" \ --engine "${ENGINE}" ================================================ FILE: data/prepare_rl.sh ================================================ #!/bin/bash # RL data splitting script: Merge multiple RL task datasets and split into training and test sets set -e # Configuration # onerec dataset output path, rl uses datasets starting with sft REC_DATA_PATH="../output" # Tasks that RL depends on VIDEO_REC=${REC_DATA_PATH}/sft_video_rec.parquet AD_REC=${REC_DATA_PATH}/sft_ad_rec.parquet PRODUCT_REC=${REC_DATA_PATH}/sft_product_rec.parquet INTERACTIVE_REC=${REC_DATA_PATH}/sft_interactive_rec.parquet LABEL_COND_REC=${REC_DATA_PATH}/sft_label_cond_rec.parquet # Output configuration OUTPUT_DIR="../output/rl_data" TEST_SIZE=1000 SEED=42 ENGINE="pyarrow" # Get script directory SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" # Define all task files to process declare -a TASK_FILES=( "${VIDEO_REC}" "${AD_REC}" "${PRODUCT_REC}" "${INTERACTIVE_REC}" "${LABEL_COND_REC}" ) # Check if input files exist echo "Checking input files..." MISSING_FILES=0 for file in "${TASK_FILES[@]}"; do if [ ! -f "${file}" ]; then echo "Warning: File does not exist: ${file}" MISSING_FILES=$((MISSING_FILES + 1)) fi done if [ ${MISSING_FILES} -eq ${#TASK_FILES[@]} ]; then echo "Error: All input files do not exist" exit 1 fi # Execute train_test_split, merge all files and process them together echo "" echo "Starting RL data splitting..." echo "==========================================" echo "Input files:" for file in "${TASK_FILES[@]}"; do if [ -f "${file}" ]; then echo " - ${file}" fi done echo "Output directory: ${OUTPUT_DIR}" echo "Test set size: ${TEST_SIZE}" echo "==========================================" python3 "${SCRIPT_DIR}/scripts/train_test_split.py" \ --input_files "${TASK_FILES[@]}" \ --test_size "${TEST_SIZE}" \ --output_dir "${OUTPUT_DIR}" \ --seed "${SEED}" \ --engine "${ENGINE}" \ --test_filename "test.parquet" \ --train_filename "train.parquet" echo "" echo "==========================================" echo "RL data processing completed!" echo "Output directory: ${OUTPUT_DIR}" echo " - train.parquet (training set)" echo " - test.parquet (test set)" echo "==========================================" ================================================ FILE: data/prepare_sft.sh ================================================ #!/bin/bash # Data splitting script: Merge general text and recommendation data, then split by every 1000 samples set -e # Configuration # Both general and onerec use datasets starting with sft GENERAL_TEXT_PATH="../raw_data/general_text/sft" REC_DATA_PATH="../raw_data/onerec_data" OUTPUT_DIR="../output/split_data_sft" MAX_ROWS=1000 ENGINE="pyarrow" # Check if paths exist SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" if [ ! -e "${GENERAL_TEXT_PATH}" ]; then echo "Error: General text path does not exist: ${GENERAL_TEXT_PATH}" exit 1 fi if [ ! 
-e "${REC_DATA_PATH}" ]; then echo "Error: Recommendation data path does not exist: ${REC_DATA_PATH}" exit 1 fi # Execute python3 "${SCRIPT_DIR}/scripts/split_data.py" \ --general_text_path "${GENERAL_TEXT_PATH}" \ --rec_data_path "${REC_DATA_PATH}" \ --output_dir "${OUTPUT_DIR}" \ --max_rows "${MAX_ROWS}" \ --engine "${ENGINE}" ================================================ FILE: data/scripts/parquet_unicode_fix.py ================================================ #!/usr/bin/env python3 """Parquet Unicode Fix Script Fix unicode Chinese garbled text issues in messages and segments fields of parquet files. Supports single file or batch directory processing. """ import argparse import json import logging import os import sys from pathlib import Path from typing import List, Optional, Union import pandas as pd from tqdm import tqdm # Configure logging logging.basicConfig( level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s', handlers=[logging.StreamHandler()] ) logger = logging.getLogger(__name__) def decode_unicode_json(json_str: Optional[Union[str, bytes]]) -> Optional[str]: """Decode unicode characters in JSON string. Args: json_str: JSON string that may contain unicode encoding Returns: Decoded JSON string """ if json_str is None or pd.isna(json_str): return json_str # Handle bytes type if isinstance(json_str, bytes): json_str = json_str.decode('utf-8', errors='ignore') # If already a string and doesn't contain unicode escape sequences, return directly if isinstance(json_str, str) and '\\u' not in json_str: return json_str try: # JSON load (automatically decode unicode) json_obj = json.loads(json_str) # JSON dump with ensure_ascii disabled (preserve Chinese characters) decoded_str = json.dumps( json_obj, ensure_ascii=False, # Key: don't convert Chinese to unicode indent=None, # Keep original compact format separators=(',', ':') # Keep original separator format ) return decoded_str except json.JSONDecodeError: # Return original string when JSON parsing fails return json_str except Exception as e: logger.debug(f"Error processing JSON string: {e}") return json_str def find_parquet_files(directory: str, recursive: bool = True) -> List[str]: """ Find all parquet files in the directory Args: directory: Directory path recursive: Whether to recursively search subdirectories, default True Returns: List of parquet file paths """ parquet_files = [] directory_path = Path(directory) if not directory_path.exists(): raise FileNotFoundError(f"Directory does not exist: {directory}") if not directory_path.is_dir(): raise ValueError(f"Path is not a directory: {directory}") pattern = "**/*.parquet" if recursive else "*.parquet" parquet_files = [str(p) for p in directory_path.glob(pattern) if p.is_file()] logger.info(f"Found {len(parquet_files)} parquet files in directory {directory}") return sorted(parquet_files) def get_output_path(input_path: str, output_base: str, input_base: Optional[str] = None) -> str: """ Generate output path based on input path and output base path Args: input_path: Input file path output_base: Output base path (file or directory) input_base: Input base path (to maintain relative path structure), if None uses input file's directory Returns: Output file path """ input_path_obj = Path(input_path) output_base_obj = Path(output_base) # If output base path is a file, return directly if output_base_obj.is_file() or (not output_base_obj.exists() and not output_base_obj.suffix == ''): return str(output_base_obj) # If output base path is a directory if input_base: # 
Maintain relative path structure input_base_obj = Path(input_base) try: relative_path = input_path_obj.relative_to(input_base_obj) output_path = output_base_obj / relative_path except ValueError: # If unable to calculate relative path, use filename output_path = output_base_obj / input_path_obj.name else: # Use input file's directory as base output_path = output_base_obj / input_path_obj.name return str(output_path) def process_parquet_file( input_path: str, output_path: str, engine: str = 'pyarrow', fields: Optional[List[str]] = None ) -> None: """Process parquet file to fix unicode Chinese garbled text in specified fields. Args: input_path: Input parquet file path output_path: Output parquet file path engine: Engine for reading/writing parquet, options: 'pyarrow' or 'fastparquet' fields: List of fields to process, defaults to ['messages', 'segments'] """ if not os.path.exists(input_path): raise FileNotFoundError(f"Input file does not exist: {input_path}") if fields is None: fields = ['messages', 'segments'] # Read parquet file logger.info(f"Reading file: {input_path}") df = pd.read_parquet(input_path, engine=engine) logger.info(f"Total rows: {len(df)}") # Check and process fields processed_fields = [] for field in fields: if field in df.columns: logger.debug(f"Processing field: {field}") df[field] = df[field].apply(decode_unicode_json) processed_fields.append(field) else: logger.debug(f"Field does not exist, skipping: {field}") if not processed_fields: logger.warning(f"No fields to process found: {fields}") # If no fields to process, copy file directly if input_path != output_path: import shutil Path(output_path).parent.mkdir(parents=True, exist_ok=True) shutil.copy2(input_path, output_path) logger.info(f"File copied to: {output_path}") return logger.info(f"Processed fields: {processed_fields}") # Save processed file Path(output_path).parent.mkdir(parents=True, exist_ok=True) df.to_parquet( output_path, engine=engine, index=False, compression='snappy' ) logger.info(f"File saved successfully: {output_path}") def process_directory(input_dir: str, output_dir: str, engine: str = 'pyarrow', recursive: bool = True, overwrite: bool = False) -> None: """ Batch process all parquet files in the directory Args: input_dir: Input directory path output_dir: Output directory path engine: Parquet processing engine recursive: Whether to recursively process subdirectories overwrite: Whether to overwrite original files (if True, output_dir is ignored and input files are overwritten directly) """ # Find all parquet files parquet_files = find_parquet_files(input_dir, recursive=recursive) if not parquet_files: logger.warning(f"No parquet files found in directory {input_dir}") return # Create output directory (if needed) if not overwrite: output_path_obj = Path(output_dir) output_path_obj.mkdir(parents=True, exist_ok=True) logger.info(f"Output directory: {output_dir}") # Process each file total_files = len(parquet_files) success_count = 0 fail_count = 0 for input_file in tqdm(parquet_files, desc="Processing files"): try: if overwrite: # Overwrite original file output_file = input_file else: # Generate output path, maintain directory structure output_file = get_output_path(input_file, output_dir, input_dir) # Ensure output directory exists Path(output_file).parent.mkdir(parents=True, exist_ok=True) process_parquet_file(input_file, output_file, engine) success_count += 1 except Exception as e: fail_count += 1 logger.error(f"File processing failed: {input_file}, error: {e}", exc_info=True) continue # Output 
statistics logger.info(f"\n{'='*60}") logger.info(f"Batch processing completed!") logger.info(f"Total files: {total_files}") logger.info(f"Success: {success_count}") logger.info(f"Failed: {fail_count}") logger.info(f"{'='*60}") def main(): # Parse command line arguments parser = argparse.ArgumentParser( description='Process unicode Chinese garbled text in messages and segments fields of parquet files (supports single file or batch directory processing)' ) parser.add_argument( '-i', '--input', required=True, help='Input parquet file path or directory path (required)' ) parser.add_argument( '-o', '--output', required=True, help='Output parquet file path or directory path (required)' ) parser.add_argument( '-e', '--engine', choices=['pyarrow', 'fastparquet'], default='pyarrow', help='Parquet processing engine, default uses pyarrow' ) parser.add_argument( '--no-recursive', action='store_true', help='When processing directory, do not recursively process subdirectories (only process files in current directory)' ) parser.add_argument( '--overwrite', action='store_true', help='Overwrite original files (only effective when input is directory, will ignore output path)' ) args = parser.parse_args() # Execute processing try: input_path = Path(args.input) if not input_path.exists(): logger.error(f"Input path does not exist: {args.input}") exit(1) # Determine if input is file or directory if input_path.is_file(): # Single file processing mode logger.info("Single file processing mode") if Path(args.output).is_dir(): # If output is directory, create file with same name in directory output_file = Path(args.output) / input_path.name else: output_file = args.output process_parquet_file( input_path=str(input_path), output_path=str(output_file), engine=args.engine ) logger.info("All operations completed!") elif input_path.is_dir(): # Directory batch processing mode logger.info("Directory batch processing mode") if args.overwrite: logger.info("Will overwrite original files") process_directory( input_dir=str(input_path), output_dir="", # Will not be used engine=args.engine, recursive=not args.no_recursive, overwrite=True ) else: output_path = Path(args.output) if output_path.exists() and output_path.is_file(): logger.error(f"When input is directory, output should also be directory, but output path is file: {args.output}") exit(1) process_directory( input_dir=str(input_path), output_dir=str(output_path), engine=args.engine, recursive=not args.no_recursive, overwrite=False ) logger.info("All operations completed!") else: logger.error(f"Input path is neither file nor directory: {args.input}") exit(1) except KeyboardInterrupt: logger.info("\nOperation cancelled by user") sys.exit(1) except Exception as e: logger.error(f"Program execution failed: {e}", exc_info=True) sys.exit(1) if __name__ == "__main__": main() ================================================ FILE: data/scripts/sample_data.py ================================================ #!/usr/bin/env python3 """Data Sampling Script Sample specified number of samples from one or more paths (directories or files) containing parquet files, and save as a single parquet file. 
""" import argparse import logging import random import sys from pathlib import Path from typing import List import pandas as pd from tqdm import tqdm # Configure logging logging.basicConfig( level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s', handlers=[logging.StreamHandler()] ) logger = logging.getLogger(__name__) def find_parquet_files(directory: str, recursive: bool = True) -> List[str]: """Find all parquet files in the directory. Args: directory: Directory path recursive: Whether to recursively search subdirectories Returns: List of parquet file paths """ dir_path = Path(directory) if not dir_path.exists(): raise FileNotFoundError(f"Directory does not exist: {directory}") if not dir_path.is_dir(): raise ValueError(f"Path is not a directory: {directory}") pattern = "**/*.parquet" if recursive else "*.parquet" parquet_files = [str(p) for p in dir_path.glob(pattern) if p.is_file()] return sorted(parquet_files) def collect_parquet_files(input_paths: List[str], recursive: bool = True) -> List[str]: """Collect all parquet file paths. Args: input_paths: List of input paths (can be files or directories) recursive: Whether to recursively search subdirectories Returns: List of parquet file paths """ all_files = [] for input_path in input_paths: path = Path(input_path) if not path.exists(): logger.warning(f"Path does not exist, skipping: {input_path}") continue if path.is_file(): if path.suffix.lower() == '.parquet': all_files.append(str(path)) else: logger.warning(f"Not a parquet file, skipping: {input_path}") elif path.is_dir(): files = find_parquet_files(str(path), recursive=recursive) all_files.extend(files) else: logger.warning(f"Unknown path type, skipping: {input_path}") return sorted(list(set(all_files))) # Remove duplicates and sort def load_all_parquet_files(file_paths: List[str], engine: str = 'pyarrow') -> pd.DataFrame: """Load all parquet files and merge them. Args: file_paths: List of parquet file paths engine: Parquet engine, 'pyarrow' or 'fastparquet' Returns: Merged DataFrame """ if not file_paths: logger.warning("No parquet files found") return pd.DataFrame() logger.info(f"Found {len(file_paths)} parquet files, starting to load...") dataframes = [] for file_path in tqdm(file_paths, desc="Loading files"): try: df = pd.read_parquet(file_path, engine=engine) logger.debug(f" Loaded {file_path}: {len(df)} rows") dataframes.append(df) except Exception as e: logger.error(f" Failed to load {file_path}: {e}") continue if not dataframes: logger.warning("No files loaded successfully") return pd.DataFrame() # Merge all DataFrames logger.info("Merging all data...") combined_df = pd.concat(dataframes, ignore_index=True) logger.info(f"Merge completed, total {len(combined_df)} rows") return combined_df def sample_dataframe(df: pd.DataFrame, num_samples: int, seed: int = None) -> pd.DataFrame: """Sample specified number of samples from DataFrame. 
Args: df: DataFrame to sample from num_samples: Number of samples seed: Random seed Returns: Sampled DataFrame """ if len(df) == 0: logger.warning("DataFrame is empty, cannot sample") return pd.DataFrame() if num_samples <= 0: raise ValueError(f"num_samples must be greater than 0, current value: {num_samples}") total_rows = len(df) if num_samples >= total_rows: logger.warning(f"Sample size ({num_samples}) is greater than or equal to total rows ({total_rows}), returning all data") return df.copy() # Set random seed if seed is not None: random.seed(seed) logger.info(f"Using random seed: {seed}") # Random sampling logger.info(f"Sampling {num_samples} rows from {total_rows} rows...") sampled_indices = random.sample(range(total_rows), num_samples) sampled_df = df.iloc[sampled_indices].copy() logger.info(f"Sampling completed, total {len(sampled_df)} rows") return sampled_df def main(): """Main function.""" parser = argparse.ArgumentParser( description='Sample specified number of samples from one or more paths containing parquet files, and save as a single parquet file' ) parser.add_argument( '--input', type=str, nargs='+', required=True, help='Input paths (can be files or directories), multiple paths can be specified' ) parser.add_argument( '--output', type=str, required=True, help='Output parquet file path' ) parser.add_argument( '--num_samples', type=int, required=True, help='Number of samples' ) parser.add_argument( '--seed', type=int, default=None, help='Random seed (optional)' ) parser.add_argument( '--engine', choices=['pyarrow', 'fastparquet'], default='pyarrow', help='Parquet processing engine (default: pyarrow)' ) parser.add_argument( '--no-recursive', action='store_true', help='Do not recursively search for files in subdirectories' ) args = parser.parse_args() # Validate parameters if args.num_samples <= 0: logger.error(f"num_samples must be greater than 0, current value: {args.num_samples}") sys.exit(1) try: # 1. Collect all parquet files logger.info("=" * 60) logger.info("Step 1: Collecting parquet files...") parquet_files = collect_parquet_files( args.input, recursive=not args.no_recursive ) if not parquet_files: logger.error("No parquet files found") sys.exit(1) logger.info(f"Found {len(parquet_files)} parquet files") # 2. Load all files logger.info("=" * 60) logger.info("Step 2: Loading parquet files...") combined_df = load_all_parquet_files(parquet_files, engine=args.engine) if len(combined_df) == 0: logger.error("No data loaded") sys.exit(1) # 3. Sample data logger.info("=" * 60) logger.info("Step 3: Sampling data...") sampled_df = sample_dataframe( combined_df, num_samples=args.num_samples, seed=args.seed ) if len(sampled_df) == 0: logger.error("Sampled data is empty") sys.exit(1) # 4. Save results logger.info("=" * 60) logger.info("Step 4: Saving results...") output_path = Path(args.output) output_path.parent.mkdir(parents=True, exist_ok=True) sampled_df.to_parquet( output_path, engine='pyarrow', index=False, compression='snappy' ) logger.info(f"Results saved to: {output_path}") # 5. 
Output statistics logger.info("=" * 60) logger.info("Processing completed!") logger.info(f"Input files: {len(parquet_files)}") logger.info(f"Original data rows: {len(combined_df)}") logger.info(f"Sampled rows: {len(sampled_df)}") logger.info(f"Output file: {output_path}") logger.info("=" * 60) except KeyboardInterrupt: logger.info("\nOperation cancelled by user") sys.exit(1) except Exception as e: logger.error(f"Program execution failed: {e}", exc_info=True) sys.exit(1) if __name__ == "__main__": main() ================================================ FILE: data/scripts/split_data.py ================================================ #!/usr/bin/env python3 """Data splitting script Merge general text data and recommendation data, then split into multiple files with 1000 samples each. """ import argparse import json import logging import sys from pathlib import Path from typing import List import pandas as pd from tqdm import tqdm # Configure logging logging.basicConfig( level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s', handlers=[logging.StreamHandler()] ) logger = logging.getLogger(__name__) def find_parquet_files(directory: str, recursive: bool = True) -> List[str]: """Find all parquet files in the directory. Args: directory: Directory path recursive: Whether to recursively search subdirectories Returns: List of parquet file paths """ dir_path = Path(directory) if not dir_path.exists(): raise FileNotFoundError(f"Directory does not exist: {directory}") if not dir_path.is_dir(): raise ValueError(f"Path is not a directory: {directory}") pattern = "**/*.parquet" if recursive else "*.parquet" parquet_files = [str(p) for p in dir_path.glob(pattern) if p.is_file()] return sorted(parquet_files) def load_all_parquet_files(file_paths: List[str], engine: str = 'pyarrow') -> pd.DataFrame: """Load and merge all parquet files. Args: file_paths: List of parquet file paths engine: Parquet engine, 'pyarrow' or 'fastparquet' Returns: Merged DataFrame """ if not file_paths: logger.warning("No parquet files found") return pd.DataFrame() logger.info(f"Found {len(file_paths)} parquet files, starting to load...") dataframes = [] for file_path in tqdm(file_paths, desc="Loading files"): try: df = pd.read_parquet(file_path, engine=engine) logger.debug(f" Loaded {file_path}: {len(df)} rows") dataframes.append(df) except Exception as e: logger.error(f" Failed to load {file_path}: {e}") continue if not dataframes: logger.warning("No files loaded successfully") return pd.DataFrame() # Merge all DataFrames logger.info("Merging all data...") combined_df = pd.concat(dataframes, ignore_index=True) logger.info(f"Merge complete, total {len(combined_df)} rows") return combined_df def split_dataframe(df: pd.DataFrame, max_rows: int, output_dir: str, prefix: str = "part") -> List[str]: """Split DataFrame into multiple files by fixed number of rows. 
Args: df: DataFrame to split max_rows: Maximum number of rows per file output_dir: Output directory prefix: Output file prefix Returns: List of output file paths """ if len(df) == 0: logger.warning("DataFrame is empty, no need to split") return [] if max_rows <= 0: raise ValueError(f"max_rows must be greater than 0, current value: {max_rows}") # Create output directory output_dir_path = Path(output_dir) output_dir_path.mkdir(parents=True, exist_ok=True) # Calculate number of files needed total_rows = len(df) num_chunks = (total_rows + max_rows - 1) // max_rows # Round up logger.info(f"Splitting data into {num_chunks} files (max {max_rows} rows per file)") # Use fixed 5-digit format to ensure consistent file naming # Format: part-00000-of-00010.parquet num_digits = 5 # Split and save output_files = [] for chunk_idx in tqdm(range(num_chunks), desc="Splitting files"): start_idx = chunk_idx * max_rows end_idx = min(start_idx + max_rows, total_rows) # Extract data chunk chunk_df = df.iloc[start_idx:end_idx] # Generate output filename, format: part-00000-of-00010.parquet output_filename = f"{prefix}-{chunk_idx:0{num_digits}d}-of-{num_chunks:0{num_digits}d}.parquet" output_path = output_dir_path / output_filename # Save file chunk_df.to_parquet( output_path, engine='pyarrow', index=False, compression='snappy' ) output_files.append(str(output_path)) logger.debug(f" Saved file {chunk_idx + 1}/{num_chunks}: {output_path} (rows {start_idx} to {end_idx - 1})") logger.info(f"Successfully split into {len(output_files)} files") return output_files def main(): """Main function.""" parser = argparse.ArgumentParser( description='Merge general text data and recommendation data, then split into multiple files with 1000 samples each' ) parser.add_argument( '--general_text_path', type=str, required=True, help='General text data path (directory or file)' ) parser.add_argument( '--rec_data_path', type=str, required=True, help='Recommendation data path (directory or file)' ) parser.add_argument( '--output_dir', type=str, required=True, help='Output directory path' ) parser.add_argument( '--max_rows', type=int, default=1000, help='Maximum number of rows per file (default: 1000)' ) parser.add_argument( '--engine', choices=['pyarrow', 'fastparquet'], default='pyarrow', help='Parquet processing engine (default: pyarrow)' ) parser.add_argument( '--no-recursive', action='store_true', help='Do not recursively search for files in subdirectories' ) args = parser.parse_args() # Validate parameters if args.max_rows <= 0: logger.error(f"max_rows must be greater than 0, current value: {args.max_rows}") sys.exit(1) try: # 1. Find all parquet files logger.info("=" * 60) logger.info("Step 1: Finding general text data files...") general_text_path = Path(args.general_text_path) if general_text_path.is_file(): general_text_files = [str(general_text_path)] else: general_text_files = find_parquet_files( args.general_text_path, recursive=not args.no_recursive ) logger.info(f"Found {len(general_text_files)} general text files") logger.info("Step 2: Finding recommendation data files...") rec_data_path = Path(args.rec_data_path) if rec_data_path.is_file(): rec_data_files = [str(rec_data_path)] else: rec_data_files = find_parquet_files( args.rec_data_path, recursive=not args.no_recursive ) logger.info(f"Found {len(rec_data_files)} recommendation data files") # 2. 
Load all files logger.info("=" * 60) logger.info("Step 3: Loading general text data...") general_text_df = load_all_parquet_files(general_text_files, engine=args.engine) logger.info("Step 4: Loading recommendation data...") rec_data_df = load_all_parquet_files(rec_data_files, engine=args.engine) # 3. Merge data logger.info("=" * 60) logger.info("Step 5: Merging data...") if len(general_text_df) == 0 and len(rec_data_df) == 0: logger.error("No data loaded") sys.exit(1) if len(general_text_df) == 0: combined_df = rec_data_df logger.info("Using only recommendation data") elif len(rec_data_df) == 0: combined_df = general_text_df logger.info("Using only general text data") else: combined_df = pd.concat([general_text_df, rec_data_df], ignore_index=True) logger.info(f"Merge complete: general text {len(general_text_df)} rows + recommendation data {len(rec_data_df)} rows = total {len(combined_df)} rows") # 4. Split data logger.info("=" * 60) logger.info("Step 6: Splitting data...") output_files = split_dataframe( combined_df, max_rows=args.max_rows, output_dir=args.output_dir, prefix="part" ) # 5. Generate file list JSON logger.info("=" * 60) logger.info("Step 7: Generating file list JSON...") output_dir_path = Path(args.output_dir) json_file_path = output_dir_path / "file_list.json" # Convert file paths to absolute paths (absolute paths are more reliable) file_list = [str(Path(f).absolute()) for f in output_files] with open(json_file_path, 'w', encoding='utf-8') as f: json.dump(file_list, f, indent=2, ensure_ascii=False) logger.info(f"File list saved to: {json_file_path} ({len(file_list)} files)") # 6. Output statistics logger.info("=" * 60) logger.info("Processing complete!") logger.info(f"Input files: general text {len(general_text_files)} files, recommendation data {len(rec_data_files)} files") logger.info(f"Total data rows: {len(combined_df)}") logger.info(f"Output files: {len(output_files)}") logger.info(f"Output directory: {args.output_dir}") logger.info(f"File list JSON: {json_file_path}") logger.info("=" * 60) except KeyboardInterrupt: logger.info("\nOperation cancelled by user") sys.exit(1) except Exception as e: logger.error(f"Program execution failed: {e}", exc_info=True) sys.exit(1) if __name__ == "__main__": main() ================================================ FILE: data/scripts/train_test_split.py ================================================ #!/usr/bin/env python3 """Train/Test Split Script Randomly selects N samples from multiple parquet files as the test set, with remaining data as the training set. Both datasets are shuffled before saving. """ import argparse import logging import sys from pathlib import Path from typing import List import pandas as pd from tqdm import tqdm # Configure logging logging.basicConfig( level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s', handlers=[logging.StreamHandler()] ) logger = logging.getLogger(__name__) def load_all_parquet_files(file_paths: List[str], engine: str = 'pyarrow') -> pd.DataFrame: """Load and merge all parquet files. 
Args: file_paths: List of parquet file paths engine: Parquet engine, 'pyarrow' or 'fastparquet' Returns: Merged DataFrame """ if not file_paths: logger.warning("No parquet files found") return pd.DataFrame() logger.info(f"Found {len(file_paths)} parquet files, starting to load...") dataframes = [] for file_path in tqdm(file_paths, desc="Loading files"): try: df = pd.read_parquet(file_path, engine=engine) logger.debug(f" Loaded {file_path}: {len(df)} rows") dataframes.append(df) except Exception as e: logger.error(f" Failed to load {file_path}: {e}") continue if not dataframes: logger.warning("No files loaded successfully") return pd.DataFrame() # Merge all DataFrames logger.info("Merging all data...") combined_df = pd.concat(dataframes, ignore_index=True) logger.info(f"Merge complete, total {len(combined_df)} rows") return combined_df def split_train_test( df: pd.DataFrame, test_size: int, seed: int = None ) -> tuple: """Split DataFrame into training and test sets. Args: df: DataFrame to split test_size: Number of test samples seed: Random seed Returns: (train_df, test_df) tuple """ if len(df) == 0: logger.warning("DataFrame is empty, cannot split") return pd.DataFrame(), pd.DataFrame() if test_size <= 0: raise ValueError(f"test_size must be greater than 0, current value: {test_size}") total_rows = len(df) if test_size >= total_rows: logger.warning( f"Test size ({test_size}) is greater than or equal to total rows ({total_rows}), " f"using all data as test set, training set will be empty" ) return pd.DataFrame(), df.copy() # Use pandas sample method for random sampling, ensuring reproducibility if seed is not None: logger.info(f"Using random seed: {seed}") logger.info(f"Randomly selecting {test_size} rows from {total_rows} rows as test set...") # Use pandas sample method to randomly select test set test_df = df.sample(n=test_size, random_state=seed).copy() # Get test set indices test_indices = set(test_df.index) # Remaining data as training set train_df = df.drop(test_indices).copy() logger.info(f"Split complete: training set {len(train_df)} rows, test set {len(test_df)} rows") return train_df, test_df def shuffle_dataframe(df: pd.DataFrame, seed: int = None) -> pd.DataFrame: """Shuffle DataFrame. 
Args: df: DataFrame to shuffle seed: Random seed (for reproducibility) Returns: Shuffled DataFrame """ if len(df) == 0: return df.copy() # Use sample method for shuffling (frac=1 means sampling all data, i.e., shuffling) # random_state parameter ensures reproducibility shuffled_df = df.sample(frac=1, random_state=seed).reset_index(drop=True) return shuffled_df def main(): """Main function.""" parser = argparse.ArgumentParser( description='Randomly select N samples from multiple parquet files as test set, remaining data as training set' ) parser.add_argument( '--input_files', type=str, nargs='+', required=True, help='List of input parquet file paths (can specify multiple files)' ) parser.add_argument( '--test_size', type=int, required=True, help='Number of test samples' ) parser.add_argument( '--output_dir', type=str, required=True, help='Output directory path' ) parser.add_argument( '--seed', type=int, default=None, help='Random seed (optional, for reproducibility)' ) parser.add_argument( '--engine', choices=['pyarrow', 'fastparquet'], default='pyarrow', help='Parquet processing engine (default: pyarrow)' ) parser.add_argument( '--test_filename', type=str, default='test.parquet', help='Test set output filename (default: test.parquet)' ) parser.add_argument( '--train_filename', type=str, default='train.parquet', help='Training set output filename (default: train.parquet)' ) args = parser.parse_args() # Validate parameters if args.test_size <= 0: logger.error(f"test_size must be greater than 0, current value: {args.test_size}") sys.exit(1) # Validate input files exist input_files = [] for file_path in args.input_files: path = Path(file_path) if not path.exists(): logger.warning(f"File does not exist, skipping: {file_path}") continue if not path.is_file(): logger.warning(f"Path is not a file, skipping: {file_path}") continue if path.suffix.lower() != '.parquet': logger.warning(f"Not a parquet file, skipping: {file_path}") continue input_files.append(str(path)) if not input_files: logger.error("No valid parquet files found") sys.exit(1) try: # 1. Load all parquet files logger.info("=" * 60) logger.info("Step 1: Loading parquet files...") combined_df = load_all_parquet_files(input_files, engine=args.engine) if len(combined_df) == 0: logger.error("No data loaded") sys.exit(1) # 2. Split training and test sets logger.info("=" * 60) logger.info("Step 2: Splitting training and test sets...") train_df, test_df = split_train_test( combined_df, test_size=args.test_size, seed=args.seed ) if len(test_df) == 0: logger.error("Test set is empty, cannot continue") sys.exit(1) # 3. Shuffle data logger.info("=" * 60) logger.info("Step 3: Shuffling data...") # Use different seed offsets for training and test sets to ensure different shuffle results # If seed is provided, use different offsets; otherwise use None for both (completely random) train_seed = (args.seed + 1000) if args.seed is not None else None test_seed = (args.seed + 2000) if args.seed is not None else None logger.info("Shuffling training set...") train_df = shuffle_dataframe(train_df, seed=train_seed) logger.info("Shuffling test set...") test_df = shuffle_dataframe(test_df, seed=test_seed) # 4. 
Save results logger.info("=" * 60) logger.info("Step 4: Saving results...") output_dir = Path(args.output_dir) output_dir.mkdir(parents=True, exist_ok=True) test_path = output_dir / args.test_filename train_path = output_dir / args.train_filename logger.info(f"Saving test set to: {test_path}") test_df.to_parquet( test_path, engine='pyarrow', index=False, compression='snappy' ) if len(train_df) > 0: logger.info(f"Saving training set to: {train_path}") train_df.to_parquet( train_path, engine='pyarrow', index=False, compression='snappy' ) else: logger.warning("Training set is empty, skipping save") # 5. Output statistics logger.info("=" * 60) logger.info("Processing complete!") logger.info(f"Number of input files: {len(input_files)}") logger.info(f"Original data rows: {len(combined_df)}") logger.info(f"Training set rows: {len(train_df)}") logger.info(f"Test set rows: {len(test_df)}") logger.info(f"Output directory: {output_dir}") logger.info(f"Training set file: {train_path}") logger.info(f"Test set file: {test_path}") if args.seed is not None: logger.info(f"Random seed: {args.seed}") logger.info("=" * 60) except KeyboardInterrupt: logger.info("\nOperation cancelled by user") sys.exit(1) except Exception as e: logger.error(f"Program execution failed: {e}", exc_info=True) sys.exit(1) if __name__ == "__main__": main() ================================================ FILE: pretrain/.gitignore ================================================ # Python __pycache__/ *.pyc *.so *.egg-info *.pylintrc # Build build # IDE .vscode/ .idea/ *~ # OS .DS_Store # Project specific keys.txt hostfile # Data files *.parquet *.bin *.pt *.npy # Logs and outputs *.log *.err *.out # System files *.swp *.deb .git ================================================ FILE: pretrain/README.md ================================================ # OpenOneRec Pretraining Module The OpenOneRec pretraining module is based on the Qwen3 architecture, supporting a two-stage pretraining pipeline (Itemic-Text Alignment → Full-parameter Co-Pretraining) and SFT training workflow. > **⚠️ Important Notice** > > The distributed training in this module **relies on MPI (Message Passing Interface)** for multi-node communication. The current training scripts use `mpirun` to launch distributed training, requiring proper MPI environment configuration (e.g., OpenMPI) and hostfile setup. > > To simplify environment configuration and improve reproducibility, we plan to release in future versions: > - **Pre-configured Docker/Apptainer images**: Including all necessary dependencies and MPI environment > - **torchrun-based training scripts**: Providing an easier way to launch distributed training > > Before the images and torchrun versions are released, please ensure your environment has MPI properly installed and configured. ## Quick Start ### Prerequisites - **Hardware**: CUDA-enabled GPUs (multi-GPU or multi-node recommended) - **Software**: - Python 3.8+ - PyTorch (with FSDP and distributed training support) - OpenMPI or compatible MPI implementation - NCCL (for GPU communication) - **Data**: Training data converted to Parquet format (refer to `../data/README.md`) - **Model**: Qwen3 base model (HuggingFace format) ### 1. Environment Setup First, configure the training environment: ```bash # Set environment variables source set_env.sh ``` This script sets necessary environment variables, including Python path, CUDA path, etc. ### 2. 
Qwen3 Model Vocabulary Expansion Before starting training, you need to expand the vocabulary of the Qwen3 base model to support recommendation system-specific item ID encoding (itemic tokens). #### 2.1 Configure Parameters Edit `scripts/expand_qwen3_vocab.sh` and set the following parameters: ```bash HF_MODEL_DIR=/path/to/Qwen3-0.6B # Original Qwen3 HuggingFace model path OUTPUT_MODEL_DIR=/path/to/Qwen3-0.6B_itemic # Output model path with expanded vocabulary ITEMIC_LAYER_N=3 # Number of layers for itemic tokens VOCAB_SIZE_PER_LAYER=8192 # Vocabulary size expansion per layer ``` #### 2.2 Execute Expansion ```bash bash scripts/expand_qwen3_vocab.sh ``` This script will: - Add new itemic tokens on top of the original vocabulary - Align vocabulary size to multiples of 256 - Initialize embedding weights for new tokens - Save the expanded model to the specified directory **Note**: The expanded model path needs to be used in the data configuration file for subsequent training (`base_model_dir` field). ### 3. Data Preparation Training data needs to be converted to Parquet format. Please refer to `../data/README.md` for format specifications. Data configuration is specified through JSON files located in the `examples/dataset_config/` directory. #### Data Configuration Format Each data configuration file contains the following main fields: ```json { "name": "chat_completion_parquet", "sources": "/path/to/file_list.json", "base_model_dir": "/path/to/Qwen3-1.7B_itemic", "max_length": 30000, "num_epochs": 3, "num_workers": 2, "itemic_id_range": [151669, 176246], "add_think_pattern": false, "local_shuffle_buffer_size": 100000 ... } ``` ### 4. Training Training scripts are located in the `examples/` directory, and data configuration files are in the `examples/dataset_config/` directory. #### 4.1 Stage1 Pretraining Stage1 is mainly used for training itemic embeddings, typically freezing LLM parameters and only optimizing the embedding layer. ```bash # Edit examples/pretrain_stg1.sh to set model path, output path, and other parameters bash examples/pretrain_stg1.sh ``` Main training parameters (configured in `pretrain_stg1.sh`): - `--dataset_config examples/dataset_config/stg1.json`: Specify data configuration - `--freeze_llm`: Freeze LLM parameters - `--start_optimize_embedding_index 151669`: Start optimizing embeddings from the specified token ID - `--model_dir`: Base model path with expanded vocabulary - `--output_dir`: Model output path **Note**: After training, convert the checkpoint to HuggingFace format (see [Model Conversion](#model-conversion)). #### 4.2 Stage2 Pretraining Stage2 is used for full-parameter pretraining to further optimize model performance. This stage unfreezes all model parameters and performs co-pretraining on a mixed domain of recommendation data and general text data. ```bash # Edit examples/pretrain_stg2.sh to set model path, output path, and other parameters # MODEL_DIR should point to the converted hf model path from Stage1 training output bash examples/pretrain_stg2.sh ``` Main training parameters (configured in `pretrain_stg2.sh`): - `--dataset_config examples/dataset_config/pretrain.json`: Specify data configuration (including recommendation data and general text data) - `--model_dir`: Converted model path from Stage1 output - `--output_dir`: Model output path - Note: **Does not include** `--freeze_llm` parameter, indicating full-parameter training **Note**: After training, convert the checkpoint to HuggingFace format (see [Model Conversion](#model-conversion)). 
#### 4.3 SFT Fine-tuning

SFT (Supervised Fine-Tuning) is used for instruction fine-tuning to improve model performance on specific tasks. This stage performs supervised learning on instruction-following data, enabling the model to better understand and execute recommendation-related instructions.

```bash
# Edit examples/posttrain_sft.sh to set model path, output path, and other parameters
# MODEL_DIR should point to the converted hf model path from Stage2 training output
bash examples/posttrain_sft.sh
```

Main training parameters (configured in `posttrain_sft.sh`):
- `--dataset_config examples/dataset_config/sft.json`: Specify SFT data configuration
- `--model_dir`: Converted model path from Stage2 output
- `--output_dir`: Model output path
- `add_think_pattern: true` in the data configuration enables thinking mode, which automatically adds `<think></think>` tags and the `/think` and `/no_think` instructions (for reasoning tasks)

**Note**: After training, convert the checkpoint to HuggingFace format (see [Model Conversion](#model-conversion)).

## Training Configuration

### Data Configuration Fields

| Field | Type | Description |
|-------|------|-------------|
| `name` | str | Data loader name, default is `"chat_completion_parquet"` |
| `sources` | str | Data file list path (JSON file) or directory path list |
| `base_model_dir` | str | Base model path (with expanded vocabulary), used for tokenizing data |
| `max_length` | int | Maximum sequence length |
| `num_epochs` | int | Number of training epochs |
| `num_workers` | int | Number of dataloader workers |
| `model_class` | str | Model class name, default is `"Qwen3ForCausalLM"` |
| `itemic_id_range` | list | Itemic token ID range `[start, end]`, only used for metrics statistics |
| `only_assistant_loss` | bool | Whether to compute loss only on assistant responses; applies to chat-format data |
| `local_shuffle_buffer_size` | int | Local sample-level shuffle buffer size |
| `add_think_pattern` | bool | Whether to add think tags (adds `/think` / `/no_think` to the prompt and `<think></think>` to the response) |

Notes:
* The default dataset is implemented on top of `torch.utils.data.IterableDataset`
* By default, one GPU is bound to one process, and each process creates `num_workers` dataloader workers. The dataset distributes files from `sources` to workers at file granularity based on the total worker count; the file list is shuffled before distribution, and sample-level shuffling is performed according to `local_shuffle_buffer_size` while reading (see the sketch below)
* If `num_epochs` > 1, file distribution is repeated for each epoch, with the file list reshuffled every time
### Training Parameters

Main training parameters are passed via command line to `recipes/train_qwen3.py`:

| Parameter | Description |
|-----------|-------------|
| `--model_dir` | Base model path (HuggingFace format) |
| `--output_dir` | Model output path |
| `--dataset_config` | Data configuration file path |
| `--freeze_llm` | Whether to freeze LLM parameters |
| `--learning_rate` | Learning rate |
| `--max_length` | Sequence length per step |
| `--min_lr` | Minimum learning rate |
| `--lr_scheduler_type` | Learning rate scheduler type (e.g., `cosine`) |
| `--num_training_steps` | Number of training steps |
| `--save_checkpoint_per_step` | Save a checkpoint every N steps |
| `--minibatch_size` | LLM head chunk size for chunked loss computation to save memory |
| `--resume_from` | Checkpoint directory path to resume training from |
| `--resume_from_tag` | Checkpoint tag to resume from (e.g., `global_step1000`) |
| `--resume_training_state` | Whether to restore the full training state (including optimizer, lr scheduler, and dataloader state) |
| `--start_optimize_embedding_index` | Start optimizing embeddings from the specified token ID (for Stage1 training, typically set to the starting ID of itemic tokens, e.g., 151669) |
| `--use_tie_weights` | Tie embedding and lm_head weights (required for smaller models like 0.6B / 1.7B / 4B to align with the Qwen3 model configuration) |

Notes:
* `resume_from` is used to load checkpoints produced by the framework. When `resume_from` is configured, it takes priority, and only the model structure parameters from `model_dir` are used for initialization; if it is not configured, the weights from `model_dir` are loaded as well
* `num_training_steps` only affects the lr decay schedule: it ensures that lr decays to the minimum when training reaches `num_training_steps`, but training does not stop there. It is recommended to derive the maximum number of training steps from the token count and `max_length` (see the example below)
* `max_length` is the maximum sequence length per GPU per step; the framework packs samples based on this configuration
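As a worked example of the `num_training_steps` recommendation above (all numbers are illustrative):

```python
# Rough step budget: with packing, each GPU consumes ~max_length tokens per step,
# so steps ≈ total_tokens / (max_length * world_size).
total_tokens = 100e9   # illustrative corpus size in tokens
max_length   = 32768   # per-GPU tokens per step (--max_length)
world_size   = 64      # total number of GPUs
print(f"~{total_tokens / (max_length * world_size):,.0f} steps")  # ~47,684 steps
```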
## Utility Scripts

### Model Conversion

Convert trained checkpoints to HuggingFace format:

```bash
bash scripts/convert_checkpoint_to_hf.sh
```

Parameter description:
- `base_model_dir`: Qwen base model directory with expanded vocabulary (output of the vocabulary expansion stage)
- `model_home`: Training output directory (i.e., `OUTPUT_DIR` in the training script)
- `step`: Checkpoint step number to convert

**Example:**
```bash
# Assuming the vocabulary-expanded model is in ./qwen_extended
# Training output is in ./output
# Converting the checkpoint at step 4000
bash scripts/convert_checkpoint_to_hf.sh ./qwen_extended ./output 4000
```

Conversion process:
1. The script automatically locates the `{model_home}/step{step}/global_step{step}` directory
2. Reads the training checkpoint from that directory
3. Saves the converted HuggingFace-format model to `{model_home}/step{step}/global_step{step}/converted/`

The converted model can be used directly for:
- Loading and inference with HuggingFace Transformers
- Subsequent SFT or other fine-tuning stages
- Model evaluation and deployment

### Model Testing

Test the converted HuggingFace model:

```bash
bash scripts/test_hf_model.sh
```

Parameter description:
- `hf_model_dir`: Converted HuggingFace model directory

**Example:**
```bash
# Test the converted model at step 4000
bash scripts/test_hf_model.sh ./output/step4000/global_step4000/converted/
```

This script verifies:
- Whether model weights are loaded correctly
- Whether the forward pass works normally
- Whether generation is available

### Training Monitoring

Logs and outputs during training:
- **Standard output/error**: Saved in `$OUTPUT_DIR/stdout.log` and `$OUTPUT_DIR/stderr.log`
- **Training logs**: Contain loss values, learning rate, training steps, and other information
- **TensorBoard**: The model supports TensorBoard visualization. You can start TensorBoard with:
  ```bash
  tensorboard --logdir=$OUTPUT_DIR
  ```
- **Checkpoints**: Saved at the configured step interval (`--save_checkpoint_per_step`)

### Checkpoint Management

Checkpoints are saved periodically during training with the following directory structure:

```
output_dir/
├── step50/
│   └── global_step50/
│       ├── model/       # Model weights
│       ├── optimizer/   # Optimizer state
│       └── ...
├── step100/
│   └── global_step100/
│       └── ...
└── ...
```

**Resuming Training**: To resume training from a checkpoint, add the following to the training script:

```bash
--resume_from $OUTPUT_DIR/step1000 \
--resume_from_tag global_step1000 \
--resume_training_state
```

## Notes

1. **MPI Environment**:
   - Training scripts use `mpirun` for multi-node distributed training, requiring OpenMPI or a compatible MPI implementation
   - A proper hostfile is required (e.g., `/etc/mpi/hostfile`), with one node address per line
   - Ensure passwordless SSH access between all nodes
   - Training scripts automatically read environment variables such as `OMPI_COMM_WORLD_RANK`, `OMPI_COMM_WORLD_SIZE`, etc.
2. **Data Format**:
   - Ensure training data conforms to the Parquet format specification; refer to `../data/README.md`
   - It is recommended that each Parquet file contain approximately 1000 samples for efficient loading and shuffling
   - Data file lists are specified through JSON files, supporting both local and HDFS paths
3. **Vocabulary Expansion**:
   - Vocabulary expansion must be performed before training, using the expanded model as `base_model_dir`
   - The expanded model path needs to be specified in the `base_model_dir` field of the data configuration file
   - Ensure `itemic_id_range` is consistent with the configuration used during vocabulary expansion
4.
**Model Size**: - For smaller models like 0.6B / 1.7B / 4B, the `--use_tie_weights` parameter is required to align with Qwen3 model configuration - Different model sizes may require different learning rate and training step configurations ## Related Documentation - [OpenOneRec Main README](../README.md): Project overview and complete workflow - [Data Format Specification](../data/README.md): Training data format requirements and preprocessing methods ================================================ FILE: pretrain/examples/dataset_config/pretrain.json ================================================ { "name": "chat_completion_parquet", "sources": "../output/split_data_pretrain/file_list.json", "only_assistant_loss": false, "max_length": 30000, "base_model_dir": "/code/hf_models/Qwen3-1.7B_itemic", "num_workers": 2, "num_epochs": 4, "cut_to_pad": 1, "model_class": "Qwen3ForCausalLM", "full_attention": false, "local_shuffle_buffer_size": 10000, "max_sample_length": 30000, "local_shuffle_random_fetch": 0.0001, "itemic_id_range": [151669, 176246] } ================================================ FILE: pretrain/examples/dataset_config/sft.json ================================================ { "name": "chat_completion_parquet", "sources": "../output/split_data_sft/file_list.json", "only_assistant_loss": false, "max_length": 30000, "base_model_dir": "/code/hf_models/Qwen3-1.7B_itemic", "num_workers": 2, "num_epochs": 4, "cut_to_pad": 1, "model_class": "Qwen3ForCausalLM", "full_attention": false, "local_shuffle_buffer_size": 10000, "max_sample_length": 30000, "local_shuffle_random_fetch": 0.0001, "itemic_id_range": [151669, 176246], "add_think_pattern": true } ================================================ FILE: pretrain/examples/posttrain_sft.sh ================================================ sed 's/=1/=8/g' /etc/mpi/hostfile > /etc/mpi/hostfile_seq # MODEL_DIR=/code/hf_models/Qwen3-1.7B_itemic STAGE2_OUTPUT_DIR=/code/onerec_pretrain/model_output/stg2_opt_utils_big MODEL_DIR=${STAGE2_OUTPUT_DIR}/step5000/global_step5000/converted OUTPUT_DIR=/code/onerec_pretrain/model_output/sft_output mkdir -p $OUTPUT_DIR mkdir -p /tmp/_wids_cache nnode=$(wc -l < /etc/mpi/hostfile_seq) set -x SCRIPT_FILE=$(readlink -f $0) echo `date '+%Y-%m-%d %H:%M:%S'` >> $OUTPUT_DIR/task_info.log echo "script: ${SCRIPT_FILE}" >> $OUTPUT_DIR/task_info.log echo "=========================" >> $OUTPUT_DIR/task_info.log echo "Output: $OUTPUT_DIR" export PYTHONPATH=$PWD:$PYTHONPATH source set_env.sh hostfile=/etc/mpi/hostfile_seq TCP_NIC=$(ifconfig | grep -B1 " "$(hostname -i)" " | grep -o "^\w*") MASTER_ADDR=$MY_NODE_IP MASTER_PORT=8499 mpirun --allow-run-as-root \ -hostfile $hostfile \ -mca btl self,tcp -mca pml ob1 \ -mca plm_rsh_num_concurrent 600 \ -mca routed_radix 600 \ -mca btl_tcp_if_include $TCP_NIC \ -mca oob_tcp_if_include $TCP_NIC \ -mca btl_openib_allow_ib false \ -mca opal_set_max_sys_limits 1 \ -x OMPI_MCA_btl=self,tcp \ -x OMPI_MCA_pml=ob1 \ -x OMPI_MCA_btl_tcp_if_include=$TCP_NIC \ -x OMPI_MCA_oob_tcp_if_include=$TCP_NIC \ -x OMPI_MCA_btl_openib_allow_ib=false \ -x NCCL_IB_DISABLE=0 \ -x NCCL_IB_GID_INDEX=3 \ -x NCCL_SOCKET_IFNAME=$TCP_NIC \ -x NCCL_IB_HCA=mlx5 \ -x NCCL_DEBUG=WARN \ -x NCCL_IB_QPS_PER_CONNECTION=4 \ -x NCCL_NET_OVERHEAD=1000 \ -x NCCL_IB_TIMEOUT=20 \ -x LD_PRELOAD=$LD_PRELOAD \ -x http_proxy="" \ -x https_proxy="" \ -x HOROVOD_MPI_THREADS_DISABLE=1 \ -x MPI_THREAD_SINGLE=1 \ -x NO_COLOR=1 \ -x TERM=dumb \ -x COLORTERM=0 \ -x PYTHONIOENCODING=utf-8 \ -x LD_LIBRARY_PATH=$LIBRARY_PATH \ -x PATH 
\ -x PYTHONPATH=$PYTHONPATH \ -x JAVA_HOME=$JAVA_HOME \ -x HIVE_HOME=$HIVE_HOME \ -x CLASSPATH=$CLASSPATH \ -x HADOOP_USER_NAME=$HADOOP_USER_NAME \ -x HADOOP_HOME=$HADOOP_HOME \ -x SPARK_HOME=$SPARK_HOME \ -x MASTER_ADDR=$MASTER_ADDR \ -x MASTER_PORT=$MASTER_PORT \ -x TOKENIZERS_PARALLELISM=false \ with_nccl_local_env \ bash -c "bash scripts/numa_runner.sh python3 recipes/train_qwen3.py \ --model_dir $MODEL_DIR \ --output_dir $OUTPUT_DIR \ --dataset_config examples/dataset_config/sft.json \ --use_tie_weights \ --model_class Qwen3ForCausalLM \ --monitor_datasource_loss \ --monitor_datasource_cnt \ --max_length 32768 \ --learning_rate 2e-4 \ --min_lr 1e-4 \ --weight_decay 0.1 \ --max_grad_norm 1.0 \ --lr_scheduler_type cosine \ --num_warmup_steps 500 \ --num_training_steps 5000 \ --save_checkpoint_per_step 50 \ --minibatch_size 16384 \ --logging_per_step 5 \ --use_fp32_weight \ --seed 19260817 \ --enable_profiler \ --enable_gradient_checkpointing \ --use_chunked_loss_computer \ " > $OUTPUT_DIR/stdout.log 2>$OUTPUT_DIR/stderr.log & # --resume_from $PRETRAIN_OUTPUT_DIR/step5000 \ # --resume_from_tag global_step5000 \ ================================================ FILE: pretrain/examples/pretrain_stg1.sh ================================================ sed 's/=1/=8/g' /etc/mpi/hostfile > /etc/mpi/hostfile_seq MODEL_DIR=/code/hf_models/Qwen3-1.7B_itemic OUTPUT_DIR=/code/onerec_pretrain/model_output/stg1_opt_utils_big mkdir -p $OUTPUT_DIR mkdir -p /tmp/_wids_cache nnode=$(wc -l < /etc/mpi/hostfile_seq) set -x SCRIPT_FILE=$(readlink -f $0) echo `date '+%Y-%m-%d %H:%M:%S'` >> $OUTPUT_DIR/task_info.log echo "script: ${SCRIPT_FILE}" >> $OUTPUT_DIR/task_info.log echo "=========================" >> $OUTPUT_DIR/task_info.log echo "Output: $OUTPUT_DIR" export PYTHONPATH=$PWD:$PYTHONPATH source set_env.sh hostfile=/etc/mpi/hostfile_seq TCP_NIC=$(ifconfig | grep -B1 " "$(hostname -i)" " | grep -o "^\w*") MASTER_ADDR=$MY_NODE_IP MASTER_PORT=8499 mpirun --allow-run-as-root \ -hostfile $hostfile \ -mca btl self,tcp -mca pml ob1 \ -mca plm_rsh_num_concurrent 600 \ -mca routed_radix 600 \ -mca btl_tcp_if_include $TCP_NIC \ -mca oob_tcp_if_include $TCP_NIC \ -mca btl_openib_allow_ib false \ -mca opal_set_max_sys_limits 1 \ -x OMPI_MCA_btl=self,tcp \ -x OMPI_MCA_pml=ob1 \ -x OMPI_MCA_btl_tcp_if_include=$TCP_NIC \ -x OMPI_MCA_oob_tcp_if_include=$TCP_NIC \ -x OMPI_MCA_btl_openib_allow_ib=false \ -x NCCL_IB_DISABLE=0 \ -x NCCL_IB_GID_INDEX=3 \ -x NCCL_SOCKET_IFNAME=$TCP_NIC \ -x NCCL_IB_HCA=mlx5 \ -x NCCL_DEBUG=WARN \ -x NCCL_IB_QPS_PER_CONNECTION=4 \ -x NCCL_NET_OVERHEAD=1000 \ -x NCCL_IB_TIMEOUT=20 \ -x LD_PRELOAD=$LD_PRELOAD \ -x http_proxy="" \ -x https_proxy="" \ -x HOROVOD_MPI_THREADS_DISABLE=1 \ -x MPI_THREAD_SINGLE=1 \ -x NO_COLOR=1 \ -x TERM=dumb \ -x COLORTERM=0 \ -x PYTHONIOENCODING=utf-8 \ -x LD_LIBRARY_PATH=$LIBRARY_PATH \ -x PATH \ -x PYTHONPATH=$PYTHONPATH \ -x JAVA_HOME=$JAVA_HOME \ -x HIVE_HOME=$HIVE_HOME \ -x CLASSPATH=$CLASSPATH \ -x HADOOP_USER_NAME=$HADOOP_USER_NAME \ -x HADOOP_HOME=$HADOOP_HOME \ -x SPARK_HOME=$SPARK_HOME \ -x MASTER_ADDR=$MASTER_ADDR \ -x MASTER_PORT=$MASTER_PORT \ -x TOKENIZERS_PARALLELISM=false \ with_nccl_local_env \ bash -c "bash scripts/numa_runner.sh python3 recipes/train_qwen3.py \ --model_dir $MODEL_DIR \ --output_dir $OUTPUT_DIR \ --dataset_config examples/dataset_config/pretrain.json \ --freeze_llm \ --use_tie_weights \ --start_optimize_embedding_index 151669 \ --model_class Qwen3ForCausalLM \ --monitor_datasource_loss \ --monitor_datasource_cnt \ --max_length 
32768 \ --learning_rate 2e-4 \ --min_lr 1e-4 \ --weight_decay 0.1 \ --max_grad_norm 1.0 \ --lr_scheduler_type cosine \ --num_warmup_steps 200 \ --num_training_steps 2000 \ --save_checkpoint_per_step 50 \ --minibatch_size 16384 \ --logging_per_step 5 \ --use_fp32_weight \ --seed 19260817 \ --enable_profiler \ --enable_gradient_checkpointing \ --use_chunked_loss_computer \ " > $OUTPUT_DIR/stdout.log 2>$OUTPUT_DIR/stderr.log & ================================================ FILE: pretrain/examples/pretrain_stg2.sh ================================================ sed 's/=1/=8/g' /etc/mpi/hostfile > /etc/mpi/hostfile_seq # MODEL_DIR=/code/hf_models/Qwen3-1.7B_itemic STAGE1_OUTPUT_DIR=/code/onerec_pretrain/model_output/stg1_opt_utils_big MODEL_DIR=${STAGE1_OUTPUT_DIR}/step2000/global_step2000/converted OUTPUT_DIR=/code/onerec_pretrain/model_output/stg2_opt_utils_big mkdir -p $OUTPUT_DIR mkdir -p /tmp/_wids_cache nnode=$(wc -l < /etc/mpi/hostfile_seq) set -x SCRIPT_FILE=$(readlink -f $0) echo `date '+%Y-%m-%d %H:%M:%S'` >> $OUTPUT_DIR/task_info.log echo "script: ${SCRIPT_FILE}" >> $OUTPUT_DIR/task_info.log echo "=========================" >> $OUTPUT_DIR/task_info.log echo "Output: $OUTPUT_DIR" export PYTHONPATH=$PWD:$PYTHONPATH source set_env.sh hostfile=/etc/mpi/hostfile_seq TCP_NIC=$(ifconfig | grep -B1 " "$(hostname -i)" " | grep -o "^\w*") MASTER_ADDR=$MY_NODE_IP MASTER_PORT=8499 mpirun --allow-run-as-root \ -hostfile $hostfile \ -mca btl self,tcp -mca pml ob1 \ -mca plm_rsh_num_concurrent 600 \ -mca routed_radix 600 \ -mca btl_tcp_if_include $TCP_NIC \ -mca oob_tcp_if_include $TCP_NIC \ -mca btl_openib_allow_ib false \ -mca opal_set_max_sys_limits 1 \ -x OMPI_MCA_btl=self,tcp \ -x OMPI_MCA_pml=ob1 \ -x OMPI_MCA_btl_tcp_if_include=$TCP_NIC \ -x OMPI_MCA_oob_tcp_if_include=$TCP_NIC \ -x OMPI_MCA_btl_openib_allow_ib=false \ -x NCCL_IB_DISABLE=0 \ -x NCCL_IB_GID_INDEX=3 \ -x NCCL_SOCKET_IFNAME=$TCP_NIC \ -x NCCL_IB_HCA=mlx5 \ -x NCCL_DEBUG=WARN \ -x NCCL_IB_QPS_PER_CONNECTION=4 \ -x NCCL_NET_OVERHEAD=1000 \ -x NCCL_IB_TIMEOUT=20 \ -x LD_PRELOAD=$LD_PRELOAD \ -x http_proxy="" \ -x https_proxy="" \ -x HOROVOD_MPI_THREADS_DISABLE=1 \ -x MPI_THREAD_SINGLE=1 \ -x NO_COLOR=1 \ -x TERM=dumb \ -x COLORTERM=0 \ -x PYTHONIOENCODING=utf-8 \ -x LD_LIBRARY_PATH=$LIBRARY_PATH \ -x PATH \ -x PYTHONPATH=$PYTHONPATH \ -x JAVA_HOME=$JAVA_HOME \ -x HIVE_HOME=$HIVE_HOME \ -x CLASSPATH=$CLASSPATH \ -x HADOOP_USER_NAME=$HADOOP_USER_NAME \ -x HADOOP_HOME=$HADOOP_HOME \ -x SPARK_HOME=$SPARK_HOME \ -x MASTER_ADDR=$MASTER_ADDR \ -x MASTER_PORT=$MASTER_PORT \ -x TOKENIZERS_PARALLELISM=false \ with_nccl_local_env \ bash -c "bash scripts/numa_runner.sh python3 recipes/train_qwen3.py \ --model_dir $MODEL_DIR \ --output_dir $OUTPUT_DIR \ --dataset_config examples/dataset_config/pretrain.json \ --use_tie_weights \ --model_class Qwen3ForCausalLM \ --monitor_datasource_loss \ --monitor_datasource_cnt \ --max_length 32768 \ --learning_rate 2e-4 \ --min_lr 1e-4 \ --weight_decay 0.1 \ --max_grad_norm 1.0 \ --lr_scheduler_type cosine \ --num_warmup_steps 500 \ --num_training_steps 5000 \ --save_checkpoint_per_step 50 \ --minibatch_size 16384 \ --logging_per_step 5 \ --use_fp32_weight \ --seed 19260817 \ --enable_profiler \ --enable_gradient_checkpointing \ --use_chunked_loss_computer \ " > $OUTPUT_DIR/stdout.log 2>$OUTPUT_DIR/stderr.log & ================================================ FILE: pretrain/onerec_llm/__init__.py ================================================ ================================================ FILE: 
pretrain/onerec_llm/data/__init__.py ================================================ ================================================ FILE: pretrain/onerec_llm/data/dataloaders.py ================================================ from torchdata.stateful_dataloader import StatefulDataLoader from onerec_llm.data.qwen3_dataset import Qwen3ChatCompletionParquetDataset def get_chat_completion_parquet_dataloader(sources: str, max_length, base_model_dir, num_epochs=1, shuffle_seed=1024, num_workers=8, datasource_config={}, **kwargs): model_type = kwargs.get('model_class','Qwen3ForCausalLM') ModelDataset = {'Qwen3ForCausalLM': Qwen3ChatCompletionParquetDataset} num_readers = kwargs.get("num_readers", 1) shuffle_window = kwargs.get("shuffle_window", 0) def input_creator(): return ModelDataset[model_type]( sources = sources, num_workers = num_workers, num_epochs = num_epochs, shuffle_seed = shuffle_seed, max_length = max_length, base_model_dir=base_model_dir, datasource_config=datasource_config, num_readers=num_readers, shuffle_window=shuffle_window, **kwargs ) dataset = input_creator() dataloader = StatefulDataLoader( dataset=dataset, shuffle=False, batch_size=1, num_workers=num_workers, collate_fn=lambda x: x[0], ) return dataloader def get_dataloader(name: str, **kwargs): if name == "chat_completion_parquet": return get_chat_completion_parquet_dataloader( **kwargs ) else: raise NotImplementedError("Unsupported dataloader.") ================================================ FILE: pretrain/onerec_llm/data/local_shuffle_buffer.py ================================================ """ Local shuffle buffer for data randomization during iteration. This module provides a fixed-size buffer that randomizes the order of data samples using hash-based indexing with SortedDict for efficient random access. """ import hashlib import logging import threading import traceback from collections import defaultdict from sortedcontainers import SortedDict logger = logging.getLogger(__name__) class LocalShuffleBuffer: """ A buffer class to implement local data shuffling. Maintains a fixed-size buffer to randomize the order of data samples during iteration. Uses hash-based indexing with SortedDict for efficient random access. Attributes: buffer_size: Maximum capacity of the buffer random_fetch: Probability to randomly fetch a sample before buffer is full buffer: SortedDict storing samples (key: hash, value: sample) count: Statistics counter (adds, conflicts, buffer_epoch) buffer_multiply: Large multiplier to avoid hash collisions across epochs lock: Thread lock for thread-safe operations """ def __init__(self, buffer_size: int = 2048, random_fetch: float = 0.01) -> None: """ Initialize the LocalShuffleBuffer. 
Args: buffer_size: Maximum capacity of the buffer (default: 2048) random_fetch: Probability to randomly fetch a sample before buffer is full (0.0-1.0, default: 0.01) """ if buffer_size <= 0: raise ValueError(f"buffer_size must be positive, got {buffer_size}") if not 0.0 <= random_fetch <= 1.0: raise ValueError(f"random_fetch must be between 0.0 and 1.0, got {random_fetch}") self.buffer_size = buffer_size self.random_fetch = random_fetch self.buffer = SortedDict() # key: hash, value: sample self.count = defaultdict(int) self.count["buffer_epoch"] = 0 # Large multiplier (0xffffffffffffffff) to avoid hash collisions across epochs self.buffer_multiply = int('f' * 16, 16) self.lock = threading.Lock() def _calc_sample_hash(self, obj: dict, buffer_epoch: int = None) -> int: """ Calculate a unique hash for a sample to use as buffer key. Maps sample identifier to integer with random-like distribution using MD5 hash. Adds epoch-based offset to prevent cross-epoch hash collisions. Args: obj: Sample object containing "uuid" and "source" keys buffer_epoch: Optional epoch index. If None, uses current buffer_epoch Returns: Integer hash value """ if buffer_epoch is None: buffer_epoch = self.count["buffer_epoch"] # Create unique string from sample identifiers unique_str = f"{obj['uuid']}{obj['source']}@ep{buffer_epoch}" # Generate MD5 hash and convert to integer (use first 16 hex chars = 64 bits) hash_obj = hashlib.md5(unique_str.encode('utf-8')) hex_str = hash_obj.hexdigest()[:16] base_hash = int(hex_str, 16) # Add epoch-based offset to prevent cross-epoch collisions return base_hash + self.buffer_multiply * buffer_epoch def add(self, obj: dict, fn: str = None, epoch: int = None) -> bool: """ Add a sample to the buffer. Args: obj: Sample object to add to buffer (must contain "uuid" and "source" keys) fn: Optional filename/identifier for logging epoch: Optional epoch index Returns: True if sample was added and buffer isn't ready for extraction, False if extraction should occur (buffer full or random fetch triggered) """ try: # Calculate hash for the sample obj_hash = self._calc_sample_hash(obj, buffer_epoch=epoch) self.count["add"] += 1 # Update buffer epoch every buffer_size additions if self.count["add"] % self.buffer_size == 0: self.count["buffer_epoch"] += 1 # Handle hash collisions (duplicate unique identifiers) if obj_hash in self.buffer: self.count["conflict"] += 1 # Log warning periodically for collision rate if self.count["conflict"] % 100 == 0: conflict_rate = self.count["conflict"] / self.count["add"] logger.warning( f"{'=' * 30}\n" f"Potential duplicate samples with same uuid/source! " f"uuid={obj['uuid']}, source={obj['source']}, fn={fn}, " f"conflict_rate={conflict_rate:.4f}, add_count={self.count['add']}\n" f"{'=' * 30}" ) with self.lock: self.buffer[obj_hash] = obj # Random fetch trigger: small probability to extract before buffer is full # This prevents downstream timeout errors if (obj_hash % 10000) < int(10000 * self.random_fetch): return False # Trigger extraction # Check if buffer has reached capacity return len(self.buffer) < self.buffer_size except Exception as e: logger.error(f"Error in LocalShuffleBuffer.add(): {traceback.format_exc()}") raise def get(self) -> dict: """ Extract a sample from the buffer. 
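        Typically called when add() returns False (random fetch triggered or
        the buffer reached capacity), and again in a drain loop once the
        input stream is exhausted. A minimal driving loop, sketched with a
        hypothetical ``rows`` iterable (each sample must carry "uuid" and
        "source" keys):

            buf = LocalShuffleBuffer(buffer_size=4096, random_fetch=0.01)
            for row in rows:
                if buf.add(row):       # True: keep filling the buffer
                    continue
                shuffled = buf.get()   # False: pop one shuffled sample
            while len(buf) > 0:        # drain leftovers
                shuffled = buf.get()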
Returns: A sample object from the buffer Raises: ValueError: If buffer is empty """ if len(self.buffer) == 0: raise ValueError("Cannot get sample from empty buffer") with self.lock: # Pop first item from SortedDict (provides random-like access due to hashing) # popitem(0) removes the first (smallest) key-value pair return self.buffer.popitem(0)[1] def __len__(self) -> int: """Return current number of samples in the buffer.""" return len(self.buffer) ================================================ FILE: pretrain/onerec_llm/data/qwen3_dataset.py ================================================ import logging import os import json import time import traceback import random import re import multiprocessing import numpy as np import webdataset as wds from easydict import EasyDict as edict from typing import Union, Iterable, Optional, List, Dict, Tuple, Any import torch import torch.distributed as dist import torch.nn.functional as F from torch.utils.data import IterableDataset from transformers import AutoTokenizer, AutoConfig from onerec_llm.data.local_shuffle_buffer import LocalShuffleBuffer from onerec_llm.utils.common import print_rank_0 from onerec_llm.utils.worker_utils import pytorch_worker_info from onerec_llm.utils.data_utils import shell_hdfs_ls, load_parquet_file from onerec_llm.models.qwen3.configuration_qwen3 import Qwen3Config logger = logging.getLogger(__name__) def set_kwargs(self, kwargs, **_kwargs): kwargs.update(_kwargs) self.kwargs = edict(kwargs) for k, v in kwargs.items(): setattr(self, k, v) class Qwen3ChatCompletionDataset(IterableDataset): def __init__(self, **kwargs): set_kwargs(self, kwargs) print_rank_0(f"ChatCompletionDataset init with kwargs={kwargs}") try: model_config = AutoConfig.from_pretrained(self.kwargs.base_model_dir) except Exception: model_config = Qwen3Config.from_pretrained(self.kwargs.base_model_dir) self.pad_token_id = model_config.pad_token_id self.dataset, self.total_samples = self._build_source_dataset(self.sources) # for data_source monitor self.source_sample_cnt = {} self.source_error_cnt = {} self.tokenizer = AutoTokenizer.from_pretrained(self.base_model_dir, trust_remote_code=True) self.max_sample_length = min(self.max_length, self.kwargs.get("max_sample_length", 9999999)) assert self.max_length > 0 # Chat template tokens self.im_start_token = "<|im_start|>" self.im_end_token = "<|im_end|>" self.im_start_token_id = self.tokenizer.encode(self.im_start_token)[0] self.im_end_token_id = self.tokenizer.encode(self.im_end_token)[0] # Derive chat template patterns from tokenizer instead of hardcoded token ids. self.assistant_start_pattern = self.tokenizer.encode( f"{self.im_start_token}assistant\n", add_special_tokens=False, ) self.im_end_pattern = self.tokenizer.encode( f"{self.im_end_token}\n", add_special_tokens=False, ) if not self.im_end_pattern: self.im_end_pattern = self.tokenizer.encode( self.im_end_token, add_special_tokens=False, ) self.add_think_pattern = self.kwargs.get("add_think_pattern", False) if self.add_think_pattern: logger.info(f"Thinking pattern enabled: add_think_pattern={self.add_think_pattern}") self.itemic_id_range = self.kwargs.get("itemic_id_range", None) if self.itemic_id_range is not None: assert len(self.itemic_id_range) == 2, "itemic_id_range must be a list of two elements" assert self.itemic_id_range[0] < self.itemic_id_range[1], "itemic_id_range[0] must be less than itemic_id_range[1]" def _build_source_dataset(self, sources): """Build WebDataset from source configuration files. 
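        Each config file is a JSON document whose "shardlist" entries name a
        shard (resolved relative to the config file's directory) and its
        sample count, e.g. (hypothetical shard names):

            {"shardlist": [{"url": "shard-000000.tar", "nsamples": 1024},
                           {"url": "shard-000001.tar", "nsamples": 998}]}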
        Args:
            sources: String (comma-separated) or list of JSON config file paths

        Returns:
            tuple: (dataset, total_samples)
        """
        if isinstance(sources, str):
            sources = sources.split(",")

        # Read URLs from configuration files
        urls = []
        total_samples = 0
        for source in sources:
            with open(source, encoding="utf-8") as f:
                index = json.loads(f.read())["shardlist"]
            source_dir = os.path.dirname(source)
            for item in index:
                urls.append(os.path.join(source_dir, item["url"]))
                total_samples += item["nsamples"]

        # Sort, shuffle and broadcast URLs across all ranks
        urls.sort()
        random.shuffle(urls)
        url_list = [urls]
        dist.broadcast_object_list(url_list, src=0)
        urls = url_list[0]
        logger.info(f"[RANK{dist.get_rank()}] Loaded {len(urls)} URLs, total_samples={total_samples}")

        # Build WebDataset
        dataset = wds.WebDataset(
            urls,
            handler=wds.warn_and_continue,
            resampled=True,
            shardshuffle=True,
            cache_dir="/tmp/_wids_cache",
            nodesplitter=wds.split_by_node,
            workersplitter=wds.split_by_worker
        )
        dataset = dataset.shuffle(
            self.shuffle_size, initial=self.shuffle_initial_size
        ).decode("pil", handler=wds.warn_and_continue)
        return dataset, total_samples

    def _convert_messages(self, messages):
        msg_list = []
        for msg in messages:
            content = msg['content']
            if isinstance(content, str):
                msg_list.append({
                    'role': msg['role'],
                    'content': content
                })
            elif isinstance(content, dict) and 'type' in content and content['type'] == 'text':
                msg_list.append({
                    'role': msg['role'],
                    'content': content['text']
                })
            elif isinstance(content, list) and len(content) > 0:
                content_text = ""
                for c in content:
                    if isinstance(c, dict) and 'type' in c and c['type'] == 'text':
                        content_text += c['text']
                    elif isinstance(c, str):
                        content_text += c
                    else:
                        continue
                msg_list.append({
                    'role': msg['role'],
                    'content': content_text
                })
            else:
                raise ValueError(f"Unsupported content type: {type(content)}")

        if self.add_think_pattern:
            # Process thinking pattern: add /think or /no_think suffix to user messages
            # based on whether assistant message contains reasoning content
            for i in range(len(msg_list)):
                if msg_list[i]['role'] == 'assistant':
                    assistant_content = msg_list[i]['content']
                    # Find corresponding user message (typically the previous one)
                    user_idx = i - 1
                    if user_idx < 0 or msg_list[user_idx]['role'] != 'user':
                        continue
                    # Check if assistant content contains <think> tags
                    pattern = r'<think>(.*?)</think>'
                    match = re.search(pattern, assistant_content, re.DOTALL)
                    if match is None:
                        # No reasoning tags found: add empty <think> tags and mark as /no_think
                        msg_list[user_idx]['content'] += "/no_think"
                        msg_list[i]['content'] = "<think>\n\n</think>" + assistant_content
                    else:
                        # Reasoning tags found: check if they contain actual content
                        reasoning_content = match.group(1)
                        if reasoning_content.strip():
                            # Has reasoning content: mark as /think
                            msg_list[user_idx]['content'] += "/think"
                        else:
                            # Empty reasoning tags: mark as /no_think
                            msg_list[user_idx]['content'] += "/no_think"
        return msg_list

    def _get_assistant_mask(self, batch_input_ids: torch.Tensor,
                            start_pattern: Optional[List[int]],
                            end_pattern: Optional[List[int]]):
        """
        Generate mask for assistant tokens in chat format.
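        Only tokens strictly between an assistant start pattern and the next
        end pattern receive loss. Schematically, for the token sequence of

            <|im_start|>user\nQ<|im_end|>\n<|im_start|>assistant\nA1 A2<|im_end|>\n

        the mask is 1 on the tokens of A1 and A2 and 0 everywhere else,
        including the chat-template control tokens themselves.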
        Args:
            batch_input_ids: Input token IDs
            start_pattern: Pattern to identify start of assistant response
            end_pattern: Pattern to identify end of assistant response

        Returns:
            mask: Boolean mask indicating which tokens to compute loss on
        """
        if not start_pattern:
            start_pattern = self.assistant_start_pattern
        if not end_pattern:
            end_pattern = self.im_end_pattern

        masks = []
        for input_ids in batch_input_ids:
            ids = input_ids.tolist()
            mask = [0] * len(ids)
            start_len = len(start_pattern)
            end_len = len(end_pattern)
            i = 0
            while i <= len(ids) - start_len:
                if ids[i:i + start_len] != start_pattern:
                    i += 1
                    continue
                content_start = i + start_len
                j = content_start
                found_end = False
                while j <= len(ids) - end_len:
                    if ids[j:j + end_len] == end_pattern:
                        found_end = True
                        break
                    j += 1
                if not found_end:
                    for k in range(content_start, len(ids)):
                        mask[k] = 1
                    break
                for k in range(content_start, j):
                    mask[k] = 1
                i = j + end_len
            masks.append(mask)
        return torch.tensor(masks, dtype=torch.long)

    def _get_rope_index_qwen3(
        self,
        input_ids: torch.LongTensor,
    ) -> torch.Tensor:
        position_ids = torch.arange(input_ids.shape[1], device=input_ids.device)
        position_ids = position_ids.unsqueeze(0).expand(input_ids.shape[0], -1)
        return position_ids

    def _process_completion(self, sample: Dict[str, Any]) -> Dict[str, torch.Tensor]:
        """
        Process segments format data into model inputs.

        Args:
            sample: Sample containing segments with pre-tokenized tokens

        Returns:
            Dictionary containing input_ids, attention_mask, labels, etc.
        """
        segments = sample["json"]["segments"]
        segments_text = ""
        for segment in segments:
            if segment["type"] == "text":
                segments_text += segment["text"]
            else:
                logger.error(f"segment type is not text, skip: {segment}")
                continue

        # Note: do not use self.tokenizer.eos_token as it's always set to <|im_end|>
        # References:
        # 1. https://huggingface.co/Qwen/Qwen3-8B/blob/main/tokenizer_config.json#L232
        # 2. https://qwen.readthedocs.io/zh-cn/latest/getting_started/concepts.html#control-tokens
        segments_text += self.tokenizer.pad_token

        # Tokenize
        inputs = self.tokenizer(
            segments_text,
            return_tensors="pt",
            padding=False,
            truncation=False
        )
        input_ids = inputs["input_ids"]

        # Check length
        if input_ids.shape[-1] > self.max_length:
            raise ValueError(f"Sample too long: {input_ids.shape[-1]} > {self.max_length}")

        # Mask EOS token
        inputs["loss_mask"] = torch.ones_like(input_ids)
        inputs["loss_mask"][..., -1] = 0

        # itemic id index mask
        itemic_id_mask = torch.zeros_like(input_ids)
        if self.itemic_id_range is not None:
            itemic_id_mask[(input_ids >= self.itemic_id_range[0]) & (input_ids <= self.itemic_id_range[1])] = 1
        inputs["itemic_id_mask"] = itemic_id_mask

        # Generate position IDs
        inputs["position_ids"] = self._get_rope_index_qwen3(input_ids)

        return inputs

    def _process_chat(self, sample: Dict[str, Any]) -> Dict[str, torch.Tensor]:
        """
        Process messages format data into model inputs.

        Args:
            sample: Sample containing messages in the new format

        Returns:
            Dictionary containing input_ids, attention_mask, labels, etc.
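        A minimal input sample in the expected layout (hypothetical content;
        the key may be spelled "message" or "messages"):

            {"json": {"source": "video_rec",
                      "messages": [{"role": "user", "content": "..."},
                                   {"role": "assistant", "content": "..."}]}}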
""" msg_key = "message" if "message" in sample["json"] else "messages" messages = sample["json"][msg_key] msg_converted = self._convert_messages(messages) # Convert messages to text using chat template text = self.tokenizer.apply_chat_template( msg_converted, tokenize=False, add_generation_prompt=False ) # Add EOS token text += self.tokenizer.pad_token # Tokenize inputs = self.tokenizer( text, return_tensors="pt", padding=False, truncation=False ) input_ids = inputs["input_ids"] # Check length if input_ids.shape[-1] > self.max_length: raise ValueError(f"Sample too long: {input_ids.shape[-1]} > {self.max_length}") inputs["loss_mask"] = self._get_assistant_mask( input_ids, start_pattern=self.assistant_start_pattern, end_pattern=self.im_end_pattern, ) # Mask EOS token inputs["loss_mask"][..., -1] = 0 # itemic id index mask itemic_id_mask = torch.zeros_like(input_ids) if self.itemic_id_range is not None: itemic_id_mask[(input_ids >= self.itemic_id_range[0]) & (input_ids <= self.itemic_id_range[1])] = 1 inputs["itemic_id_mask"] = itemic_id_mask # Generate position IDs inputs["position_ids"] = self._get_rope_index_qwen3(input_ids) return inputs def _process(self, sample, source_name=None): if "segments" in sample["json"] and sample["json"]["segments"] is not None: inputs = self._process_completion(sample) else: inputs = self._process_chat(sample) inputs['epoch_idx'] = sample['epoch_idx'] if not inputs: raise ValueError("Empty inputs, skip") # Check if sample exceeds max_sample_length (always <= max_length) if inputs["input_ids"].shape[-1] > self.max_sample_length: logger.warning(f"Sample exceeds max_sample_length={self.max_sample_length}, length={inputs['input_ids'].shape[-1]}") raise ValueError( f"Unable to generate sample within max_sample_length={self.max_sample_length}" ) return inputs def _cut_sample(self, inputs, packable_length): inputs["input_ids"] = inputs["input_ids"][:, :packable_length] inputs["attention_mask"] = inputs["attention_mask"][:, :packable_length] inputs["loss_mask"] = inputs["loss_mask"][:, :packable_length] inputs["position_ids"] = inputs["position_ids"][..., :packable_length] inputs["itemic_id_mask"] = inputs["itemic_id_mask"][:, :packable_length] return inputs def _append_sample_packing(self, inputs: Dict[str, torch.Tensor], packed_input_ids: List[torch.Tensor], packed_position_ids: List[torch.Tensor], packed_loss_mask: List[torch.Tensor], packed_itemic_id_mask: List[torch.Tensor], packed_sample_idx: List[torch.Tensor], cu_seqlens: List[int], sample_idx: Optional[int] = None, ): packable_length = self.max_length - cu_seqlens[-1] if packable_length == 0: return if self.cut_to_pad and inputs['input_ids'].shape[1] > packable_length: inputs = self._cut_sample(inputs, packable_length) packed_input_ids.append(inputs["input_ids"].flatten()) packed_loss_mask.append(inputs["loss_mask"].flatten()) packed_position_ids.append(inputs["position_ids"]) packed_itemic_id_mask.append(inputs["itemic_id_mask"].flatten()) if sample_idx is None: sample_idx = len(cu_seqlens) - 1 packed_sample_idx.append( torch.full_like(packed_input_ids[-1], sample_idx)) cu_seqlens.append(cu_seqlens[-1] + len(inputs["input_ids"][0])) return len(inputs["input_ids"][0]) def _packing(self, buffer: List[Dict[str, torch.Tensor]]): packed_input_ids: List[torch.Tensor] = [] packed_position_ids: List[torch.Tensor] = [] packed_loss_mask: List[torch.Tensor] = [] packed_itemic_id_mask: List[torch.Tensor] = [] packed_sample_idx: List[torch.Tensor] = [] cu_seqlens: List[int] = [0] epochs = [] valid_seq_len = 0 for _, 
inputs in enumerate(buffer): epochs.append(inputs.get("epoch_idx", None)) valid_seq_len += self._append_sample_packing(inputs, packed_input_ids, packed_position_ids, packed_loss_mask, packed_itemic_id_mask, packed_sample_idx, cu_seqlens, ) packed_input_ids = torch.cat(packed_input_ids, dim=0).unsqueeze(0) packed_loss_mask = torch.cat(packed_loss_mask, dim=0).unsqueeze(0) packed_itemic_id_mask = torch.cat(packed_itemic_id_mask, dim=0).unsqueeze(0) packed_position_ids = torch.cat(packed_position_ids, dim=-1) packed_sample_idx = torch.cat(packed_sample_idx, dim=0).unsqueeze(0) max_length = max(self.max_length, packed_input_ids.numel()) padding_len = (max_length + 7) // 8 * 8 + 64 - packed_input_ids.numel() assert padding_len > 0, f"padding_len should be greater than 0, got {padding_len}" packed_input_ids = F.pad( packed_input_ids, (0, padding_len), value=self.tokenizer.pad_token_id) packed_sample_idx = F.pad(packed_sample_idx, (0, padding_len), value=-1) packed_position_ids = F.pad(packed_position_ids, (0, padding_len), value=0) packed_loss_mask = F.pad(packed_loss_mask, (0, padding_len), value=0) packed_itemic_id_mask = F.pad(packed_itemic_id_mask, (0, padding_len), value=False) cu_seqlens.append(cu_seqlens[-1] + padding_len) if self.kwargs.get("full_attention", False): packed_position_ids = self._get_rope_index_qwen3(packed_input_ids) cu_seqlens = [0, cu_seqlens[-1]] epochs = [x for x in epochs if x is not None] inputs = { "input_ids": packed_input_ids, "position_ids": packed_position_ids, "loss_mask": packed_loss_mask, "itemic_id_mask": packed_itemic_id_mask, "cu_seqlens": torch.tensor(cu_seqlens, dtype=torch.int32), "sample_idx": packed_sample_idx.to(torch.int32), "epoch_idx": torch.tensor([sum(epochs) / len(epochs)], dtype=torch.float32), } return inputs def __iter__(self): if self.dataset is None: self.dataset, self.total_samples = self._build_source_dataset(self.sources) buffer = [] source_list = [] cur_length = 0 ds_iter = iter(self.dataset) while True: try: sample = next(ds_iter) sample_key = sample["__key__"] if "__key__" in sample else "" sample_url = sample["__url__"] if "__url__" in sample else "" try: source_name = sample["json"]["source"] except Exception: source_name = "None" self.source_sample_cnt.setdefault(source_name, 0) self.source_sample_cnt[source_name] += 1 inputs = self._process(sample, source_name) except Exception: self.source_error_cnt.setdefault(source_name, 0) self.source_error_cnt[source_name] += 1 error_ratio = self.source_error_cnt[source_name] * 1.0 / \ self.source_sample_cnt[source_name] rank, world_size, worker, num_workers = pytorch_worker_info() logger.error( f"Qwen3ChatCompletionDataset process sample error. worker=r{rank}_w{worker}" f"{source_name=}, {error_ratio=}, {sample_key=}, {sample_url=}, sample=\n{str(sample)[:50]}" f"errmsg={traceback.format_exc()}") continue sample_length = inputs["input_ids"].shape[-1] if cur_length + sample_length >= self.max_length: if self.cut_to_pad: buffer.append(inputs) source_list.append(source_name) packed_inputs = self._packing(buffer) packed_inputs["data_source"] = source_list buffer = [] source_list = [] cur_length = 0 if packed_inputs["loss_mask"].sum().item() == 0: logger.warning(f"Packed sample has no valid loss tokens, cur_length={cur_length}, skipping. 
" f"This usually happens when a single sample has no valid tokens after processing.") continue else: packed_inputs = self._packing(buffer) packed_inputs["data_source"] = source_list buffer = [inputs] source_list = [source_name] cur_length = sample_length if packed_inputs["loss_mask"].sum() == 0: logger.warning("Skipping sample with no valid loss tokens.") continue yield packed_inputs else: buffer.append(inputs) source_list.append(source_name) cur_length += sample_length class Qwen3NaiveParquetDataset(IterableDataset): """Naive parquet dataset for Qwen3 that handles file reading and parsing.""" def __init__(self, data_files, num_workers, **kwargs): set_kwargs(self, kwargs, data_files=data_files, num_workers=num_workers) self.local_shuffle_buffer = LocalShuffleBuffer(buffer_size=self.kwargs.get("local_shuffle_buffer_size", 81920), random_fetch=self.kwargs.get("local_shuffle_random_fetch", 0.00001)) manager = multiprocessing.Manager() def make_dict(): return manager.dict() self.finish_dict_all = make_dict() for i in range(self.num_workers): self.finish_dict_all[i] = make_dict() def _parser(self, raw_row_data, file_url): """Parse a single row from parquet file.""" try: messages = None segments = None if "messages" in raw_row_data: messages = raw_row_data["messages"] if isinstance(messages, str): messages = json.loads(messages) if "segments" in raw_row_data: segments = raw_row_data["segments"] if isinstance(segments, str): segments = json.loads(segments) data_source = raw_row_data["source"] key = raw_row_data["uuid"] samples = { "__key__": key, "__url__": file_url, } sample_data = { "source": data_source, } if messages is not None and isinstance(messages, list) and len(messages) > 0: sample_data["messages"] = messages elif segments is not None and isinstance(segments, list) and len(segments) > 0: sample_data["segments"] = segments elif messages is not None and isinstance(messages, np.ndarray): sample_data["messages"] = messages.tolist() else: raise NotImplementedError(f"Unsupported sample, message type is {type(messages)}, message={messages}, segments type is {type(segments)}, segments={segments}") samples["json"] = sample_data return samples except Exception as e: logger.error(f"Qwen3NaiveParquetDataset parse sample error: {str(e)}") return None def __iter__local_shuffle(self): rank, world_size, worker, num_workers = pytorch_worker_info() finish_dict = self.finish_dict_all[worker] assert num_workers == self.num_workers total_num_workers = num_workers * world_size local_worker_idx = rank * num_workers + worker fn_list = [fn for idx, fn in enumerate(self.data_files) if idx % total_num_workers == local_worker_idx] logger.warning( f"ParquetDataset Info: {rank=}, {world_size=}, {worker=}, {num_workers=}, {len(fn_list)=}" ) def get_sample(): for fn_index, (fn, epoch_idx) in enumerate(fn_list): try: df = load_parquet_file(fn).read_row_group(0).to_pandas() except Exception as e: logger.warning( f"ParquetDataset Info: {rank=}, {world_size=}, {worker=}, {num_workers=}, {fn} failed" + \ f"traceback=\n{traceback.format_exc()}" ) continue df['epoch_idx'] = epoch_idx df['fn_idx'] = fn_index df['__fn__'] = fn df['sample_index'] = range(len(df)) for i, (_, row) in enumerate(df.iterrows()): sample_bit = 1 << row['sample_index'] if sample_bit & finish_dict.get((row['__fn__'], row['epoch_idx']), 0) != 0: logger.debug(f"[Rank{rank}-Worker{worker}] Skipping already processed sample: " f"{row['__fn__']}-epoch{row['epoch_idx']}-sample{row['sample_index']}") continue if self.local_shuffle_buffer.add(row, fn, 
                                                     epoch_idx):
                        continue
                    row = self.local_shuffle_buffer.get()
                    yield row

            while len(self.local_shuffle_buffer) > 0:
                row = self.local_shuffle_buffer.get()
                yield row

        for row in get_sample():
            sample_bit = 1 << row['sample_index']
            key = (row['__fn__'], row['epoch_idx'])
            if key not in finish_dict:
                finish_dict[key] = 0
            finish_dict[key] |= sample_bit

            sample = self._parser(row, row['__fn__'])
            if sample is None:
                # _parser logs and returns None on malformed rows; skip them here
                # instead of crashing on the sample['epoch_idx'] assignment below.
                continue
            sample['epoch_idx'] = torch.tensor(row['epoch_idx'])
            yield sample

    def __iter__(self):
        for sample in self.__iter__local_shuffle():
            if sample is None:
                continue
            yield sample

    def state_dict(self):
        """Get state dict for checkpointing."""
        rank, world_size, worker, num_workers = pytorch_worker_info()
        state_dict = {
            "finish_dict": dict(self.finish_dict_all[worker]),
        }
        return state_dict

    def load_state_dict(self, state_dict):
        """Load state dict from checkpoint."""
        rank, world_size, worker, num_workers = pytorch_worker_info()
        finish_dict = state_dict["finish_dict"]
        # Convert to regular dict to support old checkpoint format
        tmp_finish_dict = dict(finish_dict)
        # Clear current state and update
        self.finish_dict_all[worker].clear()
        self.finish_dict_all[worker].update(tmp_finish_dict)
        logger.info(f"[rank{rank}-worker{worker}] Loaded checkpoint successfully. finish_dict_size={len(tmp_finish_dict)}")


class Qwen3ChatCompletionParquetDataset(Qwen3ChatCompletionDataset):
    def __init__(self, sources, num_workers, shuffle_seed=1024, num_epochs=1, **kwargs):
        self.rng = random.Random(shuffle_seed)
        self.num_workers = num_workers
        self.num_epochs = num_epochs
        self.cut_to_pad = kwargs.get("cut_to_pad", True)
        self.kwargs = kwargs
        self.num_readers = kwargs.get("num_readers", 1)
        self.shuffle_window = kwargs.get("shuffle_window", 0)
        super().__init__(sources=sources, **kwargs)

    def _build_source_dataset(self, sources):
        data_file_list = []
        if dist.get_rank() == 0:
            data_files = []
            if isinstance(sources, str) and sources.endswith(".json"):
                with open(sources, "r") as fp:
                    data_files = json.loads(fp.read())
                data_files = [fn for fn in data_files if fn.endswith(".parquet")]
            elif isinstance(sources, list):
                for source in sources:
                    hdfs_files = shell_hdfs_ls(source)
                    data_files += [fn for fn in hdfs_files if fn.endswith(".parquet")]
            # repeat
            for i in range(self.num_epochs):
                data_files.sort()
                self.rng.shuffle(data_files)
                data_file_list += [(fn, i) for fn in data_files]
            logger.info(f"ParquetDataset rank{dist.get_rank()}: original_file_num={len(data_files)}, total_file_num={len(data_file_list)}")
        t = [data_file_list]
        dist.broadcast_object_list(t, src=0)
        data_file_list = t[0]
        logger.info(f"ParquetDataset rank{dist.get_rank()}: file_num={len(data_file_list)}")
        if len(data_file_list) == 0:
            raise ValueError("no datafile found!")
        dataset = Qwen3NaiveParquetDataset(data_file_list, self.num_workers, **self.kwargs)
        return dataset, -1

    def state_dict(self):
        if self.dataset is None:
            return {}
        return self.dataset.state_dict()

    def load_state_dict(self, state_dict):
        if self.dataset is None:
            return
        self.dataset.load_state_dict(state_dict)


================================================
FILE: pretrain/onerec_llm/losses/__init__.py
================================================
from onerec_llm.losses.ce import CrossEntropyLoss, ChunkedLossComputer

__all__ = [
    "CrossEntropyLoss",
    "ChunkedLossComputer",
]


================================================
FILE: pretrain/onerec_llm/losses/ce.py
================================================
import torch
import torch.nn as nn
import torch.nn.functional as F

from onerec_llm.utils.time_tracker import TimeTracker


#
=================================================================== # Cross-Entropy Loss Function # =================================================================== class CrossEntropyLoss(nn.Module): """ An efficient CrossEntropyLoss module that avoids redundant calculations. It first computes per-token losses and then manually applies the reduction. (Based on the user-provided, superior implementation). """ def __init__(self, ignore_index: int = -100, return_token_loss: bool = False, shift_labels: bool = True, reduction: str = "mean"): super().__init__() self.ignore_index = ignore_index self.return_token_loss = return_token_loss self.reduction = reduction self.shift_labels = shift_labels def forward(self, logits: torch.Tensor, labels: torch.Tensor): """ Args: logits (torch.Tensor): A single tensor of shape (..., vocab_size). labels (torch.Tensor): Ground truth labels. """ vocab_size = logits.shape[-1] if self.shift_labels: logits = logits[:, :-1, :] labels = labels[:, 1:] # Reshape for cross-entropy calculation logits_flat = logits.float().reshape(-1, vocab_size) labels_flat = labels.reshape(-1) # Step 1: Compute per-token loss. This is the base for all other calculations. per_token_loss = F.cross_entropy( logits_flat, labels_flat, ignore_index=self.ignore_index, reduction="none" ) # Step 2: Manually apply reduction to get the final loss. loss = per_token_loss.sum() if self.reduction == "mean": # Ensure we divide by the number of valid (non-ignored) tokens total_elements = (labels_flat != self.ignore_index).sum() if total_elements > 0: loss /= total_elements else: # Handle case where all tokens are ignored loss.zero_() # Return what's requested if self.return_token_loss: return loss, per_token_loss return loss # =================================================================== # Memory-Efficient Chunked Loss Computer # =================================================================== class ChunkedLossComputer: """ Memory-efficient chunked loss computer for solving OOM issues caused by large lm_head in LLMs. By computing the input sequence in chunks and manually accumulating gradients, it avoids allocating huge intermediate tensors for the entire sequence at once. Note: The returned loss has already been backpropagated and detached, and cannot be used for operations requiring gradients. """ def __init__(self, lm_head: nn.Module, loss_fn: nn.Module, minibatch_size: int, shift_labels: bool = True): """ Initialize the chunked loss computer. Args: lm_head: The output layer of the language model (typically nn.Linear) loss_fn: Loss function, must return (avg_loss, per_token_loss) tuple minibatch_size: Size of each chunk, used to control memory usage shift_labels: Whether to shift labels (for autoregressive models) """ if not isinstance(lm_head, nn.Module) or not isinstance(loss_fn, nn.Module): raise TypeError("lm_head and loss_fn must be instances of nn.Module") self.lm_head = lm_head self.loss_fn = loss_fn self.minibatch_size = minibatch_size self.shift_labels = shift_labels self.loss_info = {} self.ticker = TimeTracker() def forward_and_backward(self, input: torch.Tensor, labels: torch.Tensor, loss_fn_args: dict = {}): """ Execute chunked forward and backward propagation. 
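        Chunking is exact for mean-reduced cross entropy because the gradient
        is linear in the per-chunk sum losses:

            grad((1 / N) * sum_c L_c_sum) = (1 / N) * sum_c grad(L_c_sum)

        Each chunk therefore backpropagates its sum loss in isolation, and the
        accumulated lm_head/input gradients (and the reported loss) are divided
        once at the end by N, the global count of valid (non-ignored) tokens.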
Args: input: Input tensor with shape [batch_size, seq_len, hidden_dim] labels: Label tensor with shape [batch_size, seq_len] loss_fn_args: Additional arguments passed to the loss function Returns: tuple[torch.Tensor, torch.Tensor]: (final_avg_loss, per_token_loss) Note: The returned loss has already been backpropagated and detached, and cannot be used for operations requiring gradients. """ self.ticker.tick("lm_head") params = list(self.lm_head.parameters()) grad_accs = [torch.zeros_like(p) for p in params] grad_input_full = torch.zeros_like(input) total_loss_sum_for_reporting = torch.tensor(0.0, device=input.device) all_per_token_losses = [] seq_len = input.size(1) # Calculate total number of valid elements labels_to_count = labels[:, 1:] if self.shift_labels else labels total_elements = (labels_to_count != getattr(self.loss_fn, 'ignore_index', -100)).sum() if total_elements.item() == 0: return torch.tensor(0.0, device=input.device), None # Chunked forward and gradient accumulation for i in range(0, seq_len, self.minibatch_size): start, end = i, min(i + self.minibatch_size, seq_len) input_chunk = input[:, start:end, :].detach().requires_grad_() logits_chunk = self.lm_head(input_chunk) if self.shift_labels: label_start, label_end = start + 1, end + 1 labels_chunk = labels[:, label_start:label_end] # Ensure logits and labels have matching lengths if logits_chunk.size(1) > labels_chunk.size(1): logits_chunk = logits_chunk[:, :labels_chunk.size(1), :] else: labels_chunk = labels[:, start:end] if labels_chunk.numel() == 0: continue logits_flat = logits_chunk.reshape(-1, self.lm_head.out_features) labels_flat = labels_chunk.reshape(-1) # Compute loss loss_chunk_avg, per_token_loss_chunk = self.loss_fn(logits_flat, labels_flat, **loss_fn_args) # Convert to sum loss for backward propagation valid_tokens_in_chunk = (labels_flat != getattr(self.loss_fn, 'ignore_index', -100)).sum() if valid_tokens_in_chunk.item() == 0: all_per_token_losses.append(per_token_loss_chunk.detach()) continue loss_chunk_sum = loss_chunk_avg * valid_tokens_in_chunk # Manually compute gradients and accumulate tensors_to_grad = [p for p in params if p.requires_grad] + [input_chunk] grads = torch.autograd.grad(outputs=loss_chunk_sum, inputs=tensors_to_grad, retain_graph=False) grad_idx = 0 for j in range(len(params)): if params[j].requires_grad: grad_accs[j] += grads[grad_idx] grad_idx += 1 grad_input_full[:, start:end, :] = grads[grad_idx] total_loss_sum_for_reporting += loss_chunk_sum.detach() all_per_token_losses.append(per_token_loss_chunk.detach()) # Apply accumulated gradients for j, p in enumerate(params): if p.requires_grad: p.grad = grad_accs[j] / total_elements self.ticker.tick("llm") input.backward(gradient=grad_input_full / total_elements) self.ticker.tick("done") final_avg_loss = (total_loss_sum_for_reporting / total_elements).detach() per_token_loss = torch.cat(all_per_token_losses) if all_per_token_losses else None final_avg_loss.requires_grad = True self.loss_info = { 'loss': final_avg_loss, 'per_token_loss': per_token_loss } return final_avg_loss, per_token_loss ================================================ FILE: pretrain/onerec_llm/models/qwen3/__init__.py ================================================ # Copyright 2024 The Qwen Team and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from transformers.utils.import_utils import _LazyModule, define_import_structure if TYPE_CHECKING: from .configuration_qwen3 import * from .modeling_qwen3 import * else: import sys _file = globals()["__file__"] sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) ================================================ FILE: pretrain/onerec_llm/models/qwen3/configuration_qwen3.py ================================================ # coding=utf-8 # Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Qwen3 model configuration""" from transformers.configuration_utils import PretrainedConfig from transformers.modeling_rope_utils import rope_config_validation from transformers.utils import logging logger = logging.get_logger(__name__) class Qwen3Config(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Qwen3Model`]. It is used to instantiate a Qwen3 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of Qwen3-8B [Qwen/Qwen3-8B](https://huggingface.co/Qwen/Qwen3-8B). Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 151936): Vocabulary size of the Qwen3 model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`Qwen3Model`] hidden_size (`int`, *optional*, defaults to 4096): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 22016): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 32): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 32): Number of attention heads for each attention layer in the Transformer encoder. num_key_value_heads (`int`, *optional*, defaults to 32): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. 
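            (For example, converting a 32-head MHA checkpoint to `num_key_value_heads=8` mean-pools
            each group of 4 original key/value heads into one key/value head.)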
For more details checkout [this paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `32`. head_dim (`int`, *optional*, defaults to 128): The attention head dimension. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the decoder. max_position_embeddings (`int`, *optional*, defaults to 32768): The maximum sequence length that this model might ever be used with. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. rms_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the rms normalization layers. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether the model's input and output word embeddings should be tied. rope_theta (`float`, *optional*, defaults to 10000.0): The base period of the RoPE embeddings. rope_scaling (`Dict`, *optional*): Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value accordingly. Expected contents: `rope_type` (`str`): The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope', 'llama3'], with 'default' being the original RoPE implementation. `factor` (`float`, *optional*): Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In most scaling types, a `factor` of x will enable the model to handle sequences of length x * original maximum pre-trained length. `original_max_position_embeddings` (`int`, *optional*): Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during pretraining. `attention_factor` (`float`, *optional*): Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention computation. If unspecified, it defaults to value recommended by the implementation, using the `factor` field to infer the suggested value. `beta_fast` (`float`, *optional*): Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear ramp function. If unspecified, it defaults to 32. `beta_slow` (`float`, *optional*): Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear ramp function. If unspecified, it defaults to 1. `short_factor` (`List[float]`, *optional*): Only used with 'longrope'. The scaling factor to be applied to short contexts (< `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden size divided by the number of attention heads divided by 2 `long_factor` (`List[float]`, *optional*): Only used with 'longrope'. The scaling factor to be applied to long contexts (< `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden size divided by the number of attention heads divided by 2 `low_freq_factor` (`float`, *optional*): Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE `high_freq_factor` (`float`, *optional*): Only used with 'llama3'. 
Scaling factor applied to high frequency components of the RoPE attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`): Whether to use a bias in the query, key, value and output projection layers during self-attention. use_sliding_window (`bool`, *optional*, defaults to `False`): Whether to use sliding window attention. sliding_window (`int`, *optional*, defaults to 4096): Sliding window attention (SWA) window size. If not specified, will default to `4096`. max_window_layers (`int`, *optional*, defaults to 28): The number of layers that use SWA (Sliding Window Attention). The bottom layers use SWA while the top use full attention. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. ```python >>> from transformers import Qwen3Model, Qwen3Config >>> # Initializing a Qwen3 style configuration >>> configuration = Qwen3Config() >>> # Initializing a model from the Qwen3-8B style configuration >>> model = Qwen3Model(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "qwen3" keys_to_ignore_at_inference = ["past_key_values"] # Default tensor parallel plan for base model `Qwen3` base_model_tp_plan = { "layers.*.self_attn.q_proj": "colwise", "layers.*.self_attn.k_proj": "colwise", "layers.*.self_attn.v_proj": "colwise", "layers.*.self_attn.o_proj": "rowwise", "layers.*.mlp.gate_proj": "colwise", "layers.*.mlp.up_proj": "colwise", "layers.*.mlp.down_proj": "rowwise", } base_model_pp_plan = { "embed_tokens": (["input_ids"], ["inputs_embeds"]), "layers": (["hidden_states", "attention_mask"], ["hidden_states"]), "norm": (["hidden_states"], ["hidden_states"]), } def __init__( self, vocab_size=151936, hidden_size=4096, intermediate_size=22016, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=32, head_dim=128, hidden_act="silu", max_position_embeddings=32768, initializer_range=0.02, rms_norm_eps=1e-6, use_cache=True, tie_word_embeddings=False, rope_theta=10000.0, rope_scaling=None, attention_bias=False, use_sliding_window=False, sliding_window=4096, max_window_layers=28, attention_dropout=0.0, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.use_sliding_window = use_sliding_window self.sliding_window = sliding_window # we check `use_sliding_window` in the modeling code self.max_window_layers = max_window_layers # for backward compatibility if num_key_value_heads is None: num_key_value_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.head_dim = head_dim self.hidden_act = hidden_act self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.rope_theta = rope_theta self.rope_scaling = rope_scaling self.attention_bias = attention_bias self.attention_dropout = attention_dropout # Validate the correctness of rotary position embeddings parameters # BC: if there is a 'type' field, move it to 'rope_type'. 
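        # e.g. a legacy config such as {"type": "yarn", "factor": 2.0} (illustrative
        # values) is rewritten to {"rope_type": "yarn", "factor": 2.0} before
        # rope_config_validation runs.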
if self.rope_scaling is not None and "type" in self.rope_scaling: self.rope_scaling["rope_type"] = self.rope_scaling["type"] rope_config_validation(self) super().__init__( tie_word_embeddings=tie_word_embeddings, **kwargs, ) __all__ = ["Qwen3Config"] ================================================ FILE: pretrain/onerec_llm/models/qwen3/modeling_qwen3.py ================================================ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # This file was automatically generated from src/transformers/models/qwen3/modular_qwen3.py. # Do NOT edit this file manually as any edits will be overwritten by the generation of # the file from the modular. If any change should be done, please apply the change to the # modular_qwen3.py file directly. One of our CI enforces this. # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # coding=utf-8 # Copyright 2025 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from functools import partial from typing import Callable, Optional, Tuple, Union import torch from torch import nn from transformers.activations import ACT2FN from transformers.cache_utils import Cache, DynamicCache, SlidingWindowCache, StaticCache from transformers.generation.utils import GenerationMixin from transformers.modeling_attn_mask_utils import AttentionMaskConverter from transformers.modeling_flash_attention_utils import FlashAttentionKwargs from transformers.modeling_outputs import ( BaseModelOutputWithPast, CausalLMOutputWithPast, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast, TokenClassifierOutput, ) from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from transformers.processing_utils import Unpack from transformers.utils.generic import LossKwargs, can_return_tuple from transformers.utils.doc import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings from transformers.utils import logging from transformers.utils.deprecation import deprecate_kwarg from .configuration_qwen3 import Qwen3Config logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "Qwen/Qwen3-8B" _CONFIG_FOR_DOC = "Qwen3Config" class Qwen3RMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ Qwen3RMSNorm is equivalent to T5LayerNorm """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) return self.weight * hidden_states.to(input_dtype) def extra_repr(self): return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}" class Qwen3MLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.hidden_size = 
config.hidden_size self.intermediate_size = config.intermediate_size self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False) self.act_fn = ACT2FN[config.hidden_act] def forward(self, x): down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) return down_proj def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1) def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): """Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`, *optional*): Deprecated and unused. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. """ cos = cos.unsqueeze(unsqueeze_dim) sin = sin.unsqueeze(unsqueeze_dim) q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: """ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). 
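    For grouped-query attention this broadcasts each key/value head to its
    n_rep = num_attention_heads // num_key_value_heads query heads.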
The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) """ batch, num_key_value_heads, slen, head_dim = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: float, dropout: float = 0.0, **kwargs, ): key_states = repeat_kv(key, module.num_key_value_groups) value_states = repeat_kv(value, module.num_key_value_groups) attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling if attention_mask is not None: causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] attn_weights = attn_weights + causal_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value_states) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights class Qwen3Attention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: Qwen3Config, layer_idx: int): super().__init__() self.config = config self.layer_idx = layer_idx self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads) self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads self.scaling = self.head_dim**-0.5 self.attention_dropout = config.attention_dropout self.is_causal = True self.q_proj = nn.Linear( config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias ) self.k_proj = nn.Linear( config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias ) self.v_proj = nn.Linear( config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias ) self.o_proj = nn.Linear( config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias ) self.q_norm = Qwen3RMSNorm(self.head_dim, eps=config.rms_norm_eps) # unlike olmo, only on the head dim! 
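        # q_norm/k_norm RMS-normalize each head's activations over the last,
        # head_dim-sized axis (128 by default) before RoPE is applied in forward().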
        self.k_norm = Qwen3RMSNorm(self.head_dim, eps=config.rms_norm_eps)  # thus post q_norm does not need reshape
        self.sliding_window = config.sliding_window
        if not (
            self.config.use_sliding_window
            and getattr(self.config, "sliding_window", None) is not None
            and self.layer_idx >= self.config.max_window_layers
        ):
            self.sliding_window = None

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_norm(self.q_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
        key_states = self.k_norm(self.k_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
                logger.warning_once(
                    "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
                    'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
                )
            else:
                attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            sliding_window=self.sliding_window,  # diff with Llama
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


class Qwen3DecoderLayer(nn.Module):
    def __init__(self, config: Qwen3Config, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = Qwen3Attention(config=config, layer_idx=layer_idx)
        self.mlp = Qwen3MLP(config)
        self.input_layernorm = Qwen3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = Qwen3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        if (
            config.sliding_window and config._attn_implementation != "flash_attention_2"
        ):  # diff with Llama is this warning
            logger.warning_once(
                f"Sliding Window Attention is enabled but not implemented for `{config._attn_implementation}`; "
                "unexpected results may be encountered."
) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC **kwargs: Unpack[FlashAttentionKwargs], ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # Self Attention hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) return outputs class Qwen3RotaryEmbedding(nn.Module): def __init__(self, config: Qwen3Config, device=None): super().__init__() # BC: "rope_type" was originally "type" if hasattr(config, "rope_scaling") and config.rope_scaling is not None: self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type")) else: self.rope_type = "default" self.max_seq_len_cached = config.max_position_embeddings self.original_max_seq_len = config.max_position_embeddings self.config = config self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device) self.register_buffer("inv_freq", inv_freq, persistent=False) self.original_inv_freq = self.inv_freq @torch.no_grad() @dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope) def forward(self, x, position_ids): inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device) position_ids_expanded = position_ids[:, None, :].float() device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu" with torch.autocast(device_type=device_type, enabled=False): # Force float32 freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) emb = torch.cat((freqs, freqs), dim=-1) cos = emb.cos() * self.attention_scaling sin = emb.sin() * self.attention_scaling return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) QWEN3_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`Qwen3Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. 
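
    Example (a minimal sketch added for illustration; the config values are toy sizes and
    the import paths assume this repo's package layout):

    ```python
    >>> from onerec_llm.models.qwen3.configuration_qwen3 import Qwen3Config
    >>> from onerec_llm.models.qwen3.modeling_qwen3 import Qwen3Model

    >>> # Building from a config yields randomly initialized weights; use
    >>> # `from_pretrained` to load trained weights instead.
    >>> configuration = Qwen3Config(hidden_size=64, num_hidden_layers=2, num_attention_heads=4, num_key_value_heads=2)
    >>> model = Qwen3Model(configuration)
    ```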
""" @add_start_docstrings( "The bare Qwen3 Model outputting raw hidden-states without any specific head on top.", QWEN3_START_DOCSTRING, ) class Qwen3PreTrainedModel(PreTrainedModel): config_class = Qwen3Config base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["Qwen3DecoderLayer"] _skip_keys_device_placement = ["past_key_values"] _supports_flash_attn_2 = True _supports_sdpa = True _supports_flex_attn = True _supports_cache_class = True _supports_quantized_cache = True _supports_static_cache = True _supports_attention_backend = True def _init_weights(self, module): std = self.config.initializer_range if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() QWEN3_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. If `past_key_values` is used, optionally only the last `input_ids` have to be input (see `past_key_values`). If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids) past_key_values (`Cache`, *optional*): Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values` returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`. It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`, this tensor is not affected by padding. It is used to update the cache in the correct position and to infer the complete sequence length. """ @add_start_docstrings( "The bare Qwen3 Model outputting raw hidden-states without any specific head on top.", QWEN3_START_DOCSTRING, ) class Qwen3Model(Qwen3PreTrainedModel): """ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Qwen3DecoderLayer`] Args: config: Qwen3Config """ def __init__(self, config: Qwen3Config): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) self.layers = nn.ModuleList( [Qwen3DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self.norm = Qwen3RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.rotary_emb = Qwen3RotaryEmbedding(config=config) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value @can_return_tuple @add_start_docstrings_to_model_forward(QWEN3_INPUTS_DOCSTRING) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, **flash_attn_kwargs: Unpack[FlashAttentionKwargs], ) -> BaseModelOutputWithPast: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if self.gradient_checkpointing and self.training and use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`." 
) use_cache = False # TODO (joao): remove this exception in v4.56 -- it exists for users that try to pass a legacy cache if not isinstance(past_key_values, (type(None), Cache)): raise ValueError("The `past_key_values` should be either a `Cache` object or `None`.") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if use_cache and past_key_values is None: past_key_values = DynamicCache() if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) cu_seqlens = flash_attn_kwargs.pop("cu_seqlens", None) if cu_seqlens is not None: cu_seqlens = cu_seqlens.to(dtype=torch.int32) flash_attn_kwargs["cu_seq_lens_q"] = cu_seqlens flash_attn_kwargs["cu_seq_lens_k"] = cu_seqlens max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max().item() flash_attn_kwargs["max_length_q"] = max_seqlen flash_attn_kwargs["max_length_k"] = max_seqlen causal_mask = None else: causal_mask = self._update_causal_mask( attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions ) hidden_states = inputs_embeds # create position embeddings to be shared across the decoder layers position_embeddings = self.rotary_emb(hidden_states, position_ids) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None for decoder_layer in self.layers[: self.config.num_hidden_layers]: if output_hidden_states: all_hidden_states += (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( partial(decoder_layer.__call__, **flash_attn_kwargs), hidden_states, causal_mask, position_ids, past_key_values, output_attentions, use_cache, cache_position, position_embeddings, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_value=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **flash_attn_kwargs, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) hidden_states = self.norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values if use_cache else None, hidden_states=all_hidden_states, attentions=all_self_attns, ) def _update_causal_mask( self, attention_mask: torch.Tensor, input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool = False, ): if self.config._attn_implementation == "flash_attention_2": if attention_mask is not None and past_key_values is not None: is_padding_right = attention_mask[:, -1].sum().item() != input_tensor.size()[0] if is_padding_right: raise ValueError( "You are attempting to perform batched generation with padding_side='right'" " this may lead to unexpected behaviour for Flash Attention version of Qwen3. Make sure to " " call `tokenizer.padding_side = 'left'` before tokenizing the input. 
" ) if attention_mask is not None and 0.0 in attention_mask: return attention_mask return None # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail # to infer the attention mask. past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_static_cache = isinstance(past_key_values, StaticCache) using_sliding_window_cache = isinstance(past_key_values, SlidingWindowCache) # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward if ( self.config._attn_implementation == "sdpa" and not (using_static_cache or using_sliding_window_cache) and not output_attentions ): if AttentionMaskConverter._ignore_causal_mask_sdpa( attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, sliding_window=self.config.sliding_window, is_training=self.training, ): return None dtype, device = input_tensor.dtype, input_tensor.device min_dtype = torch.finfo(dtype).min sequence_length = input_tensor.shape[1] # SlidingWindowCache or StaticCache if using_sliding_window_cache or using_static_cache: target_length = past_key_values.get_max_cache_shape() # DynamicCache or no cache else: target_length = ( attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 ) # In case the provided `attention` mask is 2D, we generate a causal mask here (4D). causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position( attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, device=device, cache_position=cache_position, batch_size=input_tensor.shape[0], config=self.config, past_key_values=past_key_values, ) if ( self.config._attn_implementation == "sdpa" and attention_mask is not None and attention_mask.device.type in ["cuda", "xpu"] and not output_attentions ): # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. # Details: https://github.com/pytorch/pytorch/issues/110213 causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask @staticmethod def _prepare_4d_causal_attention_mask_with_cache_position( attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, device: torch.device, cache_position: torch.Tensor, batch_size: int, config: Qwen3Config, past_key_values: Cache, ): """ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. device (`torch.device`): The device to place the 4D attention mask on. 
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`int`):
                Batch size.
            config ([`Qwen3Config`]):
                The model's configuration class.
            past_key_values ([`Cache`]):
                The cache class that is being used currently to generate.
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
            causal_mask = attention_mask
        else:
            min_dtype = torch.finfo(dtype).min
            causal_mask = torch.full(
                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
            )
            diagonal_attend_mask = torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
            if config.sliding_window is not None:
                # if we have sliding window, we should not attend to tokens beyond sliding window length, so we mask them out also
                # the check is needed to verify if the current checkpoint was trained with sliding window or not
                if not isinstance(past_key_values, SlidingWindowCache) or sequence_length > target_length:
                    sliding_attend_mask = torch.arange(target_length, device=device) <= (
                        cache_position.reshape(-1, 1) - config.sliding_window
                    )
                    diagonal_attend_mask.bitwise_or_(sliding_attend_mask)
            causal_mask *= diagonal_attend_mask
            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
                if attention_mask.shape[-1] > target_length:
                    attention_mask = attention_mask[:, :target_length]
                mask_length = attention_mask.shape[-1]
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
                    causal_mask.device
                )
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    padding_mask, min_dtype
                )

        return causal_mask


class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...
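
# Illustrative sketch: how the packed-sequence branch in `Qwen3Model.forward`
# above lays out `cu_seqlens`. The helper name `_demo_packed_cu_seqlens` is
# hypothetical and exists only to demonstrate the bookkeeping.
def _demo_packed_cu_seqlens():
    # Three samples of lengths 3, 5 and 2 packed into one row give the
    # cumulative boundaries [0, 3, 8, 10].
    lengths = torch.tensor([3, 5, 2], dtype=torch.int32)
    cu_seqlens = torch.cat([torch.zeros(1, dtype=torch.int32), torch.cumsum(lengths, dim=0).to(torch.int32)])
    # `forward` copies `cu_seqlens` into `cu_seq_lens_q`/`cu_seq_lens_k` and
    # derives the maximum segment length the same way:
    max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max().item()
    return cu_seqlens, max_seqlen  # tensor([0, 3, 8, 10], dtype=torch.int32), 5
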
class Qwen3ForCausalLM(Qwen3PreTrainedModel, GenerationMixin): _tied_weights_keys = ["lm_head.weight"] _tp_plan = {"lm_head": "colwise_rep"} _pp_plan = {"lm_head": (["hidden_states"], ["logits"])} wrap_modules = {Qwen3DecoderLayer } def __init__(self, config): super().__init__(config) self.model = Qwen3Model(config) self.vocab_size = config.vocab_size self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.chunked_loss_computer = getattr(config, "chunked_loss_computer", False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.model.embed_tokens def set_input_embeddings(self, value): self.model.embed_tokens = value def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def set_decoder(self, decoder): self.model = decoder def get_decoder(self): return self.model @can_return_tuple @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep") @add_start_docstrings_to_model_forward(QWEN3_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: Union[int, torch.Tensor] = 0, **kwargs: Unpack[KwargsForCausalLM], ) -> CausalLMOutputWithPast: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. logits_to_keep (`int` or `torch.Tensor`, *optional*): If an `int`, compute logits for the last `logits_to_keep` tokens. If `0`, calculate logits for all `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that token can save memory, which becomes pretty significant for long sequences or large vocabulary size. If a `torch.Tensor`, must be 1D corresponding to the indices to keep in the sequence length dimension. This is useful when using packed tensor format (single dimension for batch and sequence length). Returns: Example: ```python >>> from transformers import AutoTokenizer, Qwen3ForCausalLM >>> model = Qwen3ForCausalLM.from_pretrained("Qwen/Qwen3-8B") >>> tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-8B") >>> prompt = "Hey, are you conscious? Can you talk to me?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." 
```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs: BaseModelOutputWithPast = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, cache_position=cache_position, **kwargs, ) hidden_states = outputs.last_hidden_state # Only compute necessary logits, and do not upcast them to float if we are not computing the loss slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep if self.chunked_loss_computer: logits = hidden_states[:, slice_indices, :] else: logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs) return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ The Qwen3 Model transformer with a sequence classification head on top (linear layer). [`Qwen3ForSequenceClassification`] uses the last token in order to do the classification, as other causal models (e.g. GPT-2) do. Since it does classification on the last token, it requires to know the position of the last token. If a `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in each row of the batch). """, QWEN3_START_DOCSTRING, ) class Qwen3ForSequenceClassification(Qwen3PreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.model = Qwen3Model(config) self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.model.embed_tokens def set_input_embeddings(self, value): self.model.embed_tokens = value @can_return_tuple @add_start_docstrings_to_model_forward(QWEN3_INPUTS_DOCSTRING) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, ) -> SequenceClassifierOutputWithPast: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" transformer_outputs: BaseModelOutputWithPast = self.model( input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) hidden_states = transformer_outputs.last_hidden_state logits = self.score(hidden_states) if input_ids is not None: batch_size = input_ids.shape[0] else: batch_size = inputs_embeds.shape[0] if self.config.pad_token_id is None and batch_size != 1: raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") if self.config.pad_token_id is None: last_non_pad_token = -1 elif input_ids is not None: # To handle both left- and right- padding, we take the rightmost token that is not equal to pad_token_id non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32) token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32) last_non_pad_token = (token_indices * non_pad_mask).argmax(-1) else: last_non_pad_token = -1 logger.warning_once( f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be " "unexpected if using padding tokens in conjunction with `inputs_embeds.`" ) pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token] loss = None if labels is not None: loss = self.loss_function(logits=logits, labels=labels, pooled_logits=pooled_logits, config=self.config) return SequenceClassifierOutputWithPast( loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @add_start_docstrings( """ The Qwen3 Model transformer with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, QWEN3_START_DOCSTRING, ) class Qwen3ForTokenClassification(Qwen3PreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.model = Qwen3Model(config) if getattr(config, "classifier_dropout", None) is not None: classifier_dropout = config.classifier_dropout elif getattr(config, "hidden_dropout", None) is not None: classifier_dropout = config.hidden_dropout else: classifier_dropout = 0.1 self.dropout = nn.Dropout(classifier_dropout) self.score = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.model.embed_tokens def set_input_embeddings(self, value): self.model.embed_tokens = value @can_return_tuple @add_start_docstrings_to_model_forward(QWEN3_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, ) -> TokenClassifierOutput: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. 
If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ outputs: BaseModelOutputWithPast = self.model( input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) sequence_output = outputs.last_hidden_state sequence_output = self.dropout(sequence_output) logits = self.score(sequence_output) loss = None if labels is not None: loss = self.loss_function(logits, labels, self.config) return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ The Qwen3 Model transformer with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`). """, QWEN3_START_DOCSTRING, ) class Qwen3ForQuestionAnswering(Qwen3PreTrainedModel): base_model_prefix = "transformer" def __init__(self, config): super().__init__(config) self.transformer = Qwen3Model(config) self.qa_outputs = nn.Linear(config.hidden_size, 2) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.transformer.embed_tokens def set_input_embeddings(self, value): self.transformer.embed_tokens = value @can_return_tuple @add_start_docstrings_to_model_forward(QWEN3_INPUTS_DOCSTRING) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, **kwargs, ) -> QuestionAnsweringModelOutput: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
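
        Example (a minimal sketch; without fine-tuning, the randomly initialized QA
        head will return arbitrary spans):

        ```python
        >>> import torch
        >>> from transformers import AutoTokenizer
        >>> from onerec_llm.models.qwen3.modeling_qwen3 import Qwen3ForQuestionAnswering

        >>> tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-8B")
        >>> model = Qwen3ForQuestionAnswering.from_pretrained("Qwen/Qwen3-8B")

        >>> inputs = tokenizer("Who develops Qwen3?", "Qwen3 is developed by the Qwen team.", return_tensors="pt")
        >>> with torch.no_grad():
        ...     outputs = model(**inputs)
        >>> start = outputs.start_logits.argmax(dim=-1).item()
        >>> end = outputs.end_logits.argmax(dim=-1).item()
        >>> answer = tokenizer.decode(inputs.input_ids[0, start : end + 1])
        ```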
""" outputs: BaseModelOutputWithPast = self.transformer( input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) sequence_output = outputs.last_hidden_state logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() loss = None if start_positions is not None and end_positions is not None: loss = self.loss_function(start_logits, end_logits, start_positions, end_positions, **kwargs) return QuestionAnsweringModelOutput( loss=loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) __all__ = [ "Qwen3ForCausalLM", "Qwen3ForQuestionAnswering", "Qwen3Model", "Qwen3PreTrainedModel", "Qwen3ForSequenceClassification", "Qwen3ForTokenClassification", ] ================================================ FILE: pretrain/onerec_llm/models/qwen3/modular_qwen3.py ================================================ # coding=utf-8 # Copyright 2025 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch Qwen3 model.""" from typing import Callable, Optional, Tuple import torch import torch.utils.checkpoint from transformers.cache_utils import Cache from transformers.modeling_flash_attention_utils import FlashAttentionKwargs from transformers.modeling_outputs import CausalLMOutputWithPast from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS from transformers.processing_utils import Unpack from transformers.utils.generic import LossKwargs from transformers.utils import logging from transformers.models.gemma.modeling_gemma import GemmaMLP from transformers.models.llama.modeling_llama import ( LlamaAttention, LlamaDecoderLayer, LlamaForCausalLM, LlamaForQuestionAnswering, LlamaForSequenceClassification, LlamaForTokenClassification, LlamaRMSNorm, apply_rotary_pos_emb, eager_attention_forward, ) from transformers.models.mistral.modeling_mistral import MistralModel from .configuration_qwen3 import Qwen3Config logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "Qwen/Qwen3-8B" class Qwen3RMSNorm(LlamaRMSNorm): pass class Qwen3MLP(GemmaMLP): pass class Qwen3Attention(LlamaAttention): def __init__(self, config: Qwen3Config, layer_idx: int): super().__init__(config, layer_idx) self.q_norm = Qwen3RMSNorm(self.head_dim, eps=config.rms_norm_eps) # unlike olmo, only on the head dim! 
self.k_norm = Qwen3RMSNorm(self.head_dim, eps=config.rms_norm_eps) # thus post q_norm does not need reshape self.sliding_window = config.sliding_window if not ( self.config.use_sliding_window and getattr(self.config, "sliding_window", None) is not None and self.layer_idx >= self.config.max_window_layers ): self.sliding_window = None def forward( self, hidden_states: torch.Tensor, position_embeddings: Tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_value: Optional[Cache] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) query_states = self.q_norm(self.q_proj(hidden_states).view(hidden_shape)).transpose(1, 2) key_states = self.k_norm(self.k_proj(hidden_states).view(hidden_shape)).transpose(1, 2) value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2) cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_value is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False): logger.warning_once( "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to " 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' ) else: attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, sliding_window=self.sliding_window, # diff with Llama **kwargs, ) attn_output = attn_output.reshape(*input_shape, -1).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights class Qwen3DecoderLayer(LlamaDecoderLayer): def __init__(self, config: Qwen3Config, layer_idx: int): super().__init__() self.self_attn = Qwen3Attention(config=config, layer_idx=layer_idx) self.mlp = Qwen3MLP(config) if ( config.sliding_window and config._attn_implementation != "flash_attention_2" ): # diff with Llama is this warning logger.warning_once( f"Sliding Window Attention is enabled but not implemented for `{config._attn_implementation}`; " "unexpected results may be encountered." ) class Qwen3Model(MistralModel): # mistral model creates sliding window pass class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ... class Qwen3ForCausalLM(LlamaForCausalLM): def forward( self, **super_kwargs: Unpack[KwargsForCausalLM], ) -> CausalLMOutputWithPast: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. 
logits_to_keep (`int` or `torch.Tensor`, *optional*): If an `int`, compute logits for the last `logits_to_keep` tokens. If `0`, calculate logits for all `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that token can save memory, which becomes pretty significant for long sequences or large vocabulary size. If a `torch.Tensor`, must be 1D corresponding to the indices to keep in the sequence length dimension. This is useful when using packed tensor format (single dimension for batch and sequence length). Returns: Example: ```python >>> from transformers import AutoTokenizer, Qwen3ForCausalLM >>> model = Qwen3ForCausalLM.from_pretrained("Qwen/Qwen3-8B") >>> tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-8B") >>> prompt = "Hey, are you conscious? Can you talk to me?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." ```""" return super().forward(**super_kwargs) class Qwen3ForSequenceClassification(LlamaForSequenceClassification): pass class Qwen3ForTokenClassification(LlamaForTokenClassification): pass class Qwen3ForQuestionAnswering(LlamaForQuestionAnswering): pass __all__ = [ "Qwen3ForCausalLM", "Qwen3ForQuestionAnswering", "Qwen3Model", "Qwen3PreTrainedModel", # noqa: F822 "Qwen3ForSequenceClassification", "Qwen3ForTokenClassification", ] ================================================ FILE: pretrain/onerec_llm/training/__init__.py ================================================ """Training utilities for FSDP-based LLM training. 
This package provides core training functionality including:
- Distributed training with FSDP
- Checkpoint management
- Learning rate scheduling
- Gradient computation and masking
- Activation checkpointing
"""

from onerec_llm.training.activations import set_activation_checkpointing
from onerec_llm.training.checkpoint import (
    AppState,
    DistributedCheckpointer,
    load_checkpoint_to_state_dict,
    load_hf_checkpoint,
    load_safetensors,
    safe_torch_load,
)
from onerec_llm.training.common import set_default_dtype
from onerec_llm.training.distributed import (
    load_from_full_model_state_dict,
    shard_model,
)
from onerec_llm.training.gradients import (
    EmbeddingGradientMasker,
    clip_grad_by_value,
    clip_grad_norm,
    compute_fsdp_zero2_grad_norm,
)
from onerec_llm.training.lr_schedulers import get_cosine_scheduler, get_scheduler

__all__ = [
    # Activations
    "set_activation_checkpointing",
    # Checkpoint
    "AppState",
    "DistributedCheckpointer",
    "load_checkpoint_to_state_dict",
    "load_hf_checkpoint",
    "load_safetensors",
    "safe_torch_load",
    # Common
    "set_default_dtype",
    # Distributed
    "load_from_full_model_state_dict",
    "shard_model",
    # Gradients
    "EmbeddingGradientMasker",
    "clip_grad_by_value",
    "clip_grad_norm",
    "compute_fsdp_zero2_grad_norm",
    # LR Schedulers
    "get_cosine_scheduler",
    "get_scheduler",
]


================================================
FILE: pretrain/onerec_llm/training/activations.py
================================================
import torch.nn as nn
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
    apply_activation_checkpointing,
)
from torch.distributed.fsdp.wrap import ModuleWrapPolicy


def set_activation_checkpointing(
    model: nn.Module, auto_wrap_policy, **kwargs
) -> None:
    """Utility to apply activation checkpointing to the passed-in model.

    Args:
        model (nn.Module): Model to apply activation checkpointing to.
        auto_wrap_policy (ACWrapPolicyType): Policy to wrap module. This can either be a set of
            ``nn.Module`` types, in which case, modules of the specified type(s) will be wrapped
            individually with activation checkpointing, or a ``callable`` policy describing how to
            wrap the model with activation checkpointing. For more information on authoring custom
            policies, please see this tutorial:
            https://pytorch.org/tutorials/intermediate/FSDP_adavnced_tutorial.html#transformer-wrapping-policy.
        **kwargs: additional arguments to pass to ``torch.distributed`` activation checkpointing.
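
    Example (a minimal sketch; assumes decoder-layer granularity, the usual choice for
    the Qwen3 models in this repo):

        >>> from onerec_llm.models.qwen3.modeling_qwen3 import Qwen3DecoderLayer
        >>> set_activation_checkpointing(model, auto_wrap_policy={Qwen3DecoderLayer})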
""" if isinstance(auto_wrap_policy, set): auto_wrap_policy = ModuleWrapPolicy(auto_wrap_policy) apply_activation_checkpointing(model, auto_wrap_policy=auto_wrap_policy, **kwargs) ================================================ FILE: pretrain/onerec_llm/training/checkpoint.py ================================================ from typing import Dict, Any, Union, Optional, Protocol, Callable import re import os import gc import glob import time from pathlib import Path from concurrent.futures import Future import torch import torch.distributed as dist from safetensors import safe_open from torch.distributed.checkpoint import ( async_save, FileSystemReader, FileSystemWriter, load, save, ) from torch.distributed.checkpoint.metadata import STATE_DICT_TYPE import torch.distributed.checkpoint as dcp from torch.distributed.checkpoint.stateful import Stateful from torch.distributed.checkpoint.state_dict import get_model_state_dict, set_model_state_dict from torch.distributed.checkpoint.default_planner import _EmptyStateDictLoadPlanner from safetensors.torch import load_file from tqdm import tqdm from onerec_llm.utils.distributed import get_world_size_and_rank from onerec_llm.utils.common import print_rank_0, print_rank_n def load_safetensors(path: Union[Path, str]) -> Dict[str, torch.Tensor]: """Load safetensors file and return a dictionary of tensors. Args: path: Path to the safetensors file. Returns: Dictionary mapping tensor names to tensors. """ tensors = {} with safe_open(path, framework="pt", device="cpu") as f: for k in f.keys(): tensors[k] = f.get_tensor(k) return tensors def safe_torch_load( checkpoint_path: Union[Path, str], weights_only: bool = True, mmap: bool = True ) -> Dict[str, Any]: """ Utility to load a checkpoint file onto CPU in a safe manner. Provides separate handling for safetensors files. Args: checkpoint_path: Path to the checkpoint file. weights_only: Whether to load only tensors, primitive types, and dictionaries (passthrough to torch.load). Default: True mmap: Whether to mmap from disk into CPU memory. Default: True Returns: State dict from the checkpoint file. Raises: ValueError: If the checkpoint file is not found or cannot be loaded. """ try: checkpoint_path_str = str(checkpoint_path) if checkpoint_path_str.endswith(".safetensors"): return load_safetensors(checkpoint_path) else: return torch.load( checkpoint_path_str, map_location="cpu", mmap=mmap, weights_only=weights_only, ) except Exception as e: raise ValueError(f"Unable to load checkpoint from {checkpoint_path}") from e def load_hf_checkpoint( model_dir: str, output_keys_file: Optional[str] = None ) -> Dict[str, torch.Tensor]: """Load HuggingFace format checkpoint from a directory. Args: model_dir: Directory containing checkpoint files (.safetensors or .bin). output_keys_file: Optional path to write checkpoint keys for debugging. If None, keys are not written. Default: None. Returns: Merged state dictionary containing all checkpoint weights. Raises: ValueError: If checkpoint files are not found or contain non-tensor values. 
""" merged_state_dict: Dict[str, torch.Tensor] = {} # Try to find safetensors files first, fall back to .bin files ckpt_paths = sorted(glob.glob(os.path.join(model_dir, "*.safetensors"))) if not ckpt_paths: ckpt_paths = sorted(glob.glob(os.path.join(model_dir, "*.bin"))) if not ckpt_paths: raise ValueError(f"No checkpoint files found in {model_dir}") for cpt_idx, cpt_path in enumerate(ckpt_paths): print_rank_0(f"Loading checkpoint {cpt_idx + 1}/{len(ckpt_paths)}: {cpt_path}") state_dict = safe_torch_load(cpt_path) # Validate that all values are tensors for key, value in state_dict.items(): if not isinstance(value, torch.Tensor): raise ValueError( f"Expected all values in the state dict to be torch.Tensor. " f"Found {key}={type(value)} instead." ) merged_state_dict.update(state_dict) # Free memory del state_dict gc.collect() # Optionally write keys to file for debugging if output_keys_file: with open(output_keys_file, "w", encoding="utf-8") as f: f.write("# Checkpoint file paths:\n") for path in ckpt_paths: f.write(f"{path}\n") f.write("\n# State dict keys:\n") for key in merged_state_dict.keys(): f.write(f"{key}\n") return merged_state_dict def load_checkpoint_to_state_dict(checkpoint_path: Union[str, os.PathLike]) -> Dict[str, torch.Tensor]: """Load checkpoint file or directory and return state_dict. Supports multiple checkpoint formats: - .pth or .pt files (PyTorch format) - .safetensors files (SafeTensors format) - Directories containing .safetensors files (HuggingFace format) - .distcp format directories (Distributed checkpoint format) Args: checkpoint_path: Path to checkpoint file or directory. Can be: - .pth, .pt file path - .safetensors file path - Directory containing .safetensors files - .distcp format directory Returns: state_dict: Dictionary containing model weights Raises: FileNotFoundError: If checkpoint path does not exist ValueError: If checkpoint format is unsupported or invalid """ checkpoint_path = os.path.abspath(checkpoint_path) # Check if path exists if not os.path.exists(checkpoint_path): raise FileNotFoundError(f"Checkpoint path does not exist: {checkpoint_path}") # If it's a file if os.path.isfile(checkpoint_path): # Handle .pth files if checkpoint_path.endswith(".pth") or checkpoint_path.endswith(".pt"): print_rank_0(f"Loading PyTorch checkpoint from {checkpoint_path}...") state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu')) # If state_dict contains nested 'model' or 'app.model' keys, extract them if "model" in state_dict and isinstance(state_dict["model"], dict): state_dict = state_dict["model"] elif "app" in state_dict and "model" in state_dict["app"]: state_dict = state_dict["app"]["model"] return state_dict # Handle .safetensors files elif checkpoint_path.endswith(".safetensors"): print_rank_0(f"Loading SafeTensors checkpoint from {checkpoint_path}...") return load_file(checkpoint_path) else: raise ValueError(f"Unsupported file format: {checkpoint_path}") # If it's a directory elif os.path.isdir(checkpoint_path): # Check if it's a .distcp format directory if any(file.endswith(".distcp") for file in os.listdir(checkpoint_path)) or \ os.path.exists(os.path.join(checkpoint_path, "checkpoint.json")): print_rank_0(f"Loading DCP checkpoint from {checkpoint_path}...") # Use PyTorch's FileSystemReader to load DCP format sd: STATE_DICT_TYPE = {} from torch.distributed.checkpoint.state_dict_loader import _load_state_dict _load_state_dict( sd, storage_reader=FileSystemReader(checkpoint_path), planner=_EmptyStateDictLoadPlanner(), no_dist=True, ) # 
            # Extract model weights section
            if "app" in sd and "model" in sd["app"]:
                return sd["app"]["model"]
            return sd

        # Check if it's a directory containing .safetensors files
        safetensors_files = [f for f in os.listdir(checkpoint_path) if f.endswith(".safetensors")]
        if safetensors_files:
            # Directly merge all .safetensors files
            print_rank_0(f"Loading and merging all SafeTensors files from {checkpoint_path}...")
            state_dict = {}
            for safetensors_file in tqdm(safetensors_files, desc="Loading safetensors"):
                file_path = os.path.join(checkpoint_path, safetensors_file)
                shard_state_dict = load_file(file_path)
                # Update state_dict, merging all file contents
                state_dict.update(shard_state_dict)
            return state_dict
        else:
            raise ValueError(f"No supported checkpoint files found in directory: {checkpoint_path}")
    else:
        raise ValueError(f"Invalid checkpoint path: {checkpoint_path}")


class CheckpointerInterface(Protocol):
    """Protocol interface for checkpoint loaders and savers."""

    def load_checkpoint(self, **kwargs) -> Dict[str, Any]:
        """Load checkpoint from storage."""
        ...

    def save_checkpoint(self, state_dict: Dict[str, Any], **kwargs) -> None:
        """Save checkpoint to storage."""
        ...


class DistributedCheckpointer(CheckpointerInterface):
    """
    Checkpointer which reads and writes checkpoints in the DistributedCheckpointing format.

    Args:
        process_group: Optional process group to use for distributed saving/loading.
            If None, the default process group will be used. For checkpointing, a gloo
            CPU-based backend is needed.
    """

    def __init__(
        self, process_group: Optional[dist.ProcessGroup] = None
    ) -> None:
        self._checkpoint_future: Optional[Future] = None
        self._checkpoint_dir_prefix = "global_step"
        _, self._rank = get_world_size_and_rank()
        self._process_group: Optional[dist.ProcessGroup] = process_group

    def get_latest_checkpoint(self, checkpoint_dir: str) -> Optional[str]:
        """Get the latest checkpoint directory path.

        Args:
            checkpoint_dir: Directory containing checkpoint subdirectories.

        Returns:
            Path to the latest checkpoint directory, or None if no checkpoints found.
        """
        checkpoint_dir_pattern = re.compile(f"{self._checkpoint_dir_prefix}(\\d+)")
        checkpoint_paths = []
        if not os.path.isdir(checkpoint_dir):
            return None
        for name in os.listdir(checkpoint_dir):
            if re.match(checkpoint_dir_pattern, name):
                checkpoint_path = os.path.join(checkpoint_dir, name)
                if os.path.isdir(checkpoint_path):
                    checkpoint_paths.append(name)
        if checkpoint_paths:
            # Sort by the numeric step parsed from the directory name (e.g.
            # "global_step1000" -> 1000). Splitting on "_" would leave a
            # non-numeric "step1000" and crash int().
            latest_checkpoint_dir = sorted(
                checkpoint_paths,
                key=lambda x: int(re.match(checkpoint_dir_pattern, x).group(1))
            )[-1]
            return os.path.join(checkpoint_dir, latest_checkpoint_dir)
        return None

    def load_checkpoint(
        self,
        state_dict: STATE_DICT_TYPE,
        checkpoint_path: Optional[str] = None,
        checkpoint_dir: Optional[str] = None,
        tag: Union[str, int] = "latest"
    ) -> Dict[str, Any]:
        """
        Load a Distributed checkpoint.

        Args:
            state_dict: State dictionary to load into.
            checkpoint_path: Direct path to checkpoint. If provided, this takes precedence.
            checkpoint_dir: Directory containing checkpoints.
            tag: Checkpoint tag (e.g., "latest" or step number). Default: "latest".

        Returns:
            Loaded state dictionary.

        Raises:
            ValueError: If no checkpoint path can be determined.
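
        Example (a minimal sketch; assumes ``app_state`` is an ``AppState`` wrapping the
        sharded model and that checkpoints were written by ``save_checkpoint``):

            >>> checkpointer = DistributedCheckpointer()
            >>> state = {"app": app_state}
            >>> checkpointer.load_checkpoint(state, checkpoint_dir="/path/to/ckpts", tag="latest")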
""" if not checkpoint_path: if not checkpoint_dir: raise ValueError("Either checkpoint_path or checkpoint_dir must be provided") if tag == "latest": checkpoint_path = self.get_latest_checkpoint(checkpoint_dir) if not checkpoint_path: raise ValueError(f"No checkpoint found in {checkpoint_dir}") else: checkpoint_path = str(Path(checkpoint_dir) / str(tag)) if not checkpoint_path or not os.path.exists(checkpoint_path): raise ValueError(f"Checkpoint path does not exist: {checkpoint_path}") print_rank_0(f"Loading checkpoint from {checkpoint_path}") dcp.load( state_dict=state_dict, storage_reader=FileSystemReader(checkpoint_path), process_group=self._process_group, ) return state_dict def save_checkpoint( self, state_dict: STATE_DICT_TYPE, output_dir: Union[str, Path], tag: Optional[Union[str, int]] = None, save_async: bool = False ) -> None: """ Save a distributed checkpoint to storage. If ``save_async`` is True, the save happens asynchronously unblocking the GPUs sooner. This should only be used for intermediate checkpoints. Final checkpoint must be synchronous as the training job cannot terminate until the checkpoint is persisted. Args: state_dict: Checkpoint state dict to be written out to file. output_dir: Directory to save the checkpoint. tag: Checkpoint tag. Used to create the checkpoint directory name, generally step number. save_async: If True, save the checkpoint asynchronously. Default: False. """ checkpoint_path = Path(output_dir) if tag is not None: checkpoint_path = checkpoint_path / f"{self._checkpoint_dir_prefix}{tag}" checkpoint_path_str = str(checkpoint_path) print_rank_0(f"Saving checkpoint to {checkpoint_path_str}") # Wait for previous checkpoint to finish if still in progress if self._checkpoint_future and not self._checkpoint_future.done(): wait_start = time.perf_counter() print_rank_n( f"Rank {self._rank}: previous checkpoint has not finished. " f"Checkpointing frequency is too high. Waiting...", rank=self._rank ) self._checkpoint_future.result() wait_time = time.perf_counter() - wait_start print_rank_n( f"Rank {self._rank}: waited {wait_time:.2f} seconds " f"for previous checkpoint to finish", rank=self._rank ) self._checkpoint_future = None cp_start = time.perf_counter() if save_async: def callback(f: Future) -> None: if f.exception() is None: print_rank_n( f"Rank {self._rank}: Checkpoint saved asynchronously " f"to {checkpoint_path_str} successfully.", rank=self._rank ) else: print_rank_n( f"Rank {self._rank}: Checkpoint failed to save asynchronously " f"to {checkpoint_path_str} with exception: {f.exception()}", rank=self._rank ) self._checkpoint_future = async_save( state_dict=state_dict, storage_writer=FileSystemWriter( checkpoint_path_str, thread_count=16 ), process_group=self._process_group, ) blocked_time = time.perf_counter() - cp_start print_rank_n( f"Rank {self._rank}: Trainer was blocked for {blocked_time:.2f} seconds " "for checkpointing to start...", rank=self._rank ) self._checkpoint_future.add_done_callback(callback) else: print_rank_0(f"Saving model checkpoint synchronously to {checkpoint_path_str}") save( state_dict=state_dict, storage_writer=FileSystemWriter( checkpoint_path_str, thread_count=4 ), process_group=self._process_group, ) print_rank_0( "The full model checkpoint, including all the weights and " "configurations, has been saved successfully by the " "DistributedCheckpointer. " "You can now use this checkpoint for further training." ) class AppState(Stateful): """This is a useful wrapper for checkpointing the Application State. 
    Since this object is compliant with the Stateful protocol, DCP will automatically call
    state_dict/load_state_dict as needed in the dcp.save/load APIs.

    Note: We take advantage of this wrapper to handle calling the distributed state dict
    methods on the model.
    """

    def __init__(self, model, optimizer=None, call_back=None):
        self.model = model
        self.call_back = call_back

    def set_call_back(self, cb):
        self.call_back = cb
        return self

    def state_dict(self):
        # this line automatically manages FSDP FQN's, as well as sets the
        # default state dict type to FSDP.SHARDED_STATE_DICT
        model_state_dict = get_model_state_dict(self.model)
        if self.call_back is not None:
            model_state_dict = self.call_back(model_state_dict)
        return {
            "model": model_state_dict
        }

    def load_state_dict(self, state_dict):
        # sets our state dict on the model, now that we've loaded
        set_model_state_dict(
            self.model,
            model_state_dict=state_dict["model"],
        )


================================================
FILE: pretrain/onerec_llm/training/common.py
================================================
"""Common training utilities for distributed model training."""
from typing import Generator
import contextlib

import torch


@contextlib.contextmanager
def set_default_dtype(dtype: torch.dtype) -> Generator[None, None, None]:
    """Temporarily set torch's default dtype.

    Args:
        dtype: The desired default dtype.
    """
    old_dtype = torch.get_default_dtype()
    torch.set_default_dtype(dtype)
    try:
        yield
    finally:
        torch.set_default_dtype(old_dtype)


================================================
FILE: pretrain/onerec_llm/training/distributed.py
================================================
"""Distributed training utilities for FSDP model sharding and checkpoint loading."""
from typing import Any, Dict, Optional

import torch
import torch.distributed as dist
from torch import nn

from onerec_llm.utils.ds_utils import format_dict_or_list
from onerec_llm.utils.distributed import get_world_size_and_rank
from torch.distributed._composable.fsdp import CPUOffloadPolicy, fully_shard, MixedPrecisionPolicy
from torch.distributed._tensor import distribute_tensor
from torch.distributed.device_mesh import DeviceMesh


def shard_model(
    model: nn.Module,
    *,
    cpu_offload: bool,
    reshard_after_forward: bool = True,
    dp_mesh: Optional[DeviceMesh] = None,
    fp32_weight: bool = True,
    model_class: str = 'Qwen3ForCausalLM',
    fp32_reduce: bool = True
) -> None:
    """Shard a model with FSDP using the PyTorch Distributed fully_shard API.

    Args:
        model: Model to shard with FSDP.
        cpu_offload: If True, FSDP will offload parameters to CPU.
        reshard_after_forward: Whether to reshard after forward pass.
        dp_mesh: Device mesh for FSDP sharding under multiple parallelism.
        fp32_weight: If True, keep master weights in fp32 and cast parameters to bfloat16 for compute.
        model_class: Model class name. Currently only supports 'Qwen3ForCausalLM'.
        fp32_reduce: If True, use fp32 for gradient reduction.
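
    Example (a minimal sketch; assumes ``torch.distributed`` is already initialized and
    the model follows the 'Qwen3ForCausalLM' layout handled above):

        >>> shard_model(model, cpu_offload=False, fp32_weight=True, fp32_reduce=True)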
""" fsdp_kwargs = {"reshard_after_forward": reshard_after_forward, "mesh": dp_mesh} if fp32_weight: fsdp_kwargs["mp_policy"] = MixedPrecisionPolicy( param_dtype=torch.bfloat16, reduce_dtype=torch.float32 if fp32_reduce else torch.bfloat16 ) if cpu_offload: fsdp_kwargs["offload_policy"] = CPUOffloadPolicy() if model_class == 'Qwen3ForCausalLM': layers = list(model.model.layers) else: raise ValueError(f"Unsupported model_class: {model_class}") for layer in layers: fully_shard(layer, **fsdp_kwargs) fully_shard(model, **fsdp_kwargs) # Set up forward prefetch for layers prev = None for layer in reversed(layers): if prev is not None: layer.set_modules_to_forward_prefetch([prev]) prev = layer model.set_modules_to_forward_prefetch([prev]) def load_from_full_model_state_dict( model: "FSDPModule", full_sd: Dict[str, Any], allow_random_init_params: Optional[str] = None, use_tie_weights: bool = False ) -> None: """Load full state dict into an FSDP-sharded model. Args: model: FSDP-sharded model to load into. full_sd: Full (unsharded) state dictionary. allow_random_init_params: Comma-separated parameter names to randomly initialize if not found in full_sd. Default: None. use_tie_weights: If True, tie lm_head.weight to model.embed_tokens.weight. """ if isinstance(allow_random_init_params, str): allow_random_init_params = allow_random_init_params.split(',') meta_sharded_sd = model.state_dict() sharded_sd = {} if dist.get_rank() == 0: if use_tie_weights: full_sd['lm_head.weight'] = full_sd['model.embed_tokens.weight'] extra_meta_sharded_sd = set(meta_sharded_sd.keys()) - set(full_sd.keys()) extra_full_ds = set(full_sd.keys()) - set(meta_sharded_sd.keys()) extra_meta_sharded_sd = { k: (v.shape, v.device, v.dtype) for k, v in meta_sharded_sd.items() if k in extra_meta_sharded_sd } extra_full_ds = { k: (v.shape, v.device, v.dtype) for k, v in full_sd.items() if k in extra_full_ds } device0 = full_sd[list(full_sd)[0]] for k in extra_meta_sharded_sd: if allow_random_init_params is not None and k in allow_random_init_params: full_sd[k] = torch.rand(extra_meta_sharded_sd[k][0]) * 0.1 if full_sd[k].ndim >= 2: nn.init.kaiming_normal_(full_sd[k], a=0, mode='fan_in', nonlinearity='relu') else: nn.init.zeros_(full_sd[k]) full_sd[k] = full_sd[k].to(device0) assert len(meta_sharded_sd) == len(full_sd), ( f"Sharded State Dict doesn't equal to Full State Dict, " f"{len(meta_sharded_sd)} vs {len(full_sd)}\n" f"extra_meta_sharded_sd={format_dict_or_list(extra_meta_sharded_sd)}, " f"extra_full_ds={format_dict_or_list(extra_full_ds)}" ) assert sorted(list(meta_sharded_sd.keys())) == sorted(list(full_sd.keys())), \ "Keys of Sharded State Dict doesn't equal to Full State Dict" for param_name, sharded_meta_param in meta_sharded_sd.items(): if dist.get_rank() == 0: full_tensor = full_sd[param_name].detach().cuda().type(sharded_meta_param.dtype) else: full_tensor = torch.empty( sharded_meta_param.size(), device="cuda", dtype=sharded_meta_param.dtype, ) mesh = sharded_meta_param.device_mesh dist.broadcast(full_tensor, src=0, group=mesh.get_group(0)) dist.barrier() sharded_tensor = distribute_tensor( full_tensor, mesh, sharded_meta_param.placements ) sharded_sd[param_name] = nn.Parameter(sharded_tensor) model.load_state_dict(sharded_sd, assign=True) ================================================ FILE: pretrain/onerec_llm/training/gradients.py ================================================ """Gradient computation and manipulation utilities for training. 
This module provides utilities for gradient processing including: - Gradient clipping - Gradient norm computation for FSDP models - Gradient masking for embedding layers in distributed training """ from typing import Optional import torch import torch.distributed as dist def clip_grad_by_value( model: torch.nn.Module, clip_range: Optional[float] = None ) -> None: """Clip gradients by value. Args: model: The model whose gradients will be clipped. clip_range: Maximum absolute value for gradients. If None, no clipping. """ if clip_range is not None: torch.nn.utils.clip_grad_value_(model.parameters(), clip_range) def clip_grad_norm( model: torch.nn.Module, max_grad_norm: Optional[float] = None ) -> None: """Clip gradients by global L2 norm. Args: model: The model whose gradients will be clipped. max_grad_norm: Maximum allowed L2 norm. If None, no clipping. """ if max_grad_norm is not None: torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm) def compute_fsdp_zero2_grad_norm( model: torch.nn.Module, ignore_unused_parameters: bool = True ) -> float: """Compute the global L2 norm of gradients for FSDP Zero-2 models. Args: model: FSDP-wrapped model. ignore_unused_parameters: If True, ignore parameters without gradients. Returns: The global L2 norm of all gradients. """ total_sq = torch.tensor(0.0, device=next(model.parameters()).device) for param in model.parameters(): if param.grad is None: if not ignore_unused_parameters: raise ValueError( f"Parameter {param} has no gradient. " "Please check if it is being used correctly." ) continue local_grad = param.grad.to_local() total_sq += torch.sum(local_grad ** 2) dist.all_reduce(total_sq, op=dist.ReduceOp.SUM, group=dist.group.WORLD) grad_norm = torch.sqrt(total_sq).item() return grad_norm class EmbeddingGradientMasker: """Freeze a portion of embedding parameters during distributed training. In distributed training with DTensor, embedding layers are sharded across ranks. This class freezes the first `start_optimize_embedding_index` tokens in the vocabulary, allowing only the remaining tokens to be optimized. This is useful for progressive training strategies where only a subset of the vocabulary is optimized initially. Args: model: The model containing embedding layers config: Model config with vocab_size attribute start_optimize_embedding_index: Index from which to start optimizing embeddings. Tokens before this index will be frozen. If <= 0, no masking is applied. """ def __init__(self, model, config, start_optimize_embedding_index): self.model = model self.config = config self.start_optimize_embedding_index = start_optimize_embedding_index self.embedding_params = [] # List of (name, param) tuples for embedding layers self.saved_weights = {} # Dict mapping param name -> frozen weight slice (torch.Tensor) if start_optimize_embedding_index > 0: self._find_embedding_parameters() self._save_initial_weights() def _find_embedding_parameters(self): """Find all embedding-related parameters (embed_tokens and lm_head).""" for name, param in self.model.named_parameters(): if param.requires_grad and ("embed_tokens" in name or "lm_head" in name): self.embedding_params.append((name, param)) def _save_initial_weights(self): """Save frozen weight slices for each rank in distributed training. In distributed training, embedding parameters are sharded across ranks. This method calculates which portion of the local shard needs to be frozen and saves those weights for later restoration after optimizer steps. 
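        Worked example (illustrative numbers): with ``vocab_size=100``,
        ``world_size=4`` and ``start_optimize_embedding_index=30``, each rank
        owns ``shard_size = (100 + 3) // 4 = 25`` rows. Rank 0 (rows 0-24)
        freezes all 25 of its local rows, rank 1 (rows 25-49) freezes its
        first 5 local rows (global rows 25-29), and ranks 2-3 freeze nothing.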
""" dp_world_size = dist.get_world_size() dp_rank = dist.get_rank() full_vocab_size = self.config.vocab_size # Calculate shard boundaries: each rank owns a contiguous slice of the vocabulary shard_size = (full_vocab_size + dp_world_size - 1) // dp_world_size shard_offset = dp_rank * shard_size with torch.no_grad(): for name, param in self.embedding_params: # Get local tensor from DTensor (param is a DTensor in distributed mode) local_param_tensor = param.to_local() local_shard_size = local_param_tensor.shape[0] # Calculate overlap between frozen range [0, start_optimize_embedding_index) # and this rank's shard [shard_offset, shard_offset + local_shard_size) overlap_start = shard_offset overlap_end = min(self.start_optimize_embedding_index, shard_offset + local_shard_size) # Number of rows in this rank's shard that need to be frozen num_local_rows = 0 if overlap_end > overlap_start: num_local_rows = int(overlap_end - overlap_start) # Save the frozen slice for restoration after optimizer steps if num_local_rows > 0: self.saved_weights[name] = local_param_tensor[:num_local_rows].clone() def save_frozen_params(self): """Deprecated: Logic moved to __init__. Kept for backward compatibility.""" pass def apply_gradient_mask(self, optimizer=None): """Deprecated: We use restore strategy instead. Kept for backward compatibility.""" pass def restore_frozen_params(self): """Restore frozen parameters after optimizer.step(). This should be called after each optimizer.step() to restore the frozen portion of embedding weights that were modified by the optimizer. Uses .to_local() to safely modify DTensor parameters in distributed training. """ if self.start_optimize_embedding_index <= 0 or not self.saved_weights: return with torch.no_grad(): for name, param in self.embedding_params: if name in self.saved_weights: # Get local tensor from DTensor for modification local_param_tensor = param.to_local() saved_slice = self.saved_weights[name] num_to_restore = saved_slice.shape[0] if num_to_restore > 0: # Restore frozen weights by copying saved slice back local_param_tensor[:num_to_restore].copy_(saved_slice) ================================================ FILE: pretrain/onerec_llm/training/lr_schedulers.py ================================================ """Learning rate schedulers for training.""" import math from functools import partial from typing import Optional from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR def _get_cosine_schedule_with_warmup_lr_lambda( current_step: int, *, num_warmup_steps: int, num_training_steps: int, num_cycles: float, num_stop_steps: int = 0, min_lr_rate: float = 0.0 ) -> float: """Compute learning rate multiplier for cosine schedule with warmup. Args: current_step: Current training step. num_warmup_steps: Number of warmup steps. num_training_steps: Total number of training steps. num_cycles: Number of cosine cycles. num_stop_steps: Number of steps to keep LR at 0 at the start. min_lr_rate: Minimum learning rate as a fraction of max LR. Returns: Learning rate multiplier (0.0 to 1.0). 
""" if num_stop_steps > 0 and current_step < num_stop_steps: return 0.0 if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) if current_step > num_training_steps: return min_lr_rate progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps)) factor = 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)) factor = factor * (1 - min_lr_rate) + min_lr_rate return max(0.0, factor) def get_cosine_scheduler( optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, num_stop_steps: int = 0, last_epoch: int = -1, min_lr: Optional[float] = None, min_lr_rate: Optional[float] = None, **kwargs ) -> LambdaLR: """Create a cosine learning rate scheduler with warmup. Args: optimizer: Optimizer to schedule. num_warmup_steps: Number of warmup steps. num_training_steps: Total number of training steps. num_cycles: Number of cosine cycles. Default: 0.5. num_stop_steps: Number of steps to keep LR at 0 at the start. Default: 0. last_epoch: Last epoch index for resuming. Default: -1. min_lr: Minimum learning rate (absolute value). min_lr_rate: Minimum learning rate as fraction of max LR. Returns: LambdaLR scheduler with cosine schedule. """ if min_lr is not None and min_lr_rate is not None: raise ValueError("Only one of min_lr or min_lr_rate should be set") elif min_lr is not None: min_lr_rate = min_lr / optimizer.defaults["lr"] elif min_lr_rate is None: raise ValueError("One of min_lr or min_lr_rate must be set") lr_lambda = partial( _get_cosine_schedule_with_warmup_lr_lambda, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles, min_lr_rate=min_lr_rate, num_stop_steps=num_stop_steps, ) return LambdaLR(optimizer, lr_lambda, last_epoch) def get_scheduler( name: str, optimizer: Optimizer, num_warmup_steps: Optional[int] = None, num_training_steps: Optional[int] = None, **kwargs ) -> LambdaLR: """Get a learning rate scheduler by name. Args: name: Scheduler name. Currently only supports "cosine". optimizer: Optimizer to schedule. num_warmup_steps: Number of warmup steps. num_training_steps: Total number of training steps. **kwargs: Additional arguments passed to the scheduler. Returns: Learning rate scheduler instance. Raises: NotImplementedError: If scheduler name is not supported. """ if name == "cosine": return get_cosine_scheduler( optimizer=optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, **kwargs ) else: raise NotImplementedError(f"Unsupported LR scheduler `{name}`") ================================================ FILE: pretrain/onerec_llm/utils/__init__.py ================================================ """Utility functions for LLM training. 
This package provides general-purpose utilities including: - Common utilities (printing, device operations, random seeds) - Distributed training base utilities - Data loading and processing - Debugging and formatting tools - Performance tracking (MFU, time tracking) - Gradient masking - Worker information """ from onerec_llm.utils.common import ( Timer, dist_reduce_dict, get_optimizer_grouped_parameters, print_rank_0, print_rank_n, set_random_seed, to_cuda, to_device, ) from onerec_llm.utils.distributed import ( get_rank, get_world_size, get_world_size_and_rank, is_distributed, ) from onerec_llm.utils.ds_utils import ( format_dict_or_list, print_input_info, tensor_statistics, ) from onerec_llm.utils.mfu_stats import MFUStats from onerec_llm.utils.time_tracker import TimeTracker from onerec_llm.utils.worker_utils import get_worker_info, pytorch_worker_info __all__ = [ # Common "Timer", "dist_reduce_dict", "get_optimizer_grouped_parameters", "print_rank_0", "print_rank_n", "set_random_seed", "to_cuda", "to_device", # Distributed "get_rank", "get_world_size", "get_world_size_and_rank", "is_distributed", # Debug/Format "format_dict_or_list", "print_input_info", "tensor_statistics", # Performance tracking "MFUStats", "TimeTracker", # Worker info "get_worker_info", "pytorch_worker_info", ] ================================================ FILE: pretrain/onerec_llm/utils/common.py ================================================ """Common utility functions for the onerec_llm package. This module contains core utilities for: - Distributed training (printing, reduction) - Device operations - Optimizer configuration - Random seed setting - Timing utilities """ import random import time import numpy as np import torch import torch.distributed as dist from rich import print from transformers import set_seed as set_transformers_seed def print_rank_n(*msg, rank=0): try: _rank = dist.get_rank() except Exception: _rank = 0 if _rank == rank: print(*msg) def print_rank_0(*msg): print_rank_n(*msg, rank=0) def get_optimizer_grouped_parameters(model, learning_rate: float, weight_decay, no_decay_name_list=[ "bias", "LayerNorm.weight", "embedding.weight", "lm_head.weight" ], ): optimizer_grouped_parameters = [] llm_wd_params_group = [] llm_nowd_params_group = [] for n, p in model.named_parameters(): if p.requires_grad: if any(nd in n for nd in no_decay_name_list): # no weight decay params llm_nowd_params_group.append((n, p)) else: llm_wd_params_group.append((n, p)) # for LLM optimizer_grouped_parameters.append({ "params": [p for n, p in llm_wd_params_group], "weight_decay": weight_decay, "lr": learning_rate, }) optimizer_grouped_parameters.append({ "params": [p for n, p in llm_nowd_params_group], "weight_decay": 0.0, "lr": learning_rate, }) # remove empty params group final_optimizer_grouped_parameters = [] for group in optimizer_grouped_parameters: if len(group['params']) > 0: final_optimizer_grouped_parameters.append(group) return final_optimizer_grouped_parameters def to_device(batch, device, non_blocking=True): for key in list(batch.keys()): if isinstance(batch[key], torch.Tensor): batch[key] = batch[key].to(device=device, non_blocking=non_blocking) return batch def to_cuda(batch, non_blocking=True): """Move batch to CUDA device. 
This is a convenience wrapper around to_device.""" return to_device(batch, device=torch.cuda.current_device(), non_blocking=non_blocking) def set_random_seed(seed): if seed is not None: set_transformers_seed(seed) random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) def dist_reduce_dict(local_dict, group=None): gather_list = [None for _ in range(dist.get_world_size(group=group))] dist.all_gather_object( object_list=gather_list, obj=local_dict, group=group) def reduce_dicts(dicts): def _reduce(d1, d2): for key, value in d2.items(): if isinstance(value, dict): if key not in d1: d1[key] = {} _reduce(d1[key], value) else: if key in d1: d1[key] += value else: d1[key] = value return d1 result = {} for d in dicts: result = _reduce(result, d) return result return reduce_dicts(gather_list) class Timer: def __init__(self, desc: str = ""): self.desc = desc def __enter__(self): print_rank_0(f"Start... {self.desc}") self.start = time.time() return self def __exit__(self, exc_type, exc_value, traceback): self.end = time.time() self.elapsed = self.end - self.start print_rank_0(f"End... {self.desc} elapsed: {self.elapsed:.3f}s") ================================================ FILE: pretrain/onerec_llm/utils/data_utils.py ================================================ """Data loading utilities for parquet files and HDFS.""" import hashlib import os import subprocess import time import traceback from typing import Optional import numpy as np import pyarrow.parquet as pq from onerec_llm.utils.worker_utils import get_worker_info from onerec_llm.utils.distributed import get_world_size_and_rank def calculate_text_hash(text): """Calculate SHA-256 hash of text. Args: text: Input text string Returns: Hexadecimal hash string """ hash_object = hashlib.sha256() hash_object.update(text.encode('utf-8')) return hash_object.hexdigest() def shell_hdfs_ls(source_dir): """List files in HDFS directory. Args: source_dir: HDFS directory path Returns: list: List of file paths starting with 'viewfs://' """ try: command = f"hdfs dfs -ls {source_dir}" result = subprocess.run(command, shell=True, check=True, capture_output=True, text=True) files = [] for line in result.stdout.splitlines(): parts = line.split() if len(parts) > 0 and parts[-1].startswith('viewfs://'): files.append(parts[-1]) return files except subprocess.CalledProcessError: print(f"Error occurred: {traceback.format_exc()}") return [] class FakeParquetFileFromFastParquetFile: """Wrapper for fastparquet ParquetFile to match pyarrow interface.""" def __init__(self, fast_parquet_file): # Package version: mpirun --allow-run-as-root --hostfile /etc/mpi/hostfile --pernode bash -c "pip3 install fastparquet==2024.2.0" from fastparquet import ParquetFile self.fast_parquet_file = fast_parquet_file # Put file opening logic first to prevent failure if file is deleted self.res = ParquetFile(self.fast_parquet_file) self.res.num_rows = len(self.res.to_pandas()) self.num_row_groups = 1 def read_row_group(self, i): assert i == 0 return self.res def load_parquet_file( file_path: str, retry: int = 5, max_cache_files: int = 500, parquet_backend: str = 'fast_parquet', cache_dir: Optional[str] = None, hadoop_cmd: Optional[str] = None ) -> pq.ParquetFile: """Load a parquet file from local path or HDFS. This function handles two types of paths: 1. HDFS paths (viewfs:// or hdfs://): Downloads to cache and loads from cache 2.
Local paths: Directly loads from the path Args: file_path: Path to parquet file (can be local path or HDFS path) retry: Number of retries when HDFS download fails max_cache_files: Maximum number of files to keep in cache parquet_backend: Parquet backend, 'fast_parquet' or 'pyarrow' cache_dir: Cache directory path (default: /code/dataset_cache/{worker_id}_{rank_id}) hadoop_cmd: Hadoop command path (default: /home/hadoop/software/hadoop/bin/hadoop) Returns: Loaded parquet file object Raises: ValueError: If parquet_backend is invalid FileNotFoundError: If file cannot be found or downloaded after retries """ if parquet_backend not in ["fast_parquet", "pyarrow"]: raise ValueError(f"Invalid parquet_backend: {parquet_backend}. Must be 'fast_parquet' or 'pyarrow'") # Check if it's an HDFS path is_hdfs_path = file_path.startswith(('viewfs://', 'hdfs://')) if is_hdfs_path: # HDFS path: use cache and download logic return _load_parquet_from_hdfs( file_path, retry, max_cache_files, parquet_backend, cache_dir, hadoop_cmd ) else: # Local path: directly load (even if os.path.exists returns False, # some file systems may support direct access) try: return _load_parquet_from_path(file_path, parquet_backend) except Exception as e: # If direct load fails and file doesn't exist, provide clear error if not os.path.exists(file_path): raise FileNotFoundError(f"Local file not found: {file_path}") from e raise def _load_parquet_from_hdfs( file_path: str, retry: int, max_cache_files: int, parquet_backend: str, cache_dir: Optional[str], hadoop_cmd: Optional[str] ) -> pq.ParquetFile: """Load parquet file from HDFS using cache mechanism.""" # Setup cache directory # If cache_dir is None or empty string, use default cache directory if not cache_dir: worker_id = get_worker_info()[0] rank_id = get_world_size_and_rank()[1] cache_dir = f'/code/dataset_cache/{worker_id}_{rank_id}' os.makedirs(cache_dir, exist_ok=True) # Generate cache file path filename = os.path.basename(file_path) file_hash = calculate_text_hash(file_path) cache_path = os.path.join(cache_dir, f"{file_hash}_{filename}") # Try to load from cache first if os.path.exists(cache_path): try: return _load_parquet_from_path(cache_path, parquet_backend) except Exception as e: # Cache file might be corrupted, remove it and re-download print(f"Warning: Cached file {cache_path} is corrupted, removing: {e}") try: os.remove(cache_path) except Exception: pass # Download from HDFS with retry if hadoop_cmd is None: hadoop_cmd = '/home/hadoop/software/hadoop/bin/hadoop' last_error = None for attempt in range(retry): try: # Clean cache if needed before downloading _clean_cache_if_needed(cache_dir, max_cache_files) # Download from HDFS _download_from_hdfs(file_path, cache_path, hadoop_cmd) # Load downloaded file return _load_parquet_from_path(cache_path, parquet_backend) except Exception as e: last_error = e if attempt < retry - 1: # Linear backoff with jitter wait_time = 2 + np.random.randint(0, 5) + attempt print(f"Download attempt {attempt + 1}/{retry} failed: {e}. Retrying in {wait_time}s...") time.sleep(wait_time) else: print(f"All {retry} download attempts failed for {file_path}") # All retries failed raise FileNotFoundError( f"Failed to load parquet file from HDFS after {retry} attempts. 
" f"HDFS path: {file_path}, Cache: {cache_path}, Error: {last_error}" ) def _load_parquet_from_path(file_path: str, parquet_backend: str) -> pq.ParquetFile: """Load parquet file from given path.""" if parquet_backend == 'pyarrow': return pq.ParquetFile(file_path) else: return FakeParquetFileFromFastParquetFile(file_path) def _clean_cache_if_needed(cache_dir: str, max_cache_files: int): """Clean old cache files if cache exceeds max_cache_files.""" try: files = [ os.path.join(cache_dir, f) for f in os.listdir(cache_dir) if os.path.isfile(os.path.join(cache_dir, f)) ] if len(files) <= max_cache_files: return # Sort by creation time and remove oldest half files.sort(key=os.path.getctime) files_to_remove = files[:len(files) - max_cache_files // 2] for file_path in files_to_remove: try: os.remove(file_path) print(f"Removed old cached file: {file_path}") except Exception as e: print(f"Failed to remove cached file {file_path}: {e}") except Exception as e: print(f"Warning: Failed to clean cache: {e}") def _download_from_hdfs(hdfs_path: str, local_path: str, hadoop_cmd: str): """Download file from HDFS to local path.""" cmd = [hadoop_cmd, 'fs', '-get', hdfs_path, local_path] result = subprocess.run( cmd, capture_output=True, text=True, check=False ) if result.returncode != 0: raise RuntimeError( f"HDFS download failed. Command: {' '.join(cmd)}, " f"Return code: {result.returncode}, " f"Error: {result.stderr}" ) if not os.path.exists(local_path): raise FileNotFoundError(f"Downloaded file not found at {local_path}") ================================================ FILE: pretrain/onerec_llm/utils/distributed.py ================================================ """Distributed training base utilities. This module provides fundamental distributed training utilities that can be used across different modules without creating circular dependencies. For FSDP-specific utilities, see onerec_llm.training.distributed. """ import os from typing import Tuple import torch import torch.distributed as dist def get_world_size_and_rank() -> Tuple[int, int]: """Get the current world size and rank number. This function checks multiple sources in order: 1. PyTorch distributed (if initialized) 2. Environment variables (RANK, WORLD_SIZE) 3. Defaults to single process (1, 0) Returns: Tuple of (world_size, rank). """ if torch.distributed.is_available() and torch.distributed.is_initialized(): return torch.distributed.get_world_size(), torch.distributed.get_rank() elif "RANK" in os.environ and "WORLD_SIZE" in os.environ: return int(os.environ["WORLD_SIZE"]), int(os.environ["RANK"]) else: return 1, 0 def get_rank() -> int: """Get the current process rank. Returns: Process rank (0-based). """ _, rank = get_world_size_and_rank() return rank def get_world_size() -> int: """Get the current world size. Returns: Number of processes in the distributed group. """ world_size, _ = get_world_size_and_rank() return world_size def is_distributed() -> bool: """Check if distributed training is initialized. Returns: True if distributed training is available and initialized. 
""" return torch.distributed.is_available() and torch.distributed.is_initialized() ================================================ FILE: pretrain/onerec_llm/utils/ds_utils.py ================================================ """Debug and formatting utilities for data structures and tensors.""" import math import os import traceback from dataclasses import is_dataclass, asdict from typing import Any, Dict, List, Tuple, Union import torch def convert_dataclass_to_dict(obj: Any) -> Any: """Convert dataclass instance to dict, return other objects unchanged.""" if is_dataclass(obj) and not isinstance(obj, type): return asdict(obj) return obj def tensor_statistics(tensor: torch.Tensor, n: int = -1, **kwargs) -> Tuple[str, str, str, str]: """Compute tensor statistics at 4 granularity levels. Args: tensor: PyTorch tensor of any shape n: Partial range: -1 for first half, >0 for first n elements Returns: Tuple of 4 formatted stat strings: full, partial, magnitude-based, 1/10 magnitude-based """ flattened = tensor.reshape(-1) total_elements = flattened.numel() if total_elements == 0: base = "mean: NaN, variance: NaN, max: NaN, min: NaN, non-zeros: 0" return ( f"Full - {base}", f"Partial - {base}", f"Magnitude-based - {base}", f"1/10 Magnitude-based - {base}" ) if n == -1: part_count = (total_elements + 1) // 2 part_tensor = flattened[:part_count] part_label = f"first half ({part_count} elements)" elif isinstance(n, int) and n > 0: if n > total_elements: raise ValueError(f"n={n} exceeds total elements ({total_elements})") part_count = n part_tensor = flattened[:n] part_label = f"first {n} elements" else: raise ValueError(f"n must be -1 or positive integer, got: {n}") if total_elements <= 1: mag_count = 0 mag_label = "no elements (total <= 1)" mag_tensor = flattened[:0] else: log_val = math.log10(total_elements) k = int(log_val) - 1 if log_val.is_integer() else math.floor(log_val) mag_count = 10 ** k mag_count = min(mag_count, total_elements) mag_tensor = flattened[:mag_count] mag_label = f"first {mag_count} elements (magnitude-based)" line4_count = mag_count // 10 if line4_count <= 0: line4_label = "no elements (1/10 of magnitude-based <= 0)" line4_tensor = flattened[:0] else: line4_count = min(line4_count, total_elements) line4_tensor = flattened[:line4_count] line4_label = f"first {line4_count} elements (1/10 of magnitude-based)" def calc_stats(t: torch.Tensor) -> Tuple[float, float, float, float, int]: """Calculate mean, variance, max, min, non-zero count.""" if t.numel() == 0: return (float('nan'), float('nan'), float('nan'), float('nan'), 0) return ( torch.mean(t.float()).item(), torch.var(t.float(), unbiased=False).item(), torch.max(t).item(), torch.min(t).item(), torch.count_nonzero(t).item() ) full_mean, full_var, full_max, full_min, full_nonzero = calc_stats(flattened) part_mean, part_var, part_max, part_min, part_nonzero = calc_stats(part_tensor) mag_mean, mag_var, mag_max, mag_min, mag_nonzero = calc_stats(mag_tensor) line4_mean, line4_var, line4_max, line4_min, line4_nonzero = calc_stats(line4_tensor) def format_line(label: str, mean: float, var: float, max_val: float, min_val: float, nonzero: int) -> str: return (f"{label} - mean: {mean:.6f}, variance: {var:.6f}, " f"max: {max_val:.6f}, min: {min_val:.6f}, non-zeros: {nonzero}") line1 = format_line("Full", full_mean, full_var, full_max, full_min, full_nonzero) line2 = format_line(part_label, part_mean, part_var, part_max, part_min, part_nonzero) line3 = format_line(mag_label, mag_mean, mag_var, mag_max, mag_min, mag_nonzero) line4 = 
format_line(line4_label, line4_mean, line4_var, line4_max, line4_min, line4_nonzero) return line1, line2, line3, line4 def print_input_info( data: Any, prefix: str = "", max_str_len: int = 50, return_str: bool = False, max_show: int = 4, save_path: Union[str, None] = None, **kwargs ) -> Union[None, str]: """Recursively print or return detailed information about input data. Supports Tensor, dict, list, tuple, str, int, float. Can save data to disk. Args: data: Data to print prefix: Prefix for each line (indentation) max_str_len: Max string display length return_str: Return string instead of printing max_show: Max elements for tensor preview save_path: Optional path to save data (tensors detached to CPU) **kwargs: Passed to tensor_statistics() Returns: Formatted string if return_str=True, else None """ data = convert_dataclass_to_dict(data) def _detach_to_cpu(obj: Any) -> Any: """Recursively detach tensors and move to CPU.""" if isinstance(obj, torch.Tensor): return obj.detach().cpu() elif isinstance(obj, (list, tuple)): return type(obj)(_detach_to_cpu(item) for item in obj) elif isinstance(obj, dict): return {k: _detach_to_cpu(v) for k, v in obj.items()} elif hasattr(obj, '__dict__'): return {k: _detach_to_cpu(v) for k, v in obj.__dict__.items()} else: return obj if save_path is not None: try: data_to_save = _detach_to_cpu(data) dirname = os.path.dirname(save_path) if dirname: os.makedirs(dirname, exist_ok=True) torch.save(data_to_save, save_path) print(f"Saved data to: {save_path}") except Exception as e: print(f"Failed to save data to {save_path}: {e}\n{traceback.format_exc()}") lines: List[str] = [] try: data = dict(data) except (TypeError, ValueError): pass def add_line(text: str) -> None: if return_str: lines.append(text) else: print(text) def _process_nested_item(item: Any, item_prefix: str, max_str_len: int, return_str: bool, lines: List[str], **kwargs) -> None: sub_result = print_input_info(item, item_prefix, max_str_len, return_str=True, **kwargs) if return_str: lines.extend(sub_result.split('\n')) else: print(sub_result) if data is None: add_line(f"{prefix}None") return "\n".join(lines) if return_str else None if isinstance(data, torch.Tensor): flattened = data.flatten() data_preview = f"{flattened[:max_show].tolist()}...{flattened[-max_show:].tolist()}" base_info = (f"{prefix}Tensor: shape={tuple(data.shape)}, dtype={data.dtype}, " f"device={data.device}, data={data_preview}") if data.dtype == torch.bool: total_elements = data.numel() true_count = data.sum().item() false_count = total_elements - true_count true_ratio = true_count / total_elements * 100 if total_elements > 0 else 0 false_ratio = false_count / total_elements * 100 if total_elements > 0 else 0 add_line(base_info) add_line(f"{prefix} True: count={true_count:,d} ({true_ratio:.2f}%)") add_line(f"{prefix} False: count={false_count:,d} ({false_ratio:.2f}%)") else: add_line(base_info) for idx, stat_line in enumerate(tensor_statistics(data, **kwargs)): add_line(f"{prefix} stat{idx}: {stat_line}") elif isinstance(data, str): display_str = data[:max_str_len] + "..." 
if len(data) > max_str_len else data add_line(f"{prefix}String: length={len(data)}, value='{display_str}'") elif isinstance(data, (list, tuple)): container_type = "List" if isinstance(data, list) else "Tuple" add_line(f"{prefix}{container_type}: length={len(data)}") for i, item in enumerate(data): add_line(f"{prefix}[{i}]:") _process_nested_item(item, prefix + " ", max_str_len, return_str, lines, **kwargs) elif isinstance(data, dict): add_line(f"{prefix}Dict: keys={len(data)}") for key, value in data.items(): add_line(f"{prefix}'{key}':") _process_nested_item(value, prefix + " ", max_str_len, return_str, lines, **kwargs) elif isinstance(data, (int, float)): add_line(f"{prefix}{type(data).__name__}: {data}") else: data_str = str(data) truncated = data_str[:max_show] + "..." + data_str[-max_show:] if len(data_str) > max_show * 2 else data_str add_line(f"{prefix}Other type ({type(data).__name__}): {truncated}") return "\n".join(lines) if return_str else None def format_dict_or_list(obj: Any, indent_level: int = 0, indent_size: int = 2) -> str: """Format dict/list as readable string (alternative to json.dumps). Args: obj: Dictionary, list, or other object indent_level: Current indentation level indent_size: Spaces per indentation level Returns: Formatted string """ def format_value(value: Any, indent_level: int, indent_size: int) -> str: if isinstance(value, (dict, list)): return format_dict_or_list(value, indent_level, indent_size) elif isinstance(value, str): return f'"{value}"' else: return str(value) if isinstance(obj, dict): formatted_items = [] indent = " " * indent_size * (indent_level + 1) for key, value in obj.items(): formatted_value = format_value(value, indent_level + 1, indent_size) formatted_items.append(f'{indent}"{key}": {formatted_value}') items_str = ',\n'.join(formatted_items) current_indent = " " * indent_size * indent_level return f'{{\n{items_str}\n{current_indent}}}' elif isinstance(obj, list): formatted_items = [] indent = " " * indent_size * (indent_level + 1) for item in obj: formatted_value = format_value(item, indent_level + 1, indent_size) formatted_items.append(f'{indent}{formatted_value}') items_str = ',\n'.join(formatted_items) current_indent = " " * indent_size * indent_level return f'[\n{items_str}\n{current_indent}]' else: return str(obj) ================================================ FILE: pretrain/onerec_llm/utils/mfu_stats.py ================================================ """Model FLOPs Utilization (MFU) statistics and calculation utilities. This module provides functionality to calculate FLOPs (Floating Point Operations) for transformer models and compute MFU metrics for training performance monitoring. """ import collections import json import os import platform import re import subprocess from collections import defaultdict from functools import lru_cache from typing import Dict, List, Optional, Union import easydict def _sum_if_list(x: Union[int, List[int]]) -> int: """Sum if input is a list, otherwise return as-is.""" return sum(x) if isinstance(x, list) else x @lru_cache(maxsize=1) def _get_gpu_model() -> str: """Get NVIDIA GPU model name. Returns: GPU model name, or "Unknown" if detection fails. 
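    Example (illustrative; the value depends on the host, and may span
    multiple lines on multi-GPU machines):

        >>> _get_gpu_model()
        'NVIDIA H800'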
""" try: # Try nvidia-smi (most reliable method) if platform.system() in ["Linux", "Darwin"]: result = subprocess.run( ["nvidia-smi", "--query-gpu=name", "--format=csv,noheader"], capture_output=True, text=True ) if result.returncode == 0: return result.stdout.strip() elif platform.system() == "Windows": result = subprocess.run( ["nvidia-smi", "--query-gpu=name", "--format=csv,noheader"], capture_output=True, text=True, shell=True ) if result.returncode == 0: return result.stdout.strip() # Fallback: Windows Management Instrumentation try: import wmi c = wmi.WMI() gpus = c.Win32_VideoController() for gpu in gpus: if "NVIDIA" in gpu.Name: return gpu.Name except ImportError: pass # Fallback: PyTorch CUDA try: import torch if torch.cuda.is_available(): return torch.cuda.get_device_name(0) except ImportError: pass # Fallback: TensorFlow try: import tensorflow as tf if tf.test.is_gpu_available(): gpus = tf.config.list_physical_devices('GPU') if gpus: details = tf.config.experimental.get_device_details(gpus[0]) return details.get('device_name', 'NVIDIA GPU') except ImportError: pass # Last resort: Check Linux driver file if platform.system() == "Linux": if os.path.exists("/proc/driver/nvidia/version"): with open("/proc/driver/nvidia/version", "r") as f: first_line = f.readline().strip() match = re.search(r"NVIDIA driver \S+ for (\S+)", first_line) if match: return match.group(1) except Exception: pass return "Unknown" @lru_cache(maxsize=1) def _is_h800() -> bool: """Check if GPU is NVIDIA H800.""" gpu_model = _get_gpu_model() return gpu_model.split('\n')[0].strip() == 'NVIDIA H800' @lru_cache(maxsize=1) def _get_gpu_flops() -> float: """Get theoretical peak FLOPS for current GPU. Returns: Peak FLOPS (H800: 989 TFLOPS, others: 312 TFLOPS) """ return 989e12 if _is_h800() else 312e12 def _calculate_decoder_layer_flops( num_head: int, head_dim: int, hidden_size: int, intermediate_size: int, kv_heads: Optional[int] = None, is_causal: bool = False, seq_len: Union[int, List[int]] = 1, batch_size: int = 1, linear_factor: int = 2, attn_output_layers: int = 2 ) -> Dict: """Calculate FLOPs for a single transformer decoder layer. 
Args: num_head: Number of attention heads head_dim: Dimension per attention head hidden_size: Hidden layer size intermediate_size: FFN intermediate layer size kv_heads: Number of KV attention heads (for Group Attention) is_causal: Whether to use causal masking seq_len: Input sequence length (int or list for variable lengths) batch_size: Batch size linear_factor: Linear computation factor (default: 2 for multiply-add) attn_output_layers: Number of attention output layers Returns: Dictionary containing FLOPs breakdown and total FLOPs """ if kv_heads is None: kv_heads = num_head seq_len_per_sample = None if isinstance(seq_len, list) else seq_len // batch_size total_seq_len = _sum_if_list(seq_len) # QKV projection FLOPs q_flops = linear_factor * total_seq_len * hidden_size * (num_head * head_dim) k_flops = linear_factor * total_seq_len * hidden_size * (kv_heads * head_dim) v_flops = linear_factor * total_seq_len * hidden_size * (kv_heads * head_dim) # Attention scores FLOPs if isinstance(seq_len, list): attn_scores_flops = 0 for seq_len_per_sample in seq_len: attn_scores_flops += ( linear_factor * num_head * seq_len_per_sample * seq_len_per_sample * head_dim ) else: attn_scores_flops = ( linear_factor * num_head * seq_len_per_sample * seq_len_per_sample * head_dim * batch_size ) # Causal masking reduces computation by half if is_causal: attn_scores_flops *= 0.5 attn_v_flops = attn_scores_flops # Attention output projection attn_out_flops = linear_factor * total_seq_len * (num_head * head_dim) * hidden_size # Total attention FLOPs attention_flops = q_flops + k_flops + v_flops + attn_scores_flops + attn_v_flops + attn_out_flops # FFN FLOPs ffn_flops = ( linear_factor * total_seq_len * hidden_size * intermediate_size * attn_output_layers ) total_flops = attention_flops + ffn_flops return { 'total_flops': total_flops, 'attention': { 'q_proj': q_flops, 'k_proj': k_flops, 'v_proj': v_flops, 'attn_scores': attn_scores_flops, 'attn_v': attn_v_flops, 'attn_out': attn_out_flops, 'total': attention_flops }, 'ffn_flops': ffn_flops, 'batch_info': { 'batch_size': batch_size, 'seq_len_per_sample': seq_len_per_sample } } def _calculate_decoder_layers_flops( num_head: int, head_dim: int, hidden_size: int, intermediate_size: int, kv_heads: Optional[int] = None, is_causal: bool = False, seq_len: Union[int, List[int]] = 1, num_layers: int = 1, linear_factor: int = 2, batch_size: int = 1, attn_output_layers: int = 2 ) -> Dict: """Calculate FLOPs for multiple transformer decoder layers. 
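    All layers are assumed identical, so the total is ``num_layers`` times a
    single layer's cost; note that ``per_layer_flops`` in the returned dict
    holds the first layer's breakdown rather than a list.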
Args: num_head: Number of attention heads head_dim: Dimension per attention head hidden_size: Hidden layer size intermediate_size: FFN intermediate layer size kv_heads: Number of KV attention heads is_causal: Whether to use causal masking seq_len: Input sequence length num_layers: Number of decoder layers linear_factor: Linear computation factor batch_size: Batch size attn_output_layers: Number of attention output layers Returns: Dictionary containing per-layer and total FLOPs """ layers_flops = [] total_flops = 0 for layer_idx in range(num_layers): layer_flops = _calculate_decoder_layer_flops( num_head=num_head, head_dim=head_dim, hidden_size=hidden_size, intermediate_size=intermediate_size, kv_heads=kv_heads, is_causal=is_causal, seq_len=seq_len, linear_factor=linear_factor, batch_size=batch_size, attn_output_layers=attn_output_layers ) layers_flops.append({ 'layer_index': layer_idx, **layer_flops }) total_flops += layer_flops['total_flops'] return { 'total_flops': total_flops, 'per_layer_flops': layers_flops[0] if layers_flops else {}, 'avg_flops_per_layer': total_flops / num_layers if num_layers > 0 else 0, 'num_layers': num_layers, } def _calculate_llm_flops(llm_params: easydict.EasyDict) -> Dict: """Calculate total FLOPs for an LLM model. Args: llm_params: Model parameters (EasyDict with model config) Returns: Dictionary containing total FLOPs including LM head """ linear_factor = 2 llm_flops = _calculate_decoder_layers_flops( num_head=llm_params.num_head, head_dim=llm_params.head_dim, hidden_size=llm_params.hidden_size, intermediate_size=llm_params.intermediate_size, num_layers=llm_params.num_layers, kv_heads=llm_params.get('kv_heads', None), is_causal=llm_params.get('is_causal', True), seq_len=llm_params.seq_len, batch_size=llm_params.get('batch_size', 1), linear_factor=linear_factor, attn_output_layers=3 ) # Add LM head FLOPs lm_head_flops = ( linear_factor * _sum_if_list(llm_params.seq_len) * (llm_params.hidden_size * llm_params.vocab_size) ) llm_flops['total_flops'] += lm_head_flops llm_flops['lm_head_flops'] = lm_head_flops return llm_flops @lru_cache(maxsize=32) def _extract_model_params(config_path: str) -> easydict.EasyDict: """Extract transformer parameters from model config JSON. Supports Qwen3 architecture. Args: config_path: Path to JSON config file Returns: EasyDict containing transformer parameters Raises: ValueError: If architecture is not supported """ with open(config_path, 'r') as f: config = json.load(f) if 'architectures' in config and 'Qwen3ForCausalLM' in config['architectures']: transformer_params = { 'num_head': config['num_attention_heads'], 'head_dim': config['head_dim'], 'hidden_size': config['hidden_size'], 'intermediate_size': config['intermediate_size'], 'kv_heads': config['num_key_value_heads'], 'num_layers': config['num_hidden_layers'], 'vocab_size': config['vocab_size'] } else: raise ValueError( f'Unsupported architecture. Expected Qwen3ForCausalLM, ' f'got: {config.get("architectures", "unknown")}' ) return easydict.EasyDict(transformer_params) def _calc_mfu( config_path: str, total_seq_len: int, llm_batch_size: int = 1, secs_per_step: Optional[float] = None, _gpu_flops: Optional[float] = None ) -> Dict: """Calculate Model FLOPs Utilization (MFU) for LLM models. 
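    MFU is derived from the forward-pass FLOPs scaled by 3 (the backward pass
    costs roughly twice the forward pass), divided by the GPU's theoretical
    peak: ``mfu = 3 * forward_flops / (gpu_peak_flops * secs_per_step)``.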
Args: config_path: Path to model config JSON total_seq_len: Total sequence length llm_batch_size: Batch size for LLM secs_per_step: Seconds per training step _gpu_flops: GPU peak FLOPS (auto-detected if None) Returns: Dictionary containing MFU metrics and FLOPs breakdown """ transformer_params = _extract_model_params(config_path) # Calculate LLM FLOPs llm_params = easydict.EasyDict({ **transformer_params, 'is_causal': True, 'seq_len': total_seq_len, 'batch_size': llm_batch_size }) flops = _calculate_llm_flops(llm_params) gpu_flops = _get_gpu_flops() if _gpu_flops is None else _gpu_flops # Add MFU metrics flops['total_flops*3(T)'] = flops['total_flops'] * 3 / 1e12 flops['total_flops/gpu_flops'] = flops['total_flops'] * 3 / gpu_flops flops['gpu_flops'] = gpu_flops flops['llm_total_flops*3(T)'] = flops['total_flops*3(T)'] flops['llm_percentage'] = 100 flops['input_args'] = easydict.EasyDict( config_path=config_path, total_seq_len=total_seq_len, llm_batch_size=llm_batch_size, secs_per_step=secs_per_step ) if secs_per_step is not None: flops['mfu'] = flops['total_flops/gpu_flops'] / secs_per_step return flops class MFUStats: """Model FLOPs Utilization statistics tracker for LLM training. Tracks token counts and computes MFU metrics for training performance monitoring. Args: args: Training arguments containing model_dir and logging_per_step """ def __init__(self, args): self.tokens_for_mfu = collections.defaultdict(int) self.mfu_per_step_per_gpu = None self.args = args self.total_mfu = defaultdict(int) def set(self, num_tokens: int, num_samples: int) -> None: """Accumulate token and sample counts for MFU calculation. Args: num_tokens: Total number of tokens num_samples: Number of samples """ self.tokens_for_mfu["num_tokens"] += int(num_tokens) self.tokens_for_mfu["num_samples"] += int(num_samples) def mfu(self, secs: float, global_step: int) -> Dict[str, float]: """Compute MFU metrics for the current logging period. 
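        Token and sample counts accumulated via ``set()`` are divided by
        ``args.logging_per_step`` before calling ``_calc_mfu``, so the
        returned values are per-step, per-GPU figures; the counters are reset
        at the end of each call.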
Args: secs: Total seconds elapsed in this period global_step: Current global training step Returns: Dictionary containing MFU metrics for logging """ args = self.args tokens_for_mfu = self.tokens_for_mfu # Calculate MFU arguments for text-only LLM mfu_args = easydict.EasyDict( total_seq_len=round(tokens_for_mfu["num_tokens"] / args.logging_per_step), llm_batch_size=round(tokens_for_mfu["num_samples"] / args.logging_per_step), secs_per_step=secs / args.logging_per_step ) config_path = os.path.join(args.model_dir, "config.json") mfu_per_step_per_gpu = _calc_mfu(config_path, **mfu_args) self.mfu_per_step_per_gpu = mfu_per_step_per_gpu # Accumulate total MFU total_mfu = self.total_mfu total_mfu['llm_total_flops*3(T)'] += ( mfu_per_step_per_gpu['llm_total_flops*3(T)'] * args.logging_per_step ) total_mfu['mfu'] += mfu_per_step_per_gpu['mfu'] * args.logging_per_step # Build logging dictionary # Current metrics: period-based MFU (current logging period) # Average metrics: cumulative MFU (average over entire training, smoothed) mfu_log_dict = { "perf/mfu_per_step_per_gpu_current": mfu_per_step_per_gpu['mfu'], "perf/llm_flops_per_step_per_gpu_current": mfu_per_step_per_gpu['llm_total_flops*3(T)'], "perf/mfu_per_step_per_gpu_avg": total_mfu['mfu'] / global_step, "perf/llm_flops_per_step_per_gpu_avg": total_mfu['llm_total_flops*3(T)'] / global_step, } # Reset counters for next period self.tokens_for_mfu = collections.defaultdict(int) return mfu_log_dict ================================================ FILE: pretrain/onerec_llm/utils/time_tracker.py ================================================ """Time tracking utilities for performance profiling.""" import os import time from typing import Dict, List, Literal, Optional class TimeTracker: """Track time intervals between tick calls and compute rolling averages. This class records time intervals for named events and maintains a rolling average of the last N intervals for each event. Supports both absolute wall-clock time and CPU time tracking. Args: n: Number of recent intervals to average (default: 1) time_types: List of time types to track. Options: "absolute" (wall-clock) or "cpu" (CPU time). Default: ["absolute"] Example: >>> tracker = TimeTracker(n=10) >>> tracker.tick("start") >>> # ... do some work ... >>> tracker.tick("end") >>> stats = tracker.stat() # Returns average intervals """ def __init__( self, n: int = 1, time_types: Optional[List[Literal["absolute", "cpu"]]] = None ): if time_types is None: time_types = ["absolute"] self.n = n self.time_types = time_types self.last_times: Dict[str, float] = { "absolute": time.perf_counter(), "cpu": os.times().user } self.interval_records: Dict[str, List[float]] = {} def tick(self, name: str) -> None: """Record time interval for a named event. Records the time elapsed since the last tick() call for each configured time type. Maintains a rolling window of the last N intervals. Args: name: Name of the event to track """ for time_type in self.time_types: # Get current time based on type if time_type == "absolute": current_time = time.perf_counter() elif time_type == "cpu": current_time = os.times().user else: raise ValueError( f"Invalid time_type '{time_type}'. " "Allowed values are 'absolute' or 'cpu'." 
) # Calculate interval last_time = self.last_times[time_type] interval = current_time - last_time self.last_times[time_type] = current_time # Store interval in rolling window key = f"{time_type}@{name}" if key not in self.interval_records: self.interval_records[key] = [] intervals = self.interval_records[key] intervals.append(interval) # Maintain rolling window of size n if len(intervals) > self.n: intervals.pop(0) def stat(self) -> Dict[str, float]: """Get average time intervals for all tracked events. Returns: Dictionary mapping event keys (format: "{time_type}@{name}") to average interval values. Only includes events with recorded intervals. """ result: Dict[str, float] = {} for key, intervals in self.interval_records.items(): if intervals: result[key] = sum(intervals) / len(intervals) return result ================================================ FILE: pretrain/onerec_llm/utils/worker_utils.py ================================================ """Worker information utilities for PyTorch DataLoader and distributed training.""" import os import torch import torch.distributed as dist def get_worker_info(): """Get PyTorch DataLoader worker information. This function prioritizes PyTorch DataLoader's worker info over environment variables, as it provides accurate worker information in multi-process DataLoader contexts. Returns: tuple: (worker_id, num_workers) """ # Priority 1: Try to get from PyTorch DataLoader worker info # This is the most reliable source in DataLoader worker processes try: import torch.utils.data worker_info = torch.utils.data.get_worker_info() if worker_info is not None: return worker_info.id, worker_info.num_workers except (ModuleNotFoundError, AttributeError): pass # Priority 2: Fall back to environment variables (for non-DataLoader contexts) if "WORKER" in os.environ and "NUM_WORKERS" in os.environ: return int(os.environ["WORKER"]), int(os.environ["NUM_WORKERS"]) # Default: single worker, worker_id = 0 return 0, 1 def pytorch_worker_info(group=None): """Return node and worker info for PyTorch and some distributed environments. Args: group: Optional process group for distributed environments. Defaults to None. Returns: tuple: (rank, world_size, worker, num_workers) """ # Get worker info (reuse get_worker_info to avoid code duplication) worker, num_workers = get_worker_info() # Get rank and world_size rank = 0 world_size = 1 # Check environment variables first if "RANK" in os.environ and "WORLD_SIZE" in os.environ: rank = int(os.environ["RANK"]) world_size = int(os.environ["WORLD_SIZE"]) else: # Try to get from PyTorch distributed try: if dist.is_available() and dist.is_initialized(): group = group or dist.group.WORLD rank = dist.get_rank(group=group) world_size = dist.get_world_size(group=group) except (ModuleNotFoundError, AttributeError): pass return rank, world_size, worker, num_workers ================================================ FILE: pretrain/recipes/train_qwen3.py ================================================ """Qwen3 Training Script Multi-node, multi-GPU training script for Qwen3 models using FSDP (Fully Sharded Data Parallel). Supports distributed training, checkpointing, and comprehensive monitoring. 
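Example launch (illustrative sketch only; all paths and values below are
placeholders, see examples/pretrain_stg1.sh and examples/posttrain_sft.sh
for the actual recipes):

    torchrun --nproc_per_node=8 recipes/train_qwen3.py \
        --model_dir /path/to/qwen3_base \
        --dataset_config examples/dataset_config/pretrain.json \
        --output_dir ./checkpoints \
        --num_training_steps 10000 \
        --num_warmup_steps 500 \
        --learning_rate 2e-4 \
        --max_length 8192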
""" import os import sys sys.path.append("./onerec_llm/models") import argparse import collections import contextlib import datetime import gc import itertools import json import logging import queue import threading import time from functools import partial from typing import Dict, Optional, Tuple import torch import torch.distributed as dist from accelerate import init_empty_weights from torch.distributed.device_mesh import DeviceMesh, init_device_mesh from torch.utils.tensorboard import SummaryWriter from transformers import AutoConfig, AutoTokenizer from onerec_llm.data.dataloaders import get_dataloader from onerec_llm.losses import CrossEntropyLoss, ChunkedLossComputer from onerec_llm.models.qwen3.modeling_qwen3 import Qwen3ForCausalLM from onerec_llm.training.activations import set_activation_checkpointing from onerec_llm.training.checkpoint import ( AppState, DistributedCheckpointer, load_hf_checkpoint, ) from onerec_llm.training.common import set_default_dtype from onerec_llm.training.gradients import ( EmbeddingGradientMasker, clip_grad_by_value, clip_grad_norm, compute_fsdp_zero2_grad_norm, ) from onerec_llm.training.distributed import ( load_from_full_model_state_dict, shard_model, ) from onerec_llm.training.lr_schedulers import get_scheduler from onerec_llm.utils.common import ( Timer, dist_reduce_dict, get_optimizer_grouped_parameters, print_rank_0, set_random_seed, to_cuda, ) from onerec_llm.utils.ds_utils import format_dict_or_list, print_input_info from onerec_llm.utils.mfu_stats import MFUStats from onerec_llm.utils.time_tracker import TimeTracker # Disable garbage collection for performance gc.disable() # Set CUDA memory allocation configuration os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" # Process group timeout (24 hours) PROCESS_GROUP_TIMEOUT = datetime.timedelta(minutes=60 * 24) # Configure logging logging.basicConfig( level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s' ) logger = logging.getLogger(__name__) class TrainingMetrics: """Manages training metrics accumulation and statistics. 
This class tracks metrics in two ways: - Period metrics (period_*): Accumulated over a logging period (logging_per_step steps) - Total metrics (total_*): Accumulated over the entire training run """ def __init__(self): self.reset_period_accumulators() # Total metrics accumulated over entire training self.total_num_tokens = 0 self.total_num_samples = 0 self.total_num_valid_tokens = 0 self.total_data_source_tokens = collections.defaultdict(int) self.local_period_data_source_samples = collections.defaultdict(int) def reset_period_accumulators(self): """Reset accumulated metrics for the current logging period.""" # Period metrics: accumulated over logging_per_step steps self.period_sum_loss = 0.0 self.period_sum_itemic_token_loss = 0.0 self.period_sum_text_token_loss = 0.0 self.period_num_tokens = 0 self.period_num_samples = 0 self.period_num_valid_tokens = 0 self.period_data_source_loss = collections.defaultdict(float) self.period_data_source_tokens = collections.defaultdict(int) self.period_valid_data_source_tokens = collections.defaultdict(int) # Track number of steps in current period for averaging self.period_num_steps = 0 def update(self, num_tokens, num_samples, num_valid_tokens): """Update both period and total metrics.""" # Update period metrics (for current logging period) self.period_num_tokens += num_tokens self.period_num_samples += num_samples self.period_num_valid_tokens += num_valid_tokens # Update total metrics (for entire training) self.total_num_tokens += num_tokens self.total_num_samples += num_samples self.total_num_valid_tokens += num_valid_tokens class TensorBoardLogger: """Manages TensorBoard logging in a separate thread.""" def __init__(self, tb_writer: Optional[SummaryWriter]): self.tb_writer = tb_writer self.metrics_queue = queue.Queue(maxsize=8) self.thread = None if tb_writer is not None and dist.get_rank() == 0: self.thread = threading.Thread( target=self._write_async, args=(tb_writer, self.metrics_queue), daemon=True ) self.thread.start() def _write_async(self, tb_writer, metrics_queue): """Async TensorBoard writer thread.""" while True: global_step, log_dict, ticker_stats, ds_loss, ds_tokens, ds_samples = metrics_queue.get() total_num_samples = log_dict["perf/total_num_samples"] total_num_valid_tokens = log_dict["perf/valid_total_num_tokens"] # Log main metrics for name, data in log_dict.items(): if data is not None and tb_writer: tb_writer.add_scalar( name, data, global_step=global_step, new_style=True ) # Log training metrics by valid tokens if name.startswith("training/"): tb_writer.add_scalar( f"x_token_{name}", data, global_step=total_num_valid_tokens, new_style=True ) # Log ticker stats for name, data in ticker_stats.items(): tb_writer.add_scalar( f"ticker/{name}", data, global_step=global_step, new_style=True ) # Log data source metrics if ds_loss and tb_writer: for key, loss_sum in ds_loss.items(): tb_writer.add_scalar( f"data_source_loss/{key}", loss_sum / (ds_tokens.get(key, 0) + 1e-6), global_step=global_step, new_style=True ) if ds_samples and tb_writer: for key, samples in ds_samples.items(): tb_writer.add_scalar( f"data_source_sample_ratio/{key}", 1.0 * samples / total_num_samples, global_step=global_step, new_style=True ) total_tokens = sum(ds_tokens.values()) if total_tokens > 0: for key, num_tokens in ds_tokens.items(): tb_writer.add_scalar( f"data_source_token_ratio/{key}", 1.0 * num_tokens / total_tokens, global_step=global_step, new_style=True ) def log(self, global_step, log_dict, ticker_stats, ds_loss, ds_tokens, ds_samples): """Queue 
metrics for async logging.""" if self.tb_writer is not None: self.metrics_queue.put(( global_step, log_dict, ticker_stats, ds_loss, ds_tokens, ds_samples )) def get_argument_parser() -> argparse.ArgumentParser: """Create and configure argument parser.""" parser = argparse.ArgumentParser(description="Qwen3 Training Script") # Checkpoint arguments parser.add_argument("--model_dir", type=str, default=None, help="Directory of the pretrained model") parser.add_argument("--resume_from", type=str, default=None, help="Checkpoint directory to resume from") parser.add_argument("--resume_from_tag", type=str, default=None, help="Checkpoint tag to resume from") parser.add_argument("--resume_training_state", action="store_true", help="Whether to resume training state including optimizer, scheduler, and dataloader") parser.add_argument("--use_fp32_weight", action="store_true", help="Use fp32 for model weight updating") parser.add_argument("--use_fp32_reduce", action="store_true", help="Use fp32 for gradient reduction") parser.add_argument("--reshard_after_forward", action="store_true", help="Enable reshard_after_forward to enable Zero3") parser.add_argument("--save_checkpoint_per_step", type=int, default=1000, help="Number of steps to save a checkpoint") parser.add_argument("--output_dir", type=str, default=None, help="Directory to write the trained model") parser.add_argument("--model_class", type=str, default="Qwen3ForCausalLM", help="Model class name") # Dataset arguments parser.add_argument("--dataset_config", type=str, default=None, help="Path to dataset configuration JSON file") parser.add_argument("--max_length", type=int, default=None, help="Max tokens per sentence") parser.add_argument("--minibatch_size", type=int, default=4096, help="Minibatch size") parser.add_argument("--start_optimize_embedding_index", type=int, default=0, help="Start optimize embedding index for finetuning") # Learning rate arguments parser.add_argument("--lr_scheduler_type", type=str, default="cosine_with_min_lr", help="Learning rate scheduler type") parser.add_argument("--num_warmup_steps", type=int, default=0, help="Number of warmup steps") parser.add_argument("--num_training_steps", type=int, default=1000, help="Number of training steps") parser.add_argument("--min_lr", type=float, default=1e-6, help="Minimum learning rate after cosine schedule") # Optimizer arguments parser.add_argument("--learning_rate", type=float, default=2e-4, help="Peak learning rate") parser.add_argument("--weight_decay", type=float, default=0.1, help="Weight decay for AdamW") parser.add_argument("--beta1", type=float, default=0.9, help="Beta1 for AdamW") parser.add_argument("--beta2", type=float, default=0.95, help="Beta2 for AdamW") # Training arguments parser.add_argument("--use_tie_weights", action="store_true", help="Tie embedding and lm_head weights") # parser.add_argument("--clip_range", type=float, default=None, # help="Gradient clipping range") parser.add_argument("--max_grad_norm", type=float, default=1.0, help="Max gradient norm (global L2); set to 0 to disable") parser.add_argument("--freeze_llm", action="store_true", help="Freeze all LLM parameters") parser.add_argument("--enable_gradient_checkpointing", action="store_true", help="Enable gradient checkpointing") parser.add_argument("--allow_random_init_params", type=str, default='', help="Allow random initialization for specified parameters") parser.add_argument("--logging_per_step", type=int, default=100, help="Number of steps to log training info") parser.add_argument("--seed", 
type=int, default=123, help="Random seed") parser.add_argument("--monitor_datasource_loss", action="store_true", help="Monitor loss of each datasource") parser.add_argument("--monitor_datasource_cnt", action="store_true", help="Monitor count of each datasource") parser.add_argument("--use_chunked_loss_computer", action="store_true", help="Use chunked loss computer") # Profiling arguments parser.add_argument("--enable_profiler", action="store_true", help="Enable PyTorch profiler for performance analysis") return parser class StateDictConverter: """Converter for state dict transformations (identity by default).""" def convert(self, state_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]: """Convert state dict (e.g., for loading).""" return state_dict def revert(self, state_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]: """Revert state dict (e.g., for saving).""" return state_dict def _init_profiler(output_dir: str, enable: bool = False) -> Optional[torch.profiler.profile]: """Initialize PyTorch profiler. Args: output_dir: Directory to save profiler traces enable: Whether to enable the profiler. If False, returns None. Returns: PyTorch profiler instance if enabled, None otherwise. """ if not enable: return None if not os.path.exists(output_dir): if dist.get_rank() == 0: os.makedirs(output_dir, exist_ok=True) def trace_handler(prof): prof.export_chrome_trace( os.path.join(output_dir, f"{prof.step_num}_w{dist.get_rank()}.json") ) # Profiler schedule: wait 50 steps, warmup 1 step, profile 10 steps, repeat once # This avoids profiling initialization overhead and captures representative performance return torch.profiler.profile( activities=[ torch.profiler.ProfilerActivity.CPU, torch.profiler.ProfilerActivity.CUDA, ], schedule=torch.profiler.schedule(wait=50, warmup=1, active=10, repeat=1), on_trace_ready=trace_handler, ) def save_model_checkpoint( save_dir: str, tag: str, global_step: int, optimizer: torch.optim.Optimizer, lr_scheduler, dataloader: Optional[object], app_state: AppState, dist_checkpointer: DistributedCheckpointer, ) -> None: """Save FSDP+TP model checkpoint. 
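# Illustrative, self-contained sketch of how the profiler schedule configured in
# _init_profiler above advances: idle for 50 steps, warm up for 1, record 10,
# then stop (repeat=1). prof.step() must be called once per training iteration,
# otherwise the schedule never moves past the wait phase.

import torch

with torch.profiler.profile(
    activities=[torch.profiler.ProfilerActivity.CPU],
    schedule=torch.profiler.schedule(wait=50, warmup=1, active=10, repeat=1),
    on_trace_ready=lambda prof: print(f"trace ready at step {prof.step_num}"),
) as prof:
    for _ in range(70):
        # one training step would run here
        prof.step()  # advances the wait/warmup/active state machine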
Args: save_dir: Save directory tag: Checkpoint tag global_step: Global training step optimizer: Optimizer instance lr_scheduler: Learning rate scheduler dataloader: Optional dataloader for state saving app_state: Application state dist_checkpointer: Distributed checkpointer """ if dist.get_rank() == 0: os.makedirs(save_dir, exist_ok=True) ckpt_path = os.path.join(save_dir, tag) if dist.get_rank() == 0: os.makedirs(ckpt_path, exist_ok=True) with open(os.path.join(save_dir, "latest"), "w") as f: f.write(tag) try: # Save model checkpoint dist_checkpointer.save_checkpoint( state_dict={"app": app_state}, output_dir=ckpt_path, tag=str(global_step) ) # Save dataloader state if dataloader is not None: try: dataloader_state = {"dataloader_state_dict": dataloader.state_dict()} dataloader_path = os.path.join(ckpt_path, "dataloader_ckpt") if dist.get_rank() == 0: os.makedirs(dataloader_path, exist_ok=True) dist.barrier() torch.save( dataloader_state, os.path.join(dataloader_path, f"rank{dist.get_rank()}.pt") ) print_rank_0(f"Saved dataloader state to {dataloader_path}") except Exception as e: logger.error(f"Failed to save dataloader state: {e}", exc_info=True) # Save optimizer and scheduler state optimizer_path = os.path.join(ckpt_path, "optimizer_ckpt") optimizer_state = { "optimizer_state_dict": optimizer.state_dict(), "scheduler_state_dict": lr_scheduler.state_dict(), } if dist.get_rank() == 0: os.makedirs(optimizer_path, exist_ok=True) dist.barrier() torch.save( optimizer_state, os.path.join(optimizer_path, f"rank{dist.get_rank()}.pt") ) print_rank_0(f"Saved optimizer state to {optimizer_path}") except Exception as e: logger.error(f"Failed to save checkpoint: {e}", exc_info=True) raise finally: dist.barrier() def initialize_distributed() -> Tuple[int, int, int]: """Initialize distributed training environment. Returns: Tuple of (rank, world_size, local_rank) """ rank = int(os.environ.get("OMPI_COMM_WORLD_RANK", 0)) world_size = int(os.environ.get("OMPI_COMM_WORLD_SIZE", 0)) local_rank = int(os.environ.get("OMPI_COMM_WORLD_LOCAL_RANK", 0)) torch.cuda.set_device(local_rank) torch.distributed.init_process_group( rank=rank, world_size=world_size, timeout=PROCESS_GROUP_TIMEOUT ) return rank, world_size, local_rank def initialize_model( args, device_mesh: DeviceMesh, state_dict: Optional[Dict[str, torch.Tensor]], converter: StateDictConverter, ) -> torch.nn.Module: """Initialize and shard model. 
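# Sketch of the per-rank layout save_model_checkpoint above produces: optimizer
# and dataloader state are written as one file per rank, so resuming assumes the
# same world size. The helper below is illustrative only (not part of the code).

import os

def rank_state_path(ckpt_path: str, component: str, rank: int) -> str:
    return os.path.join(ckpt_path, f"{component}_ckpt", f"rank{rank}.pt")

assert rank_state_path("out/step100", "optimizer", 3) == os.path.join(
    "out/step100", "optimizer_ckpt", "rank3.pt"
)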
Args: args: Training arguments device_mesh: Device mesh for distributed training state_dict: Optional pretrained state dict converter: State dict converter Returns: Initialized and sharded model """ # Create model on meta device with set_default_dtype(torch.bfloat16), torch.device("meta"), init_empty_weights(): config = AutoConfig.from_pretrained(args.model_dir, trust_remote_code=True) config._attn_implementation = "flash_attention_2" config.use_cache = False config.chunked_loss_computer = args.use_chunked_loss_computer model = eval(args.model_class)(config) # Verify all parameters are on meta device for tensor in itertools.chain(model.parameters(), model.buffers()): assert tensor.device == torch.device("meta"), "All tensors must be on meta device" # Enable gradient checkpointing if requested if args.enable_gradient_checkpointing: print_rank_0("Enable gradient checkpointing") set_activation_checkpointing( model, auto_wrap_policy=eval(args.model_class).wrap_modules ) # Convert to fp32 if needed if args.use_fp32_weight: model = model.float() # Shard model with FSDP shard_model( model=model, cpu_offload=False, reshard_after_forward=args.reshard_after_forward, dp_mesh=device_mesh, fp32_weight=args.use_fp32_weight, model_class=args.model_class, fp32_reduce=args.use_fp32_reduce ) dist.barrier() # Load state dict with Timer("Load state dict"): load_from_full_model_state_dict( model=model, full_sd=state_dict, allow_random_init_params=args.allow_random_init_params, use_tie_weights=args.use_tie_weights ) # Tie weights if requested # Sharing weights between embedding and output projection can reduce parameters # and improve training stability for some models if args.use_tie_weights: model.lm_head.weight = model.model.embed_tokens.weight # Verify weight tying: check if there are any differences (should be ~0) diff_weight = model.lm_head.weight - model.model.embed_tokens.weight diff_weight_cnt = (diff_weight.full_tensor().abs() > 1e-6).float().sum() print_rank_0( f"diff_weight_cnt: {diff_weight_cnt.item()}, " f"diff_weight_ratio: {diff_weight_cnt.item() / model.lm_head.weight.numel():.4f}" ) # Initialize RoPE with torch.device(torch.cuda.current_device()): for m in model.modules(): if hasattr(m, "rope_init"): print_rank_0("Initialize RoPE") m.rope_init() elif hasattr(m, "inv_freq"): print_rank_0(f"Initialize RoPE inv_freq for {m.__class__.__name__}") from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS rope_type = getattr(m, "rope_type", "default") rope_init_fn = ROPE_INIT_FUNCTIONS[rope_type] inv_freq, attention_scaling = rope_init_fn( m.config, device=torch.cuda.current_device() ) m.register_buffer("inv_freq", inv_freq, persistent=False) m.attention_scaling = attention_scaling # Freeze parameters if requested # When freeze_llm is enabled, only embedding and output head are trainable # This is useful for embedding-only fine-tuning or when using start_optimize_embedding_index if args.freeze_llm: assert args.start_optimize_embedding_index > 0 for name, param in model.named_parameters(): if "embed_tokens" in name or "lm_head" in name: param.requires_grad = True # Only embeddings and output head are trainable else: param.requires_grad = False # Freeze all transformer layers # Print trainable parameters for name, param in model.named_parameters(): if param.requires_grad: print_rank_0(f"Trainable parameter: {name}") print_rank_0("=" * 50) return model def load_model_checkpoint( args, app_state: AppState, dist_checkpointer: DistributedCheckpointer, converter: StateDictConverter, ) -> None: 
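# Minimal sketch of the meta-device pattern initialize_model above relies on:
# modules are first built without allocating storage, and weights are
# materialized later (here via to_empty; the trainer instead shards the model
# with FSDP and loads a full state dict into it).

import torch
import torch.nn as nn

with torch.device("meta"):
    layer = nn.Linear(4096, 4096)        # no real memory is allocated
assert layer.weight.device == torch.device("meta")
layer = layer.to_empty(device="cpu")     # allocate storage; values are uninitialized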
"""Load model checkpoint from distributed checkpoint. Args: args: Training arguments app_state: Application state dist_checkpointer: Distributed checkpointer converter: State dict converter """ ckpt_path = os.path.join(args.resume_from, args.resume_from_tag) if not os.path.exists(ckpt_path): raise ValueError(f"Checkpoint path {ckpt_path} does not exist") state_dict = {"app": app_state.set_call_back(converter.convert)} dist_checkpointer.load_checkpoint( state_dict=state_dict, checkpoint_dir=args.resume_from, tag=args.resume_from_tag ) print_rank_0("Successfully loaded model using distributed checkpoint") def load_optimizer_checkpoint( args, optimizer: torch.optim.Optimizer, lr_scheduler, ) -> None: """Load optimizer and scheduler state from checkpoint. Args: args: Training arguments optimizer: Optimizer instance lr_scheduler: Learning rate scheduler """ optimizer_state_dict_path = os.path.join( args.resume_from, "optimizer_ckpt", f"rank{dist.get_rank()}.pt" ) if os.path.exists(optimizer_state_dict_path): optimizer_state_dict = torch.load(optimizer_state_dict_path) lr_scheduler.load_state_dict(optimizer_state_dict["scheduler_state_dict"]) optimizer.load_state_dict(optimizer_state_dict["optimizer_state_dict"]) print_rank_0(f"Successfully loaded optimizer and scheduler state from {optimizer_state_dict_path}") else: print_rank_0(f"Warning: Optimizer checkpoint {optimizer_state_dict_path} does not exist") def load_dataloader_checkpoint(args) -> Optional[Dict]: """Load dataloader state from checkpoint. Args: args: Training arguments Returns: Dataloader state dict if found, None otherwise """ dataloader_resume_path = os.path.join( args.resume_from, "dataloader_ckpt", f"rank{dist.get_rank()}.pt" ) if os.path.exists(dataloader_resume_path): try: dataloader_state_dict = torch.load(dataloader_resume_path)["dataloader_state_dict"] print_rank_0(f"Successfully loaded dataloader state from {dataloader_resume_path}") return dataloader_state_dict except Exception as e: print_rank_0(f"Error loading dataloader checkpoint: {e}") return None else: print_rank_0(f"Warning: Dataloader checkpoint {dataloader_resume_path} does not exist") print_rank_0("Will start training without resuming dataloader state") return None def load_checkpoint( args, app_state: AppState, dist_checkpointer: DistributedCheckpointer, converter: StateDictConverter, optimizer: torch.optim.Optimizer, lr_scheduler, ) -> Tuple[Optional[Dict], int]: """Load checkpoint if resuming training. This function orchestrates loading of model, optimizer, scheduler, and dataloader checkpoints. It delegates to specialized functions for each component. 
Args: args: Training arguments app_state: Application state dist_checkpointer: Distributed checkpointer converter: State dict converter optimizer: Optimizer instance lr_scheduler: Learning rate scheduler Returns: Tuple of (dataloader_state_dict, global_step) """ dataloader_state_dict = None global_step = 0 if args.resume_from_tag: ckpt_path = os.path.join(args.resume_from, args.resume_from_tag) if args.resume_training_state: global_step = int(args.resume_from_tag.split("step")[-1]) print_rank_0( f"Resume from checkpoint: {ckpt_path}, global_step={global_step}" ) else: print_rank_0( f"Resume model weights only from checkpoint: {ckpt_path}, " "global_step stays at 0" ) # Load model checkpoint load_model_checkpoint(args, app_state, dist_checkpointer, converter) # Load optimizer, scheduler, and dataloader state if requested # Note: resume_training_state controls whether to restore the full training state # including optimizer momentum, scheduler step, and dataloader position. # This allows seamless continuation of training from a checkpoint. if args.resume_training_state: load_optimizer_checkpoint(args, optimizer, lr_scheduler) dataloader_state_dict = load_dataloader_checkpoint(args) return dataloader_state_dict, global_step def compute_forward_backward( model: torch.nn.Module, batch: Dict, compute_loss_fn, loss_fn: CrossEntropyLoss, args, embedding_masker: Optional[EmbeddingGradientMasker], optimizer: torch.optim.Optimizer, ) -> Tuple[torch.Tensor, torch.Tensor]: """Compute forward and backward pass. Args: model: Model instance batch: Input batch compute_loss_fn: Loss computation function loss_fn: Loss function instance args: Training arguments embedding_masker: Optional embedding gradient masker optimizer: Optimizer instance Returns: Tuple of (loss, per_token_loss) """ input_ids = batch["input_ids"] loss_mask = batch["loss_mask"] attention_mask = batch.get("attention_mask", None) cu_seqlens = batch.get("cu_seqlens", None) position_ids = batch.get("position_ids", None) # Prepare labels # Zero out padding tokens (input_ids <= 0) to avoid computing loss on them input_ids = input_ids * (input_ids > 0).to(torch.int64, non_blocking=True) # Forward pass with Timer("Fwd"): output = model( input_ids=input_ids, attention_mask=attention_mask, labels=None, cu_seqlens=cu_seqlens, position_ids=position_ids, ) logits = output.logits # Shift labels for next token prediction # For causal LM, we predict token[i] given tokens[0:i], so labels need to be shifted # by one position: label[i] should correspond to input[i+1] pad = torch.full( (input_ids.shape[0], 1), loss_fn.ignore_index, dtype=input_ids.dtype ).to(device=input_ids.device, non_blocking=True) labels = torch.cat([input_ids[:, 1:], pad], dim=-1) # Update labels: use input_ids where loss_mask==1, ignore_index where loss_mask==0 # This allows selective loss computation on specific tokens (e.g., excluding special tokens) labels = labels * loss_mask + loss_fn.ignore_index * (1 - loss_mask) loss, per_token_loss = compute_loss_fn(logits, labels=labels) per_token_loss = per_token_loss.to(loss.device) # Backward pass with Timer("bwd"): loss.backward() # Apply gradient mask for embedding layers if needed # When start_optimize_embedding_index > 0, only embeddings with index >= threshold are trainable # This allows progressive unfreezing of embeddings during training if args.start_optimize_embedding_index > 0 and embedding_masker is not None: embedding_masker.apply_gradient_mask(optimizer) # clip_grad_by_value(model, args.clip_range) if args.max_grad_norm and 
args.max_grad_norm > 0: clip_grad_norm(model, args.max_grad_norm) return loss, per_token_loss def compute_metrics( batch: Dict, loss: torch.Tensor, per_token_loss: torch.Tensor, loss_mask: torch.Tensor, loss_fn: CrossEntropyLoss, args, metrics: TrainingMetrics, ) -> Tuple[float, float, float, int, int, int]: """Compute and accumulate training metrics. Args: batch: Input batch loss: Loss tensor per_token_loss: Per-token loss tensor loss_mask: Loss mask tensor loss_fn: Loss function instance args: Training arguments metrics: Training metrics tracker Returns: Tuple of (avg_loss, avg_itemic_token_loss, avg_text_token_loss, num_tokens, num_samples, num_valid_tokens) """ input_ids = batch["input_ids"] cu_seqlens = batch.get("cu_seqlens", None) itemic_id_mask = batch.get("itemic_id_mask", None) data_source = batch.get("data_source", None) sample_idx = batch["sample_idx"] # Compute token metrics token_count = input_ids.numel() num_samples = len(cu_seqlens) - 1 if cu_seqlens is not None else 1 # Calculate number of valid tokens (tokens with loss_mask == 1) # Works for both 1D (flattened) and 2D (batch, seq_len) loss_mask num_valid_tokens = (loss_mask == 1).sum().item() # Aggregate metrics across all ranks token_metrics = torch.tensor( [token_count, num_samples, num_valid_tokens] ).cuda(non_blocking=True) dist.all_reduce( token_metrics, op=dist.ReduceOp.SUM, group=None ) num_tokens, num_samples, num_valid_tokens = ( token_metrics.detach().cpu().numpy() ) # Update metrics metrics.update(num_tokens, num_samples, num_valid_tokens) metrics.period_num_steps += 1 # Compute average loss for this step avg_loss = loss.detach() dist.all_reduce(avg_loss, op=dist.ReduceOp.SUM) avg_loss = avg_loss.item() / dist.get_world_size() metrics.period_sum_loss += avg_loss # Compute itemic and text token losses if itemic_id_mask is not None: itemic_id_mask = itemic_id_mask.view(per_token_loss.shape) avg_itemic_token_loss = (itemic_id_mask * per_token_loss).sum() / (itemic_id_mask.sum() + 1e-6) avg_text_token_loss = ((1 - itemic_id_mask) * per_token_loss).sum() / ((1 - itemic_id_mask).sum() + 1e-6) dist.all_reduce(avg_itemic_token_loss, op=dist.ReduceOp.SUM) dist.all_reduce(avg_text_token_loss, op=dist.ReduceOp.SUM) avg_itemic_token_loss = avg_itemic_token_loss.item() / dist.get_world_size() avg_text_token_loss = avg_text_token_loss.item() / dist.get_world_size() else: avg_itemic_token_loss = 0.0 avg_text_token_loss = avg_loss metrics.period_sum_itemic_token_loss += avg_itemic_token_loss metrics.period_sum_text_token_loss += avg_text_token_loss # Monitor data source metrics if args.monitor_datasource_loss and data_source is not None: local_sample_idx = sample_idx.squeeze() unique_sample_idx = local_sample_idx.unique() # Get local loss mask for valid token counting local_loss_mask = loss_mask.squeeze() for s_idx in unique_sample_idx: if s_idx < 0: continue mask = local_sample_idx == s_idx sum_loss = per_token_loss[mask].sum() key = data_source[int(s_idx.item())] metrics.period_data_source_loss[key] += sum_loss.item() metrics.period_data_source_tokens[key] += mask.sum().item() # Count valid tokens using loss mask metrics.period_valid_data_source_tokens[key] += ( mask[local_loss_mask != 0].sum().item() ) if args.monitor_datasource_cnt and data_source is not None: for data_source_name in data_source: metrics.local_period_data_source_samples[data_source_name] += 1 return avg_loss, avg_itemic_token_loss, avg_text_token_loss, int(num_tokens), int(num_samples), int(num_valid_tokens) def log_training_step( global_step: int, 
metrics: TrainingMetrics, args, lr_scheduler, grad_norm: float, period_start_time: float, training_start_time: float, mfu_stats: MFUStats, step_time_tracker: TimeTracker, iteration_time_tracker: TimeTracker, epoch_idx: int, tb_logger: TensorBoardLogger, chunked_loss_computer: Optional[ChunkedLossComputer], ) -> float: """Log training step metrics. Args: global_step: Global training step metrics: Training metrics tracker args: Training arguments lr_scheduler: Learning rate scheduler grad_norm: Gradient norm period_start_time: Start time of the current logging period training_start_time: Start time of the entire training run mfu_stats: MFU statistics tracker step_time_tracker: Time tracker for training steps (tracks forward/backward/optimizer) iteration_time_tracker: Time tracker for data iteration (tracks data loading) epoch_idx: Current epoch index tb_logger: TensorBoard logger chunked_loss_computer: Optional chunked loss computer Returns: Updated period_start_time for next logging period """ end_time = time.time() model_lrs = lr_scheduler.get_last_lr() learning_rate = model_lrs[0] # Compute performance metrics for the logging period period_duration = end_time - period_start_time period_num_steps = max(metrics.period_num_steps, 1) # Avoid division by zero # Current period metrics (_current): reflect performance in the current logging period # These metrics show recent performance and can fluctuate with short-term variations sec_per_step = period_duration / period_num_steps tokens_per_sec_per_gpu_current = ( metrics.period_num_tokens / period_duration / dist.get_world_size() ) samples_per_sec_per_gpu_current = ( metrics.period_num_samples / period_duration / dist.get_world_size() ) samples_per_step_per_gpu_current = ( metrics.period_num_samples / period_num_steps / dist.get_world_size() ) valid_tokens_per_sec_per_gpu_current = ( metrics.period_num_valid_tokens / period_duration / dist.get_world_size() ) # Average metrics (_avg): reflect average performance over entire training # These metrics smooth out short-term fluctuations and include all overhead # (checkpoint saving, logging, etc.), providing a more stable view of overall performance samples_per_step_per_gpu_avg = ( metrics.total_num_samples / dist.get_world_size() / max(global_step, 1) ) samples_per_sec_per_gpu_avg = ( metrics.total_num_samples / dist.get_world_size() / (end_time - training_start_time) ) tokens_per_step_per_gpu_avg = ( metrics.total_num_tokens / dist.get_world_size() / max(global_step, 1) ) tokens_per_sec_per_gpu_avg = ( metrics.total_num_tokens / dist.get_world_size() / (end_time - training_start_time) ) # Compute average losses over the logging period avg_loss = metrics.period_sum_loss / period_num_steps avg_itemic_token_loss = metrics.period_sum_itemic_token_loss / period_num_steps avg_text_token_loss = metrics.period_sum_text_token_loss / period_num_steps # Reduce data source metrics across all ranks period_data_source_loss = dist_reduce_dict(metrics.period_data_source_loss) period_data_source_tokens = dist_reduce_dict(metrics.period_data_source_tokens) period_valid_data_source_tokens = dist_reduce_dict(metrics.period_valid_data_source_tokens) total_data_source_samples = dist_reduce_dict( metrics.local_period_data_source_samples, group=None ) # Update total data source tokens for ds_key, ds_num_tokens in period_data_source_tokens.items(): metrics.total_data_source_tokens[ds_key] += ds_num_tokens # Build log dictionary log_dict = { "training/loss": avg_loss, "training/itemic_token_loss": avg_itemic_token_loss, 
"training/text_token_loss": avg_text_token_loss, "training/grad_norm": grad_norm, "training/learning_rate": learning_rate, "perf/sec_per_step": sec_per_step, "perf/tokens_per_sec_per_gpu_current": tokens_per_sec_per_gpu_current, "perf/samples_per_sec_per_gpu_current": samples_per_sec_per_gpu_current, "perf/total_num_tokens": metrics.total_num_tokens, "perf/total_num_samples": metrics.total_num_samples, "perf/num_sample_per_gpu": metrics.total_num_samples / dist.get_world_size(), "perf/samples_per_step_per_gpu_current": samples_per_step_per_gpu_current, # Note: num_sample_per_sec_per_gpu is the same as samples_per_sec_per_gpu_current # Keeping for backward compatibility, but samples_per_sec_per_gpu_current should be used "perf/num_sample_per_sec_per_gpu": samples_per_sec_per_gpu_current, "perf/valid_total_num_tokens": metrics.total_num_valid_tokens, "perf/valid_tokens_per_sec_per_gpu_current": valid_tokens_per_sec_per_gpu_current, "perf/valid_token_ratio": metrics.total_num_valid_tokens / metrics.total_num_tokens, **mfu_stats.mfu(period_duration, global_step), "perf/samples_per_step_per_gpu_avg": samples_per_step_per_gpu_avg, "perf/samples_per_sec_per_gpu_avg": samples_per_sec_per_gpu_avg, "perf/tokens_per_step_per_gpu_avg": tokens_per_step_per_gpu_avg, "perf/tokens_per_sec_per_gpu_avg": tokens_per_sec_per_gpu_avg, "perf/epoch_idx": epoch_idx, } # Get ticker statistics ticker_stats = {} for t in [step_time_tracker, iteration_time_tracker]: ticker_stats.update(t.stat()) # Log to TensorBoard tb_logger.log( global_step, log_dict, ticker_stats, period_data_source_loss if args.monitor_datasource_loss else {}, period_data_source_tokens if args.monitor_datasource_cnt else {}, total_data_source_samples if args.monitor_datasource_cnt else {}, ) # Print to console print_rank_0( f"Step: {global_step}, Loss: {avg_loss:.4f}, " f"Learning Rate: {learning_rate:.2e}, " f"Grad Norm: {grad_norm:.4f}, " f"Sec per Step: {sec_per_step:.4f}", format_dict_or_list(log_dict), "\n", format_dict_or_list({ "mfu_stats": mfu_stats.mfu_per_step_per_gpu, "step_time_tracker": step_time_tracker.stat() }), "\n", chunked_loss_computer.ticker.stat() if chunked_loss_computer else "", ) return end_time def train(): """Main training function.""" parser = get_argument_parser() args = parser.parse_args() # Validate arguments assert args.learning_rate > 0.0, "Learning rate must be positive" assert args.save_checkpoint_per_step > 0, "save_checkpoint_per_step must be positive" # Initialize distributed training rank, world_size, local_rank = initialize_distributed() device_mesh = init_device_mesh("cuda", mesh_shape=(dist.get_world_size(),)) set_random_seed(args.seed) # Load dataset configuration logger.info(f"Loading dataset config from: {args.dataset_config}") with open(args.dataset_config, encoding="utf-8") as f: dataset_config = json.loads(f.read()) dataset = dataset_config.pop("name") dataset_config["model_class"] = args.model_class if args.max_length: dataset_config["max_length"] = args.max_length # Load pretrained checkpoint converter = StateDictConverter() state_dict = None if dist.get_rank() == 0: with set_default_dtype(torch.bfloat16): state_dict = load_hf_checkpoint(args.model_dir) state_dict = converter.convert(state_dict) dist.barrier() # Save training arguments timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S') if dist.get_rank() == 0: args_dict = vars(args) args_str = json.dumps(args_dict, indent=4, ensure_ascii=False) print_rank_0(f"Training Arguments:\n{args_str}") os.makedirs(args.output_dir, 
exist_ok=True) with open( os.path.join(args.output_dir, f"args-{timestamp}.json"), 'w', encoding="utf-8" ) as f: f.write(args_str + "\n") # Initialize TensorBoard tb_writer = None if dist.get_rank() == 0: tb_writer = SummaryWriter(log_dir=os.path.join(args.output_dir, "log")) # Initialize model model = initialize_model(args, device_mesh, state_dict, converter) if state_dict is not None: del state_dict # Initialize optimizer optimizer_grouped_parameters = get_optimizer_grouped_parameters( model, learning_rate=args.learning_rate, weight_decay=args.weight_decay ) optimizer = torch.optim.AdamW( optimizer_grouped_parameters, lr=args.learning_rate, betas=(args.beta1, args.beta2), eps=1.0e-8 ) # Initialize embedding gradient masker # This allows selective training of embeddings based on token index # Useful for fine-tuning where only certain token embeddings should be updated embedding_masker = EmbeddingGradientMasker( model, model.config, args.start_optimize_embedding_index ) if args.start_optimize_embedding_index > 0: # Save frozen embedding parameters to restore after optimizer step # This prevents optimizer from updating frozen embeddings embedding_masker.save_frozen_params() # Initialize learning rate scheduler lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=args.num_training_steps, min_lr=args.min_lr ) # Initialize checkpointing app_state = AppState(model=model) dist_checkpointer = DistributedCheckpointer() # Load checkpoint if resuming dataloader_state_dict, global_step = load_checkpoint( args, app_state, dist_checkpointer, converter, optimizer, lr_scheduler ) dist.barrier() # Load tokenizer tokenizer = AutoTokenizer.from_pretrained(args.model_dir, trust_remote_code=True) # Save dataset configuration if dist.get_rank() == 0: with open( os.path.join(args.output_dir, f"dataset-{timestamp}.json"), 'w', encoding="utf-8" ) as f: f.write(json.dumps(dataset_config, ensure_ascii=False, indent=2) + "\n") # Build dataloader with Timer("Build dataloader"): try: dataloader = get_dataloader(name=dataset, **dataset_config) except Exception as e: logger.error(f"Failed to build dataloader: {e}", exc_info=True) raise if args.resume_training_state and dataloader_state_dict is not None: dataloader.load_state_dict(dataloader_state_dict) # Initialize profiler torch_profiler = _init_profiler( output_dir=os.path.join(args.output_dir, "torch_profile"), enable=args.enable_profiler ) # Initialize loss function loss_fn = CrossEntropyLoss( ignore_index=-100, return_token_loss=True, shift_labels=False ) compute_loss_fn = loss_fn chunked_loss_computer = None if args.use_chunked_loss_computer: chunked_loss_computer = ChunkedLossComputer( lm_head=model.lm_head, loss_fn=loss_fn, minibatch_size=args.minibatch_size, shift_labels=False ) compute_loss_fn = chunked_loss_computer.forward_and_backward # Initialize training state training_start_time = time.time() period_start_time = training_start_time remaining_debug_samples = 1 # Number of sample batches to print for debugging # Only reset global_step if not resuming from checkpoint # If resume_from_tag exists, global_step is already set in load_checkpoint if args.resume_from_tag is None: global_step = 0 metrics = TrainingMetrics() mfu_stats = MFUStats(args) # step_time_tracker: tracks time for training steps (forward/backward/optimizer) step_time_tracker = TimeTracker(n=args.logging_per_step) # iteration_time_tracker: tracks time for data iteration (data loading) iteration_time_tracker = 
TimeTracker(n=args.logging_per_step)
    tb_logger = TensorBoardLogger(tb_writer)

    # Create data iterator
    data_iter = iter(dataloader)
    get_next_batch = lambda: next(data_iter)

    # Training loop
    while True:
        with contextlib.ExitStack() as ctx:
            if torch_profiler:
                ctx.enter_context(torch_profiler)
            step_time_tracker.tick("enter_context(torch_profiler)")

            try:
                batch = get_next_batch()
            except StopIteration:
                break
            step_time_tracker.tick("next_batch")

            # Show sample data for debugging
            # Only print from the first 8 ranks to avoid log spam (rank 0-7)
            # Sleep based on rank to stagger output and make logs easier to read
            if remaining_debug_samples > 0 and dist.get_rank() < 8:
                with Timer("Show data"):
                    input_text = tokenizer.decode(batch['input_ids'][0])
                    # Stagger output by rank to avoid interleaved prints (0.3s per rank)
                    time.sleep(float(dist.get_rank()) * 0.3)
                    print(f"Input Text:\n\n{input_text}\n" + "=" * 100 + "\n\n")
                    print_input_info(batch, f"rank{dist.get_rank()}")
                    remaining_debug_samples -= 1

            # Move batch to CUDA
            to_cuda(batch)
            step_time_tracker.tick("to_cuda(batch)")

            # Update MFU stats
            token_count = batch["input_ids"].numel()
            num_samples = len(batch.get("cu_seqlens", [0, 1])) - 1
            mfu_stats.set(num_tokens=token_count, num_samples=num_samples)

            # Forward and backward pass
            loss, per_token_loss = compute_forward_backward(
                model, batch, compute_loss_fn, loss_fn, args, embedding_masker, optimizer
            )

            # Compute metrics
            epoch_idx = batch.get("epoch_idx", torch.tensor([0])).cpu().item()
            avg_loss, avg_itemic_token_loss, avg_text_token_loss, num_tokens, num_samples, num_valid_tokens = compute_metrics(
                batch, loss, per_token_loss, batch["loss_mask"], loss_fn, args, metrics
            )
            step_time_tracker.tick("compute_metrics")

            # Optimizer step
            grad_norm = compute_fsdp_zero2_grad_norm(model)
            optimizer.step()
            # Restore frozen parameters after optimizer step
            # This ensures frozen embeddings are not modified by the optimizer
            # even if they were included in the gradient computation
            if args.start_optimize_embedding_index > 0:
                embedding_masker.restore_frozen_params()
            lr_scheduler.step()
            optimizer.zero_grad()
            global_step += 1
            step_time_tracker.tick("optimizer.step")

            # Logging
            if global_step % args.logging_per_step == 0:
                period_start_time = log_training_step(
                    global_step, metrics, args, lr_scheduler, grad_norm,
                    period_start_time, training_start_time, mfu_stats,
                    step_time_tracker, iteration_time_tracker, epoch_idx,
                    tb_logger, chunked_loss_computer
                )
                metrics.reset_period_accumulators()

            # Save checkpoint
            # Save at regular intervals (save_checkpoint_per_step) and at early steps (20, 200)
            # Early checkpoints help verify training setup and catch issues early
            should_save = (
                (global_step % args.save_checkpoint_per_step == 0 and global_step > 0)
                or global_step == 20   # Early checkpoint for initial verification
                or global_step == 200  # Early checkpoint for training stability check
            )
            if should_save:
                torch.cuda.empty_cache()
                gc.collect()
                with Timer("save checkpoint"):
                    save_model_checkpoint(
                        save_dir=args.output_dir,
                        tag=f"step{global_step}",
                        global_step=global_step,
                        optimizer=optimizer,
                        lr_scheduler=lr_scheduler,
                        dataloader=dataloader,
                        app_state=app_state.set_call_back(converter.revert),
                        dist_checkpointer=dist_checkpointer
                    )
                step_time_tracker.tick(f"save_ckpt*{args.save_checkpoint_per_step}")

            iteration_time_tracker.tick("iteration_time_tracker")
            if torch_profiler:
                torch_profiler.step()

    # Save final checkpoint
    save_model_checkpoint(
        save_dir=args.output_dir,
        tag=f"step{global_step}",
        global_step=global_step,
        optimizer=optimizer,
        lr_scheduler=lr_scheduler,
        dataloader=dataloader,
        app_state=app_state.set_call_back(converter.revert),
        dist_checkpointer=dist_checkpointer
    )


if __name__ == "__main__":
    train()


================================================
FILE: pretrain/scripts/convert_checkpoint_to_hf.sh
================================================
#!/bin/bash
set -e

BASE_MODEL_DIR=$1
MODEL_HOME=$2
STEP=$3

CKPT_DIR=${MODEL_HOME}/step${STEP}/global_step${STEP}
OUTPUT_DIR=$CKPT_DIR/converted

python3 tools/model_converter/convert_checkpoint_to_hf.py --checkpoint_dir $CKPT_DIR \
    --output_dir $OUTPUT_DIR \
    --source_hf_model_path $BASE_MODEL_DIR


================================================
FILE: pretrain/scripts/expand_qwen3_vocab.sh
================================================
#!/bin/bash
set -e

HF_MODEL_DIR=/code/onerec_pretrain/hf_models/Qwen3-0.6B
OUTPUT_MODEL_DIR=/code/onerec_pretrain/hf_models/Qwen3-0.6B_itemic
ITEMIC_LAYER_N=3
VOCAB_SIZE_PER_LAYER=8192

python3 tools/model_converter/expand_qwen3_vocab.py \
    --hf_model_dir $HF_MODEL_DIR \
    --output_model_dir $OUTPUT_MODEL_DIR \
    --itemic_layer_n $ITEMIC_LAYER_N \
    --vocab_size_per_layer $VOCAB_SIZE_PER_LAYER


================================================
FILE: pretrain/scripts/killall.sh
================================================
#!/bin/bash
mpirun --allow-run-as-root --hostfile /etc/mpi/hostfile --pernode bash -c "pkill -9 python3"
mpirun --allow-run-as-root --hostfile /etc/mpi/hostfile --pernode bash -c "pkill -9 pt_main_thread"
mpirun --allow-run-as-root --hostfile /etc/mpi/hostfile --pernode bash -c "pkill -9 pt_data_worker"


================================================
FILE: pretrain/scripts/numa_runner.sh
================================================
#!/bin/bash
# Get local NUMA node count
num_numa=$(numactl -H | grep "node [0-9] cpus" | wc -l)
if [ "$num_numa" -lt 1 ]; then
    num_numa=1
fi

# Default to NUMA 0
numa_id=0
echo "Bind to NUMA node $numa_id"

# Bind memory and CPU to NUMA node 0 when running command
numactl --membind=$numa_id --cpunodebind=$numa_id "$@"


================================================
FILE: pretrain/scripts/test_cases_example.json
================================================
{
    "test_cases": [
        {
            "type": "text",
            "input": "你好,请介绍一下你自己。",
            "ground_truth": ""
        },
        {
            "type": "chat",
            "input": [
                {"role": "user", "content": "写一首关于春天的短诗:"}
            ],
            "ground_truth": ""
        },
        {
            "type": "chat",
            "input": [
                {"role": "system", "content": "你是一名视频描述生成器,请根据下面的视频token生成视频描述"},
                {"role": "user", "content": "这是一个视频:<|sid_begin|><|sid_end|>,帮我总结一下这个视频讲述了什么内容"}
            ],
            "ground_truth": ""
        }
    ]
}


================================================
FILE: pretrain/scripts/test_hf_model.sh
================================================
#!/bin/bash
# HuggingFace Model Testing Script
# Tests a HuggingFace model with text generation or chat mode
#
# Configuration:
#   - MODEL_PATH: Path to HuggingFace model directory
#   - TEST_FILE: Path to JSON test cases file (optional, use --use_default if not set)
#   - Generation parameters: MAX_NEW_TOKENS, TEMPERATURE, TOP_P, REPETITION_PENALTY
#   - Chat options: ENABLE_THINKING, SHOW_TEMPLATE, SHOW_INPUT_IDS
#   - Output: COMPARE_GROUND_TRUTH

set -e

# Model path - received from command line argument
MODEL_PATH="$1"

# Check if MODEL_PATH is empty
if [ -z "${MODEL_PATH}" ]; then
    echo "ERROR: MODEL_PATH cannot be empty"
    echo "Usage: $0 <MODEL_PATH>"
    echo "Example: $0 /path/to/model"
    exit 1
fi

# Check if model path exists
if [ ! -e "${MODEL_PATH}" ]; then
    echo "ERROR: model path does not exist: ${MODEL_PATH}"
    exit 1
fi

# Test case: use default or specify a test file
# Option 1: Use built-in default test cases
USE_DEFAULT=true
# Option 2: Use custom test file (comment out USE_DEFAULT and uncomment below)
# USE_DEFAULT=false
# TEST_FILE=tools/model_test/test_cases_example.json

# Generation parameters
MAX_NEW_TOKENS=1024
TEMPERATURE=0.7
TOP_P=0.9
REPETITION_PENALTY=1.2

# Chat mode options
ENABLE_THINKING=false
SHOW_TEMPLATE=false
SHOW_INPUT_IDS=false

# Output options
COMPARE_GROUND_TRUTH=false

# Device and data type
DEVICE=auto
DTYPE=bf16

# Build command
CMD="python3 tools/model_test/test_hf_model.py"
CMD="$CMD --model_path $MODEL_PATH"
CMD="$CMD --device $DEVICE"
CMD="$CMD --dtype $DTYPE"
CMD="$CMD --max_new_tokens $MAX_NEW_TOKENS"
CMD="$CMD --temperature $TEMPERATURE"
CMD="$CMD --top_p $TOP_P"
CMD="$CMD --repetition_penalty $REPETITION_PENALTY"

# Test case source
if [ "$USE_DEFAULT" = true ]; then
    CMD="$CMD --use_default"
elif [ -n "$TEST_FILE" ]; then
    CMD="$CMD --test_file $TEST_FILE"
fi

# Chat mode options
[ "$ENABLE_THINKING" = true ] && CMD="$CMD --enable_thinking"
[ "$SHOW_TEMPLATE" = true ] && CMD="$CMD --show_template"
[ "$SHOW_INPUT_IDS" = true ] && CMD="$CMD --show_input_ids"

# Output options
[ "$COMPARE_GROUND_TRUTH" = true ] && CMD="$CMD --compare_ground_truth"

# Execute
eval $CMD


================================================
FILE: pretrain/set_env.sh
================================================
#!/bin/bash
# Check if current shell is bash
if [ -z "$BASH_VERSION" ]; then
    echo "This script must be run with bash. Please run it with 'bash set_env.sh'." >&2
    exit 1
fi

# Get current script directory
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ENV_FILE="${SCRIPT_DIR}/.env"

# Check if .env file exists
if [ ! -f "${ENV_FILE}" ]; then
    echo "Error: ${ENV_FILE} not found" >&2
    exit 1
fi

# Load environment variables
set -a  # Automatically export all variables
source "${ENV_FILE}"
set +a  # Disable automatic export

# Print loaded environment variables
echo "Loaded environment variables from ${ENV_FILE}:"
cat "${ENV_FILE}"

# Install system dependencies
PIP_CMD='pip'
PROXY="http://oversea-squid1.jp.txyun:11080"
HOSTFILE="/etc/mpi/hostfile"

# Install numactl on all nodes
mpirun --allow-run-as-root \
    --hostfile "${HOSTFILE}" \
    -x http_proxy="${PROXY}" \
    -x https_proxy="${PROXY}" \
    --pernode \
    bash -c "apt-get install -y numactl"

# Install Python dependencies on all nodes
mpirun --allow-run-as-root \
    --hostfile "${HOSTFILE}" \
    --pernode \
    bash -c "${PIP_CMD} install transformers==4.53 && \
        ${PIP_CMD} install easydict && \
        ${PIP_CMD} install torchao==0.10 && \
        ${PIP_CMD} install sortedcontainers"


================================================
FILE: pretrain/tests/test_qwen3_dataset_file_distribution.py
================================================
"""
Test file distribution logic for Qwen3ChatCompletionParquetDataset in
multi-process, multi-worker scenarios

Validation points:
1. Each file is processed by only one worker (no duplication)
2. All files are processed (no omission)
3.
Works correctly under different rank and worker combinations """ import unittest from unittest.mock import patch, MagicMock import os import sys class TestFileDistribution(unittest.TestCase): """Test file distribution logic""" def setUp(self): """Set up test environment""" # Create mock file list self.data_files = [ (f"file_{i}.parquet", 0) for i in range(100) # 100 files, epoch=0 ] self.num_workers = 4 def _get_file_distribution(self, rank, world_size, worker, num_workers): """ Simulate file distribution logic, return file indices for this worker Args: rank: Process rank world_size: Total number of processes worker: Worker ID num_workers: Number of workers per process Returns: list: File index list """ total_num_workers = num_workers * world_size local_worker_idx = rank * num_workers + worker fn_list = [ idx for idx, fn in enumerate(self.data_files) if idx % total_num_workers == local_worker_idx ] return fn_list def test_file_distribution_no_overlap(self): """Test file distribution without overlap: each file is processed by only one worker""" world_size = 2 num_workers = 4 # Collect files assigned to all workers all_assigned_files = set() for rank in range(world_size): for worker in range(num_workers): assigned_files = self._get_file_distribution(rank, world_size, worker, num_workers) file_indices = set(assigned_files) # Check for overlap overlap = all_assigned_files & file_indices self.assertEqual( len(overlap), 0, f"Rank {rank}, Worker {worker} assigned files overlap with existing assignments: {overlap}" ) all_assigned_files.update(file_indices) # Verify all files are assigned total_files = len(self.data_files) self.assertEqual( len(all_assigned_files), total_files, f"File assignment incomplete: expected {total_files} files, actually assigned {len(all_assigned_files)}" ) def test_file_distribution_completeness(self): """Test file distribution completeness: all files are processed""" world_size = 2 num_workers = 4 all_assigned_files = set() for rank in range(world_size): for worker in range(num_workers): assigned_files = self._get_file_distribution(rank, world_size, worker, num_workers) all_assigned_files.update(assigned_files) # Verify all files are assigned expected_files = set(range(len(self.data_files))) self.assertEqual( all_assigned_files, expected_files, f"File assignment incomplete: missing files {expected_files - all_assigned_files}" ) def test_file_distribution_different_configs(self): """Test file distribution under different configurations""" test_configs = [ (1, 1), # Single process, single worker (1, 4), # Single process, 4 workers (2, 2), # 2 processes, 2 workers each (4, 2), # 4 processes, 2 workers each (2, 8), # 2 processes, 8 workers each ] for world_size, num_workers in test_configs: with self.subTest(world_size=world_size, num_workers=num_workers): all_assigned_files = set() for rank in range(world_size): for worker in range(num_workers): assigned_files = self._get_file_distribution( rank, world_size, worker, num_workers ) file_indices = set(assigned_files) # Check for overlap overlap = all_assigned_files & file_indices self.assertEqual( len(overlap), 0, f"Config (world_size={world_size}, num_workers={num_workers}), " f"Rank {rank}, Worker {worker} has overlap: {overlap}" ) all_assigned_files.update(file_indices) # Verify completeness expected_files = set(range(len(self.data_files))) self.assertEqual( all_assigned_files, expected_files, f"Config (world_size={world_size}, num_workers={num_workers}) " f"file assignment incomplete: missing {expected_files - 
all_assigned_files}" ) def test_file_distribution_balance(self): """Test file distribution load balancing (each worker should be assigned roughly equal number of files)""" world_size = 2 num_workers = 4 total_workers = world_size * num_workers file_counts = [] for rank in range(world_size): for worker in range(num_workers): assigned_files = self._get_file_distribution(rank, world_size, worker, num_workers) file_counts.append(len(assigned_files)) # Calculate expected file count (should be roughly equal) expected_per_worker = len(self.data_files) / total_workers min_files = int(expected_per_worker) max_files = int(expected_per_worker) + 1 # Verify each worker's file count is within reasonable range for count in file_counts: self.assertGreaterEqual(count, min_files, "Too few files assigned") self.assertLessEqual(count, max_files, "Too many files assigned") # Verify total count is correct self.assertEqual( sum(file_counts), len(self.data_files), f"Total file count mismatch: expected {len(self.data_files)}, actual {sum(file_counts)}" ) def test_file_distribution_with_epochs(self): """Test file distribution with multiple epochs""" # Create multi-epoch file list data_files_multi_epoch = [] for epoch in range(3): for i in range(20): data_files_multi_epoch.append((f"file_{i}.parquet", epoch)) self.data_files = data_files_multi_epoch world_size = 2 num_workers = 4 # Collect assignments by (file_idx, epoch) all_assigned = set() for rank in range(world_size): for worker in range(num_workers): assigned_indices = self._get_file_distribution( rank, world_size, worker, num_workers ) # Convert indices to (filename, epoch) tuples for idx in assigned_indices: file_name, epoch = self.data_files[idx] all_assigned.add((file_name, epoch)) # Verify all (file, epoch) combinations are assigned expected = set((fn, ep) for fn, ep in self.data_files) self.assertEqual( all_assigned, expected, f"Multi-epoch file assignment incomplete: missing {expected - all_assigned}" ) class TestFileDistributionLogic(unittest.TestCase): """Test core algorithm of file distribution logic""" def setUp(self): """Set up test environment""" self.data_files = [ (f"file_{i}.parquet", 0) for i in range(50) ] def test_distribution_algorithm(self): """Test correctness of file distribution algorithm""" # Simulate distribution logic in Qwen3NaiveParquetDataset.__iter__local_shuffle rank = 0 world_size = 2 worker = 0 num_workers = 2 total_num_workers = num_workers * world_size local_worker_idx = rank * num_workers + worker fn_list = [ fn for idx, fn in enumerate(self.data_files) if idx % total_num_workers == local_worker_idx ] # Verify file list is not empty self.assertGreater(len(fn_list), 0, "File list should not be empty") # Verify file indices are correct expected_indices = [ idx for idx in range(len(self.data_files)) if idx % total_num_workers == local_worker_idx ] actual_indices = [ idx for idx, fn in enumerate(self.data_files) if fn in fn_list ] self.assertEqual( set(actual_indices), set(expected_indices), "File index assignment is incorrect" ) def run_distribution_test_manual(): """ Manually run file distribution test, print detailed assignment information For debugging and verification """ print("=" * 80) print("File Distribution Test - Manual Verification") print("=" * 80) # Test configurations data_files = [(f"file_{i}.parquet", 0) for i in range(100)] test_configs = [ (1, 1, "Single process, single worker"), (1, 4, "Single process, 4 workers"), (2, 2, "2 processes, 2 workers each"), (4, 2, "4 processes, 2 workers each"), (2, 8, "2 
processes, 8 workers each"), ] for world_size, num_workers, desc in test_configs: print(f"\nConfig: {desc} (world_size={world_size}, num_workers={num_workers})") print("-" * 80) total_num_workers = num_workers * world_size all_assigned = {} for rank in range(world_size): for worker in range(num_workers): local_worker_idx = rank * num_workers + worker assigned_files = [ idx for idx, fn in enumerate(data_files) if idx % total_num_workers == local_worker_idx ] all_assigned[(rank, worker)] = assigned_files print(f" Rank {rank}, Worker {worker} (local_idx={local_worker_idx}): " f"{len(assigned_files)} files, index range: {min(assigned_files) if assigned_files else 'N/A'}-{max(assigned_files) if assigned_files else 'N/A'}") # Verify completeness all_file_indices = set() for assigned in all_assigned.values(): all_file_indices.update(assigned) expected_indices = set(range(len(data_files))) missing = expected_indices - all_file_indices extra = all_file_indices - expected_indices if missing: print(f" X Missing file indices: {sorted(missing)}") if extra: print(f" X Extra file indices: {sorted(extra)}") if not missing and not extra: print(f" OK File assignment complete: all {len(data_files)} files correctly assigned") # Check for overlap has_overlap = False for (r1, w1), files1 in all_assigned.items(): for (r2, w2), files2 in all_assigned.items(): if (r1, w1) >= (r2, w2): # Avoid duplicate checks continue overlap = set(files1) & set(files2) if overlap: print(f" X Overlap detected: Rank {r1}, Worker {w1} and Rank {r2}, Worker {w2} overlap files: {sorted(overlap)}") has_overlap = True if not has_overlap: print(f" OK No overlap: all files processed by only one worker") if __name__ == '__main__': # Run unit tests print("Running unit tests...") unittest.main(argv=[''], exit=False, verbosity=2) # Run manual verification print("\n" + "=" * 80) run_distribution_test_manual() ================================================ FILE: pretrain/tools/model_converter/convert_checkpoint_to_hf.py ================================================ """Checkpoint to HuggingFace Format Converter This module provides utilities to convert PyTorch checkpoints (DCP or .pth files) to HuggingFace format (safetensors or bin files with sharding support). """ import argparse import json import logging import os import shutil from pathlib import Path from typing import Dict, Optional, Union import torch import tqdm from safetensors.torch import save_file from torch.distributed.checkpoint import FileSystemReader from torch.distributed.checkpoint.default_planner import _EmptyStateDictLoadPlanner from torch.distributed.checkpoint.metadata import STATE_DICT_TYPE from torch.distributed.checkpoint.state_dict_loader import _load_state_dict # Configure logging logging.basicConfig( level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s', handlers=[logging.StreamHandler()] ) logger = logging.getLogger(__name__) # Constants SHARD_FNAME_TEMPLATE = "model-{cpt_idx}-of-{num_shards}" BYTES_PER_GB = 1024 * 1024 * 1024 DEFAULT_MAX_GB_PER_SHARD = 5 DEFAULT_DTYPE = "bf16" # Common HuggingFace config files to copy HF_CONFIG_FILES = [ "config.json", "tokenizer.json", "tokenizer_config.json", "tokenizer.model", # SentencePiece tokenizer model file "vocab.txt", "vocab.json", "merges.txt", "special_tokens_map.json", "added_tokens.json", "generation_config.json", "preprocessor_config.json", # For vision models ] def _get_torch_dtype(dtype_str: str) -> torch.dtype: """Convert dtype string to torch.dtype. 
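# Back-of-envelope for the DEFAULT_MAX_GB_PER_SHARD = 5 cap above: bf16 stores
# two bytes per element, so one shard holds roughly 2.7B parameters before the
# writer rolls over to a new file.

BYTES_PER_GB = 1024 ** 3
params_per_bf16_shard = (5 * BYTES_PER_GB) // 2
assert params_per_bf16_shard == 2_684_354_560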
Args: dtype_str: Data type string ("fp32", "fp16", "bf16") Returns: Corresponding torch.dtype Raises: ValueError: If dtype_str is not supported """ dtype_map = { "fp32": torch.float32, "fp16": torch.float16, "bf16": torch.bfloat16, } if dtype_str not in dtype_map: raise ValueError(f"Unsupported dtype: {dtype_str}. Supported: {list(dtype_map.keys())}") return dtype_map[dtype_str] def _extract_state_dict_from_checkpoint(checkpoint: Dict, model_only: bool = True) -> Dict[str, torch.Tensor]: """Extract state_dict from checkpoint with various structures. Args: checkpoint: Checkpoint dictionary model_only: Whether to extract only model weights Returns: State dictionary containing model weights """ if not isinstance(checkpoint, dict): raise ValueError(f"Unsupported checkpoint format: {type(checkpoint)}") # Check for nested DCP-like structure if model_only and "app" in checkpoint and "model" in checkpoint["app"]: logger.info("Found nested structure: checkpoint['app']['model']") return checkpoint["app"]["model"] elif "model" in checkpoint: logger.info("Found structure: checkpoint['model']") return checkpoint["model"] elif "state_dict" in checkpoint: logger.info("Found structure: checkpoint['state_dict']") return checkpoint["state_dict"] else: # Assume entire dict is the state_dict logger.info("Using entire checkpoint as state_dict") return checkpoint def _convert_state_dict_to_shards( state_dict: Dict[str, torch.Tensor], output_dir: Union[str, os.PathLike], use_safetensor: bool = True, max_gb_per_shard: int = DEFAULT_MAX_GB_PER_SHARD, dtype: str = DEFAULT_DTYPE ) -> None: """Convert state_dict to sharded safetensors or bin files. Args: state_dict: State dictionary containing model weights output_dir: Output directory for sharded files use_safetensor: Whether to use safetensors format (default: True) max_gb_per_shard: Maximum size per shard in GB (default: 5) dtype: Data type for conversion ("fp32", "fp16", "bf16", default: "bf16") Raises: ValueError: If dtype is not supported """ torch_dtype = _get_torch_dtype(dtype) logger.info(f"Converting state_dict to {dtype} format") # Convert data types logger.info("Converting tensor data types...") for key in tqdm.tqdm(state_dict.keys(), desc="Converting dtypes"): state_dict[key] = state_dict[key].to(torch_dtype) # Split into shards logger.info(f"Splitting state_dict into shards (max {max_gb_per_shard} GB per shard)...") split_state_dicts: Dict[int, Dict[str, torch.Tensor]] = {} shard_idx = 0 total_size = 0 current_size = 0 max_bytes_per_shard = max_gb_per_shard * BYTES_PER_GB for key, weight in tqdm.tqdm(state_dict.items(), desc="Creating shards"): if shard_idx not in split_state_dicts: split_state_dicts[shard_idx] = {} split_state_dicts[shard_idx][key] = weight weight_size = weight.numel() * weight.element_size() current_size += weight_size total_size += weight_size if current_size >= max_bytes_per_shard: shard_idx += 1 current_size = 0 # Write shard files num_shards = len(split_state_dicts) weight_map: Dict[str, str] = {} output_path_obj = Path(output_dir) output_path_obj.mkdir(parents=True, exist_ok=True) logger.info(f"Writing {num_shards} shard files...") for shard_idx, shard_state_dict in tqdm.tqdm(split_state_dicts.items(), desc="Writing shards"): shard_name = SHARD_FNAME_TEMPLATE.format( cpt_idx=f"{shard_idx}".zfill(5), num_shards=f"{num_shards}".zfill(5) ) if use_safetensor: shard_path = output_path_obj / f"{shard_name}.safetensors" save_file(shard_state_dict, shard_path, metadata={"format": "pt"}) else: shard_path = output_path_obj / 
f"{shard_name}.bin" torch.save(shard_state_dict, shard_path) # Update weight map shard_filename = shard_path.name for key in shard_state_dict.keys(): weight_map[key] = shard_filename shard_size_gb = os.path.getsize(shard_path) / BYTES_PER_GB logger.info(f"Shard {shard_idx + 1}/{num_shards}: {shard_size_gb:.2f} GiB saved to {shard_path}") # Write index file index_filename = "model.safetensors.index.json" if use_safetensor else "model.bin.index.json" index_path = output_path_obj / index_filename index_data = { "metadata": { "total_size": total_size }, "weight_map": weight_map, } with open(index_path, "w", encoding="utf-8") as f: json.dump(index_data, f, indent=2) logger.info(f"Index file saved to {index_path}") logger.info(f"Total model size: {total_size / BYTES_PER_GB:.2f} GiB") def pth_to_hf_format( pth_file_path: Union[str, os.PathLike], output_dir: Union[str, os.PathLike], model_only: bool = True, use_safetensor: bool = True, max_gb_per_shard: int = DEFAULT_MAX_GB_PER_SHARD, dtype: str = DEFAULT_DTYPE ) -> None: """Convert .pth file to HuggingFace format (safetensors or bin files). Args: pth_file_path: Path to .pth checkpoint file output_dir: Output directory for converted files model_only: Whether to extract only model weights (default: True) use_safetensor: Whether to use safetensors format (default: True) max_gb_per_shard: Maximum size per shard in GB (default: 5) dtype: Data type for conversion (default: "bf16") Raises: FileNotFoundError: If pth_file_path does not exist ValueError: If pth_file_path is not a .pth file or has unsupported format .. warning:: To avoid OOM, it's recommended to run this function on a single rank/process. """ pth_path = Path(pth_file_path) if not pth_path.exists(): raise FileNotFoundError(f"PTH file not found: {pth_path}") if pth_path.suffix != ".pth": raise ValueError(f"Expected .pth file, got: {pth_path.suffix}") logger.info(f"Loading PTH file from {pth_path}...") checkpoint = torch.load(pth_path, map_location="cpu") # Extract state_dict from checkpoint state_dict = _extract_state_dict_from_checkpoint(checkpoint, model_only=model_only) logger.info(f"Loaded state_dict with {len(state_dict)} keys") # Convert to HuggingFace format _convert_state_dict_to_shards( state_dict=state_dict, output_dir=output_dir, use_safetensor=use_safetensor, max_gb_per_shard=max_gb_per_shard, dtype=dtype ) def dcp_to_hf_format( dcp_checkpoint_dir: Union[str, os.PathLike], output_dir: Union[str, os.PathLike], model_only: bool = True, use_safetensor: bool = True, max_gb_per_shard: int = DEFAULT_MAX_GB_PER_SHARD, dtype: str = DEFAULT_DTYPE ) -> None: """Convert DCP (Distributed Checkpoint) to HuggingFace format. Args: dcp_checkpoint_dir: Directory containing the DCP checkpoint output_dir: Output directory for converted files model_only: Whether to extract only model weights (default: True) use_safetensor: Whether to use safetensors format (default: True) max_gb_per_shard: Maximum size per shard in GB (default: 5) dtype: Data type for conversion (default: "bf16") Raises: FileNotFoundError: If dcp_checkpoint_dir does not exist .. warning:: To avoid OOM, it's recommended to run this function on a single rank/process. 
""" dcp_path = Path(dcp_checkpoint_dir) if not dcp_path.exists(): raise FileNotFoundError(f"DCP checkpoint directory not found: {dcp_path}") if not dcp_path.is_dir(): raise ValueError(f"Expected directory, got: {dcp_path}") logger.info(f"Loading DCP checkpoint from {dcp_path}...") state_dict: STATE_DICT_TYPE = {} _load_state_dict( state_dict, storage_reader=FileSystemReader(str(dcp_path)), planner=_EmptyStateDictLoadPlanner(), no_dist=True, ) logger.info("DCP checkpoint loaded successfully") if model_only: if "app" not in state_dict or "model" not in state_dict["app"]: raise ValueError("Expected 'app.model' in DCP checkpoint when model_only=True") state_dict = state_dict["app"]["model"] logger.info(f"Extracted model state_dict with {len(state_dict)} keys") # Convert to HuggingFace format _convert_state_dict_to_shards( state_dict=state_dict, output_dir=output_dir, use_safetensor=use_safetensor, max_gb_per_shard=max_gb_per_shard, dtype=dtype ) def copy_hf_config_files( source_hf_model_path: Union[str, os.PathLike], output_dir: Union[str, os.PathLike] ) -> None: """Copy HuggingFace configuration files from source to output directory. Args: source_hf_model_path: Path to source HuggingFace model directory output_dir: Output directory where config files will be copied """ source_path = Path(source_hf_model_path) output_path = Path(output_dir) if not source_path.exists(): logger.warning(f"Source HuggingFace model path does not exist: {source_path}") return if not source_path.is_dir(): logger.warning(f"Source path is not a directory: {source_path}") return output_path.mkdir(parents=True, exist_ok=True) copied_files = [] # Copy known config files for config_file in HF_CONFIG_FILES: source_file = source_path / config_file if source_file.exists(): dest_file = output_path / config_file shutil.copy2(source_file, dest_file) copied_files.append(config_file) logger.debug(f"Copied {config_file} to {output_path}") # Copy additional JSON and TXT files (may be config files) for pattern in ["*.json", "*.txt"]: for source_file in source_path.glob(pattern): # Skip already copied files and weight files if (source_file.name in copied_files or source_file.name.startswith("model-") or source_file.suffix in [".bin", ".safetensors"]): continue dest_file = output_path / source_file.name if not dest_file.exists(): # Avoid overwriting already copied files shutil.copy2(source_file, dest_file) if source_file.name not in HF_CONFIG_FILES: logger.debug(f"Copied additional file: {source_file.name}") if copied_files: logger.info(f"Successfully copied {len(copied_files)} config files from {source_path} to {output_path}") else: logger.warning(f"No config files found in {source_path}") def get_argument_parser() -> argparse.ArgumentParser: """Create and configure argument parser. 
Returns: Configured argument parser """ parser = argparse.ArgumentParser( description="Convert PyTorch checkpoints (DCP or .pth) to HuggingFace format" ) parser.add_argument( "--checkpoint_dir", type=str, required=True, help="Path to DCP checkpoint directory or .pth file" ) parser.add_argument( "--output_dir", type=str, required=True, help="Output directory for converted HuggingFace model" ) parser.add_argument( "--source_hf_model_path", type=str, default=None, help="Path to original HuggingFace model to copy config files from (optional)" ) parser.add_argument( "--use_safetensor", action="store_true", default=True, help="Use safetensors format (default: True)" ) parser.add_argument( "--no_safetensor", dest="use_safetensor", action="store_false", help="Use .bin format instead of safetensors" ) parser.add_argument( "--max_gb_per_shard", type=int, default=DEFAULT_MAX_GB_PER_SHARD, help=f"Maximum size per shard in GB (default: {DEFAULT_MAX_GB_PER_SHARD})" ) parser.add_argument( "--dtype", type=str, default=DEFAULT_DTYPE, choices=["fp32", "fp16", "bf16"], help=f"Data type for conversion (default: {DEFAULT_DTYPE})" ) return parser def main() -> None: """Main entry point for the script.""" parser = get_argument_parser() args = parser.parse_args() checkpoint_path = Path(args.checkpoint_dir) if not checkpoint_path.exists(): raise FileNotFoundError(f"Checkpoint path does not exist: {checkpoint_path}") # Auto-detect input type: .pth file or DCP checkpoint directory if checkpoint_path.is_file() and checkpoint_path.suffix == ".pth": logger.info(f"Detected PTH file: {checkpoint_path}") pth_to_hf_format( pth_file_path=checkpoint_path, output_dir=args.output_dir, model_only=True, use_safetensor=args.use_safetensor, max_gb_per_shard=args.max_gb_per_shard, dtype=args.dtype ) elif checkpoint_path.is_dir(): logger.info(f"Detected DCP checkpoint directory: {checkpoint_path}") dcp_to_hf_format( dcp_checkpoint_dir=checkpoint_path, output_dir=args.output_dir, model_only=True, use_safetensor=args.use_safetensor, max_gb_per_shard=args.max_gb_per_shard, dtype=args.dtype ) else: raise ValueError( f"Invalid checkpoint path: {checkpoint_path}. " "Expected either a .pth file or a DCP checkpoint directory." ) # Copy config files if source model path is provided if args.source_hf_model_path: logger.info(f"Copying config files from {args.source_hf_model_path} to {args.output_dir}") copy_hf_config_files( source_hf_model_path=args.source_hf_model_path, output_dir=args.output_dir ) logger.info("Conversion completed successfully!") if __name__ == "__main__": main() ================================================ FILE: pretrain/tools/model_converter/expand_qwen3_vocab.py ================================================ """Qwen3 Vocabulary Expansion Tool Expand the standard Qwen3 HuggingFace checkpoint vocabulary to support post-training. Add new tokens and adjust model vocabulary size (aligned to multiples of 256). """ import argparse import json import logging import os import random import sys from pathlib import Path from typing import List import torch from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer # Configure logging logging.basicConfig( level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s', handlers=[logging.StreamHandler()] ) logger = logging.getLogger(__name__) def _align_vocab_size(vocab_size: int, alignment: int = 256) -> int: """Align vocabulary size to the nearest multiple of alignment. 
Args: vocab_size: Current vocabulary size alignment: Alignment value (default: 256) Returns: Aligned vocabulary size """ return ((vocab_size + alignment - 1) // alignment) * alignment def _fix_chat_template(reco_model_dir: str, hf_model_dir: str) -> None: """Fix chat template in tokenizer config by copying from original model. Args: reco_model_dir: Output model directory hf_model_dir: Original HuggingFace model directory """ reco_tokenizer_config_path = os.path.join(reco_model_dir, "tokenizer_config.json") hf_tokenizer_config_path = os.path.join(hf_model_dir, "tokenizer_config.json") if not os.path.exists(hf_tokenizer_config_path): logger.warning(f"Original tokenizer_config.json not found: {hf_tokenizer_config_path}") return if not os.path.exists(reco_tokenizer_config_path): logger.warning(f"Output tokenizer_config.json not found: {reco_tokenizer_config_path}") return # Load configs with open(reco_tokenizer_config_path, "r", encoding="utf-8") as f: reco_config = json.load(f) with open(hf_tokenizer_config_path, "r", encoding="utf-8") as f: hf_config = json.load(f) # Copy chat template from original if "chat_template" in hf_config: reco_config["chat_template"] = hf_config["chat_template"] with open(reco_tokenizer_config_path, "w", encoding="utf-8") as f: json.dump(reco_config, f, indent=2, ensure_ascii=False) logger.info("Chat template copied from original model") def _test_expanded_vocab(model, tokenizer, new_tokens: List[str]) -> None: """Test the expanded vocabulary with sample tokens. Args: model: Expanded model tokenizer: Expanded tokenizer new_tokens: List of newly added tokens """ if not new_tokens: logger.info("No new tokens to test") return # Sample 3-5 tokens from new_tokens num_samples = min(random.randint(3, 5), len(new_tokens)) sampled_tokens = random.sample(new_tokens, num_samples) input_text = " ".join(sampled_tokens) + " Hello world" try: input_ids = tokenizer.encode(input_text, return_tensors='pt') # Test generation (use eval mode to avoid training-specific behavior) model.eval() with torch.no_grad(): output = model.generate(input_ids, max_new_tokens=10, do_sample=False) logger.info("Vocabulary expansion test:") logger.info(f" Input text: {input_text}") logger.info(f" Decoded input: {tokenizer.decode(input_ids[0], skip_special_tokens=True)}") logger.info(f" Input IDs shape: {input_ids.shape}") logger.info(f" Generated: {tokenizer.decode(output[0], skip_special_tokens=True)}") except Exception as e: logger.warning(f"Vocabulary test failed: {e}") def expand_qwen3_vocab_for_pretraining( hf_model_dir: str, output_model_dir: str, new_tokens: List[str] ) -> None: """Expand Qwen3 vocabulary for pretraining by adding new tokens. This function: 1. Loads the original Qwen3 model and tokenizer 2. Adds new tokens to the tokenizer 3. Resizes model embeddings to aligned vocabulary size (multiple of 256) 4. Updates model configuration 5. Saves the expanded model, tokenizer, and config 6. Fixes chat template from original model 7. 
Tests the expanded vocabulary Args: hf_model_dir: Path to original HuggingFace model directory output_model_dir: Path to save expanded model new_tokens: List of new tokens to add Raises: FileNotFoundError: If model directory doesn't exist ValueError: If new_tokens is empty """ if not new_tokens: raise ValueError("new_tokens list cannot be empty") if not os.path.exists(hf_model_dir): raise FileNotFoundError(f"Model directory does not exist: {hf_model_dir}") # Create output directory os.makedirs(output_model_dir, exist_ok=True) logger.info("Expanding vocabulary for pretraining") logger.info(f"  Input model: {hf_model_dir}") logger.info(f"  Output model: {output_model_dir}") logger.info(f"  New tokens: {len(new_tokens)}") # Step 1: Load original model components logger.info("Loading original model components...") config = AutoConfig.from_pretrained(hf_model_dir, trust_remote_code=True) model = AutoModelForCausalLM.from_pretrained( hf_model_dir, torch_dtype=torch.float32, # Use float32 for compatibility trust_remote_code=True ) tokenizer = AutoTokenizer.from_pretrained(hf_model_dir, trust_remote_code=True) original_vocab_size = len(tokenizer) logger.info(f"Original vocabulary size: {original_vocab_size}") # Step 2: Add new tokens logger.info(f"Adding {len(new_tokens)} new tokens...") num_added = tokenizer.add_tokens(new_tokens) logger.info(f"Successfully added {num_added} tokens") # Step 3: Calculate aligned vocabulary size new_vocab_size = len(tokenizer) target_vocab_size = _align_vocab_size(new_vocab_size, alignment=256) logger.info(f"New vocabulary size: {new_vocab_size}") logger.info(f"Target vocabulary size (aligned to 256): {target_vocab_size}") # Step 4: Resize model embeddings logger.info("Resizing model token embeddings...") model.resize_token_embeddings(target_vocab_size) # Step 5: Update configuration config.vocab_size = target_vocab_size logger.info(f"Updated config vocab_size to {target_vocab_size}") # Step 6: Save expanded components logger.info("Saving expanded model components...") tokenizer.save_pretrained(output_model_dir) model.save_pretrained(output_model_dir) config.save_pretrained(output_model_dir) logger.info("Model components saved successfully") # Step 7: Fix chat template logger.info("Fixing chat template...") _fix_chat_template(output_model_dir, hf_model_dir) # Step 8: Test expanded vocabulary logger.info("Testing expanded vocabulary...") _test_expanded_vocab(model, tokenizer, new_tokens) logger.info(f"✓ Vocabulary expansion completed! Final vocab size: {target_vocab_size}") def generate_itemic_tokens(itemic_layer_n: int, vocab_size_per_layer: int) -> List[str]: """Generate itemic special tokens dynamically. IMPORTANT: Token order must strictly match gen_itemic_sp_tokens.py: 1. All <s_a_i> tokens (i from 0 to vocab_size_per_layer-1) 2. All <s_b_i> tokens (i from 0 to vocab_size_per_layer-1) 3. All <s_c_i> tokens (i from 0 to vocab_size_per_layer-1) 4. ... (for itemic_layer_n layers, in alphabetical order) 5. <|sid_begin|> 6. <|sid_end|> Args: itemic_layer_n: Number of itemic layers (determines s_a, s_b, s_c, ...) vocab_size_per_layer: Vocabulary size per layer (determines range of i) Returns: List of generated tokens in strict order Raises: ValueError: If itemic_layer_n or vocab_size_per_layer is invalid """ if itemic_layer_n <= 0: raise ValueError(f"itemic_layer_n must be positive, got {itemic_layer_n}") if vocab_size_per_layer <= 0: raise ValueError(f"vocab_size_per_layer must be positive, got {vocab_size_per_layer}") # Generate layer names in alphabetical order: a, b, c, d, ...
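# (Illustrative example, assuming the <s_{layer}_{i}> format used below: itemic_layer_n=2 and vocab_size_per_layer=3 would yield <s_a_0>, <s_a_1>, <s_a_2>, <s_b_0>, <s_b_1>, <s_b_2>, <|sid_begin|>, <|sid_end|>)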
# This ensures the same order as gen_itemic_sp_tokens.py layer_names = [chr(ord('a') + i) for i in range(itemic_layer_n)] new_tokens = [] # Generate tokens in strict order: # For each layer (a, b, c, ...), generate all tokens with i from 0 to vocab_size_per_layer-1 # This matches the order: [*s_a_0..8191, *s_b_0..8191, *s_c_0..8191, ...] for layer_name in layer_names: for i in range(vocab_size_per_layer): new_tokens.append(f"<s_{layer_name}_{i}>") # Add special tokens at the end (must be in this exact order) new_tokens.append('<|sid_begin|>') new_tokens.append('<|sid_end|>') total_tokens = itemic_layer_n * vocab_size_per_layer + 2 logger.info(f"Generated {total_tokens} itemic tokens in strict order:") logger.info(f"  Layers: {itemic_layer_n} ({', '.join([f's_{name}' for name in layer_names])})") logger.info(f"  Vocab size per layer: {vocab_size_per_layer}") logger.info("  Special tokens: <|sid_begin|>, <|sid_end|>") return new_tokens def load_tokens_from_file(tokens_file: str) -> List[str]: """Load tokens from a text file (one token per line). Args: tokens_file: Path to text file containing tokens (one per line) Returns: List of tokens (empty lines are skipped) Raises: FileNotFoundError: If tokens file doesn't exist """ if not os.path.exists(tokens_file): raise FileNotFoundError(f"Tokens file does not exist: {tokens_file}") new_tokens = [] line_count = 0 with open(tokens_file, "r", encoding="utf-8") as f: for line in f: line_count += 1 token = line.strip() if token: # Skip empty lines new_tokens.append(token) logger.info(f"Loaded {len(new_tokens)} tokens from {line_count} lines in {tokens_file}") return new_tokens def main(): """Main entry point for the script.""" parser = argparse.ArgumentParser( description='Expand Qwen3 vocabulary for pretraining by adding new tokens. ' 'Supports two modes: loading from file or generating itemic tokens dynamically.' ) parser.add_argument( "--hf_model_dir", type=str, required=True, help="Path to original HuggingFace Qwen3 model directory" ) parser.add_argument( "--output_model_dir", type=str, required=True, help="Path to save expanded model directory" ) parser.add_argument( "--tokens_file", type=str, default=None, help="Optional path to a text file with tokens (one per line); overrides dynamic generation" ) # Itemic token generation parameters parser.add_argument( "--itemic_layer_n", type=int, default=None, help="Number of itemic layers (e.g., 3 for s_a, s_b, s_c); required unless --tokens_file is set" ) parser.add_argument( "--vocab_size_per_layer", type=int, default=None, help="Vocabulary size per layer (e.g., 8192 for tokens from 0 to 8191); required unless --tokens_file is set" ) args = parser.parse_args() try: if args.tokens_file: # Load tokens from file logger.info(f"Loading tokens from {args.tokens_file}...") new_tokens = load_tokens_from_file(args.tokens_file) else: if args.itemic_layer_n is None or args.vocab_size_per_layer is None: parser.error("--itemic_layer_n and --vocab_size_per_layer are required unless --tokens_file is set") # Generate itemic tokens dynamically logger.info("Generating itemic tokens dynamically...") new_tokens = generate_itemic_tokens( itemic_layer_n=args.itemic_layer_n, vocab_size_per_layer=args.vocab_size_per_layer ) if not new_tokens: logger.error("No tokens to add") sys.exit(1) # Expand vocabulary expand_qwen3_vocab_for_pretraining( hf_model_dir=args.hf_model_dir, output_model_dir=args.output_model_dir, new_tokens=new_tokens ) logger.info("All operations completed successfully!") except KeyboardInterrupt: logger.info("\nOperation cancelled by user") sys.exit(1) except Exception as e: logger.error(f"Program execution failed: {e}", exc_info=True) sys.exit(1) if __name__ == "__main__": main() ================================================ FILE: pretrain/tools/model_test/test_hf_model.py ================================================ #!/usr/bin/env python3 """HuggingFace Model Testing Tool A unified tool for testing HuggingFace models with both direct text generation and chat template modes. Supports thinking mode and ground truth comparison. 
""" import argparse import json import logging import sys from pathlib import Path from typing import List, Optional, Union import torch from transformers import AutoTokenizer, AutoModelForCausalLM # Configure logging logging.basicConfig( level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s', handlers=[logging.StreamHandler()] ) logger = logging.getLogger(__name__) def load_model( model_path: str, device: str = "auto", torch_dtype: torch.dtype = torch.bfloat16 ) -> tuple: """Load HuggingFace model and tokenizer. Args: model_path: Path to model directory device: Device mapping (default: "auto") torch_dtype: Data type for model (default: bfloat16) Returns: Tuple of (model, tokenizer) """ logger.info(f"Loading model from: {model_path}") tokenizer = AutoTokenizer.from_pretrained( model_path, trust_remote_code=True ) logger.info("Tokenizer loaded") model = AutoModelForCausalLM.from_pretrained( model_path, torch_dtype=torch_dtype, device_map=device, trust_remote_code=True ) logger.info("Model loaded") return model, tokenizer def print_model_info(model) -> None: """Print model information. Args: model: Loaded model instance """ device = next(model.parameters()).device dtype = next(model.parameters()).dtype logger.info("=" * 60) logger.info("Model Information:") logger.info(f" Device: {device}") logger.info(f" Data Type: {dtype}") logger.info(f" Vocab Size: {model.config.vocab_size}") logger.info(f" Hidden Size: {model.config.hidden_size}") if hasattr(model.config, 'num_hidden_layers'): logger.info(f" Num Layers: {model.config.num_hidden_layers}") logger.info("=" * 60) def generate_text( model, tokenizer, prompt: str, max_new_tokens: int = 256, temperature: float = 0.7, top_p: float = 0.9, repetition_penalty: float = 1.1, do_sample: bool = True, show_input_ids: bool = False ) -> str: """Generate text from a direct prompt (without chat template). Args: model: Model instance tokenizer: Tokenizer instance prompt: Input prompt text max_new_tokens: Maximum number of tokens to generate temperature: Sampling temperature top_p: Top-p sampling parameter repetition_penalty: Repetition penalty do_sample: Whether to use sampling show_input_ids: Whether to print input token IDs Returns: Generated text (only the newly generated part) """ device = next(model.parameters()).device inputs = tokenizer(prompt, return_tensors="pt").to(device) if show_input_ids: logger.info(f"Input IDs: {inputs['input_ids']}") with torch.no_grad(): generate_ids = model.generate( **inputs, max_new_tokens=max_new_tokens, temperature=temperature, top_p=top_p, repetition_penalty=repetition_penalty, do_sample=do_sample ) output = tokenizer.batch_decode( generate_ids, skip_special_tokens=False, clean_up_tokenization_spaces=False )[0] # Return only the newly generated part generated_text = output[len(prompt):].strip() return generated_text def generate_chat( model, tokenizer, messages: List[dict], max_new_tokens: int = 1024, temperature: float = 0.7, top_p: float = 0.9, repetition_penalty: float = 1.2, enable_thinking: bool = False, add_generation_prompt: bool = True, show_template: bool = False ) -> str: """Generate text using chat template. 
Args: model: Model instance tokenizer: Tokenizer instance messages: List of message dicts with 'role' and 'content' keys max_new_tokens: Maximum number of tokens to generate temperature: Sampling temperature top_p: Top-p sampling parameter repetition_penalty: Repetition penalty enable_thinking: Whether to enable thinking mode add_generation_prompt: Whether to add generation prompt show_template: Whether to print the formatted template Returns: Generated text (only the newly generated part) """ # Apply chat template template_kwargs = { "tokenize": False, "add_generation_prompt": add_generation_prompt, } if enable_thinking: template_kwargs["enable_thinking"] = True text = tokenizer.apply_chat_template(messages, **template_kwargs) if show_template: logger.info(f"Chat Template:\n{text}\n" + "=" * 60) # Tokenize and generate inputs = tokenizer( text, return_tensors="pt", padding=False, truncation=False ) device = next(model.parameters()).device inputs = inputs.to(device) with torch.no_grad(): output = model.generate( **inputs, max_new_tokens=max_new_tokens, temperature=temperature, top_p=top_p, repetition_penalty=repetition_penalty, do_sample=True ) output_text = tokenizer.batch_decode( output, skip_special_tokens=False, clean_up_tokenization_spaces=False )[0] # Return only the newly generated part generated_text = output_text[len(text):].strip() return generated_text def load_test_cases_from_file(file_path: Union[str, Path]) -> tuple: """Load test cases from JSON file. Expected format: { "test_cases": [ { "type": "text" or "chat", "input": "prompt text" or [{"role": "...", "content": "..."}], "ground_truth": "expected output" (optional) } ] } Args: file_path: Path to JSON file Returns: Tuple of (test_cases, ground_truths) """ with open(file_path, "r", encoding="utf-8") as f: data = json.load(f) test_cases = [] ground_truths = [] for item in data.get("test_cases", []): test_cases.append({ "type": item.get("type", "text"), "input": item["input"] }) ground_truths.append(item.get("ground_truth", "")) return test_cases, ground_truths def get_default_test_cases() -> tuple: """Get default test cases for demonstration. 
Returns: Tuple of (test_cases, ground_truths) """ test_cases = [ { "type": "text", "input": "你好,请介绍一下你自己。" }, { "type": "text", "input": "视频<|sid_begin|><|sid_end|>的类型是:" }, { "type": "chat", "input": [{"role": "user", "content": "写一首关于春天的短诗:"}] }, { "type": "chat", "input": [ {"role": "system", "content": "你是一名视频描述生成器,请根据下面的视频token生成视频描述"}, {"role": "user", "content": "这是一个视频:<|sid_begin|><|sid_end|>,帮我总结一下这个视频讲述了什么内容"} ] }, ] ground_truths = ["", "", "", ""] return test_cases, ground_truths def main(): parser = argparse.ArgumentParser( description="Test HuggingFace models with text generation or chat mode" ) # Model arguments parser.add_argument( "--model_path", type=str, required=True, help="Path to HuggingFace model directory" ) parser.add_argument( "--device", type=str, default="auto", help="Device mapping (default: auto)" ) parser.add_argument( "--dtype", type=str, default="bf16", choices=["fp32", "fp16", "bf16"], help="Model data type (default: bf16)" ) # Test case arguments parser.add_argument( "--test_file", type=str, default=None, help="Path to JSON file containing test cases (optional)" ) parser.add_argument( "--use_default", action="store_true", help="Use default test cases if no test file provided" ) # Generation arguments parser.add_argument( "--max_new_tokens", type=int, default=1024, help="Maximum number of tokens to generate (default: 1024)" ) parser.add_argument( "--temperature", type=float, default=0.7, help="Sampling temperature (default: 0.7)" ) parser.add_argument( "--top_p", type=float, default=0.9, help="Top-p sampling parameter (default: 0.9)" ) parser.add_argument( "--repetition_penalty", type=float, default=1.2, help="Repetition penalty (default: 1.2)" ) # Chat mode arguments parser.add_argument( "--enable_thinking", action="store_true", help="Enable thinking mode for chat template" ) parser.add_argument( "--no_generation_prompt", dest="add_generation_prompt", action="store_false", help="Disable generation prompt in chat template" ) parser.add_argument( "--show_template", action="store_true", help="Show formatted chat template" ) parser.add_argument( "--show_input_ids", action="store_true", help="Show input token IDs for text mode" ) # Output arguments parser.add_argument( "--compare_ground_truth", action="store_true", help="Compare output with ground truth if available" ) args = parser.parse_args() # Convert dtype string to torch.dtype dtype_map = { "fp32": torch.float32, "fp16": torch.float16, "bf16": torch.bfloat16, } torch_dtype = dtype_map[args.dtype] # Load model model, tokenizer = load_model(args.model_path, args.device, torch_dtype) print_model_info(model) # Load test cases if args.test_file: logger.info(f"Loading test cases from: {args.test_file}") test_cases, ground_truths = load_test_cases_from_file(args.test_file) elif args.use_default: logger.info("Using default test cases") test_cases, ground_truths = get_default_test_cases() else: logger.error("Either --test_file or --use_default must be provided") sys.exit(1) logger.info(f"Loaded {len(test_cases)} test cases\n") # Run tests logger.info("Starting tests...\n") for i, (test_case, ground_truth) in enumerate(zip(test_cases, ground_truths), 1): logger.info("=" * 60) logger.info(f"Test {i}/{len(test_cases)}") logger.info("=" * 60) test_type = test_case["type"] test_input = test_case["input"] # Display input if test_type == "text": logger.info(f"Input (text): {test_input}\n") else: logger.info(f"Input (chat):") for msg in test_input: logger.info(f" {msg['role']}: {msg['content'][:100]}...") 
logger.info("") try: # Generate if test_type == "text": generated = generate_text( model, tokenizer, test_input, max_new_tokens=args.max_new_tokens, temperature=args.temperature, top_p=args.top_p, repetition_penalty=args.repetition_penalty, show_input_ids=args.show_input_ids ) else: # chat mode generated = generate_chat( model, tokenizer, test_input, max_new_tokens=args.max_new_tokens, temperature=args.temperature, top_p=args.top_p, repetition_penalty=args.repetition_penalty, enable_thinking=args.enable_thinking, add_generation_prompt=args.add_generation_prompt, show_template=args.show_template ) logger.info(f"Output: {generated}\n") # Compare with ground truth if available if args.compare_ground_truth and ground_truth: logger.info(f"Ground Truth: {ground_truth}\n") if generated.strip() == ground_truth.strip(): logger.info("✓ Match with ground truth") else: logger.info("✗ Does not match ground truth") except Exception as e: logger.error(f"Generation failed: {e}", exc_info=True) logger.info("-" * 60 + "\n") logger.info("=" * 60) logger.info("All tests completed!") logger.info("=" * 60) if __name__ == "__main__": main() ================================================ FILE: tokenizer/README.md ================================================ # Residual K-Means Tokenizer A residual K-means model for vector quantization. It encodes continuous embeddings into discrete codes through hierarchical clustering. > Public weights are available at [OpenOneRec/OneRec-tokenizer](https://huggingface.co/OpenOneRec/OneRec-tokenizer). > To utilize our foundation model, when using new datasets, the **embedding model** must be [Qwen3-8B-Embedding](https://huggingface.co/Qwen/Qwen3-Embedding-8B). ## Files - `res_kmeans.py` - Model definition - `train_res_kmeans.py` - Training script - `infer_res_kmeans.py` - Inference script ## Installation ```bash pip install torch numpy pandas pyarrow faiss tqdm ``` ## Usage ### Training ```bash python train_res_kmeans.py \ --data_path ./data/embeddings.parquet \ --model_path ./checkpoints \ --n_layers 3 \ --codebook_size 8192 \ --dim 4096 ``` **Arguments:** - `--data_path`: Path to parquet file(s) with `embedding` column - `--model_path`: Directory to save the model - `--n_layers`: Number of residual layers (default: 3) - `--codebook_size`: Size of each codebook (default: 8192) - `--dim`: Embedding dimension (default: 4096) - `--seed`: Random seed (default: 42) ### Inference ```bash python infer_res_kmeans.py \ --model_path ./checkpoints/model.pt \ --emb_path ./data/embeddings.parquet \ --output_path ./output/codes.parquet ``` **Arguments:** - `--model_path`: Path to trained model checkpoint - `--emb_path`: Path to parquet file with `pid` and `embedding` columns - `--output_path`: Output path (default: `{emb_path}_codes.parquet`) - `--batch_size`: Inference batch size (default: 10000) - `--device`: Device to use (default: cuda if available) - `--n_layers`: Number of layers to use (default: all) **Input format:** Parquet with columns `pid`, `embedding` **Output format:** Parquet with columns `pid`, `codes` ================================================ FILE: tokenizer/infer_res_kmeans.py ================================================ import argparse import torch import numpy as np import pandas as pd from res_kmeans import ResKmeans def load_embeddings(emb_path): """Load parquet file with pid and embedding columns""" df = pd.read_parquet(emb_path) pids = df['pid'].tolist() emb = torch.tensor(np.stack(df['embedding'].values), dtype=torch.float32) return pids, emb def 
main(): parser = argparse.ArgumentParser(description='ResKmeans Inference') parser.add_argument('--model_path', type=str, required=True, help='model checkpoint path') parser.add_argument('--emb_path', type=str, required=True, help='embedding file path') parser.add_argument('--output_path', type=str, default=None, help='output path (default: emb_path + _codes.parquet)') parser.add_argument('--batch_size', type=int, default=10000, help='inference batch size') parser.add_argument('--device', type=str, default='cuda' if torch.cuda.is_available() else 'cpu') parser.add_argument('--n_layers', type=int, default=None, help='number of layers to use (default: all layers)') args = parser.parse_args() # Load model print(f"Loading model from {args.model_path}") checkpoint = torch.load(args.model_path, map_location='cpu') if isinstance(checkpoint, ResKmeans): model = checkpoint elif isinstance(checkpoint, dict): # Restore from state_dict if 'model' in checkpoint: state_dict = checkpoint['model'] elif 'state_dict' in checkpoint: state_dict = checkpoint['state_dict'] else: state_dict = checkpoint # Infer model parameters n_layers = sum(1 for k in state_dict.keys() if k.startswith('centroids.')) first_centroid = state_dict['centroids.0'] codebook_size, dim = first_centroid.shape model = ResKmeans(n_layers=n_layers, codebook_size=codebook_size, dim=dim) model.load_state_dict(state_dict) else: raise ValueError("Unknown checkpoint format") model = model.to(args.device) model.eval() print(f"Model loaded: n_layers={model.n_layers}, codebook_size={model.codebook_size}, dim={model.dim}") # Load embeddings print(f"Loading embeddings from {args.emb_path}") pids, emb = load_embeddings(args.emb_path) print(f"Embeddings shape: {emb.shape}, num pids: {len(pids)}") # Inference print("Encoding...") all_codes = [] with torch.no_grad(): for i in range(0, len(emb), args.batch_size): batch = emb[i:i + args.batch_size].to(args.device) codes = model.encode(batch, n_layers=args.n_layers) all_codes.append(codes.cpu()) if (i // args.batch_size) % 10 == 0: print(f" Processed {min(i + args.batch_size, len(emb))}/{len(emb)}") all_codes = torch.cat(all_codes, dim=0) print(f"Output codes shape: {all_codes.shape}") # Save results to parquet output_path = args.output_path or args.emb_path.rsplit('.', 1)[0] + '_codes.parquet' df_out = pd.DataFrame({ 'pid': pids, 'codes': all_codes.numpy().tolist() }) df_out.to_parquet(output_path, index=False) print(f"Codes saved to {output_path}") # Compute reconstruction loss print("\nComputing reconstruction loss...") with torch.no_grad(): sample_size = min(10000, len(emb)) sample_emb = emb[:sample_size].to(args.device) sample_codes = all_codes[:sample_size].to(args.device) reconstructed = model.decode(sample_codes) loss_info = model.calc_loss(sample_emb, reconstructed) print(f"Reconstruction loss (MSE): {loss_info['loss']:.6f}") print(f"Relative loss: {loss_info['rel_loss']:.6f}") if __name__ == '__main__': main() ================================================ FILE: tokenizer/res_kmeans.py ================================================ import torch from torch import nn class ResKmeans(nn.Module): def __init__(self, n_layers, codebook_size, dim, extra_kmeans_config=None, **kwargs): super().__init__() self.n_layers = n_layers self.codebook_size = codebook_size self.dim = dim self.extra_kmeans_config = extra_kmeans_config self.centroids = nn.ParameterList([ nn.Parameter(torch.zeros((codebook_size,dim), requires_grad=False)) for i in range(n_layers) ]) def calc_loss(self, x, out, epsilon=1e-4): loss 
= ((out - x) ** 2).mean() rel_loss = (torch.abs(x - out) / (torch.maximum(torch.abs(x), torch.abs(out)) + epsilon)).mean() return {'loss': loss.item(), 'rel_loss': rel_loss.item()} def train_kmeans(self, inputs, verbose=True): import faiss kmeans = faiss.Kmeans(self.dim, self.codebook_size, spherical=False, **(self.extra_kmeans_config or {})) x = inputs.clone() out = torch.zeros_like(x) for l in range(self.n_layers): kmeans.train(x.numpy()) _, I = kmeans.index.search(x.numpy(), 1) I = I.reshape([-1]) o = torch.tensor(kmeans.centroids[I]) out += o if verbose: losses = self.calc_loss(inputs, out) print(l, losses) x = x - o self.centroids[l] = nn.Parameter(torch.tensor(kmeans.centroids.copy()), requires_grad=False) print(f"layer {l} finished") def encode(self, x, n_layers=None): if n_layers is None: n_layers = self.n_layers else: assert n_layers <= self.n_layers out = [] for l in range(n_layers): x_norm_sq = x.pow(2.).sum(dim=1, keepdim=True) codebook_t_norm_sq = self.centroids[l].T.pow(2.).sum(dim=0, keepdim=True) distances = torch.addmm(x_norm_sq + codebook_t_norm_sq, x, self.centroids[l].T, alpha=-2.0) code = distances.argmin(dim=-1) x = x - self.centroids[l][code] out.append(code) out = torch.stack(out, dim=1) return out def decode(self, code): out = torch.zeros((code.shape[0], self.dim), dtype=torch.float32, device=code.device) n_layers = code.shape[1] assert n_layers <= self.n_layers for l in range(n_layers): c = code[:, l] out += self.centroids[l][c] return out ================================================ FILE: tokenizer/train_res_kmeans.py ================================================ import os import argparse import random import numpy as np import torch import pyarrow.parquet as pq from tqdm import tqdm from res_kmeans import ResKmeans def read_train_data(path, emb_dim): """Read training data from local parquet files""" dataset = pq.ParquetDataset(path) fragments = list(dataset.fragments) random.shuffle(fragments) print(f"Total files: {len(fragments)}") embeddings = [] current_size = 0 for fragment in tqdm(fragments, desc="Reading files"): table = fragment.to_table(columns=['embedding']) if table.num_rows == 0: continue emb_chunk = table['embedding'].to_numpy(zero_copy_only=False) if emb_chunk.dtype == 'object': emb_chunk = np.vstack(emb_chunk) emb_chunk = emb_chunk[:, :emb_dim].astype(np.float32) embeddings.append(emb_chunk) current_size += len(emb_chunk) result = np.concatenate(embeddings, axis=0) print(f"Final shape: {result.shape}") return result def main(): parser = argparse.ArgumentParser(description='Train ResKmeans') parser.add_argument('--data_path', type=str, required=True, help='training data path') parser.add_argument('--model_path', type=str, required=True, help='model save path') parser.add_argument('--n_layers', type=int, default=3, help='number of layers') parser.add_argument('--codebook_size', type=int, default=8192, help='codebook size') parser.add_argument('--dim', type=int, default=4096, help='embedding dimension') parser.add_argument('--niter', type=int, default=20, help='kmeans iterations') parser.add_argument('--seed', type=int, default=42, help='random seed') args = parser.parse_args() random.seed(args.seed) np.random.seed(args.seed) # Load data embeddings = read_train_data(args.data_path, args.dim) # Create and train model model = ResKmeans( n_layers=args.n_layers, codebook_size=args.codebook_size, dim=args.dim, extra_kmeans_config={'niter': args.niter}, ) model.train_kmeans(torch.tensor(embeddings)) # Save model os.makedirs(args.model_path, exist_ok=True) save_path = os.path.join(args.model_path, "model.pt") 
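# NOTE: only the state_dict is saved here; infer_res_kmeans.py rebuilds the module by inferring n_layers / codebook_size / dim from the 'centroids.*' keys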
torch.save(model.state_dict(), save_path) print(f"Model saved to {save_path}") if __name__ == '__main__': main() ================================================ FILE: verl_distillation/LICENSE ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. 
Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: verl_distillation/README.md ================================================ ## Overview This repository is built on top of the open-source [**verl**](https://github.com/volcengine/verl) (HybridFlow RLHF/RL training framework) and adds support for **on-policy distillation**. It is designed for scenarios where the **teacher and student use different vocabularies**, e.g., distilling from `Qwen3` (teacher) to a recommendation-pretrained model (student) that contains **extended itemic tokens**, while improving and preserving general-purpose capabilities. > **Note**: This repository is forked from [verl](https://github.com/volcengine/verl) at commit [`703a078`](https://github.com/volcengine/verl/commit/703a07856fe2544833dfce51136f386654574b30) and extended with on-policy distillation capabilities. The high-level idea is briefly described in the OpenOneRec technical report, Section **5.2 On-policy Distillation for General Capability**: [OneRecBench.pdf](OneRecBench.pdf). ## Key Features - **On-policy distillation entrypoint**: `recipe/onpolicy_distill/main_onpolicy_distill.py` - **Distillation trainer**: `recipe/onpolicy_distill/onpolicy_distill_trainer.py` - **Teacher/Student vocabulary mismatch support** - Generates `distill_special_token_mask` during rollout - Replaces/masks extended-vocab tokens during log-probability computation to improve training stability - **OneRec dataset adapter (parquet → chat)**: `verl/utils/dataset/onerec_dataset.py` - Optionally appends `/think` or `/no_think` to the user prompt (force/auto modes) - **Algorithm and metrics extensions** - `AdvantageEstimator.ON_POLICY_DISTILL` - `compute_on_policy_distill_data_metrics(...)` ## Quick Start ### Installation ```bash # Configure hostfile (multi-node) cat > /etc/mpi/hostfile << EOF 192.168.1.100 192.168.1.101 192.168.1.102 EOF # Install dependencies # For Single node bash deploy_env.sh # For Multi-node bash deploy_env.sh --all-nodes # Start Ray cluster bash init_ray_cluster.sh ``` ### Required environment variables ```bash # Required: model and data paths export BASE_MODEL=/path/to/student_model export TEACHER_MODEL=/path/to/teacher_model # e.g. Qwen3-1.7B # Optional: extended-vocabulary distillation settings (defaults in the script) export EXTEND_VOCAB_START_TOKEN=151669 # token_id >= this value is treated as an "extended vocab token" export MASK_RESPONSE_IF_HAVE_EXTEND_TOKEN=False # mask the whole response if any extended token appears # Optional: advantage clipping bounds for distillation (defaults in the script) export DISTILL_ADV_MAX=5.0 # upper bound export DISTILL_ADV_MIN=-30.0 # lower bound ``` **`EXTEND_VOCAB_START_TOKEN`** is used for teacher/student vocabulary mismatch. If the student model introduces additional tokens on top of the base vocabulary (e.g., item tokens for recommendation), set this threshold to the first extended token id. During rollout, the framework produces `distill_special_token_mask`; during log-probability computation, extended-vocab tokens are replaced/masked to maintain stability. 
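As a rough illustration of how these pieces fit together, here is a minimal sketch (our own example, not code from this repository): the function name, tensor shapes, and defaults are assumptions, while `distill_special_token_mask`, the extended-vocab threshold, and the clipping bounds mirror the environment variables above and the advantage definition described next.

```python
# Minimal sketch (not this repo's implementation): extended-vocab token masking
# combined with the clipped token-level reverse-KL advantage.
from typing import Tuple

import torch


def distill_advantage(
    student_log_probs: torch.Tensor,   # [batch, seq] log p_student of the sampled tokens
    teacher_log_probs: torch.Tensor,   # [batch, seq] log p_teacher of the same tokens
    response_token_ids: torch.Tensor,  # [batch, seq] sampled token ids
    extend_vocab_start_token: int = 151669,  # mirrors EXTEND_VOCAB_START_TOKEN
    distill_adv_min: float = -30.0,          # mirrors DISTILL_ADV_MIN
    distill_adv_max: float = 5.0,            # mirrors DISTILL_ADV_MAX
) -> Tuple[torch.Tensor, torch.Tensor]:
    # Tokens outside the teacher vocabulary (id >= threshold) are flagged
    distill_special_token_mask = response_token_ids >= extend_vocab_start_token
    # Token-level reverse-KL signal: A = -(log p_student - log p_teacher)
    advantage = -(student_log_probs - teacher_log_probs)
    # Clip to avoid extreme values when the two distributions differ substantially
    advantage = advantage.clamp(min=distill_adv_min, max=distill_adv_max)
    # Zero out positions the teacher cannot score
    advantage = advantage.masked_fill(distill_special_token_mask, 0.0)
    return advantage, distill_special_token_mask
```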
**`DISTILL_ADV_MAX / DISTILL_ADV_MIN`** clip the distillation advantage to avoid extreme values when the teacher and student distributions differ substantially. The distillation signal is token-level reverse KL: $$A = -(\log p_{\text{student}} - \log p_{\text{teacher}})$$ ### Launch training The training entry script is located at `recipe/onpolicy_distill/run_qwen3_distill.sh`. ```bash bash recipe/onpolicy_distill/run_qwen3_distill.sh /etc/mpi/hostfile ``` Notes: - The script defaults to **console-only logging** (`trainer.logger=[console]`). To use W&B, export `WANDB_API_KEY` and override `trainer.logger=[console,wandb]` in the script/CLI. - Hydra config entrypoint: `recipe/onpolicy_distill/config/onpolicy_distill_trainer.yaml` (reuses the base config from [verl](https://github.com/volcengine/verl)). ## Data Format (parquet) `OneRecDataset` reads the `messages` field from parquet (either a list, or a string-serialized list) and constructs: - `prompt`: all messages except the last one - `ground_truth`: the content of the last message (used for reward payload / analysis) It is recommended to keep a `source` or `data_source` field for per-task statistics. ## Key Implementation Details (for reproducibility) - **Distillation signal (reverse KL)** - Implemented in `verl/trainer/ppo/core_algos.py` as: \(A = -(\log p_{\text{student}} - \log p_{\text{teacher}})\) - Enabled via the `compute_advantage(...)` branch in `verl/trainer/ppo/ray_trainer.py`, with support for `distill_adv_max_clip / distill_adv_min_clip`. - **Extended vocabulary handling** - `extend_vocab_start_token`: tokens with id \(\ge\) this threshold are treated as "extended vocab tokens" - `ToolAgentLoop` emits `distill_special_token_mask` (optionally truncating/masking the response) - `dp_actor.compute_log_prob(..., mask_special_token=True)` replaces/masks extended-vocab tokens and overwrites the corresponding log-prob entries (via `ref_log_prob_replace_val`) --- ## 🙏 Acknowledgements This repository is built upon and extended from the open-source [**verl**](https://github.com/volcengine/verl) project. We sincerely thank the verl team for their excellent work on the HybridFlow RLHF/RL training framework, which provides the solid foundation for our on-policy distillation implementation. ================================================ FILE: verl_distillation/README_ORIGINAL.md ================================================
👋 Hi, everyone! verl is an RL training library initiated by the ByteDance Seed team and maintained by the verl community.

[![GitHub Repo stars](https://img.shields.io/github/stars/volcengine/verl)](https://github.com/volcengine/verl/stargazers) [![Twitter](https://img.shields.io/twitter/follow/verl_project)](https://twitter.com/verl_project) [![Documentation](https://img.shields.io/badge/documentation-blue)](https://verl.readthedocs.io/en/latest/)
![seed logo](https://github.com/user-attachments/assets/c42e675e-497c-4508-8bb9-093ad4d1f216)

# verl: Volcano Engine Reinforcement Learning for LLMs

verl is a flexible, efficient and production-ready RL training library for large language models (LLMs). verl is the open-source version of **[HybridFlow: A Flexible and Efficient RLHF Framework](https://arxiv.org/abs/2409.19256v2)** paper. verl is flexible and easy to use with: - **Easy extension of diverse RL algorithms**: The hybrid-controller programming model enables flexible representation and efficient execution of complex post-training dataflows. Build RL dataflows such as GRPO, PPO in a few lines of code. - **Seamless integration of existing LLM infra with modular APIs**: Decouples computation and data dependencies, enabling seamless integration with existing LLM frameworks, such as FSDP, Megatron-LM, vLLM, SGLang, etc - **Flexible device mapping**: Supports various placement of models onto different sets of GPUs for efficient resource utilization and scalability across different cluster sizes. - Ready integration with popular HuggingFace models verl is fast with: - **State-of-the-art throughput**: SOTA LLM training and inference engine integrations and SOTA RL throughput. - **Efficient actor model resharding with 3D-HybridEngine**: Eliminates memory redundancy and significantly reduces communication overhead during transitions between training and generation phases.

## News - [2025/08] verl is presented in the [PyTorch Expert Exchange Webinar](https://www.youtube.com/watch?v=Vd79NmmqY3Q&t=2s). [Slides](https://github.com/eric-haibin-lin/verl-community/blob/main/slides/verl_talk_pytorch_2025_08.pdf) available. - [2025/07] The [ReTool](https://arxiv.org/pdf/2504.11536) recipe is fully open sourced. [Blog](https://www.notion.so/verl-reTool-recipe-Using-multi-round-conversations-and-code-sandboxing-to-improve-the-math-of-large-23a8b5b7feba80b386b2e5b5e3c1cde0) - [2025/07] The first verl meetup will be held at ICML Vancouver on July 16th! Please [join us](https://lu.ma/0ek2nyao) if you are at ICML! (onsite only) - [2025/06] verl with Megatron backend enables large MoE models such as [DeepSeek-671B and Qwen3-235B](https://verl.readthedocs.io/en/latest/perf/dpsk.html). - [2025/03] [DAPO](https://dapo-sia.github.io/) is the open-sourced SOTA RL algorithm that achieves 50 points on AIME 2024 based on the Qwen2.5-32B pre-trained model, surpassing the previous SOTA achieved by DeepSeek's GRPO (DeepSeek-R1-Zero-Qwen-32B). DAPO's training is fully powered by verl and the reproduction code is available in `recipe/dapo` now.
more...
- [2025/04] [Seed-Thinking-v1.5](https://github.com/ByteDance-Seed/Seed-Thinking-v1.5/blob/main/seed-thinking-v1.5.pdf) tech report is released! Trained with verl, Seed-Thinking-v1.5 achieves 86.7 on AIME 2024, 55.0 on Codeforces and 77.3 on GPQA, demonstrating excellent reasoning abilities in STEM and coding. Beyond reasoning tasks, the method demonstrates notable generalization across diverse domains.
- [2025/07] verl keynote at [AWS AI Hours Singapore](https://pages.awscloud.com/aws-ai-hours-sg.html#agenda) on 7/8, verl & verl-agent project updates at [Agent for SWE meetup](https://lu.ma/e498qhsi) by LF AI & Data Singapore on 7/11.
- [2025/06] The verl team will provide the latest project updates at [PyTorch Day China](https://www.lfasiallc.com/pytorch-day-china/) on June 7th. Meet our dev team in Beijing!
- [2025/04] The [VAPO](https://arxiv.org/pdf/2504.05118) (value-based augmented PPO) paper covers our latest RL method for reasoning models. Trained from the Qwen-32B-base model, VAPO achieves 60.4 on AIME 2024, outperforming DAPO-32B.
- [2025/05] [PF-PPO](https://arxiv.org/abs/2409.06957), accepted to ICML 2025, is now supported in verl! PF-PPO enhances policy learning efficiency and robustness by filtering potentially noisy reward signals and reusing high-quality experiences via a replay buffer.
- [2025/04] We will give a tutorial about the latest post-training techniques and a programming guide for verl at the [ICLR 2025 Expo](https://iclr.cc/virtual/2025/calendar?filter_events=Expo+Talk+Panel&filter_rooms=), [SCI-FM workshop](https://open-foundation-model.github.io/) and [LMSys afterparty](https://lu.ma/d23nyynm). Talk materials available [here](https://github.com/eric-haibin-lin/verl-community/tree/main/iclr25).
- [2025/03] verl v0.3.0.post1 is released! See the [release note](https://github.com/volcengine/verl/releases/) for details. It achieves [~1.4x speedup](https://tongyx361.github.io/blogs/posts/verl-intro/#/verl-flexible-and-efficient-rl-for-llms) compared to previous versions.
- [2025/05] verl will be presented at [A2M Shanghai](https://a2m.msup.com.cn/home/?aid=4488&city=shanghai) on 5/16 - 5/17.
- [2025/05] verl will be presented at [GOSIM x PyTorch Day 2025](https://paris2025.gosim.org/). See you in Paris!
- [2025/03] We introduced the programming model of verl at the [vLLM Beijing Meetup](https://mp.weixin.qq.com/s/n77GibL2corAtQHtVEAzfg) and [verl intro and updates](https://github.com/eric-haibin-lin/verl-community/blob/main/slides/verl-lmsys-meetup.pdf) at the [SGLang-LMSYS Org Meetup](https://lu.ma/ntjrr7ig) in Sunnyvale mid-March.
- [2025/03] We will present verl (HybridFlow) at EuroSys 2025. See you in Rotterdam!
- [2025/02] verl v0.2.0.post2 is released!
- [2025/02] We presented verl at the Bytedance/NVIDIA/Anyscale Ray Meetup. See you in San Jose!
- [2025/01] [Doubao-1.5-pro](https://team.doubao.com/zh/special/doubao_1_5_pro) is released with SOTA-level performance on LLM & VLM. The RL scaling preview model is trained using verl, reaching OpenAI O1-level performance on math benchmarks (70.0 pass@1 on AIME).
- [2024/12] verl is presented at Ray Forward 2024. Slides available here
- [2024/12] The team presented Post-training LLMs: From Algorithms to Infrastructure at NeurIPS 2024. Slides and video available.
- [2024/10] verl is presented at Ray Summit. Youtube video available.
- [2024/08] HybridFlow (verl) is accepted to EuroSys 2025.
## Key Features

- **FSDP**, **FSDP2** and **Megatron-LM** for training.
- **vLLM**, **SGLang** and **HF Transformers** for rollout generation.
- Compatible with Hugging Face Transformers and ModelScope Hub: [Qwen-3](https://github.com/volcengine/verl/blob/main/examples/grpo_trainer/run_qwen3-8b.sh), Qwen-2.5, Llama3.1, Gemma2, DeepSeek-LLM, etc.
- Supervised fine-tuning.
- Reinforcement learning with [PPO](examples/ppo_trainer/), [GRPO](examples/grpo_trainer/), [GSPO](recipe/gspo/), [ReMax](examples/remax_trainer/), [REINFORCE++](https://verl.readthedocs.io/en/latest/examples/config.html#algorithm), [RLOO](examples/rloo_trainer/), [PRIME](recipe/prime/), [DAPO](recipe/dapo/), [DrGRPO](recipe/drgrpo), [KL_Cov & Clip_Cov](recipe/entropy), etc.
- Support for model-based rewards and function-based (verifiable) rewards for math, [coding](https://github.com/volcengine/verl/tree/main/recipe/dapo), etc.; see the reward-function sketch after this list.
- Support for vision-language models (VLMs) and [multi-modal RL](examples/grpo_trainer/run_qwen2_5_vl-7b.sh) with Qwen2.5-vl and Kimi-VL.
- [Multi-turn with tool calling](https://github.com/volcengine/verl/tree/main/examples/sglang_multiturn)
- LLM alignment recipes such as [Self-play preference optimization (SPPO)](https://github.com/volcengine/verl/tree/main/recipe/sppo)
- Flash attention 2, [sequence packing](examples/ppo_trainer/run_qwen2-7b_seq_balance.sh), [sequence parallelism](examples/ppo_trainer/run_deepseek7b_llm_sp2.sh) support via DeepSpeed Ulysses, [LoRA](examples/sft/gsm8k/run_qwen_05_peft.sh), and [Liger-kernel](examples/sft/gsm8k/run_qwen_05_sp2_liger.sh).
- Scales up to 671B models and hundreds of GPUs with [expert parallelism](https://github.com/volcengine/verl/pull/1467).
- Multi-GPU [LoRA RL](https://verl.readthedocs.io/en/latest/advance/ppo_lora.html) support to save memory.
- Experiment tracking with wandb, swanlab, mlflow and tensorboard.
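To illustrate the function-based (verifiable) reward mentioned above, here is a minimal sketch of a rule-based scorer for math answers. The `\boxed{...}` extraction convention and the `compute_score` signature are illustrative assumptions, not verl's exact reward-function interface; see the reward-function docs linked below for the real contract.

```python
# Minimal sketch of a verifiable reward: score 1.0 iff the model's final
# boxed answer matches the reference. Names and conventions are illustrative.
import re

def compute_score(solution_str: str, ground_truth: str) -> float:
    match = re.search(r"\\boxed\{([^}]*)\}", solution_str)
    if match is None:
        return 0.0  # no parseable final answer
    return 1.0 if match.group(1).strip() == ground_truth.strip() else 0.0

assert compute_score(r"... so the answer is \boxed{42}", "42") == 1.0
assert compute_score("no boxed answer here", "42") == 0.0
```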
## Upcoming Features and Changes

- Q3 roadmap: https://github.com/volcengine/verl/issues/2388
- DeepSeek 671B optimizations with Megatron: https://github.com/volcengine/verl/issues/1033
- Multi-turn rollout and tool-use optimizations: https://github.com/volcengine/verl/issues/1882
- [Agent integration](https://github.com/volcengine/verl/tree/main/verl/experimental/agent_loop)
- Async and off-policy architecture: https://github.com/volcengine/verl/pull/2231
- List of breaking changes since v0.4: https://github.com/volcengine/verl/discussions/2270

## Getting Started Documentation

**Quickstart:**

- [Installation](https://verl.readthedocs.io/en/latest/start/install.html)
- [Quickstart](https://verl.readthedocs.io/en/latest/start/quickstart.html)
- [Programming Guide](https://verl.readthedocs.io/en/latest/hybrid_flow.html) & [Tech Talk](https://hcqnc.xetlk.com/sl/3vACOK) (in Chinese)
- [PPO in verl](https://verl.readthedocs.io/en/latest/algo/ppo.html)
- [GRPO in verl](https://verl.readthedocs.io/en/latest/algo/grpo.html)

**Running a PPO example step-by-step:**

- [Prepare Data for Post-Training](https://verl.readthedocs.io/en/latest/preparation/prepare_data.html)
- [Implement Reward Function for Dataset](https://verl.readthedocs.io/en/latest/preparation/reward_function.html)
- [PPO Example Architecture](https://verl.readthedocs.io/en/latest/examples/ppo_code_architecture.html)
- [Config Explanation](https://verl.readthedocs.io/en/latest/examples/config.html)

**Reproducible algorithm baselines:**

- [RL performance on coding, math](https://verl.readthedocs.io/en/latest/algo/baseline.html)

**For code explanation and advanced usage (extension):**

- PPO Trainer and Workers
  - [PPO Ray Trainer](https://verl.readthedocs.io/en/latest/workers/ray_trainer.html)
  - [PyTorch FSDP Backend](https://verl.readthedocs.io/en/latest/workers/fsdp_workers.html)
  - [Megatron-LM Backend](https://verl.readthedocs.io/en/latest/index.html)
- Advanced Usage and Extension
  - [Add Models with the FSDP Backend](https://verl.readthedocs.io/en/latest/advance/fsdp_extension.html)
  - [Add Models with the Megatron-LM Backend](https://verl.readthedocs.io/en/latest/advance/megatron_extension.html)
  - [Multi-turn Rollout Support](https://verl.readthedocs.io/en/latest/sglang_multiturn/multiturn.html)
  - [Search Tool Integration](https://verl.readthedocs.io/en/latest/sglang_multiturn/search_tool_example.html)
  - [Sandbox Fusion Integration](https://verl.readthedocs.io/en/latest/examples/sandbox_fusion_example.html)
  - [Deployment using Separate GPU Resources](https://github.com/volcengine/verl/tree/main/examples/split_placement)
  - [Extend to Other RL(HF) algorithms](https://verl.readthedocs.io/en/latest/advance/dpo_extension.html)
  - [Ray API design tutorial](https://verl.readthedocs.io/en/latest/advance/placement.html)

**Blogs from the community:**

- [When Reasoning Models Break Tokenization: The Hidden Complexity of Multiturn Training](https://github.com/zhaochenyang20/Awesome-ML-SYS-Tutorial/blob/main/rlhf/verl/multi-turn/fast_tokenization/multiturn_tokenization_and_masking.md)
- [verl deployment on AWS SageMaker](https://medium.com/@kaige.yang0110/run-verl-on-sagemaker-using-4x8-l40s-gpus-8e6d5c3c61d3)
- [verl x SGLang Multi-turn Code Walkthrough](https://github.com/zhaochenyang20/Awesome-ML-SYS-Tutorial/blob/main/rlhf/verl/multi-turn/code-walk-through/readme_EN.md)
- [Optimizing SGLang Memory Usage in verl](https://hebiao064.github.io/rl-memory-management)
- [SGLang, verl, OpenBMB and Tsinghua University: Pioneering End-to-End Multi-Turn RLHF](https://github.com/zhaochenyang20/Awesome-ML-SYS-Tutorial/blob/main/rlhf/verl/multi-turn/verl-multiturn-rollout-Release.md)
- [Reinforcement Learning from Human Feedback on AMD GPUs with verl and ROCm Integration](https://rocm.blogs.amd.com/artificial-intelligence/verl-large-scale/README.html)
- [veMLP x verl: Mastering Reinforcement Learning Training](https://mp.weixin.qq.com/s/7nbqxk4knMGd-hQE9ls2tA) (in Chinese)
- [Best Practices for Distributed GRPO Reinforcement Learning Training with verl](https://www.volcengine.com/docs/6459/1463942) (in Chinese)
- [A Brief Analysis of the HybridFlow (verl) Paper](https://github.com/zhaochenyang20/Awesome-ML-SYS-Tutorial/blob/main/rlhf/verl/readme.md) (in Chinese)
- [Up to 20x Throughput Improvement! The Doubao LLM Team Releases a Brand-New RLHF Framework, Now Open-Sourced!](https://team.doubao.com/en/blog/%E6%9C%80%E9%AB%98%E6%8F%90%E5%8D%8720%E5%80%8D%E5%90%9E%E5%90%90%E9%87%8F-%E8%B1%86%E5%8C%85%E5%A4%A7%E6%A8%A1%E5%9E%8B%E5%9B%A2%E9%98%9F%E5%8F%91%E5%B8%83%E5%85%A8%E6%96%B0-rlhf-%E6%A1%86%E6%9E%B6-%E7%8E%B0%E5%B7%B2%E5%BC%80%E6%BA%90) (in Chinese)

## Performance Tuning Guide

Performance is essential for on-policy RL algorithms. We have written a detailed [performance tuning guide](https://verl.readthedocs.io/en/latest/perf/perf_tuning.html) to help you optimize performance.

## Upgrade to vLLM >= v0.8.2

verl now supports vLLM >= 0.8.2 when using FSDP as the training backend. Please refer to [this document](https://github.com/volcengine/verl/blob/main/docs/README_vllm0.8.md) for the installation guide and more information. Please avoid vLLM 0.7.x, which contains bugs that may lead to OOMs and unexpected errors.

## Use Latest SGLang

SGLang is fully supported with verl, and the SGLang RL Group is working extensively on building unique features, including multi-turn agentic RL, VLM RLHF, server-based RL, and partial rollout. Please refer to [this document](https://verl.readthedocs.io/en/latest/workers/sglang_worker.html) for the installation guide and more information.

## Upgrade to FSDP2

verl is fully embracing FSDP2! FSDP2 is recommended by the torch distributed team: it provides better throughput and memory usage, and is composable with other features (e.g. torch.compile). To enable FSDP2, simply use verl main and set the following options:

```
actor_rollout_ref.ref.strategy=fsdp2
actor_rollout_ref.actor.strategy=fsdp2
critic.strategy=fsdp2
reward_model.strategy=fsdp2
```

Furthermore, FSDP2 CPU offloading is compatible with gradient accumulation. You can turn it on to save memory with `actor_rollout_ref.actor.fsdp_config.offload_policy=True`. For more details, see https://github.com/volcengine/verl/pull/1026.

## AMD Support (ROCm Kernel)

verl now supports FSDP as the training engine (Megatron support coming soon) and integrates with both vLLM and SGLang as inference engines. Please refer to [this document](https://github.com/volcengine/verl/blob/main/docs/amd_tutorial/amd_build_dockerfile_page.rst) for the installation guide and more information, and [this document](https://github.com/volcengine/verl/blob/main/docs/amd_tutorial/amd_vllm_page.rst) for vLLM performance tuning on ROCm.
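For orientation, here is a hedged sketch of how options like the FSDP2 settings above are passed as Hydra-style overrides on the trainer command line. The dataset and model paths are placeholders and only a subset of the required keys is shown; the quickstart and config docs linked earlier list the full set:

```bash
# Illustrative launch only: placeholder paths, partial key set.
python3 -m verl.trainer.main_ppo \
    algorithm.adv_estimator=grpo \
    data.train_files=/path/to/train.parquet \
    data.val_files=/path/to/val.parquet \
    actor_rollout_ref.model.path=Qwen/Qwen2.5-7B-Instruct \
    actor_rollout_ref.ref.strategy=fsdp2 \
    actor_rollout_ref.actor.strategy=fsdp2 \
    actor_rollout_ref.actor.fsdp_config.offload_policy=True \
    critic.strategy=fsdp2 \
    reward_model.strategy=fsdp2 \
    trainer.n_gpus_per_node=8 \
    trainer.nnodes=1
```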
## Citation and acknowledgement

If you find the project helpful, please cite:

- [HybridFlow: A Flexible and Efficient RLHF Framework](https://arxiv.org/abs/2409.19256v2)
- [A Framework for Training Large Language Models for Code Generation via Proximal Policy Optimization](https://i.cs.hku.hk/~cwu/papers/gmsheng-NL2Code24.pdf)

```bibtex
@article{sheng2024hybridflow,
  title   = {HybridFlow: A Flexible and Efficient RLHF Framework},
  author  = {Guangming Sheng and Chi Zhang and Zilingfeng Ye and Xibin Wu and Wang Zhang and Ru Zhang and Yanghua Peng and Haibin Lin and Chuan Wu},
  year    = {2024},
  journal = {arXiv preprint arXiv:2409.19256}
}
```

verl is inspired by the design of NeMo-Aligner, DeepSpeed-Chat and OpenRLHF. The project has been adopted and contributed to by Bytedance, Anyscale, LMSys.org, the [Alibaba Qwen team](https://github.com/QwenLM/), Shanghai AI Lab, Tsinghua University, UC Berkeley, UCLA, UIUC, University of Hong Kong, ke.com, [All Hands AI](https://www.all-hands.dev/), [ModelBest](http://modelbest.cn/), JD AI Lab, Microsoft Research, [StepFun](https://www.stepfun.com/), Amazon, LinkedIn, Meituan, [Camel-AI](https://www.camel-ai.org/), [OpenManus](https://github.com/OpenManus), Xiaomi, NVIDIA Research, [Baichuan](https://www.baichuan-ai.com/home), [RedNote](https://www.xiaohongshu.com/), [SwissAI](https://www.swiss-ai.org/), [Moonshot AI (Kimi)](https://www.moonshot-ai.com/), Baidu, Snowflake, Skywork.ai, JetBrains, [IceSword Lab](https://www.iceswordlab.com), and many more.

## Awesome work using verl

- [TinyZero](https://github.com/Jiayi-Pan/TinyZero): a reproduction of the **DeepSeek R1 Zero** recipe for reasoning tasks ![GitHub Repo stars](https://img.shields.io/github/stars/Jiayi-Pan/TinyZero)
- [SkyThought](https://github.com/NovaSky-AI/SkyThought): RL training for Sky-T1-7B by the NovaSky AI team ![GitHub Repo stars](https://img.shields.io/github/stars/NovaSky-AI/SkyThought)
- [simpleRL-reason](https://github.com/hkust-nlp/simpleRL-reason): SimpleRL-Zoo: Investigating and Taming Zero Reinforcement Learning for Open Base Models in the Wild ![GitHub Repo stars](https://img.shields.io/github/stars/hkust-nlp/simpleRL-reason)
- [Easy-R1](https://github.com/hiyouga/EasyR1): **Multi-modal** RL training framework ![GitHub Repo stars](https://img.shields.io/github/stars/hiyouga/EasyR1)
- [OpenManus-RL](https://github.com/OpenManus/OpenManus-RL): LLM agents RL tuning framework for multiple agent environments ![GitHub Repo stars](https://img.shields.io/github/stars/OpenManus/OpenManus-RL)
- [rllm](https://github.com/agentica-project/rllm): async RL training with [verl-pipeline](https://github.com/agentica-project/verl-pipeline) ![GitHub Repo stars](https://img.shields.io/github/stars/agentica-project/rllm)
- [RAGEN](https://github.com/ZihanWang314/ragen): a general-purpose reasoning **agent** training framework ![GitHub Repo stars](https://img.shields.io/github/stars/ZihanWang314/ragen)
- [Search-R1](https://github.com/PeterGriffinJin/Search-R1): RL with reasoning and **searching (tool-call)** interleaved LLMs ![GitHub Repo stars](https://img.shields.io/github/stars/PeterGriffinJin/Search-R1)
- [ReSearch](https://github.com/Agent-RL/ReSearch): Learning to **Re**ason with **Search** for LLMs via Reinforcement Learning ![GitHub Repo stars](https://img.shields.io/github/stars/Agent-RL/ReSearch)
- [Skywork-OR1](https://github.com/SkyworkAI/Skywork-OR1): Skywork open reasoner series ![GitHub Repo stars](https://img.shields.io/github/stars/SkyworkAI/Skywork-OR1)
- [ToRL](https://github.com/GAIR-NLP/ToRL): Scaling tool-integrated RL ![GitHub Repo stars](https://img.shields.io/github/stars/GAIR-NLP/ToRL)
- [Absolute Zero Reasoner](https://github.com/LeapLabTHU/Absolute-Zero-Reasoner): [a self-play framework for reasoning without human-curated data](https://arxiv.org/abs/2505.03335) ![GitHub Repo stars](https://img.shields.io/github/stars/LeapLabTHU/Absolute-Zero-Reasoner)
- [verl-agent](https://github.com/langfengQ/verl-agent): A scalable training framework for **long-horizon LLM/VLM agents**, along with a new algorithm, **GiGPO** ![GitHub Repo stars](https://img.shields.io/github/stars/langfengQ/verl-agent)
- [RL-Factory](https://github.com/Simple-Efficient/RL-Factory): An easy and efficient RL post-training framework for agentic learning ![GitHub Repo stars](https://img.shields.io/github/stars/Simple-Efficient/RL-Factory)
- [ReTool](https://retool-rl.github.io/): ReTool: reinforcement learning for strategic tool use in LLMs. Code release is in progress...
- [verl-tool](https://github.com/TIGER-AI-Lab/verl-tool): A unified and easy-to-extend tool-agent training framework based on verl ![GitHub Repo stars](https://img.shields.io/github/stars/TIGER-AI-Lab/verl-tool)
- [PRIME](https://github.com/PRIME-RL/PRIME): Process reinforcement through implicit rewards ![GitHub Repo stars](https://img.shields.io/github/stars/PRIME-RL/PRIME)
- [MemAgent](https://github.com/BytedTsinghua-SIA/MemAgent): MemAgent: Reshaping Long-Context LLM with Multi-Conv RL-based Memory Agent ![GitHub Repo stars](https://img.shields.io/github/stars/BytedTsinghua-SIA/MemAgent)
- [POLARIS](https://github.com/ChenxinAn-fdu/POLARIS): A post-training recipe for scaling RL on advanced reasoning models ![GitHub Repo stars](https://img.shields.io/github/stars/ChenxinAn-fdu/POLARIS)
- [GUI-R1](https://github.com/ritzz-ai/GUI-R1): **GUI-R1**: A Generalist R1-style Vision-Language Action Model For **GUI Agents** ![GitHub Repo stars](https://img.shields.io/github/stars/ritzz-ai/GUI-R1)
- [DeepRetrieval](https://github.com/pat-jj/DeepRetrieval): RL Training of **Search Agent** with **Search/Retrieval Outcome** ![GitHub Repo stars](https://img.shields.io/github/stars/pat-jj/DeepRetrieval)
- [Code-R1](https://github.com/ganler/code-r1): Reproducing R1 for **Code** with Reliable Rewards ![GitHub Repo stars](https://img.shields.io/github/stars/ganler/code-r1)
- [DeepResearcher](https://github.com/GAIR-NLP/DeepResearcher): Scaling deep research via reinforcement learning in real-world environments ![GitHub Repo stars](https://img.shields.io/github/stars/GAIR-NLP/DeepResearcher)
- [VAGEN](https://github.com/RAGEN-AI/VAGEN): Training VLM agents with multi-turn reinforcement learning ![GitHub Repo stars](https://img.shields.io/github/stars/RAGEN-AI/VAGEN)
- [RM-R1](https://arxiv.org/abs/2505.02387): RL training of reasoning reward models ![GitHub Repo stars](https://img.shields.io/github/stars/RM-R1-UIUC/RM-R1)
- [LUFFY](https://arxiv.org/pdf/2504.14945): Learning to Reason under Off-Policy Guidance ![GitHub Repo stars](https://img.shields.io/github/stars/ElliottYan/LUFFY)
- [DeepMath](https://github.com/zwhe99/DeepMath): DeepMath-103K data and series models for math reasoning ![GitHub Repo stars](https://img.shields.io/github/stars/zwhe99/DeepMath)
- [PACS](https://github.com/ritzz-ai/PACS): Implicit Actor-Critic Coupling via a Supervised Learning Framework for RLVR ![GitHub Repo stars](https://img.shields.io/github/stars/ritzz-ai/PACS)
- [Entropy Mechanism of RL](https://github.com/PRIME-RL/Entropy-Mechanism-of-RL): The Entropy Mechanism of Reinforcement Learning for Large Language Model Reasoning ![GitHub Repo stars](https://img.shields.io/github/stars/PRIME-RL/Entropy-Mechanism-of-RL)
- [LLaSA-TTS-GRPO](https://github.com/channel-io/ch-tts-llasa-rl-grpo): TTS fine-tuning with GRPO optimization based on LLaSA models ![GitHub Repo stars](https://img.shields.io/github/stars/channel-io/ch-tts-llasa-rl-grpo)
- [PF-PPO](https://arxiv.org/abs/2409.06957): Policy Filtration for PPO based on the reliability of reward signals for more efficient and robust RLHF.
- [RACRO](https://github.com/gyhdog99/RACRO2): Build multi-modal reasoning models by decoupling reasoning into query-conditioned captioning and text-only reasoning ![GitHub Repo stars](https://img.shields.io/github/stars/gyhdog99/RACRO2)
- [Agent Lightning](https://github.com/microsoft/agent-lightning): A flexible and extensible framework that enables seamless agent optimization for any existing agent framework ![GitHub Repo stars](https://img.shields.io/github/stars/microsoft/agent-lightning)
- [VTool-R1](https://github.com/VTOOL-R1/vtool-r1): VLMs Learn to Think with Images via Reinforcement Learning on Multimodal Tool Use ![GitHub Repo stars](https://img.shields.io/github/stars/VTOOL-R1/vtool-r1)
- [Kimina-Prover-RL](https://github.com/project-numina/kimina-prover-rl/tree/main/recipe/kimina_prover_rl): Training pipeline for formal theorem proving, based on a paradigm inspired by DeepSeek-R1.
- [RL-PLUS](https://github.com/YihongDong/RL-PLUS): Countering Capability Boundary Collapse of LLMs in Reinforcement Learning with Hybrid-policy Optimization.
- [rStar2-Agent](https://github.com/microsoft/rStar): Using reinforcement learning with multi-step tool-calling for math tasks, rStar2-Agent-14B reaches frontier-level math reasoning in just 510 RL training steps ![GitHub Repo stars](https://img.shields.io/github/stars/microsoft/rStar)
- [Vision-SR1](https://github.com/zli12321/Vision-SR1): Self-Rewarding Vision-Language Model via Reasoning Decomposition ![GitHub Repo stars](https://img.shields.io/github/stars/zli12321/Vision-SR1)
- [SimpleVLA-RL](https://github.com/PRIME-RL/SimpleVLA-RL): SimpleVLA-RL: A Simple yet Effective Vision-Language Action Model for Reinforcement Learning ![GitHub Repo stars](https://img.shields.io/github/stars/PRIME-RL/SimpleVLA-RL)
- [Table-R1](https://github.com/Table-R1/Table-R1): Table-R1: Inference-Time Scaling for Table Reasoning ![GitHub Repo stars](https://img.shields.io/github/stars/Table-R1/Table-R1)
- [Revisual-R1](https://github.com/CSfufu/Revisual-R1): Revisual-R1: Advancing Multimodal Reasoning From Optimized Cold Start to Staged Reinforcement Learning ![GitHub Repo stars](https://img.shields.io/github/stars/CSfufu/Revisual-R1)
- [ARES](https://github.com/shawn0728/ARES): ARES: Multimodal Adaptive Reasoning via Difficulty-Aware Token-Level Entropy Shaping ![GitHub Repo stars](https://img.shields.io/github/stars/shawn0728/ARES)
- [Meta-Bandit-LLM](https://github.com/sanxing-chen/meta-bandit-llm): Long-horizon multi-turn interactive training for meta-bandit agents ![GitHub Repo stars](https://img.shields.io/github/stars/sanxing-chen/meta-bandit-llm)
- [PokeeResearch](https://github.com/Pokee-AI/PokeeResearchOSS): State-of-the-art 7B DeepResearch agent that leverages web search and content reading capabilities to answer complex questions using the most up-to-date information available online ![GitHub Repo stars](https://img.shields.io/github/stars/Pokee-AI/PokeeResearchOSS)

and many more awesome works listed in [recipe](recipe/README.md).

## Contribution Guide

See the [contribution guide](CONTRIBUTING.md).

## About [ByteDance Seed Team](https://team.doubao.com/)

Founded in 2023, the ByteDance Seed Team is dedicated to crafting the industry's most advanced AI foundation models. The team aspires to become a world-class research team and to make significant contributions to the advancement of science and society. You can get to know ByteDance Seed better through the following channels 👇

---

We are HIRING! Send us an [email](mailto:the.verl.project@gmail.com) if you are interested in internship/FTE opportunities in RL for agents.
================================================
FILE: verl_distillation/deploy_env.sh
================================================
#!/bin/bash
# Multi-node Environment Deployment Script
# Usage: bash deploy_env.sh [--all-nodes]

set -e

SCRIPT_DIR=$(cd $(dirname $0); pwd)
PROJECT_DIR=${SCRIPT_DIR}

# Configuration
CONDA_ENV_NAME=${CONDA_ENV_NAME:-"distill"}
PYTHON_VERSION=${PYTHON_VERSION:-"3.10"}
HOSTFILE=${HOSTFILE:-"/etc/mpi/hostfile"}

# Colors
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m'

log_info()  { echo -e "${GREEN}[INFO]${NC} $1"; }
log_warn()  { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }

# Initialize conda
init_conda() {
    for conda_sh in /root/anaconda3/etc/profile.d/conda.sh \
                    /root/miniconda3/etc/profile.d/conda.sh \
                    $HOME/anaconda3/etc/profile.d/conda.sh \
                    $HOME/miniconda3/etc/profile.d/conda.sh \
                    /opt/conda/etc/profile.d/conda.sh; do
        [ -f "$conda_sh" ] && source "$conda_sh" && return 0
    done
    command -v conda &>/dev/null
}

# Setup proxy
setup_proxy() {
    log_info "Setting up proxy..."
    unset -v http_proxy https_proxy no_proxy
    export http_proxy=http://oversea-squid2.ko.txyun:11080
    export https_proxy=http://oversea-squid2.ko.txyun:11080
    export no_proxy=localhost,127.0.0.1,localaddress,localdomain.com,internal,corp.kuaishou.com,test.gifshow.com,staging.kuaishou.com
}

# Install on local node
install_local() {
    log_info "Installing environment..."

    # Setup proxy first
    setup_proxy

    if ! init_conda; then
        log_error "Conda not found."
        exit 1
    fi

    # Configure conda for stability
    conda config --set remote_read_timeout_secs 600
    conda config --set remote_connect_timeout_secs 60
    conda config --set remote_max_retries 10

    # Create or activate conda env
    if conda env list | grep -q "^${CONDA_ENV_NAME} "; then
        log_warn "Environment '${CONDA_ENV_NAME}' exists, activating..."
    else
        log_info "Creating environment '${CONDA_ENV_NAME}'..."
        conda create -n ${CONDA_ENV_NAME} python=${PYTHON_VERSION} -y
    fi
    source $(conda info --base)/etc/profile.d/conda.sh
    conda activate ${CONDA_ENV_NAME}

    log_info "Installing torch..."
    pip install torch==2.8.0
    pip install --force-reinstall torchvision==0.23.0
    pip install --force-reinstall torchaudio==2.8.0

    # Install requirements
    log_info "Installing requirements.txt..."
    pip install -r ${PROJECT_DIR}/requirements.txt

    # Install flash-attn separately
    # log_info "Installing flash-attn..."
    # pip install flash-attn==2.7.4.post1 --no-build-isolation
    pip install flash-attn --no-build-isolation

    # Install verl package
    log_info "Installing verl package..."
    cd ${PROJECT_DIR}
    pip install -e .

    log_info "Done!"
}

# Deploy to all nodes
deploy_all_nodes() {
    [ ! -f "${HOSTFILE}" ] && log_error "Hostfile not found: ${HOSTFILE}" && exit 1
    ALL_NODES=$(awk '!a[$1]++ {print $1}' ${HOSTFILE})
    log_info "Deploying to: ${ALL_NODES}"
    mkdir -p ./logs/deploy
    for node in ${ALL_NODES}; do
        log_info "Deploying to ${node}..."
        ssh -n ${node} "CONDA_ENV_NAME=${CONDA_ENV_NAME} bash ${SCRIPT_DIR}/deploy_env.sh" \
            > "./logs/deploy/deploy_${node}.log" 2>&1 &
    done
    wait
    log_info "Deployment completed!
Check logs in ./logs/deploy/"
}

# Main
case "${1}" in
    --all-nodes) deploy_all_nodes ;;
    *) install_local ;;
esac

================================================
FILE: verl_distillation/docker/Apptainerfile.rocm
================================================
Bootstrap: docker
# Support - Training: fsdp; Inference: vllm
# FROM: rocm/vllm:rocm6.2_mi300_ubuntu20.04_py3.9_vllm_0.6.4
# Support - Training: fsdp; Inference: vllm, sglang
FROM lmsysorg/sglang:v0.4.5-rocm630

%environment
    export PYTORCH_ROCM_ARCH="gfx90a;gfx942"
    export HIPCC_COMPILE_FLAGS_APPEND="--amdgpu-target=gfx90a;gfx942 -D__HIP_PLATFORM_AMD__"
    export CFLAGS="-D__HIP_PLATFORM_AMD__"
    export CXXFLAGS="-D__HIP_PLATFORM_AMD__"

%post
    # Create source directory
    mkdir -p /opt/src

    # Uninstall and reinstall vllm
    pip uninstall -y vllm
    cd /opt/src
    git clone -b v0.6.3 https://github.com/vllm-project/vllm.git
    cd vllm
    MAX_JOBS=$(nproc) python3 setup.py install
    cd /opt
    rm -rf /opt/src/vllm

    # Install dependencies
    pip install "tensordict<0.6" --no-deps
    pip install accelerate \
        codetiming \
        datasets \
        dill \
        hydra-core \
        liger-kernel \
        numpy \
        pandas \
        peft \
        "pyarrow>=15.0.0" \
        pylatexenc \
        "ray[data,train,tune,serve]" \
        torchdata \
        transformers \
        wandb \
        orjson \
        pybind11

    # Clone and install verl from GitHub
    cd /opt
    git clone https://github.com/volcengine/verl.git
    cd verl
    # Uncomment to use a specific version
    # git checkout v0.3.0.post0
    pip install -e . --no-deps

    # Install torch_memory_saver
    pip install git+https://github.com/ExtremeViscent/torch_memory_saver.git --no-deps

================================================
FILE: verl_distillation/docker/Dockerfile.extention.awsefa
================================================
# Base image supporting AWS EFA
# Build images with frameworks based on this
FROM verlai/verl:app-verl0.6-transformers4.56.1-sglang0.5.2-mcore0.13.0-te2.2

# For aws instances with EFA net interface (Sagemaker AI Pod)
# install EFA driver:
######## AWS EFA ############
ENV NCCL_VERSION=2.25.1-1
ENV DEBIAN_FRONTEND=noninteractive
ENV EFA_INSTALLER_VERSION=1.40.0
ENV AWS_OFI_NCCL_VERSION=1.14.2
ENV FI_EFA_SET_CUDA_SYNC_MEMOPS=0
ENV FI_PROVIDER=efa

RUN apt update && apt install -y linux-image-generic libhwloc-dev

RUN cd /tmp && \
    curl -O https://efa-installer.amazonaws.com/aws-efa-installer-${EFA_INSTALLER_VERSION}.tar.gz && \
    tar -xf aws-efa-installer-${EFA_INSTALLER_VERSION}.tar.gz && \
    cd aws-efa-installer && \
    ./efa_installer.sh -y -g --skip-kmod --skip-limit-conf --no-verify && \
    ldconfig && \
    rm -rf /tmp/aws-efa-installer /var/lib/apt/lists/*

# NCCL EFA Plugin
RUN cd /tmp && \
    curl -LO https://github.com/aws/aws-ofi-nccl/archive/refs/tags/v${AWS_OFI_NCCL_VERSION}.tar.gz && \
    tar -xzf /tmp/v${AWS_OFI_NCCL_VERSION}.tar.gz && \
    rm /tmp/v${AWS_OFI_NCCL_VERSION}.tar.gz && \
    mv aws-ofi-nccl-${AWS_OFI_NCCL_VERSION} aws-ofi-nccl && \
    cd /tmp/aws-ofi-nccl && \
    ./autogen.sh && \
    ./configure --prefix=/opt/amazon/efa \
        --with-libfabric=/opt/amazon/efa \
        --with-cuda=/usr/local/cuda \
        --enable-platform-aws \
        --with-mpi=/opt/amazon/openmpi && \
    make -j$(nproc) install && \
    rm -rf /tmp/aws-ofi/nccl

# NCCL
RUN echo "/usr/local/lib" >> /etc/ld.so.conf.d/local.conf && \
    echo "/opt/amazon/openmpi/lib" >> /etc/ld.so.conf.d/efa.conf && \
    ldconfig

ENV OMPI_MCA_pml=^cm,ucx \
    OMPI_MCA_btl=tcp,self \
    OMPI_MCA_btl_tcp_if_exclude=lo,docker0,veth_def_agent \
    OPAL_PREFIX=/opt/amazon/openmpi \
    NCCL_SOCKET_IFNAME=^docker,lo,veth_def_agent \
    FI_EFA_USE_HUGE_PAGE=0

# docker build -t verl:awsefa --label "commit=$(git rev-parse --short HEAD)" .
# on aws:
# docker run --ipc=host --privileged --name verldev --gpus all --network=host --shm-size=1800gb -itd verl:awsefa

================================================
FILE: verl_distillation/docker/Dockerfile.ngc.vllm
================================================
# docker buildx build --platform linux/x86_64 -t "verlai/verl:ngc-th2.4.0-cu124-vllm0.6.3-ray2.4-te1.7-v0.0.6" -f docker/Dockerfile.ngc.vllm . --builder cloud-verlai-verl-builder --progress=plain --push
FROM nvcr.io/nvidia/pytorch:24.05-py3

# uninstall nv-pytorch fork
RUN pip3 uninstall pytorch-quantization \
    pytorch-triton \
    torch \
    torch-tensorrt \
    torchvision \
    xgboost transformer_engine flash_attn \
    apex megatron-core -y

RUN pip3 install torch==2.4.0 torchvision==0.19.0 torchaudio==2.4.0 --index-url https://download.pytorch.org/whl/cu124

# =============== Megatron dependencies (optional) =================
# install apex, set MAX_JOBS to avoid OOMs
RUN MAX_JOBS=4 pip3 install -v --disable-pip-version-check --no-cache-dir --no-build-isolation \
    --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" \
    git+https://github.com/NVIDIA/apex
# =============== End of Megatron dependencies (optional) =================

RUN pip3 install --no-cache-dir \
    accelerate \
    codetiming \
    datasets \
    dill \
    hydra-core \
    numpy \
    'pandas' \
    'peft' \
    'pyarrow>=15.0.0' \
    'pybind11' \
    'pylatexenc' \
    'ray>=2.10' \
    'tensordict<0.6' \
    'transformers' \
    'vllm==0.6.3.post1' \
    'wandb' \
    'tensorboard'

# full dependencies
RUN pip3 install pytest pre-commit py-spy pyext liger-kernel

# =============== Megatron dependencies (optional) =================
# install Transformer Engine, which requires FA 2.5.8. Do it in a separate step for docker cache
RUN MAX_JOBS=4 NINJA_FLAGS="-j4" pip3 install flash-attn==2.5.8 --no-cache-dir --no-build-isolation
RUN MAX_JOBS=1 NINJA_FLAGS="-j1" TE_BUILD_WITH_NINJA=0 pip3 install git+https://github.com/eric-haibin-lin/TransformerEngine.git@v1.7.0
# =============== End of Megatron dependencies (optional) =================

================================================
FILE: verl_distillation/docker/Dockerfile.ngc.vllm0.8
================================================
# Start from the NVIDIA official image (ubuntu-22.04 + cuda-12.6 + python-3.10)
# https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-24-08.html
FROM nvcr.io/nvidia/pytorch:24.08-py3

# Define environments
ENV MAX_JOBS=32
ENV VLLM_WORKER_MULTIPROC_METHOD=spawn
ENV DEBIAN_FRONTEND=noninteractive
ENV NODE_OPTIONS=""
ENV PIP_ROOT_USER_ACTION=ignore
ENV HF_HUB_ENABLE_HF_TRANSFER="1"

# Define installation arguments
ARG APT_SOURCE=https://mirrors.tuna.tsinghua.edu.cn/ubuntu/
ARG PIP_INDEX=https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple

# Set apt source
RUN cp /etc/apt/sources.list /etc/apt/sources.list.bak && \
    { \
        echo "deb ${APT_SOURCE} jammy main restricted universe multiverse"; \
        echo "deb ${APT_SOURCE} jammy-updates main restricted universe multiverse"; \
        echo "deb ${APT_SOURCE} jammy-backports main restricted universe multiverse"; \
        echo "deb ${APT_SOURCE} jammy-security main restricted universe multiverse"; \
    } > /etc/apt/sources.list

# Install systemctl
RUN apt-get update && \
    apt-get install -y -o Dpkg::Options::="--force-confdef" systemd && \
    apt-get clean

# Install tini
RUN apt-get update && \
    apt-get install -y tini && \
    apt-get clean

# Change pip source
RUN pip config set global.index-url "${PIP_INDEX}" && \
    pip config set global.extra-index-url "${PIP_INDEX}" && \
    python -m pip install --upgrade pip

# Uninstall nv-pytorch fork
RUN pip uninstall -y torch torchvision torchaudio \
    pytorch-quantization pytorch-triton torch-tensorrt \
    xgboost transformer_engine flash_attn apex megatron-core grpcio

# Install torch-2.6.0+cu124 + vllm-0.8.3
# torch-2.6.0+cu124: cxx11abi=False
# torch-2.6.0+cu126: cxx11abi=True
# see https://github.com/flashinfer-ai/flashinfer/issues/911
RUN pip install --no-cache-dir "vllm==0.8.3" "torch==2.6.0" "torchvision==0.21.0" "torchaudio==2.6.0" "tensordict==0.6.2" torchdata \
    "transformers[hf_xet]>=4.51.0" accelerate datasets peft hf-transfer \
    "numpy<2.0.0" "pyarrow>=15.0.0" pandas \
    ray[default] codetiming hydra-core pylatexenc qwen-vl-utils wandb dill pybind11 liger-kernel mathruler \
    pytest py-spy pyext pre-commit ruff tensorboard

# Install flash-attn-2.7.4.post1 (cxx11abi=False)
RUN wget -nv https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.4.post1/flash_attn-2.7.4.post1+cu12torch2.6cxx11abiFALSE-cp310-cp310-linux_x86_64.whl && \
    pip install --no-cache-dir flash_attn-2.7.4.post1+cu12torch2.6cxx11abiFALSE-cp310-cp310-linux_x86_64.whl

# Install flashinfer-0.2.2.post1+cu124 (cxx11abi=False)
# vllm-0.8.3 does not support flashinfer>=0.2.3
# see https://github.com/vllm-project/vllm/pull/15777
RUN wget -nv https://github.com/flashinfer-ai/flashinfer/releases/download/v0.2.2.post1/flashinfer_python-0.2.2.post1+cu124torch2.6-cp38-abi3-linux_x86_64.whl && \
    pip install --no-cache-dir flashinfer_python-0.2.2.post1+cu124torch2.6-cp38-abi3-linux_x86_64.whl

# Fix packages
RUN pip uninstall -y pynvml nvidia-ml-py && \
    pip install --no-cache-dir --upgrade "nvidia-ml-py>=12.560.30" "fastapi[standard]>=0.115.0" "optree>=0.13.0" "pydantic>=2.9" "grpcio>=1.62.1"

# Install verl
RUN pip install --no-cache-dir verl[vllm] -U

# Reset pip config
RUN pip config unset global.index-url && \
    pip config unset global.extra-index-url

================================================
FILE: verl_distillation/docker/Dockerfile.ngc.vllm0.8.sagemaker
================================================
# Using a pre-built image from AWS DLC which contains the current version of python (3.10) and supported cuda version (12.1)
FROM 763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:2.1.0-transformers4.36.0-gpu-py310-cu121-ubuntu20.04

# uninstall nv-pytorch fork
RUN pip3 uninstall -y pytorch-quantization \
    pytorch-triton torch torch-tensorrt torchvision \
    xgboost transformer_engine flash_attn apex megatron-core

# Define environments
ENV MAX_JOBS=32
ENV VLLM_WORKER_MULTIPROC_METHOD=spawn
ENV DEBIAN_FRONTEND=noninteractive
ENV NODE_OPTIONS=""
ENV HF_HUB_ENABLE_HF_TRANSFER="1"

# Install systemctl
RUN apt-get update && \
    apt-get install -y -o Dpkg::Options::="--force-confdef" systemd && \
    apt-get clean

# Install tini
RUN apt-get update && \
    apt-get install -y tini && \
    apt-get clean

# Install torch-2.6.0 + vllm-0.8.2
RUN pip install --no-cache-dir vllm==0.8.2 torch==2.6.0 torchvision==0.21.0 torchaudio==2.6.0 tensordict torchdata==0.11.0 \
    transformers>=4.49.0 accelerate datasets peft hf-transfer \
    ray[default] codetiming hydra-core pandas pyarrow>=15.0.0 pylatexenc qwen-vl-utils wandb dill pybind11 liger-kernel mathruler \
    pytest pre-commit py-spy pyext ruff tensorboard

# Install flash_attn-2.7.4.post1
RUN pip uninstall -y transformer-engine flash-attn && \
    pip install flash-attn==2.7.4.post1 --no-build-isolation

# Fix cv2
RUN pip uninstall -y pynvml nvidia-ml-py && \
    pip install --no-cache-dir nvidia-ml-py>=12.560.30 opencv-python-headless==4.8.0.74 \
        fastapi==0.115.6 && \
    pip install --no-cache-dir --upgrade optree>=0.13.0

# Install verl
RUN pip install --no-cache-dir verl[vllm] -U

# Reset pip config
RUN pip config unset global.index-url && \
    pip config unset global.extra-index-url

================================================
FILE: verl_distillation/docker/Dockerfile.rocm
================================================
# FROM "compute-artifactory.amd.com:5000/rocm-plus-docker/framework/compute-rocm-rel-6.4:94_ubuntu22.04_py3.10_pytorch_release-2.7_575e247"
# FROM "rlfoundation.azurecr.io/rocm6.3.4:vllm-0.8.5-numa-patch-ubuntu-22.04"
FROM "rlsys/rocm-6.3.4-patch:rocm6.3.4-numa-patch_ubuntu-22.04"

SHELL ["/bin/bash", "-ceuxo", "pipefail"]

ENV MAX_JOBS=512
ENV PATH="/usr/local/python3.12/bin:$PATH"
RUN ln -sf /usr/bin/python3.12 /usr/bin/python && \
    ln -sf /usr/bin/pip3.12 /usr/bin/pip

############################################
RUN apt-get update
RUN apt-get install -y pkg-config liblzma-dev
############################################

###########################################
##########Install TransformerEngine########
###########################################
WORKDIR /workspace/
# transformer-engine install
# https://github.com/ROCm/TransformerEngine
RUN rm -rf TransformerEngine
RUN git clone --recursive https://github.com/ROCm/TransformerEngine.git
WORKDIR /workspace/TransformerEngine
RUN git checkout 236178e5
# git checkout bb061ade
# git checkout 864405c
ENV NVTE_FRAMEWORK=pytorch
ENV NVTE_ROCM_ARCH=gfx942
ENV NVTE_USE_HIPBLASLT=1
ENV NVTE_USE_ROCM=1
# export CMAKE_PREFIX_PATH="/opt/rocm:/opt/rocm/hip:/usr/local:/usr:${CMAKE_PREFIX_PATH:-}"
ENV CMAKE_PREFIX_PATH="/opt/rocm:/opt/rocm/hip:/usr/local:/usr"
# ENV NVTE_BUILD_MAX_JOBS=$(MAX_JOBS)
RUN MAX_JOBS=$(MAX_JOBS) pip install . -vvv
WORKDIR /workspace/
###########################################

####################################################################################
################Install vllm - sglang require vllm 0.6.7 dependency#################
####################################################################################
#### Require vllm 0.6.7 - checkout 113274a0
WORKDIR /workspace/
RUN rm -rf vllm
RUN pip uninstall -y vllm
# Refer to here (down-grade vllm to 0.6.3): https://docs.vllm.ai/en/v0.6.3/getting_started/amd-installation.html
RUN git clone https://github.com/ROCm/vllm.git
# git clone https://github.com/vllm-project/vllm.git
WORKDIR /workspace/vllm
RUN git checkout 113274a0
ENV PYTORCH_ROCM_ARCH="gfx90a;gfx942"
#ENV MAX_JOBS=512
ENV MAX_JOBS=${MAX_JOBS}
RUN pip install "boto3>=1.26.0"
RUN pip install setuptools_scm
# will add src into py. You can delete the repo
RUN python3 setup.py install
WORKDIR /workspace/
####################################################################################

###########################################
############For hack docker################
###########################################
RUN pip install setuptools==75.8.0
###########################################

###########################################
############build sglang###################
###########################################
# Set environment variables
ENV BASE_DIR=/sgl-workspace
ENV BUILD_TYPE=all
ENV SGL_REPO=https://github.com/sgl-project/sglang
ENV SGL_BRANCH=v0.4.6.post5
ENV TRITON_REPO=https://github.com/ROCm/triton.git
ENV TRITON_COMMIT=improve_fa_decode_3.0.0
ENV AITER_REPO=https://github.com/ROCm/aiter.git
ENV AITER_COMMIT=v0.1.2
# v0.1.2 version - commit id: 9d11f47
# ENV AITER_COMMIT=9d11f47
ENV HIP_FORCE_DEV_KERNARG=1
ENV HSA_NO_SCRATCH_RECLAIM=1
ENV SGLANG_SET_CPU_AFFINITY=1
ENV SGLANG_ALLOW_OVERWRITE_LONGER_CONTEXT_LEN=1
ENV NCCL_MIN_NCHANNELS=112
ENV MOE_PADDING=1
ENV VLLM_FP8_PADDING=1
ENV VLLM_FP8_ACT_PADDING=1
ENV VLLM_FP8_WEIGHT_PADDING=1
ENV VLLM_FP8_REDUCE_CONV=1
ENV TORCHINDUCTOR_MAX_AUTOTUNE=1
ENV TORCHINDUCTOR_MAX_AUTOTUNE_POINTWISE=1
ENV HIPCC_COMPILE_FLAGS_APPEND="--offload-arch=gfx942"
ENV AMDGPU_TARGETS=gfx942
ENV ROCM_ARCH=gfx942
ENV PYTORCH_ROCM_ARCH="gfx90a;gfx942"

# Switch to working directory
WORKDIR /sgl-workspace

# Clean and create directory
RUN rm -rf /sgl-workspace && mkdir -p /sgl-workspace

# Clone and build sglang
RUN git clone ${SGL_REPO} \
    && cd sglang \
    && git checkout ${SGL_BRANCH} || echo "Using default branch" \
    && cd sgl-kernel \
    && rm -f pyproject.toml \
    && mv pyproject_rocm.toml pyproject.toml \
    && python setup_rocm.py install \
    && cd .. \
    && if [ "$BUILD_TYPE" = "srt" ]; then \
        python -m pip --no-cache-dir install -e "python[srt_hip]"; \
    else \
        python -m pip --no-cache-dir install -e "python[all_hip]"; \
    fi \
    && cd /sgl-workspace \
    && cp -r /sgl-workspace/sglang /sglang \
    && python -m pip cache purge

# Install common Python packages
RUN pip install IPython orjson python-multipart torchao pybind11

# Rebuild Triton
RUN pip uninstall -y triton || true \
    && git clone ${TRITON_REPO} \
    && cd triton \
    && git checkout ${TRITON_COMMIT} \
    && cd python \
    && python3 setup.py install \
    && cd /sgl-workspace
# ENV HIPCC_COMPILE_FLAGS_APPEND="--offload-arch=gfx942 --amdgpu-lower-module-lds-strategy=1"
# ENV HIPCC_COMPILE_FLAGS_APPEND="--offload-arch=gfx942"

# Build aiter
# version: Commit 9d11f47
RUN pip uninstall -y aiter || true
RUN git clone ${AITER_REPO} \
    && cd aiter \
    && git checkout ${AITER_COMMIT} \
    && git submodule sync \
    && git submodule update --init --recursive \
    && PREBUILD_KERNELS=1 GPU_ARCHS=gfx942 python3 setup.py install \
    && cd /sgl-workspace
# && PREBUILD_KERNELS=1 GPU_ARCHS=gfx942 python3 setup.py develop \

# Copy MI300X config
RUN find /sgl-workspace/sglang/python/sglang/srt/layers/quantization/configs/ \
    /sgl-workspace/sglang/python/sglang/srt/layers/moe/fused_moe_triton/configs/ \
    -type f -name '*MI300X*' | \
    xargs -I {} sh -c 'vf_config=$(echo "$1" | sed "s/MI300X/MI300X_VF/"); cp "$1" "$vf_config"' -- {}

# Environment setup complete.
RUN echo "Environment setup complete."
WORKDIR /workspace/
###########################################

###########################################
###############vllm v0.8.5#################
###########################################
# ENV GITHUB_USERNAME=yushengsu-thu
# ENV GITHUB_MAIL=yushengsu@gmail.com
# RUN git config --global user.name "${GITHUB_USERNAME}" \
#     && git config --global user.email "${GITHUB_MAIL}"
WORKDIR /workspace/
ENV VLLM_TARGET_DEVICE=rocm
ENV ROCM_PATH=/opt/rocm
ENV SETUPTOOLS_SCM_PRETEND_VERSION=0.8.5.dev
# Find the repo path in: DockerFile/Dockerfile.rocm_yang
# RUN git clone https://github.com/RLFoundation/vllm-patch.git
RUN pip uninstall -y vllm || true
RUN rm -rf vllm-patch
RUN git clone https://github.com/RLFoundation/vllm-patch.git \
    && cd vllm-patch \
    && git checkout v0.8.5-sleep-numa \
    && rm -rf build/ dist/ *.egg-info \
    && ln -sf /opt/rocm/lib/libamdhip64.so /usr/lib/libamdhip64.so \
    && SETUPTOOLS_SCM_PRETEND_VERSION=0.8.5.dev PYTORCH_ROCM_ARCH="gfx90a;gfx942" MAX_JOBS=${MAX_JOBS} python3 setup.py install
# RUN SETUPTOOLS_SCM_PRETEND_VERSION=0.8.5.dev PYTORCH_ROCM_ARCH="gfx90a;gfx942" MAX_JOBS=${MAX_JOBS} python3 setup.py develop
WORKDIR /workspace/
###########################################

#########################################
#### Install megatron-core##############
#########################################
RUN pip uninstall -y megatron-core && \
    git clone https://github.com/yushengsu-thu/Megatron-LM-amd_version.git && \
    cd Megatron-LM-amd_version && \
    pip install -vvv -e . && \
    cd /workspace/
#########################################

#######################################
################apex###################
#######################################
WORKDIR /workspace/
RUN pip uninstall -y apex && \
    git clone https://github.com/ROCm/apex.git && \
    cd apex && \
    python setup.py install && \
    cd /workspace/
#######################################

################################################################################
###########################Add torch_memory_saver##############################
################################################################################
# Set environment variables
ENV HIPCC_COMPILE_FLAGS_APPEND="--amdgpu-target=gfx90a;gfx942 -D__HIP_PLATFORM_AMD__"
ENV CFLAGS="-D__HIP_PLATFORM_AMD__"
ENV CXXFLAGS="-D__HIP_PLATFORM_AMD__"
RUN pip install "git+https://github.com/YangWang92/torch_memory_saver_numa.git@numa"
################################################################################

########################################
######Install ray#######################
########################################
# need to add this patch: https://github.com/ray-project/ray/pull/53531/files
RUN pip uninstall ray -y
RUN pip install "ray[data,train,tune,serve]>=2.47.0"
########################################

##########################################
#######Install other dependencies#########
##########################################
RUN pip install "tensordict==0.6.2" --no-deps && \
    pip install accelerate \
        codetiming \
        datasets \
        dill \
        hydra-core \
        liger-kernel \
        numpy \
        pandas \
        peft \
        "pyarrow>=15.0.0" \
        pylatexenc \
        torchdata \
        wandb \
        orjson \
        pybind11

WORKDIR /workspace/
RUN git clone https://github.com/volcengine/verl.git && \
    cd verl && \
    pip install -e .
##########################################

WORKDIR /workspace/
CMD ["/usr/bin/bash"]

================================================
FILE: verl_distillation/docker/Dockerfile.rocm7
================================================
# default base image
ARG REMOTE_VLLM="1"
ARG COMMON_WORKDIR=/app
ARG BASE_IMAGE=rocm/vllm-dev:base

FROM ${BASE_IMAGE} AS base
ARG ARG_PYTORCH_ROCM_ARCH
ENV PYTORCH_ROCM_ARCH=${ARG_PYTORCH_ROCM_ARCH:-${PYTORCH_ROCM_ARCH}}

# Install some basic utilities
RUN apt-get update -q -y && apt-get install -q -y \
    sqlite3 libsqlite3-dev libfmt-dev libmsgpack-dev libsuitesparse-dev \
    apt-transport-https ca-certificates wget curl
# Remove sccache
RUN python3 -m pip install --upgrade pip
RUN apt-get purge -y sccache; python3 -m pip uninstall -y sccache; rm -f "$(which sccache)"
ARG COMMON_WORKDIR
WORKDIR ${COMMON_WORKDIR}

# -----------------------
# vLLM fetch stages
FROM base AS fetch_vllm_0
ONBUILD COPY ./ vllm/
FROM base AS fetch_vllm_1
#ARG VLLM_REPO="https://github.com/ROCm/vllm.git"
#ARG VLLM_BRANCH="main"
ARG VLLM_REPO=https://github.com/HollowMan6/vllm.git
ARG VLLM_BRANCH="sleep_amd"
ONBUILD RUN git clone ${VLLM_REPO} \
    && cd vllm \
    && git checkout ${VLLM_BRANCH}
FROM fetch_vllm_${REMOTE_VLLM} AS fetch_vllm

# -----------------------
# vLLM build stages
FROM fetch_vllm AS build_vllm
# Build vLLM
RUN cd vllm \
    && python3 -m pip install -r requirements/rocm.txt \
    && python3 setup.py clean --all \
    && ln -sf /opt/rocm/lib/libamdhip64.so /usr/lib/libamdhip64.so \
    && VLLM_TARGET_DEVICE=rocm ROCM_PATH=/opt/rocm/ VLLM_GPU_LANG=HIP SETUPTOOLS_SCM_PRETEND_VERSION=0.11.0.dev python3 setup.py bdist_wheel --dist-dir=dist
#&& python3 setup.py bdist_wheel --dist-dir=dist

FROM scratch AS export_vllm
ARG COMMON_WORKDIR
COPY --from=build_vllm ${COMMON_WORKDIR}/vllm/dist/*.whl /
COPY --from=build_vllm ${COMMON_WORKDIR}/vllm/requirements /requirements
COPY --from=build_vllm ${COMMON_WORKDIR}/vllm/benchmarks /benchmarks
COPY --from=build_vllm ${COMMON_WORKDIR}/vllm/tests /tests
COPY --from=build_vllm ${COMMON_WORKDIR}/vllm/examples /examples
COPY --from=build_vllm ${COMMON_WORKDIR}/vllm/.buildkite /.buildkite

# -----------------------
# Test vLLM image
FROM base AS test
RUN python3 -m pip install --upgrade pip && rm -rf /var/lib/apt/lists/*
# Install vLLM
#RUN --mount=type=bind,from=export_vllm,src=/,target=/install \
COPY --from=export_vllm /*.whl /install
COPY --from=export_vllm /requirements /install/requirements
COPY --from=export_vllm /benchmarks /install/benchmarks
COPY --from=export_vllm /tests /install/tests
COPY --from=export_vllm /examples /install/examples
COPY --from=export_vllm /.buildkite /install/.buildkite
RUN cd /install \
    && pip install -U -r requirements/rocm.txt \
    && pip install -U -r requirements/rocm-test.txt \
    && pip uninstall -y vllm \
    && pip install *.whl
WORKDIR /vllm-workspace
ARG COMMON_WORKDIR
COPY --from=build_vllm ${COMMON_WORKDIR}/vllm /vllm-workspace
# install development dependencies (for testing)
RUN cd /vllm-workspace \
    && rm -rf vllm \
    && python3 -m pip install -e tests/vllm_test_utils \
    && python3 -m pip install lm-eval[api]==0.4.4 \
    && python3 -m pip install pytest-shard

# -----------------------
# Final vLLM image
FROM base AS final
RUN python3 -m pip install --upgrade pip && rm -rf /var/lib/apt/lists/*
# Error related to odd state for numpy 1.20.3 where there is no METADATA etc, but an extra LICENSES_bundled.txt.
# Manually remove it so that later steps of numpy upgrade can continue
RUN case "$(which python3)" in \
    *"/opt/conda/envs/py_3.9"*) \
        rm -rf /opt/conda/envs/py_3.9/lib/python3.9/site-packages/numpy-1.20.3.dist-info/;; \
    *) ;; esac
RUN python3 -m pip install --upgrade huggingface-hub[cli]
# Install vLLM
RUN --mount=type=bind,from=export_vllm,src=/,target=/install \
    cd /install \
    && pip install -U -r requirements/rocm.txt \
    && pip uninstall -y vllm \
    && pip install *.whl
ARG COMMON_WORKDIR
# Copy over the benchmark scripts as well
COPY --from=export_vllm /benchmarks ${COMMON_WORKDIR}/vllm/benchmarks
COPY --from=export_vllm /examples ${COMMON_WORKDIR}/vllm/examples

ENV RAY_EXPERIMENTAL_NOSET_HIP_VISIBLE_DEVICES=1
ENV TOKENIZERS_PARALLELISM=false
# ENV that can improve safe tensor loading, and end-to-end time
ENV SAFETENSORS_FAST_GPU=1
# Performance environment variable.
ENV HIP_FORCE_DEV_KERNARG=1

# -----------------------
# Install verl
ARG VERL_REPO=https://github.com/volcengine/verl.git
ARG VERL_BRANCH=main
RUN pip install "tensordict==0.6.2" --no-deps && \
    pip install accelerate \
        codetiming \
        datasets \
        dill \
        hydra-core \
        liger-kernel \
        numpy \
        pandas \
        peft \
        "pyarrow>=15.0.0" \
        pylatexenc \
        torchdata \
        wandb \
        orjson \
        pybind11
WORKDIR /workspace/
RUN git clone ${VERL_REPO} && \
    cd verl && \
    git checkout ${VERL_BRANCH} && \
    pip install -e .

CMD ["/bin/bash"]

================================================
FILE: verl_distillation/docker/Dockerfile.rocm_verl-0.3.0.post1
================================================
# Build the docker in the repo dir:
# docker build -f docker/Dockerfile.rocm -t verl-rocm:03.04.2015 .
# docker images # you can find your built docker

# Support - Training: fsdp; Inference: vllm
# FROM rocm/vllm:rocm6.2_mi300_ubuntu20.04_py3.9_vllm_0.6.4
# Support - Training: fsdp; Inference: vllm, sglang
FROM lmsysorg/sglang:v0.4.6.post5-rocm630

# Set working directory
# WORKDIR $PWD/app

# Set environment variables
ENV PYTORCH_ROCM_ARCH="gfx90a;gfx942"
ENV HIPCC_COMPILE_FLAGS_APPEND="--amdgpu-target=gfx90a;gfx942 -D__HIP_PLATFORM_AMD__"
ENV CFLAGS="-D__HIP_PLATFORM_AMD__"
ENV CXXFLAGS="-D__HIP_PLATFORM_AMD__"

# Install vllm
RUN pip uninstall -y vllm && \
    rm -rf vllm && \
    git clone -b v0.6.3 https://github.com/vllm-project/vllm.git && \
    cd vllm && \
    MAX_JOBS=$(nproc) python3 setup.py install && \
    cd .. && \
    rm -rf vllm

# Copy the entire project directory
COPY . .

# Install dependencies
RUN pip install "tensordict==0.6.2" --no-deps && \
    pip install accelerate \
        codetiming \
        datasets \
        dill \
        hydra-core \
        liger-kernel \
        numpy \
        pandas \
        peft \
        "pyarrow>=15.0.0" \
        pylatexenc \
        "ray[data,train,tune,serve]<2.45.0" \
        torchdata \
        transformers \
        wandb \
        orjson \
        pybind11

RUN git clone https://github.com/volcengine/verl.git && \
    cd verl && \
    pip install -e .
# Install torch_memory_saver RUN pip install git+https://github.com/ExtremeViscent/torch_memory_saver.git --no-deps ================================================ FILE: verl_distillation/docker/Dockerfile.rocm_verl-0.4.1 ================================================ # FROM "compute-artifactory.amd.com:5000/rocm-plus-docker/framework/compute-rocm-rel-6.4:94_ubuntu22.04_py3.10_pytorch_release-2.7_575e247" # FROM "rlfoundation.azurecr.io/rocm6.3.4:vllm-0.8.5-numa-patch-ubuntu-22.04" FROM "rlsys/rocm-6.3.4-patch:rocm6.3.4-numa-patch_ubuntu-22.04" SHELL ["/bin/bash", "-ceuxo", "pipefail"] ENV MAX_JOBS=512 ENV PATH="/usr/local/python3.12/bin:$PATH" RUN ln -sf /usr/bin/python3.12 /usr/bin/python && \ ln -sf /usr/bin/pip3.12 /usr/bin/pip ############################################ ############################################ RUN apt-get update RUN apt-get install -y pkg-config liblzma-dev ############################################ ############################################ ########################################### ##########Install TransformerEngine######## ########################################### WORKDIR /workspace/ # transformer-engine install # https://github.com/ROCm/TransformerEngine RUN rm -rf TransformerEngine RUN git clone --recursive https://github.com/ROCm/TransformerEngine.git WORKDIR /workspace/TransformerEngine RUN git checkout 236178e5 # git checkout bb061ade # git checkout 864405c ENV NVTE_FRAMEWORK=pytorch ENV NVTE_ROCM_ARCH=gfx942 ENV NVTE_USE_HIPBLASLT=1 ENV NVTE_USE_ROCM=1 # export CMAKE_PREFIX_PATH="/opt/rocm:/opt/rocm/hip:/usr/local:/usr:${CMAKE_PREFIX_PATH:-}" ENV CMAKE_PREFIX_PATH="/opt/rocm:/opt/rocm/hip:/usr/local:/usr" # ENV NVTE_BUILD_MAX_JOBS=$(MAX_JOBS) RUN MAX_JOBS=$(MAX_JOBS) pip install . -vvv WORKDIR /workspace/ ########################################### ########################################### ########################################### #################################################################################### ################Install vllm - sglang require vllm 0.6.7 dependency################# #################################################################################### #### Require vllm 0.6.7 - checkout 113274a0 WORKDIR /workspace/ RUN rm -rf vllm RUN pip uninstall -y vllm # Refer to here (down-grade vllm to 0.6.3): https://docs.vllm.ai/en/v0.6.3/getting_started/amd-installation.html RUN git clone https://github.com/ROCm/vllm.git # git clone https://github.com/vllm-project/vllm.git WORKDIR /workspace/vllm RUN git checkout 113274a0 ENV PYTORCH_ROCM_ARCH="gfx90a;gfx942" #ENV MAX_JOBS=512 ENV MAX_JOBS=${MAX_JOBS} RUN pip install "boto3>=1.26.0" RUN pip install setuptools_scm # will add src into py. 
You can delete the repo RUN python3 setup.py install WORKDIR /workspace/ #################################################################################### #################################################################################### #################################################################################### ########################################### ############For hack docker################ ########################################### RUN pip install setuptools==75.8.0 ########################################### ########################################### ########################################### ########################################### ############build sgalng################### ########################################### # Set environment variables ENV BASE_DIR=/sgl-workspace ENV BUILD_TYPE=all ENV SGL_REPO=https://github.com/sgl-project/sglang ENV SGL_BRANCH=v0.4.6.post5 ENV TRITON_REPO=https://github.com/ROCm/triton.git ENV TRITON_COMMIT=improve_fa_decode_3.0.0 ENV AITER_REPO=https://github.com/ROCm/aiter.git ENV AITER_COMMIT=v0.1.2 # v0.1.2 version - commit id: 9d11f47 # ENV AITER_COMMIT=9d11f47 ENV HIP_FORCE_DEV_KERNARG=1 ENV HSA_NO_SCRATCH_RECLAIM=1 ENV SGLANG_SET_CPU_AFFINITY=1 ENV SGLANG_ALLOW_OVERWRITE_LONGER_CONTEXT_LEN=1 ENV NCCL_MIN_NCHANNELS=112 ENV MOE_PADDING=1 ENV VLLM_FP8_PADDING=1 ENV VLLM_FP8_ACT_PADDING=1 ENV VLLM_FP8_WEIGHT_PADDING=1 ENV VLLM_FP8_REDUCE_CONV=1 ENV TORCHINDUCTOR_MAX_AUTOTUNE=1 ENV TORCHINDUCTOR_MAX_AUTOTUNE_POINTWISE=1 ENV HIPCC_COMPILE_FLAGS_APPEND="--offload-arch=gfx942" ENV AMDGPU_TARGETS=gfx942 ENV ROCM_ARCH=gfx942 ENV PYTORCH_ROCM_ARCH="gfx90a;gfx942" # Switch to working directory WORKDIR /sgl-workspace # Clean and create directory RUN rm -rf /sgl-workspace && mkdir -p /sgl-workspace # Clone and build sglang RUN git clone ${SGL_REPO} \ && cd sglang \ && git checkout ${SGL_BRANCH} || echo "Using default branch" \ && cd sgl-kernel \ && rm -f pyproject.toml \ && mv pyproject_rocm.toml pyproject.toml \ && python setup_rocm.py install \ && cd .. 
\ && if [ "$BUILD_TYPE" = "srt" ]; then \ python -m pip --no-cache-dir install -e "python[srt_hip]"; \ else \ python -m pip --no-cache-dir install -e "python[all_hip]"; \ fi \ && cd /sgl-workspace \ && cp -r /sgl-workspace/sglang /sglang \ && python -m pip cache purge # Install common Python packages RUN pip install IPython orjson python-multipart torchao pybind11 # Rebuild Triton RUN pip uninstall -y triton || true \ && git clone ${TRITON_REPO} \ && cd triton \ && git checkout ${TRITON_COMMIT} \ && cd python \ && python3 setup.py install \ && cd /sgl-workspace # ENV HIPCC_COMPILE_FLAGS_APPEND="--offload-arch=gfx942 --amdgpu-lower-module-lds-strategy=1" # ENV HIPCC_COMPILE_FLAGS_APPEND="--offload-arch=gfx942" # Build aiter #version: Commit 9d11f47 # && git checkout ${AITER_COMMIT} \ RUN pip uninstall -y aiter || true RUN git clone ${AITER_REPO} \ && cd aiter \ && git checkout ${AITER_COMMIT} \ && git submodule sync \ && git submodule update --init --recursive \ && PREBUILD_KERNELS=1 GPU_ARCHS=gfx942 python3 setup.py install \ && cd /sgl-workspace # && PREBUILD_KERNELS=1 GPU_ARCHS=gfx942 python3 setup.py develop \ # && PREBUILD_KERNELS=1 GPU_ARCHS=gfx942 python3 setup.py develop \ # Copy MI300X config RUN find /sgl-workspace/sglang/python/sglang/srt/layers/quantization/configs/ \ /sgl-workspace/sglang/python/sglang/srt/layers/moe/fused_moe_triton/configs/ \ -type f -name '*MI300X*' | \ xargs -I {} sh -c 'vf_config=$(echo "$1" | sed "s/MI300X/MI300X_VF/"); cp "$1" "$vf_config"' -- {} # Environment setup complete. RUN echo "Environment setup complete." WORKDIR /workspace/ ########################################### ########################################### ########################################### ########################################### ###############vllm v0.8.5################# ########################################### # ENV GITHUB_USERNAME=yushengsu-thu # ENV GITHUB_MAIL=yushengsu@gmail.com # RUN git config --global user.name "${GITHUB_USERNAME}" \ # && git config --global user.email "${GITHUB_MAIL}" WORKDIR /workspace/ ENV VLLM_TARGET_DEVICE=rocm ENV ROCM_PATH=/opt/rocm ENV SETUPTOOLS_SCM_PRETEND_VERSION=0.8.5.dev # Find the repo path in: DockerFile/Dockerfile.rocm_yang # RUN git clone https://github.com/RLFoundation/vllm-patch.git RUN pip uninstall -y vllm || true RUN rm -rf vllm-patch RUN git clone https://github.com/RLFoundation/vllm-patch.git \ && cd vllm-patch \ && git checkout v0.8.5-sleep-numa \ && rm -rf build/ dist/ *.egg-info \ && ln -sf /opt/rocm/lib/libamdhip64.so /usr/lib/libamdhip64.so \ && SETUPTOOLS_SCM_PRETEND_VERSION=0.8.5.dev PYTORCH_ROCM_ARCH="gfx90a;gfx942" MAX_JOBS=${MAX_JOBS} python3 setup.py install # RUN SETUPTOOLS_SCM_PRETEND_VERSION=0.8.5.dev PYTORCH_ROCM_ARCH="gfx90a;gfx942" MAX_JOBS=${MAX_JOBS} python3 setup.py develop WORKDIR /workspace/ ########################################### ########################################### ########################################### ######################################### #### Install megatron-core############### ######################################### RUN pip uninstall -y megatron-core && \ git clone https://github.com/yushengsu-thu/Megatron-LM-amd_version.git && \ cd Megatron-LM-amd_version && \ pip install -vvv -e . 
&& \ cd /workspace/ ######################################### ######################################### ######################################### ####################################### ################apex################### ####################################### WORKDIR /workspace/ RUN pip uninstall -y apex && \ git clone https://github.com/ROCm/apex.git && \ cd apex && \ python setup.py install && \ cd /workspace/ ####################################### ####################################### ####################################### ################################################################################ ###########################Add torch_memory_saver############################### ################################################################################ # Set environment variables ENV HIPCC_COMPILE_FLAGS_APPEND="--amdgpu-target=gfx90a;gfx942 -D__HIP_PLATFORM_AMD__" ENV CFLAGS="-D__HIP_PLATFORM_AMD__" ENV CXXFLAGS="-D__HIP_PLATFORM_AMD__" RUN pip install "git+https://github.com/YangWang92/torch_memory_saver_numa.git@numa" ################################################################################ ################################################################################ ################################################################################ ######################################## ######Install ray####################### ######################################## # need to add this patch: https://github.com/ray-project/ray/pull/53531/files RUN pip uninstall ray -y RUN pip install "ray[data,train,tune,serve]>=2.47.0" ######################################## ######################################## ######################################## ########################################## #######Install other dependencies######### ########################################## RUN pip install "tensordict==0.6.2" --no-deps && \ pip install accelerate \ codetiming \ datasets \ dill \ hydra-core \ liger-kernel \ numpy \ pandas \ peft \ "pyarrow>=15.0.0" \ pylatexenc \ torchdata \ wandb \ orjson \ pybind11 WORKDIR /workspace/ RUN git clone https://github.com/volcengine/verl.git && \ cd verl && \ pip install -e . 
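# Optional: a quick end-of-build import check for the stack assembled above.
# The import names (vllm, ray, apex, megatron.core, verl) are the standard ones
# for the projects installed in this file; uncomment to fail the build early if
# any component of the ROCm stack is broken.
# RUN python3 -c "import torch, vllm, ray, apex, megatron.core, verl; print('stack OK')"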
########################################## ########################################## ########################################## WORKDIR /workspace/ CMD ["/usr/bin/bash"] CMD ["/usr/bin/bash"] ================================================ FILE: verl_distillation/docker/Dockerfile.sglang ================================================ # Start from the NVIDIA official image (ubuntu-22.04 + python-3.10) # https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-24-08.html FROM nvcr.io/nvidia/pytorch:24.08-py3 # Define environments ENV MAX_JOBS=32 ENV DEBIAN_FRONTEND=noninteractive ENV NODE_OPTIONS="" # Define installation arguments ARG APT_SOURCE=https://mirrors.ustc.edu.cn/ubuntu/ # Set apt source RUN cp /etc/apt/sources.list /etc/apt/sources.list.bak && \ { \ echo "deb ${APT_SOURCE} jammy main restricted universe multiverse"; \ echo "deb ${APT_SOURCE} jammy-updates main restricted universe multiverse"; \ echo "deb ${APT_SOURCE} jammy-backports main restricted universe multiverse"; \ echo "deb ${APT_SOURCE} jammy-security main restricted universe multiverse"; \ } > /etc/apt/sources.list # Install systemctl RUN apt-get update && \ apt-get install -y -o Dpkg::Options::="--force-confdef" systemd && \ apt-get clean # Install tini RUN apt-get update && \ apt-get install -y tini && \ apt-get clean # Change pip source ARG PIP_INDEX=https://mirrors.aliyun.com/pypi/simple/ RUN pip config set global.index-url "${PIP_INDEX}" && \ pip config set global.extra-index-url "${PIP_INDEX}" && \ python -m pip install --upgrade pip # Install sglang-0.4.6.post5 and torch-memory-saver RUN pip uninstall -y cuda-python && pip install "sglang[all]==0.4.6.post5" --no-cache-dir --find-links https://flashinfer.ai/whl/cu124/torch2.6/flashinfer-python && pip install torch-memory-saver --no-cache-dir # Install torch-2.6.0 RUN pip install --no-cache-dir torch==2.6.0 torchvision==0.21.0 torchaudio==2.6.0 tensordict torchdata \ transformers>=4.49.0 accelerate datasets peft hf_transfer \ ray[default] codetiming hydra-core pandas pyarrow>=15.0.0 pylatexenc qwen-vl-utils wandb liger-kernel \ pytest pre-commit py-spy pyext # Install flash_attn-2.7.4.post1 RUN pip uninstall -y transformer-engine flash-attn && \ wget -v https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.4.post1/flash_attn-2.7.4.post1+cu12torch2.6cxx11abiFALSE-cp310-cp310-linux_x86_64.whl && \ pip install --no-cache-dir flash_attn-2.7.4.post1+cu12torch2.6cxx11abiFALSE-cp310-cp310-linux_x86_64.whl # Fix cv2 RUN pip uninstall -y pynvml nvidia-ml-py && \ pip install --no-cache-dir nvidia-ml-py>=12.560.30 opencv-python-headless==4.8.0.74 fastapi==0.115.6 ================================================ FILE: verl_distillation/docker/Dockerfile.vemlp.vllm.te ================================================ # docker buildx build --platform linux/x86_64 -t "verlai/verl:$TAG" -f docker/$FILE . 
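# For example (TAG and FILE here are illustrative placeholders, not official tags):
#   export TAG=th2.4.0-cu124-vllm0.6.3 FILE=Dockerfile.vemlp.vllm.te
#   docker buildx build --platform linux/x86_64 -t "verlai/verl:$TAG" -f docker/$FILE .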
# the one in docker.io is an alias for the one veturbo # FROM vemlp-cn-beijing.cr.volces.com/veturbo/pytorch:2.4-cu124 FROM docker.io/haibinlin/verl:v0.0.5-th2.4.0-cu124-base # only config pip index with https://pypi.tuna.tsinghua.edu.cn/simple if needed # unset for now RUN pip3 config unset global.index-url # transformers 4.47.0 contains the following bug: # AttributeError: 'Gemma2Attention' object has no attribute '_flash_attn_uses_top_left_mask' RUN pip3 install --no-cache-dir \ torch==2.4.0 \ accelerate \ codetiming \ dill \ hydra-core \ numpy \ pybind11 \ tensordict \ "transformers <= 4.46.0" RUN pip3 install --no-cache-dir flash-attn==2.7.0.post2 --no-build-isolation # vllm depends on ray RUN pip3 install --no-cache-dir vllm==0.6.3 ray==2.10 # install apex RUN MAX_JOBS=4 pip3 install -v --disable-pip-version-check --no-cache-dir --no-build-isolation \ --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" \ git+https://github.com/NVIDIA/apex # install Transformer Engine # - flash-attn pinned to 2.5.3 by TransformerEngine, switch to eric-haibin-lin/TransformerEngine.git@v1.7.0 to relax version req # - install with: MAX_JOBS=1 NINJA_FLAGS="-j1" TE_BUILD_WITH_NINJA=0 to avoid OOM # - cudnn is required by TransformerEngine # RUN CUDNN_PATH=/opt/conda/lib/python3.11/site-packages/nvidia/cudnn \ # pip3 install git+https://github.com/eric-haibin-lin/TransformerEngine.git@v1.7.0 RUN MAX_JOBS=1 NINJA_FLAGS="-j1" pip3 install flash-attn==2.5.3 --no-cache-dir --no-build-isolation RUN MAX_JOBS=1 NINJA_FLAGS="-j1" pip3 install git+https://github.com/NVIDIA/TransformerEngine.git@v1.7 ================================================ FILE: verl_distillation/docker/Dockerfile.vllm.sglang.megatron.deepseek ================================================ # Start from the NVIDIA official image (ubuntu-22.04 + cuda-12.6 + python-3.10) # https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-24-08.html FROM nvcr.io/nvidia/pytorch:24.08-py3 # Define environments ENV MAX_JOBS=32 ENV VLLM_WORKER_MULTIPROC_METHOD=spawn ENV DEBIAN_FRONTEND=noninteractive ENV NODE_OPTIONS="" ENV PIP_ROOT_USER_ACTION=ignore ENV HF_HUB_ENABLE_HF_TRANSFER="1" # Define installation arguments ARG APT_SOURCE=https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ ARG PIP_INDEX=https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple # Set apt source RUN cp /etc/apt/sources.list /etc/apt/sources.list.bak && \ { \ echo "deb ${APT_SOURCE} jammy main restricted universe multiverse"; \ echo "deb ${APT_SOURCE} jammy-updates main restricted universe multiverse"; \ echo "deb ${APT_SOURCE} jammy-backports main restricted universe multiverse"; \ echo "deb ${APT_SOURCE} jammy-security main restricted universe multiverse"; \ } > /etc/apt/sources.list # Install systemctl RUN apt-get update && \ apt-get install -y -o Dpkg::Options::="--force-confdef" systemd && \ apt-get clean # Install tini RUN apt-get update && \ apt-get install -y tini aria2 && \ apt-get clean # Change pip source RUN pip config set global.index-url "${PIP_INDEX}" && \ pip config set global.extra-index-url "${PIP_INDEX}" && \ python -m pip install --upgrade pip # Uninstall nv-pytorch fork RUN pip uninstall -y torch torchvision torchaudio \ pytorch-quantization pytorch-triton torch-tensorrt \ xgboost transformer_engine flash_attn apex megatron-core grpcio # Reinstall CUDA 12.4 RUN aria2c https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-ubuntu2204.pin && \ mv cuda-ubuntu2204.pin 
/etc/apt/preferences.d/cuda-repository-pin-600 RUN aria2c --always-resume=true --max-tries=99999 https://developer.download.nvidia.com/compute/cuda/12.4.1/local_installers/cuda-repo-ubuntu2204-12-4-local_12.4.1-550.54.15-1_amd64.deb && \ dpkg -i cuda-repo-ubuntu2204-12-4-local_12.4.1-550.54.15-1_amd64.deb && \ cp /var/cuda-repo-ubuntu2204-12-4-local/cuda-*-keyring.gpg /usr/share/keyrings/ && \ apt-get update && \ apt-get -y install cuda-toolkit-12-4 && \ rm cuda-repo-ubuntu2204-12-4-local_12.4.1-550.54.15-1_amd64.deb && \ update-alternatives --set cuda /usr/local/cuda-12.4 && \ rm -rf /usr/local/cuda-12.6 # Install torch-2.6.0+cu124 + vllm-0.8.5.post1 + sglang-0.4.6.post5 # torch-2.6.0+cu124: cxx11abi=False # torch-2.6.0+cu126: cxx11abi=True # see https://github.com/flashinfer-ai/flashinfer/issues/911 # Install sglang-0.4.6.post1 and torch-memory-saver RUN pip install --resume-retries 999 "sglang[all]==0.4.6.post5" --no-cache-dir --find-links https://flashinfer.ai/whl/cu124/torch2.6/flashinfer-python && pip install --resume-retries 999 torch-memory-saver --no-cache-dir RUN pip install --resume-retries 999 --no-cache-dir "vllm==0.8.5.post1" "torch==2.6.0" "torchvision==0.21.0" "torchaudio==2.6.0" "tensordict==0.6.2" torchdata RUN pip install --resume-retries 999 --no-cache-dir "transformers[hf_xet]>=4.51.0" accelerate datasets peft hf-transfer \ "numpy<2.0.0" "pyarrow>=15.0.0" pandas \ ray[default] codetiming hydra-core pylatexenc qwen-vl-utils wandb dill pybind11 liger-kernel mathruler blobfile \ pytest py-spy pyext pre-commit ruff # Install flash-attn-2.7.4.post1 (cxx11abi=False) RUN wget -nv https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.4.post1/flash_attn-2.7.4.post1+cu12torch2.6cxx11abiFALSE-cp310-cp310-linux_x86_64.whl && \ pip install --no-cache-dir flash_attn-2.7.4.post1+cu12torch2.6cxx11abiFALSE-cp310-cp310-linux_x86_64.whl # Fix packages RUN pip uninstall -y pynvml nvidia-ml-py && \ pip install --resume-retries 999 --no-cache-dir --upgrade "nvidia-ml-py>=12.560.30" "fastapi[standard]>=0.115.0" "optree>=0.13.0" "pydantic>=2.9" "grpcio>=1.62.1" # Install cudnn RUN aria2c --max-tries=9999 https://developer.download.nvidia.com/compute/cudnn/9.8.0/local_installers/cudnn-local-repo-ubuntu2204-9.8.0_1.0-1_amd64.deb && \ dpkg -i cudnn-local-repo-ubuntu2204-9.8.0_1.0-1_amd64.deb && \ cp /var/cudnn-local-repo-ubuntu2204-9.8.0/cudnn-*-keyring.gpg /usr/share/keyrings/ && \ apt-get update && \ apt-get -y install cudnn-cuda-12 && \ rm cudnn-local-repo-ubuntu2204-9.8.0_1.0-1_amd64.deb RUN pip install --resume-retries 999 --no-cache-dir nvidia-cudnn-cu12==9.8.0.87 # Install Apex RUN git clone https://github.com/NVIDIA/apex.git && \ cd apex && \ pip install -v --disable-pip-version-check --no-cache-dir --no-build-isolation --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" ./ # Install TransformerEngine RUN export NVTE_FRAMEWORK=pytorch && pip3 install --no-deps --no-cache-dir git+https://github.com/NVIDIA/TransformerEngine.git@v2.3 # Install Megatron-LM RUN pip3 install --no-deps --no-cache-dir git+https://github.com/NVIDIA/Megatron-LM.git@core_v0.13.1 # Fix opencv RUN pip install opencv-python RUN pip install opencv-fixer && \ python -c "from opencv_fixer import AutoFix; AutoFix()" # Install verl # Reset pip config RUN pip config unset global.index-url && \ pip config unset global.extra-index-url RUN apt-get update && \ apt-get install -y aria2 libfreeimage3 libfreeimage-dev zlib1g ================================================ FILE: 
verl_distillation/docker/README.md
================================================
# Dockerfiles of verl

We provide pre-built Docker images for quick setup. Starting from this version, we use a new image release hierarchy for productivity and stability.

The images are divided into three categories:

- **Base image**: no inference or training frameworks, only the basic dependencies. vLLM or SGLang can be installed directly on top of it, without reinstalling torch or CUDA.
- **Application image**: stable version with the inference and training frameworks installed.
- **Preview image**: unstable version with the latest frameworks and features.

The first two types of images are hosted in the [verlai/verl](https://hub.docker.com/r/verlai/verl) repository on Docker Hub, while the preview images are hosted in community repositories.

> Image versions map to verl releases; for example, an image tagged ``verl0.4`` is built for the verl ``v0.4.x`` releases.

## Base Image

The stable base image is ``verlai/verl:base-verl0.5-cu126-cudnn9.8-torch2.7.1-fa2.7.4``, available with different CUDA versions. The base image is updated infrequently, and app images can be built on top of it without reinstalling the base packages.

## Application Image

Starting from this version, we build separate images for vLLM and SGLang because of diverging dependencies such as FlashInfer. Two types of application images are available:

- **vLLM with FSDP and Megatron**: ``verlai/verl:app-verl0.5-transformers4.55.4-vllm0.10.0-mcore0.13.0-te2.2``
- **SGLang with FSDP and Megatron**: ``verlai/verl:app-verl0.5-transformers4.55.4-sglang0.4.10.post2-mcore0.13.0-te2.2``

Docker images with Megatron backends can run post-training for large language models such as ``Qwen/Qwen3-235B-A22B`` and ``deepseek-ai/DeepSeek-V3-0324``. Refer to the :doc:`Large Language Model Post-Training documentation<../perf/dpsk>` for more details.

Application images can be updated frequently; the Dockerfiles can be found in ``docker/verl[version]-[packages]/Dockerfile.app.[frameworks]``. Starting from the base image, it is easy to build your own application image with the desired inference and training frameworks.

## Community Image

For vLLM with FSDP, please refer to the [hiyouga/verl](https://hub.docker.com/r/hiyouga/verl) repository; the latest version is ``hiyouga/verl:ngc-th2.6.0-cu126-vllm0.8.4-flashinfer0.2.2-cxx11abi0``.

For SGLang with FSDP, please refer to the [ocss884/verl-sglang](https://hub.docker.com/r/ocss884/verl-sglang) repository; the latest version is ``ocss884/verl-sglang:ngc-th2.6.0-cu126-sglang0.4.6.post5``, provided by the SGLang RL Group.

For the latest vLLM with Megatron, please refer to the [iseekyan/verl](https://hub.docker.com/r/iseekyan/verl) repository; the latest version is ``iseekyan/verl:nemo.gptoss_vllm0.11.0``.

See the files under ``docker/`` for the NGC-based images, or if you want to build your own. Note that for AWS instances with an EFA network interface (SageMaker AI Pods), you need to install the EFA driver as shown in ``docker/Dockerfile.extenstion.awsefa``.

## Installation from Docker

After pulling the desired Docker image and installing the desired inference and training frameworks, you can run it with the following steps:

1. Launch the desired Docker image and attach into it (replace ``<image:tag>`` with the image you pulled):

```sh
docker create --runtime=nvidia --gpus all --net=host --shm-size="10g" --cap-add=SYS_ADMIN -v .:/workspace/verl --name verl <image:tag> sleep infinity
docker start verl
docker exec -it verl bash
```

2. If you use the images provided, you only need to install verl itself, without its dependencies:

```sh
# install the nightly version (recommended)
git clone https://github.com/volcengine/verl && cd verl
pip3 install --no-deps -e .
```

[Optional] If you want to be able to switch between different frameworks, you can install verl with the following commands:

```sh
# install the nightly version (recommended)
git clone https://github.com/volcengine/verl && cd verl
pip3 install -e .[vllm]
pip3 install -e .[sglang]
```

================================================
FILE: verl_distillation/docker/ascend/Dockerfile.ascend_8.2.rc1_a2
================================================
# 1. Base Image FROM swr.cn-south-1.myhuaweicloud.com/ascendhub/cann:8.2.rc1-910b-ubuntu22.04-py3.11 # 2. Record the system architecture before installing vllm RUN ARCH=$(uname -m) && \ echo "export ARCH=$ARCH" >> ~/.bashrc && \ echo "[LOG INFO] Current system architecture: $ARCH" # 3. Install system dependencies RUN apt-get update -y && apt-get install -y --no-install-recommends \ gcc g++ cmake libnuma-dev wget git curl jq vim build-essential \ && rm -rf /var/lib/apt/lists/* # 4. Install vllm RUN ARCH=$(uname -m) && \ echo "[LOG INFO] Detected architecture: $ARCH" && \ if [ "$ARCH" = "x86_64" ]; then \ echo "[LOG INFO] Entering x86_64 branch: Setting pip extra index url"; \ pip config set global.extra-index-url "https://download.pytorch.org/whl/cpu/ https://mirrors.huaweicloud.com/ascend/repos/pypi"; \ else \ echo "[LOG INFO] Entering aarch64 branch: No extra pip index url set"; \ fi && \ git clone --depth 1 --branch v0.9.1 https://github.com/vllm-project/vllm && \ cd vllm && \ VLLM_TARGET_DEVICE=empty pip install -v -e . && \ cd .. # 5. Install vllm_ascend RUN ARCH=$(uname -m) && \ echo "[LOG INFO] Configuring LD_LIBRARY_PATH for $ARCH" && \ if [ "$ARCH" = "aarch64" ]; then \ export LD_LIBRARY_PATH=/usr/local/Ascend/ascend-toolkit/8.2.RC1/aarch64-linux/devlib/linux/aarch64:$LD_LIBRARY_PATH; \ elif [ "$ARCH" = "x86_64" ]; then \ export LD_LIBRARY_PATH=/usr/local/Ascend/ascend-toolkit/8.2.RC1/x86_64-linux/devlib/linux/x86_64/:$LD_LIBRARY_PATH; \ fi && \ source /usr/local/Ascend/ascend-toolkit/set_env.sh && \ source /usr/local/Ascend/nnal/atb/set_env.sh && \ git clone --depth 1 --branch v0.9.1 https://github.com/vllm-project/vllm-ascend.git && \ cd vllm-ascend && \ pip install -v -e . && \ cd .. # 6. Install verl RUN git clone https://github.com/volcengine/verl.git && \ cd verl && \ git checkout main && \ pip install -r requirements-npu.txt && \ pip install -e . && \ cd .. # 7. Install MindSpeed RUN git clone https://gitcode.com/Ascend/MindSpeed.git && \ cd MindSpeed && \ git checkout f2b0977e && \ cd .. && \ pip install -e MindSpeed # 8. Install Megatron-LM and configure PYTHONPATH RUN git clone https://github.com/NVIDIA/Megatron-LM.git && \ cd Megatron-LM && \ git checkout core_v0.12.1 && \ cd .. && \ echo "export PYTHONPATH=\$PYTHONPATH:/Megatron-LM" >> ~/.bashrc # Show pip list and clear pip cache to reduce image size RUN pip list && pip cache purge # Setting Default Commands CMD ["/bin/bash"]
================================================
FILE: verl_distillation/docker/ascend/Dockerfile.ascend_8.2.rc1_a3
================================================
# 1. Base Image FROM swr.cn-south-1.myhuaweicloud.com/ascendhub/cann:8.2.rc1-a3-ubuntu22.04-py3.11 # 2. Record the system architecture before installing vllm RUN ARCH=$(uname -m) && \ echo "export ARCH=$ARCH" >> ~/.bashrc && \ echo "[LOG INFO] Current system architecture: $ARCH" # 3. Install system dependencies RUN apt-get update -y && apt-get install -y --no-install-recommends \ gcc g++ cmake libnuma-dev wget git curl jq vim build-essential \ && rm -rf /var/lib/apt/lists/* # 4. Install vllm RUN ARCH=$(uname -m) && \ echo "[LOG INFO] Detected architecture: $ARCH" && \ if [ "$ARCH" = "x86_64" ]; then \ echo "[LOG INFO] Entering x86_64 branch: Setting pip extra index url"; \ pip config set global.extra-index-url "https://download.pytorch.org/whl/cpu/ https://mirrors.huaweicloud.com/ascend/repos/pypi"; \ else \ echo "[LOG INFO] Entering aarch64 branch: No extra pip index url set"; \ fi && \ git clone --depth 1 --branch v0.9.1 https://github.com/vllm-project/vllm && \ cd vllm && \ VLLM_TARGET_DEVICE=empty pip install -v -e . && \ cd .. # 5. Install vllm_ascend RUN ARCH=$(uname -m) && \ echo "[LOG INFO] Configuring LD_LIBRARY_PATH for $ARCH" && \ if [ "$ARCH" = "aarch64" ]; then \ export LD_LIBRARY_PATH=/usr/local/Ascend/ascend-toolkit/8.2.RC1/aarch64-linux/devlib/linux/aarch64:$LD_LIBRARY_PATH; \ elif [ "$ARCH" = "x86_64" ]; then \ export LD_LIBRARY_PATH=/usr/local/Ascend/ascend-toolkit/8.2.RC1/x86_64-linux/devlib/linux/x86_64/:$LD_LIBRARY_PATH; \ fi && \ source /usr/local/Ascend/ascend-toolkit/set_env.sh && \ source /usr/local/Ascend/nnal/atb/set_env.sh && \ git clone --depth 1 --branch v0.9.1 https://github.com/vllm-project/vllm-ascend.git && \ cd vllm-ascend && \ pip install -v -e . && \ cd .. # 6. Install verl RUN git clone https://github.com/volcengine/verl.git && \ cd verl && \ git checkout main && \ pip install -r requirements-npu.txt && \ pip install -e . && \ cd .. # 7. Install MindSpeed RUN git clone https://gitcode.com/Ascend/MindSpeed.git && \ cd MindSpeed && \ git checkout f2b0977e && \ cd .. && \ pip install -e MindSpeed # 8. Install Megatron-LM and configure PYTHONPATH RUN git clone https://github.com/NVIDIA/Megatron-LM.git && \ cd Megatron-LM && \ git checkout core_v0.12.1 && \ cd ..
&& \ echo "export PYTHONPATH=\$PYTHONPATH:/Megatron-LM" >> ~/.bashrc # Show pip list and clear pip cache to reduce image size RUN pip list && pip cache purge # Setting Default Commands CMD ["/bin/bash"] ================================================ FILE: verl_distillation/docker/verl0.4-cu124-torch2.6-fa2.7.4/Dockerfile.app.sglang.vllm.mcore0.12 ================================================ # Start from the verl base image # Dockerfile.base FROM verlai/verl:base-verl0.4-cu124-cudnn9.8-torch2.6-fa2.7.4 # Define environments ENV MAX_JOBS=32 ENV VLLM_WORKER_MULTIPROC_METHOD=spawn ENV DEBIAN_FRONTEND=noninteractive ENV NODE_OPTIONS="" ENV PIP_ROOT_USER_ACTION=ignore ENV HF_HUB_ENABLE_HF_TRANSFER="1" # Install sglang-0.4.6.post5 and torch-memory-saver RUN pip install --resume-retries 999 "sglang[all]==0.4.6.post5" --no-cache-dir --find-links https://flashinfer.ai/whl/cu124/torch2.6/flashinfer-python && pip install torch-memory-saver --no-cache-dir # Some sglang operations in 0.4.6.post5 require vllm # [Warning] vllm can have some packages not compatible with sglang, for example, flashinfer RUN pip install --resume-retries 999 --no-cache-dir vllm==0.8.5.post1 # Fix packages RUN pip install --no-cache-dir "tensordict==0.6.2" "transformers[hf_xet]>=4.51.0" accelerate datasets peft hf-transfer \ "numpy<2.0.0" "pyarrow>=19.0.1" pandas \ ray[default] codetiming hydra-core pylatexenc qwen-vl-utils wandb dill pybind11 liger-kernel mathruler blobfile xgrammar \ pytest py-spy pyext pre-commit ruff RUN pip uninstall -y pynvml nvidia-ml-py && \ pip install --resume-retries 999 --no-cache-dir --upgrade "nvidia-ml-py>=12.560.30" "fastapi[standard]>=0.115.0" "optree>=0.13.0" "pydantic>=2.9" "grpcio>=1.62.1" RUN pip install --resume-retries 999 --no-cache-dir nvidia-cudnn-cu12==9.8.0.87 # Install TransformerEngine RUN export NVTE_FRAMEWORK=pytorch && pip3 install --resume-retries 999 --no-deps --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/TransformerEngine.git@v2.2.1 # Install Megatron-LM RUN pip3 install --no-deps --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/Megatron-LM.git@core_v0.12.2 # Fix for transformers 4.53.0 RUN pip3 install --no-cache-dir "transformers[hf_xet]<4.52.0" # Install mbridge RUN pip3 install --no-cache-dir mbridge ================================================ FILE: verl_distillation/docker/verl0.4-cu124-torch2.6-fa2.7.4/Dockerfile.app.sglang.vllm.mcore0.12.deepep ================================================ # Start from the verl base image # Dockerfile.base FROM verlai/verl:base-verl0.4-cu124-cudnn9.8-torch2.6-fa2.7.4 # Define environments ENV MAX_JOBS=32 ENV VLLM_WORKER_MULTIPROC_METHOD=spawn ENV DEBIAN_FRONTEND=noninteractive ENV NODE_OPTIONS="" ENV PIP_ROOT_USER_ACTION=ignore ENV HF_HUB_ENABLE_HF_TRANSFER="1" # Install sglang-0.4.6.post5 and torch-memory-saver RUN pip install --resume-retries 999 "sglang[all]==0.4.6.post5" --no-cache-dir --find-links https://flashinfer.ai/whl/cu124/torch2.6/flashinfer-python && pip install torch-memory-saver --no-cache-dir # Some sglang operations in 0.4.6.post5 require vllm # [Warning] vllm can have some packages not compatible with sglang, for example, flashinfer RUN pip install --resume-retries 999 --no-cache-dir vllm==0.8.5.post1 # Fix packages RUN pip install --no-cache-dir "tensordict==0.6.2" "transformers[hf_xet]>=4.51.0" accelerate datasets peft hf-transfer \ "numpy<2.0.0" "pyarrow>=19.0.1" pandas \ ray[default] codetiming hydra-core pylatexenc qwen-vl-utils wandb dill pybind11 liger-kernel 
mathruler blobfile xgrammar \ pytest py-spy pyext pre-commit ruff RUN pip uninstall -y pynvml nvidia-ml-py && \ pip install --resume-retries 999 --no-cache-dir --upgrade "nvidia-ml-py>=12.560.30" "fastapi[standard]>=0.115.0" "optree>=0.13.0" "pydantic>=2.9" "grpcio>=1.62.1" RUN pip install --resume-retries 999 --no-cache-dir nvidia-cudnn-cu12==9.8.0.87 # Install TransformerEngine RUN export NVTE_FRAMEWORK=pytorch && pip3 install --resume-retries 999 --no-deps --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/TransformerEngine.git@v2.2.1 # Install Megatron-LM RUN pip3 install --no-deps --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/Megatron-LM.git@core_v0.12.2 # Fix for transformers 4.53.0 RUN pip3 install --no-cache-dir "transformers[hf_xet]<4.52.0" # Install mbridge RUN pip3 install --no-cache-dir mbridge # Install DeepEP ## the dependency of IBGDA RUN ln -s /usr/lib/x86_64-linux-gnu/libmlx5.so.1 /usr/lib/x86_64-linux-gnu/libmlx5.so ## Clone and build deepep and deepep-nvshmem RUN git clone -b v2.3.1 https://github.com/NVIDIA/gdrcopy.git && \ git clone https://github.com/deepseek-ai/DeepEP.git && \ cd DeepEP && git checkout a84a248 # Prepare nvshmem RUN wget https://developer.nvidia.com/downloads/assets/secure/nvshmem/nvshmem_src_3.2.5-1.txz && \ tar -xvf nvshmem_src_3.2.5-1.txz && mv nvshmem_src deepep-nvshmem && \ cd deepep-nvshmem && git apply ../DeepEP/third-party/nvshmem.patch ENV CUDA_HOME=/usr/local/cuda ### Set MPI environment variables. Having errors when not set. ENV CPATH=/usr/local/mpi/include:$CPATH ENV LD_LIBRARY_PATH=/usr/local/mpi/lib:$LD_LIBRARY_PATH ENV LD_LIBRARY_PATH=/usr/local/x86_64-linux-gnu:$LD_LIBRARY_PATH ENV GDRCOPY_HOME=/workspace/gdrcopy ## Build deepep-nvshmem RUN cd deepep-nvshmem && \ NVSHMEM_SHMEM_SUPPORT=0 \ NVSHMEM_UCX_SUPPORT=0 \ NVSHMEM_USE_NCCL=0 \ NVSHMEM_MPI_SUPPORT=0 \ NVSHMEM_IBGDA_SUPPORT=1 \ NVSHMEM_PMIX_SUPPORT=0 \ NVSHMEM_TIMEOUT_DEVICE_POLLING=0 \ NVSHMEM_USE_GDRCOPY=1 \ cmake -G Ninja -S . 
-B build/ -DCMAKE_INSTALL_PREFIX=/workspace/deepep-nvshmem/install && cmake --build build/ --target install ENV NVSHMEM_DIR=/workspace/deepep-nvshmem/install ENV LD_LIBRARY_PATH=$NVSHMEM_DIR/lib:$LD_LIBRARY_PATH ENV PATH=$NVSHMEM_DIR/bin:$PATH ## Build deepep RUN cd DeepEP && \ python setup.py install ================================================ FILE: verl_distillation/docker/verl0.4-cu124-torch2.6-fa2.7.4/Dockerfile.app.sglang.vllm.mcore0.13.preview ================================================ # Start from the verl base image # Dockerfile.base FROM verlai/verl:base-verl0.4-cu124-cudnn9.8-torch2.6-fa2.7.4 # Define environments ENV MAX_JOBS=32 ENV VLLM_WORKER_MULTIPROC_METHOD=spawn ENV DEBIAN_FRONTEND=noninteractive ENV NODE_OPTIONS="" ENV PIP_ROOT_USER_ACTION=ignore ENV HF_HUB_ENABLE_HF_TRANSFER="1" # Install sglang-0.4.6.post5 and torch-memory-saver RUN pip install --resume-retries 999 "sglang[all]==0.4.6.post5" --no-cache-dir --find-links https://flashinfer.ai/whl/cu124/torch2.6/flashinfer-python && pip install torch-memory-saver --no-cache-dir # Some sglang operations in 0.4.6.post5 require vllm # [Warning] vllm can have some packages not compatible with sglang, for example, flashinfer RUN pip install --resume-retries 999 --no-cache-dir vllm==0.8.5.post1 # Fix packages RUN pip install --no-cache-dir "tensordict==0.6.2" "transformers[hf_xet]>=4.51.0" accelerate datasets peft hf-transfer \ "numpy<2.0.0" "pyarrow>=19.0.1" pandas \ ray[default] codetiming hydra-core pylatexenc qwen-vl-utils wandb dill pybind11 liger-kernel mathruler blobfile xgrammar \ pytest py-spy pyext pre-commit ruff RUN pip uninstall -y pynvml nvidia-ml-py && \ pip install --resume-retries 999 --no-cache-dir --upgrade "nvidia-ml-py>=12.560.30" "fastapi[standard]>=0.115.0" "optree>=0.13.0" "pydantic>=2.9" "grpcio>=1.62.1" RUN pip install --resume-retries 999 --no-cache-dir nvidia-cudnn-cu12==9.8.0.87 # Install TransformerEngine RUN export NVTE_FRAMEWORK=pytorch && pip3 install --resume-retries 999 --no-deps --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/TransformerEngine.git@release_v2.5 # Install Megatron-LM RUN pip3 install --no-deps --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/Megatron-LM.git@core_r0.13.0 # Fix for transformers 4.53.0 RUN pip3 install --no-cache-dir "transformers[hf_xet]<4.52.0" # Install mbridge RUN pip3 install --no-cache-dir mbridge # Install DeepEP ## the dependency of IBGDA RUN ln -s /usr/lib/x86_64-linux-gnu/libmlx5.so.1 /usr/lib/x86_64-linux-gnu/libmlx5.so ## Clone and build deepep and deepep-nvshmem RUN git clone -b v2.3.1 https://github.com/NVIDIA/gdrcopy.git && \ git clone https://github.com/deepseek-ai/DeepEP.git && \ cd DeepEP && git checkout a84a248 # Prepare nvshmem RUN wget https://developer.nvidia.com/downloads/assets/secure/nvshmem/nvshmem_src_3.2.5-1.txz && \ tar -xvf nvshmem_src_3.2.5-1.txz && mv nvshmem_src deepep-nvshmem && \ cd deepep-nvshmem && git apply ../DeepEP/third-party/nvshmem.patch ENV CUDA_HOME=/usr/local/cuda ### Set MPI environment variables. Having errors when not set. 
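### (The ENV lines below add the container's bundled MPI include/lib paths, an extra x86_64 library directory, and GDRCOPY_HOME for the NVSHMEM/DeepEP build that follows.)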
ENV CPATH=/usr/local/mpi/include:$CPATH ENV LD_LIBRARY_PATH=/usr/local/mpi/lib:$LD_LIBRARY_PATH ENV LD_LIBRARY_PATH=/usr/local/x86_64-linux-gnu:$LD_LIBRARY_PATH ENV GDRCOPY_HOME=/workspace/gdrcopy ## Build deepep-nvshmem RUN cd deepep-nvshmem && \ NVSHMEM_SHMEM_SUPPORT=0 \ NVSHMEM_UCX_SUPPORT=0 \ NVSHMEM_USE_NCCL=0 \ NVSHMEM_MPI_SUPPORT=0 \ NVSHMEM_IBGDA_SUPPORT=1 \ NVSHMEM_PMIX_SUPPORT=0 \ NVSHMEM_TIMEOUT_DEVICE_POLLING=0 \ NVSHMEM_USE_GDRCOPY=1 \ cmake -G Ninja -S . -B build/ -DCMAKE_INSTALL_PREFIX=/workspace/deepep-nvshmem/install && cmake --build build/ --target install ENV NVSHMEM_DIR=/workspace/deepep-nvshmem/install ENV LD_LIBRARY_PATH=$NVSHMEM_DIR/lib:$LD_LIBRARY_PATH ENV PATH=$NVSHMEM_DIR/bin:$PATH ## Build deepep RUN cd DeepEP && \ python setup.py install ================================================ FILE: verl_distillation/docker/verl0.4-cu124-torch2.6-fa2.7.4/Dockerfile.app.vllm.mcore0.12 ================================================ # Start from the verl base image # Dockerfile.base FROM verlai/verl:base-verl0.4-cu124-cudnn9.8-torch2.6-fa2.7.4 # Define environments ENV MAX_JOBS=32 ENV VLLM_WORKER_MULTIPROC_METHOD=spawn ENV DEBIAN_FRONTEND=noninteractive ENV NODE_OPTIONS="" ENV PIP_ROOT_USER_ACTION=ignore ENV HF_HUB_ENABLE_HF_TRANSFER="1" # Install torch-2.6.0+cu124 + vllm-0.8.5.post1 # torch-2.6.0+cu124: cxx11abi=False # torch-2.6.0+cu126: cxx11abi=True # see https://github.com/flashinfer-ai/flashinfer/issues/911 RUN pip install --resume-retries 999 --no-cache-dir vllm==0.8.5.post1 # Install flashinfer-0.2.2.post1+cu126 (cxx11abi=True) # vllm-0.8.3 does not support flashinfer>=0.2.3 # see https://github.com/vllm-project/vllm/pull/15777 RUN aria2c --max-tries=9999 https://github.com/flashinfer-ai/flashinfer/releases/download/v0.2.2.post1/flashinfer_python-0.2.2.post1+cu124torch2.6-cp38-abi3-linux_x86_64.whl && \ pip install --no-cache-dir flashinfer_python-0.2.2.post1+cu124torch2.6-cp38-abi3-linux_x86_64.whl && \ rm flashinfer_python-0.2.2.post1+cu124torch2.6-cp38-abi3-linux_x86_64.whl # Fix packages RUN pip install --no-cache-dir "tensordict==0.6.2" "transformers[hf_xet]>=4.51.0" accelerate datasets peft hf-transfer \ "numpy<2.0.0" "pyarrow>=19.0.1" pandas \ ray[default] codetiming hydra-core pylatexenc qwen-vl-utils wandb dill pybind11 liger-kernel mathruler blobfile xgrammar \ pytest py-spy pyext pre-commit ruff RUN pip uninstall -y pynvml nvidia-ml-py && \ pip install --resume-retries 999 --no-cache-dir --upgrade "nvidia-ml-py>=12.560.30" "fastapi[standard]>=0.115.0" "optree>=0.13.0" "pydantic>=2.9" "grpcio>=1.62.1" RUN pip install --resume-retries 999 --no-cache-dir nvidia-cudnn-cu12==9.8.0.87 # Install TransformerEngine RUN export NVTE_FRAMEWORK=pytorch && pip3 install --resume-retries 999 --no-deps --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/TransformerEngine.git@v2.2.1 # Install Megatron-LM RUN pip3 install --no-deps --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/Megatron-LM.git@core_v0.12.2 # Fix for transformers 4.53.0 RUN pip3 install --no-cache-dir "transformers[hf_xet]<4.52.0" # Install mbridge RUN pip3 install --no-cache-dir mbridge ================================================ FILE: verl_distillation/docker/verl0.4-cu124-torch2.6-fa2.7.4/Dockerfile.app.vllm.mcore0.12.deepep ================================================ # Start from the verl base image # Dockerfile.base FROM verlai/verl:base-verl0.4-cu124-cudnn9.8-torch2.6-fa2.7.4 # Define environments ENV MAX_JOBS=32 ENV VLLM_WORKER_MULTIPROC_METHOD=spawn ENV 
DEBIAN_FRONTEND=noninteractive ENV NODE_OPTIONS="" ENV PIP_ROOT_USER_ACTION=ignore ENV HF_HUB_ENABLE_HF_TRANSFER="1" # Install torch-2.6.0+cu124 + vllm-0.8.5.post1 # torch-2.6.0+cu124: cxx11abi=False # torch-2.6.0+cu126: cxx11abi=True # see https://github.com/flashinfer-ai/flashinfer/issues/911 RUN pip install --resume-retries 999 --no-cache-dir vllm==0.8.5.post1 # Install flashinfer-0.2.2.post1+cu126 (cxx11abi=True) # vllm-0.8.3 does not support flashinfer>=0.2.3 # see https://github.com/vllm-project/vllm/pull/15777 RUN aria2c --max-tries=9999 https://github.com/flashinfer-ai/flashinfer/releases/download/v0.2.2.post1/flashinfer_python-0.2.2.post1+cu124torch2.6-cp38-abi3-linux_x86_64.whl && \ pip install --no-cache-dir flashinfer_python-0.2.2.post1+cu124torch2.6-cp38-abi3-linux_x86_64.whl && \ rm flashinfer_python-0.2.2.post1+cu124torch2.6-cp38-abi3-linux_x86_64.whl # Fix packages RUN pip install --no-cache-dir "tensordict==0.6.2" "transformers[hf_xet]>=4.51.0" accelerate datasets peft hf-transfer \ "numpy<2.0.0" "pyarrow>=19.0.1" pandas \ ray[default] codetiming hydra-core pylatexenc qwen-vl-utils wandb dill pybind11 liger-kernel mathruler blobfile xgrammar \ pytest py-spy pyext pre-commit ruff RUN pip uninstall -y pynvml nvidia-ml-py && \ pip install --resume-retries 999 --no-cache-dir --upgrade "nvidia-ml-py>=12.560.30" "fastapi[standard]>=0.115.0" "optree>=0.13.0" "pydantic>=2.9" "grpcio>=1.62.1" RUN pip install --resume-retries 999 --no-cache-dir nvidia-cudnn-cu12==9.8.0.87 # Install TransformerEngine RUN export NVTE_FRAMEWORK=pytorch && pip3 install --resume-retries 999 --no-deps --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/TransformerEngine.git@v2.2.1 # Install Megatron-LM RUN pip3 install --no-deps --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/Megatron-LM.git@core_v0.12.2 # Fix for transformers 4.53.0 RUN pip3 install --no-cache-dir "transformers[hf_xet]<4.52.0" # Install mbridge RUN pip3 install --no-cache-dir mbridge # Install DeepEP ## the dependency of IBGDA RUN ln -s /usr/lib/x86_64-linux-gnu/libmlx5.so.1 /usr/lib/x86_64-linux-gnu/libmlx5.so ## Clone and build deepep and deepep-nvshmem RUN git clone -b v2.3.1 https://github.com/NVIDIA/gdrcopy.git && \ git clone https://github.com/deepseek-ai/DeepEP.git && \ cd DeepEP && git checkout a84a248 # Prepare nvshmem RUN wget https://developer.nvidia.com/downloads/assets/secure/nvshmem/nvshmem_src_3.2.5-1.txz && \ tar -xvf nvshmem_src_3.2.5-1.txz && mv nvshmem_src deepep-nvshmem && \ cd deepep-nvshmem && git apply ../DeepEP/third-party/nvshmem.patch ENV CUDA_HOME=/usr/local/cuda ### Set MPI environment variables. Having errors when not set. ENV CPATH=/usr/local/mpi/include:$CPATH ENV LD_LIBRARY_PATH=/usr/local/mpi/lib:$LD_LIBRARY_PATH ENV LD_LIBRARY_PATH=/usr/local/x86_64-linux-gnu:$LD_LIBRARY_PATH ENV GDRCOPY_HOME=/workspace/gdrcopy ## Build deepep-nvshmem RUN cd deepep-nvshmem && \ NVSHMEM_SHMEM_SUPPORT=0 \ NVSHMEM_UCX_SUPPORT=0 \ NVSHMEM_USE_NCCL=0 \ NVSHMEM_MPI_SUPPORT=0 \ NVSHMEM_IBGDA_SUPPORT=1 \ NVSHMEM_PMIX_SUPPORT=0 \ NVSHMEM_TIMEOUT_DEVICE_POLLING=0 \ NVSHMEM_USE_GDRCOPY=1 \ cmake -G Ninja -S . 
-B build/ -DCMAKE_INSTALL_PREFIX=/workspace/deepep-nvshmem/install && cmake --build build/ --target install ENV NVSHMEM_DIR=/workspace/deepep-nvshmem/install ENV LD_LIBRARY_PATH=$NVSHMEM_DIR/lib:$LD_LIBRARY_PATH ENV PATH=$NVSHMEM_DIR/bin:$PATH ## Build deepep RUN cd DeepEP && \ python setup.py install ================================================ FILE: verl_distillation/docker/verl0.4-cu124-torch2.6-fa2.7.4/Dockerfile.app.vllm.mcore0.13.preview ================================================ # Start from the verl base image # Dockerfile.base FROM verlai/verl:base-verl0.4-cu124-cudnn9.8-torch2.6-fa2.7.4 # Define environments ENV MAX_JOBS=32 ENV VLLM_WORKER_MULTIPROC_METHOD=spawn ENV DEBIAN_FRONTEND=noninteractive ENV NODE_OPTIONS="" ENV PIP_ROOT_USER_ACTION=ignore ENV HF_HUB_ENABLE_HF_TRANSFER="1" # Install torch-2.6.0+cu124 + vllm-0.8.5.post1 # torch-2.6.0+cu124: cxx11abi=False # torch-2.6.0+cu126: cxx11abi=True # see https://github.com/flashinfer-ai/flashinfer/issues/911 RUN pip install --resume-retries 999 --no-cache-dir vllm==0.8.5.post1 # Install flashinfer-0.2.2.post1+cu126 (cxx11abi=True) # vllm-0.8.3 does not support flashinfer>=0.2.3 # see https://github.com/vllm-project/vllm/pull/15777 RUN aria2c --max-tries=9999 https://github.com/flashinfer-ai/flashinfer/releases/download/v0.2.2.post1/flashinfer_python-0.2.2.post1+cu124torch2.6-cp38-abi3-linux_x86_64.whl && \ pip install --no-cache-dir flashinfer_python-0.2.2.post1+cu124torch2.6-cp38-abi3-linux_x86_64.whl && \ rm flashinfer_python-0.2.2.post1+cu124torch2.6-cp38-abi3-linux_x86_64.whl # Fix packages RUN pip install --no-cache-dir "tensordict==0.6.2" "transformers[hf_xet]>=4.51.0" accelerate datasets peft hf-transfer \ "numpy<2.0.0" "pyarrow>=19.0.1" pandas \ ray[default] codetiming hydra-core pylatexenc qwen-vl-utils wandb dill pybind11 liger-kernel mathruler blobfile xgrammar \ pytest py-spy pyext pre-commit ruff RUN pip uninstall -y pynvml nvidia-ml-py && \ pip install --resume-retries 999 --no-cache-dir --upgrade "nvidia-ml-py>=12.560.30" "fastapi[standard]>=0.115.0" "optree>=0.13.0" "pydantic>=2.9" "grpcio>=1.62.1" RUN pip install --resume-retries 999 --no-cache-dir nvidia-cudnn-cu12==9.8.0.87 # Install TransformerEngine RUN export NVTE_FRAMEWORK=pytorch && pip3 install --resume-retries 999 --no-deps --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/TransformerEngine.git@release_v2.5 # Install Megatron-LM RUN pip3 install --no-deps --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/Megatron-LM.git@core_v0.12.2 # Install mbridge RUN pip3 install --no-cache-dir mbridge # Install DeepEP ## the dependency of IBGDA RUN ln -s /usr/lib/x86_64-linux-gnu/libmlx5.so.1 /usr/lib/x86_64-linux-gnu/libmlx5.so ## Clone and build deepep and deepep-nvshmem RUN git clone -b v2.3.1 https://github.com/NVIDIA/gdrcopy.git && \ git clone https://github.com/deepseek-ai/DeepEP.git && \ cd DeepEP && git checkout a84a248 # Prepare nvshmem RUN wget https://developer.nvidia.com/downloads/assets/secure/nvshmem/nvshmem_src_3.2.5-1.txz && \ tar -xvf nvshmem_src_3.2.5-1.txz && mv nvshmem_src deepep-nvshmem && \ cd deepep-nvshmem && git apply ../DeepEP/third-party/nvshmem.patch ENV CUDA_HOME=/usr/local/cuda ### Set MPI environment variables. Having errors when not set. 
ENV CPATH=/usr/local/mpi/include:$CPATH ENV LD_LIBRARY_PATH=/usr/local/mpi/lib:$LD_LIBRARY_PATH ENV LD_LIBRARY_PATH=/usr/local/x86_64-linux-gnu:$LD_LIBRARY_PATH ENV GDRCOPY_HOME=/workspace/gdrcopy ## Build deepep-nvshmem RUN cd deepep-nvshmem && \ NVSHMEM_SHMEM_SUPPORT=0 \ NVSHMEM_UCX_SUPPORT=0 \ NVSHMEM_USE_NCCL=0 \ NVSHMEM_MPI_SUPPORT=0 \ NVSHMEM_IBGDA_SUPPORT=1 \ NVSHMEM_PMIX_SUPPORT=0 \ NVSHMEM_TIMEOUT_DEVICE_POLLING=0 \ NVSHMEM_USE_GDRCOPY=1 \ cmake -G Ninja -S . -B build/ -DCMAKE_INSTALL_PREFIX=/workspace/deepep-nvshmem/install && cmake --build build/ --target install ENV NVSHMEM_DIR=/workspace/deepep-nvshmem/install ENV LD_LIBRARY_PATH=$NVSHMEM_DIR/lib:$LD_LIBRARY_PATH ENV PATH=$NVSHMEM_DIR/bin:$PATH ## Build deepep RUN cd DeepEP && \ python setup.py install ================================================ FILE: verl_distillation/docker/verl0.4-cu124-torch2.6-fa2.7.4/Dockerfile.base ================================================ # Base Docker Image of verl, with CUDA/Torch/FlashAttn/Apex/TransformerEngine, without other frameworks # Target: verlai/verl:base-v2-cu124-cudnn9.8-torch2.6-fa2.8.0-te2.3 # Start from the NVIDIA official image (ubuntu-22.04 + cuda-12.6 + python-3.10) # https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-24-08.html FROM nvcr.io/nvidia/pytorch:24.08-py3 # Define environments ENV MAX_JOBS=16 ENV VLLM_WORKER_MULTIPROC_METHOD=spawn ENV DEBIAN_FRONTEND=noninteractive ENV NODE_OPTIONS="" ENV PIP_ROOT_USER_ACTION=ignore ENV HF_HUB_ENABLE_HF_TRANSFER="1" # Define installation arguments ARG APT_SOURCE=https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ ARG PIP_INDEX=https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple # Set apt source RUN cp /etc/apt/sources.list /etc/apt/sources.list.bak && \ { \ echo "deb ${APT_SOURCE} jammy main restricted universe multiverse"; \ echo "deb ${APT_SOURCE} jammy-updates main restricted universe multiverse"; \ echo "deb ${APT_SOURCE} jammy-backports main restricted universe multiverse"; \ echo "deb ${APT_SOURCE} jammy-security main restricted universe multiverse"; \ } > /etc/apt/sources.list # Install systemctl RUN apt-get update && \ apt-get install -y -o Dpkg::Options::="--force-confdef" systemd && \ apt-get clean # Install tini RUN apt-get update && \ apt-get install -y tini aria2 && \ apt-get clean # Change pip source RUN pip config set global.index-url "${PIP_INDEX}" && \ pip config set global.extra-index-url "${PIP_INDEX}" && \ python -m pip install --upgrade pip # Uninstall nv-pytorch fork RUN pip uninstall -y torch torchvision torchaudio \ pytorch-quantization pytorch-triton torch-tensorrt \ xgboost transformer_engine flash_attn apex megatron-core grpcio # Reinstall CUDA 12.4 RUN aria2c https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-ubuntu2204.pin && \ mv cuda-ubuntu2204.pin /etc/apt/preferences.d/cuda-repository-pin-600 RUN aria2c --always-resume=true --max-tries=99999 https://developer.download.nvidia.com/compute/cuda/12.4.1/local_installers/cuda-repo-ubuntu2204-12-4-local_12.4.1-550.54.15-1_amd64.deb && \ dpkg -i cuda-repo-ubuntu2204-12-4-local_12.4.1-550.54.15-1_amd64.deb && \ cp /var/cuda-repo-ubuntu2204-12-4-local/cuda-*-keyring.gpg /usr/share/keyrings/ && \ apt-get update && \ apt-get -y install cuda-toolkit-12-4 && \ rm cuda-repo-ubuntu2204-12-4-local_12.4.1-550.54.15-1_amd64.deb && \ update-alternatives --set cuda /usr/local/cuda-12.4 && \ rm -rf /usr/local/cuda-12.6 RUN pip install --resume-retries 999 --no-cache-dir torch==2.6.0 torchvision==0.21.0 torchaudio==2.6.0 
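# Common Python dependencies shared by the application images built on top of this base (the app Dockerfiles in this directory re-pin several of these).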
RUN pip install --resume-retries 999 --no-cache-dir "tensordict==0.6.2" torchdata "transformers[hf_xet]>=4.51.0" accelerate datasets peft hf-transfer \ "numpy<2.0.0" "pyarrow>=19.0.1" pandas \ ray[default] codetiming hydra-core pylatexenc qwen-vl-utils wandb dill pybind11 liger-kernel mathruler blobfile xgrammar \ pytest py-spy pyext pre-commit ruff # Install flash-attn-2.7.4.post1 (cxx11abi=False) RUN wget -nv https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.4.post1/flash_attn-2.7.4.post1+cu12torch2.6cxx11abiFALSE-cp310-cp310-linux_x86_64.whl && \ pip install --no-cache-dir flash_attn-2.7.4.post1+cu12torch2.6cxx11abiFALSE-cp310-cp310-linux_x86_64.whl # Fix packages RUN pip uninstall -y pynvml nvidia-ml-py && \ pip install --no-cache-dir --upgrade "nvidia-ml-py>=12.560.30" "fastapi[standard]>=0.115.0" "optree>=0.13.0" "pydantic>=2.9" "grpcio>=1.62.1" # Install cudnn RUN aria2c --max-tries=9999 https://developer.download.nvidia.com/compute/cudnn/9.8.0/local_installers/cudnn-local-repo-ubuntu2204-9.8.0_1.0-1_amd64.deb && \ dpkg -i cudnn-local-repo-ubuntu2204-9.8.0_1.0-1_amd64.deb && \ cp /var/cudnn-local-repo-ubuntu2204-9.8.0/cudnn-*-keyring.gpg /usr/share/keyrings/ && \ apt-get update && \ apt-get -y install cudnn-cuda-12 && \ rm cudnn-local-repo-ubuntu2204-9.8.0_1.0-1_amd64.deb # Install Apex RUN git clone https://github.com/NVIDIA/apex.git && \ cd apex && \ pip install -v --disable-pip-version-check --no-cache-dir --no-build-isolation --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" ./ # Profiling tools RUN aria2c --always-resume=true --max-tries=99999 https://developer.nvidia.com/downloads/assets/tools/secure/nsight-systems/2025_3/nsight-systems-2025.3.1_2025.3.1.90-1_amd64.deb && \ apt-get update && apt-get install -y libxcb-cursor0 && \ dpkg -i ./nsight-systems-2025.3.1_2025.3.1.90-1_amd64.deb && \ rm -rf /usr/local/cuda/bin/nsys && \ ln -s /opt/nvidia/nsight-systems/2025.3.1/target-linux-x64/nsys /usr/local/cuda/bin/nsys && \ rm -rf /usr/local/cuda/bin/nsys-ui && \ ln -s /opt/nvidia/nsight-systems/2025.3.1/target-linux-x64/nsys-ui /usr/local/cuda/bin/nsys-ui && \ rm nsight-systems-2025.3.1_2025.3.1.90-1_amd64.deb # Fix opencv RUN pip install --resume-retries 999 --no-cache-dir opencv-python RUN pip install --resume-retries 999 --no-cache-dir opencv-fixer && \ python -c "from opencv_fixer import AutoFix; AutoFix()" RUN pip install --resume-retries 999 --no-cache-dir cuda-bindings # Reset pip config RUN pip config unset global.index-url && \ pip config unset global.extra-index-url RUN apt-get update && \ apt-get install -y libfreeimage3 libfreeimage-dev zlib1g htop
================================================
FILE: verl_distillation/docker/verl0.4-cu124-torch2.6-fa2.7.4/README.md
================================================
# verl images for verl v0.4.x

## Important package versions

```txt
cuda==12.4
cudnn==9.8.0
torch==2.6.0
flash_attn==2.7.4
sglang==0.4.6.post5
vllm==0.8.5.post1
nvidia-cudnn-cu12==9.8.0.87
transformer_engine==2.3
megatron.core==core_v0.12.2

# Preview
transformer_engine==2.5
megatron.core==core_r0.13.0
```

## Target

- Base image:
  - `verlai/verl:base-verl0.4-cu124-cudnn9.8-torch2.6-fa2.7.4`
- App image:
  - `verlai/verl:app-verl0.4-sglang0.4.6.post5-vllm0.8.5-mcore0.12.2-te2.2`: SGLang 0.4.6.post5 requires vLLM, and vLLM can have some package conflicts with SGLang
  - `verlai/verl:app-verl0.4-sglang0.4.6.post5-vllm0.8.5-mcore0.12.2-te2.2-deepep`: built with DeepEP
  - `verlai/verl:app-verl0.4-vllm0.8.5-mcore0.12.2-te2.2`
  - `verlai/verl:app-verl0.4-vllm0.8.5-mcore0.12.2-te2.2-deepep`: built with DeepEP
- Preview image:
  - `verlai/verl:app-verl0.4-sglang0.4.6.post5-vllm0.8.5-mcore0.13.0-te2.2-preview`
  - `verlai/verl:app-verl0.4-vllm0.8.5-mcore0.13.0-te2.2-preview`
================================================
FILE: verl_distillation/docker/verl0.5-cu126-torch2.7-fa2.7.4/Dockerfile.app.sglang0.4.10.post2.mcore0.13
================================================
# Start from the verl base image # Dockerfile.base FROM verlai/verl:base-verl0.5-cu126-cudnn9.8-torch2.7.1-fa2.7.4 # Define environments ENV MAX_JOBS=8 ENV VLLM_WORKER_MULTIPROC_METHOD=spawn ENV DEBIAN_FRONTEND=noninteractive ENV NODE_OPTIONS="" ENV PIP_ROOT_USER_ACTION=ignore ENV HF_HUB_ENABLE_HF_TRANSFER="1" # Install sglang-0.4.10.post2 # Install FlashInfer Python package RUN pip install --upgrade pip setuptools packaging RUN pip install --resume-retries 999 --no-cache-dir --no-build-isolation flashinfer-python==0.2.9rc1 RUN pip install --resume-retries 999 --no-cache-dir --no-build-isolation "sglang[all]==0.4.10.post2" # Fix packages RUN pip install --no-cache-dir "tensordict==0.6.2" "transformers[hf_xet]==4.55.4" accelerate datasets peft hf-transfer \ "numpy<2.0.0" "pyarrow>=19.0.1" pandas \ ray[default] codetiming hydra-core pylatexenc qwen-vl-utils wandb dill pybind11 liger-kernel mathruler blobfile xgrammar \ pytest py-spy pyext pre-commit ruff RUN pip uninstall -y pynvml nvidia-ml-py && \ pip install --resume-retries 999 --no-cache-dir --upgrade "nvidia-ml-py>=12.560.30" "fastapi[standard]>=0.115.0" "optree>=0.13.0" "pydantic>=2.9" "grpcio>=1.62.1" RUN pip install --resume-retries 999 --no-cache-dir nvidia-cudnn-cu12==9.8.0.87 # Install TransformerEngine RUN export NVTE_FRAMEWORK=pytorch && pip3 install --resume-retries 999 --no-deps --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/TransformerEngine.git@v2.2.1 # Install Megatron-LM RUN pip3 install --no-deps --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/Megatron-LM.git@core_v0.13.0 # Install mbridge RUN pip3 install --no-cache-dir mbridge
================================================
FILE: verl_distillation/docker/verl0.5-cu126-torch2.7-fa2.7.4/Dockerfile.app.sglang0.4.9.post6.mcore0.13
================================================
# Start from the verl base image # Dockerfile.base FROM verlai/verl:base-verl0.5-cu126-cudnn9.8-torch2.7.1-fa2.7.4 # Define environments ENV MAX_JOBS=8 ENV VLLM_WORKER_MULTIPROC_METHOD=spawn ENV DEBIAN_FRONTEND=noninteractive ENV NODE_OPTIONS="" ENV PIP_ROOT_USER_ACTION=ignore ENV HF_HUB_ENABLE_HF_TRANSFER="1" # Install sglang-0.4.9.post6 # Install FlashInfer Python package RUN pip install --upgrade pip setuptools packaging RUN pip install --resume-retries 999 --no-cache-dir --no-build-isolation flashinfer-python==0.2.9rc1 RUN pip install --resume-retries 999 --no-cache-dir --no-build-isolation "sglang[all]==0.4.9.post6" # Fix packages RUN pip install --no-cache-dir "tensordict==0.6.2" "transformers[hf_xet]==4.55.4" accelerate datasets peft hf-transfer \ "numpy<2.0.0" "pyarrow>=19.0.1" pandas \ ray[default] codetiming hydra-core pylatexenc qwen-vl-utils wandb dill pybind11 liger-kernel mathruler blobfile xgrammar \ pytest py-spy pyext pre-commit ruff RUN pip uninstall -y pynvml nvidia-ml-py && \ pip install --resume-retries 999 --no-cache-dir --upgrade "nvidia-ml-py>=12.560.30" "fastapi[standard]>=0.115.0" "optree>=0.13.0" "pydantic>=2.9" "grpcio>=1.62.1" RUN pip install
--resume-retries 999 --no-cache-dir nvidia-cudnn-cu12==9.8.0.87 # Install TransformerEngine RUN export NVTE_FRAMEWORK=pytorch && pip3 install --resume-retries 999 --no-deps --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/TransformerEngine.git@v2.2.1 # Install Megatron-LM RUN pip3 install --no-deps --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/Megatron-LM.git@core_v0.13.0 # Install mbridge RUN pip3 install --no-cache-dir mbridge ================================================ FILE: verl_distillation/docker/verl0.5-cu126-torch2.7-fa2.7.4/Dockerfile.app.vllm.mcore0.13 ================================================ # Start from the verl base image # Dockerfile.base FROM verlai/verl:base-verl0.5-cu126-cudnn9.8-torch2.7.1-fa2.7.4 # Define environments ENV MAX_JOBS=32 ENV VLLM_WORKER_MULTIPROC_METHOD=spawn ENV DEBIAN_FRONTEND=noninteractive ENV NODE_OPTIONS="" ENV PIP_ROOT_USER_ACTION=ignore ENV HF_HUB_ENABLE_HF_TRANSFER="1" # Install torch-2.7.1+cu126 + vllm-0.10.0 RUN pip install --resume-retries 999 --no-cache-dir vllm==0.10.0 # Fix packages # transformers 4.54.0 still not support RUN pip install --no-cache-dir "tensordict==0.6.2" "transformers[hf_xet]>=4.55.4" accelerate datasets peft hf-transfer \ "numpy<2.0.0" "pyarrow>=19.0.1" pandas \ ray[default] codetiming hydra-core pylatexenc qwen-vl-utils wandb dill pybind11 liger-kernel mathruler blobfile xgrammar \ pytest py-spy pyext pre-commit ruff RUN pip uninstall -y pynvml nvidia-ml-py && \ pip install --resume-retries 999 --no-cache-dir --upgrade "nvidia-ml-py>=12.560.30" "fastapi[standard]>=0.115.0" "optree>=0.13.0" "pydantic>=2.9" "grpcio>=1.62.1" RUN pip install --resume-retries 999 --no-cache-dir nvidia-cudnn-cu12==9.8.0.87 # Install TransformerEngine RUN export NVTE_FRAMEWORK=pytorch && pip3 install --resume-retries 999 --no-deps --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/TransformerEngine.git@v2.2.1 # Install Megatron-LM RUN pip3 install --no-deps --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/Megatron-LM.git@core_v0.13.0 # Install mbridge RUN pip3 install --no-cache-dir mbridge # Fix qwen vl RUN pip3 install --no-cache-dir --no-deps trl ================================================ FILE: verl_distillation/docker/verl0.5-cu126-torch2.7-fa2.7.4/Dockerfile.app.vllm.mcore0.15 ================================================ # Start from the verl base image # Dockerfile.base FROM iseekyan/verl:base-verl0.5-cu126-cudnn9.8-torch2.7.1-fa2.7.4-h100 # Define environments ENV MAX_JOBS=32 ENV VLLM_WORKER_MULTIPROC_METHOD=spawn ENV DEBIAN_FRONTEND=noninteractive ENV NODE_OPTIONS="" ENV PIP_ROOT_USER_ACTION=ignore ENV HF_HUB_ENABLE_HF_TRANSFER="1" # Install torch-2.7.1+cu126 + vllm-0.10.0 RUN pip install --resume-retries 999 --no-cache-dir vllm==0.10.0 # Fix packages # transformers 4.54.0 still not support RUN pip install --no-cache-dir "tensordict==0.6.2" "transformers[hf_xet]>=4.55.4" accelerate datasets peft hf-transfer \ "numpy<2.0.0" "pyarrow>=19.0.1" pandas \ ray[default] codetiming hydra-core pylatexenc qwen-vl-utils wandb dill pybind11 liger-kernel mathruler blobfile xgrammar \ pytest py-spy pyext pre-commit ruff RUN pip uninstall -y pynvml nvidia-ml-py && \ pip install --resume-retries 999 --no-cache-dir --upgrade "nvidia-ml-py>=12.560.30" "fastapi[standard]>=0.115.0" "optree>=0.13.0" "pydantic>=2.9" "grpcio>=1.62.1" RUN pip install --resume-retries 999 --no-cache-dir nvidia-cudnn-cu12==9.8.0.87 # Install TransformerEngine RUN export NVTE_FRAMEWORK=pytorch 
&& pip3 install --resume-retries 999 --no-deps --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/TransformerEngine.git@release_v2.7 RUN pip install onnxscript # Install Megatron-LM RUN pip3 install --no-deps --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/Megatron-LM.git@core_v0.15.0rc4 # Install mbridge RUN pip3 install --no-cache-dir mbridge==v0.15.0 # Fix qwen vl RUN pip3 install --no-cache-dir --no-deps trl ================================================ FILE: verl_distillation/docker/verl0.5-cu126-torch2.7-fa2.7.4/Dockerfile.base.torch2.7.1 ================================================ # Base Docker Image of verl, with CUDA/Torch/FlashAttn/Apex/TransformerEngine, without other frameworks # Target: verlai/verl:base-verl0.5-cu126-cudnn9.8-torch2.7.1-fa2.8.0-fi0.2.6 # Start from the NVIDIA official image (ubuntu-22.04 + cuda-12.6 + python-3.10) # https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-24-08.html FROM nvcr.io/nvidia/pytorch:24.08-py3 # Define environments ENV MAX_JOBS=16 ENV VLLM_WORKER_MULTIPROC_METHOD=spawn ENV DEBIAN_FRONTEND=noninteractive ENV NODE_OPTIONS="" ENV PIP_ROOT_USER_ACTION=ignore ENV HF_HUB_ENABLE_HF_TRANSFER="1" # Define installation arguments ARG APT_SOURCE=https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ ARG PIP_INDEX=https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple # Set apt source RUN cp /etc/apt/sources.list /etc/apt/sources.list.bak && \ { \ echo "deb ${APT_SOURCE} jammy main restricted universe multiverse"; \ echo "deb ${APT_SOURCE} jammy-updates main restricted universe multiverse"; \ echo "deb ${APT_SOURCE} jammy-backports main restricted universe multiverse"; \ echo "deb ${APT_SOURCE} jammy-security main restricted universe multiverse"; \ } > /etc/apt/sources.list # Install systemctl RUN apt-get update && \ apt-get install -y -o Dpkg::Options::="--force-confdef" systemd && \ apt-get clean # Install tini RUN apt-get update && \ apt-get install -y tini aria2 libfreeimage3 libfreeimage-dev zlib1g htop && \ apt-get clean # Change pip source RUN pip config set global.index-url "${PIP_INDEX}" && \ pip config set global.extra-index-url "${PIP_INDEX}" && \ python -m pip install --upgrade pip # Uninstall nv-pytorch fork RUN pip uninstall -y torch torchvision torchaudio \ pytorch-quantization pytorch-triton torch-tensorrt \ xgboost transformer_engine flash_attn apex megatron-core grpcio RUN pip install --resume-retries 999 --no-cache-dir torch==2.7.1 torchvision==0.22.1 torchaudio==2.7.1 # Install flash-attn-2.7.4.post1, although built with torch2.6, it is compatible with torch2.7 # https://github.com/Dao-AILab/flash-attention/issues/1644#issuecomment-2899396361 RUN ABI_FLAG=$(python -c "import torch; print('TRUE' if torch._C._GLIBCXX_USE_CXX11_ABI else 'FALSE')") && \ URL="https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.4.post1/flash_attn-2.7.4.post1+cu12torch2.6cxx11abi${ABI_FLAG}-cp310-cp310-linux_x86_64.whl" && \ FILE="flash_attn-2.7.4.post1+cu12torch2.6cxx11abi${ABI_FLAG}-cp310-cp310-linux_x86_64.whl" && \ wget -nv "${URL}" && \ pip install --no-cache-dir "${FILE}" # Fix packages RUN pip uninstall -y pynvml nvidia-ml-py && \ pip install --no-cache-dir --upgrade "nvidia-ml-py>=12.560.30" "fastapi[standard]>=0.115.0" "optree>=0.13.0" "pydantic>=2.9" "grpcio>=1.62.1" # Install cudnn RUN aria2c --max-tries=9999 https://developer.download.nvidia.com/compute/cudnn/9.8.0/local_installers/cudnn-local-repo-ubuntu2204-9.8.0_1.0-1_amd64.deb && \ dpkg -i 
cudnn-local-repo-ubuntu2204-9.8.0_1.0-1_amd64.deb && \ cp /var/cudnn-local-repo-ubuntu2204-9.8.0/cudnn-*-keyring.gpg /usr/share/keyrings/ && \ apt-get update && \ apt-get -y install cudnn-cuda-12 && \ rm cudnn-local-repo-ubuntu2204-9.8.0_1.0-1_amd64.deb # Install Apex RUN pip install -v --disable-pip-version-check --no-cache-dir --no-build-isolation --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" --resume-retries 999 git+https://github.com/NVIDIA/apex.git # Profiling tools RUN aria2c --always-resume=true --max-tries=99999 https://developer.nvidia.com/downloads/assets/tools/secure/nsight-systems/2025_3/nsight-systems-2025.3.1_2025.3.1.90-1_amd64.deb && \ apt-get update && apt-get install -y libxcb-cursor0 RUN apt-get install -y ./nsight-systems-2025.3.1_2025.3.1.90-1_amd64.deb && \ rm -rf /usr/local/cuda/bin/nsys && \ ln -s /opt/nvidia/nsight-systems/2025.3.1/target-linux-x64/nsys /usr/local/cuda/bin/nsys && \ rm -rf /usr/local/cuda/bin/nsys-ui && \ ln -s /opt/nvidia/nsight-systems/2025.3.1/target-linux-x64/nsys-ui /usr/local/cuda/bin/nsys-ui && \ rm nsight-systems-2025.3.1_2025.3.1.90-1_amd64.deb RUN pip install --resume-retries 999 --no-cache-dir "tensordict==0.6.2" torchdata "transformers[hf_xet]>=4.52.3" accelerate datasets peft hf-transfer \ "numpy<2.0.0" "pyarrow>=19.0.1" pandas cuda-bindings \ ray[default] codetiming hydra-core pylatexenc qwen-vl-utils wandb dill pybind11 liger-kernel mathruler blobfile xgrammar \ pytest py-spy pyext pre-commit ruff # Install DeepEP ## the dependency of IBGDA RUN ln -s /usr/lib/x86_64-linux-gnu/libmlx5.so.1 /usr/lib/x86_64-linux-gnu/libmlx5.so ## Clone and build deepep and deepep-nvshmem RUN git clone -b v2.3.1 https://github.com/NVIDIA/gdrcopy.git && \ git clone https://github.com/deepseek-ai/DeepEP.git && \ cd DeepEP && git checkout a84a248 # Prepare nvshmem RUN wget https://developer.nvidia.com/downloads/assets/secure/nvshmem/nvshmem_src_3.2.5-1.txz && \ tar -xvf nvshmem_src_3.2.5-1.txz && mv nvshmem_src deepep-nvshmem && \ cd deepep-nvshmem && git apply ../DeepEP/third-party/nvshmem.patch ENV CUDA_HOME=/usr/local/cuda ### Set MPI environment variables. Having errors when not set. ENV CPATH=/usr/local/mpi/include:$CPATH ENV LD_LIBRARY_PATH=/usr/local/mpi/lib:$LD_LIBRARY_PATH ENV LD_LIBRARY_PATH=/usr/local/x86_64-linux-gnu:$LD_LIBRARY_PATH ENV GDRCOPY_HOME=/workspace/gdrcopy ## Build deepep-nvshmem RUN cd deepep-nvshmem && \ NVSHMEM_SHMEM_SUPPORT=0 \ NVSHMEM_UCX_SUPPORT=0 \ NVSHMEM_USE_NCCL=0 \ NVSHMEM_MPI_SUPPORT=0 \ NVSHMEM_IBGDA_SUPPORT=1 \ NVSHMEM_PMIX_SUPPORT=0 \ NVSHMEM_TIMEOUT_DEVICE_POLLING=0 \ NVSHMEM_USE_GDRCOPY=1 \ cmake -G Ninja -S . 
-B build/ -DCMAKE_INSTALL_PREFIX=/workspace/deepep-nvshmem/install && cmake --build build/ --target install ENV NVSHMEM_DIR=/workspace/deepep-nvshmem/install ENV LD_LIBRARY_PATH=$NVSHMEM_DIR/lib:$LD_LIBRARY_PATH ENV PATH=$NVSHMEM_DIR/bin:$PATH ## Build deepep RUN cd DeepEP && \ python setup.py install # Reset pip config RUN pip config unset global.index-url && \ pip config unset global.extra-index-url ================================================ FILE: verl_distillation/docker/verl0.5-cu126-torch2.7-fa2.7.4/README.md ================================================ # verl image with verl v0.5 ## Important packages version ```txt cuda==12.6 cudnn==9.8.0 torch==2.7.1 flash_attn==2.7.4.post1 sglang==0.4.9.post6 vllm==0.8.5.post1 nvidia-cudnn-cu12==9.8.0.87 transformer_engine==2.3 megatron.core==core_v0.12.2 # Preview transformer_engine==2.5 megatron.core==core_r0.13.0 ``` ## Target - Base image: - `verlai/verl:base-verl0.5-cu126-cudnn9.8-torch2.7.1-fa2.7.4`: We offer a base image with DeepEP built in, for vLLM/SGLang - App image: - `verlai/verl:app-verl0.5-transformers4.55.4-vllm0.10.0-mcore0.13.0-te2.2` - `verlai/verl:app-verl0.5-transformers4.55.4-sglang0.4.10.post2-mcore0.13.0-te2.2` - `iseekyan/verl:app-verl0.5-transformers4.55.4-vllm0.10.0-mcore0.15.0-te2.7` ================================================ FILE: verl_distillation/docker/verl0.5-cu126-torch2.7.1-fa2.8.0/Dockerfile.app.sglang.mcore0.12 ================================================ # Start from the verl base image # Dockerfile.base FROM verlai/verl:base-verl0.5-cu126-cudnn9.8-torch2.7.1-fa2.8.0 # Define environments ENV MAX_JOBS=8 ENV VLLM_WORKER_MULTIPROC_METHOD=spawn ENV DEBIAN_FRONTEND=noninteractive ENV NODE_OPTIONS="" ENV PIP_ROOT_USER_ACTION=ignore ENV HF_HUB_ENABLE_HF_TRANSFER="1" # Install sglang-0.4.8 and torch-memory-saver # Install FlashInfer Python package RUN pip install --upgrade pip setuptools packaging RUN pip install --resume-retries 999 --no-cache-dir --no-build-isolation flashinfer-python==0.2.6.post1 RUN pip install --resume-retries 999 --no-cache-dir "sglang[all]==0.4.8" && pip install torch-memory-saver --no-cache-dir # Fix packages RUN pip install --no-cache-dir "tensordict==0.6.2" "transformers[hf_xet]>=4.51.0" accelerate datasets peft hf-transfer \ "numpy<2.0.0" "pyarrow>=19.0.1" pandas \ ray[default] codetiming hydra-core pylatexenc qwen-vl-utils wandb dill pybind11 liger-kernel mathruler blobfile xgrammar \ pytest py-spy pyext pre-commit ruff RUN pip uninstall -y pynvml nvidia-ml-py && \ pip install --resume-retries 999 --no-cache-dir --upgrade "nvidia-ml-py>=12.560.30" "fastapi[standard]>=0.115.0" "optree>=0.13.0" "pydantic>=2.9" "grpcio>=1.62.1" RUN pip install --resume-retries 999 --no-cache-dir nvidia-cudnn-cu12==9.8.0.87 # Install TransformerEngine RUN export NVTE_FRAMEWORK=pytorch && pip3 install --resume-retries 999 --no-deps --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/TransformerEngine.git@v2.3 # Install Megatron-LM RUN pip3 install --no-deps --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/Megatron-LM.git@core_v0.12.2 # Install mbridge RUN pip3 install --no-cache-dir mbridge ================================================ FILE: verl_distillation/docker/verl0.5-cu126-torch2.7.1-fa2.8.0/Dockerfile.app.sglang.mcore0.13.preview ================================================ # Start from the verl base image # Dockerfile.base FROM verlai/verl:base-verl0.5-cu126-cudnn9.8-torch2.7.1-fa2.8.0 # Define environments ENV MAX_JOBS=8 ENV
VLLM_WORKER_MULTIPROC_METHOD=spawn ENV DEBIAN_FRONTEND=noninteractive ENV NODE_OPTIONS="" ENV PIP_ROOT_USER_ACTION=ignore ENV HF_HUB_ENABLE_HF_TRANSFER="1" # Install sglang-0.4.8 and torch-memory-saver # Install FlashInfer Python package RUN pip install --upgrade pip setuptools packaging RUN pip install --resume-retries 999 --no-cache-dir --no-build-isolation flashinfer-python==0.2.6.post1 RUN pip install --resume-retries 999 --no-cache-dir "sglang[all]==0.4.8" && pip install torch-memory-saver --no-cache-dir # Fix packages RUN pip install --no-cache-dir "tensordict==0.6.2" "transformers[hf_xet]>=4.51.0" accelerate datasets peft hf-transfer \ "numpy<2.0.0" "pyarrow>=19.0.1" pandas \ ray[default] codetiming hydra-core pylatexenc qwen-vl-utils wandb dill pybind11 liger-kernel mathruler blobfile xgrammar \ pytest py-spy pyext pre-commit ruff RUN pip uninstall -y pynvml nvidia-ml-py && \ pip install --resume-retries 999 --no-cache-dir --upgrade "nvidia-ml-py>=12.560.30" "fastapi[standard]>=0.115.0" "optree>=0.13.0" "pydantic>=2.9" "grpcio>=1.62.1" RUN pip install --resume-retries 999 --no-cache-dir nvidia-cudnn-cu12==9.8.0.87 # Install TransformerEngine RUN export NVTE_FRAMEWORK=pytorch && pip3 install --resume-retries 999 --no-deps --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/TransformerEngine.git@release_v2.5 # Install Megatron-LM RUN pip3 install --no-deps --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/Megatron-LM.git@core_v0.12.2 # Install mbridge RUN pip3 install --no-cache-dir mbridge ================================================ FILE: verl_distillation/docker/verl0.5-cu126-torch2.7.1-fa2.8.0/Dockerfile.base ================================================ # Base Docker Image of verl, with CUDA/Torch/FlashAttn/Apex/TransformerEngine, without other frameworks # Target: verlai/verl:base-verl0.5-cu126-cudnn9.8-torch2.7.1-fa2.8.0-fi0.2.6 # Start from the NVIDIA official image (ubuntu-22.04 + cuda-12.6 + python-3.10) # https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-24-08.html FROM nvcr.io/nvidia/pytorch:24.08-py3 # Define environments ENV MAX_JOBS=16 ENV VLLM_WORKER_MULTIPROC_METHOD=spawn ENV DEBIAN_FRONTEND=noninteractive ENV NODE_OPTIONS="" ENV PIP_ROOT_USER_ACTION=ignore ENV HF_HUB_ENABLE_HF_TRANSFER="1" # Define installation arguments ARG APT_SOURCE=https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ ARG PIP_INDEX=https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple # Set apt source RUN cp /etc/apt/sources.list /etc/apt/sources.list.bak && \ { \ echo "deb ${APT_SOURCE} jammy main restricted universe multiverse"; \ echo "deb ${APT_SOURCE} jammy-updates main restricted universe multiverse"; \ echo "deb ${APT_SOURCE} jammy-backports main restricted universe multiverse"; \ echo "deb ${APT_SOURCE} jammy-security main restricted universe multiverse"; \ } > /etc/apt/sources.list # Install systemctl RUN apt-get update && \ apt-get install -y -o Dpkg::Options::="--force-confdef" systemd && \ apt-get clean # Install tini RUN apt-get update && \ apt-get install -y tini aria2 libfreeimage3 libfreeimage-dev zlib1g htop && \ apt-get clean # Change pip source RUN pip config set global.index-url "${PIP_INDEX}" && \ pip config set global.extra-index-url "${PIP_INDEX}" && \ python -m pip install --upgrade pip # Uninstall nv-pytorch fork RUN pip uninstall -y torch torchvision torchaudio \ pytorch-quantization pytorch-triton torch-tensorrt \ xgboost transformer_engine flash_attn apex megatron-core grpcio RUN pip install --resume-retries 999 
--no-cache-dir torch==2.7.1 torchvision==0.22.1 torchaudio==2.7.1 # Install flash-attn-2.8.0.post2 (cxx11abi=True) RUN ABI_FLAG=$(python -c "import torch; print('TRUE' if torch._C._GLIBCXX_USE_CXX11_ABI else 'FALSE')") && \ URL="https://github.com/Dao-AILab/flash-attention/releases/download/v2.8.0.post2/flash_attn-2.8.0.post2+cu12torch2.7cxx11abi${ABI_FLAG}-cp310-cp310-linux_x86_64.whl" && \ FILE="flash_attn-2.8.0.post2+cu12torch2.7cxx11abi${ABI_FLAG}-cp310-cp310-linux_x86_64.whl" && \ wget -nv "${URL}" && \ pip install --no-cache-dir "${FILE}" # Fix packages RUN pip uninstall -y pynvml nvidia-ml-py && \ pip install --no-cache-dir --upgrade "nvidia-ml-py>=12.560.30" "fastapi[standard]>=0.115.0" "optree>=0.13.0" "pydantic>=2.9" "grpcio>=1.62.1" # Install cudnn RUN aria2c --max-tries=9999 https://developer.download.nvidia.com/compute/cudnn/9.8.0/local_installers/cudnn-local-repo-ubuntu2204-9.8.0_1.0-1_amd64.deb && \ dpkg -i cudnn-local-repo-ubuntu2204-9.8.0_1.0-1_amd64.deb && \ cp /var/cudnn-local-repo-ubuntu2204-9.8.0/cudnn-*-keyring.gpg /usr/share/keyrings/ && \ apt-get update && \ apt-get -y install cudnn-cuda-12 && \ rm cudnn-local-repo-ubuntu2204-9.8.0_1.0-1_amd64.deb # Install Apex RUN pip install -v --disable-pip-version-check --no-cache-dir --no-build-isolation --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" --resume-retries 999 git+https://github.com/NVIDIA/apex.git # Profiling tools RUN aria2c --always-resume=true --max-tries=99999 https://developer.nvidia.com/downloads/assets/tools/secure/nsight-systems/2025_3/nsight-systems-2025.3.1_2025.3.1.90-1_amd64.deb && \ apt-get update && apt-get install -y libxcb-cursor0 RUN apt-get install -y ./nsight-systems-2025.3.1_2025.3.1.90-1_amd64.deb && \ rm -rf /usr/local/cuda/bin/nsys && \ ln -s /opt/nvidia/nsight-systems/2025.3.1/target-linux-x64/nsys /usr/local/cuda/bin/nsys && \ rm -rf /usr/local/cuda/bin/nsys-ui && \ ln -s /opt/nvidia/nsight-systems/2025.3.1/target-linux-x64/nsys-ui /usr/local/cuda/bin/nsys-ui && \ rm nsight-systems-2025.3.1_2025.3.1.90-1_amd64.deb RUN pip install --resume-retries 999 --no-cache-dir "tensordict==0.6.2" torchdata "transformers[hf_xet]>=4.53" accelerate datasets peft hf-transfer \ "numpy<2.0.0" "pyarrow>=19.0.1" pandas cuda-bindings \ ray[default] codetiming hydra-core pylatexenc qwen-vl-utils wandb dill pybind11 liger-kernel mathruler blobfile xgrammar \ pytest py-spy pyext pre-commit ruff # Install DeepEP ## the dependency of IBGDA RUN ln -s /usr/lib/x86_64-linux-gnu/libmlx5.so.1 /usr/lib/x86_64-linux-gnu/libmlx5.so ## Clone and build deepep and deepep-nvshmem RUN git clone -b v2.3.1 https://github.com/NVIDIA/gdrcopy.git && \ git clone https://github.com/deepseek-ai/DeepEP.git && \ cd DeepEP && git checkout a84a248 # Prepare nvshmem RUN wget https://developer.nvidia.com/downloads/assets/secure/nvshmem/nvshmem_src_3.2.5-1.txz && \ tar -xvf nvshmem_src_3.2.5-1.txz && mv nvshmem_src deepep-nvshmem && \ cd deepep-nvshmem && git apply ../DeepEP/third-party/nvshmem.patch ENV CUDA_HOME=/usr/local/cuda ### Set MPI environment variables. Having errors when not set. 
ENV CPATH=/usr/local/mpi/include:$CPATH ENV LD_LIBRARY_PATH=/usr/local/mpi/lib:$LD_LIBRARY_PATH ENV LD_LIBRARY_PATH=/usr/local/x86_64-linux-gnu:$LD_LIBRARY_PATH ENV GDRCOPY_HOME=/workspace/gdrcopy ## Build deepep-nvshmem RUN cd deepep-nvshmem && \ NVSHMEM_SHMEM_SUPPORT=0 \ NVSHMEM_UCX_SUPPORT=0 \ NVSHMEM_USE_NCCL=0 \ NVSHMEM_MPI_SUPPORT=0 \ NVSHMEM_IBGDA_SUPPORT=1 \ NVSHMEM_PMIX_SUPPORT=0 \ NVSHMEM_TIMEOUT_DEVICE_POLLING=0 \ NVSHMEM_USE_GDRCOPY=1 \ cmake -G Ninja -S . -B build/ -DCMAKE_INSTALL_PREFIX=/workspace/deepep-nvshmem/install && cmake --build build/ --target install ENV NVSHMEM_DIR=/workspace/deepep-nvshmem/install ENV LD_LIBRARY_PATH=$NVSHMEM_DIR/lib:$LD_LIBRARY_PATH ENV PATH=$NVSHMEM_DIR/bin:$PATH ## Build deepep RUN cd DeepEP && \ python setup.py install # Reset pip config RUN pip config unset global.index-url && \ pip config unset global.extra-index-url ================================================ FILE: verl_distillation/docker/verl0.5-cu126-torch2.7.1-fa2.8.0/README.md ================================================ # verl image with verl v0.5 ## Important packages version ```txt cuda==12.6 cudnn==9.8.0 torch==2.7.1 flash_attn==2.8.0 ## sglang==0.4.8 vllm==0.8.5.post1 nvidia-cudnn-cu12==9.8.0.87 transformer_engine==2.3 megatron.core==core_v0.12.2 # Preview transformer_engine==2.5 megatron.core==core_r0.13.0 ``` ## Target - Base image: - `verlai/verl:base-verl0.5-cu126-cudnn9.8-torch2.7.1-fa2.8.0`: We offer a base image with DeepEP built in - App image: - `verlai/verl:app-verl0.5-sglang0.4.9-mcore0.12.2` - `verlai/verl:app-verl0.5-sglang0.4.9-mcore0.13.0-preview` - vLLM temporarily does not support the latest version ================================================ FILE: verl_distillation/docker/verl0.5-preview-cu128-torch2.7.1-fa2.8.0/Dockerfile.app.sglang.megatron ================================================ # Start from the verl base image # Dockerfile.base FROM verlai/verl:base-verl0.5-preview-cu128-cudnn9.8-torch2.7.1-fa2.8.0-fi0.2.6 # Define environments ENV MAX_JOBS=8 ENV VLLM_WORKER_MULTIPROC_METHOD=spawn ENV DEBIAN_FRONTEND=noninteractive ENV NODE_OPTIONS="" ENV PIP_ROOT_USER_ACTION=ignore ENV HF_HUB_ENABLE_HF_TRANSFER="1" # Install sglang-0.4.8 and torch-memory-saver # Install FlashInfer Python package RUN pip install --resume-retries 999 --no-cache-dir --no-build-isolation flashinfer-python==0.2.6.post1 RUN pip install --resume-retries 999 --no-cache-dir "sglang[all]==0.4.8" && pip install torch-memory-saver --no-cache-dir # Fix packages RUN pip install --no-cache-dir "tensordict==0.6.2" "transformers[hf_xet]>=4.51.0" accelerate datasets peft hf-transfer \ "numpy<2.0.0" "pyarrow>=19.0.1" pandas \ ray[default] codetiming hydra-core pylatexenc qwen-vl-utils wandb dill pybind11 liger-kernel mathruler blobfile xgrammar \ pytest py-spy pre-commit ruff RUN pip uninstall -y pynvml nvidia-ml-py && \ pip install --resume-retries 999 --no-cache-dir --upgrade "nvidia-ml-py>=12.560.30" "fastapi[standard]>=0.115.0" "optree>=0.13.0" "pydantic>=2.9" "grpcio>=1.62.1" RUN pip install --resume-retries 999 --no-cache-dir nvidia-cudnn-cu12==9.8.0.87 # Install TransformerEngine RUN export NVTE_FRAMEWORK=pytorch && pip3 install --resume-retries 999 --no-deps --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/TransformerEngine.git@release_v2.5 # Install Megatron-LM RUN pip3 install --no-deps --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/Megatron-LM.git@core_r0.13.0 # Install mbridge RUN pip3 install --no-cache-dir mbridge
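# --- Optional addition, not part of the original recipe: a build-time sanity
# check that the freshly installed stack imports cleanly. This is only a sketch;
# drop this layer if you prefer a smaller image.
RUN python -c "import torch, transformer_engine, megatron.core; print('torch', torch.__version__)"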
================================================ FILE: verl_distillation/docker/verl0.5-preview-cu128-torch2.7.1-fa2.8.0/Dockerfile.base ================================================ # Base Docker Image of verl, with CUDA/Torch/FlashAttn/Apex/TransformerEngine, without other frameworks # Target: verlai/verl:base-verl0.5-preview-cu128-cudnn9.8-torch2.7.1-fa2.8.0-fi0.2.6 # Start from the NVIDIA official image (ubuntu-22.04 + cuda-12.6 + python-3.10) # https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-24-08.html FROM nvcr.io/nvidia/pytorch:25.02-py3 # Define environments ENV MAX_JOBS=16 ENV VLLM_WORKER_MULTIPROC_METHOD=spawn ENV DEBIAN_FRONTEND=noninteractive ENV NODE_OPTIONS="" ENV PIP_ROOT_USER_ACTION=ignore ENV HF_HUB_ENABLE_HF_TRANSFER="1" # Define installation arguments ARG APT_SOURCE=https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ ARG PIP_INDEX=https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple # Set apt source RUN cp /etc/apt/sources.list /etc/apt/sources.list.bak && \ { \ echo "deb ${APT_SOURCE} jammy main restricted universe multiverse"; \ echo "deb ${APT_SOURCE} jammy-updates main restricted universe multiverse"; \ echo "deb ${APT_SOURCE} jammy-backports main restricted universe multiverse"; \ echo "deb ${APT_SOURCE} jammy-security main restricted universe multiverse"; \ } > /etc/apt/sources.list # Install systemctl RUN apt-get update && \ apt-get install -y -o Dpkg::Options::="--force-confdef" systemd && \ apt-get clean # Install tini RUN apt-get update && \ apt-get install -y tini aria2 libfreeimage3 libfreeimage-dev zlib1g htop && \ apt-get clean # Change pip source RUN pip config set global.index-url "${PIP_INDEX}" && \ pip config set global.extra-index-url "${PIP_INDEX}" && \ python -m pip install --upgrade pip # Uninstall nv-pytorch fork RUN pip uninstall -y torch torchvision torchaudio \ pytorch-quantization pytorch-triton torch-tensorrt \ xgboost transformer_engine flash_attn apex megatron-core grpcio RUN pip install --resume-retries 999 --no-cache-dir torch==2.7.1 torchvision==0.22.1 torchaudio==2.7.1 --index-url https://download.pytorch.org/whl/cu128 # Install flash-attn-2.8.0.post2 (cxx11abi=True) RUN ABI_FLAG=$(python -c "import torch; print('TRUE' if torch._C._GLIBCXX_USE_CXX11_ABI else 'FALSE')") && \ URL="https://github.com/Dao-AILab/flash-attention/releases/download/v2.8.0.post2/flash_attn-2.8.0.post2+cu12torch2.7cxx11abi${ABI_FLAG}-cp312-cp312-linux_x86_64.whl" && \ FILE="flash_attn-2.8.0.post2+cu12torch2.7cxx11abi${ABI_FLAG}-cp312-cp312-linux_x86_64.whl" && \ wget -nv "${URL}" && \ pip install --no-cache-dir "${FILE}" # Fix packages RUN pip uninstall -y pynvml nvidia-ml-py && \ pip install --no-cache-dir --upgrade "nvidia-ml-py>=12.560.30" "fastapi[standard]>=0.115.0" "optree>=0.13.0" "pydantic>=2.9" "grpcio>=1.62.1" # Install cudnn RUN aria2c --max-tries=9999 https://developer.download.nvidia.com/compute/cudnn/9.8.0/local_installers/cudnn-local-repo-ubuntu2204-9.8.0_1.0-1_amd64.deb && \ dpkg -i cudnn-local-repo-ubuntu2204-9.8.0_1.0-1_amd64.deb && \ cp /var/cudnn-local-repo-ubuntu2204-9.8.0/cudnn-*-keyring.gpg /usr/share/keyrings/ && \ apt-get update && \ apt-get -y install cudnn-cuda-12 && \ rm cudnn-local-repo-ubuntu2204-9.8.0_1.0-1_amd64.deb # Install Apex RUN pip install -v --disable-pip-version-check --no-cache-dir --no-build-isolation --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" --resume-retries 999 git+https://github.com/NVIDIA/apex.git # Profiling tools RUN aria2c --always-resume=true 
--max-tries=99999 https://developer.nvidia.com/downloads/assets/tools/secure/nsight-systems/2025_3/nsight-systems-2025.3.1_2025.3.1.90-1_amd64.deb && \ apt-get update && apt-get install -y libxcb-cursor0 RUN apt-get install -y ./nsight-systems-2025.3.1_2025.3.1.90-1_amd64.deb && \ rm -rf /usr/local/cuda/bin/nsys && \ ln -s /opt/nvidia/nsight-systems/2025.3.1/target-linux-x64/nsys /usr/local/cuda/bin/nsys && \ rm -rf /usr/local/cuda/bin/nsys-ui && \ ln -s /opt/nvidia/nsight-systems/2025.3.1/target-linux-x64/nsys-ui /usr/local/cuda/bin/nsys-ui && \ rm nsight-systems-2025.3.1_2025.3.1.90-1_amd64.deb RUN pip install --resume-retries 999 --no-cache-dir "tensordict==0.6.2" torchdata "transformers[hf_xet]>=4.51.0" accelerate datasets peft hf-transfer \ "numpy<2.0.0" "pyarrow>=19.0.1" pandas cuda-bindings \ ray[default] codetiming hydra-core pylatexenc qwen-vl-utils wandb dill pybind11 liger-kernel mathruler blobfile xgrammar \ pytest py-spy pre-commit ruff # Reset pip config RUN pip config unset global.index-url && \ pip config unset global.extra-index-url ================================================ FILE: verl_distillation/docker/verl0.5-preview-cu128-torch2.7.1-fa2.8.0/README.md ================================================ # verl image with verl v0.5 ## Important packages version ```txt cuda==12.8 cudnn==9.8.0 torch==2.7.1 flash_attn==2.8.0 ## sglang==0.4.8 transformer_engine==2.5 megatron.core==core_r0.13.0 nvidia-cudnn-cu12==9.8.0.87 ``` ## Target - Base image: - `verlai/verl:base-verl0.5-preview-cu128-cudnn9.8-torch2.7.1-fa2.8.0`: We offer a base image with FlashInfer 0.2.6.post1 built in - App image: - `verlai/verl:app-verl0.5-preview-sglang0.4.8-mcore0.13.0-preview` - vLLM temporarily does not support the latest version ## !!!Notice!!! - pyext lacks maintenance and does not work with Python 3.12; consider switching to a replacement and deprecating this package.
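A quick way to confirm that a container built from one of these images matches the version table above (a minimal sketch; the distribution names below are assumptions and may differ per image):

```python
# Print installed versions of the key packages listed in this README.
import importlib.metadata as md

for pkg in ["torch", "flash_attn", "sglang", "transformer_engine", "nvidia-cudnn-cu12"]:
    try:
        print(pkg, md.version(pkg))
    except md.PackageNotFoundError:
        print(pkg, "not installed")
```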
================================================ FILE: verl_distillation/docker/verl0.6-cu128-torch2.8.0-fa2.7.4/Dockerfile.app.sglang ================================================ FROM verlai/verl:base-verl0.6-cu128-cudnn9.8-torch2.8.0-fa2.7.4 RUN pip install --no-cache-dir "sglang[all]==0.5.2" RUN pip install --no-cache-dir "torch-memory-saver==0.0.9rc1" ================================================ FILE: verl_distillation/docker/verl0.6-cu128-torch2.8.0-fa2.7.4/Dockerfile.base ================================================ # Start from the NVIDIA official image (ubuntu-24.04 + cuda-12.8 + python-3.12) # https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-25-03.html FROM nvcr.io/nvidia/pytorch:25.03-py3 # Define environments ENV MAX_JOBS=32 ENV VLLM_WORKER_MULTIPROC_METHOD=spawn ENV DEBIAN_FRONTEND=noninteractive ENV NODE_OPTIONS="" ENV PIP_ROOT_USER_ACTION=ignore ENV HF_HUB_ENABLE_HF_TRANSFER="1" ENV PIP_CONSTRAINT="" ARG PIP_INDEX=https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple # Change pip source RUN pip config set global.index-url "${PIP_INDEX}" && \ pip config set global.extra-index-url "${PIP_INDEX}" && \ pip config set global.no-cache-dir "true" && \ python -m pip install --upgrade pip # Install systemctl RUN apt-get update && \ apt-get install -y -o Dpkg::Options::="--force-confdef" systemd && \ apt-get clean # Install libxml2 RUN apt-get update && \ apt-get install -y libxml2 aria2 && \ apt-get clean # Uninstall nv-pytorch fork RUN pip uninstall -y torch torchvision torchaudio \ pytorch-quantization pytorch-triton torch-tensorrt \ transformer_engine flash_attn apex megatron-core \ xgboost opencv grpcio # Fix packages RUN pip install --no-cache-dir tensordict torchdata "transformers[hf_xet]==4.55.4" accelerate datasets peft hf-transfer \ "numpy<2.0.0" "pyarrow>=19.0.1" pandas \ ray[default] codetiming hydra-core pylatexenc qwen-vl-utils wandb dill pybind11 liger-kernel mathruler blobfile xgrammar \ pytest py-spy pre-commit ruff # Fix cv2 RUN rm -rf /usr/local/lib/python3.11/dist-packages/cv2 # Install torch RUN pip install --no-cache-dir torch==2.8.0 --index-url https://download.pytorch.org/whl/cu128 # Install flash-attn RUN pip install --no-cache-dir --no-build-isolation flash_attn==2.7.4.post1 # Install DeepEP # the dependency of IBGDA RUN ln -s /usr/lib/x86_64-linux-gnu/libmlx5.so.1 /usr/lib/x86_64-linux-gnu/libmlx5.so # Clone and build deepep and deepep-nvshmem RUN git clone -b v2.3.1 https://github.com/NVIDIA/gdrcopy.git && \ git clone https://github.com/deepseek-ai/DeepEP.git && \ cd DeepEP && git checkout a84a248 # Prepare nvshmem RUN wget https://developer.nvidia.com/downloads/assets/secure/nvshmem/nvshmem_src_3.2.5-1.txz && \ tar -xvf nvshmem_src_3.2.5-1.txz && mv nvshmem_src deepep-nvshmem && \ cd deepep-nvshmem && git apply ../DeepEP/third-party/nvshmem.patch ## Build deepep-nvshmem RUN apt-get install -y ninja-build cmake ENV CUDA_HOME=/usr/local/cuda ### Set MPI environment variables. Having errors when not set. ENV CPATH=/usr/local/mpi/include:$CPATH ENV LD_LIBRARY_PATH=/usr/local/mpi/lib:$LD_LIBRARY_PATH ENV LD_LIBRARY_PATH=/usr/local/x86_64-linux-gnu:$LD_LIBRARY_PATH ENV GDRCOPY_HOME=/workspace/gdrcopy ENV GDRCOPY_INCLUDE=/workspace/gdrcopy/include RUN cd deepep-nvshmem && \ NVSHMEM_SHMEM_SUPPORT=0 \ NVSHMEM_UCX_SUPPORT=0 \ NVSHMEM_USE_NCCL=0 \ NVSHMEM_MPI_SUPPORT=0 \ NVSHMEM_IBGDA_SUPPORT=1 \ NVSHMEM_PMIX_SUPPORT=0 \ NVSHMEM_TIMEOUT_DEVICE_POLLING=0 \ NVSHMEM_USE_GDRCOPY=1 \ cmake -G Ninja -S . 
-B build/ -DCMAKE_INSTALL_PREFIX=/workspace/deepep-nvshmem/install && cmake --build build/ --target install ENV NVSHMEM_DIR=/workspace/deepep-nvshmem/install ENV LD_LIBRARY_PATH=$NVSHMEM_DIR/lib:$LD_LIBRARY_PATH ENV PATH=$NVSHMEM_DIR/bin:$PATH ## Build deepep RUN cd DeepEP && \ python setup.py install # Install Apex RUN pip install -v --disable-pip-version-check --no-cache-dir --no-build-isolation --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" git+https://github.com/NVIDIA/apex.git # Install TransformerEngine RUN export NVTE_FRAMEWORK=pytorch && pip3 install --no-deps --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/TransformerEngine.git@v2.2.1 # Install Megatron-LM RUN git clone -b core_v0.13.0 https://github.com/NVIDIA/Megatron-LM.git && \ cd Megatron-LM && pip3 install --no-deps -e . # Install mbridge RUN pip3 install --no-cache-dir git+https://github.com/ISEEKYAN/mbridge.git ================================================ FILE: verl_distillation/docker/verl0.6-cu128-torch2.8.0-fa2.7.4/Dockerfile.vllm011.mcore_gpt-oss ================================================ FROM nvcr.io/nvidia/nemo:25.07.gpt_oss RUN git clone -b v0.11.0 --depth 1 https://github.com/vllm-project/vllm.git /opt/vllm RUN pip install setuptools_scm RUN cd /opt/vllm && pip install --no-deps --no-build-isolation --no-cache-dir -e . RUN pip install cbor2 setproctitle blake3 openai_harmony pybase64 msgspec partial_json_parser py-cpuinfo diskcache gguf RUN pip install --upgrade transformers tokenizers RUN pip install codetiming tensordict mathruler pylatexenc RUN pip3 install --no-cache-dir mbridge ================================================ FILE: verl_distillation/docs/Makefile ================================================ # Minimal makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build SPHINXPROJ = verl SOURCEDIR = . BUILDDIR = _build # Put it first so that "make" without argument is like "make help". help: @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) .PHONY: help Makefile # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). %: Makefile @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) ================================================ FILE: verl_distillation/docs/README.md ================================================ # verl documentation ## Build the docs ```bash # If you want to view the auto-generated API docstrings, make sure verl is importable on your Python path. For instance, install verl (with the test extras) from this directory via: # pip install -e "..[test]" # Install dependencies needed for building docs. pip install -r requirements-docs.txt # Build the docs. make clean make html ``` ## Open the docs with your browser ```bash python -m http.server -d _build/html/ ``` Launch your browser and navigate to http://localhost:8000 to view the documentation. Alternatively, you can open the file `_build/html/index.html` directly in your browser. ================================================ FILE: verl_distillation/docs/README_vllm0.7.md ================================================ # Upgrading to vllm >= 0.7 Note: verl+vllm 0.8.3 is now stable. Please see ``docs/README_vllm0.8.md`` for the upgrade guide. ## Installation Note: At the time of writing, verl+vllm 0.7.x supports **FSDP** for training and **vLLM** for rollout.
``` # Create the conda environment conda create -n distill python==3.10 conda activate distill # Install verl git clone https://github.com/volcengine/verl.git cd verl pip3 install -e . # Install the latest stable version of vLLM pip3 install vllm==0.7.3 # Install flash-attn pip3 install flash-attn --no-build-isolation ``` Note that if you are installing lower versions of vLLM (0.7.0, 0.7.1, 0.7.2), you need to make some tiny patches manually to vllm (/path/to/site-packages/vllm after installation) after the above steps: - vllm/distributed/parallel_state.py: Remove the assertion below: ``` if (world_size != tensor_model_parallel_size * pipeline_model_parallel_size): raise RuntimeError( f"world_size ({world_size}) is not equal to " f"tensor_model_parallel_size ({tensor_model_parallel_size}) x " f"pipeline_model_parallel_size ({pipeline_model_parallel_size})") ``` - vllm/executor/uniproc_executor.py: change `local_rank = rank` to `local_rank = int(os.environ["LOCAL_RANK"])` - vllm/model_executor/model_loader/weight_utils.py: remove the `torch.cuda.empty_cache()` in `pt_weights_iterator` ## Features ### Use cuda graph After installation, the examples that use FSDP as the training backend can be run. By default, `enforce_eager` is set to True, which disables CUDA graphs. To enable CUDA graphs and the sleep mode of vLLM>=0.7, add the following lines to the bash script: ``` actor_rollout_ref.rollout.enforce_eager=False \ actor_rollout_ref.rollout.free_cache_engine=True \ ``` For a typical job like examples/ppo_trainer/run_qwen2-7b_seq_balance.sh, the rollout generation time is 85 seconds with vLLM 0.7.0; with CUDA graphs enabled, it is further reduced to 62 seconds. **Note:** Currently, if `n` is greater than 1 in `SamplingParams` with vLLM>=0.7, rollout generation time can be unstable on vLLM's V0 engine (some iterations see generation-time bursts). ### Use vLLM V1 Engine Using the vLLM V1 engine can avoid these instability issues and achieve additional performance improvements. To use the V1 engine, first uninstall the previously installed vLLM and then follow the steps below to install the newer version. ``` git clone https://github.com/vllm-project/vllm.git cd vllm git checkout 2275784 sed -i "903a\ data_parallel_size = world_size // pipeline_model_parallel_size // tensor_model_parallel_size" ./vllm/distributed/parallel_state.py VLLM_USE_PRECOMPILED=1 pip install --editable . ``` Then you can enable the V1 engine by setting `export VLLM_USE_V1=1`. In some benchmark tests, the V1 engine demonstrates a 1.5x speed improvement over the vLLM V0 engine. Stable support for the vLLM V1 engine is available on verl main. ================================================ FILE: verl_distillation/docs/README_vllm0.8.md ================================================ # Upgrading to vLLM >= 0.8 Last updated: 05/04/2025. ## Installation Note: This version of verl+vLLM 0.8+ supports **FSDP** for training and **vLLM** for rollout. ```bash # Create the conda environment conda create -n distill python==3.10 conda activate distill # Install verl git clone https://github.com/volcengine/verl.git cd verl pip3 install -e . # Install the latest stable version of vLLM pip3 install vllm==0.8.3 # Install flash-attn pip3 install flash-attn --no-build-isolation ``` We have a pre-built docker image for verl+vLLM 0.8.3.
You can pull it directly with the following command: ```bash docker pull hiyouga/verl:ngc-th2.6.0-cu126-vllm0.8.3-flashinfer0.2.2-cxx11abi0 ``` ## Features vLLM 0.8+ supports CUDA graphs and the V1 engine by default in verl. To enable these features, remember to add the following lines to the bash script: ```bash actor_rollout_ref.rollout.enforce_eager=False \ actor_rollout_ref.rollout.free_cache_engine=True \ ``` and also **remove** the corresponding legacy environment variable from your script if it is set. ## Notes When you directly upgrade to vllm>=0.8, some dependency packages may change versions. If you encounter the following problem: ```bash in from torch.multiprocessing.reductions import ForkingPickler ImportError: cannot import name 'ForkingPickler' from 'torch.multiprocessing.reductions' (/opt/conda/lib/python3.11/site-packages/torch/multiprocessing/reductions.py) ``` you need to upgrade `tensordict` to version 0.6.2 using the command `pip install tensordict==0.6.2`. ================================================ FILE: verl_distillation/docs/_static/custom.css ================================================ /* Make the documentation use full screen width */ .wy-nav-content { max-width: none !important; width: 100% !important; padding: 1.618em 3.236em !important; } /* Adjust the content wrapper - will be set by JavaScript */ .wy-nav-content-wrap { margin-left: 300px; transition: margin-left 0.2s ease; width: auto !important; position: relative !important; background: white !important; min-height: 100vh !important; } /* Make the main content area responsive */ .rst-content { max-width: none !important; width: 100% !important; } /* Optional: Adjust table widths to prevent overflow */ .rst-content table.docutils { width: 100% !important; table-layout: auto !important; } /* Optional: Better code block width handling */ .rst-content .highlight { width: 100% !important; } /* Content area positioning already handled above */ /* Optional: Improve readability with some margin on very wide screens */ @media (min-width: 1400px) { .wy-nav-content { max-width: none !important; margin: 0 auto !important; } } /* Resizable sidebar styles */ .wy-nav-side { position: fixed !important; top: 0 !important; bottom: 0 !important; left: 0 !important; width: 300px; min-width: 200px; max-width: 600px; display: flex; flex-direction: column; z-index: 200 !important; } /* Ensure sidebar header (logo, search) adapts to width */ .wy-side-nav-search { width: 100% !important; box-sizing: border-box !important; padding: 0.809em 0.809em !important; } .wy-side-nav-search input[type="text"] { width: 100% !important; box-sizing: border-box !important; } /* Make logo/title area responsive */ .wy-side-nav-search > div.version { width: 100% !important; } .wy-side-nav-search > a { width: 100% !important; display: block !important; white-space: nowrap !important; overflow: hidden !important; text-overflow: ellipsis !important; } /* Responsive adjustments for narrow sidebar */ @media (max-width: 300px) { .wy-side-nav-search > a { font-size: 0.9em !important; } .wy-side-nav-search input[type="text"] { font-size: 0.8em !important; } } /* Ensure search input doesn't overflow */ .wy-side-nav-search form { width: 100% !important; margin: 0 !important; } /* Make search icon responsive */ .wy-side-nav-search .wy-dropdown { width: 100% !important; } /* Adjust search results dropdown width */ .wy-side-nav-search .wy-dropdown-menu { width: 100% !important; max-width: none !important; left: 0 !important; right: 0 !important; } /* Resize handle is created by
JavaScript */ /* Make sure the sidebar content doesn't overflow */ .wy-side-scroll { width: 100% !important; flex: 1 !important; overflow-y: auto !important; overflow-x: hidden !important; padding-right: 10px !important; box-sizing: border-box !important; scroll-behavior: auto !important; /* Prevent smooth scrolling on sidebar itself */ } /* Ensure proper scroll behavior for main content area */ html { scroll-behavior: smooth !important; } /* Ensure anchor links work properly in main content */ .wy-nav-content-wrap { scroll-behavior: smooth !important; } /* Fix scroll to target for anchor links */ .rst-content { scroll-behavior: smooth !important; } /* Fix anchor scroll offset to account for fixed header */ .rst-content .section { scroll-margin-top: 60px; } /* Fix anchor scroll offset for headers */ .rst-content h1, .rst-content h2, .rst-content h3, .rst-content h4, .rst-content h5, .rst-content h6 { scroll-margin-top: 60px; } /* Fix anchor scroll offset for specific scroll targets */ .rst-content .headerlink { scroll-margin-top: 60px; } /* Fix sidebar navigation styling */ .wy-menu-vertical { width: 100% !important; } .wy-menu-vertical li { width: 100% !important; } .wy-menu-vertical a { width: 100% !important; word-wrap: break-word !important; white-space: normal !important; } /* Content area margin is handled by JavaScript */ /* Custom drag handle (more visible) */ .resize-handle { position: absolute; top: 0; right: 0; width: 8px; height: 100%; background: #ccc; cursor: col-resize; z-index: 1001; opacity: 0.3; transition: opacity 0.2s ease; } .resize-handle:hover { opacity: 0.8; background: #999; } .resize-handle::before { content: ''; position: absolute; top: 50%; left: 50%; width: 2px; height: 20px; background: #666; transform: translate(-50%, -50%); border-radius: 1px; } .resize-handle:hover::before { background: #333; } /* Ensure smooth resizing */ .wy-nav-side.resizing { user-select: none; pointer-events: none; } .wy-nav-side.resizing .wy-side-scroll { overflow: hidden; } ================================================ FILE: verl_distillation/docs/_static/js/resizable-sidebar.js ================================================ // Resizable sidebar functionality document.addEventListener('DOMContentLoaded', function() { const sidebar = document.querySelector('.wy-nav-side'); const content = document.querySelector('.wy-nav-content-wrap'); if (!sidebar || !content) return; // Create resize handle const resizeHandle = document.createElement('div'); resizeHandle.className = 'resize-handle'; sidebar.appendChild(resizeHandle); let isResizing = false; let startX = 0; let startWidth = 0; // Get initial width const getInitialWidth = () => { return 300; // Default width }; // Save width to localStorage const saveWidth = (width) => { localStorage.setItem('sidebar-width', width); }; // Load width from localStorage const loadWidth = () => { const savedWidth = localStorage.getItem('sidebar-width'); if (savedWidth) { const width = parseInt(savedWidth, 10); if (width >= 200 && width <= 600) { return width; } } return getInitialWidth(); }; // Apply width to sidebar and content const applyWidth = (width) => { // Update sidebar width sidebar.style.width = width + 'px'; // Update content margin with !important to override any CSS content.style.setProperty('margin-left', width + 'px', 'important'); // Also update any other content wrapper that might exist const contentInner = document.querySelector('.wy-nav-content'); if (contentInner) { contentInner.style.setProperty('margin-left', '0px', 
'important'); } // Force reflow and repaint sidebar.offsetHeight; content.offsetHeight; // Trigger window resize event to notify other components window.dispatchEvent(new Event('resize')); }; // Initialize with saved width const initialWidth = loadWidth(); applyWidth(initialWidth); // Mouse down on resize handle resizeHandle.addEventListener('mousedown', (e) => { isResizing = true; startX = e.clientX; startWidth = parseInt(window.getComputedStyle(sidebar).width, 10); sidebar.classList.add('resizing'); document.body.style.cursor = 'col-resize'; document.body.style.userSelect = 'none'; // Add overlay to prevent iframe issues const overlay = document.createElement('div'); overlay.style.cssText = ` position: fixed; top: 0; left: 0; width: 100%; height: 100%; z-index: 9999; cursor: col-resize; `; overlay.id = 'resize-overlay'; document.body.appendChild(overlay); e.preventDefault(); }); // Mouse move document.addEventListener('mousemove', (e) => { if (!isResizing) return; const width = startWidth + e.clientX - startX; const clampedWidth = Math.max(200, Math.min(600, width)); applyWidth(clampedWidth); }); // Mouse up document.addEventListener('mouseup', () => { if (!isResizing) return; isResizing = false; sidebar.classList.remove('resizing'); document.body.style.cursor = ''; document.body.style.userSelect = ''; // Remove overlay const overlay = document.getElementById('resize-overlay'); if (overlay) { overlay.remove(); } // Save the current width const currentWidth = parseInt(window.getComputedStyle(sidebar).width, 10); saveWidth(currentWidth); }); // Handle window resize - removed to prevent infinite loop // The sidebar width is fixed and managed by drag functionality, no need to recalculate on window resize // Double-click to reset to default width resizeHandle.addEventListener('dblclick', () => { const defaultWidth = 300; applyWidth(defaultWidth); saveWidth(defaultWidth); }); }); // Fix navigation issues - Using MutationObserver for reliable initialization document.addEventListener('DOMContentLoaded', function() { let navigationFixed = false; function setupNavigationFix() { if (navigationFixed) return; // Find all links in the sidebar const sidebarLinks = document.querySelectorAll('.wy-menu-vertical a'); // Only proceed if we have sidebar links if (sidebarLinks.length === 0) return; console.log('Setting up navigation fix...'); sidebarLinks.forEach(function(link) { const href = link.getAttribute('href'); // Clone the link to remove all existing event listeners const newLink = link.cloneNode(true); // Add our own click handler newLink.addEventListener('click', function(e) { console.log('Link clicked:', href); // If it's an anchor link within the same page if (href && href.startsWith('#') && href !== '#') { e.preventDefault(); e.stopPropagation(); const targetId = href.substring(1); const targetElement = document.getElementById(targetId); if (targetElement) { // Calculate offset for fixed header const headerHeight = 60; const elementPosition = targetElement.getBoundingClientRect().top; const offsetPosition = elementPosition + window.pageYOffset - headerHeight; window.scrollTo({ top: offsetPosition, behavior: 'smooth' }); // Update URL hash if (history.pushState) { history.pushState(null, null, '#' + targetId); } else { location.hash = '#' + targetId; } } } // For external links, navigate normally else if (href && !href.startsWith('#') && !href.startsWith('javascript:')) { console.log('Navigating to external link:', href); window.location.href = href; } }); // Replace the old link with the new one 
link.parentNode.replaceChild(newLink, link); }); navigationFixed = true; // Handle initial page load with hash if (window.location.hash) { // Use requestAnimationFrame for better timing requestAnimationFrame(() => { const targetId = window.location.hash.substring(1); const targetElement = document.getElementById(targetId); if (targetElement) { const headerHeight = 60; const elementPosition = targetElement.getBoundingClientRect().top; const offsetPosition = elementPosition + window.pageYOffset - headerHeight; window.scrollTo({ top: offsetPosition, behavior: 'smooth' }); } }); } } // Try to set up navigation fix immediately setupNavigationFix(); // If it didn't work, use MutationObserver to watch for when sidebar links are added if (!navigationFixed) { const observer = new MutationObserver(function(mutations) { mutations.forEach(function(mutation) { if (mutation.type === 'childList' && mutation.addedNodes.length > 0) { // Check if sidebar links were added const sidebarLinks = document.querySelectorAll('.wy-menu-vertical a'); if (sidebarLinks.length > 0) { setupNavigationFix(); if (navigationFixed) { observer.disconnect(); } } } }); }); // Start observing the document for changes observer.observe(document.body, { childList: true, subtree: true }); // Fallback timeout in case MutationObserver doesn't work setTimeout(function() { if (!navigationFixed) { setupNavigationFix(); } observer.disconnect(); }, 5000); } }); ================================================ FILE: verl_distillation/docs/_static/js/runllm-widget.js ================================================ document.addEventListener("DOMContentLoaded", function () { var script = document.createElement("script"); script.type = "module"; script.id = "runllm-widget-script"; script.src = "https://widget.runllm.com"; script.setAttribute("version", "stable"); script.setAttribute("crossorigin", "true"); script.setAttribute("runllm-keyboard-shortcut", "Mod+j"); script.setAttribute("runllm-name", "verl Chatbot"); script.setAttribute("runllm-position", "TOP_RIGHT"); script.setAttribute("runllm-assistant-id", "679"); script.async = true; document.head.appendChild(script); }); ================================================ FILE: verl_distillation/docs/advance/agent_loop.rst ================================================ Agent Loop ========== Last updated: 07/17/2025. .. versionadded:: 0.4.2 [status: alpha] .. warning:: Agent Loop is ready for use, but the API may change in future releases. Agent Loop is designed as a general interface for multi-turn rollout and agentic reinforcement learning. **Design goals**: - Pluggable user-defined agent loop - Provide a standard request/generate API across different inference frameworks - Provide request-level load balancing between multiple inference servers **Non-goals**: - How tools are defined and how they are called At a high level, the agent loop is given a prompt and runs a user-defined loop: call the LLM generate API, call tools, ... and return the final output. A reward is then computed on the final output, which is used as a trajectory for RL training. .. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/agent_loop_overview.svg?raw=true API Design ---------- The ``AgentLoopBase`` class is the abstraction of the agent loop, and the ``run`` method is the only interface users need to implement.
The ``run`` method, given prompt messages in the format [{"role": "user", "content": "..."}] and additional sampling params, can do whatever the user wants, such as - call the LLM generate API - call tools: web search, database query, code sandbox, ... - environment interaction - reflection - ... .. code:: python class AgentLoopBase(ABC): @abstractmethod async def run(self, sampling_params: dict[str, Any], **kwargs) -> AgentLoopOutput: """Run agent loop to interact with LLM server and environment. Args: sampling_params (Dict[str, Any]): LLM sampling params. **kwargs: dataset fields from `verl.utils.dataset.RLHFDataset`. Returns: AgentLoopOutput: Agent loop output. """ raise NotImplementedError After running the user-defined loop, the ``run`` method should return an ``AgentLoopOutput``, including prompt token ids, response token ids, and a response mask. .. code:: python class AgentLoopOutput(BaseModel): """Agent loop output.""" prompt_ids: list[int] """Prompt token ids.""" response_ids: list[int] """Response token ids, including LLM generated tokens and tool response tokens.""" response_mask: list[int] """Response mask: 1 for LLM generated tokens, 0 for tool response tokens.""" .. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/agent_loop_output.svg?raw=true .. note:: AgentLoopOutput only outputs one trajectory for a given prompt; multi-trajectory output is still under discussion. Architecture Design ------------------- .. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/agent_loop_architecture.png?raw=true A single PPO step contains two phases: rollout and train. In the rollout phase: 1. PPOTrainer samples a batch from the dataset and calls ``AgentLoopManager.generate_sequences``. 2. AgentLoopManager wakes up (``wake_up``) all async LLM server instances, which syncs weights between the inference engine (vLLM/SGLang) and the training engine (FSDP/Megatron-LM). 3. AgentLoopManager splits the batch into chunks and sends each chunk to an ``AgentLoopWorker``. 4. AgentLoopWorker receives a chunk and, for each prompt, spawns a user-defined ``AgentLoopBase`` instance, runs the ``run`` coroutine to completion, and gets an ``AgentLoopOutput``. .. tip:: AgentLoopWorker schedules multiple coroutines concurrently. If the number of AgentLoopWorkers equals batch_size, then each worker is responsible for one prompt. Inside the agent loop, when the user needs the LLM to generate a response: 5. Call ``AsyncLLMServerManager.generate`` with prompt_ids. 6. AsyncLLMServerManager selects the server instance with the fewest requests in the first turn and sends the request to it. (In following turns, the request is sent to the same server instance.) 7. AsyncLLMServer receives a request, issues IPC/RPC to the model_runner, and generates a response. (There are slight differences between vLLM and SGLang; see below.) When all prompts in all AgentLoopWorkers finish, AgentLoopManager gathers the results and returns them to PPOTrainer. 8. AgentLoopManager puts all server instances to sleep (``sleep``), which frees the kv cache and offloads weights to CPU memory. AsyncLLMServer ~~~~~~~~~~~~~~ AsyncLLMServer is the abstraction of an LLM server with two types of generation API: - `OpenAI chat completion `_: generate a response for the given chat conversation. - Token in token out: generate response ids for the given token ids. We officially support vLLM and SGLang AsyncLLMServers; both implement the two APIs and are well tested. Other inference engines should be easy to plug in by implementing the ``AsyncServerBase`` class. ..
code:: python class AsyncServerBase(ABC): @abstractmethod async def chat_completion(self, raw_request: Request) -> JSONResponse: """OpenAI chat completion API. Args: raw_request (Request): raw json request Returns: JSONResponse: json response API reference: https://platform.openai.com/docs/api-reference/chat/create """ raise NotImplementedError @abstractmethod async def generate(self, prompt_ids: list[int], sampling_params: dict[str, Any], request_id: str) -> list[int]: """Generate response ids given prompt ids. Args: prompt_ids (List[int]): prompt ids sampling_params (Dict[str, Any]): sampling params request_id (str): request id Returns: List[int]: response ids """ raise NotImplementedError Chat completion vs Token in token out ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. warning:: The following conclusion is based on our recent experience and is still open to investigation and discussion. Almost all agent frameworks (LangGraph, CrewAI, LlamaIndex, etc.) call the LLM with the OpenAI chat completion API and keep the chat history as messages. So users may expect that we should use the chat completion API in multi-turn rollout. But based on our recent experience with single-turn training on DAPO and multi-turn training on `retool `_, we found that the token_ids obtained by applying the chat template to the final messages may not equal the token_ids obtained by concatenating prompt_ids and response_ids from each turn. .. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/multi_turn.png?raw=true **Where does this inconsistency happen?** First, the tool parser may alter the content. For example .. code:: json {"role": "assistant", "content": "Let me call a ... and get the result"} After tool_calls extraction, the message looks like this: .. code:: json {"role": "assistant", "content": "Let me call a and get the result", "tool_calls": [{"name": "foo", "arguments": "{}"}]} Re-encoding the extracted message does not reproduce the original LLM-generated response_ids. Second, the `decode-encode` round trip may also lead to inconsistency: `Agent-R1 issue#30 `_. **What is the impact of this inconsistency?** This inconsistency is not a big problem for serving/agent systems, but it is critical for RL training: it causes the trajectory to deviate from the policy model distribution. We have observed that applying apply_chat_template to the final chat history messages makes PPO training fail to converge even in the single-turn setting. vLLM ^^^^ .. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/async_vllm.png?raw=true For vLLM, the Async LLM Engine runs in the same process as the server, and the ModelRunner runs in the same process as the FSDP/Megatron-LM workers. The Async LLM Engine communicates with the ModelRunner through ZeroMQ. When the server receives a request, it directly calls the engine to generate response_ids. SGLang ^^^^^^ .. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/async_sglang.png?raw=true For SGLang, the Async LLM Engine runs in the same process as FSDP/Megatron-LM worker-0, and it spawns multiple subprocesses as ModelRunners. The Async LLM Engine likewise communicates with the ModelRunners through ZeroMQ. When the server receives a request, it makes a remote call to worker-0 and gets response_ids. AsyncLLMServerManager ~~~~~~~~~~~~~~~~~~~~~ AsyncLLMServerManager serves as a proxy to multiple AsyncLLMServer instances and provides: - load balancing: select the server instance with the fewest in-flight requests in the first turn and send the request to it. - sticky sessions: bind a request_id to a server instance, so that the same request_id is sent to the same server instance in following turns (see the toy sketch below).
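The following toy sketch illustrates these two scheduling rules in isolation. The names are hypothetical and this is not verl's actual implementation; it only assumes a list of hashable server handles.

.. code:: python

    import collections

    class ToyServerManager:
        """Toy illustration: least-loaded first turn + sticky sessions."""

        def __init__(self, servers):
            self.servers = list(servers)
            self.inflight = collections.Counter()  # server -> in-flight request count
            self.sessions = {}                     # request_id -> server

        def pick_server(self, request_id):
            # Sticky session: later turns of a request go to the same server.
            if request_id in self.sessions:
                return self.sessions[request_id]
            # First turn: choose the server with the fewest in-flight requests.
            server = min(self.servers, key=lambda s: self.inflight[s])
            self.sessions[request_id] = server
            self.inflight[server] += 1
            return server

    manager = ToyServerManager(["server-0", "server-1"])
    assert manager.pick_server("req-1") == manager.pick_server("req-1")  # sticky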
AsyncLLMServerManager is passed to ``AgentLoopBase.__init__``; whenever users want to interact with the LLM in the agent loop, they can call ``AsyncLLMServerManager.generate`` to generate response_ids. .. code:: python class AsyncLLMServerManager: async def generate( self, request_id, *, prompt_ids: list[int], sampling_params: dict[str, Any], ) -> list[int]: """Generate tokens from prompt ids. Args: request_id (str): request id for sticky session. prompt_ids (List[int]): List of prompt token ids. sampling_params (Dict[str, Any]): Sampling parameters for the chat completion. Returns: List[int]: List of generated token ids. """ ... Next ---- - :doc:`Agentic RL Training<../start/agentic_rl>`: Quick start agentic RL training with the gsm8k dataset. - `LangGraph MathExpression `_: Demonstrates how to use LangGraph to build an agent loop. - `Retool `_: End-to-end retool paper reproduction using a tool agent. ================================================ FILE: verl_distillation/docs/advance/attention_implementation.rst ================================================ .. _attention-implementation-override: Attention Implementation Override ================================== Last updated: 10/31/2025. By default, VERL's FSDP workers use ``flash_attention_2`` as the attention implementation for improved performance. However, you can now override this setting to use different attention implementations based on your needs. Supported Attention Implementations ----------------------------------- The following attention implementations are supported (subject to model and hardware compatibility): - ``flash_attention_2``: High-performance attention implementation (default) - ``eager``: Standard PyTorch attention implementation - ``sdpa``: Scaled Dot-Product Attention (PyTorch native) When to Override ---------------- You might want to override the attention implementation in the following scenarios: - **Debugging**: Use ``eager`` for easier debugging and better error messages - **Compatibility**: Some models or hardware configurations may not support ``flash_attention_2`` - **Memory constraints**: Different implementations have different memory characteristics - **Performance tuning**: Testing different implementations for optimal performance Configuration Examples ----------------------- PPO Training with Eager Attention ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To override the attention implementation for the actor, rollout, and reference models: .. code:: bash python3 ppo_trainer.py \ +actor_rollout_ref.model.override_config.attn_implementation=eager \ [other parameters...] PPO Training with SDPA Attention ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. code:: bash python3 ppo_trainer.py \ +actor_rollout_ref.model.override_config.attn_implementation=sdpa \ [other parameters...] Critic Model Override ~~~~~~~~~~~~~~~~~~~~~ For training configurations that include a critic model, you can also override its attention implementation: .. code:: bash python3 ppo_trainer.py \ +actor_rollout_ref.model.override_config.attn_implementation=eager \ +critic.model.override_config.attn_implementation=eager \ [other parameters...] YAML Configuration ~~~~~~~~~~~~~~~~~~ You can also specify the attention implementation in your YAML configuration file: .. code:: yaml actor_rollout_ref: model: override_config: attn_implementation: eager # other overrides... critic: # if using a critic model model: override_config: attn_implementation: eager # other overrides...
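For intuition, an override like the above ultimately selects the attention backend that the underlying HuggingFace model is instantiated with. A minimal standalone sketch (not verl code; the checkpoint name is only an example):

.. code:: python

    from transformers import AutoModelForCausalLM

    # Any HF causal LM works the same way; "sdpa" could instead be "eager" or
    # "flash_attention_2", depending on your hardware and installed packages.
    model = AutoModelForCausalLM.from_pretrained(
        "Qwen/Qwen2.5-0.5B-Instruct",
        attn_implementation="sdpa",
    )
    print(model.config._attn_implementation)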
Important Notes --------------- **Backward Compatibility**: If you don't specify ``attn_implementation`` in the override config, VERL will continue to use ``flash_attention_2`` by default, ensuring backward compatibility with existing configurations. **Model Support**: Not all models support all attention implementations. Ensure your model is compatible with the chosen attention implementation before training. **Performance Impact**: Different attention implementations have varying performance characteristics. ``flash_attention_2`` typically offers the best performance, while ``eager`` provides better debugging capabilities. **Hardware Dependencies**: Some attention implementations (like ``flash_attention_2``) may require specific hardware or CUDA versions. If you encounter compatibility issues, try using ``eager`` or ``sdpa``. Troubleshooting --------------- If you encounter errors when using a specific attention implementation: 1. **Check model compatibility**: Verify that your model supports the chosen attention implementation 2. **Try eager attention**: Use ``attn_implementation=eager`` as a fallback for debugging 3. **Check hardware requirements**: Ensure your hardware supports the attention implementation 4. **Review error messages**: Attention implementation errors often provide clear guidance on supported options Example Error Resolution ~~~~~~~~~~~~~~~~~~~~~~~~ If you see an error like "flash_attention_2 is not supported", you can resolve it by switching to eager attention: .. code:: bash # Instead of the default flash_attention_2 python3 ppo_trainer.py +actor_rollout_ref.model.override_config.attn_implementation=eager This override ensures your training can proceed while you investigate the flash attention compatibility issue. ================================================ FILE: verl_distillation/docs/advance/checkpoint.rst ================================================ .. _checkpoint-page: Using Checkpoints to Support Fault Tolerance Training ===================================================== Last updated: 06/25/2025. Training errors or machine failures can occur at any point during the RLHF training process, so it is recommended to enable checkpointing to minimize your losses. The API interface has already been described in :ref:`config-explain-page`, and we will not repeat it. But there are still some technical details we hope to clarify. .. note:: Notice that the ``checkpoint.contents`` field has no effect on FSDP checkpoints except for ``hf_model``; the other three fields are bound together for saving and loading. We recommend including all of ``model``, ``optimizer`` and ``extra``. Checkpoint Saving Directory Structure ------------------------------------- Commonly, we use the ``default_local_dir`` declared in ``ppo_trainer.yaml`` or ``ppo_megatron_trainer.yml`` as the prefix when saving checkpoints, which is ``checkpoints/${trainer.project_name}/${trainer.experiment_name}``. The inner checkpoint structure of **FSDP** looks like: ..
Checkpoint Saving Directory Structure
-------------------------------------

Commonly, we use the ``default_local_dir`` declared in ``ppo_trainer.yaml`` or ``ppo_megatron_trainer.yml`` as the prefix when saving checkpoints, which is ``checkpoints/${trainer.project_name}/${trainer.experiment_name}``.

So the inner checkpoint structure of **FSDP** is as follows:

.. code::

    checkpoints/${trainer.project_name}/${trainer.experiment_name}
    ├── global_steps_${i}
    │   ├── actor
    │   │   ├── huggingface       # saves config and tokenizer by default; saves the huggingface model if ``hf_model`` is included in checkpoint.contents
    │   │   ├── fsdp_config.json  # FSDP config file, including world_size and fsdp version
    │   │   ├── model_world_size_{self.world_size}_rank_{self.rank}.pt
    │   │   ├── optim_world_size_{self.world_size}_rank_{self.rank}.pt
    │   │   └── extra_state_world_size_{self.world_size}_rank_{self.rank}.pt
    │   ├── critic
    │   │   ├── huggingface
    │   │   ├── fsdp_config.json
    │   │   ├── model_world_size_{self.world_size}_rank_{self.rank}.pt
    │   │   ├── optim_world_size_{self.world_size}_rank_{self.rank}.pt
    │   │   └── extra_state_world_size_{self.world_size}_rank_{self.rank}.pt
    └── latest_checkpointed_iteration.txt

All model shards, optimizers and extra states are stored together, in a sharded and distributed way.

The current **Megatron** checkpoint structure is:

.. code::

    checkpoints/${trainer.project_name}/${trainer.experiment_name}
    ├── global_steps_${i}
    │   ├── actor
    │   │   ├── huggingface  # saves config and tokenizer by default; saves the huggingface model if ``hf_model`` is included in checkpoint.contents
    │   │   └── dist_ckpt    # saves sharded model/optimizer/rng_states, with the same naming as Megatron
    │   └── critic
    │       ├── huggingface
    │       └── dist_ckpt
    └── latest_checkpointed_iteration.txt

Convert FSDP and Megatron Checkpoints to HuggingFace Format Model
-----------------------------------------------------------------

We provide a tool to convert FSDP and Megatron checkpoints to a HuggingFace format model. The tool is located in ``verl/model_merger``. For older versions of verl whose checkpoints don't include ``fsdp_config.json``, you can use the legacy model merger located at ``verl/scripts/legacy_model_merger.py``.

The script supports two main sub-commands: `merge` (to convert and save checkpoints) and `test` (to validate merged checkpoints against a reference model). The arguments for the `merge` sub-command are as follows:

.. code:: bash

    usage: python -m verl.model_merger merge [-h] --backend {fsdp,megatron} [--local_dir LOCAL_DIR]
                                             [--tie-word-embedding] [--is-value-model]
                                             [--use_cpu_initialization]
                                             [--target_dir TARGET_DIR]
                                             [--hf_upload_path HF_UPLOAD_PATH] [--private]

    options:
      -h, --help            show this help message and exit
      --backend {fsdp,megatron}
                            The backend of the model
      --local_dir LOCAL_DIR
                            Path to the saved model checkpoints
      --tie-word-embedding  Whether to tie word embedding weights (currently only Megatron supported)
      --is-value-model      Whether the model is a value model (currently only Megatron supported)
      --use_cpu_initialization
                            Whether to use CPU initialization for the model. This is useful for
                            large models that cannot fit into GPU memory during initialization.
      --target_dir TARGET_DIR
                            Directory to save the merged huggingface model
      --hf_upload_path HF_UPLOAD_PATH
                            Hugging Face repository ID to upload the model
      --private             Whether to upload the model to a private Hugging Face repository

Example usage for merging Megatron checkpoints:

.. code:: bash

    python -m verl.model_merger merge \
        --backend megatron \
        --tie-word-embedding \
        --local_dir checkpoints/verl_megatron_gsm8k_examples/qwen2_5_0b5_megatron_saveload/global_step_1/actor \
        --target_dir /path/to/merged_hf_model

Example usage for merging Megatron checkpoints in a distributed manner:
.. code:: bash

    torchrun --nproc_per_node 1 --nnodes 8 --node_rank ${RANK} -m verl.model_merger merge \
        --backend megatron \
        --tie-word-embedding \
        --local_dir checkpoints/verl_megatron_gsm8k_examples/qwen2_5_0b5_megatron_saveload/global_step_1/actor \
        --target_dir /path/to/merged_hf_model

Example usage for merging FSDP checkpoints:

.. code:: bash

    python -m verl.model_merger merge \
        --backend fsdp \
        --local_dir checkpoints/verl_fsdp_gsm8k_examples/qwen2_5_0b5_fsdp_saveload/global_step_1/actor \
        --target_dir /path/to/merged_hf_model
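After merging, a quick sanity check is to load the result with plain HuggingFace transformers and run a short generation. This is a minimal standalone sketch (the path and prompt are illustrative), independent of the ``test`` sub-command mentioned above:

.. code:: python

    # Load the merged model from --target_dir and generate a few tokens.
    from transformers import AutoModelForCausalLM, AutoTokenizer

    target_dir = "/path/to/merged_hf_model"
    tokenizer = AutoTokenizer.from_pretrained(target_dir)
    model = AutoModelForCausalLM.from_pretrained(target_dir, device_map="auto")

    inputs = tokenizer("1 + 1 =", return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=8)
    print(tokenizer.decode(outputs[0]))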
Megatron Merger Details
-----------------------

The current implementation of decoder layers uses ``nn.ModuleList`` to store the layers, so the model layers on every PP rank and VPP rank start their indices from 0. There are three ways to correct this behavior:

1. Modify the decoder layer's state_dict and add an ``offset`` to each layer's index, thereby rewriting the ``nn.ModuleList`` implementation.
2. Modify the layer indices when saving the checkpoint and recover them when loading it.
3. Let the checkpoint merger do this work, calculating the actual ``offset`` from the ``state_dict`` alone, which is a little complex.

The current implementation uses solution 2.

HuggingFace to Megatron DistCheckpoint Details
----------------------------------------------

If your model is quite large, we recommend using Megatron dist-checkpoint to load it. Megatron dist-checkpoint supports loading with different kinds of model parallelism and is much faster than the original checkpoint loading.

To convert an original HuggingFace model to a Megatron dist-checkpoint, you can use the ``scripts/converter_hf_to_mcore.py`` script. Large MoE models are temporarily supported through CPU initialization, which is a little slower, while we work on a better solution for large models.

An example command to convert a model is as follows:

.. code:: bash

    python scripts/converter_hf_to_mcore.py \
        --hf_model_path Qwen/Qwen1.5-MoE-A2.7B-Chat \
        --output_path /mnt/disk/Qwen/Qwen1.5-MoE-A2.7B-Chat \
        --use_cpu_initialization    # Only works for MoE models

An example command to convert a huge model like DeepSeek-V3 671B in a distributed manner is as follows:

.. code:: bash

    torchrun --nproc_per_node 1 --nnodes 8 --node_rank ${RANK} scripts/converter_hf_to_mcore.py \
        --hf_model_path deepseek-ai/DeepSeek-V3 \
        --output_path /mnt/disk/deepseek-ai/DeepSeek-V3 \
        --use_cpu_initialization    # Only works for MoE models

Original Checkpoint Utils
-------------------------

Original checkpoint utils refer to the original checkpoint implementation in ``verl/models/[model]/megatron/checkpoint_utils``. We now only need ``[model]_loader.py`` from the original checkpoint utils, since we no longer store ``hf_model`` every time (which is not recommended for large model training; save only sharded models if you can).

.. note::

    Note that ``[model]_loader`` only supports environments where **storage clusters can connect to every compute node**, because it uses a **sharded load to minimize checkpoint loading overhead**. Every rank loads its own data from a ``state_dict`` that all of them can access, and there is no need to broadcast among DP ranks, since the saved state_dict is produced only by DP rank 0.

    For users who can **only place the HuggingFace model on one device**, we keep the original, more costly implementation in ``[model]_loader_deprecated``. In this implementation, rank 0 broadcasts all weights to each tp and pp rank, and then dp rank 0 broadcasts to all dp ranks. There may be a risk of OOM.

    To use the deprecated loader, change the import package of ``load_state_dict_to_megatron_llama``.


================================================
FILE: verl_distillation/docs/advance/dpo_extension.rst
================================================

Extend to other RL(HF) algorithms
=================================

Last updated: 02/25/2025.

We already implemented the complete training pipeline of the PPO algorithm. To extend to other algorithms, we analyze the high-level principles of using verl and provide a tutorial for implementing the DPO algorithm. Users can follow a similar paradigm to extend to other RL algorithms.

.. note:: **Key idea**: A single process drives multi-process computation and data communication.

Overall Approach
----------------

Step 1: Consider what multi-machine, multi-GPU computations are needed for each model, such as ``generate_sequence``, ``compute_log_prob`` and ``update_policy`` in the actor_rollout model. Implement distributed single-program-multiple-data (SPMD) computation and encapsulate it into APIs.

Step 2: Based on different distributed scenarios, including FSDP and 3D parallelism in Megatron-LM, implement single-process control of data interaction among multi-process computations.

Step 3: Utilize the encapsulated APIs to implement the control flow.

Example: Online DPO
-------------------

We use verl to implement a simple online DPO algorithm. The algorithm flow of online DPO is as follows:

1. There is a prompt (rollout) generator which has the same weights as the actor model. After a batch of prompts is fed into the generator, it generates N responses for each prompt.
2. Send all the prompts + responses to a verifier for scoring, which can be a reward model or a rule-based function. Then sort them in pairs to form a training batch.
3. Use this training batch to train the actor model using DPO. During the process, a reference policy is needed.

Step 1: What are the multi-machine, multi-GPU computations
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

**Sample Generator**

Implementation details:

.. code:: python

    from verl.single_controller.base import Worker
    from verl.single_controller.ray import RayWorkerGroup, RayClassWithInitArgs, RayResourcePool
    import ray

    @ray.remote
    class SampleGenerator(Worker):
        def __init__(self, config):
            super().__init__()
            self.config = config

        def generate_sequences(self, data):
            pass

Here, ``SampleGenerator`` can be viewed as a multi-process group pulled up by ``torchrun``, with each process running the same code (SPMD). ``SampleGenerator`` needs to implement a ``generate_sequences`` API for the control flow to call. The implementation inside can use any inference engine, including vllm, sglang and huggingface. Users can largely reuse the code in verl/verl/workers/rollout/vllm_rollout/vllm_rollout.py and we won't go into details here.

**ReferencePolicy inference**

API: compute reference log probability

.. code:: python

    from verl.single_controller.base import Worker
    import ray

    @ray.remote
    class ReferencePolicy(Worker):
        def __init__(self):
            super().__init__()
            self.model = Model()

        def infer(self, data):
            return self.model(data)

**Actor update**

API: Update actor model parameters
.. code:: python

    from torch import optim
    from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
    from verl.single_controller.base import Worker
    import ray

    @ray.remote
    class DPOActor(Worker):
        def __init__(self):
            super().__init__()
            self.model = Model()
            self.model = FSDP(self.model)  # or another distributed strategy
            self.optimizer = optim.Adam(self.model.parameters(), lr=1e-3)
            self.loss_fn = xxx  # e.g. a pairwise DPO loss

        def update(self, data):
            self.optimizer.zero_grad()
            logits = self.model(data)
            loss = self.loss_fn(logits)
            loss.backward()
            self.optimizer.step()

**Notes: How to distinguish between control processes and distributed computation processes**
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

- Control processes are generally functions directly decorated with ``@ray.remote``.
- Computation processes are all wrapped into a ``RayWorkerGroup``.

Users can reuse most of the distributed computation logic implemented in the PPO algorithm, including the FSDP and Megatron-LM backends in verl/verl/trainer/ppo.

Step 2: Based on different distributed scenarios, implement single-process control of multi-process data interaction
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

**The core problem to solve here is how a single process sends data to multiple processes, drives multi-process computation, and how the control process obtains the results of that computation.**

First, we initialize the multi-process ``WorkerGroup`` in the control process.

.. code:: python

    @ray.remote(num_cpus=1)
    def main_task(config):
        # construct SampleGenerator
        resource_pool = RayResourcePool(process_on_nodes=[8] * 2)  # 16 GPUs
        ray_cls = RayClassWithInitArgs(SampleGenerator, config=config)
        # put SampleGenerator onto the resource pool
        worker_group = RayWorkerGroup(resource_pool, ray_cls)

        # construct reference policy
        ...

As we can see, in the control process, multiple processes are wrapped into a ``RayWorkerGroup``. Inside this ``WorkerGroup``, there is a ``self._workers`` member, where each worker is a RayActor (https://docs.ray.io/en/latest/ray-core/actors.html) of SampleGenerator. ray_trainer.md also provides an implementation of ``MegatronRayWorkerGroup``.

Assuming the model is distributed using FSDP, and there is a batch of data on the control process, then for data parallelism, the underlying calling process is:

.. code:: python

    data = xxx
    data_list = data.chunk(dp_size)

    output = []
    for i, d in enumerate(data_list):
        # worker_group._workers[i] is a SampleGenerator
        output.append(worker_group._workers[i].generate_sequences.remote(d))

    output = ray.get(output)
    output = torch.cat(output)

A single process calling multiple processes involves the following 3 steps:

1. Split the data into DP parts on the control process.
2. Send the data to the remote workers, call the remote computation through RPC, and utilize multi-process computation.
3. Obtain the computation results of each worker on the control process and merge them.

Frequently repeating these 3 steps on the controller process greatly hurts code readability. **In verl, we have abstracted and encapsulated these 3 steps, so that the worker's method + dispatch + collect can be registered into the worker_group.**
.. code:: python

    from verl.single_controller.base.decorator import register

    def dispatch_data(worker_group, data):
        return data.chunk(worker_group.world_size)

    def collect_data(worker_group, data):
        return torch.cat(data)

    dispatch_mode = {
        'dispatch_fn': dispatch_data,
        'collect_fn': collect_data
    }

    @register(dispatch_mode=dispatch_mode)
    def generate_sequences(self, data):
        pass

In this way, we can directly call the method inside the worker through the ``worker_group`` on the control (driver) process (which is a single process):

.. code:: python

    output = worker_group.generate_sequences(data)

This single line includes data splitting, data distribution and computation, and data collection.

Furthermore, the model parallelism size of each model is usually fixed, including dp, tp and pp. So for these common distributed scenarios, we have pre-implemented specific dispatch and collect methods in `decorator.py `_, which can be directly used to wrap the computations.

.. code:: python

    from verl.single_controller.base.decorator import register, Dispatch

    @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO)
    def generate_sequences(self, data: DataProto) -> DataProto:
        pass

Here, the data interface is required to be ``DataProto``. The definition of ``DataProto`` is in `protocol.py `_.

Step 3: Main training loop
~~~~~~~~~~~~~~~~~~~~~~~~~~

With the above infrastructure, we can implement the algorithm's control flow. It is recommended that ``main_task`` also be a ray remote process.

.. code:: python

    @ray.remote(num_cpus=1)
    def main_task(config):
        # construct SampleGenerator
        resource_pool = RayResourcePool(process_on_nodes=[8] * 2)  # 16 GPUs
        ray_cls = RayClassWithInitArgs(SampleGenerator, config=config)
        # put SampleGenerator onto the resource pool
        sample_gen = RayWorkerGroup(resource_pool, ray_cls)

        # construct reference policy
        ray_cls = RayClassWithInitArgs(ReferencePolicy)
        ref_policy = RayWorkerGroup(resource_pool, ray_cls)

        # construct actor
        ray_cls = RayClassWithInitArgs(DPOActor)
        dpo_policy = RayWorkerGroup(resource_pool, ray_cls)

        dataloader = DataLoader()

        for data in dataloader:
            # generate data
            data = sample_gen.generate_sequences(data)
            # generate scores for each data
            data = generate_scores(data)
            # generate pairwise data using scores
            data = generate_pairwise_data(data)
            # generate ref_log_prob
            data.batch['ref_log_prob'] = ref_policy.infer(data)
            # update using dpo
            dpo_policy.update(data)
            # logging

Here, different ``WorkerGroups`` can be placed in the same resource pool or in different resource pools using ``create_colocated_worker_cls``, similar to `ray_trainer.py `_.


================================================
FILE: verl_distillation/docs/advance/fsdp_extension.rst
================================================

Add models with the FSDP backend
==================================

Last updated: 02/09/2025.

Model
--------------------------

In principle, our FSDP backend can support any HF model, and we can synchronize the actor model weights with vLLM using `hf_weight_loader.py` under `third_party/vllm`. However, ``hf_weight_loader`` will gather the full state_dict of a model during synchronization, which may cause OOM. We suggest using ``dtensor_weight_loader``, which gathers the full model parameters layer by layer to reduce peak memory usage.
We already support the dtensor weight loader for the models below in `dtensor_weight_loader.py` under `third_party/vllm`:

- ``GPT2LMHeadModel``
- ``LlamaForCausalLM``
- ``LLaMAForCausalLM``
- ``MistralForCausalLM``
- ``InternLMForCausalLM``
- ``AquilaModel``
- ``AquilaForCausalLM``
- ``Phi3ForCausalLM``
- ``GemmaForCausalLM``
- ``Gemma2ForCausalLM``
- ``GPTBigCodeForCausalLM``
- ``Starcoder2ForCausalLM``
- ``Qwen2ForCausalLM``
- ``DeepseekV2ForCausalLM``

To implement the ``dtensor_weight_loader`` of a model that's supported in vLLM, follow the guide for the gemma model below:

1. Copy the ``load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]])`` from the vllm model class to ``dtensor_weight_loaders.py``.
2. Modify the arguments to ``(actor_weights: Dict, vllm_model: nn.Module)``.
3. Replace ``self`` with ``vllm_model``.
4. Add ``local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)`` before each ``param = params_dict[name]`` and modify the subsequent weight loading to use ``local_loaded_weight``.
5. Register the implemented dtensor weight loader in ``__MODEL_DTENSOR_WEIGHT_LOADER_REGISTRY__``.

.. code-block:: diff

    - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]):
    + def gemma_dtensor_weight_loader(actor_weights: Dict, vllm_model: nn.Module) -> nn.Module:
          stacked_params_mapping = [
              # (param_name, shard_name, shard_id)
              ("qkv_proj", "q_proj", "q"),
              ("qkv_proj", "k_proj", "k"),
              ("qkv_proj", "v_proj", "v"),
              ("gate_up_proj", "gate_proj", 0),
              ("gate_up_proj", "up_proj", 1),
          ]
    -     params_dict = dict(self.named_parameters())
    +     params_dict = dict(vllm_model.named_parameters())
          loaded_params = set()
    -     for name, loaded_weight in weights:
    +     for name, loaded_weight in actor_weights.items():
              for (param_name, shard_name, shard_id) in stacked_params_mapping:
                  if shard_name not in name:
                      continue
                  name = name.replace(shard_name, param_name)
                  # Skip loading extra bias for GPTQ models.
                  if name.endswith(".bias") and name not in params_dict:
                      continue
    +             local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
                  param = params_dict[name]
                  weight_loader = param.weight_loader
    -             weight_loader(param, loaded_weight, shard_id)
    +             weight_loader(param, local_loaded_weight.to(dtype=param.dtype), shard_id)
                  break
              else:
                  # lm_head is not used in vllm as it is tied with embed_token.
                  # To prevent errors, skip loading lm_head.weight.
                  if "lm_head.weight" in name:
                      continue
                  # Skip loading extra bias for GPTQ models.
                  if name.endswith(".bias") and name not in params_dict:
                      continue
    +             local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
                  param = params_dict[name]
                  weight_loader = getattr(param, "weight_loader", default_weight_loader)
    -             weight_loader(param, loaded_weight)
    +             weight_loader(param, local_loaded_weight.to(dtype=param.dtype))
              loaded_params.add(name)
          unloaded_params = params_dict.keys() - loaded_params
          if unloaded_params:
              raise RuntimeError(
                  "Some weights are not initialized from checkpoints: "
                  f"{unloaded_params}")
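For step 5, the registration itself is just a dictionary entry mapping the vLLM model class name to the new loader. The fragment below is an illustrative sketch of how such a registry is typically wired up; the actual layout in ``dtensor_weight_loaders.py`` may differ.

.. code:: python

    # Hypothetical sketch of the registry and its dispatch; the names follow
    # the guide above, but the real module layout may differ.
    __MODEL_DTENSOR_WEIGHT_LOADER_REGISTRY__ = {
        "GemmaForCausalLM": gemma_dtensor_weight_loader,
        # ... one entry per supported architecture ...
    }

    def load_dtensor_weights(actor_weights, vllm_model):
        # Look up the per-architecture loader registered above and apply it.
        loader = __MODEL_DTENSOR_WEIGHT_LOADER_REGISTRY__[type(vllm_model).__name__]
        loader(actor_weights, vllm_model)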
================================================
FILE: verl_distillation/docs/advance/fully_async.md
================================================

# Recipe: Fully Async Policy Trainer

**Author:** `https://github.com/meituan-search`

Last updated: 10/18/2025.

This document introduces a fully asynchronous PPO training system that completely decouples the Trainer and the Rollouter, supporting asynchronous sample generation and training. Under this system, we achieved a 2.35x-2.67x performance improvement when training the Qwen2.5-7B model with 128 GPUs, without significantly affecting the results.

## Introduction

### Background

Compared to the colocate architecture, the separated rollout-and-train architecture can allocate resources more flexibly and supports more flexible training logic, thereby addressing issues such as low GPU utilization and poor training efficiency caused by long-tail samples. The one_step_off_policy recipe alleviates the problem of long rollout times and achieves some efficiency gains by using a separated architecture and training asynchronously, one step behind rollout. However, it is forced to use data that is exactly one step old, which is inflexible and cannot completely eliminate the impact of long-tail samples on training efficiency. Other frameworks such as AReaL, Magistral, StreamRL, and AsyncFlow have implemented asynchronous and streaming training on top of a separated architecture and achieved gains. We borrow from their methods and implement them in verl.

The fully_async_policy recipe supports asynchronous, streaming, and partial-rollout training. With reasonable settings for resource allocation, parameter synchronization frequency, and related parameters, fully_async_policy can significantly improve training efficiency.

> Magistral https://arxiv.org/abs/2506.10910
>
> AReaL: A Large-Scale Asynchronous Reinforcement Learning System for Language
> Reasoning https://arxiv.org/abs/2505.24298
>
> StreamRL: Scalable, Heterogeneous, and Elastic RL for LLMs with Disaggregated Stream
> Generation https://arxiv.org/abs/2504.15930
>
> AsyncFlow: An Asynchronous Streaming RL Framework for Efficient LLM Post-Training https://arxiv.org/abs/2507.01663

### Core Contributions

* **Resource Isolation**: Unlike the hybrid_engine setup, the Rollouter and the Trainer use separate computing resources, and the resources each occupies must be specified separately.
* **Parallel Generation and Training**: While the Trainer is training, the Rollouter is generating new samples.
* **Multi-step Asynchrony**: Compared to one-step-off policy, it supports asynchrony anywhere from 0.x steps to multiple steps, making the asynchronous setup more flexible.
* **NCCL Parameter Synchronization**: Uses NCCL communication primitives for parameter communication between the Rollouter and the Trainer.
* **Streaming Inference and Training**: The Rollouter generates data sample by sample, and data transmission uses a single sample as the minimum transmission unit.
* **Asynchronous Training and Freshness Control**: By setting the parameter async_training.staleness_threshold, it supports training with samples generated by old parameters.
* **PartialRollout**: The Rollouter's inference process supports partial-rollout logic. During parameter synchronization, by adding `sleep()` and `resume()` logic, it saves samples from ongoing rollouts and continues them in the next rollout, reducing the time spent waiting for in-flight tasks to finish during parameter synchronization.

Currently, the supported usage mode is megatron/fsdp+vllm, and vllm must use the server mode based on AgentLoop.

## Design

The overall architecture of fully_async_policy is shown in the figure below. fully_async_policy consists of four main parts: Rollouter, MessageQueue, Trainer, and ParameterSynchronizer.

![fully_async_policy_structure](https://github.com/ArronHZG/verl-community/blob/recipe/async_policy/docs/fully_async_policy_structure.svg?raw=true)

1. The Rollouter generates sequences sample by sample and puts the generated samples into the MessageQueue, with the production speed controlled by freshness.
2. The MessageQueue temporarily stores samples generated by the Rollouter.
3. The Trainer fetches samples from the MessageQueue one by one. After fetching `require_batches*ppo_mini_batch_size` samples, it performs a training step. After training for async_training.trigger_parameter_sync_step rounds, it triggers a parameter synchronization with the Rollouter.
4. The ParameterSynchronizer implements synchronous NCCL parameter synchronization.
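The data flow between these components is a producer/consumer streaming pipeline. The toy sketch below, using plain Python threads and `queue.Queue` with illustrative batch sizes, mirrors the Rollouter → MessageQueue → Trainer interaction described above; the real implementation uses Ray actors and NCCL instead:

```python
# Toy sketch of the Rollouter -> MessageQueue -> Trainer streaming flow.
import queue
import threading

message_queue = queue.Queue(maxsize=1024)  # MessageQueue: buffers single samples

def rollouter(total_samples=64):
    # Generate sample by sample and stream each one into the queue.
    for sample_id in range(total_samples):
        message_queue.put(f"sample-{sample_id}")

def trainer(require_batches=1, ppo_mini_batch_size=8, trigger_parameter_sync_step=4):
    for step in range(8):  # 8 local updates in this toy run
        # Fetch require_batches * ppo_mini_batch_size samples, then train on them.
        batch = [message_queue.get() for _ in range(require_batches * ppo_mini_batch_size)]
        print(f"step {step}: trained on {len(batch)} samples")
        if (step + 1) % trigger_parameter_sync_step == 0:
            print("trigger parameter synchronization with the Rollouter")

producer = threading.Thread(target=rollouter)
consumer = threading.Thread(target=trainer)
producer.start(); consumer.start()
producer.join(); consumer.join()
```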
Compared to the baseline, the source of the gains is as follows: in the colocate case, devoting more resources to rollout cannot eliminate the idleness caused by long-tail samples. After resource isolation, the rollout and train phases may each take longer than before (because each uses fewer resources), but overlapping them reduces the end-to-end time.

![fully_async_policy_revenue](https://github.com/ArronHZG/verl-community/blob/recipe/async_policy/docs/fully_async_policy_revenue.svg?raw=true)

## Usage

### Parameter Description

| super params | implication |
|-----------------------------------------------|--------------------------------------------------------------------------------------------|
| `trainer.nnodes` | Number of nodes for the Trainer |
| `trainer.n_gpus_per_node` | Number of GPUs per node for the Trainer |
| `rollout.nnodes` | Number of nodes for the Rollouter |
| `rollout.n_gpus_per_node` | Number of GPUs per node for the Rollouter |
| `data.train_batch_size` | Not effective in the fully async strategy (default is 0) |
| `data.gen_batch_size` | In the fully async strategy, uses streaming sample production logic (default is 1) |
| `rollout.total_rollout_steps` | Total number of rollout samples |
| `rollout.test_freq` | How many parameter updates the Rollouter performs before running a validation |
| `actor_rollout_ref.actor.ppo_mini_batch_size` | The ppo_mini_batch_size is a global number across all workers/GPUs |
| `async_training.require_batches` | Number of ppo_mini_batch_size batches that the FullyAsyncTrainer fetches at once |
| `async_training.trigger_parameter_sync_step` | How many local updates the FullyAsyncTrainer performs before a parameter synchronization |
| `async_training.staleness_threshold` | Freshness control |
| `async_training.partial_rollout` | Whether to perform partial rollout |
| `async_training.use_rollout_log_probs` | Use the log_probs generated by rollout |
| `async_training.compute_prox_log_prob` | Whether to compute log_prob with the training model's parameters during the training phase |

**Further Explanation:**

* `rollout.total_rollout_steps`

  Compared to colocate, the quantity can be aligned by multiplying train_batch_size by step: `rollout.total_rollout_steps = data.train_batch_size * step`.

* `async_training.trigger_parameter_sync_step`

  In the fully async strategy, this indicates how many local updates the Trainer performs (i.e., how many times it fetches `require_batches * ppo_mini_batch_size` samples) before a parameter synchronization with the Rollouter. Between every two parameter synchronizations, the Trainer will process `trigger_parameter_sync_step * require_batches * ppo_mini_batch_size` samples. To compare speed fairly with colocate, trigger_parameter_sync_step should be set to `data.train_batch_size / (require_batches * ppo_mini_batch_size)`.

* `async_training.staleness_threshold`

  In the fully async strategy, this indicates the maximum proportion of stale samples allowed to be used; a worked example follows this parameter list.

  * staleness_threshold=0 indicates synchronous training. The Rollouter generates a fixed number of samples between two parameter updates:

    $$rollout\_num = trigger\_parameter\_sync\_step \times require\_batches \times ppo\_mini\_batch\_size$$

  * staleness_threshold>0 indicates asynchronous training and can be set to a decimal for more flexible asynchrony. The Rollouter generates at most the following number of samples between two parameter updates:

    $$rollout\_num = (1+staleness\_threshold) \times (trigger\_parameter\_sync\_step \times require\_batches \times ppo\_mini\_batch\_size) - num\_staleness\_sample$$

    where num_staleness_sample is the number of stale samples generated in excess during the previous rollout round. Since this is a streaming system, rollout keeps generating and the trainer keeps consuming. If the Rollouter is slower, the Trainer triggers parameter synchronization earlier, and the Rollouter will not actually produce rollout_num samples. When rollout is fast enough, setting staleness_threshold to 1 is roughly equivalent to the one-step-off policy. To avoid too many stale samples affecting training accuracy, it is recommended to set this value to less than 1.
* `async_training.partial_rollout`

  partial_rollout only takes effect when staleness_threshold>0.

* `async_training.use_rollout_log_probs`

  In reinforcement learning algorithms, log_probs are implicitly tied to parameter versions and tokens. Due to the design of algorithms like PPO/GRPO/DAPO, when calculating importance sampling, old_log_prob must use the log_probs corresponding to the rollout parameters and tokens to ensure algorithmic correctness. In the fully async strategy, old_log_prob is therefore computed by rollout rather than by the trainer by default.

* `async_training.require_batches`

  In streaming training, require_batches should be set to 1, meaning a training step is performed as soon as ppo_mini_batch_size samples have been produced. In practice, we found that dispatching fewer samples at once can, due to the order of data distribution, cause training instability and longer response lengths. We therefore provide require_batches to control streaming distribution and the number of samples participating in one training step.

* `async_training.compute_prox_log_prob` (experimental)

  During training, we observed that metrics and response lengths may become unstable in the later stages. To mitigate this, we can apply the [Rollout Importance Sampling](https://verl.readthedocs.io/en/latest/advance/rollout_is.html) technique. Rollout Importance Sampling requires computing log_prob with the training engine, which requires enabling this switch. Additionally, when compute_prox_log_prob and Rollout Importance Sampling are enabled under mode d (async stream pipeline with partial rollout), our implementation approximates AReaL's Decoupled PPO.
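As a worked example of the `staleness_threshold` bound above (the first four values match the 7B experiment settings later in this document; `num_staleness_sample` is a hypothetical leftover count):

```python
# Worked example of the rollout_num bound for staleness_threshold > 0.
trigger_parameter_sync_step = 4
require_batches = 4
ppo_mini_batch_size = 32
staleness_threshold = 0.5
num_staleness_sample = 100  # hypothetical stale samples left over from the previous round

samples_per_sync = trigger_parameter_sync_step * require_batches * ppo_mini_batch_size
rollout_num = (1 + staleness_threshold) * samples_per_sync - num_staleness_sample
print(samples_per_sync)  # 512 samples trained between two parameter syncs
print(rollout_num)       # at most 668.0 samples generated in the same window
```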
### Supported Modes

1. on policy pipeline:
   1. **trigger_parameter_sync_step=1, staleness_threshold=0**
   2. The Rollouter produces `require_batches*ppo_mini_batch_size` samples at once, the Trainer fetches these samples for training, and after training completes, the Trainer and the Rollouter perform a parameter synchronization.
   3. During the rollout phase, if there are long-tail samples but few rollout samples, shorter samples cannot fill the idle resources, causing some resource waste.
   4. Shown in figure a.
2. stream off policy pipeline:
   1. **trigger_parameter_sync_step>1, staleness_threshold=0**
   2. Synchronous streaming training. The Rollouter produces `require_batches*ppo_mini_batch_size*trigger_parameter_sync_step` samples at once; the Trainer performs a local update every time it fetches `require_batches*ppo_mini_batch_size` samples; after trigger_parameter_sync_step local updates, the Trainer and the Rollouter perform a parameter synchronization.
   3. Compared to a, more samples are generated at once, so resources sit idle less often.
   4. Within one step of training there are two periods of resource idleness: while fetching the first batch of samples, train waits for `require_batches*ppo_mini_batch_size` samples to be produced, and during the last parameter update, rollout waits for training to complete.
   5. Shown in figure b.
3. async stream pipeline with stale samples:
   1. **trigger_parameter_sync_step>=1, staleness_threshold>0, partial_rollout=False**
   2. After each parameter update, the Rollouter plans to produce at most rollout_num samples (in practice, depending on rollout speed, it may generate fewer).
   3. If rollout is relatively fast, the Rollouter will generate some additional samples (num_stale_samples) before parameter synchronization, for immediate use by the Trainer after synchronization. When parameter synchronization is triggered while the Rollouter has in-flight tasks, it waits for them to complete and does not add new ones.
   4. Compared to b, all training steps except the first no longer wait for the first batch of rollouts, but they do wait for in-flight tasks to finish.
   5. Shown in figure c.
4. async stream pipeline with partial rollout:
   1. **trigger_parameter_sync_step>=1, staleness_threshold>0, partial_rollout=True**
   2. Compared to c, when parameter synchronization is triggered while the Rollouter still has samples in flight, it interrupts the rollout, performs the synchronization, and resumes generating the interrupted samples afterwards. This reduces the time spent waiting for in-flight tasks to finish.
   3. Shown in figure d.
![fully_async_policy_mode](https://github.com/ArronHZG/verl-community/blob/recipe/async_policy/docs/fully_async_policy_mode.svg?raw=true)

### Key Metrics

| metrics | implication |
|------------------------------------------------|------------------------------------------------------------------------------------------------------|
| `trainer/idle_ratio` | Trainer idle rate |
| `rollouter/idle_ratio` | Rollouter idle rate |
| `fully_async/count/stale_samples_processed` | Total number of stale samples used in training |
| `fully_async/count/stale_trajectory_processed` | Total number of stale trajectories used in training (one sample produces rollout.n trajectories) |
| `fully_async/partial/total_partial_num` | Number of partial samples processed by the Trainer between two parameter synchronizations |
| `fully_async/partial/partial_ratio` | Ratio of partial samples processed by the Trainer between two parameter synchronizations |
| `fully_async/partial/max_partial_span` | Maximum parameter span of partial samples processed by the Trainer between two parameter synchronizations |

### Parameter Tuning Recommendations

* Resource Allocation and Adjustment:
  * Reasonable resource allocation is the prerequisite for good training efficiency. Ideally, resources should be allocated so that rollout time and train time are close, minimizing pipeline bubbles, avoiding idle resources, and ensuring the Trainer does not consume stale samples. In real training, resource allocation can be adjusted based on the idle time of rollout and train, available from rollouter/idle_ratio and trainer/idle_ratio. If rollouter/idle_ratio is high and trainer/idle_ratio is low, increase Trainer resources and reduce Rollouter resources, and vice versa.
* Key Parameters:
  * staleness_threshold: Setting it too high causes more stale samples to be used, hurting model performance. It is recommended to set it below 1.
  * require_batches: The closer to 1, the closer to a pure streaming process, the smaller the training bubbles, and the greater the speedup, but it affects the order of sample processing.
  * trigger_parameter_sync_step: The smaller the value, the closer to on-policy training, but parameter synchronization becomes frequent, and long-tail samples waste resources that short samples cannot fill, lowering resource utilization. The larger the value, the higher the computational efficiency, but accuracy suffers from being off-policy.
  * rollout.test_freq: Validation occupies Rollouter resources, so it is not recommended to set this too small.
* Mode Selection: By adjusting the parameters, the fully async architecture supports different levels of acceleration, suitable for tasks in different scenarios.
  * For small-scale tasks that need stable, on-policy training and have low speed requirements, try the on policy pipeline mode (Mode 1).
  * For scenarios that need higher training throughput but are sensitive to staleness, try the stream off policy pipeline mode: set trigger_parameter_sync_step>1 to improve training efficiency while keeping the synchronization mechanism (staleness_threshold=0) (Mode 2).
  * For large-scale tasks with high training speed requirements that can tolerate a certain degree of off-policyness and staleness, set staleness_threshold>0 and partial_rollout=True to improve training efficiency, using the async stream pipeline modes (Mode 3 or 4).

### Quick Start

```shell
rollout_mode="async"
rollout_name="vllm" # sglang or vllm
if [ "$rollout_mode" = "async" ]; then
    export VLLM_USE_V1=1
    return_raw_chat="True"
fi

use_dynamic_bsz=True  # referenced below; enable dynamic batch sizing
train_prompt_bsz=0
gen_prompt_bsz=1
n_resp_per_prompt=16
train_prompt_mini_bsz=32
total_rollout_steps=$((512 * 400))
test_freq=10
staleness_threshold=0
trigger_parameter_sync_step=16
partial_rollout=False

python -m recipe.fully_async_policy.fully_async_main \
    data.train_batch_size=${train_prompt_bsz} \
    data.gen_batch_size=${gen_prompt_bsz} \
    data.return_raw_chat=${return_raw_chat} \
    actor_rollout_ref.rollout.n=${n_resp_per_prompt} \
    actor_rollout_ref.actor.strategy=fsdp2 \
    critic.strategy=fsdp2 \
    actor_rollout_ref.hybrid_engine=False \
    actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.rollout.name=${rollout_name} \
    actor_rollout_ref.rollout.mode=${rollout_mode} \
    actor_rollout_ref.rollout.calculate_log_probs=True \
    trainer.nnodes="${NNODES_TRAIN}" \
    trainer.n_gpus_per_node="${NGPUS_PER_NODE}" \
    rollout.nnodes="${NNODES_ROLLOUT}" \
    rollout.n_gpus_per_node="${NGPUS_PER_NODE}" \
    rollout.total_rollout_steps="${total_rollout_steps}" \
    rollout.test_freq="${test_freq}" \
    async_training.staleness_threshold="${staleness_threshold}" \
    async_training.trigger_parameter_sync_step="${trigger_parameter_sync_step}" \
    async_training.partial_rollout="${partial_rollout}"
```

## Experiments

### Asynchronous Training on a 7B Model

We used Qwen2.5-Math-7B to verify the benefits of the fully async strategy under long rollouts and at multiple resource scales. Using the `async stream pipeline with stale samples` strategy, we achieved roughly a 2x speedup on 32, 64, and 128 GPUs without significantly affecting the experimental results.

* Machine: H20
* Model: Qwen2.5-Math-7B
* Rollout length: max_response_length FSDP2: 28K tokens
* Algorithm: DAPO
* Dataset: TRAIN_FILE: dapo-math-17k.parquet; TEST_FILE: aime-2024.parquet
* Engine: vllm+FSDP2
* rollout.n: 16
* ppo_mini_batch_size: 32
* test_freq: 20
* colocate sync:
  * step: 400
  * train_batch_size: 512
* fully_async_policy:
  * total_rollout_steps: 512*400
  * require_batches: 4
  * trigger_parameter_sync_step: 4
  * staleness_threshold: 0.5
  * partial_rollout: True

| training mode | resource allocation | step | gen | old_log_prob | update_actor | total time<br>100 step | total time<br>200 step | total time<br>300 step | total time<br>400 step | acc/mean@1 |
|:--------------------:|:---------------------:|:------:|:------:|:------------:|:------------:|:----------------------:|:----------------------:|:----------------------:|:----------------------:|:---------------------------:|
| colocate sync | 32 | 790.10 | 357.41 | 107.71 | 313.81 | 13h 44m | 1d 3h 43m | 2d 9h 22m | 3d 17h 5m | max: 0.3313<br>last: 0.2448 |
| fully_async_policy | 16:16 | 294.77 | 21.26 | \ | 269.80 | 7h 58m<br>(1.72x) | 16h 21m<br>(1.70x) | 1d 0h 53m<br>(2.31x) | 1d 9h 26m<br>(2.66x) | max: 0.3302<br>last: 0.2333 |
| colocate sync | 64 | 365.28 | 150.72 | 70.26 | 133.41 | 10h 22m | 20h 45m | 1d 7h 6m | 1d 17h 32m | max: 0.3365<br>last: 0.2333 |
| fully_async_policy | 32:32 | 189.26 | 28.46 | \ | 156.98 | 4h 57m<br>(2.09x) | 10h 14m<br>(2.03x) | 16h 58m<br>(1.83x) | 21h 40m<br>(1.92x) | max: 0.3677<br>last: 0.3406 |
| colocate sync | 128 | 356.30 | 177.85 | 53.92 | 113.81 | 8h 36m | 17h 56m | 1d 5h 6m | 1d 16h 48m | max: 0.3573<br>last: 0.2958 |
| fully_async_policy | 64:64 | 150.63 | 33.14 | \ | 113.16 | 3h 13m<br>(2.67x) | 6h 46m<br>(2.65x) | 10h 53m<br>(2.67x) | 17h 22m<br>(2.35x) | max: 0.3521<br>last: 0.3094 |

> source data: https://wandb.ai/hou-zg-meituan/fully-async-policy-colocate_async?nw=nwuserhouzg

### 128-GPU 7B Asynchronous Mode Experiment

We used Qwen2.5-Math-7B to verify the effects of the various modes supported by fully async. Streaming alone brings roughly a 1.6x speedup, and combining staleness and partial_rollout raises the benefit to 2.35x.
| mode | step | gen | old_log_prob | update_actor | total time<br>100 step | total time<br>200 step | total time<br>300 step | total time<br>400 step | acc/mean@1 |
|:---------------------------------------------------------------------------------------------------:|:------:|:------:|:------------:|:------------:|:----------------------:|:----------------------:|:----------------------:|:----------------------:|:---------------------------:|
| colocate sync | 356.30 | 177.85 | 53.92 | 113.81 | 8h 36m | 17h 56m | 1d 5h 6m | 1d 16h 48m | max: 0.3573<br>last: 0.2958 |
| `stream off policy pipeline`<br>(+fully async: trigger_parameter_sync_step=4,<br>require_batches=4) | 231.34 | 128.47 | \ | 98.77 | 4h 25m | 9h 41m | 15h 2m | 1d 1h 53m | max: 0.2844<br>last: 0.2604 |
| `async stream pipeline with stale samples`<br>(+staleness_threshold=0.5) | | | | | | | | | |
| `async stream pipeline with partial rollout`<br>(+partial_rollout=True) | 150.63 | 33.14 | \ | 113.16 | 3h 13m | 6h 46m | 10h 53m | 17h 22m | max: 0.3521<br>last: 0.3094 |

> source data: https://wandb.ai/hou-zg-meituan/fully-async-policy-stream_stale_partial?nw=nwuserhouzg

### 128-GPU Staleness Ablation Experiment

Under the `async stream pipeline with partial rollout` mode, we verified the impact of the staleness setting on training efficiency. We found that the larger the staleness, the more pronounced the final gains. We also noticed that the total times for staleness values of 0.3 and 0.5 are quite close, because as the training steps increase, the response length changes significantly, causing training instability. This issue needs further analysis and optimization.
| staleness_threshold | step | gen | old_log_prob | update_actor | total time<br>100 step | total time<br>200 step | total time<br>300 step | total time<br>400 step | acc/mean@1 |
|:-------------------:|:------:|:------:|:------------:|:------------:|:----------------------:|:----------------------:|:----------------------:|:----------------------:|:---------------------------:|
| 0 | 231.34 | 128.47 | \ | 98.77 | 4h 25m | 9h 41m | 15h 2m | 1d 1h 53m | max: 0.2844<br>last: 0.2604 |
| 0.1 | 171.30 | 58.17 | \ | 109.12 | 3h 53m | 8h 37m | 14h 25m | 19h 59m | max: 0.3542<br>last: 0.2979 |
| 0.3 | 146.11 | 38.88 | \ | 103.22 | 3h 18m | 6h 49m | 11h 40m | 17h 20m | max: 0.3469<br>last: 0.2865 |
| 0.5 | 150.63 | 33.14 | \ | 113.16 | 3h 13m | 6h 46m | 10h 53m | 17h 22m | max: 0.3521<br>last: 0.3094 |

> source data: https://wandb.ai/hou-zg-meituan/fully-async-policy-stream_stale_partial?nw=nwuserhouzg

### 128-GPU 7B require_batches Ablation Experiment

In multiple tests, we found that the number of samples dispatched at a time in streaming affects the response length during training, which in turn affects training time. We verified the impact on results by varying `async_training.require_batches`.
| require_batches | step | gen | old_log_prob | update_actor | total time<br>100 step | total time<br>200 step | total time<br>300 step | acc/mean@1 |
|:---------------:|:------:|:-----:|:------------:|:------------:|:----------------------:|:----------------------:|:----------------------:|:---------------------------:|
| 1 | 203.47 | 30.88 | \ | 181.08 | 3h 31m | 8h 29m | 17h 36m | max: 0.349<br>last: 0.326 |
| 2 | 158.72 | 26.32 | \ | 128.08 | 3h 35m | 7h 38m | 13h 57m | max: 0.351<br>last: 0.3406 |
| 4 | 124.64 | 25.62 | \ | 95.06 | 3h 13m | 6h 46m | 10h 53m | max: 0.3521<br>last: 0.3521 |

> source data: https://wandb.ai/hou-zg-meituan/fully-async-policy-ablation_require_batches?nw=nwuserhouzg

### 30B Model Mode Experiment

TODO: The 30B experiment is still in progress.

* Machine: H20
* Model: Qwen2.5-32B
* Rollout length: max_response_length FSDP2: 20K tokens
* Algorithm: DAPO
* Engine: vllm+FSDP2
* rollout.n: 16
* ppo_mini_batch_size: 32
* test_freq: 20
* colocate sync:
  * step: 200
  * train_batch_size: 512
* fully_async_policy:
  * total_rollout_steps: 512*200
  * trigger_parameter_sync_step: 512/32 = 16
  * staleness_threshold: 0
  * partial_rollout: False

| training mode | Resource allocation | mode | step | generate_sequences | old_log_prob | update_actor | total time | acc/best@32/mean |
|--------------------|---------------------|--------------------------------------------|------|--------------------|--------------|--------------|------------|------------------|
| colocate sync | 128 | | | | | | | |
| fully_async_policy | 64:64 | stream off policy pipeline | | | | | | |
| fully_async_policy | 64:64 | async stream pipeline with stale samples | | | | | | |
| fully_async_policy | 64:64 | async stream pipeline with partial rollout | | | | | | |

## Future Plans

* GRPO experiments
* Megatron adaptation
* SGLang integration
* Transfer queue integration
* Asynchronous parameter synchronization
* AReaL asynchronous algorithm implementation
* TPPO algorithm implementation
* Multi-turn and tool support


================================================
FILE: verl_distillation/docs/advance/megatron_extension.rst
================================================

Add models with the Megatron-LM backend
=========================================

Last updated: 04/25/2025.

Model
-----------

If you use the latest verl, ``GPTModel`` is directly supported for the Megatron backend, and you can add custom models in much the same way you would pretrain them with Megatron. The steps are:

1. Find `model_initializer.py `_
2. If your model is configurable by ``TransformerLayerSpec``, you can directly use ``GPTModel``. Otherwise, implement a new ``ModelLayerSpec`` and ``ModelLayer`` here.
3. Use the right ``LayerSpec``, ``TransformerConfig`` and ``HuggingfaceConfig`` as arguments to initialize the GPTModel.
4. Return the model at the end.


================================================
FILE: verl_distillation/docs/advance/one_step_off.md
================================================

# Recipe: One Step Off Policy Async Trainer

**Author:** `https://github.com/meituan-search`

Last updated: 07/17/2025.

## Introduction

### Background

The reinforcement learning training process currently implemented by verl is synchronous, adhering to the algorithmic workflows of established methods like PPO, GRPO, and DAPO. In each step, training samples are generated by the latest model, and the model is updated after training completes. While this approach keeps generation on-policy and stabilizes RL training, it suffers from severe efficiency issues: model updates must wait for the longest output in the generation phase to complete. While long-tail samples are being generated, GPUs sit idle, resulting in significant underutilization. The more severe the long-tail problem in sample generation, the lower the overall training efficiency. For example, in DAPO 32B training, the rollout phase accounts for approximately 70% of the total time, and adding resources does not reduce the rollout duration.
![DAPO 32B Math Performance](https://raw.githubusercontent.com/eric-haibin-lin/verl-community/refs/heads/main/docs/dapo_32b_math.png)

> source data: https://wandb.ai/verl-org/DAPO%20Reproduction%20on%20verl/workspace?nw=nwusertongyuxuan361

### Solution

We have implemented the **One Step Off Async Trainer** to help alleviate this issue. This approach parallelizes the generation and training processes, using samples generated in the previous step for the current training step. It also partitions resources appropriately, allocating dedicated resources for generation and automatically assigning the remainder to training. By reducing the resources allocated to the generation phase, we mitigate GPU idle time during long-tail sample generation. Throughout this process, generation and training parameters stay one step off-policy.

![One Step Off Policy Diagram](https://raw.githubusercontent.com/eric-haibin-lin/verl-community/refs/heads/main/docs/one_step_off_policy.png)

> reference: [AReaL: A Large-Scale Asynchronous Reinforcement Learning System for Language Reasoning](https://arxiv.org/abs/2505.24298)

Our core contributions include:

1. **Parallel Generation and Training**: Samples for the next batch are asynchronously generated while the current batch is being trained.
2. **Resource Isolation**: Unlike `hybrid_engine`, this method requires explicit resource allocation for rollout, with the remaining resources automatically assigned to training.
3. **NCCL Parameter Synchronization**: Employs NCCL communication primitives for seamless parameter transfer between the generation and training modules.

### Experimental Results

- **Machine Configuration**: 2 nodes, 16 H20 GPUs in total
  - Generation: 4 GPUs
  - Training: 12 GPUs
- **Model**: Qwen2.5-Math-7B
- **Rollout Configuration**:
  - **Max Response Length**: FSDP2: 20,480 tokens; Megatron: 8,192 tokens
  - **Algorithm**: DAPO
  - **Rollout Engine**: vLLM

| training mode | engine | step | gen | wait_prev_gen | generate_sequences | old_log_prob | update_actor | total time | acc/best@32/mean | acc/maj@32/mean |
|------------------------|---------------|------|-----|---------------|--------------------|--------------|--------------|---------------|------------------|-----------------|
| colocate sync | VLLM+FSDP2 | 749 | 321 | - | 247 | 88 | 286 | 19h18m | 0.5948 | 0.417 |
| one-step-overlap async | VLLM+FSDP2 | 520 | - | 45 | 458 | 108 | 337 | 15h34m (+23%) | 0.6165 | 0.494 |
| colocate sync | VLLM+Megatron | 699 | 207 | - | 162 | 119 | 344 | 18h21m | 0.605 | 0.4217 |
| one-step-overlap async | VLLM+Megatron | 566 | - | 59 | 501 | 120 | 347 | 13h06m (+40%) | 0.6569 | 0.4038 |

* colocate sync: step ≈ gen + old_log_prob + update_actor
* one-step-overlap async: step ≈ wait_prev_gen + old_log_prob + update_actor

![One Step Off Megatron Performance](https://raw.githubusercontent.com/eric-haibin-lin/verl-community/refs/heads/main/docs/one_step_off_megatron.png)

> source data: https://wandb.ai/hou-zg-meituan/one-step-off-policy?nw=nwuserhouzg

## Implementation

### One Step Off Policy Async Pipeline

Our **One Step Off Policy Async Pipeline** integrates into the existing training logic at minimal cost, eliminating the need for additional sample storage management. The core mechanism uses `async_gen_next_batch` for asynchronous rollout generation while maintaining continuous operation across epoch transitions via `create_continuous_iterator`.
```python
# iterator generator, simplifying one-step integration into the training process
def _create_continuous_iterator(self):
    for epoch in range(self.config.trainer.total_epochs):
        iterator = iter(self.train_dataloader)
        for batch_dict in iterator:
            yield epoch, batch_dict

# read the next batch of samples, sync parameters, and launch async gen_seq
def _async_gen_next_batch(self, continuous_iterator):
    # read train_data
    try:
        epoch, batch_dict = next(continuous_iterator)
    except StopIteration:
        return None
    batch = DataProto.from_single_dict(batch_dict)
    gen_batch = batch_process(batch)
    # sync weights from actor to rollout
    self.sync_rollout_weights()
    # async generation
    gen_batch_output = self.rollout_wg.async_generate_sequences(gen_batch)
    # wrap the result in a future
    return GenerationBatchFuture(epoch, batch, gen_batch_output)

continuous_iterator = self._create_continuous_iterator()
# run rollout first to achieve one-step-off
batch_data_future = self._async_gen_next_batch(continuous_iterator)

while batch_data_future is not None:
    # wait for the gen_seq result from the previous step
    batch = batch_data_future.get()
    # launch the next async call to generate sequences
    batch_data_future = self._async_gen_next_batch(continuous_iterator)

    # compute advantages
    batch = critic.compute_values(batch)
    batch = reference.compute_log_prob(batch)
    batch = reward.compute_reward(batch)
    batch = compute_advantages(batch)

    # model update
    critic_metrics = critic.update_critic(batch)
    actor_metrics = actor.update_actor(batch)
```

### Parameter Synchronization

Our NCCL-based weight update for the rollout model performs very well: most of the time the latency is under 300 ms, which is negligible for RLHF.

> **sync_rollout_weights**: The time for synchronizing parameters from actor to rollout is extremely short and can almost be ignored, because it is implemented with NCCL.

```python
class ActorRolloutRefWorker:
    # the actor gathers the meta-info of model parameters for parameter sync
    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def get_actor_weights_info(self):
        params = self._get_actor_params()
        ret = []
        for key, tensor in params.items():
            ret.append((key, tensor.size(), tensor.dtype))
        self._weights_info = ret
        return ret

    # the rollout sets the meta-info of model parameters for parameter sync
    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def set_actor_weights_info(self, weights_info):
        self._weights_info = weights_info

class AsyncRayPPOTrainer(RayPPOTrainer):
    def init_workers(self):
        ...
        # the rollout obtains the meta-info of model parameters from the actor for parameter sync
        weights_info = self.actor_wg.get_actor_weights_info()[0]
        self.rollout_wg.set_actor_weights_info(weights_info)

        # create an actor-rollout communication group for parameter sync
        self.create_weight_sync_group()
```
```python
# The driver process invokes the actor and rollout respectively to create a
# weight synchronization group based on nccl/hccl.
def create_weight_sync_group(self):
    master_address = ray.get(self.actor_wg.workers[0]._get_node_ip.remote())
    master_port = ray.get(self.actor_wg.workers[0]._get_free_port.remote())
    world_size = len(self.actor_wg.workers + self.rollout_wg.workers)
    self.actor_wg.create_weight_sync_group(
        master_address,
        master_port,
        0,
        world_size,
    )
    ray.get(
        self.rollout_wg.create_weight_sync_group(
            master_address,
            master_port,
            len(self.actor_wg.workers),
            world_size,
        )
    )

# the driver process calls the actor and rollout respectively to sync parameters via nccl
def sync_rollout_weights(self):
    self.actor_wg.sync_rollout_weights()
    ray.get(self.rollout_wg.sync_rollout_weights())

# fsdp model parameter sync
@register(dispatch_mode=Dispatch.ONE_TO_ALL, blocking=False)
def sync_rollout_weights(self):
    params = self._get_actor_params() if self._is_actor else None
    if self._is_rollout:
        inference_model = (
            self.rollout.inference_engine.llm_engine.model_executor.driver_worker.worker.model_runner.model
        )
        from verl.utils.vllm.patch import patch_vllm_moe_model_weight_loader

        patch_vllm_moe_model_weight_loader(inference_model)
    # model parameters are broadcast tensor-by-tensor from actor to rollout
    for key, shape, dtype in self._weights_info:
        tensor = torch.empty(shape, dtype=dtype, device=get_torch_device().current_device())
        if self._is_actor:
            assert key in params
            origin_data = params[key]
            if hasattr(origin_data, "full_tensor"):
                origin_data = origin_data.full_tensor()
            if torch.distributed.get_rank() == 0:
                tensor.copy_(origin_data)
        from ray.util.collective import collective

        collective.broadcast(tensor, src_rank=0, group_name="actor_rollout")
        if self._is_rollout:
            inference_model.load_weights([(key, tensor)])
```

## Usage

### FSDP2 Configuration Example

```shell
# actor and rollout are placed separately (hybrid_engine=False),
# with the resources for each specified explicitly
python3 -m recipe.one_step_off_policy.async_main_ppo \
    --config-path=config \
    --config-name='one_step_off_ppo_trainer.yaml' \
    actor_rollout_ref.actor.strategy=fsdp2 \
    actor_rollout_ref.hybrid_engine=False \
    trainer.nnodes=1 \
    trainer.n_gpus_per_node=6 \
    rollout.nnodes=1 \
    rollout.n_gpus_per_node=2
```

### Megatron Configuration Example

```shell
# actor and rollout are placed separately (hybrid_engine=False),
# with the resources for each specified explicitly
python3 -m recipe.one_step_off_policy.async_main_ppo \
    --config-path=config \
    --config-name='one_step_off_ppo_megatron_trainer.yaml' \
    actor_rollout_ref.actor.strategy=megatron \
    actor_rollout_ref.hybrid_engine=False \
    trainer.nnodes=1 \
    trainer.n_gpus_per_node=6 \
    rollout.nnodes=1 \
    rollout.n_gpus_per_node=2
```

### Configuration Guidelines

1. **Card Number Relationships**

   Maintain either of these relationships for optimal batch distribution:
   - `actor_rollout_ref.rollout.n` should be an integer divisor of `trainer.n_gpus_per_node * trainer.nnodes`
   - `actor_rollout_ref.rollout.n * data.train_batch_size` should be evenly divisible by `trainer.n_gpus_per_node * trainer.nnodes`

   > Rationale: Ensures training samples can be evenly distributed across training GPUs when using partial resources for generation.
2. **Dynamic Resource Tuning**

   Adjust `trainer.nnodes`, `trainer.n_gpus_per_node`, `rollout.nnodes` and `rollout.n_gpus_per_node` based on phase durations:
   - **Ideal state**: Rollout and training phases have comparable durations
   - **Diagnostic metrics**:
     - Monitor the `wait_prev_gen` duration
     - Analyze the `sequence_length` distribution
   - **Adjustment strategy**:
     - High `wait_prev_gen` + uniform sequence lengths → Increase rollout resources
     - High `wait_prev_gen` + long-tail sequences → Optimize the stopping criteria (adding resources won't help)

   > **wait_prev_gen**: The time spent waiting for the previous rollout to finish (the part that is not fully overlapped).

**Resource Configuration Strategies:**

- **Resource-constrained scenario**: Optimize resource utilization by adjusting GPU allocation ratios, keeping the number of nodes equal so that training and rollout share nodes:
  - Configure `trainer.nnodes = rollout.nnodes` with `trainer.n_gpus_per_node + rollout.n_gpus_per_node = physical_gpus_per_node`, and control the rollout resource allocation by adjusting `n_gpus_per_node`.
- **Resource-abundant scenario**: Optimize performance by adjusting the number of nodes, keeping the number of GPUs per node equal so that training and rollout parallelism can scale independently:
  - Configure `trainer.n_gpus_per_node = rollout.n_gpus_per_node`, and control the rollout resource allocation by adjusting `trainer.nnodes` and `rollout.nnodes` to achieve optimal performance.

> **Note**: The total number of nodes required by the system is not simply `trainer.nnodes + rollout.nnodes`. The actual number depends on GPU capacity:
> - When `trainer.n_gpus_per_node + rollout.n_gpus_per_node <= physical_gpus_per_node`,
>   the required node count is `max(trainer.nnodes, rollout.nnodes)`
> - When `trainer.n_gpus_per_node + rollout.n_gpus_per_node > physical_gpus_per_node`,
>   the required node count is `trainer.nnodes + rollout.nnodes`
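The node-count rule in the note above can be expressed as a small helper. This is an illustrative sketch only (the function is hypothetical, not part of verl):

```python
# Worked example of the required-node-count rule from the note above.
def required_nodes(trainer_nnodes, trainer_gpus_per_node,
                   rollout_nnodes, rollout_gpus_per_node,
                   physical_gpus_per_node=8):
    if trainer_gpus_per_node + rollout_gpus_per_node <= physical_gpus_per_node:
        # trainer and rollout can share each node
        return max(trainer_nnodes, rollout_nnodes)
    # trainer and rollout need separate nodes
    return trainer_nnodes + rollout_nnodes

print(required_nodes(1, 6, 1, 2))  # 6 + 2 <= 8  -> 1 node (shared)
print(required_nodes(2, 8, 1, 8))  # 8 + 8 > 8   -> 3 nodes (separate)
```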
Megatron | | rollout engine | vLLM | | AdvantageEstimator | GRPO
GRPO_PASSK
REINFORCE_PLUS_PLUS
RLOO
OPO
REINFORCE_PLUS_PLUS_BASELINE
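To make the node-count rule from the note above concrete, here is a minimal sketch; the `physical_gpus_per_node=8` default and the numbers are hypothetical:

```python
def required_nodes(trainer_nnodes: int, trainer_gpus: int,
                   rollout_nnodes: int, rollout_gpus: int,
                   physical_gpus_per_node: int = 8) -> int:
    # If training and rollout fit side by side on one node, they share nodes;
    # otherwise each needs its own set of nodes.
    if trainer_gpus + rollout_gpus <= physical_gpus_per_node:
        return max(trainer_nnodes, rollout_nnodes)
    return trainer_nnodes + rollout_nnodes

# The FSDP2 example above: 6 training GPUs + 2 rollout GPUs share a single node.
assert required_nodes(1, 6, 1, 2, physical_gpus_per_node=8) == 1
```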
================================================
FILE: verl_distillation/docs/advance/placement.rst
================================================

Ray API Design Tutorial
=======================================

Last updated: 10/30/2024.

We provide a tutorial for our Ray API design, including:

- Ray basic concepts
- Resource Pool and RayWorkerGroup
- Data Dispatch, Execution and Collection
- Initialize the RayWorkerGroup and execute the distributed computation in the given Resource Pool

See details in `tutorial.ipynb `_.

================================================
FILE: verl_distillation/docs/advance/ppo_lora.rst
================================================

RL(HF) algorithms with LoRA Support
===========================================

Last updated: 06/05/2025.

We support LoRA (Low-Rank Adaptation) for reinforcement learning algorithms such as PPO, GRPO, and others.

LoRA is a parameter-efficient fine-tuning technique that injects trainable low-rank matrices into pre-trained weights (typically linear layers). This reduces the memory footprint and compute cost, making it possible to fine-tune large models with limited hardware.

The benefits this brings include:

- reinforcement learning with very large models (e.g. 70B+) on modest hardware (e.g. 8x80G GPUs),
- larger batch sizes due to reduced memory usage,
- simpler model transfer and deployment, as only the LoRA adapters need to be saved,
- combination with techniques like `SLoRA `_ or `CCoE `_ to serve multiple LoRA adapters efficiently

This guide explains how to enable LoRA in RL training and configure related parameters.

Usage Guide
------------------------

1. LoRA is available in the `verl.trainer.ppo.ray_trainer.RayPPOTrainer`. Examples are provided via the `verl.trainer.main_ppo` entry point.

2. Currently, LoRA is supported via huggingface peft, only with the fsdp/fsdp2 and vllm backends (sglang support coming soon).

   - `strategy=fsdp` or `strategy=fsdp2`
   - `rollout.name=vllm`

3. Required configurations for LoRA:

   - `actor_rollout_ref.model.lora_rank`: int, set to a reasonable value greater than 0 (e.g., 8, 16, 32, 64)
   - `actor_rollout_ref.model.lora_alpha`: float, the alpha term in LoRA
   - `actor_rollout_ref.rollout.load_format="safetensors"`: required. This enables vLLM to load the base model.
   - `actor_rollout_ref.model.target_modules`: the target modules for LoRA. Typically set to "all-linear".

4. Optional configurations for LoRA:

   - `actor_rollout_ref.model.lora_adapter_path`: string, path to a pretrained LoRA adapter directory. If provided, the existing adapter is loaded instead of creating a new one, which enables multi-stage training from previously saved adapters. The directory must contain `adapter_model.safetensors` and `adapter_config.json`.

5. Recommended options:

   - `actor_rollout_ref.model.use_shm=True`: preload the model into `/dev/shm` to improve model loading speed.
   - `actor_rollout_ref.rollout.layered_summon=True`: this enables the actor model to gather the FSDP shards layer by layer when synchronizing the LoRA adapter to vLLM, thereby reducing peak GPU memory. Recommended if the model is very large (70B+) or GPU memory is limited (< 48GB).

Best Practices and Notes
-------------------------

1. **Learning rate**: it is recommended to increase the learning rate by an order of magnitude.
2. **LoRA Rank**:

   - Too small a rank can hurt convergence.
   - LoRA rank recommendation from @thelongestusernameofall:

     - A very small lora_rank can lead to slower convergence or worse training performance.
       It is recommended to set lora_rank to be >= 32. Tests have shown that for a 0.5B model,
       with lora_rank=32, the training convergence speed and final performance are almost
       identical to non-LoRA training.
     - For a 32B model, with lora_rank=128, the training convergence speed and final
       performance are also almost identical to non-LoRA training.
     - More comprehensive reference results are coming soon.

.. image:: https://github.com/eric-haibin-lin/verl-community/blob/f2b80b8b26829124dd393b7a795a0640eff11644/docs/lora.jpg?raw=true

3. Reference configuration for RL training with the Qwen2.5-72B model using 8 x 80GB GPUs (increase lora_rank if needed):

.. code-block::

    data.train_batch_size=64 \
    actor_rollout_ref.model.use_shm=True \
    actor_rollout_ref.model.lora_rank=32 \
    actor_rollout_ref.model.lora_alpha=32 \
    actor_rollout_ref.model.target_modules=all-linear \
    actor_rollout_ref.actor.optim.lr=3e-5 \
    actor_rollout_ref.actor.fsdp_config.fsdp_size=8 \
    actor_rollout_ref.actor.fsdp_config.param_offload=True \
    actor_rollout_ref.actor.fsdp_config.optimizer_offload=True \
    actor_rollout_ref.rollout.tensor_model_parallel_size=8 \
    actor_rollout_ref.rollout.name=vllm \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \
    actor_rollout_ref.rollout.n=5 \
    actor_rollout_ref.rollout.max_num_seqs=64 \
    actor_rollout_ref.rollout.max_model_len=1536 \
    actor_rollout_ref.rollout.max_num_batched_tokens=1536 \
    actor_rollout_ref.rollout.load_format=safetensors \
    actor_rollout_ref.rollout.layered_summon=True \
    actor_rollout_ref.ref.fsdp_config.param_offload=True \
    actor_rollout_ref.actor.ulysses_sequence_parallel_size=1 \

Example Scripts
-------------------

For end-to-end examples, refer to the scripts below:

- LoRA training from scratch: examples/grpo_trainer/run_qwen2_5-3b_gsm8k_grpo_lora.sh
- LoRA training from adapter path: examples/grpo_trainer/run_qwen2_5-3b_gsm8k_grpo_lora_from_adapter.sh

================================================
FILE: verl_distillation/docs/advance/reward_loop.rst
================================================

Reward Loop
===========

.. _yyding: https://yyding1.github.io

Author: `Yuyang Ding `_

Last updated: 10/23/2025.

.. warning:: Reward Loop is ready for use, but the API may change in future releases.

Reward Loop is designed for more flexible and easy-to-use reward computation.

**Design goal**:

- Make reward computation more efficient
- Support a broader reward model interface (including discriminative and generative models)
- Make user-customized reward functions more flexible

.. image:: https://github.com/yyDing1/verl-materials/blob/main/reward_loop_overview.svg?raw=true

Async Reward Computation
------------------------

RewardLoopManager
~~~~~~~~~~~~~~~~~

The Reward Loop refactors the design of the reward manager so that each sample is processed asynchronously in the ``run_single`` function. This asynchronous design enables the Reward Loop to handle multiple reward computations concurrently, significantly improving computation efficiency.

.. code:: python

    class RewardLoopManagerBase(ABC):
        async def run_single(self, data: DataProto) -> dict:
            # ... (data preprocessing)
            if self.is_async_reward_score:
                result = await self.compute_score(
                    data_source=data_source,
                    solution_str=response_str,
                    ground_truth=ground_truth,
                    extra_info=extra_info,
                    reward_router_address=self.reward_router_address,
                    reward_model_tokenizer=self.reward_model_tokenizer,
                )
            else:
                result = await self.loop.run_in_executor(
                    None,
                    lambda: self.compute_score(
                        data_source=data_source,
                        solution_str=response_str,
                        ground_truth=ground_truth,
                        extra_info=extra_info,
                        reward_router_address=self.reward_router_address,
                        reward_model_tokenizer=self.reward_model_tokenizer,
                    ),
                )
            # ... (reward postprocessing)
            return final_result

User-defined reward functions can be implemented as either synchronous or asynchronous. ``RewardLoopManager`` automatically detects the type of the user-defined function and executes it accordingly, ensuring that the reward computation process remains non-blocking.

User-Customized Reward Function
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Users can define custom reward functions, for instance, by integrating external generative rewards or rule-based rewards to accommodate diverse scenario requirements. To facilitate this, the Reward Loop directly exposes the reward model interface, enabling complex reward computation pipelines that involve model-based scoring.

A user-defined reward function may look like the following:

.. code:: python

    async def compute_score_gsm8k(
        data_source: str,
        solution_str: str,
        ground_truth: str,
        extra_info: dict,
        reward_router_address: str,
        reward_model_tokenizer: PreTrainedTokenizer,
    ):
        """Compute the reward score."""
        # Step 1: Prepare prompt and request payload
        grm_prompt = GRM_PROMPT_TEMPLATE.format(problem=extra_info["question"], solution=solution_str)
        messages = [{"role": "user", "content": grm_prompt}]
        sampling_params = {"temperature": 0.7, "top_p": 0.8, "max_tokens": 4096}
        chat_complete_request = {"messages": messages, **sampling_params}

        # Step 2: Send async request to the reward model
        # here, chat_complete sends an async http request to the router address
        result = await chat_complete(
            router_address=reward_router_address,
            chat_complete_request=chat_complete_request,
        )

        # Step 3: Parse model response and extract score
        grm_response = result.choices[0].message.content.strip()
        try:
            score_str = grm_response.split("\n\n")[-1].strip()
            score = int(score_str)
        except Exception:
            score = 0
        return {"score": score}

Runnable examples are provided in the ``recipe/fapo`` directory for reference.

Reward Models and Router
------------------------

To support flexible and scalable reward model computation, the Reward Loop implements a reward router that coordinates requests among multiple reward model servers. Each reward model runs as an independent server and is registered with the router. The router forwards requests to the registered reward servers with load balancing and returns the results.

This design allows us to expose a single unified router address to user-defined reward functions, enabling them to access various reward models seamlessly through the same interface.

RewardModelManager
~~~~~~~~~~~~~~~~~~

.. image:: https://github.com/yyDing1/verl-materials/blob/main/reward_loop_full.svg?raw=true

``RewardModelManager`` will launch multiple reward servers and register them in the reward router.

.. code:: python

    class RewardModelManager:
        """Reward model manager."""

        def __init__(self, config: RewardModelConfig, worker_group: RayWorkerGroup = None):
            """
            Initialize the reward model manager.

            Args:
                config (RewardModelConfig): Reward model configuration.
                worker_group (RayWorkerGroup, optional): Worker group. Defaults to None.
            """
            self.config = config
            self.worker_group = worker_group
            self._initialize_llm_servers()
            self._initialize_router()
            if self.config.rollout.free_cache_engine:
                self.sleep()

Reward Router
~~~~~~~~~~~~~

The router forwards requests to the registered reward servers with load balancing.

- For sglang reward servers, we directly use the sglang router to forward the requests.
- For vllm reward servers, we implement a simple round-robin ``NaiveRouter`` to dispatch the requests.

.. code:: python

    class NaiveRouter:
        def __init__(
            self,
            worker_urls: list[str],
            max_connections: int = 1024,
            timeout: int = 60,
            max_attempts: int = 3,
            retry_delay: float = 2.0,
            verbose: bool = False,
        ):
            """A minimal async load-balancing router."""
            self.verbose = verbose
            self.worker_urls = worker_urls
            self.request_counts = {url: 0 for url in worker_urls}
            self.max_connections = max_connections
            self.timeout = timeout
            self.max_attempts = max_attempts
            self.retry_delay = retry_delay

            self.app = FastAPI()
            # Register startup / shutdown hooks
            self.app.on_event("startup")(self._on_startup)
            self.app.on_event("shutdown")(self._on_shutdown)
            # Catch-all proxy route
            self.app.api_route("/{endpoint:path}", methods=["GET", "POST"])(self._make_async_request)
            # Placeholder for aiohttp client
            self.client = None
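For intuition, the round-robin dispatch can be sketched as follows. This is a hypothetical simplification, not the actual ``_make_async_request`` handler; it assumes ``aiohttp`` and ignores retries, connection limits, and request counting:

.. code:: python

    import itertools

    import aiohttp


    class RoundRobinDispatcher:
        """Cycle through worker URLs so each request goes to the next server."""

        def __init__(self, worker_urls: list[str]):
            self._next_url = itertools.cycle(worker_urls).__next__

        async def forward(self, endpoint: str, payload: dict) -> dict:
            # Pick the next worker and proxy one POST request to it.
            url = f"{self._next_url()}/{endpoint}"
            async with aiohttp.ClientSession() as session:
                async with session.post(url, json=payload) as resp:
                    return await resp.json()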
Agent Reward Loop
-----------------

Reward Loop can be integrated with AgentLoop to enable sample-wise rollout and reward computation.

.. image:: https://github.com/yyDing1/verl-materials/blob/main/agent_reward_loop.svg?raw=true

================================================
FILE: verl_distillation/docs/advance/rollout_is.md
================================================

# Rollout Importance Sampling

**Author:** [Yingru Li](https://richardli.xyz/)

Last updated: 10/27/2025.

This document provides a comprehensive overview of the Rollout Importance Sampling (IS) implementation in verl.

### BibTeX Citation

```bibtex
@misc{liu-li-2025,
  title  = {When Speed Kills Stability: Demystifying RL Collapse from the Inference-Training Mismatch},
  url    = {https://yingru.notion.site/When-Speed-Kills-Stability-Demystifying-RL-Collapse-from-the-Inference-Training-Mismatch-271211a558b7808d8b12d403fd15edda},
  author = {Jiacai Liu and Yingru Li and Yuqian Fu and Jiawei Wang and Qian Liu and Yu Shen},
  year   = {2025},
  month  = sep,
}
```

## Overview

Rollout Importance Sampling corrects for the distribution mismatch between:

- **Rollout policy**: e.g., vLLM with BFloat16
- **Training policy**: e.g., FSDP with FP32

This mismatch can lead to biased gradient estimates and unstable training. Rollout IS applies importance sampling weights to correct these biases.

### Key Design Principle: Separation of IS Weights and Rejection Sampling

**Important**: As of 10/27/2025, the implementation separates two mechanisms:

1. **IS Weights** (`rollout_is_weights`): Ratios π_train/π_rollout with processing:
   - **Safety-bounded** to [exp(-20), exp(20)] ≈ [2e-9, 5e8] to prevent overflow:
     * Token level: Bounds per-token ratios
     * Sequence level: Bounds the product of ratios (broadcast to all tokens in the sequence)
     * Geometric level: Bounds the geometric mean of ratios (broadcast to all tokens)
   - **Truncate mode**: Upper clamped via .clamp(max=upper_threshold)
   - **Mask mode**: Safety-bounded ratios preserved (no threshold clamping)
   - **All modes**: Zeroed at padding positions (response_mask == 0)
   - Used for policy gradient calculations
2. **Rejection Sampling** (`modified_response_mask`): Applied via response_mask
   - Mask mode: Excludes tokens/sequences with outlier IS ratios
   - Veto: Excludes sequences with catastrophic tokens
   - Used for loss aggregation (denominator calculation)

This separation ensures:

- ✅ Correct loss normalization (rejected samples excluded from the denominator)
- ✅ Mode-specific weight processing (truncate: upper clamped, mask: safety-bounded only)
- ✅ Padding positions zeroed in weights (necessary for correct aggregation)
- ✅ Safety bounds always applied (prevent overflow in all modes)

## Configuration

```yaml
# Rollout IS configuration (all in algorithm config)
algorithm:
  # Main control: set threshold to enable (null = disabled)
  rollout_is_threshold: 2.0
  # Whether to apply weights to loss (default: false = metrics only)
  rollout_is: true
  rollout_is_threshold_lower: null  # Auto-reciprocal
  rollout_is_level: token
  rollout_is_mode: truncate
  rollout_is_veto_threshold: null  # Disable veto by default

# REQUIRED: Enable log prob calculation
actor_rollout_ref:
  rollout:
    calculate_log_probs: true
```

Key features:

- ✅ Three aggregation levels: token, sequence, geometric
- ✅ Two bounding modes: truncate, mask
- ✅ Dual threshold support (upper/lower)
- ✅ Veto mechanism for catastrophic outliers
- ✅ 30+ comprehensive metrics
- ✅ Log-space computation for numerical stability
- ✅ Memory-efficient implementation

## Files

### **Core Implementation**

- `verl/trainer/ppo/mismatch_helper.py` - Contains `compute_rollout_importance_weights()` and `compute_is_metrics()`
- `verl/trainer/ppo/core_algos.py` - Rollout IS integration with PPO
- `verl/workers/actor/dp_actor.py` - Metrics collection and logging

### **Configuration Files**

- `verl/trainer/config/algorithm.py` - Rollout IS parameters in `AlgoConfig`
- `verl/workers/config/actor.py` - Rollout IS parameters in `ActorConfig`
- `verl/trainer/config/actor/actor.yaml` - Rollout IS configuration section
- `verl/trainer/config/ppo_trainer.yaml` - Algorithm config with rollout IS

### **Documentation**

- `docs/examples/config.rst` - Configuration parameter descriptions

### **Example Scripts**

- `recipe/dapo/run_dapo_qwen2.5_32b_rollout_is.sh` - DAPO example with rollout IS
- `examples/rollout_importance_sampling/README.md` - Comprehensive usage guide
- `examples/rollout_importance_sampling/run_with_rollout_is.sh` - Basic example

### **Tests**

- `tests/trainer/ppo/test_rollout_is.py` - Unit tests
- `tests/trainer/ppo/test_rollout_is_integration.py` - Integration tests

## Configuration Parameters

### `algorithm.rollout_is_threshold` (float or null)

**Main on/off switch.** Upper threshold for IS weights.

- `null` = disabled (no computation, no metrics)
- `float` value (e.g., 2.0) = enabled (compute weights and metrics)

### `algorithm.rollout_is` (bool)

Whether to apply IS weights to the policy loss. Default: `False`

- `true` = apply weights to loss (full IS correction)
- `false` = metrics-only mode (no weight correction, but rejection still applies)

**IMPORTANT**: This flag controls IS weight application, NOT rejection sampling. See "Operation Modes" below.

**Recommended threshold ranges:**

- Token level: 1.5 - 5.0
- Sequence level: 2.0 - 10.0
- Geometric level: 1.0002 - 1.001

### `algorithm.rollout_is_threshold_lower` (float or null)

Lower threshold for IS weights. If `null`, defaults to 1/upper (reciprocal).
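For illustration, the reciprocal default can be expressed as a one-liner (a hypothetical helper, not the verl implementation):

```python
def resolve_is_thresholds(upper: float, lower: float | None = None) -> tuple[float, float]:
    # When no lower threshold is configured, default to the reciprocal of the
    # upper one, e.g. upper=2.0 -> bounds [0.5, 2.0].
    return (1.0 / upper if lower is None else lower, upper)
```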
### `algorithm.rollout_is_level` (str) Aggregation level for IS weights: - `"token"`: Per-token ratios ρ_t = π_train(t)/π_rollout(t) - Each token has its own IS weight - Safety bound: each token's ratio bounded to [exp(-20), exp(20)] - Biased estimator but low variance - `"sequence"`: Product of ratios ρ_seq = ∏_t ρ_t for entire sequence - All tokens in a sequence share the same IS weight (product of per-token ratios) - Safety bound: product bounded to [exp(-20), exp(20)], then broadcast to all tokens - Unbiased estimator but high variance - `"geometric"`: Geometric mean ρ_geo = (∏_t ρ_t)^(1/T) (experimental) - All tokens in a sequence share the same IS weight (geometric mean) - Safety bound: geometric mean bounded to [exp(-20), exp(20)], then broadcast to all tokens - Trade-off between bias and variance ### `algorithm.rollout_is_mode` (str) Bounding mode for handling outlier IS weights: - `"truncate"`: Clamp weights at upper threshold only (TIS) - No lower bound clamping or rejection for outlier ratios - **IS weights modified**: Upper bound clamped via .clamp(max=upper_threshold) - Lower bound remains at exp(-20) ≈ 2e-9 from safety bound - **Note**: Veto-based rejection can still occur via response_mask (see `rollout_is_veto_threshold`) - `"mask"`: Rejection sampling via response_mask (MIS) - Rejects tokens/sequences with IS ratios outside [lower, upper] - **Important**: Rejection applied to `response_mask`, NOT by modifying IS weights - **IS weights**: Safety-bounded ratios preserved (no threshold clamping, rejection via mask) - **Note**: Veto-based rejection also applies via response_mask (independent mechanism) ### `algorithm.rollout_is_veto_threshold` (float or None) Per-token veto threshold for catastrophic outliers. - If any token has **unclamped** ratio < this threshold, the entire sequence is rejected via `response_mask` - Veto checks the **true per-token ratio** π_train(t)/π_rollout(t) before any bounds are applied - Applied for all levels (token, sequence, geometric) - always checks individual token ratios - Default: `None` (veto disabled by default) - Recommended: `1e-4` to `1e-6` when enabled (catches extreme outliers like 10,000x off) - Set to `None` to disable veto mechanism - **Important**: Applied **independently** of `rollout_is_mode` (works in both truncate and mask modes) - Veto applies rejection to `response_mask`, NOT by modifying IS weights - **IS weights unchanged by veto**: Already processed by mode (truncate: clamped, mask: safety-bounded) ### Summary: How IS Weights are Processed The final IS weights go through multiple stages of processing: **Stage 1: Safety Bound (All Modes)** - Token level: `exp(clamp(log_ratio, -20, 20))` per token → bounds each token to [2e-9, 5e8] - Sequence level: `exp(clamp(sum(log_ratio), -20, 20))` → bounds product to [2e-9, 5e8], broadcast to all tokens - Geometric level: `exp(clamp(mean(log_ratio), -20, 20))` → bounds geometric mean to [2e-9, 5e8], broadcast to all tokens **Stage 2: Threshold Processing (Mode-Dependent)** - Truncate mode: `.clamp(max=upper_threshold)` → upper clamps weights to threshold - Mask mode: No modification → weights remain as safety-bounded ratios **Stage 3: Padding (All Modes)** - `weights * response_mask` → zeros out padding positions **Rejection Mechanisms (Modify response_mask, NOT weights)** - Veto: Checks **unclamped per-token ratios** (before safety bound), rejects sequences via mask - Outlier (mask mode only): Checks safety-bounded weights against [lower, upper], rejects via mask ## Operation Modes 
The system has **two independent control flags** that combine to create different operation modes: 1. **`rollout_is_threshold`**: Main on/off switch (None = disabled, float = enabled) 2. **`rollout_is`**: Apply IS weights to loss (True/False) ### Mode Combinations | `rollout_is_threshold` | `rollout_is` | `rollout_is_mode` | Behavior | |------------------------|--------------|-------------------|----------| | `None` | any | any | **Disabled**: No computation, no metrics, no rejection | | `2.0` | `False` | `truncate` | **Metrics only**: Compute weights & metrics, NO weight correction, NO rejection for outliers | | `2.0` | `False` | `mask` | **Rejection only**: Compute weights & metrics, NO weight correction, YES rejection sampling | | `2.0` | `True` | `truncate` | **Truncate mode**: Weight correction enabled, weights upper-clamped, NO rejection for outliers | | `2.0` | `True` | `mask` | **Mask mode (full)**: Weight correction enabled, rejection sampling enabled | ### Key Insights **Rejection sampling is ALWAYS applied when:** - `rollout_is_threshold` is set (not None) - AND `rollout_is_mode = "mask"` - **Regardless of the `rollout_is` flag** This means: - ✅ You can use **rejection sampling alone** without IS weight correction (`rollout_is=False, rollout_is_mode="mask"`) - ✅ You can use **IS weights alone** without outlier rejection (`rollout_is=True, rollout_is_mode="truncate"`) - ✅ You can use **both together** (`rollout_is=True, rollout_is_mode="mask"`) - ✅ You can **monitor metrics only** without any correction or outlier rejection (`rollout_is=False, rollout_is_mode="truncate"`) **Veto rejection** (if enabled via `rollout_is_veto_threshold`) is applied **independently** in all modes where `rollout_is_threshold` is set. ### Recommended Workflow 1. **Start with metrics only** to understand the mismatch: ```yaml rollout_is_threshold: 2.0 rollout_is: false rollout_is_mode: truncate ``` Monitor `mismatch/rollout_is_mean`, `mismatch/mismatch_kl` to assess distribution mismatch. 2. **Enable rejection sampling** if you see high outlier fractions: ```yaml rollout_is_threshold: 2.0 rollout_is: false rollout_is_mode: mask # Rejection now applies ``` This excludes outliers from training without modifying gradients. 3. **Enable full IS correction** once comfortable with metrics: ```yaml rollout_is_threshold: 2.0 rollout_is: true rollout_is_mode: mask # Both rejection and weight correction ``` ## Usage ### Basic Setup ```yaml algorithm: rollout_is_threshold: 2.0 # Main control rollout_is: true # Apply to loss (default: false) rollout_is_level: token rollout_is_mode: truncate actor_rollout_ref: rollout: calculate_log_probs: true # Required! ``` ### Metrics All metrics are prefixed with `mismatch/`. For example, `rollout_is_mean` appears as `mismatch/rollout_is_mean` in logs. 
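Before the individual metrics, here is a minimal sketch of the effective-sample-size formula referenced under "Effective Sample Size" below. It is a hypothetical helper over normalized weights, not the verl implementation:

```python
import torch

def effective_sample_size(weights: torch.Tensor, response_mask: torch.Tensor) -> float:
    # ESS = 1 / mean(w^2) over valid tokens, with weights normalized to mean 1;
    # returns a fraction of the original batch in (0, 1].
    w = weights[response_mask.bool()]
    w = w / w.mean()
    return (1.0 / (w * w).mean()).item()
```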
#### **Core IS Weight Metrics** - **`rollout_is_mean`**: Mean importance sampling weight across all valid tokens - **Ideal value**: Close to 1.0 (indicates minimal distribution mismatch) - **Warning**: < 0.5 or > 2.0 suggests significant policy mismatch - **`rollout_is_std`**: Standard deviation of IS weights - **Ideal value**: < 0.5 for stable training - **Warning**: > 1.0 indicates high variance, may need tighter thresholds - **`rollout_is_min`**: Minimum IS weight observed - Shows the most underweighted token/sequence - For sequence/geometric: computed from unclamped log-space ratios (true minimum) - For token: computed from safety-bounded weights - **`rollout_is_max`**: Maximum IS weight observed - Shows the most overweighted token/sequence - For sequence/geometric: computed from unclamped log-space ratios (true maximum before safety bound) - For token: computed from safety-bounded weights (before threshold clamping) - Compare with `rollout_is_threshold` to see truncation impact #### **Effective Sample Size** - **`rollout_is_eff_sample_size`**: Effective sample size after IS weighting - **Formula**: `1 / mean(weights²)` where weights are normalized - **Range**: 0.0 to 1.0 (as fraction of original batch) - **Ideal value**: > 0.5 (retaining at least 50% effective samples) - **Warning**: < 0.3 means high variance, losing too many effective samples #### **Veto Mechanism Metrics** - **`rollout_is_veto_fraction`**: Fraction of sequences rejected by veto mechanism - **Important**: Sequences are rejected via `response_mask=0`, NOT by modifying IS weights - **IS weights unchanged by veto**: Already processed by mode (truncate: clamped, mask: safety-bounded) - Veto checks **unclamped per-token ratios** π_train(t)/π_rollout(t) (true ratios before safety bound) - Detects catastrophic tokens (true ratio < veto_threshold, e.g., < 1e-4) - **Ideal value**: < 0.05 (less than 5% vetoed) - **Warning**: > 0.1 suggests policies are too different or numerical issues - **`rollout_is_catastrophic_token_fraction`**: Fraction of tokens below veto threshold - Identifies problematic tokens before sequence-level veto is applied - Checks **unclamped per-token ratios** (true ratios, not safety-bounded) - Each catastrophic token causes its entire sequence to be rejected - **Warning**: > 0.01 indicates widespread distribution issues or numerical instability #### **Threshold Exceedance Metrics** - **`rollout_is_ratio_fraction_high`**: Fraction of weights exceeding upper threshold - Shows how often truncation/masking occurs on high end - For sequence/geometric: computed from unclamped log-space ratios (true exceedance) - For token: computed from safety-bounded weights (before threshold clamping) - **Ideal value**: < 0.1 (most weights within bounds) - **`rollout_is_ratio_fraction_low`**: Fraction of weights below lower threshold - Shows how often masking occurs on low end (mask mode only) - For sequence/geometric: computed from unclamped log-space ratios (true exceedance) - For token: computed from safety-bounded weights - **Ideal value**: < 0.1 #### **Sequence-Level Metrics** (for sequence/geometric modes) - **`rollout_is_seq_mean`**: Mean IS weight at sequence level - Should match `rollout_is_mean` for sequence-level aggregation - **`rollout_is_seq_std`**: Standard deviation of sequence-level IS weights - **`rollout_is_seq_min`**: Minimum sequence-level IS weight - **`rollout_is_seq_max`**: Maximum sequence-level IS weight - **`rollout_is_seq_max_deviation`**: Maximum absolute deviation from 1.0 at sequence level - 
**Ideal value**: < 1.0 - Shows worst-case sequence mismatch - **`rollout_is_seq_fraction_high`**: Fraction of sequences exceeding upper threshold - **`rollout_is_seq_fraction_low`**: Fraction of sequences below lower threshold #### **Masking Metrics** (mask mode only) - **`rollout_is_masked_fraction`**: Fraction of tokens rejected via response_mask (mask mode only) - **Important**: Tokens are rejected by setting `response_mask=0`, NOT by modifying IS weights - **IS weights in mask mode**: Safety-bounded ratios preserved (no threshold clamping) - **Ideal value**: < 0.1 (less than 10% rejected) - **Warning**: > 0.3 means losing too much data - **`rollout_is_seq_masked_fraction`**: Fraction of sequences with at least one rejected token - Shows sequence-level impact of rejection sampling - For token-level: sequence rejected if ANY token is outside [lower, upper] - For sequence-level: all tokens have same weight, so entire sequence rejected or accepted #### **Distribution Mismatch Metrics** (Training vs Rollout Policy) - **`mismatch_training_ppl`**: Perplexity of training policy (e.g., FSDP FP32) - **Formula**: `exp(-mean(log_probs))` - Lower is better (model is more confident) - **`mismatch_rollout_ppl`**: Perplexity of rollout policy (e.g., vLLM BF16) - Should be close to `mismatch_training_ppl` if policies match well - **`mismatch_ppl_ratio`**: Ratio of training PPL to rollout PPL - **Formula**: `exp(mean(log(training_ppl / rollout_ppl)))` - **Ideal value**: Close to 1.0 - **Meaning**: > 1.0 means training is less confident than rollout - **`mismatch_training_log_ppl`**: Log perplexity of training policy - Useful for identifying trends (linear scale) - **`mismatch_rollout_log_ppl`**: Log perplexity of rollout policy - **`mismatch_log_ppl_diff`**: Mean difference in log perplexities - **Formula**: `mean(log_ppl_rollout - log_ppl_training)` - **Ideal value**: Close to 0.0 - Sign indicates which policy is more confident - **`mismatch_log_ppl_abs_diff`**: Mean absolute log perplexity difference - Magnitude of mismatch regardless of direction - **`mismatch_log_ppl_diff_max`**: Maximum log perplexity difference across sequences - Identifies worst-case sequence - **`mismatch_log_ppl_diff_min`**: Minimum log perplexity difference across sequences - **`mismatch_kl`**: KL divergence KL(π_rollout || π_training) - **Formula**: `mean(log_prob_rollout - log_prob_training)` - **Ideal value**: Close to 0.0 (policies match) - **Warning**: > 0.1 indicates significant mismatch - **Note**: Can be negative (rollout is less confident) - **`mismatch_k3_kl`**: K3 KL estimator - **Formula**: `mean(exp(log_ratio) - log_ratio - 1)` - More stable for small KL values - Always non-negative #### **Example: Accessing Metrics in Code** ```python # Metrics are returned from compute_rollout_importance_weights from verl.trainer.ppo.mismatch_helper import compute_rollout_importance_weights # NEW: Returns 3 values (weights, modified_response_mask, metrics) weights_proto, modified_response_mask, metrics = compute_rollout_importance_weights( old_log_prob=training_log_probs, # from training policy rollout_log_prob=rollout_log_probs, # from rollout policy response_mask=response_mask, rollout_is_level="token", rollout_is_mode="mask", # Using mask mode for rejection sampling rollout_is_threshold=2.0, rollout_is_threshold_lower=0.5, rollout_is_veto_threshold=1e-4, # Enable veto for catastrophic outliers ) # Extract IS weights (processed, zeroed at padding) is_weights = weights_proto.batch["rollout_is_weights"] # IS weights processing 
(mask mode with token level): # 1. Safety-bounded: exp(clamp(log_ratio, -20, 20)) per token # 2. Mask mode: no threshold clamping (safety-bounded ratios preserved) # 3. Zeroed at padding positions # modified_response_mask has rejection applied: # 1. Outlier rejection: tokens outside [0.5, 2.0] masked to 0 (mask mode) # 2. Veto rejection: sequences with catastrophic tokens (ratio < 1e-4) masked to 0 # Note: Veto checks unclamped per-token ratios, not the safety-bounded weights # All metrics have 'mismatch/' prefix print(f"Mean IS weight: {metrics['mismatch/rollout_is_mean']:.3f}") print(f"Effective sample size: {metrics['mismatch/rollout_is_eff_sample_size']:.3f}") print(f"Veto fraction: {metrics['mismatch/rollout_is_veto_fraction']:.3f}") print(f"Masked fraction: {metrics['mismatch/rollout_is_masked_fraction']:.3f}") print(f"KL divergence: {metrics['mismatch/mismatch_kl']:.3f}") # Check IS weights for valid tokens (non-padding) valid_weights = is_weights[response_mask.bool()] print(f"\n✓ IS weights min (valid tokens): {valid_weights.min():.4f}") print(f"✓ IS weights max (valid tokens): {valid_weights.max():.4f}") print(f"✓ All valid IS weights > 0: {(valid_weights > 0).all()}") # Check rejection via response_mask rejected_tokens = (response_mask == 1) & (modified_response_mask == 0) print(f"\n✓ Rejected {rejected_tokens.sum()} tokens via response_mask") print(f"✓ In mask mode: IS weights for rejected tokens are NON-ZERO (safety-bounded ratios)") print(f"✓ In truncate mode: IS weights upper clamped to {rollout_is_threshold}") print(f"✓ Both modes: IS weights safety-bounded to [exp(-20), exp(20)] ≈ [2e-9, 5e8]") # Check for warning conditions if metrics['mismatch/rollout_is_mean'] < 0.5 or metrics['mismatch/rollout_is_mean'] > 2.0: print("⚠️ Warning: Mean IS weight far from 1.0, significant policy mismatch detected") if metrics['mismatch/rollout_is_eff_sample_size'] < 0.3: print("⚠️ Warning: Low effective sample size, high variance in IS weights") if metrics['mismatch/rollout_is_veto_fraction'] > 0.1: print("⚠️ Warning: High veto fraction, policies may be too different") ``` #### **Example: Monitoring Metrics During Training** ```python # In your training loop for epoch in range(num_epochs): for batch_idx, batch in enumerate(dataloader): # ... rollout phase ... # Compute IS weights and get metrics (NEW: 3 return values) weights_proto, modified_response_mask, metrics = compute_rollout_importance_weights( old_log_prob=batch.old_log_prob, rollout_log_prob=batch.rollout_log_prob, response_mask=batch.response_mask, rollout_is_level=config.rollout_is_level, rollout_is_mode=config.rollout_is_mode, rollout_is_threshold=config.rollout_is_threshold, rollout_is_threshold_lower=config.rollout_is_threshold_lower, rollout_is_veto_threshold=config.rollout_is_veto_threshold, ) # Log to tensorboard/wandb for metric_name, metric_value in metrics.items(): logger.log_scalar(metric_name, metric_value, step=global_step) # IMPORTANT: Update batch response_mask with rejection applied batch.response_mask = modified_response_mask # Use IS weights in training (processed based on mode) # Truncate mode: upper clamped to min(weight, upper_threshold) # Mask mode: safety-bounded ratios preserved (no threshold clamping) # Both modes: safety bounded to [exp(-20), exp(20)], zeroed at padding is_weights = weights_proto.batch["rollout_is_weights"] # ... apply weights to policy gradient ... 
``` #### **Example: Conditional Alerting Based on Metrics** ```python def check_rollout_is_health(metrics, config): """Check if rollout IS metrics indicate healthy training.""" warnings = [] # Check mean IS weight mean_weight = metrics['mismatch/rollout_is_mean'] if mean_weight < 0.5 or mean_weight > 2.0: warnings.append(f"Mean IS weight {mean_weight:.3f} is far from 1.0") # Check effective sample size ess = metrics['mismatch/rollout_is_eff_sample_size'] if ess < 0.3: warnings.append(f"Effective sample size {ess:.3f} is too low") # Check veto fraction veto_frac = metrics['mismatch/rollout_is_veto_fraction'] if veto_frac > 0.1: warnings.append(f"Veto fraction {veto_frac:.3f} is too high") # Check variance std = metrics['mismatch/rollout_is_std'] if std > 1.0: warnings.append(f"IS weight std {std:.3f} is too high") # Check KL divergence kl = metrics['mismatch/mismatch_kl'] if abs(kl) > 0.1: warnings.append(f"KL divergence {kl:.3f} indicates significant mismatch") if warnings: print("⚠️ Rollout IS Health Warnings:") for warning in warnings: print(f" - {warning}") return False else: print("✅ Rollout IS metrics look healthy") return True # Use in training (NEW: 3 return values) _, _, metrics = compute_rollout_importance_weights(...) is_healthy = check_rollout_is_health(metrics, config) if not is_healthy: # Consider adjusting config or investigating issues print("Consider:") print(" - Tightening rollout_is_threshold") print(" - Switching to geometric aggregation level") print(" - Checking if rollout and training policies are too different") ``` ### Running Examples Start with the basic token-level truncate configuration: ```bash bash examples/rollout_importance_sampling/run_with_rollout_is.sh ``` Monitor metrics for 1-2 epochs before adjusting parameters. ## Configuration Examples ### Example 1: Full IS Correction ```yaml algorithm: rollout_is_threshold: 2.0 rollout_is: true # Apply weights to loss rollout_is_level: token rollout_is_mode: truncate ``` ### Example 2: Metrics Only (Monitoring Mode) ```yaml algorithm: rollout_is_threshold: 2.0 rollout_is: false # Compute metrics, don't apply weights rollout_is_level: token rollout_is_mode: truncate ``` ### Example 3: Geometric Mean with Mask ```yaml algorithm: rollout_is_threshold: 1.0002 rollout_is: true rollout_is_threshold_lower: 0.9998 rollout_is_level: geometric rollout_is_mode: mask ``` ### Example 4: Asymmetric Thresholds ```yaml algorithm: rollout_is_threshold: 5.0 rollout_is: true rollout_is_threshold_lower: 0.8 rollout_is_level: token rollout_is_mode: mask ``` ## Troubleshooting ### Issue: High variance in IS weights **Symptoms:** `rollout_is_std` > 1.0, `rollout_is_eff_sample_size` < 0.3 **Solutions:** 1. Switch from `sequence` to `geometric` level 2. Tighten thresholds 3. Verify rollout and training aren't too different ### Issue: Too many sequences vetoed **Symptoms:** `rollout_is_veto_fraction` > 0.1 **Solutions:** 1. Relax veto threshold: `rollout_is_veto_threshold: 1e-3` 2. Check for numerical issues in log prob computation 3. Verify policies aren't completely different ### Issue: Mean IS weight far from 1.0 **Symptoms:** `rollout_is_mean` < 0.5 or > 2.0 **Solutions:** 1. Verify `calculate_log_probs=True` is set 2. Check rollout_log_probs are correctly passed 3. 
Check for systematic bias ### Debugging: Visualizing Metrics **Example: Plot IS weight distribution** ```python import matplotlib.pyplot as plt import numpy as np def plot_is_metrics(metrics_history): """Plot rollout IS metrics over training steps.""" fig, axes = plt.subplots(2, 3, figsize=(15, 10)) # Plot 1: Mean IS weight over time axes[0, 0].plot(metrics_history['mismatch/rollout_is_mean']) axes[0, 0].axhline(y=1.0, color='r', linestyle='--', label='Ideal') axes[0, 0].set_title('Mean IS Weight') axes[0, 0].set_xlabel('Step') axes[0, 0].legend() # Plot 2: Effective sample size axes[0, 1].plot(metrics_history['mismatch/rollout_is_eff_sample_size']) axes[0, 1].axhline(y=0.5, color='g', linestyle='--', label='Good') axes[0, 1].axhline(y=0.3, color='r', linestyle='--', label='Warning') axes[0, 1].set_title('Effective Sample Size') axes[0, 1].set_xlabel('Step') axes[0, 1].legend() # Plot 3: Veto fraction axes[0, 2].plot(metrics_history['mismatch/rollout_is_veto_fraction']) axes[0, 2].axhline(y=0.1, color='r', linestyle='--', label='Warning') axes[0, 2].set_title('Veto Fraction') axes[0, 2].set_xlabel('Step') axes[0, 2].legend() # Plot 4: KL divergence over time axes[1, 0].plot(metrics_history['mismatch/mismatch_kl'], label='KL') axes[1, 0].plot(metrics_history['mismatch/mismatch_k3_kl'], label='K3 KL') axes[1, 0].axhline(y=0, color='g', linestyle='--', alpha=0.3) axes[1, 0].set_title('KL Divergence') axes[1, 0].set_xlabel('Step') axes[1, 0].legend() # Plot 5: PPL ratio over time axes[1, 1].plot(metrics_history['mismatch/mismatch_ppl_ratio']) axes[1, 1].axhline(y=1.0, color='r', linestyle='--', label='Ideal') axes[1, 1].set_title('PPL Ratio (Training/Rollout)') axes[1, 1].set_xlabel('Step') axes[1, 1].legend() # Hide unused subplot axes[1, 2].axis('off') plt.tight_layout() plt.savefig('rollout_is_metrics.png', dpi=150) print("Saved plot to rollout_is_metrics.png") ``` **Example: Metric collection during training** ```python # Collect metrics over time metrics_history = { 'mismatch/rollout_is_mean': [], 'mismatch/rollout_is_eff_sample_size': [], 'mismatch/rollout_is_veto_fraction': [], 'mismatch/mismatch_kl': [], 'mismatch/mismatch_k3_kl': [], 'mismatch/mismatch_ppl_ratio': [], } # In training loop for step in range(num_steps): # ... compute IS weights ... (NEW: 3 return values) _, _, metrics = compute_rollout_importance_weights(...) 
    # Store metrics
    for key in metrics_history.keys():
        if key in metrics:
            metrics_history[key].append(metrics[key])

    # Plot every 100 steps
    if step % 100 == 0:
        plot_is_metrics(metrics_history)
```

## Performance Impact

- **Memory overhead**: ~1% of model memory
- **Computational overhead**: 1-3% depending on level
- **Training stability**: Significantly improved when a mismatch exists

## Testing

Run the test suite to verify everything works:

```bash
# Basic unit tests
python tests/trainer/ppo/test_rollout_is.py

# Integration tests (if pytest is available)
pytest tests/trainer/ppo/test_rollout_is_integration.py -v
```

Expected output: All tests pass ✓

## Additional Resources

- **Implementation**: `verl/trainer/ppo/mismatch_helper.py`
- **Examples**: `examples/rollout_importance_sampling/`
- **DAPO Example**: `recipe/dapo/run_dapo_qwen2.5_32b_rollout_is.sh`

## Summary

Rollout Importance Sampling provides:

- ✅ Robust handling of distribution mismatch
- ✅ Numerical stability
- ✅ Comprehensive metrics for monitoring
- ✅ Flexibility for different scenarios
- ✅ Memory-efficient computation

## References

- [When Speed Kills Stability: Demystifying RL Collapse from the Inference-Training Mismatch](https://yingru.notion.site/When-Speed-Kills-Stability-Demystifying-RL-Collapse-from-the-Inference-Training-Mismatch-271211a558b7808d8b12d403fd15edda)
- [Your Efficient RL Framework Secretly Brings You Off-Policy RL Training](https://fengyao.notion.site/off-policy-rl)

================================================
FILE: verl_distillation/docs/advance/rollout_skip.rst
================================================

RolloutSkip Function Usage Documentation
========================================

Last updated: 08/01/2025.

Applicable Scenarios
--------------------

The RolloutSkip functionality is designed to accelerate the rollout process in reinforcement learning training by caching and reusing previously generated sequences. This feature is particularly useful when:

1. You need to repeatedly run experiments with the same configuration
2. You want to save time by avoiding redundant sequence generation

API and Usage Example
----------------------

2.1 Trainer Adaptation
~~~~~~~~~~~~~~~~~~~~~~

Both ``RayDAPOTrainer`` (in ``verl/recipe/dapo/dapo_ray_trainer.py``) and ``RayPPOTrainer`` (in ``verl/trainer/ppo/ray_trainer.py``) have already been adapted. This is an example of how to patch rollout_skip in RayPPOTrainer.

.. code-block:: python

    #* Import the RolloutSkip class
    from verl.utils.rollout_skip import RolloutSkip

    ...

    class RayPPOTrainer:
        ...

        def fit(self):
            ...
            #* Add code as follows:
            rollout_skip = RolloutSkip(self.config, self.actor_rollout_wg)
            rollout_skip.wrap_generate_sequences()
            ...
            for epoch in range(self.config.trainer.total_epochs):
                for batch_dict in self.train_dataloader:
                    ...

2.2 Basic Configuration
~~~~~~~~~~~~~~~~~~~~~~~

Then, you should add the following parameters to your config to enable the RolloutSkip feature:

.. code-block:: bash

    actor_rollout_ref.rollout.skip_rollout=True \
    actor_rollout_ref.rollout.skip_dump_dir="/tmp/rollout_dump" \

Note:

1. The `skip_dump_dir` is the directory where the cached sequences will be stored. Ensure that this directory is writable and accessible by your training process. Also make sure that `skip_dump_dir` is not a relative path, because Ray stores data in `/tmp/ray/session_/` and a relative path will not be found in the worker.
2. The dumped data path follows the naming pattern `{experiment_name}_{project_name}_TrainGBS{train_gbs}__InferGBS{gen_gbs}__N{n}`; once you change `experiment_name`, `project_name`, `train_gbs`, `gen_gbs`, or `n`, the cached data will be stored in a new directory.
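A quick illustration of this naming pattern, using hypothetical values:

.. code-block:: python

    experiment_name, project_name = "demo_exp", "demo_proj"  # hypothetical values
    train_gbs, gen_gbs, n = 256, 256, 8                      # hypothetical values
    dump_name = f"{experiment_name}_{project_name}_TrainGBS{train_gbs}__InferGBS{gen_gbs}__N{n}"
    assert dump_name == "demo_exp_demo_proj_TrainGBS256__InferGBS256__N8"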
================================================
FILE: verl_distillation/docs/advance/rollout_trace.rst
================================================

Trace Function Usage Instructions
========================================

Last updated: 07/10/2025.

Applicable Scenarios
--------------------

Agentic RL involves multiple turns of conversation, tool invocations, and user interactions during the rollout process. During model training, it is necessary to track function calls, inputs, and outputs to understand how data flows through the application.

In complex multi-turn conversations, the Trace feature records the inputs, outputs, and timestamps of functions, letting you view how data is transformed during each interaction and the entire process leading to the final output. This helps in understanding how the model processes data and in optimizing training results.

The Trace feature integrates commonly used agent trace tools; wandb weave and mlflow are already supported. Users can choose the appropriate trace tool according to their own needs and preferences. Here, we introduce the usage of each tool.

Trace Parameter Configuration
-----------------------------

- ``actor_rollout_ref.rollout.trace.backend=mlflow|weave`` # the trace backend type
- ``actor_rollout_ref.rollout.trace.token2text=True`` # show decoded text in the trace view

Glossary
--------

+----------------+----------------------------------------------------------------------+
| Object         | Explanation                                                          |
+================+======================================================================+
| trajectory     | A complete multi-turn conversation, including:                       |
|                | 1. at least one LLM output                                           |
|                | 2. tool calls                                                        |
+----------------+----------------------------------------------------------------------+
| step           | The training step; corresponds to the global_steps variable         |
|                | in the trainer                                                       |
+----------------+----------------------------------------------------------------------+
| sample_index   | The identifier of the sample, defined in the extra_info.index of    |
|                | the dataset. It is usually a number, but may also be a uuid.         |
+----------------+----------------------------------------------------------------------+
| rollout_n      | In the GRPO algorithm, each sample is rolled out n times.            |
|                | rollout_n is the serial number of the rollout.                       |
+----------------+----------------------------------------------------------------------+
| validate       | Whether the sample comes from the test dataset used for evaluation   |
+----------------+----------------------------------------------------------------------+

Rollout trace functions
-----------------------

There are 2 functions used for tracing:

1. ``rollout_trace_op``: A decorator used to mark the functions to trace. By default, only a few methods have it; you can add it to more functions to trace more information.
2. ``rollout_trace_attr``: Marks the entry of a trajectory and attaches identifying info to the trace. If you add a new type of agent, you may need to add it to enable tracing.
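A minimal usage sketch of the two functions with a hypothetical agent; it assumes ``rollout_trace_op`` decorates async methods and ``rollout_trace_attr`` is used as a context manager, both imported from ``verl.utils.rollout_trace``:

.. code-block:: python

    from verl.utils.rollout_trace import rollout_trace_attr, rollout_trace_op

    class MyAgentLoop:
        @rollout_trace_op  # record this method's inputs, outputs, and timestamps
        async def run(self, prompt: str) -> str:
            ...

    async def run_trajectory(agent: MyAgentLoop, prompt: str, step: int,
                             sample_index: int, rollout_n: int) -> str:
        # Mark the trajectory entry and attach step/sample_index/rollout_n tags
        with rollout_trace_attr(step=step, sample_index=sample_index,
                                rollout_n=rollout_n, validate=False,
                                name="agent_loop"):
            return await agent.run(prompt)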
Usage of wandb weave
--------------------

1.1 Basic Configuration
~~~~~~~~~~~~~~~~~~~~~~~

1. Set the ``WANDB_API_KEY`` environment variable
2. Configuration Parameters

   1. ``actor_rollout_ref.rollout.trace.backend=weave``
   2. ``trainer.logger=['console', 'wandb']``: This item is optional. Trace and logger are independent functions. When using Weave, it is recommended to also enable the wandb logger so that both functions live in one system.
   3. ``trainer.project_name=$project_name``
   4. ``trainer.experiment_name=$experiment_name``
   5. ``actor_rollout_ref.rollout.mode=async``: Since trace is mainly used for agentic RL, the agent loop must be enabled using async mode for either vllm or sglang.

Note: The Weave Free Plan comes with a default monthly network traffic allowance of 1GB. During training, the amount of trace data generated is substantial, reaching dozens of gigabytes per day, so it is necessary to select an appropriate wandb plan.

1.2 View Trace Logs
~~~~~~~~~~~~~~~~~~~

After executing the training, on the project page you can see the WEAVE sidebar. Click Traces to view it. Each trace entry corresponds to a trajectory. You can filter and select the trajectories you need to view by step, sample_index, rollout_n, and experiment_name.

After enabling token2text, prompt_text and response_text will be automatically added to the output of ToolAgentLoop.run, making it convenient to view the input and output content.

.. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/weave_trace_list.png?raw=true

1.3 Compare Trace Logs
~~~~~~~~~~~~~~~~~~~~~~

Weave can select multiple trace items and then compare the differences among them.

.. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/weave_trace_compare.png?raw=true

Usage of mlflow
---------------

1. Basic Configuration
~~~~~~~~~~~~~~~~~~~~~~

1. Set the ``MLFLOW_TRACKING_URI`` environment variable, which can be:

   1. HTTP and HTTPS URLs corresponding to online services
   2. Local files or directories, such as ``sqlite:////tmp/mlruns.db``, indicating that data is stored in ``/tmp/mlruns.db``. When using local files, it is necessary to initialize the file first (e.g., start the UI: ``mlflow ui --backend-store-uri sqlite:////tmp/mlruns.db``) to avoid conflicts when multiple workers create files simultaneously.

2. Configuration Parameters

   1. ``actor_rollout_ref.rollout.trace.backend=mlflow``
   2. ``trainer.logger=['console', 'mlflow']``: This item is optional. Trace and logger are independent functions. When using mlflow, it is recommended to also enable the mlflow logger so that both functions live in one system.
   3. ``trainer.project_name=$project_name``
   4. ``trainer.experiment_name=$experiment_name``

2. View Log
~~~~~~~~~~~

Since ``trainer.project_name`` corresponds to Experiments in mlflow, in the mlflow view you need to select the corresponding project name, then click the "Traces" tab to view traces. ``trainer.experiment_name`` corresponds to the experiment_name tag, and tags corresponding to step, sample_index, rollout_n, etc. are used for filtering and viewing. For example, searching for ``"tags.step = '1'"`` displays all trajectories of step 1.

.. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/mlflow_trace_list.png?raw=true

Opening one of the trajectories allows you to view each function call process within it.
After enabling token2text, prompt_text and response_text will be automatically added to the output of ToolAgentLoop.run, making it convenient to view the content.

.. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/mlflow_trace_view.png?raw=true

Note:

1. mlflow does not support comparing multiple traces
2. rollout_trace cannot associate the mlflow trace with the run, so the trace content cannot be seen in the mlflow run logs.

================================================
FILE: verl_distillation/docs/advance/rope.rst
================================================

RoPE Scaling override
=======================================

Last updated: 05/14/2025.

Some models, such as `Qwen/Qwen2.5-7B-Instruct `_, support RoPE Scaling but don't have it defined in their config.json file.

For example, this model supports this configuration:

.. code:: python

    {
        ...,
        "rope_scaling": {
            "factor": 4.0,
            "original_max_position_embeddings": 32768,
            "type": "yarn"
        }
    }

In order to support a longer context for such models, you must override the model configs when starting the trainer.

PPO example:

.. code:: bash

    +actor_rollout_ref.model.override_config.rope_scaling.type=yarn \
    +actor_rollout_ref.model.override_config.rope_scaling.factor=4.0 \
    +actor_rollout_ref.model.override_config.rope_scaling.original_max_position_embeddings=32768 \

And for the critic model:

.. code:: bash

    +critic.model.override_config.rope_scaling.type=yarn \
    +critic.model.override_config.rope_scaling.factor=4.0 \
    +critic.model.override_config.rope_scaling.original_max_position_embeddings=32768 \

================================================
FILE: verl_distillation/docs/algo/baseline.md
================================================

# Algorithm Baselines

Last updated: 06/18/2025.

## Math related datasets

### GSM8k

Assuming the GSM8k/math dataset is preprocessed via:

```bash
python3 examples/data_preprocess/*.py
```

Refer to the table below to reproduce RL training from different pre-trained checkpoints. Below is the performance on the GSM8k dataset if not specified otherwise. More comprehensive benchmark results are available in the recipe folder.
| Hardware | Model | Method | Test score | Details |
|-------------|----------------------------------|-------------------|--------------|---------|
| NVIDIA GPU | google/gemma-2-2b-it | hf checkpoint | 23.9 | [Huggingface](https://huggingface.co/google/gemma-2-2b-it#benchmark-results) |
| NVIDIA GPU | google/gemma-2-2b-it | SFT | 52.06 | [command and logs](https://github.com/eric-haibin-lin/verl-data/blob/experiments/gsm8k/gemma-2-2b-it-sft-0.411.log) |
| NVIDIA GPU | google/gemma-2-2b-it | SFT + PPO | 64.02 | [command and logs](https://github.com/eric-haibin-lin/verl-data/blob/experiments/gsm8k/gemma-2-2b-it-ppo-bsz512_4-prompt1024-resp-512-0.640.log), [wandb](https://api.wandb.ai/links/verl-team/h7ux8602) |
| NVIDIA GPU | Qwen/Qwen2.5-0.5B-Instruct | hf checkpoint | 36.4 | [Qwen blog](https://qwenlm.github.io/blog/qwen2.5-llm/) |
| NVIDIA GPU | Qwen/Qwen2.5-0.5B-Instruct | PPO | 56.7 | [command and log](https://github.com/eric-haibin-lin/verl-data/blob/experiments/gsm8k/Qwen2.5-0.5B-bsz256_2-prompt1024-resp512-0.567.log) |
| NVIDIA GPU | Qwen/Qwen2.5-0.5B-Instruct | PRIME | 58.7 | [script](https://github.com/volcengine/verl/blob/main/recipe/prime/run_prime_qwen.sh), [wandb](https://api.wandb.ai/links/zefan-wang-thu-tsinghua-university/rxd1btvb) |
| NVIDIA GPU | Qwen/Qwen2.5-0.5B-Instruct | GRPO-LoRA | 54.3 | [command and logs](https://github.com/eric-haibin-lin/verl-data/blob/experiments/gsm8k/Qwen2.5-0.5B-bsz64_2-prompt512-resp1024-lorarank32-score0.543.log) |
| NVIDIA GPU | Qwen/Qwen2.5-1.5B-Instruct | GRPO-LoRA | 77.9 | [command and logs](https://github.com/eric-haibin-lin/verl-data/blob/experiments/gsm8k/Qwen2.5-1.5B-bsz64_2-prompt512-resp1024-lorarank32-score0.779.log) |
| NVIDIA GPU | Qwen/Qwen2.5-3B-Instruct | GRPO-LoRA | 86.1 | [command and logs](https://github.com/eric-haibin-lin/verl-data/blob/experiments/gsm8k/Qwen2.5-3B-bsz64_2-prompt512-resp1024-lorarank32-score0.861.log) |
| NVIDIA GPU | deepseek-ai/deepseek-llm-7b-chat | PPO (Megatron) | 69.5 [1] | [log](https://github.com/eric-haibin-lin/verl-data/blob/experiments/gsm8k/deepseek-llm-7b-chat-megatron-bsz256_4-prompt512-resp512-0.695.log), [wandb](https://wandb.ai/verl-team/verl_megatron_gsm8k_examples/runs/10fetyr3) |
| NVIDIA GPU | Qwen/Qwen2-7B-Instruct | GRPO | 89 | [script](https://github.com/volcengine/verl/blob/a65c9157bc0b85b64cd753de19f94e80a11bd871/examples/grpo_trainer/run_qwen2-7b_seq_balance.sh) |
| NVIDIA GPU | Qwen/Qwen2-7B-Instruct | GRPO (FSDP2) | 89.8 | [log](https://github.com/eric-haibin-lin/verl-data/blob/experiments/gsm8k/qwen2-7b-fsdp2.log) |
| NVIDIA GPU | Qwen/Qwen2-7B-Instruct | GRPO (Megatron) | 89.6 | [log](https://github.com/eric-haibin-lin/verl-data/blob/experiments/gsm8k/qwen2-7b_math_megatron.log) |
| NVIDIA GPU | Qwen/Qwen2.5-7B-Instruct | ReMax | 97 | [script](https://github.com/eric-haibin-lin/verl/blob/main/examples/remax_trainer/run_qwen2.5-3b_seq_balance.sh), [wandb](https://wandb.ai/liziniu1997/verl_remax_example_gsm8k/runs/vxl10pln) |
| NVIDIA GPU | Qwen/Qwen2.5-7B-Instruct | SPPO | 65.6 (MATH) | [SPPO script](https://github.com/volcengine/verl/tree/main/recipe/sppo/README.md) |
| NVIDIA GPU | Qwen/Qwen2.5-7B-Instruct | GRPO-LoRA | 93.4 | [command and logs](https://github.com/eric-haibin-lin/verl-data/blob/experiments/gsm8k/Qwen2.5-7B-bsz64_8-prompt512-resp1024-lorarank32-score0.934.log) |
| NVIDIA GPU | Mixtral-8x22B-Instruct-v0.1 | Instruct model | 83.7 | [Qwen Blog](https://qwenlm.github.io/blog/qwen2.5-llm/) |
| NVIDIA GPU | Mixtral-8x22B-Instruct-v0.1 | RLOO (Megatron) | 92.3 | [wandb](https://api.wandb.ai/links/ppo_dev/sbuiuf2d) |
| NVIDIA GPU | Qwen/Qwen2.5-7B-Instruct | SPIN | 92 | [script](https://github.com/volcengine/verl/tree/main/recipe/spin/README.md) |
| NVIDIA GPU | Qwen/Qwen2-7B-Instruct | GPG | 88 | [log](https://github.com/diqiuzhuanzhuan/verldata/blob/main/run_logs/qwen2-7b_math.log), [wandb](https://wandb.ai/diqiuzhuanzhuan/verl_gpg_example_gsm8k_math/runs/ab86c4va) |
| NVIDIA GPU | Qwen/Qwen2-7B-Instruct | GPG (Megatron) | 88 | [log](https://github.com/diqiuzhuanzhuan/verldata/blob/main/run_logs/qwen2-7b_math_megatron.log), [wandb](https://wandb.ai/diqiuzhuanzhuan/verl_gpg_example_gsm8k_math/runs/yy8bheu8) |
| NVIDIA GPU | Qwen/Qwen2.5-VL-7B-Instruct | GRPO (Megatron) | 65.4 (GEO3k) | [script](https://github.com/volcengine/verl/blob/main/examples/grpo_trainer/run_qwen2_5_vl-7b-megatron.sh), [wandb](https://api.wandb.ai/links/megatron-core-moe-dev/1yngvkek) |
| AMD MI300 | deepseek-ai/deepseek-llm-7b-chat | PPO | 70.5 [1] | [log](https://github.com/yushengsu-thu/verl_training_log/blob/main/gsm8k/ppo_run_deepseek7b_llm.log) |
| AMD MI300 | deepseek-ai/deepseek-llm-7b-chat | GRPO | 71.4 [1] | [log](https://github.com/yushengsu-thu/verl_training_log/blob/main/gsm8k/grpo_run_deepseek7b_llm.log) |
| NVIDIA GPU | Qwen/Qwen2.5-14B-Instruct | GRPO-LoRA | 94.6 | [command and logs](https://github.com/eric-haibin-lin/verl-data/blob/experiments/gsm8k/Qwen2.5-14B-bsz64_8-prompt512-resp1024-lorarank32-score0.946.log) |
| NVIDIA GPU | Qwen/Qwen2.5-32B-Instruct | GRPO-LoRA | 95.8 | [command and logs](https://github.com/eric-haibin-lin/verl-data/blob/experiments/gsm8k/Qwen2.5-32B-bsz64_8-prompt512-resp1024-lorarank32-score0.958.log) |
| NVIDIA GPU | Qwen/Qwen2.5-72B-Instruct | GRPO-LoRA | 96.0 | [command and logs](https://github.com/eric-haibin-lin/verl-data/blob/experiments/gsm8k/Qwen2.5-72B-bs64_8-prompt512-resp1024-lorarank32-score0.960.log) |

### DAPO math-17k

- Training DAPO math-17k dataset: https://huggingface.co/datasets/BytedTsinghua-SIA/DAPO-Math-17k
- Testing: AIME'24: https://huggingface.co/datasets/BytedTsinghua-SIA/AIME-2024

Note:

- For Qwen/Qwen2.5-Math-7B, we directly modify the max_position_embeddings to 32768 without observing performance degradation, in order to train a longer response length.

| Hardware | Model | Method | Test score | Details |
|-------------|-----------------------------|-------------------------|------------|---------|
| NVIDIA GPU | Qwen/Qwen2.5-Math-7B (32k) | DAPO | 36.3 | [command](https://github.com/volcengine/verl/blob/main/recipe/dapo/test_dapo_7b_math.sh), [logs](https://wandb.ai/verl-org/DAPO%20Reproduction%20on%20verl/runs/ow47vvon?nw=nwusertongyuxuan361) |
| NVIDIA GPU | Qwen/Qwen2.5-7B-Instruct | DAPO + Code Interpreter | 40.0 | [command](https://github.com/volcengine/verl/blob/main/recipe/retool/run_qwen2_7b_dapo.sh) |

## Coding related datasets

Below is the result on LeetCode if not specified otherwise.

| Hardware | Model | Method | Test score | Details |
|-------------|----------------------------------|-------------------|--------------|---------|
| NVIDIA GPU | PRIME-RL/Eurus-2-7B-SFT | PRIME | 36.1 | [script](https://github.com/volcengine/verl/blob/main/recipe/prime/run_prime_qwen_code.sh), [swanlab](https://swanlab.cn/@wangzefan/prime_example/runs/7f541qhspgmy8nmhdlx35/chart) |

### Notes

[1] During evaluation, we have only extracted answers following the format `"####"`. A more flexible answer extraction, longer response length, and better prompt engineering may lead to a higher score.

[2] The default value of `actor_rollout_ref.actor.entropy_coeff` is set to `0.0` since verl 0.3.x on 2025-05-30, which is different from previous versions.
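The `"####"` extraction mentioned in note [1] can be sketched as follows (a hypothetical minimal extractor, not the evaluation code used for these runs):

```python
import re

def extract_final_answer(response: str) -> str | None:
    # GSM8k-style responses state the final answer after a "####" marker;
    # grab the last such occurrence and strip thousands separators.
    matches = re.findall(r"####\s*([-+]?[\d,]*\.?\d+)", response)
    return matches[-1].replace(",", "") if matches else None

assert extract_final_answer("... so the total is 42.\n#### 42") == "42"
```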
[2] The default value of `actor_rollout_ref.actor.entropy_coeff` has been set to `0.0` since verl 0.3.x (2025-05-30), which differs from previous versions.


================================================
FILE: verl_distillation/docs/algo/collabllm.md
================================================

# Recipe: CollabLLM

Last updated: 09/22/2025.

> Open-Source Algorithm Implementation & Experiment Running: [Haiquan Chen](https://github.com/chenhaiq), [Shirley Wu](https://github.com/Wuyxin)

🏠 [Homepage](https://aka.ms/CollabLLM) | 📝 [Paper](https://arxiv.org/pdf/2502.00640) | 🤗 [Datasets & Models](https://huggingface.co/collabllm) | ⭐️ [Original Implementation](https://github.com/Wuyxin/collabllm)

`verl` provides a recipe for the Outstanding Paper at ICML 2025, **"CollabLLM: From Passive Responders to Active Collaborators"**. [CollabLLM](https://aka.ms/CollabLLM) is a unified fine-tuning framework that optimizes LLMs for effective and efficient multiturn collaboration with users.

**Core Idea:** Models are rewarded based on how well their responses enable effective *future* collaboration with users.

Paper Authors: [Shirley Wu](https://cs.stanford.edu/~shirwu/), [Michel Galley](https://www.microsoft.com/en-us/research/people/mgalley/), Baolin Peng, Hao Cheng, Gavin Li, Yao Dou, Weixin Cai, [James Zou](https://www.james-zou.com/), [Jure Leskovec](https://cs.stanford.edu/people/jure/), [Jianfeng Gao](https://www.microsoft.com/en-us/research/people/jfgao/)

---

## Quick Start

### 0. Environment

Make sure the required packages for `verl` are installed. Additionally, install `litellm` and export the required API keys. The API model will be used for user simulators and, optionally, LLM Judges (see the Configuration section below).

### 1. Prepare Your Dataset

First, process your dataset using the provided script (see example commands and usage in `process_dataset.py`):

```bash
python process_dataset.py --dataset <> ... --dataset_type
```

**Requirements:**

- Input: A Hugging Face multiturn dataset. Existing datasets: `collabllm/collabllm-multiturn-$DATASET`, with `DATASET` in one of [`math-hard(-large)`, `medium(-large)`, `bigcodebench(-large)`] (*-large are the datasets used in the CollabLLM paper)
- Example format: See [collabllm-multiturn-math-hard](https://huggingface.co/datasets/collabllm/collabllm-multiturn-math-hard)
- To generate your own dataset: Use [build_dataset.py](https://github.com/Wuyxin/collabllm/blob/main/scripts/engine/build_dataset.py) from the original CollabLLM repository

### 2. Train Your Model

**(Optional) For Supervised Fine-Tuning (SFT):**

```bash
bash train_sft_collabllm.sh
```

**For Reinforcement Learning (RL):**

```bash
bash train_rl_collabllm.sh
```

The RL script shows an example of training CollabLLM on `math-hard-large`.

- The config for sampling future conversations is in `recipe/collabllm/config/collabllm_interaction_config.yaml`.
- The Multiturn-aware Reward is aggregated from these three conversation-level rewards:

```
+reward_model.reward_kwargs.metric_weights.accuracy=1 \
+reward_model.reward_kwargs.metric_weights.interactivity=1 \
+reward_model.reward_kwargs.metric_weights.token_amount=-0.0001 \
```

You can remove, add, or modify the weights depending on your task. A list of already-implemented metrics you can add is under `recipe/collabllm/metrics`. For example, on `medium-large`, you can replace `accuracy` with `bleu_score` via

```
+reward_model.reward_kwargs.metric_weights.bleu_score=1
```

which will instead apply the BLEU score to the sampled future conversations.
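To make the aggregation concrete, here is a minimal sketch of how a Multiturn-aware Reward could be combined from per-metric scores under the weights above. This is illustrative only, not the recipe's actual code: the function name `aggregate_multiturn_reward` and the example scores are hypothetical.

```python
def aggregate_multiturn_reward(metric_scores: dict[str, list[float]],
                               metric_weights: dict[str, float]) -> float:
    """Weighted sum of metrics, each first averaged across the
    num_repeat_rollouts sampled future conversations."""
    reward = 0.0
    for name, weight in metric_weights.items():
        scores = metric_scores[name]
        reward += weight * (sum(scores) / len(scores))
    return reward

# Example with the weights from the RL script above and 3 sampled futures:
weights = {"accuracy": 1.0, "interactivity": 1.0, "token_amount": -0.0001}
scores = {
    "accuracy": [1.0, 0.0, 1.0],        # pass/fail per sampled conversation
    "interactivity": [0.7, 0.8, 0.6],   # judge-rated collaborativeness
    "token_amount": [850, 920, 790],    # tokens used (penalized)
}
print(aggregate_multiturn_reward(scores, weights))  # -> about 1.2813
```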
## Algorithm

| Step | Name | Description |
|------|------|-------------|
| 1 | Model response generation | The model generates multiple responses for each prompt in a batch. |
| 2 | Collaborative simulation | A user simulator (e.g., GPT or Claude) samples `num_repeat_rollouts` conversations for up to `max_user_turns` additional turns. |
| 3 | Compute Multiturn-aware Reward | Customized conversational reward functions are applied to the sampled conversations. Rewards are aggregated, then averaged across rollouts. |
| 4 | Update model | The model weights are updated using the computed multiturn-aware rewards. |

---

## Configuration

The primary configuration is managed through the launch script `train_rl_collabllm.sh` and the YAML file `recipe/collabllm/config/collabllm_interaction_config.yaml`. Key configuration sections:

| Section | Key Parameters / Notes |
|----------------------|-----------------------------------------------------------------------------------------|
| `data` | Paths to training/validation files, batch sizes, sequence lengths. |
| `actor_rollout_ref` (common) | Base model path (used for actor + initial reference), FSDP settings, optimization (LR, scheduler). |
| `actor_rollout_ref` (CollabLLM-specific) | Hyperparameters under `actor_rollout_ref.rollout.multi_turn`: `max_user_turns`, `max_assistant_turns`, `num_repeat_rollouts`. |
| `interaction` | Defined in `collabllm_interaction_config.yaml`. Specifies user simulator and hyperparameters. Requires exported API keys. |
| `reward_model` | Manager set to `collabllm` by default. Modify `reward_model.reward_kwargs.metric_weights` for conversational rewards and weights. LLM Judge hyperparameters (e.g., `model`, `temperature`) go under `reward_model.reward_kwargs.llm_judge_kwargs`. |
| `algorithm` | GRPO-specific hyperparameters such as `actor_rollout_ref.rollout.n`. |
| `trainer` | Distributed training (nodes, GPUs per node), logging (WandB), checkpointing frequency. |

---

## Key Files

| File Path | Purpose |
|-----------|---------|
| `recipe/collabllm/collabllm_agent_loop.py` | Main logic to sample future conversations, using `CollabLLMInteraction` from `verl/interactions/collabllm_interaction.py`. |
| `verl/workers/reward_manager/collabllm.py` | Computes rewards for future conversations, leveraging `recipe/collabllm/reward_function.py` to apply each metric. |

---

## Acknowledgement

We sincerely thank the `verl` community and advisors for their contributions and guidance!


================================================
FILE: verl_distillation/docs/algo/dapo.md
================================================

# Recipe: Decoupled Clip and Dynamic Sampling Policy Optimization (DAPO)

Last updated: 06/19/2025.

> Open-Source Algorithm Implementation & Experiment Running: [Yuxuan Tong](https://tongyx361.github.io/), [Guangming Sheng](https://hk.linkedin.com/in/guangming-sheng-b50640211)

🏠 [Homepage](https://dapo-sia.github.io/) | 📝 [Paper@arXiv](https://arxiv.org/abs/2503.14476) | 🤗 [Datasets&Models@HF](https://huggingface.co/collections/BytedTsinghua-SIA/dapo-67d7f1517ee33c8aed059da0) | 🐱 [Code@GitHub](https://github.com/volcengine/verl/tree/recipe/dapo/recipe/dapo) | 🐱 [Repo@GitHub](https://github.com/BytedTsinghua-SIA/DAPO)

> We propose the **D**ecoupled Clip and Dynamic s**A**mpling **P**olicy **O**ptimization (DAPO) algorithm.
> By making our work publicly available, we provide the broader research community and society with practical access to scalable reinforcement learning, enabling all to benefit from these advancements. Our system is based on the awesome [verl](https://github.com/volcengine/verl) framework. Thanks for their great work! Applying DAPO training to the Qwen2.5-32B base model outperforms the previous state-of-the-art DeepSeek-R1-Zero-Qwen-32B on AIME 2024, achieving **50%** accuracy with **50%** fewer training steps.
>
> ![dapo-main-result](https://dapo-sia.github.io/static/images/score.png)

## Quickstart

1. Prepare the datasets **on the Ray cluster**:

   ```bash
   bash prepare_dapo_data.sh # This downloads the datasets to ${HOME}/verl/data by default
   ```

2. Submit the job to the Ray cluster **from any machine**:

   ```bash
   cd verl # Repo root
   export RAY_ADDRESS="http://${RAY_IP:-localhost}:8265" # The Ray cluster address to connect to
   export WORKING_DIR="${PWD}" # The local directory to package to the Ray cluster
   # Set the runtime environment like env vars and pip packages for the Ray cluster in yaml
   export RUNTIME_ENV="./recipe/dapo/runtime_env.yaml" # This sets environment variables for the Ray cluster
   bash recipe/dapo/run_dapo_qwen2.5_32b.sh # or other scripts
   ```

## Reproduction Runs

| Setup | AIME 2024 Acc. | Hardware | Image | Commit | Environment Variables | Training Script | Training Record |
| --- | --- | --- | --- | --- | --- | --- | --- |
| DAPO | 52% | 16x8xH800 | `hiyouga/verl:ngc-th2.6.0-cu126-vllm0.8.3-flashinfer0.2.2-cxx11abi0` | [`4f80e4`](https://github.com/volcengine/verl/tree/4f80e465c2ec79ab9c3c30ec74b9745de61d0490) | [runtime_env.yaml](https://github.com/volcengine/verl/blob/4f80e465c2ec79ab9c3c30ec74b9745de61d0490/recipe/dapo/runtime_env.yaml) | [run_dapo_qwen2.5_32b.sh](https://github.com/volcengine/verl/blob/4f80e465c2ec79ab9c3c30ec74b9745de61d0490/recipe/dapo/run_dapo_qwen2.5_32b.sh) | [W&B](https://wandb.ai/verl-org/DAPO%20Reproduction%20on%20verl/workspace?nw=wmb4qxfht0n) |
| DAPO w/o Dynamic Sampling | 50% | 16x8xH800 | `hiyouga/verl:ngc-th2.6.0-cu126-vllm0.8.3-flashinfer0.2.2-cxx11abi0` | [`4f80e4`](https://github.com/volcengine/verl/tree/4f80e465c2ec79ab9c3c30ec74b9745de61d0490) | [runtime_env.yaml](https://github.com/volcengine/verl/blob/4f80e465c2ec79ab9c3c30ec74b9745de61d0490/recipe/dapo/runtime_env.yaml) | [run_dapo_wo_ds_qwen2.5_32b.sh](https://github.com/volcengine/verl/blob/4f80e465c2ec79ab9c3c30ec74b9745de61d0490/recipe/dapo/run_dapo_wo_ds_qwen2.5_32b.sh) | [W&B](https://wandb.ai/verl-org/DAPO%20Reproduction%20on%20verl/workspace?nw=wmb4qxfht0n) |
| DAPO w/o Token-level Loss & Dynamic Sampling | 44% | 16x8xH20 | `hiyouga/verl:ngc-th2.5.1-cu120-vllm0.7.4-hotfix` | [`4f80e4`](https://github.com/volcengine/verl/tree/4f80e465c2ec79ab9c3c30ec74b9745de61d0490) | [runtime_env.yaml](https://github.com/volcengine/verl/blob/4f80e465c2ec79ab9c3c30ec74b9745de61d0490/recipe/dapo/runtime_env.yaml) | [run_dapo_early_qwen2.5_32b.sh](https://github.com/volcengine/verl/blob/4f80e465c2ec79ab9c3c30ec74b9745de61d0490/recipe/dapo/run_dapo_early_qwen2.5_32b.sh) | [W&B](https://wandb.ai/verl-org/DAPO%20Reproduction%20on%20verl/workspace?nw=wmb4qxfht0n) |

> [!IMPORTANT]
>
> **📢 Call for Contribution!**
>
> Welcome to submit your reproduction runs and setups!

## Configuration

### Separated Clip Epsilons (-> Clip-Higher)

An example configuration:

```yaml
actor_rollout_ref:
  actor:
    clip_ratio_low: 0.2
    clip_ratio_high: 0.28
```

`clip_ratio_low` and `clip_ratio_high` specify the $\varepsilon_{\text{low}}$ and $\varepsilon_{\text{high}}$ in the DAPO objective.

Core relevant code:

```python
pg_losses1 = -advantages * ratio
pg_losses2 = -advantages * torch.clamp(ratio, 1 - cliprange_low, 1 + cliprange_high)
pg_losses = torch.maximum(pg_losses1, pg_losses2)
```

### Dynamic Sampling (with Group Filtering)

An example configuration:

```yaml
data:
  gen_batch_size: 1536
  train_batch_size: 512
algorithm:
  filter_groups:
    enable: True
    metric: acc # score / seq_reward / seq_final_reward / ...
    max_num_gen_batches: 10 # Non-positive values mean no upper limit
```

Setting `filter_groups.enable` to `True` will filter out groups whose outputs' `metric` values are all the same, e.g., for `acc`, groups whose outputs' accuracies are all 1 or all 0.

The trainer will repeat sampling with `gen_batch_size` until there are enough qualified groups for `train_batch_size`, or until reaching the upper limit specified by `max_num_gen_batches`.

Core relevant code:

```python
prompt_bsz = self.config.data.train_batch_size
if num_prompt_in_batch < prompt_bsz:
    print(f'{num_prompt_in_batch=} < {prompt_bsz=}')
    num_gen_batches += 1
    max_num_gen_batches = self.config.algorithm.filter_groups.max_num_gen_batches
    if max_num_gen_batches <= 0 or num_gen_batches < max_num_gen_batches:
        print(f'{num_gen_batches=} < {max_num_gen_batches=}. Keep generating...')
        continue
    else:
        raise ValueError(
            f'{num_gen_batches=} >= {max_num_gen_batches=}. Generated too many. Please check your data.'
        )
else:
    # Align the batch
    traj_bsz = self.config.data.train_batch_size * self.config.actor_rollout_ref.rollout.n
    batch = batch[:traj_bsz]
```

### Flexible Loss Aggregation Mode (-> Token-level Loss)

An example configuration:

```yaml
actor_rollout_ref:
  actor:
    loss_agg_mode: "token-mean" # / "seq-mean-token-sum" / "seq-mean-token-mean"
    # NOTE: "token-mean" is the default behavior
```

Setting `loss_agg_mode` to `token-mean` will average the (policy gradient) loss across all tokens in all sequences in a mini-batch.

Core relevant code:

```python
if loss_agg_mode == "token-mean":
    loss = verl_F.masked_mean(loss_mat, loss_mask)
elif loss_agg_mode == "seq-mean-token-sum":
    seq_losses = torch.sum(loss_mat * loss_mask, dim=-1)  # token-sum
    loss = torch.mean(seq_losses)  # seq-mean
elif loss_agg_mode == "seq-mean-token-mean":
    seq_losses = torch.sum(loss_mat * loss_mask, dim=-1) / torch.sum(loss_mask, dim=-1)  # token-mean
    loss = torch.mean(seq_losses)  # seq-mean
else:
    raise ValueError(f"Invalid loss_agg_mode: {loss_agg_mode}")
```

### Overlong Reward Shaping

An example configuration:

```yaml
data:
  max_response_length: 20480 # 16384 + 4096
reward_model:
  overlong_buffer:
    enable: True
    len: 4096
    penalty_factor: 1.0
```

Setting `overlong_buffer.enable` to `True` will penalize outputs whose lengths are overlong but still within the hard context limit. Specifically, the penalty increases linearly from `0` to `overlong_buffer.penalty_factor` as the output length exceeds `max_response_length - overlong_buffer.len` by `0` to `overlong_buffer.len` tokens.
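As a quick sanity check of this schedule, here is a small sketch (a hypothetical helper, mirroring the configured numbers above) of the penalty at a few response lengths:

```python
def overlong_penalty(response_len: int,
                     max_resp_len: int = 20480,
                     buffer_len: int = 4096,
                     penalty_factor: float = 1.0) -> float:
    """Linear overlong penalty in [-penalty_factor, 0]."""
    expected_len = max_resp_len - buffer_len  # 20480 - 4096 = 16384
    exceed_len = response_len - expected_len
    # Zero when the response stays within the expected length.
    return min(-exceed_len / buffer_len * penalty_factor, 0.0)

print(overlong_penalty(16000))  # 0.0  (within the expected length)
print(overlong_penalty(18432))  # -0.5 (halfway into the buffer)
print(overlong_penalty(20480))  # -1.0 (at the hard limit)
```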
Core relevant code:

```python
if self.overlong_buffer_cfg.enable:
    overlong_buffer_len = self.overlong_buffer_cfg.len
    expected_len = self.max_resp_len - overlong_buffer_len
    exceed_len = valid_response_length - expected_len
    overlong_penalty_factor = self.overlong_buffer_cfg.penalty_factor
    overlong_reward = min(-exceed_len / overlong_buffer_len * overlong_penalty_factor, 0)
    reward += overlong_reward
```

## FAQ

### Where is the "Overlong Filtering" in the paper?

Most experiments in the paper, including the best-performing one, were run without Overlong Filtering, because it largely overlaps with Overlong Reward Shaping in terms of properly learning from the longest outputs. So we don't implement it here.

### What's the difference between [the `recipe/dapo` directory in the `main` branch](https://github.com/volcengine/verl/tree/main/recipe/dapo) and the [`recipe/dapo` branch](https://github.com/volcengine/verl/tree/recipe/dapo/recipe/dapo)?

[The `recipe/dapo` branch](https://github.com/volcengine/verl/tree/recipe/dapo/recipe/dapo) is for **as-is reproduction** and thus won't be updated with new features. [The `recipe/dapo` directory in the `main` branch](https://github.com/volcengine/verl/tree/main/recipe/dapo) works as an example of how to extend the latest `verl` to implement an algorithm recipe, and will be maintained with new features.

### Why can't I reproduce similar results after modifications?

Today's RL infrastructure still has inherent sources of instability, which we are working hard to improve. We strongly recommend modifying only one thing at a time. We also list some known problems here:

1. Enabling CUDA graph (`enforce_eager=False`) might cause model performance degradation, whose cause is still under investigation.


================================================
FILE: verl_distillation/docs/algo/entropy.md
================================================

# Recipe: Entropy Mechanism

Last updated: 06/27/2025.
The Entropy Mechanism of Reinforcement Learning for Large Language Model Reasoning. [![Paper](https://img.shields.io/badge/paper-A42C25?style=for-the-badge&logo=arxiv&logoColor=white)](https://arxiv.org/pdf/2505.22617) [![Github](https://img.shields.io/badge/PRIME-000000?style=for-the-badge&logo=github&logoColor=000&logoColor=white)](https://github.com/PRIME-RL/Entropy-Mechanism-of-RL) [![alphaXiv](https://img.shields.io/badge/discussion-A42C25?style=for-the-badge&logo=arxiv&logoColor=white&color=blue )](https://www.alphaxiv.org/abs/2505.22617) [![Twitter](https://img.shields.io/badge/Twitter-%23000000.svg?style=for-the-badge&logo=twitter&logoColor=white)](https://x.com/stingning/status/1928088554166505667) [![Twitter](https://img.shields.io/badge/Twitter-%23000000.svg?style=for-the-badge&logo=twitter&logoColor=white)](https://x.com/charlesfornlp/status/1928089451080585283) [![Twitter-ak](https://img.shields.io/badge/Twitter-%23000000.svg?style=for-the-badge&logo=twitter&logoColor=white)](https://x.com/_akhaliq/status/1928077929105268861)
## 🎉News

- **[2025/05/29]** 🎉 Ranked **#1** of the day on [Huggingface Daily Papers](https://huggingface.co/papers?date=2025-05-29).
- **[2025/05/29]** Released our paper on arXiv. See [here](https://arxiv.org/pdf/2505.22617). We provide insights into the entropy mechanism of RL for LLMs and propose two simple yet effective strategies to alleviate entropy collapse.

## ✨Getting started

After preparing the training data, to train Qwen2.5-7B on a single node with the KL-Cov approach, for example, you can simply run:

```
cd verl
conda activate your_env
bash recipe/dapo/7b_kl_cov.sh
```

To train Qwen2.5-32B on multiple nodes, run:

```
cd verl
conda activate your_env
bash recipe/dapo/32b_kl_cov.sh
```

## 📖Introduction
This paper addresses the entropy collapse issue in scaling reinforcement learning (RL) for large language models (LLMs), where policy entropy drops sharply during training, leading to overconfidence and performance saturation. We empirically establish a relationship between entropy ($H$) and performance ($R$): $R = -a\exp(H) + b$, showing that performance is bottlenecked by entropy exhaustion.
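One consequence worth spelling out, read directly off the fitted relation above (with fitted coefficients $a, b > 0$; this is a reading of the formula, not an additional result):

```latex
% R = -a e^{H} + b decreases in H, so exhausting entropy (H -> 0)
% caps the achievable performance at a predictable ceiling:
\[
  \lim_{H \to 0^{+}} R \;=\; b - a .
\]
```

In other words, once $a$ and $b$ are fitted on early training steps, the performance ceiling reachable without any entropy intervention can be forecast in advance.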
Theoretically, we find entropy changes are driven by the covariance between action probability and logit updates, which correlates with advantage in policy gradient methods. High-probability, high-advantage actions reduce entropy, while rare, high-advantage actions increase it. Empirically, the covariance term remains positive, explaining entropy's monotonic decline. To mitigate this, we propose **Clip-Cov** and **KL-Cov**, which restrict updates for high-covariance tokens, as sketched below. These methods effectively prevent entropy collapse and improve performance.
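A minimal sketch of the KL-Cov idea follows. This is illustrative only, not the repository's implementation: the per-token covariance surrogate, the `k_ratio` fraction, and the k2-style KL term are assumptions made for the sketch.

```python
import torch

def kl_cov_loss(logprob, ref_logprob, advantages, mask, k_ratio=0.002, kl_coef=1.0):
    """Plain policy-gradient loss, plus a KL penalty applied only to the
    top-k_ratio fraction of tokens by the (logprob, advantage) covariance term."""
    n_valid = mask.sum()
    lp = logprob.detach()
    # Centered per-token covariance surrogate between log-prob and advantage.
    cov = (lp - (lp * mask).sum() / n_valid) * \
          (advantages - (advantages * mask).sum() / n_valid)
    pg_loss = -advantages * logprob                       # REINFORCE-style term
    k = max(1, int(k_ratio * n_valid.item()))
    cov = torch.where(mask.bool(), cov, torch.full_like(cov, float("-inf")))
    top_idx = torch.topk(cov.flatten(), k).indices        # highest-covariance tokens
    kl = 0.5 * (logprob - ref_logprob) ** 2               # k2-style KL estimate
    penalty = torch.zeros_like(pg_loss).flatten()
    penalty[top_idx] = kl_coef * kl.flatten()[top_idx]
    return ((pg_loss + penalty.view_as(pg_loss)) * mask).sum() / n_valid
```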
## 📃Evaluation
Our method maintains a considerably higher level of entropy throughout training. For example, when the baseline's entropy has reached a plateau and can no longer be consumed, the KL-Cov method still sustains an entropy level over 10 times higher. Meanwhile, the response length of the policy model steadily increases, and its performance on the test set consistently surpasses that of the baseline. This indicates that our model can explore more freely during training, learning a better policy through RL.

| **Method** | **AIME24** | **AIME25** | **AMC** | **MATH-500** | **OMNI-MATH** | **OlympiadBench** | **Minerva** | **Avg.** |
| ----------------- | ---------: | ---------: | -------: | -----------: | ------------: | ----------------: | ----------: | -------: |
| *Qwen2.5-7B* | | | | | | | | |
| GRPO | 21.2 | 9.6 | 58.7 | 78.8 | 27.9 | 40.7 | 36.7 | 38.6 |
| w. Clip-higher | 18.1 | 11.5 | 56.6 | 79.2 | 29.8 | 43.3 | 40.4 | 38.8 |
| w. **`CLIP-Cov`** | 22.1 | **15.8** | 58.2 | 80.4 | **30.5** | **44.1** | **41.1** | 40.4 |
| w. **`KL-Cov`** | **22.6** | 12.9 | **61.4** | **80.8** | 29.1 | 42.6 | 38.2 | **40.6** |
| *Qwen2.5-32B* | | | | | | | | |
| GRPO | 21.8 | 16.2 | 69.7 | 84.2 | 35.2 | 43.6 | 45.5 | 45.8 |
| w. Clip-higher | 35.6 | 22.3 | 69.5 | 77.2 | 35.1 | 42.5 | 43.0 | 47.2 |
| w. **`CLIP-Cov`** | 32.3 | 22.7 | 67.2 | **87.0** | **42.0** | **57.2** | 46.0 | 50.3 |
| w. **`KL-Cov`** | **36.8** | **30.8** | **74.5** | 84.6 | 39.1 | 49.0 | **46.3** | **52.2** |

Both of our approaches achieve non-trivial improvements across all benchmarks. Compared to GRPO, our method outperforms it by 2.0% on average for the 7B model and by 6.4% for the 32B model. Moreover, we observe that our method yields more substantial gains on the larger Qwen2.5-32B. Specifically, our method achieves improvements of 15.0% and 14.6% over GRPO on the most challenging benchmarks, AIME24 and AIME25, respectively.

## 🎈Citation

If you find this paper or repo helpful, please cite us.

```bibtex
@article{cui2025entropy,
  title={The Entropy Mechanism of Reinforcement Learning for Reasoning Language Models},
  author={Cui, Ganqu and Zhang, Yuchen and Chen, Jiacheng and Yuan, Lifan and Wang, Zhi and Zuo, Yuxin and Li, Haozhan and Fan, Yuchen and Chen, Huayu and Chen, Weize and others},
  journal={arXiv preprint arXiv:2505.22617},
  year={2025}
}
```

## 🌻Acknowledgement

We implement our reinforcement learning algorithm by extending [verl](https://github.com/volcengine/verl). We use [vLLM](https://github.com/vllm-project/vllm) for inference. Our models are trained primarily on the [Qwen2.5 family](https://github.com/QwenLM/Qwen2.5). Our training data is built from [DAPO-MATH](https://huggingface.co/datasets/BytedTsinghua-SIA/DAPO-Math-17k). Thanks for their great contributions!

## 📬 Contact

For questions, discussion, or collaboration opportunities, feel free to contact:

- Ganqu Cui: cuiganqu@pjlab.org.cn
- Yuchen Zhang: yuchen.zhang2003@gmail.com
- Jiacheng Chen: jackchan9345@gmail.com
- Ning Ding: ningding.cs@gmail.com


================================================
FILE: verl_distillation/docs/algo/gpg.md
================================================

# GPG: Group Policy Gradient

Last updated: 07/03/2025.

Group Policy Gradient (GPG) is a minimalist reinforcement learning (RL) method that enhances the reasoning ability of large language models without relying on supervised fine-tuning or complex tricks. GPG revisits traditional policy gradients and directly optimizes the RL objective: no surrogate losses, no KL penalties, no critic, and no reference model. Compared to GRPO, GPG is simpler, more efficient, and achieves better results on many tasks. For more details, please refer to the original paper [GPG: A Simple and Strong Reinforcement Learning Baseline for Model Reasoning](https://arxiv.org/abs/2504.02546).
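For intuition, here is a minimal sketch of what a GPG-style update computes. It is illustrative only: the paper's advantage correction for degenerate groups (all rewards identical) is noted in the docstring but not implemented here.

```python
import torch

def gpg_loss(logprob, rewards, response_mask, group_size):
    """Vanilla policy gradient on group-normalized advantages:
    no clipping, no KL term, no critic, no reference model.
    (GPG additionally rescales advantages to correct for groups whose
    rewards are all identical; that correction is omitted here.)"""
    r = rewards.view(-1, group_size)
    adv = (r - r.mean(dim=1, keepdim=True)) / (r.std(dim=1, keepdim=True) + 1e-6)
    adv = adv.view(-1, 1)  # one advantage per response, broadcast over tokens
    return -(adv * logprob * response_mask).sum() / response_mask.sum()
```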
## Key Components

- Uses a corrected advantage function to improve policy gradient accuracy and training efficiency.
- Eliminates the critic and reference models and avoids KL divergence constraints, which significantly simplifies the training process compared to Group Relative Policy Optimization (GRPO).

## Configuration

To configure GPG within the framework, use the following YAML settings.

```yaml
algorithm:
  adv_estimator: gpg
actor_rollout_ref:
  actor:
    policy_loss:
      loss_mode: "gpg"
```

## Advanced Extensions

GPG is a simple and strong baseline for model reasoning. Although it avoids a KL loss in its original form, you can still add one to further improve performance.

```yaml
algorithm:
  adv_estimator: gpg
actor_rollout_ref:
  actor:
    use_kl_loss: True # enable kl regularization
    kl_loss_coef: 0.01
    policy_loss:
      loss_mode: "gpg"
```


================================================
FILE: verl_distillation/docs/algo/grpo.md
================================================

# Group Relative Policy Optimization (GRPO)

Last updated: 05/31/2025.

In reinforcement learning, classic algorithms like PPO rely on a "critic" model to estimate the value of actions, guiding the learning process. However, training this critic model can be resource-intensive. GRPO simplifies this process by eliminating the need for a separate critic model. Instead, it operates as follows:

- Group Sampling: For a given problem, the model generates multiple possible solutions, forming a "group" of outputs.
- Reward Assignment: Each solution is evaluated and assigned a reward based on its correctness or quality.
- Baseline Calculation: The average reward of the group serves as a baseline.
- Policy Update: The model updates its parameters by comparing each solution's reward to the group baseline, reinforcing better-than-average solutions and discouraging worse-than-average ones.

This approach reduces computational overhead by avoiding the training of a separate value estimation model, making the learning process more efficient. For more details, refer to the original paper [DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models](https://arxiv.org/pdf/2402.03300).

## Key Components

- No Value Function (Critic-less): unlike PPO, GRPO does not train a separate value network (critic).
- Group Sampling (Grouped Rollouts): instead of evaluating one rollout per input, GRPO generates multiple completions (responses) from the current policy for each prompt. This set of completions is referred to as a group.
- Relative Rewards: within each group, completions are scored (e.g., based on correctness), and rewards are normalized relative to the group.

## Configuration

Note that all configs containing `micro_batch_size` are used to configure the maximum sample or token count per forward or backward pass to avoid GPU OOMs; their values should not change algorithmic/convergence behavior.

Although many configurations start with the `ppo_` prefix, they work across different RL algorithms in verl, as the GRPO training loop is similar to that of PPO (without the critic).
![image](https://github.com/user-attachments/assets/16aebad1-0da6-4eb3-806d-54a74e712c2d)

- `actor_rollout_ref.rollout.n`: For each prompt, sample n times. Defaults to 1. For GRPO, set it to a value larger than 1 for group sampling.
- `data.train_batch_size`: The global batch size of prompts used to generate a set of sampled trajectories/rollouts. The number of responses/trajectories is `data.train_batch_size * actor_rollout_ref.rollout.n`.
- `actor_rollout_ref.actor.ppo_mini_batch_size`: The set of sampled trajectories is split into multiple mini-batches with batch_size=ppo_mini_batch_size for PPO actor updates. The ppo_mini_batch_size is a global size across all workers.
- `actor_rollout_ref.actor.ppo_epochs`: Number of epochs of GRPO updates on one set of sampled trajectories for the actor.
- `actor_rollout_ref.actor.clip_ratio`: The GRPO clip range. Defaults to 0.2.
- `algorithm.adv_estimator`: Defaults to gae. Set it to grpo instead.
- `actor_rollout_ref.actor.loss_agg_mode`: Defaults to "token-mean". Options include "token-mean", "seq-mean-token-sum", "seq-mean-token-mean". The original GRPO paper takes the sample-level loss (seq-mean-token-mean), which may be unstable in long-CoT scenarios. All GRPO example scripts provided in verl use the default "token-mean" for loss aggregation instead.

Instead of adding a KL penalty to the reward, GRPO regularizes by directly adding the KL divergence between the trained policy and the reference policy to the loss:

- `actor_rollout_ref.actor.use_kl_loss`: Whether to use a KL loss in the actor. When used, KL is not applied in the reward function. Defaults to False. Set it to True for GRPO.
- `actor_rollout_ref.actor.kl_loss_coef`: The coefficient of the KL loss. Defaults to 0.001.
- `actor_rollout_ref.actor.kl_loss_type`: Supports kl(k1), abs, mse(k2), low_var_kl(k3) and full. Appending "+" at the end (e.g., 'k1+' and 'k3+') applies a straight-through trick that employs k2 for unbiased gradient estimation, regardless of the KL value estimate (see https://github.com/volcengine/verl/pull/2953#issuecomment-3162113848 for more details). This determines how the KL divergence between the actor and reference policy is calculated. See this blog post for a detailed analysis: http://joschu.net/blog/kl-approx.html

## Advanced Extensions

### DrGRPO

[Understanding R1-Zero-Like Training: A Critical Perspective](https://arxiv.org/pdf/2503.20783) claims there is an optimization bias in GRPO that leads to artificially longer responses, especially for incorrect outputs. This inefficiency stems from the way GRPO calculates advantages using group-based reward normalization. Instead, DrGRPO aggregates token-level losses by normalizing with a global constant to eliminate length bias, as sketched below.
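Here is a minimal sketch of the contrast between the two aggregation modes (illustrative, not verl's exact code; using `max_len` as the global constant is an assumption):

```python
import torch

def aggregate_loss(loss_mat, loss_mask, mode, max_len=None):
    """Contrast verl's default GRPO aggregation with DrGRPO's."""
    if mode == "token-mean":                   # GRPO default in verl
        return (loss_mat * loss_mask).sum() / loss_mask.sum()
    if mode == "seq-mean-token-sum-norm":      # DrGRPO: token-sum over a global constant
        seq_losses = (loss_mat * loss_mask).sum(dim=-1)
        return seq_losses.sum() / (loss_mat.size(0) * max_len)
    raise ValueError(f"Invalid mode: {mode}")
```

Because the denominator no longer depends on each sequence's own length, long incorrect responses are not implicitly down-weighted, which removes the length bias DrGRPO targets.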
Configure the following to enable DrGRPO, with all other parameters the same as GRPO's:

- `actor_rollout_ref.actor.loss_agg_mode`: "seq-mean-token-sum-norm", which turns off seq-dim averaging
- `actor_rollout_ref.actor.use_kl_loss`: Set it to False for DrGRPO
- `algorithm.norm_adv_by_std_in_grpo`: False, which turns off the standard-deviation norm

## Reference Example

Qwen2.5 GRPO training log and commands: [link](https://github.com/eric-haibin-lin/verl-data/blob/experiments/gsm8k/qwen2-7b-fsdp2.log)

```bash
bash examples/grpo_trainer/run_qwen3-8b.sh
```

For more reference performance, please see https://verl.readthedocs.io/en/latest/algo/baseline.html


================================================
FILE: verl_distillation/docs/algo/opo.md
================================================

# On-Policy RL with Optimal Reward Baseline (OPO)

Last updated: 06/02/2025.

Loose on-policy constraints and suboptimal baselines in reinforcement learning often lead to training instability such as large policy shifts and entropy collapse. OPO addresses these challenges by using exact on-policy training with the theoretically optimal reward baseline for advantage estimation. It achieves lower policy shift and higher output entropy, encouraging more diverse and less repetitive responses.

OPO uses group sampling to generate multiple outputs for each input, like GRPO. Unlike group-based algorithms that typically use the mean reward of a group as the baseline, OPO employs the theoretically optimal baseline: the length-weighted reward of the group. It also omits standard-deviation normalization. With these two key components, OPO trains a single policy model with the objective of maximizing only the expected reward. For more details, refer to the original paper [On-Policy RL with Optimal Reward Baseline](https://arxiv.org/pdf/2505.23585).

## Key Components

- Exact On-Policy Training: always generates responses from the current policy, without using any pre-generated or off-policy data.
- Optimal Reward Baseline: uses the length-weighted reward of the group as the baseline for normalizing rewards.

## Configuration

To configure OPO within the framework, use the following YAML settings. These parameters are crucial for enabling exact on-policy training and activating the optimal reward baseline.

```yaml
algorithm:
  adv_estimator: opo # Use OPO for optimal reward baseline
data:
  train_batch_size: 1024
actor_rollout_ref:
  actor:
    ppo_mini_batch_size: 1024 # ppo_mini_batch_size should equal train_batch_size to enable exact on-policy training
    entropy_coeff: 0 # disable entropy regularization
    use_kl_loss: False # disable kl regularization
    kl_loss_coef: 0
```

## Advanced Extensions

OPO can also be extended to other algorithms like RLOO and Reinforce++: simply adjust their configurations to enable exact on-policy training and incorporate the optimal length-weighted reward baseline, with minimal modifications to their advantage estimation functions.
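For concreteness, here is a minimal sketch of the length-weighted baseline described above (illustrative only, not verl's `core_algos` implementation):

```python
import torch

def opo_advantages(rewards, response_lengths, group_size):
    """OPO advantage: reward minus the group's length-weighted mean reward,
    with no standard-deviation normalization."""
    r = rewards.view(-1, group_size)
    lens = response_lengths.view(-1, group_size).float()
    # Longer responses contribute more to the baseline, per OPO's derivation.
    baseline = (lens * r).sum(dim=1, keepdim=True) / lens.sum(dim=1, keepdim=True)
    return (r - baseline).view(-1)
```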
================================================
FILE: verl_distillation/docs/algo/ppo.md
================================================

# Proximal Policy Optimization (PPO)

Last updated: 06/19/2025.

Proximal Policy Optimization (PPO) is a family of policy gradient methods for reinforcement learning, proposed by OpenAI in 2017. PPO strikes a balance between simplicity, stability, and performance, making it one of the most widely used algorithms in modern RL applications, including large-scale language model fine-tuning.

Traditional policy gradient methods like REINFORCE or Vanilla Policy Gradient suffer from:

- High variance and sample inefficiency.
- Instability due to large policy updates.

PPO addresses these problems with a clipped surrogate objective that avoids overly large updates without requiring second-order derivatives.

For more technical details on PPO, we suggest reading the introduction in the [OpenAI Spinning Up tutorial](https://spinningup.openai.com/en/latest/algorithms/ppo.html) and the paper [Proximal Policy Optimization Algorithms](https://arxiv.org/abs/1707.06347).

## Key Components

- Actor-Critic Architecture: PPO requires both an actor model (policy) and a critic model (value function). This differs from algorithms like GRPO and RLOO that don't require a critic model.
- Generalized Advantage Estimation (GAE): PPO uses GAE to compute advantage values, which helps reduce variance in policy gradient estimates while maintaining low bias.
- Clipped Surrogate Objective: The core of PPO is the clipped surrogate objective function that limits policy updates.

## Configuration

Note that all configs containing `micro_batch_size` are used to configure the maximum sample or token count per forward or backward pass to avoid GPU OOMs; their values should not change algorithmic/convergence behavior.

Most critic configs are similar to those of the actor. Note that the critic model is omitted from the figure below.

![image](https://github.com/user-attachments/assets/16aebad1-0da6-4eb3-806d-54a74e712c2d)

- `data.train_batch_size`: The global batch size of prompts used to generate a set of sampled trajectories/rollouts. The number of responses/trajectories is `data.train_batch_size * actor_rollout_ref.rollout.n`.
- `actor_rollout_ref.actor.ppo_mini_batch_size`: The set of sampled trajectories is split into multiple mini-batches with batch_size=ppo_mini_batch_size for PPO actor updates. The ppo_mini_batch_size is a global size across all workers.
- `critic.ppo_mini_batch_size`: The set of sampled trajectories is split into multiple mini-batches with batch_size=ppo_mini_batch_size for PPO critic updates. The ppo_mini_batch_size is a global size across all workers.
- `actor_rollout_ref.actor.clip_ratio`: The PPO clip range. Defaults to 0.2.
- `actor_rollout_ref.actor.ppo_epochs`: Number of epochs of PPO updates on one set of sampled trajectories for the actor.
- `critic.ppo_epochs`: Number of epochs of PPO updates on one set of sampled trajectories for the critic. Defaults to `actor_rollout_ref.actor.ppo_epochs`.
- `algorithm.gamma`: The discount factor.
- `algorithm.lam`: The lambda term that trades off bias and variance in the GAE estimator.
- `algorithm.adv_estimator`: Supports gae, grpo, reinforce_plus_plus, reinforce_plus_plus_baseline, rloo.

## Advanced Extensions

### KL Divergence Control

Options to prevent the policy from diverging too far from a reference policy. Two mechanisms are available: a KL reward penalty and a KL loss. For more technical details, see [Training language models to follow instructions with human feedback](https://arxiv.org/abs/2203.02155).

Options to use KL loss for KL divergence control:

- `actor_rollout_ref.actor.use_kl_loss`: Whether to use a KL loss in the actor. When used, KL is not applied in the reward function. Defaults to False.
- `actor_rollout_ref.actor.kl_loss_coef`: The coefficient of the KL loss. Defaults to 0.001.
- `actor_rollout_ref.actor.kl_loss_type`: Supports kl(k1), abs, mse(k2), low_var_kl(k3) and full.
  Appending "+" at the end (e.g., 'k1+' and 'k3+') applies a straight-through trick that employs k2 for unbiased gradient estimation, regardless of the KL value estimate (see https://github.com/volcengine/verl/pull/2953#issuecomment-3162113848 for more details). This determines how the KL divergence between the actor and reference policy is calculated. See this blog post for a detailed analysis: http://joschu.net/blog/kl-approx.html

Options to use a KL penalty in the reward:

- `algorithm.use_kl_in_reward`: Whether to enable the in-reward KL penalty. Defaults to False.
- `algorithm.kl_penalty`: Supports kl(k1), abs, mse(k2), low_var_kl(k3) and full. This defines how the KL divergence between the actor and reference policy is calculated. For specific options, refer to `kl_penalty` in core_algos.py. See this blog post for a detailed analysis: http://joschu.net/blog/kl-approx.html
- `algorithm.kl_ctrl.kl_coef`: The (initial) coefficient of the in-reward KL penalty. Defaults to 0.001.
- `algorithm.kl_ctrl.type`: 'fixed' for FixedKLController and 'adaptive' for AdaptiveKLController.
- `algorithm.kl_ctrl.horizon`: See the source code of AdaptiveKLController for details.
- `algorithm.kl_ctrl.target_kl`: See the source code of AdaptiveKLController for details.

### Dual-clip PPO

Dual-clip PPO introduces an additional bound: when the advantage is less than zero, the policy loss is clipped from below, so that the loss, when multiplied by a large ratio, does not exceed a specified bound.

![image](https://github.com/user-attachments/assets/fc232181-d8b0-4307-8dd2-4dc0a4c1c139)

- `actor_rollout_ref.actor.clip_ratio_c`: The lower bound of the value for Dual-clip PPO. Defaults to 3.0.

## Reference Example

Qwen2.5 training log and commands: [link](https://github.com/eric-haibin-lin/verl-data/blob/experiments/gsm8k/Qwen2.5-0.5B-bsz256_2-prompt1024-resp512-0.567.log)

```bash
bash run_gemma.sh trainer.n_gpus_per_node=1 \
  actor_rollout_ref.rollout.tensor_model_parallel_size=1 \
  trainer.logger=console \
  critic.model.path=Qwen/Qwen2.5-0.5B-Instruct \
  actor_rollout_ref.model.path=Qwen/Qwen2.5-0.5B-Instruct \
  data.train_batch_size=256 \
  actor_rollout_ref.actor.ppo_mini_batch_size=64 \
  actor_rollout_ref.actor.ppo_micro_batch_size=2 \
  critic.ppo_micro_batch_size=2
```

Reference performance with verl v0.2:

| Model | Method | Score | Link |
|-------------------------------|------------------|-------|------|
| Qwen/Qwen2.5-0.5B-Instruct | pretrained model | 36.4 | [Qwen Blog](https://qwenlm.github.io/blog/qwen2.5-llm/) |
| Qwen/Qwen2.5-0.5B-Instruct | PPO | 56.7 | [PPO Command and Logs](https://github.com/eric-haibin-lin/verl-data/blob/experiments/gsm8k/Qwen2.5-0.5B-bsz256_2-prompt1024-resp512-0.567.log) |


================================================
FILE: verl_distillation/docs/algo/spin.md
================================================

# Recipe: Self-Play Fine-Tuning (SPIN)

Last updated: 05/31/2025.

`verl` provides a recipe inspired by the paper **"Self-Play Fine-Tuning Converts Weak Language Models to Strong Language Models"** (SPIN). SPIN is a language model fine-tuning algorithm that enables iterative self-improvement through a self-play mechanism inspired by game theory.

**Core Idea:** Models learn by playing against themselves, reducing reliance on external preference datasets or stronger teacher models:

1. **Synthetic Data Generation:** The current model generates responses, creating its own training data from previous iterations.
2. **Two-Player Game Setup:** A game involving two players, both played by a single LLM.
3. **Iterative Training:** The model progressively improves by refining its policy, with each iteration's model becoming the opponent for the next iteration.

Paper Authors: [Zixiang Chen](https://github.com/uclaml/SPIN)\*, [Yihe Deng](https://github.com/uclaml/SPIN)\*, [Huizhuo Yuan](https://scholar.google.com/citations?user=8foZzX4AAAAJ)\*, [Kaixuan Ji](https://scholar.google.com/citations?user=FOoKDukAAAAJ), [Quanquan Gu](https://web.cs.ucla.edu/~qgu/)

[[Webpage](https://uclaml.github.io/SPIN/)] [[Huggingface](https://huggingface.co/papers/2401.01335)] [[Paper](https://arxiv.org/abs/2401.01335)] [[Original Implementation](https://github.com/uclaml/SPIN)]

verl Implementation Authors: [Chendong Wang](https://cdwang96.github.io/), [Chenyang Zhao](https://github.com/zhaochenyang20)

---

## Key Function (compute_online_dpo_loss) and Related Works

SPIN (Chen et al., 2024) proposes an iterative self-play mechanism to fine-tune language models. In each iteration, SPIN's training objective, when using a logistic loss function, is equivalent to the Direct Preference Optimization (DPO) loss (Rafailov et al., 2023). This `verl` recipe realizes SPIN's core concept by applying the DPO loss iteratively (Xu et al., 2023; Xiong et al., 2023; Snorkel AI, 2024): in each iteration, we fine-tune the LLM with the DPO loss for preference optimization. Notably, Xu et al. (2023) explored iterative preference optimization with the pairwise cringe loss, while Xiong et al. (2023) discussed how to bridge theory and practice for RLHF under KL constraints using iterative training.

The concept of iterative preference learning was also explored in online DPO (Guo et al., 2024), which focuses on direct alignment from online AI feedback. In online DPO, preference data is dynamically updated during training, allowing the model to learn from its own generated data.

Specifically, we developed the **`compute_online_dpo_loss`** function and built this SPIN recipe on top of it. By incorporating online preference generation, this approach enables continuously refining language models without relying on fixed external preference datasets.

**Reference Papers:**

* [Self-Play Fine-Tuning Converts Weak Language Models to Strong Language Models](https://arxiv.org/abs/2401.01335) (Chen et al., 2024)
* [Direct Preference Optimization: Your Language Model is Secretly a Reward Model](https://arxiv.org/abs/2305.18290) (Rafailov et al., 2023)
* [Some things are more cringe than others: Preference optimization with the pairwise cringe loss](https://arxiv.org/abs/2312.16682) (Xu et al., 2023)
* [Iterative preference learning from human feedback: Bridging theory and practice for RLHF under KL-constraint](https://arxiv.org/abs/2312.11456) (Xiong et al., 2023)
* [Snorkel-Mistral-PairRM-DPO](https://huggingface.co/snorkelai/Snorkel-Mistral-PairRM-DPO) (Snorkel AI, 2024)
* [Direct language model alignment from online AI feedback](https://arxiv.org/abs/2402.04792) (Guo et al., 2024)

## Our Online DPO Implementation

Our `compute_online_dpo_loss` function adapts `verl`'s existing PPO infrastructure (based on `verl` v0.3.0.post1) for this iterative online DPO. Key aspects of our implementation include:

* **No Critic:** Unlike PPO, we omit the value function critic.
* **Dynamic Reference Model:** An explicit reference policy (`ref_policy_wg`) is used for the DPO loss. This reference model's weights can be periodically updated from the actor (`ref_update_freq`), providing a dynamic baseline.
* **Online Preference Generation:** The `compute_onlineDPO_pref` function (in `core_algos.py`) dynamically creates chosen/rejected pairs based on a reward source (e.g., rule-based ranking for math problems).
* **DPO Loss Integration:** We replace PPO's policy loss with our `compute_online_dpo_loss` (in `core_algos.py`) within the actor update (`dp_actor.py`), directly optimizing the policy using the generated preferences.
* **Iterative Training Orchestration:** The `SpinTrainer` (in `spin_trainer.py`) manages the entire self-play loop: generation, preference labeling, optional reference model updates, and policy updates, enabling continuous self-improvement aligned with SPIN's principles.

---

## Algorithm

This recipe implements an online algorithm adapted to the `verl` Reinforcement Learning framework, which provides an alternative to PPO for fine-tuning language models.

**Online Loop:** Instead of maximizing a scalar reward signal as in PPO, this approach directly optimizes the policy model to align with preference data generated *online* during training:

1. **Generation:** The current model generates multiple responses for each prompt in a batch.
2. **Preference Labeling:** A function evaluates these generated responses to determine which one is preferred (chosen) and which is dispreferred (rejected). This can be done using a reward function or implicit ranking based on specific rules. (In this recipe, we use rule-based ranking on the math problems.)
3. **Update:** The preference tuple (`prompt`, `chosen_response`, `rejected_response`) is used to update the actor model via `compute_online_dpo_loss`, comparing against a reference model.

**Connection with SPIN:** Instead of only using a fixed target data distribution, the online generation loop in step 2 dynamically changes the target data distribution through the chosen Preference Labeling method (here, rule-based ranking that selects the better response on math problems). This explores the direction mentioned in Section 7 of the SPIN paper about "dynamically changing target data distribution" to potentially lift LLM performance beyond the ceiling of fixed human-annotated data.

---

## Reproduce the Experiment (Example Setup)

The following steps outline how to set up the environment and run the SPIN recipe, based on the provided test log using GSM8K and Qwen2.5-3B-Instruct.

1. **Setup Environment (Example using Docker):**

```bash
# Start a container with GPU access and shared memory
docker run -it --name spin_test --gpus all \
  --shm-size=32g \
  --ipc=host \
  -v /path/to/host/.cache:/root/.cache \
  -e HF_TOKEN= \
  lmsysorg/sglang:latest \
  /bin/bash

# Inside the container or on your host machine:
# Ensure /tmp is writable
mkdir -p /tmp
chmod 1777 /tmp

# Install Python 3.10 (if not present) and venv
sudo apt update
sudo apt install -y python3.10 python3.10-venv tmux
python3 -m ensurepip --upgrade

# Create and activate a virtual environment
python3 -m venv ~/.python/spin_env
source ~/.python/spin_env/bin/activate

# Install uv (fast package installer)
python3 -m pip install uv
```
2. **Install verl and Dependencies:**

```bash
# Clone the verl repository and checkout the spin branch
cd ~
git clone git@github.com:volcengine/verl.git && cd verl

# Install flash-attn (handle potential build issues)
python3 -m uv pip install wheel packaging
python3 -m uv pip install flash-attn --no-build-isolation --no-deps

# Install verl with sglang extras
python3 -m uv pip install -e ".[sglang]"
```

*Note: If `flash-attn` installation fails, try the manual steps again or consult its documentation.*

3. **Login & Download Data/Model:**

```bash
# Login to Weights & Biases (optional, for logging)
export WANDB_API_KEY=
# wandb login

# Download the GSM8K dataset
python3 examples/data_preprocess/gsm8k.py --local_save_dir ~/data/gsm8k # Adjusted path

# Download the base model (Example: Qwen2.5-3B-Instruct)
huggingface-cli download Qwen/Qwen2.5-3B-Instruct --local-dir $HOME/models/Qwen2.5-3B-Instruct
```

4. **Configure:**
   * Modify the configuration file (e.g., `config/spin_trainer.yaml` or the one specified in the run script) with correct paths to your downloaded model and data, desired hyperparameters (`dpo_beta`, learning rate, etc.), and distributed training settings (nodes, GPUs per node).
   * Pay attention to `actor_rollout_ref.model_path`, `data` paths, the `reward_model` config (if using one), and `trainer.ref_update_freq`.

5. **Run Training:**

```bash
# Set CUDA visible devices (adjust based on your hardware and config)
export CUDA_VISIBLE_DEVICES=0,1,2,3

# Launch the training script (e.g., test.sh or a custom script)
# Ensure test.sh points to the correct config and main script
bash recipe/spin/run_spin.sh
```

---

## Configuration

* The primary configuration is typically managed through a YAML file specified in the launch script (e.g., `config/spin_trainer.yaml`).
* Key configuration sections:
  * `data`: Paths to training/validation prompt files, batch sizes, sequence lengths.
  * `actor_rollout_ref`: Paths to the base model (used for actor and initial reference), FSDP settings, optimization parameters (learning rate, scheduler).
  * `reward_model`: Configuration for the reward model used for online preference labeling (path, batch size, etc.). Can be omitted if using a simpler reward function.
  * `algorithm`: DPO-specific hyperparameters like `dpo_beta`, `dpo_loss_type`.
  * `trainer`: Distributed training settings (nodes, GPUs per node), logging (WandB), checkpointing frequency, and `ref_update_freq` (set > 0 to enable periodic reference model updates from the actor).

---

## Key Files

* `main_spin.py`: Main entry point using Hydra to load the config and launch the `SpinTrainer`.
* `spin_trainer.py`: Defines the `SpinTrainer` class, orchestrating the Online DPO training loop.
* `fsdp_workers.py`: Implements Ray workers (Actor, Reference), potentially using FSDP.
* `dp_actor.py`: Contains the actor class, including the DPO policy update logic.
* `core_algos.py`: Includes helper functions for `compute_online_dpo_loss` and `compute_onlineDPO_pref`.
* `config/spin_trainer.yaml` (or similar): Main Hydra configuration file for the recipe.
* `run_spin.sh` (or similar): Example bash script for launching a training run.
* `README.md`: This file.
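For orientation, the preference objective at the core of `compute_online_dpo_loss` is the standard DPO loss. A minimal sketch (assuming sequence-level log-probabilities have already been summed per response; not the recipe's actual function signature):

```python
import torch.nn.functional as F

def dpo_loss(policy_chosen_logp, policy_rejected_logp,
             ref_chosen_logp, ref_rejected_logp, beta=0.1):
    """Standard DPO objective: push the policy's chosen/rejected
    log-ratio above the frozen reference model's."""
    logits = beta * ((policy_chosen_logp - ref_chosen_logp)
                     - (policy_rejected_logp - ref_rejected_logp))
    return -F.logsigmoid(logits).mean()
```

With online preference generation, the chosen/rejected pairs fed into this loss change every iteration, which is what turns the fixed-dataset DPO recipe into the SPIN-style self-play loop.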
---

## Acknowledgement

We sincerely thank the contribution and guidance from the `verl` community and advisors, including (adapted from SPPO):

* [Zixiang Chen](https://sites.google.com/view/zxchen)
* [Yuhao Yang](https://github.com/yhyang201)
* [Yifan Zhang](https://github.com/yifanzhang-pro)
* [Yongan Xiang](https://github.com/BearBiscuit05)
* [Junrong Lin](https://github.com/ocss884)
* [Yuxuan Tong](https://github.com/tongyx361)
* [Guangming Shen](https://github.com/PeterSH6)
* [Biao He](https://www.linkedin.com/in/biao-he/)
* [Qingquan Song](https://qingquansong.github.io/)
* [Chenyang Zhao](https://zhaochenyang20.github.io/Chayenne/)
* [Quanquan Gu](https://web.cs.ucla.edu/~qgu/)


================================================
FILE: verl_distillation/docs/algo/sppo.md
================================================

# Recipe: Self-Play Preference Optimization (SPPO)

Last updated: 05/28/2025.

verl provides a community recipe implementation for the paper [Self-Play Preference Optimization for Language Model Alignment](https://arxiv.org/abs/2405.00675). SPPO can significantly enhance the performance of an LLM without strong external signals such as responses or preferences from GPT-4. It can outperform models trained with iterative direct preference optimization (DPO), among other methods. SPPO is theoretically grounded, ensuring that the LLM can converge to the von Neumann winner (i.e., Nash equilibrium) under general, potentially intransitive preferences, and is empirically validated through extensive evaluations on multiple datasets.

Paper Authors: [Yue Wu](https://yuewu.us/)\*, [Zhiqing Sun](https://www.cs.cmu.edu/~zhiqings/)\*, [Huizhuo Yuan](https://scholar.google.com/citations?user=8foZzX4AAAAJ)\*, [Kaixuan Ji](https://scholar.google.com/citations?user=FOoKDukAAAAJ), [Yiming Yang](https://www.cs.cmu.edu/~yiming/), [Quanquan Gu](https://web.cs.ucla.edu/~qgu/)

verl Implementation Authors: [Yuhao Yang](https://github.com/yhyang201), [Chenyang Zhao](https://github.com/zhaochenyang20)

[[Webpage](https://uclaml.github.io/SPPO/)] [[Huggingface](https://huggingface.co/papers/2405.00675)] [[Paper](https://arxiv.org/abs/2405.00675)] [[Original Implementation](https://github.com/uclaml/SPPO)]

## Reproduce the Experiment

We evaluate the performance of SPPO on the MATH dataset. Starting from an initial score of 46.6 with Qwen2.5-7B-Instruct, we achieve a score of 65.6 after 20 epochs of training, placing our model approximately in the top 20 on the [MATH leaderboard](https://paperswithcode.com/sota/math-word-problem-solving-on-math). It's important to note that verl's internal evaluation metrics may not perfectly align with the official evaluation methodology for Qwen2.5-7B-Instruct. Therefore, for consistency and fair comparison, we report only results based on verl's evaluation framework.

```
git clone git@github.com:volcengine/verl.git
cd verl
python3 -m uv pip install -e ".[sglang]"
export WANDB_API_KEY=
python3 examples/data_preprocess/math_dataset.py --local_dir ~/data/math
huggingface-cli download Qwen/Qwen2.5-7B-Instruct --local-dir $HOME/models/Qwen2.5-7B-Instruct
export CUDA_VISIBLE_DEVICES=0,1,2,3
bash recipe/sppo/run_qwen2.5-7b_rm.sh
```

Note that the installation may occasionally fail to install flash-attn.
If this happens, you can install it manually by running:

```bash
python3 -m uv pip install wheel
python3 -m uv pip install packaging
python3 -m uv pip install flash-attn --no-build-isolation --no-deps
```

## Acknowledgement

We sincerely thank the contribution and guidance from:

- [Yue Wu](https://yuewu.us/)
- [Chendong Wang](https://cdwang96.github.io/)
- [Yifan Zhang](https://github.com/yifanzhang-pro)
- [Yongan Xiang](https://github.com/BearBiscuit05)
- [Junrong Lin](https://github.com/ocss884)
- [Yuxuan Tong](https://github.com/tongyx361)
- [Guangming Shen](https://github.com/PeterSH6)
- [Biao He](https://www.linkedin.com/in/biao-he/)
- [Qingquan Song](https://qingquansong.github.io/)
- [Quanquan Gu](https://web.cs.ucla.edu/~qgu/)


================================================
FILE: verl_distillation/docs/amd_tutorial/amd_build_dockerfile_page.rst
================================================

Getting started with AMD (ROCM Kernel)
=====================================================

Last updated: 07/06/2025.

Author: `Yusheng Su `_

Setup
-----

If you run on AMD GPUs (MI300) with the ROCM platform, you cannot use the previous quickstart to run verl. You should follow the steps below to build a docker image, and set ``RAY_EXPERIMENTAL_NOSET_ROCR_VISIBLE_DEVICES`` or ``RAY_EXPERIMENTAL_NOSET_HIP_VISIBLE_DEVICES`` when starting ray in verl's RLHF training.

docker/Dockerfile.rocm
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. code-block:: bash

    FROM "rlsys/rocm-6.3.4-patch:rocm6.3.4-numa-patch_ubuntu-22.04"

    SHELL ["/bin/bash", "-ceuxo", "pipefail"]

    ENV MAX_JOBS=512
    ENV PATH="/usr/local/python3.12/bin:$PATH"

    RUN ln -sf /usr/bin/python3.12 /usr/bin/python && \
        ln -sf /usr/bin/pip3.12 /usr/bin/pip

    ############################################
    RUN apt-get update
    RUN apt-get install -y pkg-config liblzma-dev
    ############################################

    ###########################################
    ##########Install TransformerEngine########
    ###########################################
    WORKDIR /workspace/
    # transformer-engine install
    # https://github.com/ROCm/TransformerEngine
    RUN rm -rf TransformerEngine
    RUN git clone --recursive https://github.com/ROCm/TransformerEngine.git
    WORKDIR /workspace/TransformerEngine
    RUN git checkout 236178e5
    # git checkout bb061ade
    # git checkout 864405c
    ENV NVTE_FRAMEWORK=pytorch
    ENV NVTE_ROCM_ARCH=gfx942
    ENV NVTE_USE_HIPBLASLT=1
    ENV NVTE_USE_ROCM=1
    # export CMAKE_PREFIX_PATH="/opt/rocm:/opt/rocm/hip:/usr/local:/usr:${CMAKE_PREFIX_PATH:-}"
    ENV CMAKE_PREFIX_PATH="/opt/rocm:/opt/rocm/hip:/usr/local:/usr"
    RUN MAX_JOBS=${MAX_JOBS} pip install . -vvv
    WORKDIR /workspace/
    ###########################################
    ###########################################
    ###########################################

    ####################################################################################
    ################Install vllm - sglang require vllm 0.6.7 dependency#################
    ####################################################################################
    #### Require vllm 0.6.7 - checkout 113274a0
    WORKDIR /workspace/
    RUN rm -rf vllm
    RUN pip uninstall -y vllm
    # Refer to here (down-grade vllm to 0.6.3): https://docs.vllm.ai/en/v0.6.3/getting_started/amd-installation.html
    RUN git clone https://github.com/ROCm/vllm.git
    # git clone https://github.com/vllm-project/vllm.git
    WORKDIR /workspace/vllm
    RUN git checkout 113274a0
    ENV PYTORCH_ROCM_ARCH="gfx90a;gfx942"
    #ENV MAX_JOBS=512
    ENV MAX_JOBS=${MAX_JOBS}
    RUN pip install "boto3>=1.26.0"
    RUN pip install setuptools_scm
    # will add src into py.
    # You can delete the repo
    RUN python3 setup.py install
    WORKDIR /workspace/
    ####################################################################################
    ####################################################################################
    ####################################################################################

    ###########################################
    ############For hack docker################
    ###########################################
    RUN pip install setuptools==75.8.0
    ###########################################
    ###########################################
    ###########################################

    ###########################################
    ############build sglang###################
    ###########################################
    # Set environment variables
    ENV BASE_DIR=/sgl-workspace
    ENV BUILD_TYPE=all
    ENV SGL_REPO=https://github.com/sgl-project/sglang
    ENV SGL_BRANCH=v0.4.6.post5
    ENV TRITON_REPO=https://github.com/ROCm/triton.git
    ENV TRITON_COMMIT=improve_fa_decode_3.0.0
    ENV AITER_REPO=https://github.com/ROCm/aiter.git
    ENV AITER_COMMIT=v0.1.2
    # v0.1.2 version - commit id: 9d11f47
    # ENV AITER_COMMIT=9d11f47
    ENV HIP_FORCE_DEV_KERNARG=1
    ENV HSA_NO_SCRATCH_RECLAIM=1
    ENV SGLANG_SET_CPU_AFFINITY=1
    ENV SGLANG_ALLOW_OVERWRITE_LONGER_CONTEXT_LEN=1
    ENV NCCL_MIN_NCHANNELS=112
    ENV MOE_PADDING=1
    ENV VLLM_FP8_PADDING=1
    ENV VLLM_FP8_ACT_PADDING=1
    ENV VLLM_FP8_WEIGHT_PADDING=1
    ENV VLLM_FP8_REDUCE_CONV=1
    ENV TORCHINDUCTOR_MAX_AUTOTUNE=1
    ENV TORCHINDUCTOR_MAX_AUTOTUNE_POINTWISE=1
    ENV HIPCC_COMPILE_FLAGS_APPEND="--offload-arch=gfx942"
    ENV AMDGPU_TARGETS=gfx942
    ENV ROCM_ARCH=gfx942
    ENV PYTORCH_ROCM_ARCH="gfx90a;gfx942"

    # Switch to working directory
    WORKDIR /sgl-workspace

    # Clean and create directory
    RUN rm -rf /sgl-workspace && mkdir -p /sgl-workspace

    # Clone and build sglang
    RUN git clone ${SGL_REPO} \
        && cd sglang \
        && git checkout ${SGL_BRANCH} || echo "Using default branch" \
        && cd sgl-kernel \
        && rm -f pyproject.toml \
        && mv pyproject_rocm.toml pyproject.toml \
        && python setup_rocm.py install \
        && cd .. \
        && if [ "$BUILD_TYPE" = "srt" ]; then \
               python -m pip --no-cache-dir install -e "python[srt_hip]"; \
           else \
               python -m pip --no-cache-dir install -e "python[all_hip]"; \
           fi \
        && cd /sgl-workspace \
        && cp -r /sgl-workspace/sglang /sglang \
        && python -m pip cache purge

    # Install common Python packages
    RUN pip install IPython orjson python-multipart torchao pybind11

    # Rebuild Triton
    RUN pip uninstall -y triton || true \
        && git clone ${TRITON_REPO} \
        && cd triton \
        && git checkout ${TRITON_COMMIT} \
        && cd python \
        && python3 setup.py install \
        && cd /sgl-workspace

    # ENV HIPCC_COMPILE_FLAGS_APPEND="--offload-arch=gfx942 --amdgpu-lower-module-lds-strategy=1"
    # ENV HIPCC_COMPILE_FLAGS_APPEND="--offload-arch=gfx942"

    # Build aiter
    # version: Commit 9d11f47
    # && git checkout ${AITER_COMMIT} \
    RUN pip uninstall -y aiter || true
    RUN git clone ${AITER_REPO} \
        && cd aiter \
        && git checkout ${AITER_COMMIT} \
        && git submodule sync \
        && git submodule update --init --recursive \
        && PREBUILD_KERNELS=1 GPU_ARCHS=gfx942 python3 setup.py install \
        && cd /sgl-workspace

    # Copy MI300X config
    RUN find /sgl-workspace/sglang/python/sglang/srt/layers/quantization/configs/ \
         /sgl-workspace/sglang/python/sglang/srt/layers/moe/fused_moe_triton/configs/ \
         -type f -name '*MI300X*' | \
         xargs -I {} sh -c 'vf_config=$(echo "$1" | sed "s/MI300X/MI300X_VF/"); cp "$1" "$vf_config"' -- {}

    # Environment setup complete.
    RUN echo "Environment setup complete."
    WORKDIR /workspace/
    ###########################################
    ###########################################
    ###########################################

    ###########################################
    ###############vllm v0.8.5#################
    ###########################################
    WORKDIR /workspace/
    ENV VLLM_TARGET_DEVICE=rocm
    ENV ROCM_PATH=/opt/rocm
    ENV SETUPTOOLS_SCM_PRETEND_VERSION=0.8.5.dev
    # Find the repo path in: DockerFile/Dockerfile.rocm_yang
    # RUN git clone https://github.com/RLFoundation/vllm-patch.git
    RUN pip uninstall -y vllm
    RUN rm -rf vllm-patch
    RUN git clone https://github.com/RLFoundation/vllm-patch.git \
        && cd vllm-patch \
        && git checkout v0.8.5-sleep-numa \
        && rm -rf build/ dist/ *.egg-info \
        && ln -sf /opt/rocm/lib/libamdhip64.so /usr/lib/libamdhip64.so \
        && SETUPTOOLS_SCM_PRETEND_VERSION=0.8.5.dev PYTORCH_ROCM_ARCH="gfx90a;gfx942" MAX_JOBS=${MAX_JOBS} python3 setup.py install
    # RUN SETUPTOOLS_SCM_PRETEND_VERSION=0.8.5.dev PYTORCH_ROCM_ARCH="gfx90a;gfx942" MAX_JOBS=${MAX_JOBS} python3 setup.py develop
    WORKDIR /workspace/
    ###########################################
    ###########################################
    ###########################################

    #########################################
    #### Install megatron-core###############
    #########################################
    RUN pip uninstall -y megatron-core && \
        git clone https://github.com/yushengsu-thu/Megatron-LM-amd_version.git && \
        cd Megatron-LM-amd_version && \
        pip install -vvv -e . && \
        cd /workspace/
    #########################################
    #########################################
    #########################################

    #######################################
    ################apex###################
    #######################################
    WORKDIR /workspace/
    RUN pip uninstall -y apex && \
        git clone https://github.com/ROCm/apex.git && \
        cd apex && \
        python setup.py install && \
        cd /workspace/
    #######################################
    #######################################
    #######################################

    ################################################################################
    ###########################Add torch_memory_saver###############################
    ################################################################################
    # Set environment variables
    ENV HIPCC_COMPILE_FLAGS_APPEND="--amdgpu-target=gfx90a;gfx942 -D__HIP_PLATFORM_AMD__"
    ENV CFLAGS="-D__HIP_PLATFORM_AMD__"
    ENV CXXFLAGS="-D__HIP_PLATFORM_AMD__"
    RUN pip install "git+https://github.com/YangWang92/torch_memory_saver_numa.git@numa"
    ################################################################################
    ################################################################################
    ################################################################################

    ########################################
    ######Install ray#######################
    ########################################
    # need to add this patch: https://github.com/ray-project/ray/pull/53531/files
    RUN pip uninstall ray -y
    RUN pip install "ray[data,train,tune,serve]>=2.47.0"
    ########################################
    ########################################
    ########################################

    ##########################################
    #######Install other dependencies#########
    ##########################################
    RUN pip install "tensordict==0.6.2" --no-deps && \
        pip install accelerate \
            codetiming \
            datasets \
            dill \
            hydra-core \
            liger-kernel \
            numpy \
            pandas \
            peft \
            "pyarrow>=15.0.0" \
            pylatexenc \
            torchdata \
            wandb \
            orjson \
            pybind11
    WORKDIR /workspace/
    RUN git clone https://github.com/volcengine/verl.git && \
        cd verl && \
        pip install -e .
    ##########################################
    ##########################################
    ##########################################

    WORKDIR /workspace/

    CMD ["/usr/bin/bash"]

Build the image
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. code-block:: bash

    docker build -f docker/Dockerfile.rocm -t verl-rocm .

Run the container
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Note: you can also pull a prebuilt image from this Docker Hub account: `RLSys Foundation <https://hub.docker.com/u/yushengsuthu>`_.

Pull and tag the image:

.. code-block:: bash

    docker pull rlsys/verl:verl-0.4.1_ubuntu-22.04_rocm6.3.4-numa-patch_vllm0.8.5_sglang0.4.6.post4
    docker tag rlsys/verl:verl-0.4.1_ubuntu-22.04_rocm6.3.4-numa-patch_vllm0.8.5_sglang0.4.6.post4 verl-rocm:latest

Launch the container:

.. code-block:: bash

    docker run --rm -it \
      --device /dev/dri \
      --device /dev/kfd \
      -p 8265:8265 \
      --group-add video \
      --cap-add SYS_PTRACE \
      --security-opt seccomp=unconfined \
      --privileged \
      -v $HOME/.ssh:/root/.ssh \
      -v $HOME:$HOME \
      --shm-size 128G \
      -w $PWD \
      verl-rocm \
      /bin/bash

(Optional) If you do not want to run the container as root and want to run it as your own user, add ``-e HOST_UID=$(id -u)`` and ``-e HOST_GID=$(id -g)`` to the above ``docker run`` command.

Example
-------

Due to the special settings of AMD (ROCm) torch:

1. If your ``ray>=2.45.0`` (the default), you need to set ``RAY_EXPERIMENTAL_NOSET_HIP_VISIBLE_DEVICES`` when starting Ray in verl's RLHF training, and apply this `patch <https://github.com/ray-project/ray/pull/53531/files>`_.
2. If your ``ray<2.45.0``, you need to set ``RAY_EXPERIMENTAL_NOSET_ROCR_VISIBLE_DEVICES`` when starting Ray in verl's RLHF training.

The inference engine ``$ENGINE`` can be ``vllm`` or ``sglang``. We choose ``vllm`` as the default in the following examples.
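For clarity, here is a minimal Python sketch of the same requirement (illustrative only; the bash examples below export the variable in the shell before launching the trainer instead):

.. code-block:: python

    import os

    import ray

    # ray >= 2.45.0 on ROCm: keep Ray from unsetting HIP_VISIBLE_DEVICES.
    # For ray < 2.45.0, set RAY_EXPERIMENTAL_NOSET_ROCR_VISIBLE_DEVICES instead.
    os.environ["RAY_EXPERIMENTAL_NOSET_HIP_VISIBLE_DEVICES"] = "1"

    ray.init()
    print(ray.cluster_resources())
    ray.shutdown()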
PPO
~~~

.. code-block:: bash

    YOUR_PROJECT_NAME=r1-verl-ppo-upstream
    YOUR_RUN_NAME=r1-training_ppo-upstream
    # export HYDRA_FULL_ERROR=1
    export HIP_VISIBLE_DEVICES=0,1,2,3,4,5,6,7

    # [ray] < 2.45.0
    #export RAY_EXPERIMENTAL_NOSET_ROCR_VISIBLE_DEVICES=1
    # [ray] >= 2.45.0
    export RAY_EXPERIMENTAL_NOSET_HIP_VISIBLE_DEVICES=1
    # Patch with https://github.com/ray-project/ray/pull/52794

    GPUS_PER_NODE=8
    MODEL_PATH=Qwen/Qwen2.5-0.5B-Instruct
    python3 examples/data_preprocess/gsm8k.py --local_save_dir data/gsm8k
    python3 -c "import transformers; transformers.pipeline('text-generation', model='$MODEL_PATH')"

    ENGINE=vllm #sglang
    PYTHONUNBUFFERED=1 python3 -m verl.trainer.main_ppo \
        data.train_files=data/gsm8k/train.parquet \
        data.val_files=data/gsm8k/test.parquet \
        data.train_batch_size=256 \
        data.val_batch_size=1312 \
        data.max_prompt_length=512 \
        data.max_response_length=256 \
        actor_rollout_ref.model.path=$MODEL_PATH \
        actor_rollout_ref.actor.optim.lr=1e-6 \
        actor_rollout_ref.actor.ppo_mini_batch_size=64 \
        actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \
        actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=8 \
        actor_rollout_ref.rollout.tensor_model_parallel_size=1 \
        actor_rollout_ref.rollout.name=$ENGINE \
        actor_rollout_ref.rollout.gpu_memory_utilization=0.8 \
        actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \
        critic.optim.lr=1e-5 \
        critic.model.path=$MODEL_PATH \
        critic.ppo_micro_batch_size_per_gpu=4 \
        algorithm.kl_ctrl.kl_coef=0.001 \
        trainer.logger=console \
        trainer.project_name=$YOUR_PROJECT_NAME \
        trainer.experiment_name=$YOUR_RUN_NAME \
        trainer.val_before_train=False \
        trainer.n_gpus_per_node=$GPUS_PER_NODE \
        trainer.nnodes=1 \
        trainer.save_freq=10 \
        trainer.test_freq=10 \
        trainer.total_epochs=15 #2>&1 | tee verl_demo.log

GRPO
~~~~

.. code-block:: bash

    YOUR_PROJECT_NAME=r1-verl-grpo-upstream
    YOUR_RUN_NAME=r1-training_grpo-upstream
    # export HYDRA_FULL_ERROR=1
    # export FSDP_VERBOSE=1
    #export HIP_VISIBLE_DEVICES=0,1,2,3,4,5,6,7

    # [ray] < 2.45.0
    #export RAY_EXPERIMENTAL_NOSET_ROCR_VISIBLE_DEVICES=1
    # [ray] >= 2.45.0
    export RAY_EXPERIMENTAL_NOSET_HIP_VISIBLE_DEVICES=1
    # Patch with https://github.com/ray-project/ray/pull/52794

    GPUS_PER_NODE=8
    MODEL_PATH=Qwen/Qwen2.5-0.5B-Instruct
    # MODEL_PATH=Qwen/Qwen2-7B-Instruct
    python3 examples/data_preprocess/gsm8k.py --local_save_dir data/gsm8k
    python3 -c "import transformers; transformers.pipeline('text-generation', model='$MODEL_PATH')"

    ENGINE=vllm #sglang
    python3 -m verl.trainer.main_ppo \
        algorithm.adv_estimator=grpo \
        data.train_files=data/gsm8k/train.parquet \
        data.val_files=data/gsm8k/test.parquet \
        data.train_batch_size=1024 \
        data.val_batch_size=1312 \
        data.max_prompt_length=512 \
        data.max_response_length=1024 \
        actor_rollout_ref.model.path=$MODEL_PATH \
        actor_rollout_ref.actor.optim.lr=1e-6 \
        actor_rollout_ref.model.use_remove_padding=True \
        actor_rollout_ref.actor.ppo_mini_batch_size=256 \
        actor_rollout_ref.actor.use_dynamic_bsz=True \
        actor_rollout_ref.actor.ppo_max_token_len_per_gpu=24000 \
        actor_rollout_ref.actor.use_kl_loss=True \
        actor_rollout_ref.actor.kl_loss_coef=0.001 \
        actor_rollout_ref.actor.kl_loss_type=low_var_kl \
        actor_rollout_ref.model.enable_gradient_checkpointing=False \
        actor_rollout_ref.actor.fsdp_config.param_offload=False \
        actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
        actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
        actor_rollout_ref.rollout.name=$ENGINE \
        actor_rollout_ref.rollout.gpu_memory_utilization=0.8 \
        actor_rollout_ref.rollout.n=5 \
        actor_rollout_ref.ref.fsdp_config.param_offload=False \
        algorithm.kl_ctrl.kl_coef=0.001 \
        trainer.critic_warmup=0 \
        trainer.logger=console \
        trainer.project_name=$YOUR_PROJECT_NAME \
        trainer.experiment_name=$YOUR_RUN_NAME \
        trainer.n_gpus_per_node=$GPUS_PER_NODE \
        trainer.val_before_train=False \
        trainer.nnodes=1 \
        trainer.save_freq=-1 \
        trainer.test_freq=10 \
        trainer.total_epochs=15

Multi-node training: slurm with Docker/Podman container
---------------------------------------------------------------------------------------

If you want to run multi-node training with slurm, you can use the following script.

.. note::
    1. You need to use ``podman`` or ``docker`` in the following script. We will release the apptainer script later.
    2. If you want to use ``podman``, simply replace ``docker`` with ``podman`` in the script.

The script includes the following steps:

1. SLURM Configuration
2. Environment Setup
3. Docker/Podman Container Setup
4. Ray Cluster Initialization
5. Data Preprocessing
6. Model Setup
7. Training Launch

slurm_script.sh
~~~~~~~~~~~~~~~~~~~~

.. code-block:: bash

    #!/bin/bash
    #SBATCH --job-name=verl-ray-on-slurm
    #SBATCH --nodes=2
    #SBATCH --ntasks-per-node=2
    #SBATCH --mem=200G
    #SBATCH --time=30-00:00:00
    #SBATCH --gpus-per-node=8
    #SBATCH --cpus-per-task=28
    #SBATCH --output=../verl_log/slurm-%j.out
    #SBATCH --error=../verl_log/slurm-%j.err
    #SBATCH --nodelist=gpu-[0,1]

    # load necessary modules
    ### Run this setup
    # [Cluster]: Use docker
    # docker pull docker.io/rocm/vllm:rocm6.2_mi300_ubuntu20.04_py3.9_vllm_0.6.4

    ##########################################################################
    ###The following setting should be set in different project and cluster###
    ##########################################################################
    ### Project
    CONTAINER_NAME="multinode_verl_training"
    IMG="verl.rocm"
    DOCKERFILE="docker/Dockerfile.rocm"
    # echo $PWD
    verl_workdir="${HOME}/projects/verl_upstream"
    export TRANSFORMERS_CACHE="${HOME}/.cache/huggingface"
    export HF_HOME=$TRANSFORMERS_CACHE

    ### Cluster Network Setting
    export NCCL_DEBUG=TRACE
    export GPU_MAX_HW_QUEUES=2
    export TORCH_NCCL_HIGH_PRIORITY=1
    export NCCL_CHECKS_DISABLE=1
    # export NCCL_IB_HCA=rdma0,rdma1,rdma2,rdma3,rdma4,rdma5,rdma6,rdma7
    export NCCL_IB_HCA=mlx5_0,mlx5_1,mlx5_2,mlx5_3,mlx5_4,mlx5_5,mlx5_8,mlx5_9
    export NCCL_IB_GID_INDEX=3
    export NCCL_CROSS_NIC=0
    export CUDA_DEVICE_MAX_CONNECTIONS=1
    export NCCL_PROTO=Simple
    export RCCL_MSCCL_ENABLE=0
    export TOKENIZERS_PARALLELISM=false
    export HSA_NO_SCRATCH_RECLAIM=1
    ##########################################################################

    ## Assign using GPUs
    export HIP_VISIBLE_DEVICES=0,1,2,3,4,5,6,7

    ### For rocm and training script
    # [ray] < 2.45.0
    #export RAY_EXPERIMENTAL_NOSET_ROCR_VISIBLE_DEVICES=1
    # [ray] >= 2.45.0
    export RAY_EXPERIMENTAL_NOSET_HIP_VISIBLE_DEVICES=1
    # Patch with https://github.com/ray-project/ray/pull/52794

    # Build and launch the Docker container
    srun bash -c "
        # Exit on any error
        set -e

        # Clean up dangling images (images with tag)
        docker image prune -f

        # Need to pull the docker first
        docker pull rlsys/verl:verl-0.4.1_ubuntu-22.04_rocm6.3.4-numa-patch_vllm0.8.5_sglang0.4.6.post4

        if ! docker images --format \"{{.Repository}}:{{.Tag}}\" | grep -q \"${IMG}\"; then
            echo \"Building ${IMG} image...\"
            docker build -f \"${DOCKERFILE}\" -t \"${IMG}\" .
        else
            echo \"${IMG} image already exists, skipping build\"
        fi

        # Removing old container if exists
        docker rm \"${CONTAINER_NAME}\" 2>/dev/null || true

        # Checking network devices
        ibdev2netdev

        # Launch the docker
        docker run --rm -d \
            -e HYDRA_FULL_ERROR=1 \
            -e RAY_EXPERIMENTAL_NOSET_ROCR_VISIBLE_DEVICES=1 \
            -e RAY_EXPERIMENTAL_NOSET_HIP_VISIBLE_DEVICES=1 \
            -e NCCL_DEBUG=${NCCL_DEBUG} \
            -e GPU_MAX_HW_QUEUES=${GPU_MAX_HW_QUEUES} \
            -e TORCH_NCCL_HIGH_PRIORITY=${TORCH_NCCL_HIGH_PRIORITY} \
            -e NCCL_CHECKS_DISABLE=${NCCL_CHECKS_DISABLE} \
            -e NCCL_IB_HCA=${NCCL_IB_HCA} \
            -e NCCL_IB_GID_INDEX=${NCCL_IB_GID_INDEX} \
            -e NCCL_CROSS_NIC=${NCCL_CROSS_NIC} \
            -e CUDA_DEVICE_MAX_CONNECTIONS=${CUDA_DEVICE_MAX_CONNECTIONS} \
            -e NCCL_PROTO=${NCCL_PROTO} \
            -e RCCL_MSCCL_ENABLE=${RCCL_MSCCL_ENABLE} \
            -e TOKENIZERS_PARALLELISM=${TOKENIZERS_PARALLELISM} \
            -e HSA_NO_SCRATCH_RECLAIM=${HSA_NO_SCRATCH_RECLAIM} \
            -e TRANSFORMERS_CACHE=${TRANSFORMERS_CACHE} \
            -e HF_HOME=${HF_HOME} \
            --network host \
            --device /dev/dri \
            --device /dev/kfd \
            --device /dev/infiniband \
            --group-add video \
            --cap-add SYS_PTRACE \
            --security-opt seccomp=unconfined \
            --privileged \
            -v \${HOME}:\${HOME} \
            -v \${HOME}/.ssh:/root/.ssh \
            -w \"${verl_workdir}\" \
            --shm-size 128G \
            --name \"${CONTAINER_NAME}\" \
            \"${IMG}\" \
            tail -f /dev/null

        echo \"Container setup completed\"
    "

    # (Optional): If you do not want root mode and need to run as yourself,
    # please add `-e HOST_UID=$(id -u)` and `-e HOST_GID=$(id -g)` to the above docker launch command.

    ### Ray launch the nodes before training
    # Getting the node names
    nodes_array=($(scontrol show hostnames "$SLURM_JOB_NODELIST" | tr '\n' ' '))
    head_node=${nodes_array[0]}
    head_node_ip=$(srun --nodes=1 --ntasks=1 -w "$head_node" hostname --ip-address)

    # if we detect a space character in the head node IP, we'll
    # convert it to an ipv4 address. This step is optional.
    if [[ "$head_node_ip" == *" "* ]]; then
        IFS=' ' read -ra ADDR <<<"$head_node_ip"
        if [[ ${#ADDR[0]} -gt 16 ]]; then
            head_node_ip=${ADDR[1]}
        else
            head_node_ip=${ADDR[0]}
        fi
        echo "IPV6 address detected. We split the IPV4 address as $head_node_ip"
    fi

    port=6379
    ip_head=$head_node_ip:$port
    export ip_head
    echo "IP Head: $ip_head"

    # make sure we set environment variables before Ray initialization
    # Print out all env variables
    printenv

    echo "Starting HEAD at $head_node"
    srun --nodes=1 --ntasks=1 -w "$head_node" \
        docker exec "${CONTAINER_NAME}" \
        ray start --head --node-ip-address="$head_node_ip" --port=$port \
        --dashboard-port=8266 \
        --num-cpus "${SLURM_CPUS_PER_TASK}" --num-gpus "${SLURM_GPUS_PER_NODE}" --block &

    # optional, though may be useful in certain versions of Ray < 1.0.
    sleep 10

    # number of nodes other than the head node
    worker_num=$((SLURM_JOB_NUM_NODES - 1))

    for ((i = 1; i <= worker_num; i++)); do
        node_i=${nodes_array[$i]}
        echo "Debug: Starting worker on node_i = ${node_i}"
        if [ -z "$node_i" ]; then
            echo "Error: Empty node name for worker $i"
            continue
        fi
        echo "Starting WORKER $i at $node_i"
        srun --nodes=1 --ntasks=1 -w "$node_i" \
            docker exec "${CONTAINER_NAME}" \
            ray start --address "$ip_head" --num-cpus "${SLURM_CPUS_PER_TASK}" --num-gpus "${SLURM_GPUS_PER_NODE}" --block &
        sleep 5
    done

    # Ray initialization test (check whether any error occurred in the above execution)
    echo "Testing Ray initialization in the slurm nodes..."
    docker exec "${CONTAINER_NAME}" python3 -c '
    import ray
    try:
        ray.init(address="auto")
        print("\n=== Ray Cluster Status ===")
        print(f"Number of nodes: {len(ray.nodes())}")
        for node in ray.nodes():
            print("Node: {}, Status: {}".format(node["NodeManagerHostname"], node["Alive"]))
            # print(f"Node: {node}")
        ray.shutdown()
        print("Ray initialization successful!")
    except Exception as e:
        print(f"Ray initialization failed: {str(e)}")
    '
    echo "=== Ray test completed ==="

    ######
    # Run data preprocessing
    echo "Starting data preprocessing..."
    docker exec "${CONTAINER_NAME}" \
        python3 "examples/data_preprocess/gsm8k.py" "--local_save_dir" "../data/gsm8k"

    echo "Starting data preprocessing..."
    docker exec "${CONTAINER_NAME}" \
        python3 "examples/data_preprocess/math_dataset.py" "--local_dir" "../data/math"

    train_files="../data/gsm8k/train.parquet"
    val_files="../data/gsm8k/test.parquet"

    # Download and test the model
    echo "Loading model..."
    docker exec "${CONTAINER_NAME}" \
        python3 -c "import transformers; transformers.pipeline('text-generation', model='Qwen/Qwen2-7B-Instruct')"
    MODEL_PATH="Qwen/Qwen2-7B-Instruct"

    echo "== Data and model loading Done =="
    echo "Start to train..."

    PYTHONUNBUFFERED=1 srun --overlap --nodes=${SLURM_NNODES} --ntasks=1 -w "$head_node" \
        docker exec "${CONTAINER_NAME}" \
        python3 -m verl.trainer.main_ppo \
        data.train_files=$train_files \
        data.val_files=$val_files \
        data.train_batch_size=1024 \
        data.max_prompt_length=1024 \
        data.max_response_length=1024 \
        actor_rollout_ref.model.path=$MODEL_PATH \
        actor_rollout_ref.actor.optim.lr=1e-6 \
        actor_rollout_ref.model.use_remove_padding=True \
        actor_rollout_ref.actor.ppo_mini_batch_size=256 \
        actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=8 \
        actor_rollout_ref.model.enable_gradient_checkpointing=True \
        actor_rollout_ref.actor.fsdp_config.param_offload=False \
        actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
        actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \
        actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
        actor_rollout_ref.rollout.name=vllm \
        actor_rollout_ref.rollout.gpu_memory_utilization=0.9 \
        actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=16 \
        actor_rollout_ref.ref.fsdp_config.param_offload=True \
        critic.optim.lr=1e-5 \
        critic.model.use_remove_padding=True \
        critic.model.path=$MODEL_PATH \
        critic.model.enable_gradient_checkpointing=False \
        critic.ppo_micro_batch_size_per_gpu=8 \
        critic.model.fsdp_config.param_offload=False \
        critic.model.fsdp_config.optimizer_offload=False \
        algorithm.kl_ctrl.kl_coef=0.0001 \
        trainer.critic_warmup=0 \
        trainer.logger='["console","wandb"]' \
        trainer.project_name='verl_example' \
        trainer.experiment_name='Qwen2.5-32B-Instruct_function_rm' \
        trainer.n_gpus_per_node=${SLURM_GPUS_PER_NODE} \
        trainer.val_before_train=False \
        trainer.nnodes=${SLURM_NNODES} \
        trainer.save_freq=-1 \
        trainer.test_freq=10 \
        trainer.total_epochs=15

Run slurm_script.sh
~~~~~~~~~~~~~~~~~~~~

Just ``sbatch`` your ``slurm_script.sh``:
.. code-block:: bash

    sbatch slurm_script.sh

================================================
FILE: verl_distillation/docs/amd_tutorial/amd_vllm_page.rst
================================================
verl performance tuning for AMD (ROCm Kernel)
=====================================================

Last updated: 04/25/2025.

Author: `Yang Wang `_

Patch vLLM to Enable Sleep Mode for AMD GPUs
--------------------------------------------------------------

By default, verl requires vLLM to enable sleep mode, which allows vLLM to offload GPU memory to CPU memory after rollout. However, this feature is still under review by the vLLM community.

To enable vLLM's sleep mode, you can first use the community-patched code (from `this pull request `_) to build vLLM from the source code of the corresponding pull request. After the patch is merged into the vLLM main branch, you can directly install vLLM from the latest version.

1. Clone the vLLM repository and build it with the following commands:

.. code-block:: bash

    git clone -b sleep_amd https://github.com/HollowMan6/vllm.git
    cd vllm
    sudo ln -sf /opt/rocm/lib/libamdhip64.so /usr/lib/libamdhip64.so
    VLLM_TARGET_DEVICE=rocm ROCM_PATH=/opt/rocm/ VLLM_GPU_LANG=HIP SETUPTOOLS_SCM_PRETEND_VERSION=0.8.4.dev python3 setup.py develop

2. Additionally, make sure the ROCm version in your Docker image is greater than or equal to ROCm 6.3.4; we recommend ROCm 6.4.0 for better performance (see `this comment `_).

After the upgrade, you can verify whether sleep mode is enabled by running the following test code (from `this comment `_).

.. code-block:: python

    import torch
    from vllm import LLM

    llm = LLM(model="meta-llama/Llama-3.1-8B-Instruct", enable_sleep_mode=True)

    def run_inference(prompt):
        outputs = llm.generate(prompt)
        for output in outputs:
            prompt = output.prompt
            generated_text = output.outputs[0].text
            print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")

    print("CUDA Memory Usage (after inference):")
    torch.cuda.empty_cache()
    print(f"{torch.cuda.memory_allocated()=}")

    run_inference("San Francisco is")
    llm.sleep()

    print("CUDA Memory Usage (after sleep):")
    torch.cuda.empty_cache()
    print(f"{torch.cuda.memory_allocated()=}")

    llm.wake_up()
    print("CUDA Memory Usage (after wakeup):")
    torch.cuda.empty_cache()
    print(f"{torch.cuda.memory_allocated()=}")

    run_inference("Paris is")

If sleep mode is enabled, you should see the memory usage drop after sleep.

After applying the vLLM patch and completing the installation, you can enable sleep mode in verl to reduce memory overhead. This allows verl to offload unused GPU memory during rollout, significantly lowering the memory footprint during long-context training or multi-node reinforcement learning.

Enable CUDA Graph and Bypass ROCm-related issues
--------------------------------------------------------------

Due to potential issues with CUDA graph capture in ROCm, we've found that vLLM's CUDA graph feature cannot be enabled on multiple nodes in verl on AMD platforms with vLLM V1 mode. This leads to significantly slower rollout performance. Our investigation shows that ROCm may trigger an unexpected crash when attempting to capture large batches with CUDA graph.

One workaround is to patch the LLM configuration (from `this commit `_):
.. code-block:: python

    self.inference_engine = LLM(
        model=model_path,
        enable_sleep_mode=True,
        tensor_parallel_size=tensor_parallel_size,
        distributed_executor_backend="external_launcher",
        dtype=config.dtype,
        enforce_eager=config.enforce_eager,
        gpu_memory_utilization=config.gpu_memory_utilization,
        disable_custom_all_reduce=True,
        disable_mm_preprocessor_cache=True,
        limit_mm_per_prompt=limit_mm_per_prompt,
        skip_tokenizer_init=False,
        max_model_len=max_model_len,
        load_format=load_format,
        disable_log_stats=config.disable_log_stats,
        max_num_batched_tokens=max_num_batched_tokens,
        enable_chunked_prefill=config.enable_chunked_prefill,
        enable_prefix_caching=True,
        trust_remote_code=trust_remote_code,
        # enable compilation config to bypass oom on rocm
        # change depends on your GPU memory size
        compilation_config={"cudagraph_capture_sizes": [1, 2, 4, 8, 16, 32, 64]},
        seed=config.get('seed', 0),
    )

Then you can enable CUDA graph by setting the following config option (see `this page `_):

.. code-block:: bash

    actor_rollout_ref.rollout.enforce_eager=False \

================================================
FILE: verl_distillation/docs/api/data.rst
================================================
Data interface
=========================

Last updated: 05/19/2025 (API docstrings are auto-generated).

DataProto is the interface for data exchange.

The :class:`verl.DataProto` class contains two key members:

- batch: a :class:`tensordict.TensorDict` object for the actual data
- meta_info: a :class:`Dict` with additional meta information

TensorDict
~~~~~~~~~~~~

:attr:`DataProto.batch` is built on top of :class:`tensordict`, a project in the PyTorch ecosystem.

A TensorDict is a dict-like container for tensors. To instantiate a TensorDict, you must specify key-value pairs as well as the batch size.

.. code-block:: python

    >>> import torch
    >>> from tensordict import TensorDict
    >>> tensordict = TensorDict({"zeros": torch.zeros(2, 3, 4), "ones": torch.ones(2, 3, 5)}, batch_size=[2,])
    >>> tensordict["twos"] = 2 * torch.ones(2, 5, 6)
    >>> zeros = tensordict["zeros"]
    >>> tensordict
    TensorDict(
        fields={
            ones: Tensor(shape=torch.Size([2, 3, 5]), device=cpu, dtype=torch.float32, is_shared=False),
            twos: Tensor(shape=torch.Size([2, 5, 6]), device=cpu, dtype=torch.float32, is_shared=False),
            zeros: Tensor(shape=torch.Size([2, 3, 4]), device=cpu, dtype=torch.float32, is_shared=False)},
        batch_size=torch.Size([2]),
        device=None,
        is_shared=False)

One can also index a tensordict along its batch_size. The contents of the TensorDict can be manipulated collectively as well.

.. code-block:: python

    >>> tensordict[..., :1]
    TensorDict(
        fields={
            ones: Tensor(shape=torch.Size([1, 3, 5]), device=cpu, dtype=torch.float32, is_shared=False),
            twos: Tensor(shape=torch.Size([1, 5, 6]), device=cpu, dtype=torch.float32, is_shared=False),
            zeros: Tensor(shape=torch.Size([1, 3, 4]), device=cpu, dtype=torch.float32, is_shared=False)},
        batch_size=torch.Size([1]),
        device=None,
        is_shared=False)
    >>> tensordict = tensordict.to("cuda:0")
    >>> tensordict = tensordict.reshape(6)

For more about :class:`tensordict.TensorDict` usage, see the official tensordict_ documentation.

.. _tensordict: https://pytorch.org/tensordict/overview.html
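Putting the two members together, here is a minimal, hedged sketch that builds a ``DataProto`` from a ``TensorDict`` plus ``meta_info`` (the exact constructor signature may differ; see the autogenerated API docs below):

.. code-block:: python

    import torch
    from tensordict import TensorDict

    from verl import DataProto

    # batch holds the actual tensors; meta_info carries non-tensor bookkeeping.
    batch = TensorDict(
        {"input_ids": torch.zeros(2, 8, dtype=torch.long)},
        batch_size=[2],
    )
    data = DataProto(batch=batch, meta_info={"temperature": 1.0})
    print(data.batch["input_ids"].shape, data.meta_info)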
Core APIs
~~~~~~~~~~~~~~~~~

.. autoclass:: verl.DataProto
    :members: to, select, union, make_iterator, concat

================================================
FILE: verl_distillation/docs/api/single_controller.rst
================================================
Single Controller interface
============================

Last updated: 05/27/2025 (API docstrings are auto-generated).

The Single Controller provides a unified interface for managing distributed workers using Ray or other backends and executing functions across them. It simplifies the process of dispatching tasks and collecting results, particularly when dealing with data parallelism or model parallelism.

Core APIs
~~~~~~~~~~~~~~~~~

.. autoclass:: verl.single_controller.Worker
    :members: __init__, __new__, get_master_addr_port, get_cuda_visible_devices, world_size, rank

.. autoclass:: verl.single_controller.WorkerGroup
    :members: __init__, world_size

.. autoclass:: verl.single_controller.ClassWithInitArgs
    :members: __init__, __call__

.. autoclass:: verl.single_controller.ResourcePool
    :members: __init__, world_size, local_world_size_list, local_rank_list

.. autoclass:: verl.single_controller.ray.RayWorkerGroup
    :members: __init__

.. autofunction:: verl.single_controller.ray.create_colocated_worker_cls

================================================
FILE: verl_distillation/docs/api/trainer.rst
================================================
Trainer Interface
================================

Last updated: 06/08/2025 (API docstrings are auto-generated).

Trainers drive the training loop. Introducing new trainer classes for new training paradigms is encouraged.

.. autosummary::
    :nosignatures:

    verl.trainer.ppo.ray_trainer.RayPPOTrainer

Core APIs
~~~~~~~~~~~~~~~~~

.. autoclass:: verl.trainer.ppo.ray_trainer.RayPPOTrainer
    :members: __init__, init_workers, fit

.. automodule:: verl.utils.tokenizer
    :members: hf_tokenizer

.. automodule:: verl.trainer.ppo.core_algos
    :members: agg_loss, kl_penalty, compute_policy_loss

.. automodule:: verl.trainer.ppo.reward
    :members: load_reward_manager, compute_reward, compute_reward_async

.. autoclass:: verl.workers.reward_manager.NaiveRewardManager

.. autoclass:: verl.workers.reward_manager.DAPORewardManager

================================================
FILE: verl_distillation/docs/api/utils.rst
================================================
Utilities
============

Last updated: 05/19/2025 (API docstrings are auto-generated).

This section documents the utility functions and classes in the VERL library.

Python Functional Utilities
------------------------------

.. automodule:: verl.utils.py_functional
    :members: append_to_dict

File System Utilities
------------------------

.. automodule:: verl.utils.fs
    :members: copy_to_local

Tracking Utilities
---------------------

.. automodule:: verl.utils.tracking
    :members: Tracking

Metrics Utilities
---------------------

.. automodule:: verl.utils.metric
    :members: reduce_metrics

Checkpoint Management
------------------------

.. automodule:: verl.utils.checkpoint.checkpoint_manager
    :members: find_latest_ckpt_path

.. automodule:: verl.utils.checkpoint.fsdp_checkpoint_manager
    :members: FSDPCheckpointManager

Dataset Utilities
---------------------

.. automodule:: verl.utils.dataset.rl_dataset
    :members: RLHFDataset, collate_fn

Torch Functional Utilities
-----------------------------

.. automodule:: verl.utils.torch_functional
    :members: get_constant_schedule_with_warmup, masked_whiten, masked_mean, logprobs_from_logits

Sequence Length Balancing
----------------------------
.. automodule:: verl.utils.seqlen_balancing
    :members: get_reverse_idx, rearrange_micro_batches

Ulysses Utilities
--------------------

.. automodule:: verl.utils.ulysses
    :members: gather_outputs_and_unpad, ulysses_pad_and_slice_inputs

FSDP Utilities
------------------

.. automodule:: verl.utils.fsdp_utils
    :members: get_fsdp_wrap_policy, get_init_weight_context_manager, init_fn, load_fsdp_model_to_gpu, load_fsdp_optimizer, offload_fsdp_model_to_cpu, offload_fsdp_optimizer

Debug Utilities
-------------------

.. automodule:: verl.utils.profiler
    :members: log_gpu_memory_usage, GPUMemoryLogger

================================================
FILE: verl_distillation/docs/ascend_tutorial/ascend_profiling_en.rst
================================================
Data collection based on FSDP backend on Ascend devices (en)
==========================================================================================

Last updated: 08/14/2025.

This is a tutorial for data collection using the GRPO or DAPO algorithm based on FSDP on Ascend devices.

Configuration
-------------

Leverage two levels of configuration to control data collection:

1. **Global profiler control**: Use parameters in ``ppo_trainer.yaml`` to control the collection mode and steps.
2. **Role profiler control**: Use parameters in each role's ``profiler`` field to control the collection mode for each role.

Global collection control
~~~~~~~~~~~~~~~~~~~~~~~~~

Use parameters in ppo_trainer.yaml to control the collection mode and steps.

- global_profiler: Controls the ranks and mode of profiling.

  - tool: The profiling tool to use; options are nsys, npu, torch, torch_memory.
  - steps: Can be set to a list of the steps to collect, such as [2, 4], which means steps 2 and 4 will be collected. If set to null, no collection occurs.
  - save_path: The path to save the collected data. Default is "outputs/profile".

Role collection control
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

In each role's ``profiler`` field, you can control the collection mode for that role.

- enable: Whether to enable profiling for this role.
- all_ranks: Whether to collect data from all ranks.
- ranks: A list of ranks to collect data from. If empty, no data is collected.
- tool_config: Configuration for the profiling tool used by this role.

Use parameters in each role's ``profiler.tool_config.npu`` to control npu profiler behavior:

- level: Collection level; options are level_none, level0, level1, and level2.

  - level_none: Disables all level-based data collection (turns off profiler_level).
  - level0: Collects high-level application data, underlying NPU data, and operator execution details on the NPU.
  - level1: Extends level0 by adding CANN-layer AscendCL data and AI Core performance metrics on the NPU.
  - level2: Extends level1 by adding CANN-layer Runtime data and AI CPU metrics.

- contents: A list of options to control the collection content, such as npu, cpu, memory, shapes, module, stack.

  - npu: Whether to collect device-side performance data.
  - cpu: Whether to collect host-side performance data.
  - memory: Whether to enable memory analysis.
  - shapes: Whether to record tensor shapes.
  - module: Whether to record framework-layer Python call stack information.
  - stack: Whether to record operator call stack information.

- analysis: Enables automatic data parsing.
- discrete: Whether to enable discrete mode.

Examples
--------

Disabling collection
~~~~~~~~~~~~~~~~~~~~

.. code:: yaml

    global_profiler:
        steps: null # disable profile
End-to-End collection
~~~~~~~~~~~~~~~~~~~~~

.. code:: yaml

    global_profiler:
        steps: [1, 2, 5]
    actor_rollout_ref:
        actor:
            profiler:
                enable: True
                all_ranks: True
                tool_config:
                    npu:
                        discrete: False
    # rollout & ref follow actor settings

Discrete Mode Collection
~~~~~~~~~~~~~~~~~~~~~~~~

.. code:: yaml

    global_profiler:
        steps: [1, 2, 5]
    actor_rollout_ref:
        actor:
            profiler:
                enable: True
                all_ranks: True
                tool_config:
                    npu:
                        discrete: True
    # rollout & ref follow actor settings

Visualization
-------------

Collected data is stored in the user-defined save_path and can be visualized using the `MindStudio Insight `_ tool.

If the analysis parameter is set to False, offline parsing is required after data collection:

.. code:: python

    import torch_npu
    # Set profiler_path to the parent directory of the "localhost.localdomain___ascend_pt" folder
    torch_npu.profiler.profiler.analyse(profiler_path=profiler_path)

================================================
FILE: verl_distillation/docs/ascend_tutorial/ascend_profiling_zh.rst
================================================
Data collection based on FSDP backend on Ascend devices (zh)
====================================

在昇腾设备上基于FSDP后端进行数据采集

Last updated: 08/14/2025.

This is a tutorial for data collection using the GRPO or DAPO algorithm based on the FSDP backend on Ascend devices.

Configuration
-------------

Two levels of profile settings control data collection:

- Global collection control: configuration items in verl/trainer/config/ppo_trainer.yaml control the collection mode and steps.
- Role profile control: configuration items in each role control the per-role parameters.

Global collection control
~~~~~~~~~~~~~~~~~~~~~~~~~

Parameters in ppo_trainer.yaml control the collection steps and mode:

- global_profiler: controls the ranks and mode of profiling.

  - tool: the profiling tool to use; options are nsys, npu, torch, torch_memory.
  - steps: can be set to a list of the steps to collect, e.g. [2, 4] collects steps 2 and 4. If set to null, no collection occurs.
  - save_path: the path to save the collected data. Default is "outputs/profile".

Role profiler control
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

In each role's ``profiler`` field, you can control that role's collection mode.

- enable: whether to enable profiling for this role.
- all_ranks: whether to collect data from all ranks.
- ranks: a list of ranks to collect data from. If empty, no data is collected.
- tool_config: configuration of the profiling tool used by this role.

Parameters in each role's ``profiler.tool_config.npu`` control the concrete collection behavior:

- level: collection level; options are level_none, level0, level1, and level2.

  - level_none: disables all level-based data collection (turns off profiler_level).
  - level0: collects high-level application data, underlying NPU data, and operator execution details on the NPU.
  - level1: extends level0 with CANN-layer AscendCL data and AI Core performance metrics on the NPU.
  - level2: extends level1 with CANN-layer Runtime data and AI CPU metrics.

- contents: a list of options controlling the collected content, e.g. npu, cpu, memory, shapes, module, stack.

  - npu: whether to collect device-side performance data.
  - cpu: whether to collect host-side performance data.
  - memory: whether to enable memory analysis.
  - shapes: whether to record tensor shapes.
  - module: whether to record framework-layer Python call stack information.
  - stack: whether to record operator call stack information.

- analysis: enables automatic data parsing.
- discrete: whether to use discrete mode.

Examples
--------

Disabling collection
~~~~~~~~~~~~~~~~~~~~

.. code:: yaml

    global_profiler:
        steps: null # disable profile

End-to-end collection
~~~~~~~~~~~~~~~~~~~~~

.. code:: yaml

    global_profiler:
        steps: [1, 2, 5]
    actor_rollout_ref:
        actor:
            profiler:
                enable: True
                all_ranks: True
                tool_config:
                    npu:
                        discrete: False
    # rollout & ref follow actor settings

Discrete mode collection
~~~~~~~~~~~~~~~~~~~~~~~~

.. code:: yaml

    global_profiler:
        steps: [1, 2, 5]
    actor_rollout_ref:
        actor:
            profiler:
                enable: True
                all_ranks: True
                tool_config:
                    npu:
                        discrete: True
    # rollout & ref follow actor settings

Visualization
-------------

The collected data is stored under the user-defined save_path and can be visualized with the `MindStudio Insight `_ tool.

If the analysis parameter is set to False, offline parsing is required after collection:

.. code:: python

    import torch_npu
    # Set profiler_path to the parent directory of the "localhost.localdomain___ascend_pt" folder
    torch_npu.profiler.profiler.analyse(profiler_path=profiler_path)

================================================
FILE: verl_distillation/docs/ascend_tutorial/ascend_quick_start.rst
================================================
verl x Ascend
===================================

Last updated: 10/31/2025.
We add support for Huawei Ascend devices in verl.

Hardware support
-----------------------------------

Atlas 200T A2 Box16

Atlas 900 A2 PODc

Atlas 800T A3

Installation
-----------------------------------

Base environment preparation
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

+-----------+-------------+
| software  | version     |
+-----------+-------------+
| Python    | == 3.10     |
+-----------+-------------+
| CANN      | == 8.2.RC1  |
+-----------+-------------+
| torch     | == 2.5.1    |
+-----------+-------------+
| torch_npu | == 2.5.1    |
+-----------+-------------+

For base environment preparation, please refer to this `document `_.

vllm & vllm-ascend
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

To use vllm properly in verl, build and install vllm and vllm-ascend with the following commands. Note that the installation method differs by machine type.

.. code-block:: bash

    # vllm
    git clone -b v0.9.1 --depth 1 https://github.com/vllm-project/vllm.git
    cd vllm
    pip install -r requirements-build.txt

    # for Atlas 200T A2 Box16
    VLLM_TARGET_DEVICE=empty pip install -e . --extra-index https://download.pytorch.org/whl/cpu/

    # for Atlas 900 A2 PODc
    VLLM_TARGET_DEVICE=empty pip install -e .

.. code-block:: bash

    # vllm-ascend
    git clone -b v0.9.1 --depth 1 https://github.com/vllm-project/vllm-ascend.git
    cd vllm-ascend
    export COMPILE_CUSTOM_KERNELS=1
    python setup.py install

Install verl
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. code-block:: bash

    git clone https://github.com/volcengine/verl.git
    cd verl
    pip install -r requirements-npu.txt
    pip install -e .

Building an image from a Dockerfile
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

If you need to build an image from a Dockerfile, please refer to this `document `_.

Notes on other third-party libraries
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

+--------------+---------------+
| software     | description   |
+--------------+---------------+
| transformers | v4.52.4       |
+--------------+---------------+
| flash_attn   | not supported |
+--------------+---------------+
| liger-kernel | not supported |
+--------------+---------------+

1. --flash_attention_2 can be enabled through transformers; transformers must be version 4.52.4.
2. Enabling flash attention acceleration through flash_attn is not supported.
3. Enabling liger-kernel is not supported.
4. For x86 servers, the CPU version of torchvision needs to be installed:

.. code-block:: bash

    pip install torchvision==0.20.1+cpu --index-url https://download.pytorch.org/whl/cpu

Quick start
-----------------------------------

Before formal use, we recommend validating your environment preparation and installation by training Qwen2.5-0.5B with GRPO.

1. Download the dataset and preprocess it into parquet format so it contains the fields required for computing RL rewards:

.. code-block:: bash

    python3 examples/data_preprocess/gsm8k.py --local_save_dir ~/data/gsm8k

2. Run training:
.. code-block:: bash

    set -x

    export VLLM_ATTENTION_BACKEND=XFORMERS

    python3 -m verl.trainer.main_ppo \
        algorithm.adv_estimator=grpo \
        data.train_files=$HOME/data/gsm8k/train.parquet \
        data.val_files=$HOME/data/gsm8k/test.parquet \
        data.train_batch_size=128 \
        data.max_prompt_length=512 \
        data.max_response_length=128 \
        data.filter_overlong_prompts=True \
        data.truncation='error' \
        actor_rollout_ref.model.path=Qwen/Qwen2.5-0.5B-Instruct \
        actor_rollout_ref.actor.optim.lr=5e-7 \
        actor_rollout_ref.model.use_remove_padding=False \
        actor_rollout_ref.actor.entropy_coeff=0.001 \
        actor_rollout_ref.actor.ppo_mini_batch_size=64 \
        actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=20 \
        actor_rollout_ref.actor.use_kl_loss=True \
        actor_rollout_ref.actor.kl_loss_coef=0.001 \
        actor_rollout_ref.actor.kl_loss_type=low_var_kl \
        actor_rollout_ref.model.enable_gradient_checkpointing=True \
        actor_rollout_ref.actor.fsdp_config.param_offload=False \
        actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
        actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=40 \
        actor_rollout_ref.rollout.enable_chunked_prefill=False \
        actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
        actor_rollout_ref.rollout.name=vllm \
        actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \
        actor_rollout_ref.rollout.n=5 \
        actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=40 \
        actor_rollout_ref.ref.fsdp_config.param_offload=True \
        algorithm.kl_ctrl.kl_coef=0.001 \
        trainer.critic_warmup=0 \
        trainer.logger=console \
        trainer.project_name='verl_grpo_example_gsm8k' \
        trainer.experiment_name='qwen2_7b_function_rm' \
        trainer.n_gpus_per_node=8 \
        trainer.nnodes=1 \
        trainer.save_freq=-1 \
        trainer.test_freq=5 \
        trainer.total_epochs=1 \
        trainer.device=npu $@

(Optional) Using the MindSpeed training backend
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

1. Install the MindSpeed acceleration library following the `MindSpeed README `_.
2. Set the verl worker model ``strategy`` to ``megatron``, e.g. ``actor_rollout_ref.actor.strategy=megatron``.
3. Custom MindSpeed arguments can be passed in via the ``override_transformer_config`` parameter; for example, to enable the FA feature for the actor model, use ``+actor_rollout_ref.actor.megatron.override_transformer_config.use_flash_attn=True``.
4. For more feature information, refer to the `MindSpeed+verl documentation `_.

Support status
-----------------------------------

**Table 1** RL algorithms

+-----------+-------------------------+-------------------+-------------------+--------------------------+
| algorithm | model                   | actor.strategy    | rollout.name      | hardware                 |
+-----------+-------------------------+-------------------+-------------------+--------------------------+
| GRPO      | Qwen2.5-7B-instruct     | FSDP              | vllm-ascend       | Atlas 200T A2 Box16      |
+-----------+-------------------------+-------------------+-------------------+--------------------------+
| GRPO      | Qwen2.5-32B-instruct    | FSDP              | vllm-ascend       | Atlas 200T A2 Box16      |
+-----------+-------------------------+-------------------+-------------------+--------------------------+
| GRPO      | Qwen2.5-VL-3B-instruct  | FSDP              | vllm-ascend       | Atlas 200T A2 Box16      |
+-----------+-------------------------+-------------------+-------------------+--------------------------+
| GRPO      | Qwen2.5-VL-7B-instruct  | FSDP              | vllm-ascend       | Atlas 200T A2 Box16      |
+-----------+-------------------------+-------------------+-------------------+--------------------------+
| GRPO      | Qwen2.5-VL-32B-instruct | FSDP              | vllm-ascend       | Atlas 200T A2 Box16      |
+-----------+-------------------------+-------------------+-------------------+--------------------------+
| GRPO      | Qwen3-8B                | FSDP              | vllm-ascend       | Atlas 200T A2 Box16      |
+-----------+-------------------------+-------------------+-------------------+--------------------------+
| GRPO      | Qwen3-32B               | FSDP              | vllm-ascend       | Atlas 200T A2 Box16      |
+-----------+-------------------------+-------------------+-------------------+--------------------------+
| DAPO      | Qwen2.5-7B-instruct     | FSDP              | vllm-ascend       | Atlas 200T A2 Box16      |
+-----------+-------------------------+-------------------+-------------------+--------------------------+
| DAPO      | Qwen2.5-32B             | FSDP              | vllm-ascend       | Atlas 200T A2 Box16      |
+-----------+-------------------------+-------------------+-------------------+--------------------------+
| DAPO      | Qwen3-8B-base           | FSDP              | vllm-ascend       | Atlas 200T A2 Box16      |
+-----------+-------------------------+-------------------+-------------------+--------------------------+
| DAPO      | Qwen3-14B-base          | FSDP              | vllm-ascend       | Atlas 200T A2 Box16      |
+-----------+-------------------------+-------------------+-------------------+--------------------------+
| DAPO      | Qwen3-30B-A3B-base      | FSDP              | vllm-ascend       | Atlas 200T A2 Box16      |
+-----------+-------------------------+-------------------+-------------------+--------------------------+
| DAPO      | Qwen3-30B-A3B           | megatron          | vllm-ascend       | Atlas 800T A3            |
+-----------+-------------------------+-------------------+-------------------+--------------------------+
| PPO       | Qwen3-8B                | FSDP              | vllm-ascend       | Atlas 900 A2 PODc        |
+-----------+-------------------------+-------------------+-------------------+--------------------------+

**Table 2** SFT algorithms

+-----------+-------------------------+-------------------+----------------------+
| algorithm | model                   | actor.strategy    | hardware             |
+-----------+-------------------------+-------------------+----------------------+
| SFT-PEFT  | Qwen3-8B                | FSDP              | Atlas 900 A2 PODc    |
+-----------+-------------------------+-------------------+----------------------+
| ReTool-SFT| Qwen2.5-7B-instruct     | FSDP              | Atlas 900 A2 PODc    |
+-----------+-------------------------+-------------------+----------------------+

Roadmap
-----------------------------------

See the `roadmap `_ for the support progress of more features.

Statement
-----------------------------------

The Ascend support code, Dockerfiles, and images provided in verl are reference samples. For production use, please reach out through official channels. Thank you.
================================================
FILE: verl_distillation/docs/ascend_tutorial/ascend_sglang_quick_start.rst
================================================
verl x Ascend
===================================

Last updated: 09/25/2025.

We add support for Huawei Ascend devices in verl.

Hardware support
-----------------------------------

Atlas 200T A2 Box16

Atlas 900 A2 PODc

Atlas 800T A3

Installation
-----------------------------------

Base environment preparation
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

+-----------+-------------+
| software  | version     |
+-----------+-------------+
| Python    | == 3.11     |
+-----------+-------------+
| CANN      | == 8.3.RC1  |
+-----------+-------------+
| HDK       | == 25.3.RC1 |
+-----------+-------------+
| torch     | == 2.6.0    |
+-----------+-------------+
| torch_npu | == 2.6.0    |
+-----------+-------------+

**Currently, the sglang NPU backend in verl only supports the HDK, CANN, and PTA versions listed above; a commercially available release is expected in October 2025.**

To use sglang properly in verl, install sglang, torch_memory_saver, and verl with the following commands.

sglang
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. code-block:: bash

    # sglang
    git clone https://github.com/sgl-project/sglang.git
    cd sglang
    mv python/pyproject.toml python/pyproject.toml.backup
    mv python/pyproject_other.toml python/pyproject.toml
    pip install -e "python[srt_npu]"

Install torch_memory_saver
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. code-block:: bash

    # torch_memory_saver
    git clone https://github.com/sgl-project/sgl-kernel-npu.git
    cd sgl-kernel-npu
    bash build.sh -a memory-saver
    pip install output/torch_memory_saver*.whl

Install verl
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. code-block:: bash

    git clone https://github.com/volcengine/verl.git
    cd verl
    pip install --no-deps -e .
    pip install -r requirements-npu.txt

Notes on other third-party libraries
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

+---------------+-------------+
| software      | description |
+---------------+-------------+
| transformers  | v4.56.1     |
+---------------+-------------+
| triton_ascend | v3.2.0      |
+---------------+-------------+

1. sglang depends on transformers v4.56.1.
2. sglang depends on triton_ascend v3.2.0.
3. Multi-modal models are not supported yet; uninstall the related packages torchvision and timm:

.. code-block:: bash

    pip uninstall torchvision
    pip uninstall timm
    pip uninstall triton
    pip install transformers==4.56.1
    pip install -i https://test.pypi.org/simple/ triton-ascend==3.2.0.dev20250925

Quick start
-----------------------------------

Before formal use, we recommend validating your environment preparation and installation by training Qwen3-8B with GRPO.

1. Download the dataset and preprocess it into parquet format so it contains the fields required for computing RL rewards:

.. code-block:: bash

    python3 examples/data_preprocess/gsm8k.py --local_save_dir ~/data/gsm8k

2. Run training:

.. code-block:: bash

    bash verl/examples/grpo_trainer/run_qwen3_8b_grpo_sglang_1k_npu.sh

================================================
FILE: verl_distillation/docs/ascend_tutorial/dockerfile_build_guidance.rst
================================================
Ascend Dockerfile Build Guidance
===================================

Last updated: 10/31/2025.

We add support for building Huawei Ascend images in verl.

Hardware support
-----------------------------------

Atlas 200T A2 Box16

Atlas 900 A2 PODc

Atlas 800T A3

Component versions
----------------

=========== ============
Component   Version
=========== ============
Base image  Ubuntu 22.04
Python      3.11
CANN        8.2.RC1
torch       2.5.1
torch_npu   2.5.1
vLLM        0.9.1
vLLM-ascend 0.9.1
Megatron-LM v0.12.1
MindSpeed   (f2b0977e)
=========== ============

Dockerfile build scripts
---------------------------

============== ============== ===================================
Device type    Base image     Reference file
============== ============== ===================================
A2             8.2.RC1        `Dockerfile.ascend_8.2.rc1_a2 `_
A3             8.2.RC1        `Dockerfile.ascend_8.2.rc1_a3 `_
============== ============== ===================================

Image build command example
--------------------
.. code:: bash

    # Navigate to the directory containing the Dockerfile
    cd {verl-root-path}/docker/ascend

    # Build the image
    docker build -f Dockerfile.ascend_8.2.rc1_a2 -t verl-ascend:8.2.rc1-a2 .

Statement
--------------------

The Ascend-related Dockerfiles and images provided in verl are reference samples for early-access trials. For production use, please reach out through official channels. Thank you.

================================================
FILE: verl_distillation/docs/conf.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))

# -- Project information -----------------------------------------------------

project = "verl"
copyright = "2024 ByteDance Seed Foundation MLSys Team"
author = "Guangming Sheng, Chi Zhang, Yanghua Peng, Haibin Lin"

# -- General configuration ---------------------------------------------------

# The master toctree document.
master_doc = "index"

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "myst_parser",
    "sphinx.ext.autodoc",
    "sphinx.ext.autosummary",
    "sphinx.ext.autosectionlabel",
    "sphinx.ext.napoleon",
    "sphinx.ext.viewcode",
]

# Use Google style docstrings instead of NumPy docstrings.
napoleon_google_docstring = True
napoleon_numpy_docstring = False

# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
source_suffix = {
    ".rst": "restructuredtext",
    ".md": "markdown",
}

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "en"

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory.
# They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]

# Add the JavaScript files
html_js_files = [
    "js/runllm-widget.js",
    "js/resizable-sidebar.js",
]

# Add custom CSS file for full-width layout
html_css_files = [
    "custom.css",
]

exclude_patterns += ["README.md", "README_vllm0.7.md"]

suppress_warnings = ["ref.duplicate", "ref.myst"]

================================================
FILE: verl_distillation/docs/data/transfer_queue.md
================================================
# TransferQueue Data System

Last updated: 09/28/2025.

This doc introduces [TransferQueue](https://github.com/TransferQueue/TransferQueue), an asynchronous streaming data management system for efficient post-training.

## Overview

TransferQueue is a high-performance data storage and transfer system with panoramic data visibility and streaming scheduling capabilities, optimized for efficient dataflow in post-training workflows.

TransferQueue offers **fine-grained, sample-level** data management capabilities, serving as a data gateway that decouples explicit data dependencies across computational tasks. This enables a divide-and-conquer approach, significantly simplifying the design of the algorithm controller.

## Components

### Control Plane: Panoramic Data Management

In the control plane, `TransferQueueController` tracks the **production status** and **consumption status** of each training sample as metadata. When all the required data fields are ready (i.e., written to the `TransferQueueStorage`), we know that this data sample can be consumed by downstream tasks. For consumption status, we record the consumption records for each computational task (e.g., `generate_sequences`, `compute_log_prob`, etc.). Therefore, even if different computational tasks require the same data field, they can consume the data independently without interfering with each other.
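
As a mental model, the controller's metadata can be pictured as per-sample production/consumption flags. The field and task names below are illustrative, not the actual TransferQueue schema:

```python
# Illustrative sketch of the per-sample metadata tracked by the controller.
sample_status = {
    "produced": {"prompt": True, "response": True, "old_log_prob": False},
    "consumed_by": {"generate_sequences": True, "compute_log_prob": False},
}

def ready_for(task: str, required_fields: list[str], status: dict) -> bool:
    """A sample is consumable by `task` once every required field has been
    produced and the task itself has not consumed the sample yet."""
    return all(status["produced"][f] for f in required_fields) and not status["consumed_by"][task]

# compute_log_prob needs prompt + response; both are ready and it has not run yet.
print(ready_for("compute_log_prob", ["prompt", "response"], sample_status))  # True
```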

> In the future, we plan to support **load-balancing** and **dynamic batching** capabilities in the control plane. Besides, we will support data management for disaggregated frameworks, where each rank manages data retrieval by itself rather than being coordinated by a single controller.

### Data Plane: Distributed Data Storage

In the data plane, `TransferQueueStorageSimpleUnit` serves as a naive storage unit based on CPU memory, responsible for the actual storage and retrieval of data. Each storage unit can be deployed on a separate node, allowing for distributed data management.

`TransferQueueStorageSimpleUnit` employs a 2D data structure as follows:

- Each row corresponds to a training sample, assigned a unique index within the corresponding global batch.
- Each column represents the input/output data fields for computational tasks.

This data structure design is motivated by the computational characteristics of the post-training process, where each training sample is generated in a relayed manner across task pipelines. It provides an accurate addressing capability, which allows fine-grained, concurrent data read/write operations in a streaming manner.
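
Conceptually, a storage unit holds a table like the one sketched below (a plain-Python illustration, not the actual implementation):

```python
# Rows: sample indices within the global batch. Columns: data fields.
storage = {
    0: {"prompt": "p0", "response": None, "old_log_prob": None},
    1: {"prompt": "p1", "response": None, "old_log_prob": None},
}

# A rollout worker can write row 0's "response" while another task
# concurrently reads row 1's "prompt" -- fine-grained, streaming access.
storage[0]["response"] = "generated text"
print(storage[0])
```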

> In the future, we plan to implement a **general storage abstraction layer** to support various storage backends. Through this abstraction, we hope to integrate high-performance storage solutions such as [MoonCakeStore](https://github.com/kvcache-ai/Mooncake) to support device-to-device data transfer through RDMA, further enhancing data transfer efficiency for large-scale data.

### User Interface: Asynchronous & Synchronous Client

The interaction workflow of the TransferQueue system is as follows:

1. A process sends a read request to the `TransferQueueController`.
2. `TransferQueueController` scans the production and consumption metadata for each sample (row) and dynamically assembles a micro-batch of metadata according to the load-balancing policy. This mechanism enables sample-level data scheduling.
3. The process retrieves the actual data from distributed storage units using the metadata provided by the controller.

To simplify the usage of TransferQueue, we have encapsulated this process into `AsyncTransferQueueClient` and `TransferQueueClient`. These clients provide both asynchronous and synchronous interfaces for data transfer, allowing users to easily integrate TransferQueue into their framework.

> In the future, we will provide a `StreamingDataLoader` interface for disaggregated frameworks as discussed in [RFC#2662](https://github.com/volcengine/verl/discussions/2662). Leveraging this abstraction, each rank can automatically get its own data like `DataLoader` in PyTorch. The TransferQueue system will handle the underlying data scheduling and transfer logic caused by different parallelism strategies, significantly simplifying the design of disaggregated frameworks.

## Show Cases

### General Usage

The primary interaction points are `AsyncTransferQueueClient` and `TransferQueueClient`, serving as the communication interface with the TransferQueue system.

Core interfaces:

- `(async_)get_meta(data_fields: list[str], batch_size: int, global_step: int, get_n_samples: bool, task_name: str) -> BatchMeta`
- `(async_)get_data(metadata: BatchMeta) -> TensorDict`
- `(async_)put(data: TensorDict, metadata: BatchMeta, global_step)`
- `(async_)clear(global_step: int)`

We will soon release a detailed tutorial and API documentation; a hedged usage sketch follows the verl example below.

### verl Example

The primary motivation for integrating TransferQueue into verl now is to **alleviate the data transfer bottleneck of the single-controller `RayPPOTrainer`**. Currently, all `DataProto` objects must be routed through `RayPPOTrainer`, resulting in a single-point bottleneck for the whole post-training system.

![verl_dataflow_DataProto](https://cdn.nlark.com/yuque/0/2025/jpeg/23208217/1758704289414-bcc54228-716b-4d4a-ad3b-f9ace6d10fcf.jpeg)

Leveraging TransferQueue, we separate experience data transfer from metadata dispatch by:

- Replacing `DataProto` with `BatchMeta` (metadata) and `TensorDict` (actual data) structures
- Preserving verl's original Dispatch/Collect logic via `BatchMeta` (maintaining single-controller debuggability)
- Accelerating data transfer via TransferQueue's distributed storage units

![verl_dataflow_TransferQueue](https://cdn.nlark.com/yuque/0/2025/jpeg/23208217/1758704301666-0807dc06-766c-4a2d-9cde-889a6bb56b34.jpeg)

You may refer to the [recipe](https://github.com/TransferQueue/TransferQueue/tree/dev/recipe/simple_use_case), where we mimic the verl usage in both async & sync scenarios.
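
Tying the above together, here is a hedged sketch of the asynchronous client flow using the core interfaces listed under General Usage. How the client itself is constructed is an assumption left open here; consult the upcoming API docs:

```python
import asyncio

async def fetch_micro_batch(client):  # client: AsyncTransferQueueClient
    # 1) Ask the controller for a micro-batch worth of metadata.
    meta = await client.async_get_meta(
        data_fields=["prompt", "response"],
        batch_size=8,
        global_step=0,
        get_n_samples=False,
        task_name="compute_log_prob",
    )
    # 2) Fetch the actual tensors from the distributed storage units.
    batch = await client.async_get_data(meta)  # returns a TensorDict
    return meta, batch

# Once a client is constructed: asyncio.run(fetch_micro_batch(client))
```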

## Citation

Please kindly cite our paper if you find this repo useful: ```bibtex @article{han2025asyncflow, title={AsyncFlow: An Asynchronous Streaming RL Framework for Efficient LLM Post-Training}, author={Han, Zhenyu and You, Ansheng and Wang, Haibo and Luo, Kui and Yang, Guang and Shi, Wenqi and Chen, Menglong and Zhang, Sicheng and Lan, Zeshun and Deng, Chunshi and others}, journal={arXiv preprint arXiv:2507.01663}, year={2025} } ``` ================================================ FILE: verl_distillation/docs/examples/config.rst ================================================ .. _config-explain-page: Config Explanation =================== Last updated: 06/18/2025. ppo_trainer.yaml for RL FSDP Backend ------------------------------------- Data ~~~~ .. code:: yaml data: tokenizer: null train_files: ~/data/rlhf/gsm8k/train.parquet val_files: ~/data/rlhf/gsm8k/test.parquet train_max_samples: -1 # set to -1 to use full dataset val_max_samples: -1 # set to -1 to use full dataset prompt_key: prompt max_prompt_length: 512 max_response_length: 512 train_batch_size: 1024 return_raw_input_ids: False # This should be set to true when the tokenizer between policy and rm differs return_raw_chat: False return_full_prompt: False shuffle: True seed: 42 filter_overlong_prompts: False filter_overlong_prompts_workers: 1 truncation: error image_key: images trust_remote_code: True custom_cls: path: null name: null - ``data.train_files``: Training set parquet. Can be a list or a single file. The program will read all files into memory, so it can't be too large (< 100GB). The path can be either a local path or an HDFS path. For HDFS paths, we provide utils to download them to DRAM and convert the HDFS path to a local path. - ``data.val_files``: Validation parquet. Can be a list or a single file. - ``data.train_max_samples``: Maximum number of samples to use from the training dataset. Set to -1 to use the full dataset. - ``data.val_max_samples``: Maximum number of samples to use from the validation dataset. Set to -1 to use the full dataset. - ``data.prompt_key``: The field in the dataset where the prompt is located. Default is 'prompt'. - ``data.max_prompt_length``: Maximum prompt length. All prompts will be left-padded to this length. An error will be reported if the length is exceeded. - ``data.max_response_length``: Maximum response length. Rollout in RL algorithms (e.g. PPO) generates up to this length. - ``data.train_batch_size``: Batch size sampled for one training iteration of different RL algorithms. - ``data.return_raw_input_ids``: Whether to return the original input_ids without adding the chat template. This is mainly used to accommodate situations where the reward model's chat template differs from the policy's. The data needs to be decoded first, then the RM's chat template is applied. If using a model-based RM, and the policy and RM chat_templates are different, this flag needs to be set. - ``data.return_raw_chat``: Whether to return the original chat (prompt) without applying the chat template. - ``data.return_full_prompt``: Whether to return the full prompt with the chat template. - ``data.shuffle``: Whether to shuffle the data in the dataloader. - ``data.seed``: An integer seed to use when shuffling the data. If not set or set to ``null``, the data shuffling will not be seeded, resulting in a different data order on each run. - ``data.filter_overlong_prompts``: Whether to filter out overlong prompts. Default is False (no filtering). - ``data.filter_overlong_prompts_workers``: For large-scale datasets, filtering overlong prompts could be time-consuming.
You can set ``filter_overlong_prompts_workers`` to use multiprocessing to speed this up. Default is 1. - ``data.truncation``: Truncate the input_ids or prompt length if they exceed max_prompt_length. Default is 'error', which does not allow exceeding max_prompt_length; users should increase max_prompt_length if this error is thrown. You can also set ``left``, ``right`` or ``middle``. When ``middle`` is selected, the logic splits the allowed max length roughly in half and keeps the head and tail of the sequence, effectively discarding the middle section. - ``data.image_key``: The field in the multi-modal dataset where the image is located. Default is 'images'. - ``data.trust_remote_code``: If the remote tokenizer has a Python file, we can use this field to allow using the remote tokenizer. For example: moonshotai/Moonlight-16B-A3B-Instruct Customized Dataset ~~~~~~~~~~~~~~~~~~~~~~~~~~ Customized dataset extension is implemented for the SFT trainer and can be extended to other trainers with similar changes. .. code:: yaml custom_cls: path: null name: null - ``data.custom_cls.path``: The path to the file containing your customized dataset class. If not specified, the pre-implemented dataset will be used. - ``data.custom_cls.name``: The name of the dataset class within the specified file. Actor/Rollout/Reference Policy ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. code:: yaml actor_rollout_ref: hybrid_engine: True model: path: ~/models/deepseek-llm-7b-chat external_lib: null override_config: attn_implementation: flash_attention_2 # or eager, sdpa - attention implementation override model_config: {} moe_config: # Megatron only, can adjust moe configuration freeze_moe_router: False # Megatron only, can freeze moe router (no grad) enable_gradient_checkpointing: False enable_activation_offload: False trust_remote_code: False use_remove_padding: False actor: strategy: fsdp # This is for backward-compatibility ppo_mini_batch_size: 256 ppo_micro_batch_size: null # will be deprecated, use ppo_micro_batch_size_per_gpu ppo_micro_batch_size_per_gpu: 8 use_dynamic_bsz: False ppo_max_token_len_per_gpu: 16384 # n * ${data.max_prompt_length} + ${data.max_response_length} grad_clip: 1.0 clip_ratio: 0.2 entropy_coeff: 0.0 use_kl_loss: False # True for GRPO # Rollout Importance Sampling (corrects distribution mismatch between rollout and training) rollout_is: False # Enable IS correction rollout_is_threshold: null # Upper threshold for IS weights (null to disable) rollout_is_threshold_lower: null # Lower threshold (null = auto 1/upper) rollout_is_level: token # Aggregation: token/sequence/geometric rollout_is_mode: truncate # Bounding: truncate/mask rollout_is_veto_threshold: null # Catastrophic outlier threshold (null to disable) use_torch_compile: True # False to disable torch compile kl_loss_coef: 0.001 # for grpo kl_loss_type: low_var_kl # for grpo ppo_epochs: 1 data_loader_seed: null shuffle: False ulysses_sequence_parallel_size: 1 # sp size optim: lr: 1e-6 lr_warmup_steps: -1 # Prioritized. Negative values mean delegating to lr_warmup_steps_ratio. lr_warmup_steps_ratio: 0.
# the total steps will be injected during runtime min_lr_ratio: 0.0 # only used with cosine lr scheduler, default to 0.0 num_cycles: 0.5 # only used with cosine lr scheduler, default to 0.5 lr_scheduler_type: constant # select from constant/cosine total_training_steps: -1 # must be overridden by the program fsdp_config: wrap_policy: # transformer_layer_cls_to_wrap: None min_num_params: 0 param_offload: False optimizer_offload: False fsdp_size: -1 checkpoint: # What to include in saved checkpoints # with 'hf_model' you can save the whole model in hf format; for now, only sharded model checkpoints are used to save space save_contents: ['model', 'optimizer', 'extra'] # For more flexibility, you can specify the contents to load from the checkpoint. load_contents: ${actor_rollout_ref.actor.checkpoint.save_contents} ref: fsdp_config: param_offload: False wrap_policy: # transformer_layer_cls_to_wrap: None min_num_params: 0 log_prob_micro_batch_size: null # will be deprecated, use log_prob_micro_batch_size_per_gpu log_prob_micro_batch_size_per_gpu: 16 log_prob_use_dynamic_bsz: ${actor_rollout_ref.actor.use_dynamic_bsz} log_prob_max_token_len_per_gpu: ${actor_rollout_ref.actor.ppo_max_token_len_per_gpu} ulysses_sequence_parallel_size: ${actor_rollout_ref.actor.ulysses_sequence_parallel_size} # sp size rollout: name: vllm temperature: 1.0 top_k: -1 # 0 for hf rollout, -1 for vllm rollout top_p: 1 prompt_length: ${data.max_prompt_length} # not used for open source response_length: ${data.max_response_length} # for vllm rollout dtype: bfloat16 # should align with FSDP gpu_memory_utilization: 0.5 ignore_eos: False enforce_eager: True free_cache_engine: True load_format: dummy_dtensor tensor_model_parallel_size: 2 max_num_batched_tokens: 8192 max_num_seqs: 1024 log_prob_micro_batch_size: null # will be deprecated, use log_prob_micro_batch_size_per_gpu log_prob_micro_batch_size_per_gpu: 16 log_prob_use_dynamic_bsz: ${actor_rollout_ref.actor.use_dynamic_bsz} log_prob_max_token_len_per_gpu: ${actor_rollout_ref.actor.ppo_max_token_len_per_gpu} # for hf rollout do_sample: True engine_kwargs: # inference engine parameters, please refer to the vllm/sglang official docs for details vllm: {} sglang: {} n: 1 # for each prompt, sample n responses (i.e. num sample times). set it to values > 1 for grpo, rloo calculate_log_probs: False # set to True for computing log probs via rollouts val_kwargs: # sampling parameters for validation top_k: -1 # 0 for hf rollout, -1 for vllm rollout top_p: 1.0 temperature: 0 n: 1 do_sample: False # default eager for validation agent: custom_async_server: # Use custom async server implementation for rollout path: null name: null **Common config for actor, rollout and reference model** - ``actor_rollout_ref.hybrid_engine``: Whether it's a hybrid engine; currently only the hybrid engine is supported. - ``actor_rollout_ref.model.path``: Huggingface model path. This can be either a local path or an HDFS path. For HDFS paths, we provide utils to download them to DRAM and convert the HDFS path to a local path. - ``actor_rollout_ref.model.external_lib``: Additional Python packages that need to be imported. Used to register models or tokenizers into the Huggingface system. - ``actor_rollout_ref.model.override_config``: Used to override some of the model's original configurations. Common overrides include: - ``attn_implementation``: Override the attention implementation. Default is ``flash_attention_2``. Supported values: ``flash_attention_2``, ``eager``, ``sdpa``. Use ``eager`` for debugging or compatibility issues.
See :ref:`attention-implementation-override` for detailed usage. - ``actor_rollout_ref.model.enable_gradient_checkpointing``: FSDP only; decides whether to enable gradient checkpointing for the actor. Megatron uses recompute options in ``override_transformer_config`` to set this. - ``actor_rollout_ref.model.enable_activation_offload``: Whether to enable activation offloading for the actor. - ``actor_rollout_ref.model.trust_remote_code``: Whether to enable loading a remote-code model. - ``actor_rollout_ref.model.use_fused_kernels``: Whether to use fused kernels in the model. If set to True, the following parameters will be used. - ``actor_rollout_ref.model.fused_kernel_options.impl_backend``: The implementation backend for fused kernels. Options: "triton" or "torch". Default is "torch". In Megatron, we only support "triton" as the implementation backend, so there is no need for this option. - ``actor_rollout_ref.model.use_remove_padding``: Whether to use remove-padding in the model. If set to True, the model will remove padding tokens in the input_ids and response_ids. This helps a lot in improving model running efficiency. **Actor model** - ``actor_rollout_ref.actor.strategy``: fsdp or megatron. In this example, we use the fsdp backend. - ``actor_rollout_ref.actor.ppo_mini_batch_size``: One sample is split into multiple sub-batches with batch_size=ppo_mini_batch_size for PPO updates. The ppo_mini_batch_size is a global num across all workers/gpus. - ``actor_rollout_ref.actor.ppo_micro_batch_size``: [Will be deprecated, use ppo_micro_batch_size_per_gpu] Similar to gradient accumulation, the micro_batch_size_per_gpu for one forward pass, trading speed for GPU memory. The value represents the global view. - ``actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu``: Similar to gradient accumulation, the micro_batch_size_per_gpu for one forward pass, trading speed for GPU memory. The value represents the local num per gpu. - ``actor_rollout_ref.actor.grad_clip``: Gradient clipping for actor updates. - ``actor_rollout_ref.actor.use_kl_loss``: Whether to use KL loss in the actor. When used, we do not apply KL in the reward function. - ``actor_rollout_ref.actor.clip_ratio``: PPO clip ratio. - ``actor_rollout_ref.actor.use_torch_compile``: Whether to use torch compile in the actor. - ``actor_rollout_ref.actor.entropy_coeff``: The weight of entropy when calculating PPO loss. The default value was changed to 0.0 in v0.3.x. - ``actor_rollout_ref.actor.ppo_epochs``: Number of epochs for PPO updates on one set of sampled data. - ``actor_rollout_ref.actor.data_loader_seed``: Since torch 2.6.0, the Megatron backend can get a wrong seed generated by PyTorch across cp ranks, causing data misalignment between these ranks, so we manually set the seed to avoid hanging issues. If ``actor_rollout_ref.actor.shuffle`` is not null, this must be set. - ``actor_rollout_ref.actor.shuffle``: Whether to shuffle data when there are multiple epochs. - ``actor_rollout_ref.actor.optim``: Actor's optimizer parameters. - ``actor_rollout_ref.actor.fsdp_config``: FSDP config for actor training. - ``wrap_policy``: FSDP wrap policy. By default, it uses Huggingface's wrap policy, i.e., wrapping by DecoderLayer. - No need to set transformer_layer_cls_to_wrap, so we comment it out. - ``*_offload``: Whether to enable parameter, gradient and optimizer offload. - Trading speed for GPU memory. - ``actor_rollout_ref.actor.use_kl_loss``: Whether to enable KL loss. Default is False. - ``actor_rollout_ref.actor.kl_loss_coef``: The coefficient of KL loss.
Default is 0.001. - ``actor_rollout_ref.actor.kl_loss_type``: Supports ``kl`` (``k1``), ``abs``, ``mse`` (``k2``), ``low_var_kl`` (``k3``) and ``full``. Appending ``+`` at the end (e.g., ``k1+`` and ``k3+``) uses straight-through to employ ``k2`` for unbiased gradient estimation, regardless of the KL value estimation (see https://github.com/volcengine/verl/pull/2953#issuecomment-3162113848 for more details). This controls how to calculate the KL divergence between actor and reference policy. For specific options, refer to `kl_penalty()` in `core_algos.py `_ . See this blog post for a detailed analysis: http://joschu.net/blog/kl-approx.html - ``actor_rollout_ref.actor.checkpoint``: The configurations of the checkpoint function in the actor. - ``save_contents``: The contents to save in the checkpoint. By default, we save the model, optimizer and extra information in the checkpoint. The extra information currently includes RNG states; lr_scheduler is supported for FSDP, and Megatron opt_param_scheduler support is coming soon. We do not store hf_model in the checkpoint by default, but we provide a tool in ``scripts/model_merge.py`` to convert the checkpoint format to hf format. - ``load_contents``: The contents to load from the checkpoint; you can specify different checkpoint loading contents. By default, it is the same as ``save_contents``. **Reference Model** The reference model will be enabled when ``actor.use_kl_loss`` and/or ``algorithm.use_kl_in_reward`` is/are True. - ``actor_rollout_ref.ref``: FSDP config, same as actor. **For models larger than 7B, it's recommended to turn on offload for ref by default** - ``actor_rollout_ref.ref.log_prob_micro_batch_size``: [Will be deprecated, use log_prob_micro_batch_size_per_gpu] The batch size for one forward pass in the computation of ``ref_log_prob``. The value represents the global num. - ``actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu``: The batch size for one forward pass in the computation of ``ref_log_prob``. The value represents the local num per gpu. **Rollout Model** - ``actor_rollout_ref.rollout.name``: hf/vllm/sglang. - Rollout (auto-regressive) parameters. The key should be equal to the property name in vLLM's ``SamplingParams``. - ``temperature``, ``top_k``, ``top_p`` and others: Sampling parameters in ``SamplingParams``. - ``actor_rollout_ref.rollout.dtype``: Rollout model parameter type. This should align with the actor model parameter type in the FSDP/Megatron backend. - ``actor_rollout_ref.rollout.gpu_memory_utilization``: - For vLLM v0.7.0 and later: The fraction of **total** GPU memory to be used for the vLLM instance. - For SGLang: Corresponds to ``mem_fraction_static``, the fraction of the free GPU memory used for **static** memory like model weights and KV cache. - ``actor_rollout_ref.rollout.tensor_model_parallel_size``: TP size for rollout. Only effective for vllm. - ``actor_rollout_ref.rollout.log_prob_micro_batch_size``: [Will be deprecated, use log_prob_micro_batch_size_per_gpu] The batch size for one forward pass in the computation of ``log_prob``. The value represents the global num. - ``actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu``: Micro batch size per gpu (the batch size for one forward pass) for recalculating ``log_prob``. The value represents the local num per gpu. - ``actor_rollout_ref.rollout.do_sample``: Whether to sample during training rollout. If set to False, the rollout model will perform greedy sampling. - ``actor_rollout_ref.rollout.val_kwargs``: Sampling parameters used specifically during validation.
- ``top_k``: Top-k sampling parameter. Defaults to -1 for vLLM rollout or 0 for HF rollout. - ``top_p``: Top-p sampling parameter. Default is 1.0 (disabled). - ``temperature``: Sampling temperature. Default is 0 (deterministic greedy). - ``n``: Number of responses to generate during validation. Default is 1. - ``do_sample``: Whether to use sampling during validation. Default is False for deterministic outputs. When set to True, the rollout will use the ``actor_rollout_ref.rollout.val_kwargs`` parameters (top_k, top_p, temperature) to control the sampling behavior. - ``actor_rollout_ref.rollout.engine_kwargs.vllm``: Extra vllm engine args; please refer to the vLLM official docs for details. - ``actor_rollout_ref.rollout.engine_kwargs.sglang``: Extra sglang engine args; please refer to the SGLang official docs for details. - ``actor_rollout_ref.rollout.ignore_eos``: Whether to ignore the EOS token and continue generating tokens after the EOS token is generated. - ``actor_rollout_ref.rollout.free_cache_engine``: Offload the KVCache after the rollout generation stage. Default is True. When set to True, for vllm v0.5.4 and v0.6.3, we need to disable the usage of CUDAGraph (set ``enforce_eager`` to True). - ``actor_rollout_ref.rollout.enforce_eager``: Whether to use CUDAGraph in vLLM generation. Default is set to True to disable CUDAGraph. - ``actor_rollout_ref.rollout.load_format``: Which weight loader to use to load the actor model weights into the rollout model. - ``auto``: Use Megatron weight loader. - ``megatron``: Use Megatron weight loader. Deployed with the Megatron backend. The input model ``state_dict()`` is already partitioned along the TP dimension and already gathered along the PP dimension. This weight loader requires that the rollout model and actor model's parameter shapes and names be identical. - ``dtensor``: Default solution when using the Huggingface weight loader. Deployed with the FSDP backend when the state_dict_type is ``StateDictType.SHARDED_STATE_DICT``. We recommend using this weight loader. - ``hf``: Use the Huggingface weight loader. Deployed with the FSDP backend when the state_dict_type is ``StateDictType.FULL_STATE_DICT``. This solution doesn't need to rewrite the weight loader for each model implemented in vLLM, but it results in larger peak memory usage. - ``dummy_hf``, ``dummy_megatron``, ``dummy_dtensor``: Random initialization. .. note:: **NOTE**: In this config field, users only need to select from ``dummy_megatron``, ``dummy_dtensor``, ``dummy_hf`` for rollout initialization, and our hybrid engine will select the corresponding weight loader (i.e., ``megatron``, ``dtensor``, ``hf``) during actor/rollout weight synchronization. Megatron Optimizer and Optimizer Parameter Scheduler ____________________________________________________ .. code:: yaml optim: optimizer: adam lr: 1e-6 clip_grad: 1.0 total_training_steps: -1 # must be overridden by the program lr_warmup_init: 0.0 # initial learning rate for warmup, default to 0.0 lr_warmup_steps: -1 # Prioritized. Negative values mean delegating to lr_warmup_steps_ratio. lr_warmup_steps_ratio: 0.
# the total steps will be injected during runtime lr_decay_steps: null lr_decay_style: constant # select from constant/linear/cosine/inverse_square_root min_lr: 0.0 # minimum learning rate, default to 0.0 weight_decay: 0.01 weight_decay_incr_style: constant # select from constant/linear/cosine lr_wsd_decay_style: exponential # select from constant/exponential/cosine lr_wsd_decay_steps: null use_checkpoint_opt_param_scheduler: False # use checkpoint optimizer parameter scheduler Notice that there are some differences in APIs between the Megatron optimizer and the FSDP optimizer. - The Megatron optimizer scheduler names the period after lr_warmup as lr_decay_steps, so the ``lr_scheduler_type`` actually means the style of lr decay after warmup. - The Megatron optimizer also supports a weight-decay decay mechanism. - ``use_checkpoint_opt_param_scheduler`` determines whether to use the checkpoint optimizer parameter scheduler. If set to True, the optimizer parameter scheduler will be saved in the checkpoint and loaded from the checkpoint when resuming training. For learning rate decay, the original Megatron pretrain default option for ``lr_decay_style`` is ``linear``, meaning that the learning rate will be linearly decayed from the initial learning rate to ``min_lr`` within ``lr_decay_steps``. However, in verl, to align with FSDP's default behavior, we set the default ``lr_decay_style`` to ``constant``, meaning that the learning rate is kept constant after the warmup stage. Critic Model ~~~~~~~~~~~~ Most parameters for the Critic are similar to those of the Actor Model. Reward Model ~~~~~~~~~~~~ .. code:: yaml reward_model: enable: False model: input_tokenizer: ${actor_rollout_ref.model.path} # set this to null if the chat template is identical path: ~/models/Anomy-RM-v0.1 external_lib: ${actor_rollout_ref.model.external_lib} trust_remote_code: False fsdp_config: min_num_params: 0 param_offload: False micro_batch_size_per_gpu: 16 max_length: null reward_manager: naive - ``reward_model.enable``: Whether to enable the reward model. If False, we compute the reward only with the user-defined reward functions. In the GSM8K and Math examples, we disable the reward model. For the RLHF alignment example using full_hh_rlhf, we utilize the reward model to assess the responses. If False, the following parameters are not effective. - ``reward_model.model`` - ``input_tokenizer``: Input tokenizer. If the reward model's chat template is inconsistent with the policy's, we need to first decode to plaintext, then apply the RM's chat_template, and then score with the RM. If the chat_templates are consistent, it can be set to null. - ``path``: RM's HDFS path or local path. Note that the RM only supports AutoModelForSequenceClassification. Other model types need to define their own RewardModelWorker and pass it from the code. - ``trust_remote_code``: Whether to enable loading a remote-code model; defaults to False. - ``reward_model.reward_manager``: Reward Manager. This defines the mechanism for computing rule-based rewards and handling different reward sources. Default is ``naive``. If all verification functions are multiprocessing-safe, the reward manager can be set to ``prime`` for parallel verification. Customized Reward Function ~~~~~~~~~~~~~~~~~~~~~~~~~~ .. code:: yaml custom_reward_function: path: null name: compute_score - ``custom_reward_function.path``: The path to the file containing your customized reward function. If not specified, pre-implemented reward functions will be used.
- ``custom_reward_function.name`` (optional): The name of the reward function within the specified file. Default is 'compute_score'. Algorithm ~~~~~~~~~ .. code:: yaml algorithm: gamma: 1.0 lam: 1.0 adv_estimator: gae use_kl_in_reward: False kl_penalty: kl # how to estimate kl divergence kl_ctrl: type: fixed kl_coef: 0.005 horizon: 10000 target_kl: 0.1 # Rollout Importance Sampling rollout_is: False rollout_is_threshold: null rollout_is_threshold_lower: null rollout_is_level: token rollout_is_mode: truncate rollout_is_veto_threshold: null # Disabled by default - ``gamma``: Discount factor. - ``lam``: Trade-off between bias and variance in the GAE estimator. - ``adv_estimator``: Supports ``gae``, ``grpo``, ``reinforce_plus_plus``, ``reinforce_plus_plus_baseline``, ``rloo``, ``rloo_vectorized``, ``grpo_vectorized``. - ``use_kl_in_reward``: Whether to enable the in-reward KL penalty. Default is False. - ``kl_penalty``: Supports ``kl``, ``abs``, ``mse``, ``low_var_kl`` and ``full``. This controls how to calculate the KL divergence between actor and reference policy. For specific options, refer to `kl_penalty()` in `core_algos.py `_ . - ``kl_ctrl``: Config for the in-reward kl_penalty controller. - ``kl_coef``: The (initial) coefficient of the in-reward kl_penalty. Default is 0.001. - ``type``: 'fixed' for FixedKLController and 'adaptive' for AdaptiveKLController. - ``horizon`` and ``target_kl``: See the source code of AdaptiveKLController for details. - ``rollout_is``: Whether to enable rollout importance sampling correction. Default is False. - ``rollout_is_threshold``: Upper threshold for IS weights. Set to ``null`` to disable IS completely. - ``rollout_is_threshold_lower``: Lower threshold for IS weights. If ``null``, defaults to the reciprocal of the upper threshold (1/upper). - ``rollout_is_level``: Aggregation level: ``token`` (biased), ``sequence`` (unbiased), or ``geometric`` (experimental). - ``rollout_is_mode``: Bounding mode: ``truncate`` (cap upper only) or ``mask`` (zero outside bounds). - ``rollout_is_veto_threshold``: Per-token veto threshold for catastrophic outliers. Default is null (disabled). Note: Rollout IS requires setting ``actor_rollout_ref.rollout.calculate_log_probs=True``. Trainer ~~~~~~~ .. code:: yaml trainer: total_epochs: 30 project_name: verl_examples experiment_name: gsm8k logger: ['console', 'wandb'] log_val_generations: 0 nnodes: 1 n_gpus_per_node: 8 save_freq: -1 val_before_train: True test_freq: 2 critic_warmup: 0 default_hdfs_dir: null # hdfs checkpoint path default_local_dir: checkpoints/${trainer.project_name}/${trainer.experiment_name} # local checkpoint path resume_mode: auto # or disable or resume_path if resume_from_path is set resume_from_path: null remove_previous_ckpt_in_save: False del_local_ckpt_after_load: False ray_wait_register_center_timeout: 300 - ``trainer.total_epochs``: Number of epochs in training. - ``trainer.project_name``: For wandb, swanlab, mlflow. - ``trainer.experiment_name``: For wandb, swanlab, mlflow. - ``trainer.logger``: Supports console and wandb, swanlab, mlflow, tensorboard, trackio. - ``trainer.log_val_generations``: The number of generations logged during validation (default ``0``). - ``trainer.nnodes``: Number of nodes used in the training. - ``trainer.n_gpus_per_node``: Number of GPUs per node. - ``trainer.save_freq``: The frequency (by iteration) to save checkpoints of the actor and critic model. - ``trainer.val_before_train``: Whether to run validation before training. - ``trainer.test_freq``: The validation frequency (by iteration).
- ``trainer.critic_warmup``: The number of iterations to train the critic model before actual policy learning. - ``trainer.resume_mode``: The mode for resuming training. Supports ``disable``, ``auto`` and ``resume_path``. If set to ``auto`` (the default), the program will automatically resume from the latest checkpoint in ``default_local_dir``. If set to ``resume_path``, the program will resume from the path specified in ``resume_from_path``. - ``trainer.resume_from_path``: The path to resume training from. Only effective when ``resume_mode`` is set to ``resume_path``. - ``trainer.remove_previous_ckpt_in_save``: Whether to remove previous checkpoints in the save directory. Default is False. - ``trainer.del_local_ckpt_after_load``: Whether to delete local checkpoints after loading them. Default is False. - ``trainer.ray_wait_register_center_timeout``: The timeout for waiting for the ray register center to be ready. Default is 300 seconds. This figure illustrates how the configurations affect the training. https://excalidraw.com/#json=pfhkRmiLm1jnnRli9VFhb,Ut4E8peALlgAUpr7E5pPCA .. image:: https://github.com/user-attachments/assets/16aebad1-0da6-4eb3-806d-54a74e712c2d evaluation.yaml --------------- Data ~~~~ .. code:: yaml data: path: /tmp/math_Qwen2-7B-Instruct.parquet prompt_key: prompt response_key: responses data_source_key: data_source reward_model_key: reward_model - ``data.path``: Path to the dataset file (Parquet format). - ``data.prompt_key``: The field in the dataset where the prompt is located. Default is 'prompt'. - ``data.response_key``: The key that holds the generated responses. This should be a list of strings representing the responses. Default is 'responses'. - ``data.data_source_key``: This is used to separate metric calculations for different data sources, ensuring that metrics are calculated independently for each source. - ``data.reward_model_key``: The key that holds the reference answers. These reference answers typically serve as the ground truth or test cases for the task. Customized Reward Function ~~~~~~~~~~~~~~~~~~~~~~~~~~ .. code:: yaml custom_reward_function: path: null name: compute_score - ``custom_reward_function.path``: The path to the file containing your customized reward function. If not specified, pre-implemented reward functions will be used. - ``custom_reward_function.name`` (optional): The name of the reward function within the specified file. Default is 'compute_score'. sft_trainer.yaml for SFT FSDP Backend -------------------------------------- Optim ~~~~~~~ .. code:: yaml optim: optimizer: AdamW optimizer_impl: torch.optim lr: 1e-5 weight_decay: 0.01 lr_warmup_steps_ratio: 0.1 clip_grad: 1.0 lr_scheduler: cosine override_optimizer_config: null - ``optimizer``: Optimizer class name (e.g., ``"AdamW"``, ``"AdamW8bit"``, ``"_AdamW"``). The class name as it appears in the module. - ``optimizer_impl``: Module path to import the optimizer from (e.g., ``"torch.optim"``, ``"torchao.optim"``, ``"bitsandbytes.optim"``). - ``optim.lr``: Learning rate for the optimizer. - ``optim.weight_decay``: Weight decay for the optimizer. - ``optim.lr_warmup_steps_ratio``: Ratio of warmup steps to total training steps. - ``optim.clip_grad``: Gradient clipping value. - ``optim.lr_scheduler``: Learning rate scheduler type. Options: - ``cosine``: Cosine learning rate scheduler with warmup (default). - ``wsd``: Warmup-Stable-Decay scheduler that provides a stable learning rate phase between the warmup and decay phases.
- ``override_optimizer_config``: Dictionary of additional optimizer-specific keyword arguments. For example, to use ``torchao.optim``'s ``_AdamW`` with BF16 stochastic rounding: ``{"bf16_stochastic_round": true}`` Model ~~~~~~~~~~~~ Most parameters for Model are similar to those of the Reward Model. .. code:: yaml model: partial_pretrain: ~/models/gemma-1.1-7b-it fsdp_config: model_dtype: fp32 wrap_policy: min_num_params: 0 cpu_offload: False offload_params: False external_lib: null enable_gradient_checkpointing: False trust_remote_code: False lora_rank: 0 lora_alpha: 16 target_modules: all-linear use_liger: False - ``partial_pretrain``: HDFS path or local path for the pretrained model. - ``fsdp_config`` - ``model_dtype``: Model parameter type, defaults to ``fp32``. Supports: ``bf16``, ``fp16``, ``fp32``. - ``cpu_offload``: Whether to enable CPU offloading for FSDP. If True, offload_params will be used as an argument. - ``offload_params``: Whether to offload parameters to CPU when not involved in computation. If True, this offloads gradients to CPU as well, meaning that the optimizer step runs on CPU. - ``lora_rank``: The rank of the LoRA model, defaults to 0. If ``lora_rank`` > 0, we will train LoRA modules instead of tuning the full model. - ``lora_alpha``: The alpha parameter for LoRA scaling, defaults to 16. - ``target_modules``: The names of the modules to apply the adapter to, defaults to ``all-linear``. See `peft docs `_ for details. - ``use_liger``: Whether to enable the Liger kernel, defaults to False. If True, we apply the Liger kernel to the model (depends on `liger-kernel`). ================================================ FILE: verl_distillation/docs/examples/gsm8k_example.rst ================================================ GSM8K Example ============= Last updated: 03/25/2025. Introduction ------------ In this example, we train an LLM to tackle the GSM8k task. Paper: https://arxiv.org/pdf/2110.14168 Dataset: https://huggingface.co/datasets/gsm8k Note that the original paper mainly focuses on training a verifier (a reward model) to solve math problems via Best-of-N sampling. In this example, we train an RLHF agent using a rule-based reward model. Dataset Introduction -------------------- GSM8k is a math problem dataset. The prompt is an elementary school problem. The LLM model is required to answer the math problem. The training set contains 7473 samples and the test set contains 1319 samples. **An example** Prompt Katy makes coffee using teaspoons of sugar and cups of water in the ratio of 7:13. If she used a total of 120 teaspoons of sugar and cups of water, calculate the number of teaspoonfuls of sugar she used. Solution The total ratio representing the ingredients she used to make the coffee is 7+13 = <<7+13=20>>20. Since the fraction representing the number of teaspoons she used is 7/20, she used 7/20*120 = <<7/20*120=42>>42 #### 42 Step 1: Prepare dataset ----------------------- .. code:: bash cd examples/data_preprocess python3 gsm8k.py --local_save_dir ~/data/gsm8k Step 2: Download Model ---------------------- There are three ways to prepare the model checkpoints for post-training: - Download the required models from huggingface or modelscope .. code:: bash huggingface-cli download deepseek-ai/deepseek-math-7b-instruct --local-dir ~/models/deepseek-math-7b-instruct --local-dir-use-symlinks False # or modelscope download --model deepseek-ai/deepseek-math-7b-instruct --local_dir ~/models/deepseek-math-7b-instruct - Already have your model stored in a local directory or HDFS path.
- Also, you can directly use the model name from huggingface (e.g., deepseek-ai/deepseek-math-7b-instruct) in the ``actor_rollout_ref.model.path`` and ``critic.model.path`` fields in the run script. You can also download models from modelscope by setting the environment variable ``VERL_USE_MODELSCOPE=True``. See examples/ppo_trainer/run_deepseek7b_llm_modelscope.sh for an example. Note that users should prepare checkpoints for the actor, critic and reward model. [Optional] Step 3: SFT your Model --------------------------------- We provide an SFT Trainer using PyTorch FSDP in `fsdp_sft_trainer.py `_. Users can customize their own SFT script using our FSDP SFT Trainer. We also provide various training scripts for SFT on the GSM8K dataset in the `gsm8k sft directory `_. .. code:: shell set -x torchrun -m verl.trainer.fsdp_sft_trainer \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.prompt_key=question \ data.response_key=answer \ data.micro_batch_size_per_gpu=8 \ model.partial_pretrain=deepseek-ai/deepseek-coder-6.7b-instruct \ trainer.project_name=gsm8k-sft \ trainer.experiment_name=gsm8k-sft-deepseek-coder-6.7b-instruct \ trainer.total_epochs=4 \ trainer.logger='["console","wandb"]' If you use AMD GPUs (ROCm kernel), you need to add the following environment variables into the run script: .. code-block:: bash export HIP_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 export ROCR_VISIBLE_DEVICES=$HIP_VISIBLE_DEVICES export CUDA_VISIBLE_DEVICES=$HIP_VISIBLE_DEVICES Step 4: Perform PPO training with your model on GSM8K Dataset ------------------------------------------------------------- - Prepare your own run.sh script. Here's an example for the GSM8k dataset and the deepseek-llm-7b-chat model. - Users can replace ``data.train_files``, ``data.val_files``, ``actor_rollout_ref.model.path`` and ``critic.model.path`` based on their environment. - See :doc:`config` for a detailed explanation of each config field. **Reward Model/Function** We use a rule-based reward model. We force the model to produce a final answer following four "#" characters, as shown in the solution. We extract the final answer from both the solution and the model's output using regular expression matching. We compare them and assign a reward of 1 for a correct answer, 0.1 for an incorrect answer, and 0 for no answer. **Training Script** The training script examples for the FSDP and Megatron-LM backends are stored in the examples/ppo_trainer directory. .. code:: bash cd ../ppo_trainer bash run_deepseek7b_llm.sh The script of run_deepseek7b_llm.sh: ..
code:: bash set -x python3 -m verl.trainer.main_ppo \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=1024 \ data.max_prompt_length=512 \ data.max_response_length=512 \ actor_rollout_ref.model.path=deepseek-ai/deepseek-llm-7b-chat \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=16 \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.rollout.tensor_model_parallel_size=4 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ critic.optim.lr=1e-5 \ critic.model.use_remove_padding=True \ critic.model.path=deepseek-ai/deepseek-llm-7b-chat \ critic.model.enable_gradient_checkpointing=True \ critic.ppo_micro_batch_size_per_gpu=32 \ critic.model.fsdp_config.param_offload=False \ critic.model.fsdp_config.optimizer_offload=False \ algorithm.kl_ctrl.kl_coef=0.001 \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_example_gsm8k' \ trainer.experiment_name='deepseek_llm_7b_function_rm' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=1 \ trainer.total_epochs=15 $@ If you use AMD GPUs (ROCm kernel), you need to add the following environment variables into the run script: .. code-block:: bash export HIP_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 export ROCR_VISIBLE_DEVICES=$HIP_VISIBLE_DEVICES export CUDA_VISIBLE_DEVICES=$HIP_VISIBLE_DEVICES If you encounter any issues using AMD GPUs to run VeRL, feel free to contact me - `Yusheng Su `_. ================================================ FILE: verl_distillation/docs/examples/multi_modal_example.rst ================================================ Multi-Modal Example Architecture ================================= Last updated: 04/28/2025. Introduction ------------ verl now supports multi-modal training. You can use fsdp and vllm/sglang to start a multi-modal RL task. Megatron support is also on the way. Follow the steps below to quickly start a multi-modal RL task. Step 1: Prepare dataset ----------------------- .. code:: python # it will be saved in the $HOME/data/geo3k folder python examples/data_preprocess/geo3k.py Step 2: Download Model ---------------------- .. code:: bash # download the model from huggingface python3 -c "import transformers; transformers.pipeline(model='Qwen/Qwen2.5-VL-7B-Instruct')" Step 3: Perform GRPO training with multi-modal model on Geo3K Dataset --------------------------------------------------------------------- .. code:: bash # run the task bash examples/grpo_trainer/run_qwen2_5_vl-7b.sh ================================================ FILE: verl_distillation/docs/examples/ppo_code_architecture.rst ================================================ PPO Example Architecture ======================== Last updated: 02/17/2025. Let's start with the Proximal Policy Optimization algorithm, which is the most widely used algorithm in LLM post-training. The main entry point of the PPO algorithm example is: `main_ppo.py `_.
In this tutorial, we will go through the code architecture in `main_ppo.py `_. Define the data --------------- Users need to preprocess and store the dataset in parquet files, and we implement `RLHFDataset` to load and tokenize the parquet files. For ``RLHFDataset`` (the default), at least one field is required: - ``prompt``: Contains the string prompt We already provide some examples of processing the datasets to parquet files in the `data_preprocess directory `_. Currently, we support preprocessing of the GSM8k, MATH, Hellaswag and Full_hh_rlhf datasets. See :doc:`../preparation/prepare_data` for more information. Define the reward functions for different datasets -------------------------------------------------- In this main entry point, users only need to define their own reward function based on the datasets (or applications) utilized in PPO training. For example, we already provide reward functions for the `GSM8k `_ and `MATH `_ datasets in ``_select_rm_score_fn``. In the ``RewardManager``, we compute the reward score based on the data_source to select the corresponding reward function. For some RLHF datasets (e.g., full_hh_rlhf), the reward model is utilized to assess the responses without any reward functions. In this case, the ``RewardManager`` returns the ``rm_score`` computed by the reward model directly. See `reward functions `_ for the detailed implementation. Define worker classes --------------------- .. code:: python if config.actor_rollout_ref.actor.strategy in {"fsdp", "fsdp2"}: # for FSDP backend assert config.critic.strategy in {"fsdp", "fsdp2"} from verl.workers.fsdp_workers import ActorRolloutRefWorker, CriticWorker from verl.single_controller.ray import RayWorkerGroup ray_worker_group_cls = RayWorkerGroup elif config.actor_rollout_ref.actor.strategy == 'megatron': # for Megatron backend assert config.actor_rollout_ref.actor.strategy == config.critic.strategy from verl.workers.megatron_workers import ActorRolloutRefWorker, CriticWorker from verl.single_controller.ray.megatron import NVMegatronRayWorkerGroup ray_worker_group_cls = NVMegatronRayWorkerGroup # Ray worker class for Megatron-LM else: raise NotImplementedError from verl.trainer.ppo.ray_trainer import ResourcePoolManager, Role role_worker_mapping = { Role.ActorRollout: ActorRolloutRefWorker, Role.Critic: CriticWorker, Role.RefPolicy: ActorRolloutRefWorker } global_pool_id = 'global_pool' resource_pool_spec = { global_pool_id: [config.trainer.n_gpus_per_node] * config.trainer.nnodes, } mapping = { Role.ActorRollout: global_pool_id, Role.Critic: global_pool_id, Role.RefPolicy: global_pool_id, } Step 1: Construct the mapping between roles and workers ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A role represents a group of workers in the same process. We have pre-defined several roles in `ray_trainer.py `_. .. code:: python class Role(Enum): """ To create more roles dynamically, you can subclass Role and add new members """ Actor = 0 # This worker only has Actor Rollout = 1 # This worker only has Rollout ActorRollout = 2 # This worker has both actor and rollout, it's a HybridEngine Critic = 3 # This worker only has critic RefPolicy = 4 # This worker only has reference policy RewardModel = 5 # This worker only has reward model ActorRolloutRef = 6 # This worker contains actor, rollout and reference policy simultaneously Step 2: Define the worker class corresponding to this role ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - We have pre-implemented the ``ActorRolloutRefWorker``.
Through different configs, it can be a standalone actor, a standalone rollout, an ActorRollout HybridEngine, or an ActorRolloutRef HybridEngine. - We have also pre-implemented workers for ``Actor``, ``Rollout``, ``Critic``, ``Reward Model`` and ``Reference model`` on two different backends: PyTorch FSDP and Megatron-LM. See `FSDP Workers `_ and `Megatron-LM Workers `_ for more information. Step 3: Define resource pool id and resource pool spec ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - A resource pool is a division of global GPU resources; ``resource_pool_spec`` is a dict mapping from id to # of GPUs. - In the above example, we defined a global resource pool: global_pool_id, and then put all roles in this one resource pool with all the GPUs in this post-training task. This refers to *co-locate* placement, where all the models share the same set of GPUs. - See resource pool and placement for advanced usage. Defining reward model/function ------------------------------ .. code:: python # we should adopt a multi-source reward function here # - for rule-based rm, we directly call a reward score # - for model-based rm, we call a model # - for code related prompt, we send to a sandbox if there are test cases # - finally, we combine all the rewards together # - The reward type depends on the tag of the data if config.reward_model.enable: from verl.workers.fsdp_workers import RewardModelWorker role_worker_mapping[Role.RewardModel] = RewardModelWorker mapping[Role.RewardModel] = global_pool_id reward_fn = RewardManager(tokenizer=tokenizer, num_examine=0) # Note that we always use function-based RM for validation val_reward_fn = RewardManager(tokenizer=tokenizer, num_examine=1) resource_pool_manager = ResourcePoolManager(resource_pool_spec=resource_pool_spec, mapping=mapping) Since not all tasks use a model-based RM, users need to define here whether it's a model-based RM or a function-based RM. - If it's a model-based RM, directly add the ``RewardModel`` role in the resource mapping and add it to the resource pool mapping. - Note that the pre-defined ``RewardModelWorker`` only supports models with the structure of huggingface ``AutoModelForSequenceClassification``. If it's not this model, you need to define your own RewardModelWorker in `FSDP Workers `_ and `Megatron-LM Workers `_. - If it's a function-based RM, users are required to specify the reward function for each dataset. .. code:: python def _select_rm_score_fn(data_source): if data_source == 'openai/gsm8k': return gsm8k.compute_score elif data_source == 'lighteval/MATH': return math.compute_score else: raise NotImplementedError See the reward functions implemented in `directory `_ for more information. Define, init and run the PPO Trainer ------------------------------------ .. code:: python trainer = RayPPOTrainer(config=config, tokenizer=tokenizer, role_worker_mapping=role_worker_mapping, resource_pool_manager=resource_pool_manager, ray_worker_group_cls=ray_worker_group_cls, reward_fn=reward_fn, val_reward_fn=val_reward_fn) trainer.init_workers() trainer.fit() - We first initialize the ``RayPPOTrainer`` with the user config, tokenizer and all the above worker mappings, resource pool, worker group and reward functions. - We then call ``trainer.init_workers()`` to initialize the models on the allocated GPUs (in the resource pool). - The actual PPO training is executed in ``trainer.fit()``. verl can be easily extended to other RL algorithms by reusing the Ray model workers, resource pool and reward functions.
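For instance, extending to a new dataset mostly amounts to supplying a new rule-based scorer. Below is a minimal sketch in the spirit of the GSM8K ``####`` rule described earlier; the signature is illustrative rather than verl's exact contract, so check the reward functions in the repository before reusing it.

.. code:: python

    import re

    def compute_score(solution_str: str, ground_truth: str) -> float:
        """Toy rule-based reward: extract the final answer after '####'
        and compare it with the ground truth (signature is illustrative)."""
        match = re.search(r"####\s*(-?[\d,.]+)", solution_str)
        if match is None:
            return 0.0  # no parseable final answer
        answer = match.group(1).replace(",", "")
        return 1.0 if answer == ground_truth.strip() else 0.1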
See :doc:`extension<../advance/dpo_extension>` for more information. Details of the ``RayPPOTrainer`` are discussed in :doc:`Ray Trainer<../workers/ray_trainer>`. ================================================ FILE: verl_distillation/docs/examples/sandbox_fusion_example.rst ================================================ Sandbox Fusion Example ============================ Last updated: 06/27/2025. Introduction ------------ Sandbox Fusion is a remote code sandbox service that provides a secure environment for running and evaluating code generated by Large Language Models (LLMs). This example demonstrates how to train an LLM and use Sandbox Fusion to verify the generated code, enhancing both security and performance. By leveraging a remote code sandbox service with greater CPU resources for concurrent code verification, you can reduce the reward stage time by 10-30%, depending on the quality of the generated code. Step 1: Prepare the Dataset --------------------------- We use the Eurus-2-RL-Data dataset for training. This dataset combines math and code questions, making it suitable for LLM training tasks. You can download it from HuggingFace: `Eurus-2-RL-Data Dataset `_. Step 2: Set Up the Sandbox Fusion Service ----------------------------------------- Sandbox Fusion is a remote code sandbox service designed to securely run and evaluate LLM-generated code. To use it: 1. **Access Full Documentation**: For detailed setup instructions, refer to the `Sandbox Fusion Documentation `_. 2. **Deploy the Service**: Choose one of the following deployment methods: - **Local Deployment**: Follow the guide `here `_. - **FaaS Instance (Volcengine)**: Create an instance using the `Volcengine Documentation `_. After deployment, you will receive an API endpoint in the format: ``https:///run_code``. Step 3: Configure the Training Script ------------------------------------- To integrate Sandbox Fusion into your training script, configure the following parameters: **Key Settings for Sandbox Fusion** - ``reward_model.sandbox_fusion.url=''``: Enable Sandbox Fusion by specifying the API endpoint (must end with ``/run_code``). - ``reward_model.sandbox_fusion.max_concurrent=256``: Set the maximum number of concurrent API requests to the Sandbox Fusion service. - ``reward_model.sandbox_fusion.memory_limit_mb=1024``: Set the memory limit (in MB) for each sandbox instance. Defaults to 1024MB if not specified. **Additional Optimization** To further reduce code verification time, enable parallel processing with: - ``reward_model.reward_manager=prime``: The Prime reward manager verifies code across multiple subprocesses concurrently. **Example Script** For a practical implementation, refer to the example script: ``examples/ppo_trainer/run_deepseek7b_llm_sandbox_fusion.sh`` Once you’ve set your API endpoint in the script, you can start the training job. ================================================ FILE: verl_distillation/docs/examples/skypilot_examples.rst ================================================ SkyPilot Examples ================= Last updated: 09/04/2025. This guide provides examples of running VERL reinforcement learning training on Kubernetes clusters or cloud platforms with GPU nodes using `SkyPilot `_. Installation and Configuration ------------------------------- Step 1: Install SkyPilot ~~~~~~~~~~~~~~~~~~~~~~~~~ Choose the installation based on your target platform: ..
code-block:: bash # For Kubernetes only pip install "skypilot[kubernetes]" # For AWS pip install "skypilot[aws]" # For Google Cloud Platform pip install "skypilot[gcp]" # For Azure pip install "skypilot[azure]" # For multiple platforms pip install "skypilot[kubernetes,aws,gcp,azure]" Step 2: Configure Your Platform ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ See https://docs.skypilot.co/en/latest/getting-started/installation.html Step 3: Set Up Environment Variables ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Export necessary API keys for experiment tracking: .. code-block:: bash # For Weights & Biases tracking export WANDB_API_KEY="your-wandb-api-key" # For HuggingFace gated models (if needed) export HF_TOKEN="your-huggingface-token" Examples -------- All example configurations are available in the `examples/skypilot/ `_ directory on GitHub. See the `README `_ for additional details. PPO Training ~~~~~~~~~~~~ .. code-block:: bash sky launch -c verl-ppo verl-ppo.yaml --secret WANDB_API_KEY -y Runs PPO training on GSM8K dataset using Qwen2.5-0.5B-Instruct model across 2 nodes with H100 GPUs. Based on examples in ``examples/ppo_trainer/``. `View verl-ppo.yaml on GitHub `_ GRPO Training ~~~~~~~~~~~~~ .. code-block:: bash sky launch -c verl-grpo verl-grpo.yaml --secret WANDB_API_KEY -y Runs GRPO (Group Relative Policy Optimization) training on MATH dataset using Qwen2.5-7B-Instruct model. Memory-optimized configuration for 2 nodes. Based on examples in ``examples/grpo_trainer/``. `View verl-grpo.yaml on GitHub `_ Multi-turn Tool Usage Training ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. code-block:: bash sky launch -c verl-multiturn verl-multiturn-tools.yaml \ --secret WANDB_API_KEY --secret HF_TOKEN -y Single-node training with 8xH100 GPUs for multi-turn tool usage with Qwen2.5-3B-Instruct. Includes tool and interaction configurations for GSM8K. Based on examples in ``examples/sglang_multiturn/`` but uses vLLM instead of sglang. `View verl-multiturn-tools.yaml on GitHub `_ Configuration ------------- The example YAML files are pre-configured with: - **Infrastructure**: Kubernetes clusters (``infra: k8s``) - can be changed to ``infra: aws`` or ``infra: gcp``, etc. - **Docker Image**: VERL's official Docker image with CUDA 12.6 support - **Setup**: Automatically clones and installs VERL from source - **Datasets**: Downloads required datasets during setup phase - **Ray Cluster**: Configures distributed training across nodes - **Logging**: Supports Weights & Biases via ``--secret WANDB_API_KEY`` - **Models**: Supports gated HuggingFace models via ``--secret HF_TOKEN`` Launch Command Options ---------------------- - ``-c ``: Cluster name for managing the job - ``--secret KEY``: Pass secrets for API keys (can be used multiple times) - ``-y``: Skip confirmation prompt Monitoring Your Jobs -------------------- Check Cluster Status ~~~~~~~~~~~~~~~~~~~~ .. code-block:: bash sky status View Logs ~~~~~~~~~ .. code-block:: bash sky logs verl-ppo # View logs for the PPO job SSH into Head Node ~~~~~~~~~~~~~~~~~~ .. code-block:: bash ssh verl-ppo Access Ray Dashboard ~~~~~~~~~~~~~~~~~~~~ .. code-block:: bash sky status --endpoint 8265 verl-ppo # Get dashboard URL Stop a Cluster ~~~~~~~~~~~~~~ .. code-block:: bash sky down verl-ppo ================================================ FILE: verl_distillation/docs/faq/faq.rst ================================================ Frequently Asked Questions ==================================== Last updated: 09/24/2025. 
Ray related ------------ How to add a breakpoint for debugging with distributed Ray? ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Please check out the official debugging guide from Ray: https://docs.ray.io/en/latest/ray-observability/ray-distributed-debugger.html "Unable to register worker with raylet" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ This issue is caused by certain system settings; e.g., SLURM adds some constraints on how the CPUs are shared on a node. While `ray.init()` tries to launch as many worker processes as the number of CPU cores on the machine, some SLURM constraints prevent the `core-workers` from seeing the `raylet` process, leading to the problem. To fix this issue, you can set the config term ``ray_init.num_cpus`` to a number allowed by your system. Distributed training ------------------------ How to run multi-node post-training with Ray? ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ You can start a ray cluster and submit a ray job, following the official guide from Ray: https://docs.ray.io/en/latest/ray-core/starting-ray.html Then in the configuration, set the ``trainer.nnodes`` config to the number of machines for your job. How to use verl on a Slurm-managed cluster? ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Ray provides users with `this `_ official tutorial to start a Ray cluster on top of Slurm. We have verified the :doc:`GSM8K example<../examples/gsm8k_example>` on a Slurm cluster under a multi-node setting with the following steps. 1. [Optional] If your cluster supports `Apptainer or Singularity `_ and you wish to use it, convert verl's Docker image to an Apptainer image. Alternatively, set up the environment with the package manager available on your cluster or use other container runtimes (e.g. through `Slurm's OCI support `_) available to you. .. code:: bash apptainer pull /your/dest/dir/vemlp-th2.4.0-cu124-vllm0.6.3-ray2.10-te1.7-v0.0.3.sif docker://verlai/verl:vemlp-th2.4.0-cu124-vllm0.6.3-ray2.10-te1.7-v0.0.3 2. Follow the :doc:`GSM8K example<../examples/gsm8k_example>` to prepare the dataset and model checkpoints. 3. Modify `examples/slurm/ray_on_slurm.slurm `_ with your cluster's own information. 4. Submit the job script to the Slurm cluster with `sbatch`. Please note that Slurm cluster setups may vary. If you encounter any issues, please refer to Ray's `Slurm user guide `_ for common caveats. If you changed the Slurm resource specifications, please make sure to update the environment variables in the job script if necessary. Install related ------------------------ NotImplementedError: TensorDict does not support membership checks with the `in` keyword. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Detailed error information: .. code:: bash NotImplementedError: TensorDict does not support membership checks with the `in` keyword. If you want to check if a particular key is in your TensorDict, please use `key in tensordict.keys()` instead. Cause of the problem: There is no suitable version of the tensordict package for the linux-arm64 platform. The confirmation method is as follows: ..
.. code:: bash

   pip install tensordict==0.6.2

Output example:

.. code:: bash

   ERROR: Could not find a version that satisfies the requirement tensordict==0.6.2 (from versions: 0.0.1a0, 0.0.1b0, 0.0.1rc0, 0.0.2a0, 0.0.2b0, 0.0.3, 0.1.0, 0.1.1, 0.1.2, 0.8.0, 0.8.1, 0.8.2, 0.8.3)
   ERROR: No matching distribution found for tensordict==0.6.2

Solution 1: install tensordict from source:

.. code:: bash

   pip uninstall tensordict
   git clone https://github.com/pytorch/tensordict.git
   cd tensordict/
   git checkout v0.6.2
   pip install -v -e .

Solution 2: temporarily modify the code where the error occurs, replacing ``tensordict_var`` with ``tensordict_var.keys()``.

Illegal memory access
---------------------------------

If you encounter an error message like ``CUDA error: an illegal memory access was encountered`` during rollout, please check the vLLM documentation for troubleshooting steps specific to your vLLM version.

Checkpoints
------------------------

If you want to convert the model checkpoint into the huggingface safetensors format, please refer to ``verl/model_merger``.

Triton ``compile_module_from_src`` error
------------------------------------------------

If you encounter a triton compilation error similar to the stacktrace below, please set the ``use_torch_compile`` flag according to https://verl.readthedocs.io/en/latest/examples/config.html to disable just-in-time compilation for fused kernels.

.. code:: bash

   File "/data/lbh/conda_envs/verl/lib/python3.10/site-packages/triton/runtime/jit.py", line 345, in <lambda>
     return lambda *args, **kwargs: self.run(grid=grid, warmup=False, *args, **kwargs)
   File "/data/lbh/conda_envs/verl/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 338, in run
     return self.fn.run(*args, **kwargs)
   File "/data/lbh/conda_envs/verl/lib/python3.10/site-packages/triton/runtime/jit.py", line 607, in run
     device = driver.active.get_current_device()
   File "/data/lbh/conda_envs/verl/lib/python3.10/site-packages/triton/runtime/driver.py", line 23, in __getattr__
     self._initialize_obj()
   File "/data/lbh/conda_envs/verl/lib/python3.10/site-packages/triton/runtime/driver.py", line 20, in _initialize_obj
     self._obj = self._init_fn()
   File "/data/lbh/conda_envs/verl/lib/python3.10/site-packages/triton/runtime/driver.py", line 9, in _create_driver
     return actives[0]()
   File "/data/lbh/conda_envs/verl/lib/python3.10/site-packages/triton/backends/nvidia/driver.py", line 371, in __init__
     self.utils = CudaUtils()  # TODO: make static
   File "/data/lbh/conda_envs/verl/lib/python3.10/site-packages/triton/backends/nvidia/driver.py", line 80, in __init__
     mod = compile_module_from_src(Path(os.path.join(dirname, "driver.c")).read_text(), "cuda_utils")
   File "/data/lbh/conda_envs/verl/lib/python3.10/site-packages/triton/backends/nvidia/driver.py", line 57, in compile_module_from_src
     so = _build(name, src_path, tmpdir, library_dirs(), include_dir, libraries)
   File "/data/lbh/conda_envs/verl/lib/python3.10/site-packages/triton/runtime/build.py", line 48, in _build
     ret = subprocess.check_call(cc_cmd)
   File "/data/lbh/conda_envs/verl/lib/python3.10/subprocess.py", line 369, in check_call
     raise CalledProcessError(retcode, cmd)

What is the meaning of train batch size, mini batch size, and micro batch size?
------------------------------------------------------------------------------------------

This figure illustrates the relationship between the different batch size configurations. https://excalidraw.com/#json=pfhkRmiLm1jnnRli9VFhb,Ut4E8peALlgAUpr7E5pPCA
.. image:: https://github.com/user-attachments/assets/16aebad1-0da6-4eb3-806d-54a74e712c2d

How to generate a ray timeline to analyse the performance of a training job?
------------------------------------------------------------------------------------------

To generate the ray timeline file, set the config term ``ray_init.timeline_file`` to a JSON file path. For example:

.. code:: bash

   ray_init.timeline_file=/tmp/ray_timeline.json

The file will be generated at the specified path at the end of the training job. You can use tools like chrome://tracing or the Perfetto UI to view the ray timeline file. This figure shows the ray timeline file generated from a training job on one node with 4 GPUs.

.. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/ray_timeline.png?raw=true

How to set a proxy only for wandb?
------------------------------------------------------------------------------------------

If you need a proxy to access wandb, you can add the config below to your training job script. Compared to using the global ``https_proxy`` environment variable, this approach does not interfere with other HTTP requests, such as those from the ChatCompletionScheduler.

.. code:: bash

   +trainer.wandb_proxy=http://

Mismatch between inference and training sequences (high actor/grad_norm)
------------------------------------------------------------------------------------------

If the actor/grad_norm metric keeps increasing during training, it might be caused by a significant precision mismatch between the inference engine and training. You can use the following parameter to confirm this:

.. code:: bash

   actor_rollout_ref.rollout.calculate_log_probs=True

This parameter adds metrics like ``training/rollout_probs_diff_mean``, which can be used to verify whether there is a precision difference between inference and training. Under normal circumstances, the value of ``training/rollout_probs_diff_mean`` should be below 0.005. If you observe a value higher than 0.01, it indicates a precision issue in the inference engine. The precision issue is known to occur under the following conditions:

1. Using non-Hopper architecture GPUs, such as A100, L20, B200, etc.
2. Using vLLM `with issue 22103 `_ as the inference engine.
3. The input and output texts are long, for example in multi-turn scenarios using reasoning models like Qwen3 for RL training.

If all three conditions above are met and you observe that rollout_probs_diff_mean is too high, it is recommended to add the following parameter to resolve the precision issue:

.. code:: bash

   +actor_rollout_ref.rollout.engine_kwargs.vllm.disable_cascade_attn=True

The root cause of this issue is a bug in the flash attention used by vLLM. Although it has been fixed, the fix has not yet been released in the latest version of vLLM (v0.10.2). For a more detailed explanation, please refer to `Fix LSE output error in FA2 kv-split `_. Until vLLM releases a new version with this fix, it is recommended to use the configuration above to disable cascade attention as a workaround.

================================================ FILE: verl_distillation/docs/hybrid_flow.rst ================================================

=========================================================
HybridFlow Programming Guide
=========================================================

Last updated: 06/02/2025.

.. _vermouth: https://github.com/vermouth1992

Author: `Chi Zhang `_

verl is an open source implementation of the paper `HybridFlow `_ [1]_.
In this section, we will introduce the basic concepts of HybridFlow, the motivation behind it, and how to program with verl APIs.

Motivation and Design
------------------------

We use dataflow to represent RL systems [4]_.

DataFlow
~~~~~~~~~~~~~~~~~~~~

Dataflow is an abstraction of computations. Neural network training is a typical dataflow, and it can be represented by a computational graph.

.. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/dataflow.jpeg?raw=true
   :alt: The dataflow graph from CS231n 2024 lecture 4

This figure [2]_ represents the computation graph of a polynomial function followed by a sigmoid function. In the dataflow of neural network computation, each node represents an operator, and each edge represents the direction of forward/backward propagation. The computation graph determines the architecture of the neural network.

RL as a dataflow problem
++++++++++++++++++++++++++++++++++++++++++++++

Reinforcement learning (RL) training can also be represented as a dataflow. Below is the dataflow graph that represents the PPO algorithm used in RLHF [3]_:

.. image:: https://picx.zhimg.com/70/v2-cb8ab5ee946a105aab6a563e92682ffa_1440w.avis?source=172ae18b&biz_tag=Post
   :alt: PPO dataflow graph, credit to Zhihu 低级炼丹师

However, the dataflow of RL has fundamental differences compared with the dataflow of neural network training:

+--------------------------+--------------------------------------------------+---------------------+
| Workload                 | Node                                             | Edge                |
+--------------------------+--------------------------------------------------+---------------------+
| Neural Network Training  | Operator (+/-/matmul/softmax)                    | Tensor movement     |
+--------------------------+--------------------------------------------------+---------------------+
| Reinforcement Learning   | High-level operators (rollout/model forward)     | Data movement       |
+--------------------------+--------------------------------------------------+---------------------+

In the case of tabular reinforcement learning, each operator is a simple scalar math operation (e.g., the Bellman update). In deep reinforcement learning (DRL), each operator is a high-level neural network computation such as model inference/update. This makes RL a two-level dataflow problem:

- Control flow: defines how the high-level operators are executed (e.g., in PPO, we first perform rollout, then advantage computation, and finally training). It expresses the **core logic of RL algorithms**.
- Computation flow: defines the dataflow of **neural network computation** (e.g., model forward/backward/optimizer).

Design Choices
~~~~~~~~~~~~~~~~~~~~

The model sizes used in DRL before the LLM era were typically small, so the high-level neural network computation could be done in a single process. This enables embedding the computation flow inside the control flow as a single process. However, in the LLM era, the computation flow (e.g., training a neural network) becomes a multi-process program. This naturally leads to two design choices:

1. Convert the control flow into a multi-process program as well, then colocate it with the computation flow (unified multi-controller)

- Advantages:

  - Achieves the **optimal performance** under fixed computation flow and control flow, as the communication overhead in both training and data transfer is minimized.

- Disadvantages:

  - The computation and/or control flow is **hard to reuse** from a software perspective, as computation code is coupled with specific controller code.
    For example, the training loop of PPO is generic. Say we have a PPO training flow implemented with a specific computation flow such as FSDP. Neither the control flow nor the computation flow can be reused if we want to switch the computation flow from FSDP to Megatron, due to the coupling of control and computation flows.

  - Requires more effort from the user under flexible and dynamic control flows, due to the multi-process nature of the program.

2. Separate the flows: a single process for the control flow and multiple processes for the computation flow

- Advantages:

  - The computation flow defined elsewhere can be **easily reused** after the decoupling.
  - The controller runs on a single process. Implementing a new RL algorithm with a **different control flow is simple and easy**.

- Disadvantages:

  - Additional **data communication overhead** each time the controller process and the computation processes interact; the data has to be sent back and forth.

verl adopts the latter strategy, with separate control flow and computation flow. verl is designed to decouple the control flow of RL algorithms from the implementation of computation engines.

Overall Execution Diagram
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Below is a simplified diagram denoting the execution of a reinforcement learning job. In the diagram, the controller runs on a single process, while the generator/actor workers and critic workers run on multiple processes, placed on specific resource groups. For rollout, the controller passes the data to the generator to perform sample generation. When the rollout is done, the data is passed back to the controller for the next step of the algorithm. Similar execution is done for the other workers. With the hybrid controller design, the dataflow and computation are decoupled, providing both efficiency in computation and flexibility in defining algorithm training loops.

.. figure:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/driver_worker.png?raw=true
   :alt: The execution diagram

Codebase walkthrough (PPO)
------------------------------------------------

Entry function
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Code: https://github.com/volcengine/verl/blob/main/verl/trainer/main_ppo.py

In this file, we define a remote function `main_task` that serves as the controller (driver) process, as shown in the figure above. We also define a ``RewardManager``, where users can customize their reward function based on the data source of the dataset. Note that ``RewardManager`` should return the final token-level reward that is optimized by the RL algorithm, and that users can combine model-based rewards and rule-based rewards.

``main_task`` constructs a RayPPOTrainer instance and launches the fit. Note that ``main_task`` **runs as a single process**. We highly recommend that ``main_task`` NOT be scheduled on the head of the ray cluster, because ``main_task`` will consume a lot of memory while the head node usually has very few resources.

Ray trainer
~~~~~~~~~~~~~~~~~~~~

Code: https://github.com/volcengine/verl/blob/main/verl/trainer/ppo/ray_trainer.py

The RayPPOTrainer manages:

- Worker and WorkerGroup construction
- The main loop of the PPO algorithm

Note that the fit function of RayPPOTrainer **runs as a single process**.

Worker and WorkerGroup construction
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Each WorkerGroup manages a list of workers that run remotely. Note that the worker group runs in the process of its constructor. Each worker inside the WorkerGroup runs on a GPU.
The worker group serves as a proxy for the controller process to interact with a list of workers in order to perform certain computations. **To do so, we have to bind the methods of the worker into methods of the WorkerGroup and define the data dispatch and data collection.** This is done via a simple decorator that will be introduced in the Worker definition section.

For example, in PPO, we define 3 worker groups:

- ActorRolloutRef: manages actor, rollout and reference policy. ActorRolloutRefWorker can be instantiated as a single actor, a single rollout, a single reference policy, a combined actor/rollout, or a combined actor/rollout/ref. This design aims at maximum code reuse in various scenarios. The reason for colocating actor and rollout is fast weight transfer using NCCL. The reason for colocating actor and reference is to implement an efficient LoRA PPO, as the reference policy is simply the base model in LoRA PPO. The colocation is done via ``verl.single_controller.ray.base.create_colocated_worker_cls``, which creates a single ray remote class exposing all class methods from these roles.
- Critic: manages the critic model
- Reward: manages the reward model

The worker group will be constructed on the resource pool it designates. A resource pool is a set of GPUs in the ray cluster.

Worker definition
~~~~~~~~~~~~~~~~~~~~

.. _ActorRolloutRefWorker: https://github.com/volcengine/verl/blob/main/verl/workers/fsdp_workers.py

We take `ActorRolloutRefWorker `_ as an example. The APIs it should expose to the controller process are:

- init_model: build the underlying model
- generate_sequences: given prompts, generate responses
- compute_log_prob: compute the log-probability of a generated sequence using the actor
- compute_ref_log_prob: compute the log-probability of a generated sequence using the reference policy
- save_checkpoint: save the checkpoint

Note that these methods are defined in the worker and can only be invoked via remote calls. For example, if the controller process wants to initialize the model, it has to call

.. code-block:: python

   for worker in actor_rollout_ref_wg:
       worker.init_model.remote()

If the controller process wants to generate sequences, it has to call

.. code-block:: python

   data = xxx
   # split the data into dp chunks
   data_dp_lst = data.split(dp_size)
   output_dp_lst = []
   for i, worker in enumerate(actor_rollout_ref_wg):
       output_future = worker.generate_sequences.remote(data_dp_lst[i])
       output_dp_lst.append(output_future)
   output = torch.cat(ray.get(output_dp_lst), dim=0)

We observe that a controller process calling worker group methods can in general be divided into 3 steps:

- Split the data into data-parallel chunks
- Dispatch the corresponding data to each worker
- Collect and concatenate the data when the computation finishes

In verl, we designed syntactic sugar to encapsulate these 3 steps into a single call from the controller process.

.. code-block:: python

   @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO)
   def generate_sequences(data):
       ...

   # on the driver
   output = actor_rollout_ref_wg.generate_sequences(data)

We decorate the method of the worker with ``register``, which explicitly defines how the input data should be split and dispatched to each worker, and how the output data should be collected and concatenated by the controller. For example, ``Dispatch.DP_COMPUTE_PROTO`` splits the input data into dp chunks, dispatches a chunk to each worker, then collects and concatenates the results.
Note that this function requires the input and output to be a DataProto, defined in https://github.com/volcengine/verl/blob/main/verl/protocol.py.

PPO main loop
~~~~~~~~~~~~~~~~~~~~

With the aforementioned APIs, we can implement the main loop of PPO as if it were a single-process program

.. code-block:: python

   for prompt in dataloader:
       output = actor_rollout_ref_wg.generate_sequences(prompt)
       old_log_prob = actor_rollout_ref_wg.compute_log_prob(output)
       ref_log_prob = actor_rollout_ref_wg.compute_ref_log_prob(output)
       values = critic_wg.compute_values(output)
       rewards = reward_wg.compute_scores(output)
       # compute_advantages runs directly on the control process
       advantages = compute_advantages(values, rewards)
       output = output.union(old_log_prob)
       output = output.union(ref_log_prob)
       output = output.union(values)
       output = output.union(rewards)
       output = output.union(advantages)
       # update actor and critic
       actor_rollout_ref_wg.update_actor(output)
       critic_wg.update_critic(output)

Takeaways
~~~~~~~~~~~~~~~~~~~~

- This programming paradigm enables users to use different computation backends without modifying the control process.
- This programming paradigm enables flexible placement (by changing the mapping of WorkerGroup and ResourcePool) without modifying the control process.

Repository organization
------------------------------------------------

Important code files in the repository are organized as below:

.. code-block:: bash

   verl  # the verl package
     trainer
       main_ppo.py  # the entrypoint for RL training
       ppo
         ray_trainer.py  # the training loop for RL algorithms such as PPO
       fsdp_sft_trainer.py  # the SFT trainer with FSDP backend
       config
         generation.yaml  # configuration template for rollout
         ppo_trainer.yaml  # configuration template for the RL trainer
     workers
       protocol.py  # the interface of DataProto
       fsdp_workers.py  # the FSDP worker interfaces: ActorRolloutRefWorker, CriticWorker, RewardModelWorker
       megatron_workers.py  # the Megatron worker interfaces: ActorRolloutRefWorker, CriticWorker, RewardModelWorker
       actor
         dp_actor.py  # data parallel actor with FSDP backend
         megatron_actor.py  # nD parallel actor with Megatron backend
       critic
         dp_critic.py  # data parallel critic with FSDP backend
         megatron_critic.py  # nD parallel critic with Megatron backend
       reward_model
         megatron
           reward_model.py  # reward model with Megatron backend
       rollout
         vllm
           vllm_rollout.py  # rollout with vllm backend
         hf_rollout.py  # rollout with huggingface TGI backend
       sharding_manager
         fsdp_ulysses.py  # data and model resharding when using FSDP + ulysses
         fsdp_vllm.py  # data and model resharding when using FSDP + ulysses + vllm
         megatron_vllm.py  # data and model resharding when using Megatron + vllm
     utils
       dataset  # datasets for SFT/RM/RL
       reward_score  # function-based rewards
         gsm8k.py  # reward function for the gsm8k dataset
         math.py  # reward function for the math dataset
       seqlen_balancing.py  # the sequence balance optimization
     models
       llama  # Megatron implementation for llama, deepseek, mistral, etc
       transformers  # ulysses integration with transformer models such as llama, qwen, etc
       weight_loader_registery.py  # registry of weight loaders for loading hf ckpt into Megatron
     third_party
       vllm  # adaptor for vllm's usage in RL
         vllm_spmd  # vllm >= v0.7 adaptor
   examples  # example scripts
   tests  # integration and unit tests
   .github  # the configuration of continuous integration tests

.. [1] HybridFlow: A Flexible and Efficient RLHF Framework: https://arxiv.org/abs/2409.19256v2
.. [2] Data flow graph credit to CS231n 2024 lecture 4: https://cs231n.stanford.edu/slides/2024/lecture_4.pdf
.. [3] PPO dataflow graph credit to 低级炼丹师 from Zhihu: https://zhuanlan.zhihu.com/p/635757674
.. [4] RLFlow

================================================ FILE: verl_distillation/docs/index.rst ================================================

Welcome to verl's documentation!
================================================

verl is a flexible, efficient and production-ready RL training framework designed for large language models (LLMs) post-training. It is an open source implementation of the `HybridFlow `_ paper.

verl is flexible and easy to use with:

- **Easy extension of diverse RL algorithms**: The hybrid programming model combines the strengths of the single-controller and multi-controller paradigms to enable flexible representation and efficient execution of complex post-training dataflows, allowing users to build RL dataflows in a few lines of code.
- **Seamless integration of existing LLM infra with modular APIs**: Decouples computation and data dependencies, enabling seamless integration with existing LLM frameworks, such as PyTorch FSDP, Megatron-LM, vLLM and SGLang. Moreover, users can easily extend to other LLM training and inference frameworks.
- **Flexible device mapping and parallelism**: Supports various placements of models onto different sets of GPUs for efficient resource utilization and scalability across different cluster sizes.
- Ready integration with popular HuggingFace models

verl is fast with:

- **State-of-the-art throughput**: By seamlessly integrating existing SOTA LLM training and inference frameworks, verl achieves high generation and training throughput.
- **Efficient actor model resharding with 3D-HybridEngine**: Eliminates memory redundancy and significantly reduces communication overhead during transitions between training and generation phases.

--------------------------------------------

.. _Contents:

.. toctree::
   :maxdepth: 2
   :caption: Quickstart

   start/install
   start/quickstart
   start/multinode
   start/ray_debug_tutorial
   start/more_resources
   start/agentic_rl

.. toctree::
   :maxdepth: 2
   :caption: Programming guide

   hybrid_flow
   single_controller

.. toctree::
   :maxdepth: 1
   :caption: Data Preparation

   preparation/prepare_data
   preparation/reward_function

.. toctree::
   :maxdepth: 2
   :caption: Configurations

   examples/config

.. toctree::
   :maxdepth: 1
   :caption: PPO Example

   examples/ppo_code_architecture
   examples/gsm8k_example
   examples/multi_modal_example
   examples/skypilot_examples

.. toctree::
   :maxdepth: 1
   :caption: Algorithms

   algo/ppo.md
   algo/grpo.md
   algo/collabllm.md
   algo/dapo.md
   algo/spin.md
   algo/sppo.md
   algo/entropy.md
   algo/opo.md
   algo/baseline.md
   algo/gpg.md

.. toctree::
   :maxdepth: 1
   :caption: PPO Trainer and Workers

   workers/ray_trainer
   workers/fsdp_workers
   workers/megatron_workers
   workers/sglang_worker
   workers/model_engine

.. toctree::
   :maxdepth: 1
   :caption: Performance Tuning Guide

   perf/dpsk.md
   perf/best_practices
   perf/perf_tuning
   README_vllm0.8.md
   perf/device_tuning
   perf/verl_profiler_system.md
   perf/nsight_profiling.md

.. toctree::
   :maxdepth: 1
   :caption: Adding new models

   advance/fsdp_extension
   advance/megatron_extension
.. toctree::
   :maxdepth: 1
   :caption: Advanced Features

   advance/checkpoint
   advance/rope
   advance/attention_implementation
   advance/ppo_lora.rst
   sglang_multiturn/multiturn.rst
   sglang_multiturn/interaction_system.rst
   advance/placement
   advance/dpo_extension
   examples/sandbox_fusion_example
   advance/rollout_trace.rst
   advance/rollout_skip.rst
   advance/rollout_is.md
   advance/one_step_off
   advance/agent_loop
   advance/reward_loop
   advance/fully_async
   data/transfer_queue.md

.. toctree::
   :maxdepth: 1
   :caption: Hardware Support

   amd_tutorial/amd_build_dockerfile_page.rst
   amd_tutorial/amd_vllm_page.rst
   ascend_tutorial/ascend_quick_start.rst
   ascend_tutorial/ascend_profiling_zh.rst
   ascend_tutorial/ascend_profiling_en.rst
   ascend_tutorial/dockerfile_build_guidance.rst
   ascend_tutorial/ascend_sglang_quick_start.rst

.. toctree::
   :maxdepth: 1
   :caption: API References

   api/data
   api/single_controller.rst
   api/trainer.rst
   api/utils.rst

.. toctree::
   :maxdepth: 2
   :caption: FAQ

   faq/faq

.. toctree::
   :maxdepth: 1
   :caption: Development Notes

   sglang_multiturn/sandbox_fusion.rst

Contribution
-------------

verl is free software; you can redistribute it and/or modify it under the terms of the Apache License 2.0. We welcome contributions. Join us on `GitHub `_, `Slack `_ and `Wechat `_ for discussions.

Contributions from the community are welcome! Please check out our `project roadmap `_ and `good first issues `_ to see where you can contribute.

Code Linting and Formatting
^^^^^^^^^^^^^^^^^^^^^^^^^^^^

We use pre-commit to help improve code quality. To initialize pre-commit, run:

.. code-block:: bash

   pip install pre-commit
   pre-commit install

To resolve CI errors locally, you can also run pre-commit manually:

.. code-block:: bash

   pre-commit run

Adding CI tests
^^^^^^^^^^^^^^^^^^^^^^^^

If possible, please add CI test(s) for your new feature:

1. Find the most relevant workflow yml file, which usually corresponds to a ``hydra`` default config (e.g. ``ppo_trainer``, ``ppo_megatron_trainer``, ``sft_trainer``, etc).
2. Add related path patterns to the ``paths`` section if not already included.
3. Minimize the workload of the test script(s) (see existing scripts for examples).

We are HIRING! Send us an `email `_ if you are interested in internship/FTE opportunities in MLSys/LLM reasoning/multimodal alignment.

================================================ FILE: verl_distillation/docs/perf/best_practices.rst ================================================

Verl LLM Best Practices (DAPO + Qwen3-235B)
===========================================

Last updated: 11/03/2025.

Purpose
-------

This guide uses DAPO training on Qwen3-235B as a concrete example. We unpack every parameter that appears in the optimization objective, map it to Verl configuration entries, and share field-tested recommendations so you can derive sensible settings for your own workloads.

.. note::

   1. The guide only covers the subset of parameters required to reproduce the DAPO experiments discussed here. For the full list, refer to the ``config`` components in the Verl source tree: https://github.com/volcengine/verl/tree/main/verl/trainer/config
   2. PPO and GRPO introduce KL-constrained policies. We therefore include that setup in the explanations below. You can treat all configurations mentioned here as a DAPO pipeline augmented with a KL penalty.

Optimization Objectives
-----------------------

DAPO objective
~~~~~~~~~~~~~~
.. math::

   \begin{aligned}
   \mathcal{J}_{\mathrm{DAPO}}(\theta)= & \mathbb{E}_{(q, a) \sim \mathcal{D},\left\{o_i\right\}_{i=1}^G \sim \pi_{\theta_{\text{old}}}(\cdot \mid q)} \\
   & \left[\frac{1}{\sum_{i=1}^G\left|o_i\right|} \sum_{i=1}^G \sum_{t=1}^{\left|o_i\right|} \min \left(r_{i, t}(\theta) \hat{A}_{i, t},\ \operatorname{clip}\left(r_{i, t}(\theta), 1-\varepsilon_{\text{low}}, 1+\varepsilon_{\text{high}}\right) \hat{A}_{i, t}\right)\right]
   \end{aligned}

.. math::

   \text{s.t.} \quad 0<\left|\left\{o_i \mid \text{is\_equivalent}\left(a, o_i\right)\right\}\right|<G

The symbols below map to Verl configuration entries as follows.

:math:`\pi_{\theta_{old}}`

- ``actor_rollout_ref.rollout.tensor_model_parallel_size``: Rollout tensor parallel size. Make sure the aggregate GPU memory of a TP group exceeds ``2 * model_parameters`` (bf16/fp16). Increase TP gradually to expand KV cache capacity while watching communication cost, especially once TP > 8.
- ``actor_rollout_ref.rollout.temperature`` / ``top_p`` / ``top_k``: Sampling knobs for rollout. Keep enough randomness; ``temperature=1.0``, ``top_p=1.0``, ``top_k=-1`` are good defaults.
- ``actor_rollout_ref.rollout.val_kwargs.temperature`` / ``top_p`` / ``top_k`` / ``do_sample`` / ``n``: Sampling options for validation. Set ``temperature > 0`` to prevent repetitive thinking chains. For small test sets (e.g., AIME24), raise ``n`` (64 is a common choice) to reduce variance. A practical starting point is ``temperature=1.0``, ``top_p=0.7``, ``top_k=-1``, ``do_sample=True``, ``n=1``, increasing ``n`` as needed.
- ``+actor_rollout_ref.rollout.engine_kwargs.vllm.*`` / ``+actor_rollout_ref.rollout.engine_kwargs.sglang.*``: Extra backend options injected via the ``+`` syntax. Consult the backend docs for exact semantics. Some switches (for example ``pipeline_parallel_size``) may not be supported yet; when TP=32, ``enable_expert_parallel=True`` can even slow down DeepSeek-V3 rollout, so benchmark carefully.

:math:`\pi_\theta`

- ``data.train_batch_size``: Total batch size per training iteration. Each rollout produces ``train_batch_size * n`` samples. Larger values reduce the number of rollouts but increase off-policy drift.
- ``actor_rollout_ref.actor.ppo_mini_batch_size``: Mini-batch size per optimization step. Tune it the same way you would for standard deep learning workloads.
- ``actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu``: Samples processed per forward pass on one GPU group (a Megatron group contains TP * PP * CP GPUs). Keep it ≤ ``ppo_mini_batch_size`` and as large as memory allows.
- ``actor_rollout_ref.actor.use_dynamic_bsz``: Enable dynamic batch sizing to adapt to sequence length and improve throughput.
- ``actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu``: Maximum tokens per GPU when computing log probabilities under dynamic batching. Set it to at least a multiple of ``max_prompt_length + max_response_length`` to prevent truncation.
- Megatron parallelism parameters (``pipeline_model_parallel_size`` / ``tensor_model_parallel_size`` / ``expert_model_parallel_size`` / ``expert_tensor_parallel_size`` / ``context_parallel_size``): Balance PP/TP/EP/ETP/CP to match memory and network constraints. In bf16/fp16, each parameter consumes roughly ``2 / TP`` bytes; if you keep FP32 master weights or skip optimizer offload, reserve another 4-8 bytes for Adam. Activations scale with ``micro_batch_size × sequence_length × hidden_size`` and can be mitigated with gradient checkpointing, dynamic batches, or offload. Prefer increasing TP first, add PP when necessary, extend sequence capacity with CP, align EP/ETP with TP for MoE models, and keep DP minimal on constrained clusters while combining with offload. Always align the setup with hardware topology and communication cost.
- ``actor_rollout_ref.model.use_fused_kernels``: Enable Verl's fused kernels for supported models to squeeze out additional performance.

:math:`\hat{A}_{i,t}`

- ``algorithm.adv_estimator``: Advantage estimator. Set to ``grpo`` for DAPO/GRPO.

:math:`R_i`

- ``reward_model.reward_manager``: Reward aggregation strategy. Use ``dapo`` for DAPO and ``naive`` for GRPO.

:math:`D_{KL}`

- ``algorithm.use_kl_in_reward``: Whether to add a KL term to the reward. ``True`` for PPO, ``False`` for GRPO and DAPO.
- ``actor_rollout_ref.actor.use_kl_loss``: Whether to include a KL loss term. ``False`` for PPO, ``True`` for GRPO, ``False`` for DAPO.

:math:`\beta`

- ``actor_rollout_ref.actor.kl_loss_coef``: Weight of the KL loss. Start around 0.001. Larger values curb reward hacking but reduce exploration.
- ``algorithm.kl_ctrl.kl_coef``: KL coefficient applied within the reward. Adjust to match your tolerance for divergence.

:math:`\pi_{old}`

- ``actor_rollout_ref.rollout.log_prob_use_dynamic_bsz``: Enable dynamic batching when the old policy computes log-probabilities. Recommended.

:math:`\pi_{ref}`

- ``actor_rollout_ref.ref.log_prob_use_dynamic_bsz``: Enable dynamic batching for the reference policy. Recommended.
- Reference Megatron parallelism: Keep ``pipeline_model_parallel_size``, ``tensor_model_parallel_size``, ``expert_model_parallel_size``, ``expert_tensor_parallel_size``, and ``context_parallel_size`` in sync with the actor.
- ``actor_rollout_ref.ref.megatron.param_offload``: Offload reference parameters to CPU when the actor does so. Even without gradients or optimizer states, parity helps with capacity planning.

:math:`o_i` / :math:`|o_i|`

- ``actor_rollout_ref.actor.loss_agg_mode``: Loss aggregation mode. Token-level ``token-mean`` matches the recommendations from Dr.GRPO and DAPO; use ``seq-mean-token-mean`` to reproduce the original GRPO behavior.

:math:`\pi_\theta(o_{i,t} \mid q_i, o_{i,<t})`

================================================ FILE: verl_distillation/docs/perf/device_tuning.rst ================================================

1.5B
~~~~

.. list-table::
   :widths: auto
   :header-rows: 1

   * - Tag
     - Model
     - Task
     - Resource
     - MaxBatch
     - Train
     - Infer
     - Link
     - Contributor
   * - MIN
     - Qwen2.5-1.5B
     - GRPO-LoRA
     - 1*H100
     - 128
     - fsdp
     - vllm0.8.3
     - `qwen2-1.5b_grpo-lora_1_h100_fsdp_vllm.sh `_
     - `SimonHuang `_

3B
~~~

.. list-table::
   :widths: auto
   :header-rows: 1

   * - Tag
     - Model
     - Task
     - Resource
     - MaxBatch
     - Train
     - Infer
     - Link
     - Contributor
   * - MIN
     - Qwen2.5-3B
     - GRPO-LoRA
     - 1*H100
     - 62
     - fsdp
     - vllm0.8.3
     - `qwen2-3b_grpo-lora_1_h100_fsdp_vllm.sh `_
     - `SimonHuang `_

7B
~~~

.. list-table::
   :widths: auto
   :header-rows: 1

   * - Tag
     - Model
     - Task
     - Resource
     - MaxBatch
     - Train
     - Infer
     - Link
     - Contributor
   * - MIN
     - Qwen2-7B
     - GRPO
     - 2*H800
     - \
     - fsdp
     - vllm0.8.2
     - `qwen2-7b_grpo_2_h800_fsdp_vllm `_
     - `Xiangyongan `_
   * - MIN
     - Qwen2.5-7B
     - GRPO-LoRA
     - 1*H100
     - 16
     - fsdp
     - vllm0.8.3
     - `qwen2-7b_grpo-lora_1_h100_fsdp_vllm.sh `_
     - `SimonHuang `_

14B
~~~

.. list-table::
   :widths: auto
   :header-rows: 1

   * - Tag
     - Model
     - Task
     - Resource
     - MaxBatch
     - Train
     - Infer
     - Link
     - Contributor
   * - MIN
     - Qwen2-14B
     - GRPO
     - 4*H800
     - \
     - fsdp
     - vllm0.8.2
     - `qwen2-14b_grpo_4_h800_fsdp_vllm `_
     - `Xiangyongan `_
   * - MIN
     - Qwen2.5-14B
     - GRPO-LoRA
     - 2*H100
     - 116
     - fsdp
     - vllm0.8.3
     - `qwen2-14b_grpo-lora_2_h100_fsdp_vllm.sh `_
     - `SimonHuang `_

32B
~~~
.. list-table::
   :widths: auto
   :header-rows: 1

   * - Tag
     - Model
     - Task
     - Resource
     - MaxBatch
     - Train
     - Infer
     - Link
     - Contributor
   * - MIN
     - Qwen2-32B
     - GRPO
     - 8*H20
     - \
     - megatron
     - vllm0.8.2
     - `qwen2-32b_grpo_8_h20_megatron_vllm `_
     - `Xiangyongan `_
   * - MIN
     - Qwen2.5-32B
     - GRPO-LoRA
     - 4*H100
     - 180
     - fsdp
     - vllm0.8.3
     - `qwen2-32b_grpo-lora_4_h100_fsdp_vllm.sh `_
     - `SimonHuang `_

70B
~~~

.. list-table::
   :widths: auto
   :header-rows: 1

   * - Tag
     - Model
     - Task
     - Resource
     - MaxBatch
     - Train
     - Infer
     - Link
     - Contributor
   * - MIN
     - Qwen2-70B
     - GRPO
     - 32*H20
     - \
     - fsdp
     - vllm0.8.2
     - `qwen2-70b_grpo_32_h20_fsdp_vllm `_
     - `Xiangyongan `_
   * - MIN
     - Qwen2-70B
     - GRPO
     - 32*H800
     - \
     - fsdp
     - vllm0.8.3
     - `qwen2-70b_grpo_32_h800_fsdp_vllm `_
     - `Xiangyongan `_
   * - MIN
     - Qwen2.5-72B
     - GRPO-LoRA
     - 8*H100
     - 176
     - fsdp
     - vllm0.8.3
     - `qwen2-72b_grpo-lora_8_h100_fsdp_vllm.sh `_
     - `SimonHuang `_

405B
~~~~

.. table::
   :widths: auto

   ====== ====== ====== ======== ======== ====== ====== ======
   tag    model  task   resource MaxBatch train  infer  link
   ====== ====== ====== ======== ======== ====== ====== ======
   \      \      \      \        \        \      \      \
   ====== ====== ====== ======== ======== ====== ====== ======

671B
~~~~

.. table::
   :widths: auto

   ====== ====== ====== ======== ======== ====== ====== ======
   tag    model  task   resource MaxBatch train  infer  link
   ====== ====== ====== ======== ======== ====== ====== ======
   \      \      \      \        \        \      \      \
   ====== ====== ====== ======== ======== ====== ====== ======

================================================ FILE: verl_distillation/docs/perf/dpsk.md ================================================

# Training DeepSeek 671b

Last updated: 08/20/2025.

verl integrates Megatron to support large MoE models such as `Qwen3-235B-A22B` and `deepseek-ai/DeepSeek-V3`. This is an ongoing community effort. Along the way, the community added the following features and optimizations that enable verl to run larger models:

- per-tensor weight resharding between rollout and training
- context parallelism and expert parallelism enabled via megatron
- dynamic batch size (sequence balance) for megatron
- reduced ray-related serialization overhead
- optimizer offloading, recomputation, and efficient kernels
- various debugging metrics and utils
- hybrid optimizer

The megatron backend now supports a wider list of models:

- DeepSeek-V3
- Moonlight
- Qwen3
- Qwen2.5-VL (to be merged soon)
- Qwen2
- Mixtral

## Getting Started

### Preparation

The recommended image with a pre-built Megatron dependency is `verlai/verl:app-verl0.4-vllm0.8.5-mcore0.13.0-preview`, which is built using the Dockerfile at [docker/verl0.4-cu124-torch2.6-fa2.7.4/Dockerfile.app.vllm.mcore0.13.preview](https://github.com/volcengine/verl/blob/main/docker/verl0.4-cu124-torch2.6-fa2.7.4/Dockerfile.app.vllm.mcore0.13.preview). The image is built for Hopper GPUs with DeepEP. It does not support non-Hopper GPUs, such as A100; you may need to reinstall DeepEP to work with A100.

With `OFFLOAD_FRACTION=1`, the system's minimum requirements are lowered: it can run on as few as 96 H20 (96GB) GPUs for DeepSeek-V3, and on as few as 32 H20 (96GB) GPUs for Qwen3-235B-A22B. However, this configuration uses 1.6TB of CPU memory per node. If you run out of CPU memory or require faster training, you can add more nodes.

### DeepSeek 671b

For DeepSeek-V3 671b, please refer to [examples/grpo_trainer/run_deepseek671b_math_megatron_96gb.sh](https://github.com/volcengine/verl/blob/main/examples/grpo_trainer/run_deepseek671b_math_megatron_96gb.sh). MTP and quantization are disabled during RL training.
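As a concrete illustration, a launch might look like the sketch below. The variable names are taken from the table that follows; whether the script reads them from the environment (rather than being edited in place) is an assumption you should verify against the script itself.

```bash
# Hypothetical invocation for a 128-GPU run, using the recommended values
# from the table below (assumes the script consumes these env variables)
NNODES=16 TP=8 PP=16 EP=8 OFFLOAD_FRACTION=0.5 OFFLOAD_OPTIM=True LAST_LAYER=1 \
    bash examples/grpo_trainer/run_deepseek671b_math_megatron_96gb.sh
```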
To train your project, configure the following environment variables based on the number of available GPUs. These are recommended settings and can be adjusted for your specific hardware.

| num gpus | NNODES | TP | PP | EP | OFFLOAD_FRACTION | OFFLOAD_OPTIM | LAST_LAYER |
| -- | -- | -- | -- | -- | -- | -- | -- |
| 96 | 12 | 8 | 12 | 8 | 1. | False | 6 |
| 128 | 16 | 8 | 16 | 8 | 0.5 | True | 1 |
| 256 | 32 | 8 | 16 | 8 | 0. | True | 1 |
| 512 | 64 | 1 | 16 | 32 | 0 | True | 1 |

### Qwen3 235b

For Qwen3-235b, please refer to [examples/grpo_trainer/run_qwen3-235b_megatron_96gb.sh](https://github.com/volcengine/verl/blob/main/examples/grpo_trainer/run_qwen3-235b_megatron_96gb.sh). To train your project, configure the following environment variables based on the number of available GPUs. These are recommended settings and can be adjusted for your specific hardware.

| num gpus | NNODES | TP | PP | EP | OFFLOAD_FRACTION | OFFLOAD_OPTIM | LAST_LAYER |
| -- | -- | -- | -- | -- | -- | -- | -- |
| 32 | 4 | 4 | 8 | 4 | 1. | False | 6 |
| 64 | 8 | 4 | 8 | 4 | 0.5 | True | 6 |
| 128 | 16 | 4 | 8 | 4 | 0 | True | 6 |
| 256 | 32 | 4 | 8 | 4 | 0 | True | 6 |

### Benchmark

Here are some benchmark results for DeepSeek / Qwen3-235B. All configurations match the recommended settings for the given number of GPUs.

| model | num gpus | mean response length | rollout time(s) | GPU memory(GB) | CPU memory(GB) | MFU | step time(s) |
| -- | -- | -- | -- | -- | -- | -- | -- |
| DeepSeek 671b | 96 | 1960 | 1050 | 66 | 1500 | 0.19 | 1700 |

### Qwen3-30B-A3B MoE

For Qwen3-30b, please refer to [examples/grpo_trainer/run_qwen3moe-30b_megatron_96gb.sh](https://github.com/volcengine/verl/blob/main/examples/grpo_trainer/run_qwen3moe-30b_megatron_96gb.sh). To train your project, configure the following environment variables based on the number of available GPUs. These are recommended settings and can be adjusted for your specific hardware.

| num gpus | NNODES | TP | PP | EP | OFFLOAD_FRACTION | OFFLOAD_OPTIM | MFU |
| -- | -- | -- | -- | -- | -- | -- | -- |
| 8 | 1 | 1 | 1 | 8 | 1. | True | 0.4 |
| 16 | 2 | 1 | 1 | 8 | 1. | True | 0.37 |
| 32 | 4 | 1 | 1 | 8 | 1. | True | 0.31 |

## Upcoming Optimizations

The community continues to optimize large MoE models; ongoing efforts include:

- further optimizing memory consumption, and providing recommended/tuned configurations for various machine types
- optimizing long-context RL training performance
- performance improvements with SGLang x Megatron

We invite the community to try and improve verl together. Get connected with us on [slack](https://join.slack.com/t/verlgroup/shared_invite/zt-2w5p9o4c3-yy0x2Q56s_VlGLsJ93A6vA)/[wechat](https://raw.githubusercontent.com/eric-haibin-lin/verl-community/refs/heads/main/WeChat.JPG)/[GitHub issues](https://github.com/volcengine/verl/issues/708)!

## Acknowledgement

@vermouth1992 @ISEEKYAN @ETOgaosion @yzlnew @ShareLer @BearBiscuit05 @ccclyu @ann-qin-lu @SwordFaith @zzong2006 @zhaochenyang20 @ocss884 @eric-haibin-lin @chenhaiq @techkang

================================================ FILE: verl_distillation/docs/perf/nsight_profiling.md ================================================

# NVIDIA Nsight Systems profiling in verl

Last updated: 06/20/2025.

This guide explains how to use NVIDIA Nsight Systems for profiling verl training runs.
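At a high level, the workflow is: enable profiling in the trainer config, run training as usual, then open the generated `.nsys-rep` reports in the Nsight Systems GUI or summarize them with the `nsys` CLI. For example (the report filename here is illustrative; the default output directory is described later in this guide):

```bash
# Summarize a generated report from the command line
nsys stats /tmp/ray/session_latest/logs/nsight/worker_process_12345.1.nsys-rep
```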
## Configuration

Profiling in verl can be configured through several parameters in the trainer configuration file (`ppo_trainer.yaml` or other files such as `dapo_trainer.yaml`):

### Prerequisites

The Nsight Systems version is important; please reference `docker/Dockerfile.vllm.sglang.megatron` for the version we use.

### Global profiling control

verl has one single controller process and multiple worker processes, and both can be profiled. Since the controller process can be executed on any node in the cluster, a message is printed in the log indicating the hostname and process ID of the controller process.

In `global_profiler`, the following config entries control the profiler behavior:

* **`global_profiler.steps`**. List of step numbers at which profiling should be performed. For example, [1, 2, 5] will profile steps 1, 2, and 5; `null` means no profiling.
* **`global_profiler.profile_continuous_steps`**. If true, and `global_profiler.discrete == False` (see below), consecutive steps in `global_profiler.steps` are combined into one database; in the example above, steps 1 and 2 go into one database, and step 5 into another. If false, every step occupies at least one database. The purpose of this config is to observe program behavior between steps.

Nsys options for the controller and worker nodes are configured in `global_profiler.global_tool_config.nsys`:

* **`global_profiler.global_tool_config.nsys.controller_nsight_options`**. This config group is for the single controller. All fields in this config group are passed directly to Nsight Systems when Ray starts the controller process. `ppo_trainer.yaml` provides a workable example. Users can reference the [Nsight Systems manual](https://docs.nvidia.com/nsight-systems/UserGuide/index.html) and the [Ray user guide](https://docs.ray.io/en/latest/ray-observability/user-guides/profiling.html) for more details.
* **`global_profiler.global_tool_config.nsys.worker_nsight_options`**. This config group is for the worker processes. Similarly, all fields in this config group are passed directly to Nsight Systems when Ray starts the worker processes. The capture range controls when the profiler starts and stops, so `capture-range: "cudaProfilerApi"` is fixed; do not change it. Users can change `capture-range-end` with some accurate calculation, or just leave it `null`.

### Worker process profiling

verl manages multiple RL roles, _Actor_, _Ref_, _Rollout_, _Critic_, _Reward_, which are implemented in different Worker classes. These workers can be combined into one Ray Actor, running in a process group. Each RL role has its own profiling config group, `profiler`, which consists of three fields:

* **`all_ranks` and `ranks`**. When `all_ranks` is set to `True`, all ranks are profiled; when set to `False`, the ranks listed in `ranks` are profiled. By default, verl profiles the whole training process into a series of `worker_process_<PID>.<RID>.nsys-rep` files, one per process rank. PID is the process ID; RID is the capture range ID.
* **`discrete`**. When set to `False`, all of a role's actions in one training step are dumped into one database. When set to `True`, the actions annotated by `DistProfiler.annotate` are dumped into discrete databases; in this case, each role's action occupies one database.
* **Verl collocate mode**. verl can combine two Worker subclasses into one Worker Actor. In this case, the user should take care that the combined Workers have a consistent `discrete` setting.
The Nsight Systems profiler uses a `torch.cuda.profiler.start()` and `stop()` pair to dump a database in either case.

### Where to find the profiling data

By default the `*.nsys-rep` files are saved in the directory `/tmp/ray/session_latest/logs/nsight/` on each node. According to the Ray manual, this default directory is not changeable (["however, Ray preserves the `--output` option of the default config"](https://docs.ray.io/en/latest/ray-observability/user-guides/profiling.html)). This may seem inconvenient, but Ray may start hundreds of processes, and saving the files in one central place would put heavy pressure on the network file system.

## Usage Example

To enable profiling for specific components and steps, modify your ppo_trainer.yaml like this:

### Disable profiler

```yaml
global_profiler:
  steps: null # disable profile
```

### Enable profiler and one database for one training step

```yaml
global_profiler:
  steps: [1, 2, 5]
  discrete: False
actor_rollout_ref:
  actor:
    profiler:
      enable: True
      all_ranks: True
# rollout & ref follow actor settings
critic:
  profiler:
    enable: True
    all_ranks: True
reward_model:
  profiler:
    enable: True
    all_ranks: True
```

### Enable profiler and multiple databases for one training step

```yaml
global_profiler:
  steps: [1, 2, 5]
  discrete: True
```

## Profiling Output

When profiling is enabled, verl generates Nsight Systems profiles for the specified components and steps. The profiles include:

- CUDA kernel execution
- Memory operations
- CPU-GPU synchronization
- NVTX markers for key operations

Nsight Systems supports a multi-report view that opens multiple databases together. In this mode, different processes and steps can be aligned on one timeline for better analysis.

================================================ FILE: verl_distillation/docs/perf/perf_tuning.rst ================================================

Performance Tuning Guide
==============================

Last updated: 07/17/2025.

Author: `Guangming Sheng `_, `Jiali Zheng `_

In this section, we will discuss how to tune the performance of all the stages in verl, including:

1. Rollout generation throughput.
2. Enabling ``use_remove_padding=True`` for sequence packing (i.e., data packing and removing padding).
3. Batch size tuning for forward and backward computation.
4. Enabling ``use_dynamic_bsz=True`` for higher throughput.
5. Utilizing Ulysses Sequence Parallel for long-context training.
6. LigerKernel for SFT performance optimization.
7. Forward prefetch in the FSDP training backend.
8. Memory optimization for entropy calculation from logits.

Rollout Generation Tuning
--------------------------

verl currently supports two rollout backends: vLLM and TGI (with SGLang support coming soon). Below are key factors for tuning vLLM-based rollout. Before tuning, we recommend setting ``actor_rollout_ref.rollout.disable_log_stats=False`` so that rollout statistics are logged.

- Increase ``gpu_memory_utilization``.

  - For vLLM v0.7.0 and later, the vLLM instance will only use this fraction of the **total** GPU memory.
  - For SGLang, it is the fraction of free GPU memory used for **static** memory such as model weights and the KV cache; the remaining (1 - gpu_memory_utilization) will also be used during inference.

  However, if model parameters and optimizer states are not offloaded, using too high a fraction can lead to OOM. A value between 0.5 and 0.7 often strikes a good balance between high throughput and avoiding OOM.
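  For example, a conservative starting point can be passed on the command line together with the stats-logging switch mentioned above (the value is an illustrative starting point, not a tuned recommendation):

  .. code-block:: bash

     actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \
     actor_rollout_ref.rollout.disable_log_stats=False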
  Note: since the definition of ``gpu_memory_utilization`` varies across inference engines, a value that works well for one engine may cause OOM for another.

- Adjust ``max_num_seqs`` or ``max_num_batched_tokens``. If the GPU cache utilization is relatively low in the log, increasing ``max_num_seqs`` or ``max_num_batched_tokens`` can enlarge the effective batch size in the decoding stage, allowing more concurrent requests per batch. We recommend setting ``max_num_batched_tokens > 2048`` for higher throughput.
- Use a smaller ``tensor_parallel_size``. When GPU resources allow, a smaller tensor parallel size spawns more vLLM replicas. Data parallelism (DP) can yield higher throughput than tensor parallelism (TP), but it also increases KVCache consumption. Carefully balance the trade-off between more replicas and higher memory usage. Our experiment in Sec. 8.4 of the `HybridFlow paper `_ evaluates this trade-off.
- Balance performance and memory using ``cudagraph_capture_sizes``. If ``cudagraph_capture_sizes`` is set, vLLM will try to capture the model execution graph for the listed batch sizes. Since CUDA graph memory cannot be offloaded to CPU, it stays on the GPU while the actor update is running. Capturing smaller batch sizes can avoid OOM but slightly reduces throughput. You must set ``enforce_eager=False`` to use ``cudagraph_capture_sizes``.

More tuning details, such as dealing with preemption and chunked prefill, can be found in the `vLLM official tuning guide `_.

For optimal performance, we recommend using vLLM v0.8.3 or later. See https://github.com/volcengine/verl/blob/main/docs/README_vllm0.8.md for details.

Enable remove padding (sequence packing)
-----------------------------------------

Currently, for llama, mistral, gemma1 and qwen based models, users can enable `use_remove_padding=True` to utilize the sequence packing implementation provided by the transformers library. For other models, the transformers library may also support it, but we haven't tested it yet. Users can add the desired model config to the `test_transformer.py `_ file and test its functionality by running the following command:

.. code-block:: bash

   pytest -s tests/models/test_transformer.py

If the test passes, you can add your desired model to the model `registry.py `_ file. Then you can enjoy the performance boost of sequence packing, and you are welcome to PR your tested model to verl!

Batch Size Tuning
-----------------

To achieve higher throughput in experience preparation (i.e., model fwd) and model update (i.e., actor/critic fwd/bwd), users may need to tune the ``*micro_batch_size_per_gpu`` for the different computations. In verl, the core principle for setting batch sizes is:

- **Algorithmic metrics** (train batch size, PPO mini-batch size) are *global* (from a single-controller perspective), normalized in each worker. See the `normalization code `_.
- **Performance-related parameters** (micro batch size, max token length for dynamic batch size) are *local* parameters that define the per-GPU data allocation. See the `normalization code `_.

.. note:: In your training script, please use ``*micro_batch_size_per_gpu`` instead of ``*micro_batch_size``, so that you don't need to consider the normalization of ``micro_batch_size``; ``micro_batch_size`` will be deprecated.

Batch Size Tuning tips
""""""""""""""""""""""

Therefore, users may need to tune the ``*micro_batch_size_per_gpu`` to accelerate training. Here are some tips:
1. **Enable gradient checkpointing**: Set ``actor_rollout_ref.model.enable_gradient_checkpointing=True`` and ``critic.model.enable_gradient_checkpointing=True``. This often allows for larger micro-batch sizes and is beneficial for large mini-batch training.
2. Increase the ``*micro_batch_size_per_gpu`` as much as possible, until it equals the normalized ``mini_batch_size``.
3. **Use larger forward-only parameters**: Forward-only parameters, such as ``actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu``, ``actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu`` and ``critic.forward_micro_batch_size_per_gpu``, can be larger (e.g., 2x) than training-related micro batch sizes, such as ``actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu`` and ``critic.ppo_micro_batch_size_per_gpu``.
4. **Allow larger micro-batch sizes for Critic and Reward models**: The micro batch sizes of the Critic and Reward models can be larger than the Actor model's. This is because the actor model computes logits over a much larger vocabulary in its final layer.
5. **Enable activation offloading**: Set ``actor_rollout_ref.model.enable_activation_offload=True`` and ``critic.model.enable_activation_offload=True``. This often works together with gradient checkpointing to allow larger micro-batch sizes, and it is only available in the FSDP backend for now.

Tuning for Dynamic Batch Size
-----------------------------

Dynamic batch size is a technique that allows the model to process a similar number of tokens in each forward pass (with varying actual batch sizes). This can significantly improve training efficiency and reduce memory usage. To utilize this technique, users can set ``use_dynamic_bsz=True`` in the actor, ref, critic and reward models. With ``use_dynamic_bsz=True``, users don't need to tune ``*micro_batch_size_per_gpu``. Instead, users should tune the following parameters:

- ``actor_rollout_ref.actor.ppo_max_token_len_per_gpu``, ``critic.ppo_max_token_len_per_gpu``: the maximum number of tokens to be processed in the fwd and bwd of ``update_policy`` and ``update_critic``.
- ``actor_rollout_ref.ref.log_prob_max_token_len_per_gpu`` and ``actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu``: the maximum number of tokens to be processed in the fwd computation of ``compute_log_prob`` and ``compute_ref_log_prob``.
- ``critic.forward_micro_batch_size_per_gpu``, ``reward_model.forward_micro_batch_size_per_gpu``: the maximum number of tokens to be processed in the fwd computation of ``compute_values`` and ``compute_rm_score``.

Dynamic Batch Size Tuning tips
""""""""""""""""""""""""""""""

Here are some tips to tune the above parameters:

1. **Increase** ``actor_rollout_ref.actor.ppo_max_token_len_per_gpu``: Make it at least 2 x (max_prompt_length + max_response_length). We set it to 3x in `run_qwen2-7b_rm_seq_balance.sh `_. Try to increase it further for higher throughput.
2. **Forward-only parameters can be larger**: Similar to the non-dynamic-batch scenario, forward-only token limits can exceed those used in forward/backward operations.
3. **Use larger limits for Critic and Reward models**: Critic and Reward limits can be set to at least 2x the Actor's. For instance, we set them to 4x here: `run_qwen2-7b_rm_seq_balance.sh `_

.. :math:`\text{critic.ppo\_max\_token\_len\_per\_gpu} = 2 \times \text{actor.ppo\_max\_token\_len\_per\_gpu}`

Ulysses Sequence Parallel for Long Context Training
----------------------------------------------------

To utilize this technique, users can set ``ulysses_sequence_parallel_size>1`` in the actor, ref, critic and reward models.
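For example, sequence parallelism can be enabled per role via overrides like the following (the values are illustrative; each role accepts its own setting):

.. code-block:: bash

   actor_rollout_ref.actor.ulysses_sequence_parallel_size=2 \
   actor_rollout_ref.ref.ulysses_sequence_parallel_size=2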
Different models may use different ``ulysses_sequence_parallel_size`` values. To train long sequences (>32k), users may need to decrease the ``*micro_batch_size_per_gpu`` and ``*max_token_len_per_gpu`` to avoid OOM.

LigerKernel for SFT
----------------------

LigerKernel is a high-performance kernel for Supervised Fine-Tuning (SFT) that can improve training efficiency. To enable LigerKernel in your SFT training:

1. Install liger-kernel via ``pip3 install liger-kernel``.
2. In your SFT configuration file (e.g., ``verl/trainer/config/sft_trainer.yaml``), set the ``use_liger`` parameter:

   .. code-block:: yaml

      model:
        use_liger: True  # Enable LigerKernel for SFT

3. The default value is ``False``. Enable it only when you want to use LigerKernel's optimizations.
4. LigerKernel is particularly useful for improving training performance in SFT scenarios.

Forward prefetch in FSDP training backend
------------------------------------------

During the training phase, users can enable forward prefetching in FSDP by setting ``fsdp_config.forward_prefetch=True``, for example ``actor_rollout_ref.actor.fsdp_config.forward_prefetch=True``. This configuration prefetches the next forward-pass all-gather operation before the current forward computation completes, overlapping communication with computation and improving efficiency. For further details, refer to the `FSDP forward_prefetch `_ documentation.

.. note:: Backward prefetch is unsupported because the ``BACKWARD_POST`` policy may prefetch incorrectly in nested-module cases. For details, see the `FSDP documentation `_.

Migrating to FSDP2
----------------------

FSDP2 offers notable improvements over FSDP1. According to `PyTorch TorchTitan benchmarks `_:

- 7% lower GPU memory usage on average
- 1.5% throughput improvement with BF16 training
- Better composability with DTensor and per-parameter sharding

**Enabling FSDP2 in VERL:**

.. code-block:: python

   # Enable FSDP2 in actor configuration
   actor_rollout_ref.actor.strategy="fsdp2"

.. note:: FSDP2 requires PyTorch 2.1+ and is recommended for models with transformer architecture.

Memory optimization for entropy calculation from logits
---------------------------------------------------------

The ``logits`` tensor (typically of shape ``[bsz*seq_len, voc]``) can consume significant memory. When using ``compute_entropy_from_logits``, memory usage reaches approximately ``[bsz*seq_len, voc] × (4 bytes (float32) + 2 bytes (autocast for softmax+logsumexp) + 1 byte (softmax output))``.

To reduce this memory peak, enable chunked computation by setting ``actor_rollout_ref.ref.entropy_from_logits_with_chunking=True``. This processes the tensor in chunks of shape ``[chunk_size, voc]`` (e.g., 2048) rather than the full sequence length, exclusively during the model's forward pass.

Additionally, during training, standard gradient checkpointing (``enable_gradient_checkpointing=True``) does not apply to entropy calculations. To reduce memory peaks in this context, set ``actor_rollout_ref.actor.entropy_checkpointing=True``. This enables recomputation specifically for the entropy calculation, lowering memory usage during training.

================================================ FILE: verl_distillation/docs/perf/verl_profiler_system.md ================================================

# verl Profiler System

Last updated: 08/18/2025.
================================================
FILE: verl_distillation/docs/perf/verl_profiler_system.md
================================================

# verl Profiler System

Last updated: 08/18/2025.

## Architecture

The architecture of the verl profiler system is shown below:

![verl-profiler-arch](https://raw.githubusercontent.com/eric-haibin-lin/verl-community/2bc7ed0ba2f37f21707bfac3b241eca4b86d1bc6/docs/verl_profiler_arch.png)

There is a global profiler and tool configuration that sets common options at the single-controller level, deciding:

- `tool`: which tool to use
- `steps`: which steps to profile
- `save_path`: where to save the results

When a tool needs to profile the behavior of each role, role-level configuration is required:

- `tool`: which tool to use
- `enable`: whether to enable profiling on this role
- rank info: `all_ranks` and `rank`, which decide which ranks to profile or log output from

For the role-level tool config, some detailed behaviors need to be controlled, such as the `discrete` mode in the nsys profiler. Every role has a profiler config, and by default, the rollout/ref/reward models follow the Actor's behavior.

## To Add a new profiling tool

A newly added profiling tool should reuse the current APIs as much as possible.

1. The logic of **whether to use the tool**: `tool == [new tool]`.
2. Add the global and local tool config to `ppo_trainer.yaml`/`ppo_megatron_trainer.yaml` and each `[role].yaml`, under `global_tool_config.[new tool]` and `tool_config.[new tool]`.
3. The tool config should be implemented in `verl/utils/profiler/config.py` and inherit the `BaseConfig` class (see the sketch after this list).
4. Implement the profiling tool initialization logic using the configurations in `global_profiler.global_tool_config.[new tool]`, as well as the result-saving logic (results can also be saved at the role level).
5. For role function-level profiling, please follow the nsys profiler approach in `nvtx_profiler.py`: implement a profiler class that inherits `DistProfiler` and import the new profiler in `verl/utils/profiler/__init__.py`.
6. Add unit tests and examples so that others can use the tool conveniently.
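As an illustrative example for step 3, a new tool config might look like the following sketch (the field names and the `BaseConfig` import path are assumptions for illustration, not the actual verl schema):

```python
# Hypothetical sketch of a tool config for a new profiler (step 3).
# Field names and the BaseConfig import path are illustrative assumptions.
from dataclasses import dataclass, field

from verl.utils.profiler.config import BaseConfig  # assumed location


@dataclass
class MyToolConfig(BaseConfig):
    discrete: bool = False                           # profile each role method separately
    save_path: str = "outputs/profile"               # where this tool writes results
    ranks: list = field(default_factory=list)        # which ranks to profile
```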
================================================
FILE: verl_distillation/docs/preparation/prepare_data.rst
================================================

Prepare Data for Post-Training
========================================

Last updated: 02/09/2025.

Before starting the post-training job, we need to prepare the data for the policy training. The data should be stored in the parquet format.

We provide several data preprocess scripts for different datasets, including GSM8K, MATH, HellaSwag, Full_hh_rlhf. To prepare other datasets, follow these steps:

The data preprocess script can be divided into two parts:

1. The first part is the common part, which loads the dataset from huggingface's ``datasets`` package, preprocesses it with ``make_map_fn``, and then stores it in the parquet format.

.. code:: python

   import re
   import os
   import datasets
   from verl.utils.hdfs_io import copy, makedirs
   import argparse

   # To extract the solution for each prompt in the dataset
   # def extract_solution(solution_str):
   # ...

   if __name__ == '__main__':
       parser = argparse.ArgumentParser()
       parser.add_argument('--local_dir', default='/opt/tiger/gsm8k')
       parser.add_argument('--hdfs_dir', default=None)

       args = parser.parse_args()

       num_few_shot = 5
       data_source = 'openai/gsm8k'

       dataset = datasets.load_dataset(data_source, 'main')

       train_dataset = dataset['train']
       test_dataset = dataset['test']

       # Construct a `def make_map_fn(split)` for the corresponding datasets.
       # ...

       train_dataset = train_dataset.map(function=make_map_fn('train'), with_indices=True)
       test_dataset = test_dataset.map(function=make_map_fn('test'), with_indices=True)

       local_dir = args.local_dir
       hdfs_dir = args.hdfs_dir

       train_dataset.to_parquet(os.path.join(local_dir, 'train.parquet'))
       test_dataset.to_parquet(os.path.join(local_dir, 'test.parquet'))

       makedirs(hdfs_dir)
       copy(src=local_dir, dst=hdfs_dir)

2. The users are required to implement the ``make_map_fn()`` function (as well as ``extract_solution``) on their own to support different datasets or tasks.

We have already implemented the data preprocessing of the GSM8K, MATH, HellaSwag and Full_hh_rlhf datasets. We take the GSM8K dataset as an example:

**GSM8K**

In the ``make_map_fn``, each data item should consist of the following 5 fields:

1. ``data_source``: The name of the dataset, used to index the corresponding reward function in the ``RewardModel``.
2. ``prompt``: This field should be constructed in the format of the huggingface chat_template. The tokenizer in ``RLHFDataset`` will apply the chat template and tokenize the prompt.
3. ``ability``: Defines the task category.
4. ``reward_model``: Currently, we only utilize the ``ground_truth`` field during evaluation. The ``ground_truth`` is computed by the ``extract_solution`` function. **Note** that the implementation of the corresponding reward function should align with this extracted ``ground_truth``.
5. ``extra_info``: Records some information about the current prompt. Not used for now.

.. code:: python

   def extract_solution(solution_str):
       solution = re.search("#### (\\-?[0-9\\.\\,]+)", solution_str)  # extract the solution after ####
       assert solution is not None
       final_solution = solution.group(0)
       final_solution = final_solution.split('#### ')[1].replace(',', '')
       return final_solution

   instruction_following = "Let's think step by step and output the final answer after \"####\"."

   # add a row to each data item that represents a unique id
   def make_map_fn(split):

       def process_fn(example, idx):
           question = example.pop('question')
           question = question + ' ' + instruction_following
           answer = example.pop('answer')
           solution = extract_solution(answer)
           data = {
               "data_source": data_source,
               "prompt": [{
                   "role": "user",
                   "content": question
               }],
               "ability": "math",
               "reward_model": {
                   "style": "rule",
                   "ground_truth": solution
               },
               "extra_info": {
                   'split': split,
                   'index': idx
               }
           }
           return data

       return process_fn

================================================
FILE: verl_distillation/docs/preparation/reward_function.rst
================================================

Implement Reward Function for Dataset
======================================

Last updated: 06/02/2025.

For each dataset, we need to implement a reward function or utilize a reward model to compute the rewards for the generated responses. We have already pre-implemented some reward functions in the `reward_score directory `_. You can also use customized reward functions.

Currently, we support reward functions for the GSM8K and MATH datasets. For RLHF datasets (e.g., full_hh_rlhf) and code generation (e.g., APPS), we utilize a reward model and SandBox (will be open-sourced soon) for evaluation, respectively.

RewardManager
-------------

In the entrypoint of the PPO post-training script, `main_ppo.py `_, we implement a ``RewardManager`` that utilizes pre-implemented reward functions to compute the scores for each response. The ``RewardManager`` implements a ``__call__`` function to compute the score for each response; all the reward functions are executed through ``compute_score_fn``.
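As a rough illustration, such a ``__call__`` might look like the following sketch (the names are hypothetical and this is not verl's exact implementation):

.. code:: python

   import torch

   class SketchRewardManager:
       """Illustrative only: decode each response, look up the reward function
       by data_source, and write the score at the last response token."""

       def __init__(self, tokenizer, compute_score_fn):
           self.tokenizer = tokenizer
           self.compute_score_fn = compute_score_fn

       def __call__(self, data):
           reward_tensor = torch.zeros_like(data.batch["responses"], dtype=torch.float32)
           for i in range(len(data)):
               response_str = self.tokenizer.decode(
                   data.batch["responses"][i], skip_special_tokens=True
               )
               score = self.compute_score_fn(
                   data_source=data.non_tensor_batch["data_source"][i],
                   solution_str=response_str,
                   ground_truth=data.non_tensor_batch["reward_model"][i]["ground_truth"],
               )
               reward_tensor[i, -1] = score  # reward assigned to the final token
           return reward_tensor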
The input to ``__call__`` is a ``DataProto``, which includes:

- ``input_ids``, ``attention_mask``: the ``input_ids`` and ``attention_mask`` after applying the chat_template, including both prompt and response
- ``responses``: response tokens
- ``ground_truth``: The ground truth string of the current prompt. Stored in ``non_tensor_batch`` of the ``DataProto``; it should be preprocessed into the parquet files.
- ``data_source``: The dataset name of the current prompt. Stored in ``non_tensor_batch`` of the ``DataProto``; it should be preprocessed into the parquet files.

After detokenizing the responses, the response string and the ground truth string are passed to ``compute_score_fn`` to compute the score for each response.

Reward Functions
----------------

Pre-implemented
~~~~~~~~~~~~~~~

We have already pre-implemented some reward functions in the `reward_score directory `_.

- In the `GSM8k example `_, we force the response to output the final answer after four hash symbols (####), then use string matching to compare it with the ground truth. A completely correct answer scores 1 point; a correctly formatted but wrong answer scores 0.1 points; an incorrectly formatted answer scores 0 points.
- In the `MATH example `_, we follow the implementation in the `lm-evaluation-harness repository `_.

Customized
~~~~~~~~~~

You can implement customized reward functions in a separate file and specify them using ``custom_reward_function.path`` and ``custom_reward_function.name``. For how to set them, please refer to :ref:`config-explain-page`.

The parameters of your reward function should be ``data_source``, ``solution_str``, ``ground_truth``, and ``extra_info``. For example:

.. code:: python

   def my_reward_fn(data_source, solution_str, ground_truth, extra_info=None):
       return len(solution_str)/100

If you are testing only a single customized reward function, you can simply name it 'compute_score' and leave ``custom_reward_function.name`` unset. To run multiple trials with different customized reward functions, modify both ``custom_reward_function.path`` and ``custom_reward_function.name`` for each trial. For instance, you might create a single `my_reward.py` file and implement multiple reward functions within it. This way, for different trials, you only need to adjust ``custom_reward_function.name``, making it more convenient to conduct multiple tests within scripts.

================================================
FILE: verl_distillation/docs/requirements-docs.txt
================================================

# markdown support
recommonmark
myst_parser

# markdown table support
sphinx-markdown-tables

# theme default rtd
# crate-docs-theme
sphinx-rtd-theme

# pin tokenizers version to avoid env_logger version req
tokenizers==0.21

================================================
FILE: verl_distillation/docs/sglang_multiturn/interaction_system.rst
================================================

Interaction System for Multi-turn RL Training
=============================================

Last updated: 06/25/2025.

Overview
--------

The verl interaction system enables dynamic, multi-turn conversational feedback during reinforcement learning training. It allows models to engage in iterative problem-solving scenarios where interaction agents can provide corrective feedback, guidance, or evaluation based on the model's responses.

**New in Multi-Interaction Support**: The system now supports multiple named interactions within a single training session, enabling sophisticated training scenarios where different samples can use different interaction strategies.
This allows for curriculum learning, domain-specific feedback, and flexible agent switching at the sample level. Key features: - **Async-based Architecture**: Non-blocking interaction processing for distributed training - **Instance Management**: Stateful session handling with unique instance IDs for concurrent interactions - **SGLang Integration**: Seamless integration with SGLang rollout system for multi-turn conversations - **Configuration-driven**: Dynamic agent loading via YAML configuration files - **Multi-Interaction Support**: Registry system enabling multiple named interactions per rollout - **Sample-Level Selection**: Each sample can specify which interaction to use via configuration - **Reward Integration**: Turn-level scoring mechanism integrated with verl's reward system Architecture ------------ The interaction system follows a plugin-based architecture with clear separation of concerns: .. code-block:: Interaction Registry System ↓ BaseInteraction (Abstract Interface) ↓ Multiple Named Interactions (e.g., Gsm8kInteraction, CustomInteraction) ↓ SGLang Rollout Integration (interaction_map) ↓ Sample-Level Interaction Selection ↓ Async Request Lifecycle Management Core Components ~~~~~~~~~~~~~~~ **Interaction Registry System** The interaction registry system allows loading and managing multiple named interactions: .. code-block:: python from verl.interactions.utils.interaction_registry import initialize_interactions_from_config # Load multiple interactions from config interaction_map = initialize_interactions_from_config("config.yaml") # Access specific interaction by name gsm8k_interaction = interaction_map["gsm8k"] custom_interaction = interaction_map["custom_solver"] **BaseInteraction Interface** All interaction agents must implement the ``BaseInteraction`` abstract class: .. code-block:: python from verl.interactions.base import BaseInteraction from typing import Dict, Any, List, Tuple, Optional class BaseInteraction: def __init__(self, config: Dict[str, Any]): self.config = config self.name: str = config.get("name", "interaction_agent") async def start_interaction(self, instance_id: Optional[str] = None, **kwargs) -> str: """Initialize interaction session, return instance_id""" async def generate_response(self, instance_id: str, messages: List[Dict[str, Any]], **kwargs) -> Tuple[bool, str, float, Dict[str, Any]]: """Generate response, return (should_terminate, response, score, metadata)""" async def calculate_score(self, instance_id: str, **kwargs) -> float: """Calculate turn-level score for RL training""" async def finalize_interaction(self, instance_id: str, **kwargs) -> None: """Clean up resources""" **Request Lifecycle** The interaction system integrates with SGLang's async rollout via state management: 1. ``PENDING`` → Initialize interaction via ``start_interaction()`` 2. ``GENERATING`` → Model generates response 3. ``INTERACTING`` → Process response via ``generate_response()`` 4. ``GENERATING`` → Continue if not terminated, otherwise ``COMPLETED`` Configuration ------------- **Basic Setup** Enable interaction in your rollout configuration: .. code-block:: yaml actor_rollout_ref: rollout: multi_turn: enable: true interaction_config_path: "path/to/interaction_config.yaml" max_user_turns: 10 max_assistant_turns: 10 **Interaction Configuration File** Create an interaction configuration file (e.g., ``interaction_config.yaml``): **Single Interaction (Legacy Format)** .. 
code-block:: yaml

   interaction:
     - name: "gsm8k"
       class_name: "verl.interactions.gsm8k_interaction.Gsm8kInteraction"
       config: {}

**Multiple Interactions (New Format)**

.. code-block:: yaml

   interaction:
     - name: "gsm8k"
       class_name: "verl.interactions.gsm8k_interaction.Gsm8kInteraction"
       config: {}
     - name: "custom_solver"
       class_name: "custom.interactions.CustomInteraction"
       config:
         solver_type: "advanced"
         timeout: 30
     - name: "code_verifier"
       class_name: "verl.interactions.base.BaseInteraction"
       config:
         verification_mode: "strict"

**Automatic Name Generation**

If no ``name`` field is provided, the system will automatically generate one from the class name:

.. code-block:: yaml

   interaction:
     - class_name: "verl.interactions.gsm8k_interaction.Gsm8kInteraction"
       config: {}
       # Automatically generates name: "gsm8k"

The system will dynamically load all specified interaction classes and make them available by name.

Implementation Example: GSM8K
-----------------------------

The GSM8K interaction demonstrates a complete implementation for math problem-solving scenarios:

.. code-block:: python

   from verl.interactions.base import BaseInteraction
   from verl.utils.reward_score import gsm8k
   from uuid import uuid4

   class Gsm8kInteraction(BaseInteraction):
       def __init__(self, config: dict):
           super().__init__(config)
           self._instance_dict = {}

       async def start_interaction(self, instance_id=None, ground_truth=None, **kwargs):
           if instance_id is None:
               instance_id = str(uuid4())
           self._instance_dict[instance_id] = {
               "response": "",
               "ground_truth": ground_truth,
               "reward": 0.0,
           }
           return instance_id

       async def generate_response(self, instance_id, messages, **kwargs):
           # Extract the content of the last assistant message
           content = ""
           for item in reversed(messages):
               if item.get("role") == "assistant":
                   content = item.get("content", "")
                   break

           # Store the response (expected in GSM8K format with a "#### " prefix)
           self._instance_dict[instance_id]["response"] = content

           reward = await self.calculate_score(instance_id)
           if reward == 1.0:
               return True, "Your response is correct!", 1.0, {}
           else:
               return False, "Your response is incorrect! You need to reflect on your answer and try again.", 0.0, {}

       async def calculate_score(self, instance_id, **kwargs):
           return gsm8k.compute_score(
               self._instance_dict[instance_id]["response"],
               self._instance_dict[instance_id]["ground_truth"],
               method="strict",
               format_score=0.0,
               score=1.0,
           )

       async def finalize_interaction(self, instance_id, **kwargs):
           del self._instance_dict[instance_id]

Training Integration
--------------------

**Training Script Configuration**

Include the interaction configuration in your training command:

.. code-block:: bash

   python3 -m verl.trainer.main_ppo \
       --config-path="$CONFIG_PATH" \
       --config-name='gsm8k_multiturn_grpo_w_interaction' \
       algorithm.adv_estimator=grpo \
       data.train_batch_size=512 \
       data.return_raw_chat=True \
       actor_rollout_ref.rollout.name=sglang \
       actor_rollout_ref.rollout.multi_turn.interaction_config_path="$PROJECT_DIR/examples/sglang_multiturn/config/interaction_config/gsm8k_interaction_config.yaml" \
       trainer.total_epochs=15

**Data Requirements**

Ensure your dataset includes interaction parameters with the ``name`` field for interaction selection: ..
code-block:: python # Dataset should include interaction_kwargs in non_tensor_batch interaction_kwargs = [ {"name": "gsm8k", "query": "What is 2+2?", "ground_truth": "4"}, {"name": "custom_solver", "query": "Solve: x^2 + 5x + 6 = 0", "ground_truth": "x = -2, -3"}, {"name": "gsm8k", "query": "What is 3+3?", "ground_truth": "6"}, ] **Sample-Level Interaction Selection** Each sample can specify which interaction to use via the ``name`` field. This enables flexible training scenarios where different samples use different interaction strategies: .. code-block:: python # Example: Math problems use GSM8K interaction, code problems use code verifier data_samples = [ { "prompt": "What is 15% of 200?", "interaction_kwargs": { "name": "gsm8k", "query": "What is 15% of 200?", "ground_truth": "30" } }, { "prompt": "Write a function to check if a number is prime", "interaction_kwargs": { "name": "code_verifier", "code_type": "python", "expected_behavior": "return True for prime numbers" } } ] **Backward Compatibility** If no ``name`` field is provided in ``interaction_kwargs``, the system defaults to ``"gsm8k"`` for backward compatibility. Best Practices -------------- **Resource Management** - Always implement proper cleanup in ``finalize_interaction()`` - Use unique instance IDs to avoid conflicts in concurrent training - Handle edge cases like empty messages or malformed content **Performance Optimization** - Keep interaction logic lightweight to avoid blocking training - Use async/await properly to maintain non-blocking behavior - Consider caching expensive computations within interaction instances **Testing** Comprehensive testing is essential for interaction systems: .. code-block:: python import pytest from unittest.mock import patch @pytest.mark.asyncio async def test_interaction_workflow(): interaction = YourInteraction({}) # Test complete workflow instance_id = await interaction.start_interaction(ground_truth="expected_answer") messages = [{"role": "user", "content": "user_content"}, {"role": "assistant", "content": "assistant_content"}] should_terminate, response, reward, metadata = await interaction.generate_response(instance_id, messages) assert should_terminate in [True, False] assert isinstance(reward, float) await interaction.finalize_interaction(instance_id) Advanced Usage -------------- **Multi-Interaction Training Strategies** You can design sophisticated training scenarios using multiple interactions: .. code-block:: python # Example: Progressive difficulty with different interaction agents class MathTrainingPipeline: def create_interaction_config(self): return { "interaction": [ { "name": "basic_math", "class_name": "verl.interactions.gsm8k_interaction.Gsm8kInteraction", "config": {"difficulty": "easy"} }, { "name": "advanced_math", "class_name": "custom.interactions.AdvancedMathInteraction", "config": {"difficulty": "hard", "allow_hints": True} }, { "name": "competition_math", "class_name": "custom.interactions.CompetitionMathInteraction", "config": {"time_limit": 300, "show_steps": False} } ] } def create_curriculum_data(self, epoch): if epoch < 5: return [{"name": "basic_math", ...} for _ in samples] elif epoch < 10: return [{"name": "advanced_math", ...} for _ in samples] else: return [{"name": "competition_math", ...} for _ in samples] **Custom Scoring Functions** You can integrate custom reward functions: .. 
code-block:: python

   async def calculate_score(self, instance_id, **kwargs):
       response = self._instance_dict[instance_id]["response"]
       ground_truth = self._instance_dict[instance_id]["ground_truth"]

       # Custom evaluation logic
       if custom_evaluation_function(response, ground_truth):
           return 1.0
       else:
           return 0.0

**Multi-step Interactions**

For complex scenarios requiring multiple feedback rounds:

.. code-block:: python

   async def generate_response(self, instance_id, messages, **kwargs):
       instance = self._instance_dict[instance_id]
       instance["attempts"] += 1

       # Evaluate the current response
       reward = await self.calculate_score(instance_id)

       if reward > 0.8:
           return True, "Excellent work!", reward, {}
       elif instance["attempts"] < 3:
           return False, "Good attempt, but try to improve...", reward, {}
       else:
           return True, "Maximum attempts reached.", reward, {}

Troubleshooting
---------------

**Common Issues**

1. **Instance ID Conflicts**: Ensure unique instance IDs across concurrent sessions
2. **Memory Leaks**: Always call ``finalize_interaction()`` to clean up resources
3. **Blocking Operations**: Keep interaction logic async and non-blocking
4. **Configuration Errors**: Verify that the interaction config path and class names are correct
5. **Interaction Name Conflicts**: Ensure all interactions have unique names in the configuration
6. **Missing Interaction**: Verify that the ``name`` field in ``interaction_kwargs`` matches an available interaction
7. **Backward Compatibility**: When migrating from single to multi-interaction, add ``name`` fields to existing data

**Debugging**

Enable debug logging to trace the interaction flow:

.. code-block:: bash

   export VERL_LOGGING_LEVEL=DEBUG

**Performance Monitoring**

Monitor the interaction system's impact on training throughput and adjust accordingly.

Related Documentation
---------------------

- :doc:`multiturn`: Basic multi-turn rollout configuration
- :doc:`sandbox_fusion`: Tool integration with SGLang
- :doc:`search_tool_example`: Search tool implementation example

================================================
FILE: verl_distillation/docs/sglang_multiturn/multiturn.rst
================================================

Multi-turn Rollout Support
==========================

Last updated: 06/27/2025.

Basic Configuration
~~~~~~~~~~~~~~~~~~~

To enable multi-turn rollout, make sure to configure the following fields in your rollout configuration:

.. code-block:: yaml

   actor_rollout_ref:
     rollout:
       multi_turn: True
       name: "sglang"

This configuration activates the SGLang engine for multi-turn interaction during rollout.

Custom Tool Configuration
~~~~~~~~~~~~~~~~~~~~~~~~~

For custom environment interaction tools, you can implement your own tools based on ``verl.tools.base_tool.BaseTool``. Then, specify your tool configurations in a YAML file:

.. code-block:: yaml

   tools:
     - class_name: ""
       config:
         type: native
       tool_schema:

You may refer to GSM8KTool_example_configuration_, which is one example of the tool configurations. Its implementation can be found in gsm8k_tool.py_.

Finally, set the ``tools_config_file`` in your rollout config:

.. code-block:: yaml

   actor_rollout_ref:
     rollout:
       tool_kwargs:
         tools_config_file:

This allows integration of customized tool behaviors during actor rollout steps.

If you want rollout with simulated interaction, you can set the ``interaction_config_file`` in your rollout config:

.. code-block:: yaml

   interaction:
     - class_name: ""
       config: {}

..
code-block:: yaml

   actor_rollout_ref:
     rollout:
       interaction_config_file:

If your tool creates multi-modal inputs, you should return a list of multi-modal inputs in your ``tool.execute()`` implementation. Images and videos should be processed before being returned. For example, if you are using Qwen2.5-VL, you can use the following code to get the representations:

.. code-block:: python

   async def create(self, ...) -> tuple[str, ToolResponse]:
       ...
       from verl.utils.dataset.vision_utils import process_image, process_video

       img1 = process_image(img1)
       video1 = process_video(video1)

       # vLLM expects the singular keys ("image" | "video") instead of ("images" | "videos")
       # link: https://github.com/vllm-project/vllm/blob/3c545c0c3b98ee642373a308197d750d0e449403/vllm/multimodal/parse.py#L205
       return instance_id, ToolResponse(image=[img1, ...], video=[video1, ...], text="...")

   async def execute(self, ...) -> Tuple[str | Dict[str, Any], float, dict]:
       ...
       from verl.utils.dataset.vision_utils import process_image, process_video

       img1 = process_image(img1)
       video1 = process_video(video1)

       # vLLM expects the singular keys ("image" | "video") instead of ("images" | "videos")
       # link: https://github.com/vllm-project/vllm/blob/3c545c0c3b98ee642373a308197d750d0e449403/vllm/multimodal/parse.py#L205
       return ToolResponse(image=[img1, ...], video=[video1, ...], text="..."), 0, {}

Remember to set ``return_multi_modal_inputs: False`` in your dataset config so that the multi-modal inputs are processed correctly in the rollout. Refer to the `Handling Multi-Modal Inputs in Datasets`_ section for more details.

MCP Tool Configuration
~~~~~~~~~~~~~~~~~~~~~~

For MCP interaction tools, you can flexibly configure them using a YAML file. The typical setup is as follows:

.. code-block:: yaml

   tools:
     - class_name: ""
       config:
         type: mcp
         mcp:
           mcp_servers_config_path: ./mcp_server.json
           tool_selected_list: {}

The ``tool_selected_list`` field is optional and specifies which tools to use from the servers. If you want to enable all available tools, simply omit this attribute. Besides, ``mcp_servers_config_path`` points to a JSON file containing the MCP server configurations. For example:

.. code-block:: json

   {
     "mcpServers": {
       "SSE Server": {
         "url": "your_server_url",
         "auth_token": "your_server_api_token"
       },
       "STDIO Server": {
         "command": "npx",
         "args": ["-y", "server-mcp@0.2.1"],
         "env": {
           "SERVER_API_KEY": "your_server_api_token"
         }
       }
     }
   }

Since the content formats returned by the MCP server may vary, users can inherit from ``MCPBaseTool`` and override the ``_parse_tool_result`` method to implement custom parsing logic.

.. code-block:: python

   class MCPYourTool(MCPBaseTool):
       def __init__(self, config: dict, tool_schema: OpenAIFunctionToolSchema):
           super().__init__(config, tool_schema)

       def _parse_tool_result(self, content: list) -> Tuple[str, dict]:
           ...

Overall, you may refer to mcp_search_tool.py_ and mcp_tool_config.yaml_ for custom implementation and configuration.
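For instance, an override that keeps only the text items returned by the server could look like the following sketch (the structure of the ``content`` items is an assumption here; adapt it to what your server actually returns):

.. code-block:: python

   from typing import Tuple

   class MCPTextOnlyTool(MCPBaseTool):
       """Illustrative example, not part of verl: concatenate text items from the MCP result."""

       def _parse_tool_result(self, content: list) -> Tuple[str, dict]:
           # Assumes each item exposes `type` and `text` attributes, as in typical MCP clients.
           texts = [item.text for item in content if getattr(item, "type", "") == "text"]
           return "\n".join(texts), {"num_items": len(content)}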
Multi-turn Tokenization
~~~~~~~~~~~~~~~~~~~~~~~

Tokenizing multi-turn rollouts poses a challenge: after applying the chat template and tokenizing the full message list, it is hard to identify which tokens belong to assistant messages. Since the token list is flat, it lacks direct alignment with the message roles.

To address this, we adopt a **delta-based tokenization** strategy. Each time the LLM generates a new message, we:

1. Apply the chat template to all prior messages (``messages[:i]``).
2. Apply the chat template again including the latest message (``messages[:i+1]``).
3. Tokenize only the *delta* between these two serialized message strings.

This ensures that only tokens generated by the assistant are included in the loss mask.

.. code-block:: python

   # When using a tokenizer
   # Exclude the assistant prompt (e.g., "<|im_start|>assistant") from the loss by setting add_generation_prompt=True
   prev = tokenizer.apply_chat_template(messages[:i], add_generation_prompt=True, tokenize=False)
   curr = tokenizer.apply_chat_template(messages[:i+1], add_generation_prompt=False, tokenize=False)
   delta_ids = tokenizer.encode(curr[len(prev):], add_special_tokens=False)
   token_ids += delta_ids
   loss_mask += [1] * len(delta_ids)  # Mask only the new assistant tokens

.. code-block:: python

   # When using a processor
   # Exclude the assistant prompt (e.g., "<|im_start|>assistant") from the loss by setting add_generation_prompt=True
   prev = processor.apply_chat_template(messages[:i], add_generation_prompt=True, tokenize=False)
   prev_model_inputs = processor(text=prev, images=images, videos=videos, return_tensors="pt")
   curr = processor.apply_chat_template(messages[:i+1], add_generation_prompt=False, tokenize=False)
   curr_model_inputs = processor(text=curr, images=images, videos=videos, return_tensors="pt")
   delta_ids = curr_model_inputs["input_ids"][0].tolist()[len(prev_model_inputs["input_ids"][0]):]
   token_ids += delta_ids
   loss_mask += [1] * len(delta_ids)  # Mask only the new assistant tokens

While we have validated that this produces results consistent with full-message tokenization, future models' chat templates could break compatibility. To guard against silent inconsistencies, we compare the delta-based tokenization with the full-tokenization result by default at the end of each rollout. If you see the following warning, you can check the mismatched substring in the log:

.. code-block::

   Inconsistent training and inference tokenization detected. This may lead to unexpected behavior during training. Please review your chat template to determine if this is intentional. For more information, refer to the multiturn README.md.

The tokenization sanity check mode can be configured using the ``actor_rollout_ref.rollout.multi_turn.tokenization_sanity_check_mode`` parameter, which accepts the following values:

- ``strict`` (default): Performs a strict comparison between delta-based and full tokenization results, raising warnings for any differences.
- ``ignore_strippable``: Ignores differences in whitespace characters (``\n``, ``\t``, ``\r``, spaces) while still checking for meaningful text mismatches. This is useful when debugging chat template issues where whitespace variations are expected and acceptable.
- ``disable``: Completely disables the tokenization sanity check. Only use this if you have thoroughly validated that tokenization discrepancies are expected and will not impact training.

Example configuration:

.. code-block:: yaml

   actor_rollout_ref:
     rollout:
       multi_turn:
         tokenization_sanity_check_mode: "ignore_strippable"  # Choose from: "disable", "ignore_strippable", "strict"

Handling Multi-Modal Inputs in Datasets
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

If your dataset includes multi-modal inputs (such as images or videos), you can control whether these are pre-processed and included in each sample by setting the ``return_multi_modal_inputs`` flag in your dataset config (used by ``RLHFDataset``).
- ``return_multi_modal_inputs: True`` (default): The dataset will pre-process and include a ``multi_modal_inputs`` dictionary for each sample. This dict contains the model-ready representations (e.g., image tensors, video tensors, etc.) as produced by your processor. This is useful for single-turn or SFT-style training, where the model expects all modalities to be present in the batch.
- ``return_multi_modal_inputs: False``: The dataset will not include the ``multi_modal_inputs`` field. This is recommended for multi-turn RL or tool-augmented rollouts, where the model may generate new multi-modal inputs dynamically during rollout and you want to avoid conflicts or redundant data in the batch.

Special Cases
^^^^^^^^^^^^^

Some models (e.g., Qwen/QwQ-32B and the Qwen3 series) remove internal reasoning content during chat template rendering. As a result, the message content can vary across turns, making the delta-based tokenization inaccurate. For example, consider the following conversation:

.. code-block:: python

   messages = [
       {"role": "system", "content": "You are a helpful assistant."},
       {"role": "user", "content": "What is 2 + 2?"},
       {"role": "assistant", "content": "user asked about a simple math question. 2 + 2 = 4."},
       {"role": "user", "content": "Explain why."},
       {"role": "assistant", "content": "user wants to know the reasoning behind the answer. Search for a good explanation", "tool_calls": [{"id": "tool1", "type": "search", "arguments": {"query": "Why is 2 + 2 = 4?"}}]},
       {"role": "tool", "content": "The sum of two and two is four because it is a basic arithmetic operation."},
       {"role": "assistant", "content": "The tool provided a good explanation. The sum of two and two is four because it is a basic arithmetic operation."}
   ]

1. Qwen/QwQ-32B will remove all reasoning content except the last assistant message after applying the chat template.

.. code-block:: text

   <|im_start|>system
   You are a helpful assistant.<|im_end|>
   <|im_start|>user
   What is 2 + 2?<|im_end|>
   <|im_start|>assistant
   2 + 2 = 4.<|im_end|>
   <|im_start|>user
   Explain why.<|im_end|>
   <|im_start|>assistant
   {"name": "", "arguments": {"query": "Why is 2 + 2 = 4?"}}
   <|im_end|>
   <|im_start|>user
   The sum of two and two is four because it is a basic arithmetic operation.
   <|im_end|>
   <|im_start|>assistant
   The tool provided a good explanation. The sum of two and two is four because it is a basic arithmetic operation.<|im_end|>

2. The Qwen3 series will remove all reasoning content before the last user message.

.. code-block:: text

   <|im_start|>system
   You are a helpful assistant.<|im_end|>
   <|im_start|>user
   What is 2 + 2?<|im_end|>
   <|im_start|>assistant
   2 + 2 = 4.<|im_end|>
   <|im_start|>user
   Explain why.<|im_end|>
   <|im_start|>assistant
   user wants to know the reasoning behind the answer. Search for a good explanation
   {"name": "", "arguments": {"query": "Why is 2 + 2 = 4?"}}
   <|im_end|>
   <|im_start|>user
   The sum of two and two is four because it is a basic arithmetic operation.
   <|im_end|>
   <|im_start|>assistant
   The tool provided a good explanation. The sum of two and two is four because it is a basic arithmetic operation.<|im_end|>

To handle this, we fall back to a **fixed base conversation** containing only a single system and user message. Since this base doesn't include assistant messages or reasoning content, it remains consistent across turns.

..
code-block:: python

   BASE_CHAT_HISTORY = [
       {"role": "system", "content": "You are a helpful assistant."},
       {"role": "user", "content": "I am a user."}
   ]
   prev = tokenizer.apply_chat_template(BASE_CHAT_HISTORY, add_generation_prompt=True, tokenize=False)
   curr = tokenizer.apply_chat_template([*BASE_CHAT_HISTORY, messages[i]], add_generation_prompt=False, tokenize=False)
   delta_ids = tokenizer.encode(curr[len(prev):], add_special_tokens=False)
   token_ids += delta_ids
   loss_mask += [1] * len(delta_ids)

This method works well for the Qwen3 series. However, Qwen/QwQ-32B currently has a bug in its chat template. A fix_ has been proposed but not yet adopted. Until then, use the following command to download the fixed model revision:

.. code-block:: bash

   pip install huggingface_hub
   huggingface-cli download Qwen/QwQ-32B --revision refs/pr/81

.. _fix: https://huggingface.co/Qwen/QwQ-32B/discussions/81

Discrepancy Between Training and Inference Templates
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Although the above approach fixes the delta mismatch issue, the removal of reasoning content in the inference-time chat template introduces a new discrepancy: training uses the full reasoning content, while inference does not. This mismatch can affect model performance in unpredictable ways. To avoid it, we default to using the full response (including reasoning) for both training and rollout.

However, this approach comes with trade-offs:

1. Long reasoning content can easily exceed the model's context window, especially in multi-turn rollout.
2. There is now a mismatch between rollout and the production environment: models will not have reasoning content from past turns if you use the default chat template in production.

We are still evaluating the impact of these issues. If you experience context length problems or prefer rollouts that match production (i.e., exclude reasoning), you can enable ``actor_rollout_ref.rollout.multi_turn.use_inference_chat_template = True``.

GSM8K Multi-turn Training Performance
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

See the training performance of multi-turn rollout on the GSM8K task HERE_.

.. _HERE: https://wandb.ai/zhaochenyang20/gsm8k_async_rl/runs/1ro1r7om?nw=nwuserzhaochenyang20
.. _GSM8KTool_example_configuration: https://github.com/volcengine/verl/blob/main/examples/sglang_multiturn/config/tool_config/gsm8k_tool_config.yaml
.. _gsm8k_tool.py: https://github.com/volcengine/verl/blob/main/verl/tools/gsm8k_tool.py
.. _mcp_search_tool.py: https://github.com/volcengine/verl/blob/main/verl/tools/mcp_search_tool.py
.. _mcp_tool_config.yaml: https://github.com/volcengine/verl/blob/main/examples/sglang_multiturn/config/tool_config/mcp_tool_config.yaml

Interaction System
~~~~~~~~~~~~~~~~~~

For dynamic conversational feedback during RL training, see:

.. toctree::
   :maxdepth: 1

   interaction_system

Search Tool Integration
~~~~~~~~~~~~~~~~~~~~~~~

.. toctree::
   :maxdepth: 1

   search_tool_example

Code Walkthrough
~~~~~~~~~~~~~~~~

If you want to learn more in depth about the code execution flow, please read https://github.com/zhaochenyang20/Awesome-ML-SYS-Tutorial/tree/main/rlhf/verl/multi-turn/code-walk-through

================================================
FILE: verl_distillation/docs/sglang_multiturn/sandbox_fusion.rst
================================================

===============================
Sandbox Fusion Tool Integration
===============================

Last updated: 06/10/2025.
Motivations
===========

- As users of verl, we want to allow the model to call certain tools during Actor rollout, incorporating the results into the training process.
- A colleague from ByteDance proposed a paper aimed at enhancing model capability through code execution tools.
- We aim to support the tool-calling capabilities of inference engines, using `sandbox-fusion` as the code execution system, providing the community with a reimplementation of `retools`.

Reward Compute with Sandbox Fusion + FaaS Integration
=====================================================

- For current datasets and tasks, similar work already exists (e.g., Prime), which uses local processes as runners to execute model-generated code for reward computation.
- On this basis, #1429 has advanced the design by integrating FaaS as the runner for reward computation.

Goals
=====

- Adapt to the `sglang` tool-calling protocol and define tools for sandbox fusion.
- Integrate with the `async-rollout` process, ensuring sandbox fusion tools follow asyncIO conventions.
- Design and implement a basic rate limiter to prevent issues such as 429 errors.

Non-Goals
=========

- Training effectiveness is out of scope.
- Observability metrics are not considered.
- Distributed failover and component fault tolerance are not addressed.

Design Details
==============

Tool Schema Definition
----------------------

- Currently, only code execution is considered, requiring a `code` field in the JSON produced by the model.
- Only Python code is supported for now, so no `language` parameter is defined.

.. code-block:: python

   OpenAIFunctionToolSchema(
       type="function",
       function=OpenAIFunctionSchema(
           name="code_interpreter",
           description="A tool for executing code.",
           parameters=OpenAIFunctionParametersSchema(
               type="object",
               properties={
                   "code": OpenAIFunctionPropertySchema(
                       type="string",
                       description="The code to execute.",
                       enum=None,
                   )
               },
               required=["code"],
           ),
           strict=False,
       )
   )

Configuration Parameters
------------------------

+----------------------------+--------------------------------------------------------------+
| Parameter Name             | Description                                                  |
+============================+==============================================================+
| `num_workers`              | Number of worker threads/processes per DP to request runner. |
+----------------------------+--------------------------------------------------------------+
| `rate_limit`               | Global limit of concurrent code executions. Default: 10      |
+----------------------------+--------------------------------------------------------------+
| `default_timeout`          | Timeout (in seconds) for each code execution. Default: 30    |
+----------------------------+--------------------------------------------------------------+
| `default_language`         | Default programming language. Default: "python"              |
+----------------------------+--------------------------------------------------------------+
| `enable_global_rate_limit` | Whether to enable global rate limiting. Default: True        |
+----------------------------+--------------------------------------------------------------+
| `sandbox_fusion_url`       | URL for the veFaaS sandbox execution service                 |
+----------------------------+--------------------------------------------------------------+

Rate Limiting Design
--------------------

Objective:

- Limit the number of inflight requests using a token bucket model.
- Ensure ordered submission to code runners to avoid starvation due to backoff.

Design Highlights:

- Use a Ray global actor as a singleton distributed counter at the cluster level.
- Semaphore used for counting, with `acquire` and `release` in separate thread pools to preserve order. - Use Ray’s cloud-pickle to serialize functions for decoupled `ExecutionWorker`. .. code-block:: python @ray.remote(concurrency_groups={"acquire": 1,"release": 10}) class TokenBucketWorker: def __init__(self, rate_limit: int): self.rate_limit = rate_limit self.current_count = 0 self._semaphore = threading.Semaphore(rate_limit) @ray.method(concurrency_group="acquire") def acquire(self): self._semaphore.acquire() self.current_count += 1 @ray.method(concurrency_group="release") def release(self): self._semaphore.release() self.current_count -= 1 def get_current_count(self): return self.current_count class ExecutionWorker: def __init__(self, enable_global_rate_limit=True, rate_limit=10): self.rate_limit_worker = self._init_rate_limit(rate_limit) if enable_global_rate_limit else None def _init_rate_limit(self, rate_limit): return TokenBucketWorker.options(name="rate-limiter", get_if_exists=True).remote(rate_limit) def execute(self, fn: Callable[..., T], *fn_args, **fn_kwargs) -> T: with ExitStack() as stack: stack.callback(self.rate_limit_worker.release.remote) ray.get(self.rate_limit_worker.acquire.remote()) try: return fn(*fn_args, **fn_kwargs) except Exception as e: logger.warning(f"Error when executing code: {e}") def init_execution_pool(num_workers: int, enable_global_rate_limit=True, rate_limit=10, mode: PoolMode=PoolMode.ThreadMode): if mode == PoolMode.ThreadMode: return ray.remote(ExecutionWorker).options(max_concurrency=num_workers).remote( enable_global_rate_limit=enable_global_rate_limit, rate_limit=rate_limit ) else: raise NotImplementedError("Process mode is not implemented yet") Tool Implementation ------------------- - Use `instance_id` to identify requests across multiple dialogue rounds. - Use `execution_pool` to implement async invocation. - Cleanup state after rollout completion. .. code-block:: python class SandboxFusionTool(BaseTool): def __init__(self, config: dict, tool_schema: OpenAIFunctionToolSchema): ... self.execution_pool = init_execution_pool(...) ... async def create(self, instance_id: Optional[str] = None, ...): ... async def execute(self, instance_id: str, parameters: dict[str, Any], **kwargs) -> Tuple[str, float, dict]: code = parameters.get("code", "") timeout = parameters.get("timeout", self.default_timeout) language = parameters.get("language", self.default_language) if not isinstance(code, str): code = str(code) result = await self.execution_pool.execute.remote(self.execute_code,instance_id,code,timeout,language) self._instance_dict[instance_id]["reward"].append(result.strip()) return result, result, {} def execute_code(self,instance_id,code,timeout=30,language="python"): result_status, metadata = _process_single_case(0, None, None,self.sandbox_fusion_url, code, timeout, language) # we should always expect this since we don't have correct answer if metadata["run_status"] == "Finished": actual_output = metadata["stdout"] if metadata["stdout"] is not None else "" return actual_output else: return "no stdout here" async def calc_reward(self, instance_id: str, ...): ... async def release(self, instance_id: str, ...): ... Test Plan ========= Unit Tests ---------- - **test_tools_registration**: Test tool registration and initialization. - **test_rollout_req_creation**: Validate that `AsyncRolloutReq` is built correctly. - **test_over_size_case**: Ensure rollout terminates early when exceeding `max_seq_len`. 
- **test_tool_call_basic_case**: Mock `sglang` output, validate tool call and result.
- **test_tool_call_batch_case**: Test batch processing of tool calls.
- **test_basic_multi_process_init**: Validate that the Ray global actor behaves as a singleton.
- **TestSingleNodeRateLimiterCase**: Verify that the rate limiter works in single-node mode.
- **test_rotten_execution**: Ensure the rate limiter recovers from function errors.
- **TestMultiNodeRateLimiterCase**: Verify behavior in multi-node environments.

e2e Tests
----------

We provide e2e test scripts in the `tests/special_e2e` folder, named `tests/special_e2e/run_gsm8k_fsdp_sgl_multiturn_sf_tool.sh`. By setting ``trainer.rollout_data_dir`` you can dump the rollout data to local disk. Here is a sample taken from the rollout data:

.. code-block:: python

{ "input": " system\nYou are a math expert. You are given a question and you need to solve it step by step. Reasoning step by step before any tool call. You should use the `calc_gsm8k_reward` tool after step by step solving the question, before generate final answer at least once and refine your answer if necessary. Put your final answer in the format of `#### `.\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within XML tags:\n\n{\"type\": \"function\", \"function\": {\"name\": \"code_interpreter\", \"description\": \"A tool for executing code.\", \"parameters\": {\"type\": \"object\", \"properties\": {\"code\": {\"type\": \"string\", \"description\": \"The code to execute.\", \"enum\": null}}, \"required\": [\"code\"]}, \"strict\": false}}\n\n\nFor each function call, return a json object with function name and arguments within XML tags:\n\n{\"name\": , \"arguments\": }\n\n user\nJohn gets a bonus that's the same percentage every year. Last year he made $100,000 and got a $10,000 bonus. This year he makes $200,000. How much will John make this year when adding both his total pay and bonus together? Let's think step by step and output the final answer after `####`.\n assistant\n", "output": "assistant\n\nOkay, let's see. John gets a bonus that's the same percentage every year. Last year, he made $100,000 and got a $10,000 bonus. This year, he makes $200,000. The question is asking how much he'll make this year when adding both his total pay and bonus together.\n\nFirst, I need to figure out the percentage bonus he gets. Since the bonus is the same percentage each year, if I can find that percentage based on last year's details, I can apply it to this year's earnings to find this year's bonus. Then, I'll add that bonus to his total pay this year.\n\nLast year, he earned $100,000 and got a $10,000 bonus. To find the percentage, I can divide the bonus by his total earnings. So, 10,000 divided by 100,000. Let me calculate that.\n\n10,000 / 100,000 equals 0.1, which is 10% as a percentage. So, his bonus is 10% of his pay each year.\n\nThis year, he makes $200,000. So, the bonus this year would be 10% of 200,000. Let me compute that using the code interpreter to ensure accuracy.\n \n{\"name\": \"code_interpreter\", \"arguments\": {\"code\": \"total_pay_this_year = 200000\\nbonus_percentage = 10 / 100\\nbonus_this_year = total_pay_this_year * bonus_percentage\\ntotal_income_this_year = total_pay_this_year + bonus_this_year\\nprint(total_income_this_year)\", \"executes\": \"True\"}}\n\n tool\n220000.0\n\n assistant\n\nSo the code shows that John's total income this year would be $220,000.
This is consistent with the 10% bonus on last year's pay of $100,000, which was $10,000. Therefore, this year's bonus of $20,000 added to his total pay of $200,000 results in a total income of $220,000.\n\nI need to verify that there are no other factors at play. The problem states it's a constant percentage bonus each year, so there's no mention of changing percentages or other variables. Therefore, applying a straightforward percentage of 10% on this year's partial salary amount would result in $20,000. Adding that to his total pay gives $220,000. The calculation is correct and aligns with the question's parameters.\n\n\n#### 220000.0", "score": 0, "step": 1 } here is the readable format version: .. code-block:: python [system] You are a math expert. You are given a question and you need to solve it step by step. Reasoning step by step before any tool call. You should use the `calc_gsm8k_reward` tool after step by step solving the question, before generate final answer at least once and refine your answer if necessary. Put your final answer in the format of `#### `. # Tools You may call one or more functions to assist with the user query. You are provided with function signatures within XML tags: {"type": "function", "function": {"name": "code_interpreter", "description": "A tool for executing code.", "parameters": {"type": "object", "properties": {"code": {"type": "string", "description": "The code to execute.", "enum": null}}, "required": ["code"]}, "strict": false}} For each function call, return a json object with function name and arguments within XML tags: {"name": , "arguments": } [user] John gets a bonus that's the same percentage every year. Last year he made $100,000 and got a $10,000 bonus. This year he makes $200,000. How much will John make this year when adding both his total pay and bonus together? Let's think step by step and output the final answer after `####`. [assistant] Okay, let's see. John gets a bonus that's the same percentage every year. Last year, he made $100,000 and got a $10,000 bonus. This year, he makes $200,000. The question is asking how much he'll make this year when adding both his total pay and bonus together. First, I need to figure out the percentage bonus he gets. Since the bonus is the same percentage each year, if I can find that percentage based on last year's details, I can apply it to this year's earnings to find this year's bonus. Then, I'll add that bonus to his total pay this year. Last year, he earned $100,000 and got a $10,000 bonus. To find the percentage, I can divide the bonus by his total earnings. So, 10,000 divided by 100,000. Let me calculate that. 10,000 / 100,000 equals 0.1, which is 10% as a percentage. So, his bonus is 10% of his pay each year. This year, he makes $200,000. So, the bonus this year would be 10% of 200,000. Let me compute that using the code interpreter to ensure accuracy. {"name": "code_interpreter", "arguments": {"code": "total_pay_this_year = 200000\nbonus_percentage = 10 / 100\nbonus_this_year = total_pay_this_year * bonus_percentage\ntotal_income_this_year = total_pay_this_year + bonus_this_year\nprint(total_income_this_year)", "executes": "True"}} [tool] 220000.0 [assistant] So the code shows that John's total income this year would be $220,000. This is consistent with the 10% bonus on last year's pay of $100,000, which was $10,000. Therefore, this year's bonus of $20,000 added to his total pay of $200,000 results in a total income of $220,000. I need to verify that there are no other factors at play. 
The problem states it's a constant percentage bonus each year, so there's no mention of changing percentages or other variables. Therefore, applying a straightforward percentage of 10% on this year's partial salary amount would result in $20,000. Adding that to his total pay gives $220,000. The calculation is correct and aligns with the question's parameters. #### 220000.0 You can also use the `RolloutViewer` TUI tool to view the dumped rollout data: .. code-block:: bash python scripts/rollout_viewer.py ${trainer.rollout_data_dir} .. image:: https://github.com/user-attachments/assets/e34e5157-2880-4a21-afb2-73885d0dfb11 :alt: RolloutViewer screenshot ================================================ FILE: verl_distillation/docs/sglang_multiturn/search_tool_example.rst ================================================ ======================= Search Tool Integration ======================= Last updated: 05/30/2025. Introduction ------------ - We have added a search tool calling function to Multi-Turn RL, enabling the model to initiate retrieval requests during Actor rollout and directly use retrieval results for training. **We support using a local dense retriever as the retrieval tool, as well as integrating with your own local retrieval engine.** Quick Reproduction ------------------ Create a New Docker Container ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. code:: bash docker run \ -it \ --shm-size 32g \ --gpus all \ -v {Huggingface-Cache-Path}:/root/.cache \ --ipc=host \ --network=host \ --privileged \ --name sglang_{your-name} \ lmsysorg/sglang:dev \ /bin/zsh If you need to restart after exiting the container: .. code:: bash docker start -i sglang_{your-name} Update Python and Configure the Virtual Environment using uv ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. code:: bash apt update apt install -y python3.10 python3.10-venv # Create a virtual environment python3 -m venv ~/.python/verl-multiturn-rollout # Activate the virtual environment source ~/.python/verl-multiturn-rollout/bin/activate # Install uv python3 -m pip install uv Install verl Upstream ~~~~~~~~~~~~~~~~~~~~~ .. code:: bash cd ~ git clone https://github.com/volcengine/verl.git cd verl # Install verl python3 -m uv pip install . python3 -m uv pip install -r ./requirements_sglang.txt # Manually install flash-attn python3 -m uv pip install wheel python3 -m uv pip install packaging python3 -m uv pip install flash-attn --no-build-isolation --no-deps Set Up a Local Retrieval Engine ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If you are using your own local retrieval service, you can skip this step. We chose the local dense retriever provided in the search-R1 example; detailed instructions are in the `searchR1 docs `__. In brief: - The GPU version offers higher accuracy and speed; each GPU uses about 5–7 GB of memory. - The CPU version can be used for simple testing but has lower retrieval precision, which will degrade training performance. See the `retriever documentation `__ in search-R1 for details. - Recommend using Conda to install faiss-gpu=1.8.0; venv may cause errors. **Note**: To start both the training process and the local retrieval service, we launch two separate Python environments. The training uses uv in the verl-multiturn-rollout environment, while the retriever uses conda to install ``faiss-gpu``. .. 
code:: bash # Download the Miniconda installer script wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda.sh # Install to $HOME/miniconda3 in batch mode bash ~/miniconda.sh -b -p $HOME/miniconda3 # Activate conda (only in the current shell) eval "$($HOME/miniconda3/bin/conda shell.bash hook)" # (Optional) Add conda to your default shell startup conda init # Reload shell config source ~/.bashrc # Create and activate the retriever environment with Python 3.10 conda create -n retriever python=3.10 -y conda activate retriever # Install PyTorch (with GPU support) and related libraries conda install pytorch==2.4.0 torchvision==0.19.0 torchaudio==2.4.0 pytorch-cuda=12.1 -c pytorch -c nvidia -y # Install other Python packages pip install transformers datasets pyserini huggingface_hub # Install the GPU version of faiss conda install faiss-gpu=1.8.0 -c pytorch -c nvidia -y # Install the API service framework pip install uvicorn fastapi Download the Indexing and Corpus ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The local retrieval files are large—prepare sufficient disk space. Downloading is about 60–70 GB, and uncompressed takes about 132 GB: .. code:: bash conda activate retriever save_path=/the/path/to/save python examples/sglang_multiturn/search_r1_like/local_dense_retriever/download.py --save_path $save_path cat $save_path/part_* > $save_path/e5_Flat.index gzip -d $save_path/wiki-18.jsonl.gz Start the Local flat e5 Retrieval Server ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1. The first startup will download models and load the index. 2. Apart from the download, startup takes about 1–2 minutes. 3. After startup, each GPU uses about 5–7 GB of memory, leaving the rest for multi-turn RL training. .. code:: bash conda activate retriever index_file=$save_path/e5_Flat.index corpus_file=$save_path/wiki-18.jsonl retriever_name=e5 retriever_path=intfloat/e5-base-v2 python examples/sglang_multiturn/search_r1_like/local_dense_retriever/retrieval_server.py \ --index_path $index_file \ --corpus_path $corpus_file \ --topk 3 \ --retriever_name $retriever_name \ --retriever_model $retriever_path \ --faiss_gpu Set Up WANDB_API_KEY ~~~~~~~~~~~~~~~~~~~~ .. code:: bash export WANDB_API_KEY={YOUR_WANDB_API_KEY} # Define a timestamp function function now() { date '+%Y-%m-%d-%H-%M' } **Preprocess the Dataset** ~~~~~~~~~~~~~~~~~~~~~~~~~~ **Note:** The following data processing and training commands must be run in the verl-multiturn-rollout environment. .. code:: bash python3 examples/data_preprocess/preprocess_search_r1_dataset.py Testing on 8 x H20 ~~~~~~~~~~~~~~~~~~ .. code:: bash # Ensure the now() function is defined # Create a logs directory mkdir -p logs # Set GPUs and run with a suitable log path export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 nohup bash examples/sglang_multiturn/search_r1_like/run_qwen2.5-3b_instruct_search_multiturn.sh \ trainer.experiment_name=qwen2.5-3b-it_rm-searchR1-like-sgl-multiturn-$(now) \ > logs/searchR1-like$(now).log 2>&1 & Custom Search Configuration --------------------------- To enable multi-turn reasoning, set the following fields in your config: .. code:: yaml actor_rollout_ref: rollout: name: "sglang" multi_turn: enable: True You must specify ``retrieval_service_url`` in ``examples/sglang_multiturn/config/tool_config/search_tool_config.yaml``, and properly configure concurrency. For more details on concurrency, refer to the Sandbox Fusion example: .. 
    tools:
      - class_name: verl.tools.search_tool.SearchTool
        config:
          retrieval_service_url: http://127.0.0.1:8000/retrieve
          num_workers: 120
          rate_limit: 120
          timeout: 30

The retriever input/output formats are as follows. If your service parameters match, you only need to modify ``retrieval_service_url``. You can also customize it in ``search_r1_like_utils.py``.

.. code:: python

    Input format:
    {
        "queries": ["What is Python?", "Tell me about neural networks."],
        "topk": 3,
        "return_scores": true
    }

    Output format (when return_scores=True, similarity scores are returned):
    {
        "result": [
            [   # Results for each query
                { "document": doc, "score": score },
                # ... more documents
            ],
            # ... results for other queries
        ]
    }

Notes
-----

1. The total training time is about 27 hours; meanwhile, the validation dataset is very large (51k), and each validation takes about 6000 s. (Therefore, ``val_before_train=False`` by default.)

================================================
FILE: verl_distillation/docs/single_controller.rst
================================================

The Design of ``verl.single_controller``
==============================================

Last updated: 05/21/2025.

**Author:**\ `Wang Zhang `__

Preface
-------

We prepared this document for developers of ``verl``, particularly those interested in understanding or contributing to the ``verl.single_controller`` module. It is not intended for end users, but for contributors seeking to understand the architectural rationale and internal mechanics.

--------------

Origin
------

The ``single_controller`` module originated from a request I received — to adapt a toy single-process RLHF script into a distributed system with minimal changes, while maintaining ease of debugging.

Common practice — such as using PyTorch’s Distributed Data Parallel (DDP) — typically involves wrapping ``nn.Module`` and launching multiple processes that execute the same function under different ranks. However, this approach presents two main limitations in the context of distributed RLHF:

- Difficulty representing multiple DAGs as required by PPO;
- Difficulty inspecting intermediate tensors during training.

To maintain debuggability, we opted for a different approach — breaking the training loop into well-defined stages like ``generate_sequences``, ``compute_advantages``, and so on.

We selected `Ray `__ as the initial backend for ``verl`` due to its ability to expose Python class methods as RPC endpoints. However, Ray’s default model only supports **one method call, one RPC**, while training LLMs typically requires coordination across multiple processes. To hide this one-method-call-to-many-Ray-actors fan-out from users, we introduced the following components:

- ``WorkerGroup`` – manages a group of remote workers and provides a unified interface for multi-process distributed computation;
- ``ResourcePool`` – binds computational resources to worker processes;
- ``ClassWithArgs`` – enables delayed remote instantiation with specified initialization arguments.

--------------

A Running Example: ``generate_sequences``
-----------------------------------------

To illustrate the design, we walk through how the ``generate_sequences`` method in the ``ActorRolloutRefWorker`` class is registered and invoked across distributed workers.

--------------

Step 1: Register with a Decorator
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The first step is to define the ``generate_sequences`` method and decorate it with ``@register``, since it will be called in the driver script.

**Source:** `fsdp_workers.py `__

.. code:: python
    class ActorRolloutRefWorker(Worker):
        ...
        @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO)
        def generate_sequences(self, prompts: DataProto):
            prompts = prompts.to(torch.cuda.current_device())
            ...

The ``@register`` decorator adds metadata to the ``generate_sequences`` method. Currently, it doesn’t alter functionality, but attaches attributes via a magic key (``MAGIC_ATTR``):

**Source:** `decorator.py `__

.. code:: python

    def register(dispatch_mode=Dispatch.ALL_TO_ALL, execute_mode=Execute.ALL, blocking=True, materialize_futures=True):
        ...
        def decorator(func):
            @wraps(func)
            def inner(*args, **kwargs):
                if materialize_futures:
                    args, kwargs = _materialize_futures(*args, **kwargs)
                return func(*args, **kwargs)

            attrs = {"dispatch_mode": dispatch_mode, "execute_mode": execute_mode, "blocking": blocking}
            setattr(inner, MAGIC_ATTR, attrs)
            return inner

        return decorator

As the code shows, the values of ``dispatch_mode``, ``execute_mode`` and ``blocking`` are attached to the ``generate_sequences`` method.

--------------

Step 2: Binding During Initialization
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

These attached attributes are extracted and utilized when ``ActorRolloutRefWorker``, wrapped in a ``RayClassWithArgs``, is passed into a ``RayWorkerGroup``.

**Source:** `main_generation.py `__

.. code:: python

    ray_cls_with_init = RayClassWithInitArgs(cls=ray.remote(ActorRolloutRefWorker), config=config, role="rollout")
    resource_pool = RayResourcePool(process_on_nodes=[config.trainer.n_gpus_per_node] * config.trainer.nnodes)
    wg = RayWorkerGroup(resource_pool=resource_pool, ray_cls_with_init=ray_cls_with_init)

During the `initialization `__ of ``RayWorkerGroup``, two key steps occur:

1. Worker instances (Ray actors) are created: `RayWorkerGroup._init_with_resource_pool `__
2. Methods decorated with ``@register`` are bound to ``RayWorkerGroup``: `RayWorkerGroup._bind_worker_method `__

.. figure:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/worker_group_init.png?raw=true
   :alt: initialization_and_binding_of_worker_group

   initialization_and_binding_of_worker_group

The binding procedure is the heart of ``verl.single_controller``.

**Key function:** `WorkerGroup._bind_worker_method `__

.. code:: python

    def _bind_worker_method(self, user_defined_cls, func_generator):
        ...
        for method_name in dir(user_defined_cls):
            try:
                method = getattr(user_defined_cls, method_name)
                assert callable(method)
            except Exception:
                continue  # Skip properties
    <<>>

When a method has the ``MAGIC_ATTR``, the attributes set by ``@register`` are extracted:

.. code:: python

    <<>>
            if hasattr(method, MAGIC_ATTR):
                attribute = getattr(method, MAGIC_ATTR)
                dispatch_mode = attribute["dispatch_mode"]
                execute_mode = attribute["execute_mode"]
                blocking = attribute["blocking"]
    <<>>

As shown in the flow chart above, these attributes are fed into ``func_generator``. However, ``func_generator`` takes ``method_name``, ``dispatch_fn``, ``collect_fn``, ``execute_fn``, and ``blocking``. We need to find the ``dispatch_fn`` and ``collect_fn`` associated with the ``dispatch_mode`` (``DP_COMPUTE_PROTO``) in `DISPATCH_MODE_FN_REGISTRY `__:

.. code:: python3

    DISPATCH_MODE_FN_REGISTRY = {
        Dispatch.ONE_TO_ALL: {
            "dispatch_fn": dispatch_one_to_all,
            "collect_fn": collect_all_to_all,
        },
        ...
        Dispatch.DP_COMPUTE_PROTO: {
            "dispatch_fn": dispatch_dp_compute_data_proto,
            "collect_fn": collect_dp_compute_data_proto,
        },
        ...
    }

Similarly, the ``execute_fn`` is selected by ``execute_mode`` and extracted by:

.. code:: python
    <<>>
            # get execute_fn_name
            execute_mode = get_predefined_execute_fn(execute_mode=execute_mode)
            wg_execute_fn_name = execute_mode["execute_fn_name"]

            # get execute_fn from string
            try:
                execute_fn = getattr(self, wg_execute_fn_name)
                assert callable(execute_fn), "execute_fn must be callable"
            except Exception:
                print(f"execute_fn {wg_execute_fn_name} is invalid")
                raise
    <<>>

In this ``generate_sequences`` case:

- ``dispatch_mode = Dispatch.DP_COMPUTE_PROTO``
- ``dispatch_fn = dispatch_dp_compute_data_proto``
- ``collect_fn = collect_dp_compute_data_proto``
- ``execute_fn = RayWorkerGroup.execute_all``

ONE_TO_ALL v.s. DP_COMPUTE_PROTO
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

``dispatch_mode`` is associated with a ``dispatch_fn`` and a ``collect_fn``. As the name implies, ``dispatch_fn`` processes the input arguments in ``WorkerGroup`` and generates a batch (list) of input arguments, each of which will be fed into a worker attached to the ``WorkerGroup``.

``dispatch_fn`` of ``ONE_TO_ALL`` is `dispatch_one_to_all `__, which just duplicates all the input arguments into N replicas, where N equals the number of Workers attached to the ``worker_group``:

.. code:: python

    def dispatch_one_to_all(worker_group, *args, **kwargs):
        args = tuple([arg] * worker_group.world_size for arg in args)
        kwargs = {k: [v] * worker_group.world_size for k, v in kwargs.items()}
        return args, kwargs

``dispatch_fn`` of ``DP_COMPUTE_PROTO`` is `dispatch_dp_compute_data_proto `__, which uses ``DataProto.chunk`` to split a large ``DataProto`` into N smaller ``DataProto``, where N equals the world_size (number of workers) of the ``worker_group``:

.. code:: python

    def dispatch_dp_compute_data_proto(worker_group, *args, **kwargs):
        from verl.single_controller.base.worker_group import WorkerGroup

        assert isinstance(worker_group, WorkerGroup)
        # Note: enable auto padding for dp compute DataProto
        splitted_args, splitted_kwargs = _split_args_kwargs_data_proto_with_auto_padding(
            worker_group.world_size,
            *args,
            **kwargs,
        )
        return splitted_args, splitted_kwargs

The ``collect_fn`` follows the same pattern: it processes a batch (list) of values returned from all workers of a ``WorkerGroup`` and merges them into a list, as ``collect_all_to_all`` does, or into a large ``DataProto``, as ``collect_dp_compute_data_proto`` does.

Finally, a new method is dynamically generated using ``func_generator`` and added to the ``WorkerGroup`` instance:

.. code:: python

    <<>>
                # bind a new method to the RayWorkerGroup
                func = func_generator(
                    self,
                    method_name,
                    dispatch_fn=dispatch_fn,
                    collect_fn=collect_fn,
                    execute_fn=execute_fn,
                    blocking=blocking,
                )

                try:
                    setattr(self, method_name, func)
                    method_names.append(method_name)
                except Exception as e:
                    raise ValueError(f"Fail to set method_name {method_name}") from e
    <<>>

This makes the method invocable via the ``WorkerGroup`` interface.

--------------

Step 3: Call Chain
~~~~~~~~~~~~~~~~~~

All the machinery above ensures that distributed calls feel identical to single-process ones. In the original single-process script, the code looks like:

.. code:: python

    rollout = Rollout()
    rollout.generate_sequences(batch)

With ``verl``, the multiprocess program becomes:

.. code:: python

    rollout = RayWorkerGroup(resource_pool=RayResourcePool([4]), ray_cls_with_init=RayClassWithArgs(Rollout))
    rollout.generate_sequences(batch)
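To see the pattern in isolation, here is a minimal, Ray-free sketch of the dispatch/execute/collect flow; ``ToyWorker`` and the helper names below are illustrative stand-ins, not verl's actual API:

.. code:: python

    # A toy, single-process illustration of dispatch -> execute -> collect.
    # All names here are illustrative, not verl's actual API.

    def dispatch_one_to_all(world_size, arg):
        # ONE_TO_ALL: replicate the same argument for every worker
        return [arg] * world_size

    def dispatch_dp_compute(world_size, batch):
        # DP_COMPUTE_PROTO (simplified): chunk one large batch into world_size shards
        chunk = (len(batch) + world_size - 1) // world_size
        return [batch[i * chunk:(i + 1) * chunk] for i in range(world_size)]

    def execute_all(workers, method_name, shards):
        # execute_fn: call the same method on every worker with its own shard
        return [getattr(w, method_name)(shard) for w, shard in zip(workers, shards)]

    def collect_dp_compute(outputs):
        # collect_fn: merge the per-worker outputs back into a single batch
        return [item for out in outputs for item in out]

    class ToyWorker:
        def generate_sequences(self, prompts):
            return [p + " -> response" for p in prompts]

    workers = [ToyWorker() for _ in range(4)]
    batch = [f"prompt-{i}" for i in range(8)]
    shards = dispatch_dp_compute(len(workers), batch)             # dispatch
    outputs = execute_all(workers, "generate_sequences", shards)  # execute
    print(collect_dp_compute(outputs))                            # collect: 8 results, order preserved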
.. figure:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/call_generate_sequences.png?raw=true
   :alt: call_chain_of_generate_sequences

   call_chain_of_generate_sequences

Behind this simple call:

- ``dispatch_fn`` splits input across workers
- ``execute_fn`` performs the actual remote invocation
- ``collect_fn`` gathers the results

All of this is abstracted away, enabling developers to write distributed code with minimal changes to their existing logic.

--------------

Beyond RL Post-Training: Generalizing ``verl.single_controller``
----------------------------------------------------------------

The ``verl.single_controller`` module generalizes well beyond reinforcement learning. It provides a clean abstraction to batch-process remote method calls, with automatic input/output handling. By minimizing the gap between single-process and multi-process scripts, ``verl.single_controller`` opens the door to distributed computing in broader domains — not limited to RL post-training.

We hope this design inspires more examples and extensions from the community.

================================================
FILE: verl_distillation/docs/start/agentic_rl.rst
================================================

Agentic RL Training
===================

Last updated: 07/15/2025.

Overview
----------

The goal of Agentic RL is to improve the performance of the backend model by applying reinforcement learning to the Agent. To support this, a series of features have been developed:

1. Server-based asynchronous rollout
2. Multi-turn conversations and tool calls
3. LangGraph-based Agent

This document explains the system principles and usage involved to help users implement Agentic RL.

Server-based Asynchronous Rollout
---------------------------------

Since Agents need to interact with the environment through various tool calls, an asyncio-based coroutine mechanism is used to execute each rollout request asynchronously, avoiding GPU idling while waiting for tool call results and thereby improving training performance.

To support asynchronous rollout, the inference engine (server) and the agent (client) are architecturally separated, implementing a server-based system with the following objectives:

1. Enabling load balancing mechanisms to balance loads across multiple GPUs and reduce the impact of long-tail requests on performance. For this purpose, scheduling capabilities in stream mode (``recipe/stream_mode``) are implemented as a recipe.
2. Preventing agent-specific features such as tracing from affecting the inference engine.

System Architecture
~~~~~~~~~~~~~~~~~~~

.. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/agent_loop.png?raw=true

For more detail on internal design, please refer to :doc:`Agent Loop<../advance/agent_loop>`.
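The following is a minimal, self-contained sketch (plain Python ``asyncio``; ``generate`` and ``run_tool`` are hypothetical stand-ins, not verl's actual interfaces) of how coroutines keep the inference server busy while individual rollouts wait on tool calls:

.. code-block:: python

    import asyncio

    # Hypothetical stand-ins for a token-in/token-out generate API and a slow tool;
    # neither is verl's actual interface.
    async def generate(token_ids):
        await asyncio.sleep(0.1)   # pretend inference latency
        return token_ids + [101]   # pretend newly generated tokens

    async def run_tool(token_ids):
        await asyncio.sleep(0.5)   # pretend retrieval / tool-call latency
        return [7, 8, 9]           # pretend tool-result tokens

    async def rollout(prompt_ids):
        ids = await generate(prompt_ids)   # first model turn
        ids += await run_tool(ids)         # while this request waits, others keep the server busy
        return await generate(ids)         # second model turn on the exact token ids

    async def main():
        # Many rollouts in flight at once: tool-call waits overlap instead of idling the GPU.
        results = await asyncio.gather(*(rollout([i]) for i in range(16)))
        print(f"{len(results)} rollouts finished")

    asyncio.run(main())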
System Components
~~~~~~~~~~~~~~~~~

+-----------------------+----------------------------------------------------------------------------+
| Component             | Role                                                                       |
+=======================+============================================================================+
| AgentLoop             | Client, implements Agent functions                                         |
+-----------------------+----------------------------------------------------------------------------+
| AsyncLLMServerManager | Inference gateway, provides generate interface for AgentLoop               |
+-----------------------+----------------------------------------------------------------------------+
| AsyncServer           | Server, each instance is connected to one DP group of the inference engine |
+-----------------------+----------------------------------------------------------------------------+

**"generate" Interface**

The ``generate`` function based on a Ray actor is used between the Client and Server instead of the standard chat completion API. This is because the conversion between tokens and text can be irreversible. For example, the token converted from "" will be different from that generated by the LLM. During the training phase, it is necessary to strictly use the tokens generated by LLM inference to avoid inaccuracies in computing the advantage, which may affect model performance. Having the Server provide a token-based API helps the Client maintain the relationship between the text generated by tool calls and the tokens returned by the LLM, so as to output correct tokens for training.

**Inference Engine Adaptation**

AsyncServer uniformly provides a generate function to the upper layer, with separate implementations for SGLang and vLLM to hide the underlying differences:

1. The SGLang AsyncServer uses the async_generate interface of the SGLang engine, which is located on the first GPU of each TP group. Therefore, AsyncServer needs to remotely call async_generate through a Ray actor.
2. The vLLM AsyncServer uses the generate interface of the vLLM engine, which can communicate with the GPUs in the TP group through ZMQ and can be directly called in AsyncServer.

Usage Example
~~~~~~~~~~~~~

Follow the :doc:`GSM8K example<../examples/gsm8k_example>` to prepare the dataset and model checkpoints. There are two options required to use agent loop:

- `data.return_raw_chat=True`
- `actor_rollout_ref.rollout.mode=async`

This example uses the sglang inference engine by default; you can also modify ``rollout_name`` to use vllm.

.. code-block:: bash

    bash examples/grpo_trainer/run_qwen2-7b_seq_balance.sh

Multi-turn Conversations and Tool Calls
---------------------------------------

Follow :doc:`Multi-turn Rollout Support<../sglang_multiturn/multiturn>` to prepare tool and configuration files. The Tool Agent Loop has an additional requirement: adding an "agent_name" field to the dataset. During rollout, it will choose to use tool_agent_loop or single_turn_agent (default) based on this field.

Usage Example
~~~~~~~~~~~~~

.. code-block:: bash

    # install mlflow to view toolcall and llm trace
    pip install mlflow

    # This will download and preprocess the GSM8K dataset into ~/data/gsm8k/ and add the "agent_name" field.
    python examples/data_preprocess/gsm8k_tool_agent_loop.py

    # Start training with tool calls and mlflow-based trace enabled to help debug the rollout details
    bash examples/sglang_multiturn/run_qwen2.5-3b_gsm8k_tool_agent_mlflow.sh

    # When training is done, start a mlflow server to view trace
    mlflow ui -h 0.0.0.0 -p 5000 --backend-store-uri sqlite:////tmp/mlruns.db

    # then you can open http://:5000 from browser to view trace

Note: During training, because the model may sometimes fail to generate correct toolcall tags, an error message "Failed to decode tool call" will be output to the console; this does not indicate an abnormality in training.

Follow :doc:`Rollout trace<../advance/rollout_trace>` to learn more about the trace feature.

Agent Framework
---------------

System Architecture
~~~~~~~~~~~~~~~~~~~

.. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/langgraph_agent.png?raw=true

System Components
~~~~~~~~~~~~~~~~~

+----------------+------------------------------------------------------------------------------------------------+
| Component      | Role                                                                                           |
+================+================================================================================================+
| ChatModel      | LLM object of LangChain, used to adapt to the “generate” api provided by AsyncLLMServerManager |
+----------------+------------------------------------------------------------------------------------------------+
| ReactAgentLoop | Agent adaptation layer, which by default supports a naive LangGraph Agent. New classes can be  |
|                | derived to support user-defined Agents, and the run function needs to be implemented to        |
|                | complete Agent calls.                                                                          |
+----------------+------------------------------------------------------------------------------------------------+
| AsyncServer    | Server, each instance is connected to one DP group of the inference engine.                    |
+----------------+------------------------------------------------------------------------------------------------+

Follow the doc ``recipe/langgraph_agent/example/README.md`` for more details.

================================================
FILE: verl_distillation/docs/start/install.rst
================================================

Installation
============

Requirements
------------

- **Python**: Version >= 3.10
- **CUDA**: Version >= 12.8

verl supports various backends. Currently, the following configurations are available:

- **FSDP** and **Megatron-LM** (optional) for training.
- **SGLang**, **vLLM** and **TGI** for rollout generation.

Choices of Backend Engines
----------------------------

1. Training:

We recommend using the **FSDP** backend to investigate, research and prototype different models, datasets and RL algorithms. The guide for using the FSDP backend can be found in :doc:`FSDP Workers<../workers/fsdp_workers>`.

For users who pursue better scalability, we recommend using the **Megatron-LM** backend. Currently, we support `Megatron-LM v0.13.1 `_. The guide for using the Megatron-LM backend can be found in :doc:`Megatron-LM Workers<../workers/megatron_workers>`.

2. Inference:

For inference, vllm 0.8.3 and later versions have been tested for stability. We recommend turning on the env var `VLLM_USE_V1=1` for optimal performance.

For SGLang, refer to the :doc:`SGLang Backend<../workers/sglang_worker>` for detailed installation and usage instructions. SGLang rollout is under extensive development and offers many advanced features and optimizations.
We encourage users to report any issues or provide feedback via the `SGLang Issue Tracker `_.

The huggingface TGI integration is usually used for debugging and single-GPU exploration.

Install from docker image
-------------------------

We provide pre-built Docker images for quick setup. Starting from this version, we use a new image release hierarchy for productivity and stability.

The images are divided into three categories:

- **Base Image**: Without inference and training frameworks; only basic dependencies are installed. You can directly install vllm or SGLang on top of it, without needing to reinstall torch or CUDA.
- **Application Image**: Stable version with inference and training frameworks installed.
- **Community Image**: Unstable version with the latest frameworks and features.

The first two types of images are hosted on the dockerhub `verlai/verl `_ repository, while the community images are hosted on community repositories.

.. note::

    The image versions are mapped to verl releases; for example, the image with tag ``verl0.4`` is built for verl release ``v0.4.x``.

Base Image
::::::::::

The stable base image is ``verlai/verl:base-verl0.6-cu128-cudnn9.8-torch2.8.0-fa2.7.4`` for vLLM and sglang. The installed package versions can be found from the tags, and the Dockerfile can be found in ``docker/verl[version]-[packages]/Dockerfile.base``.

The base image is not updated frequently, and the app image can be built on top of it without reinstalling the base packages.

Application Image
:::::::::::::::::

From this version, we provide separate images for vLLM and SGLang due to the divergence of dependency packages like PyTorch and FlashInfer. There are 2 types of application images available:

- **vLLM with FSDP and Megatron**: ``verlai/verl:app-verl0.5-transformers4.55.4-vllm0.10.0-mcore0.13.0-te2.2``
- **SGLang with FSDP and Megatron**: ``verlai/verl:app-verl0.6-transformers4.56.1-sglang0.5.2-mcore0.13.0-te2.2``

Docker images with Megatron backends are runnable for post-training large language models like ``Qwen/Qwen3-235B-A22B`` and ``deepseek-ai/DeepSeek-V3-0324``. Refer to the :doc:`Large Language Model Post-Training documentation<../perf/dpsk>` for more details.

Application images can be updated frequently, and the Dockerfile can be found in ``docker/verl[version]-[packages]/Dockerfile.app.[frameworks]``. Based on the base image, it is easy to build your own application image with the desired inference and training frameworks.

Community Image
:::::::::::::::

Community images are provided by the community and include the latest versions of vLLM and SGLang; they may include experimental features or configurations. They also cover other hardware and platforms, such as AMD GPUs with ROCm or AWS EFA and SageMaker.

For the latest vLLM with FSDP, please refer to the `hiyouga/verl `_ repository; the latest version is ``hiyouga/verl:ngc-th2.8.0-cu12.9-vllm0.11.0``.

For the latest SGLang with FSDP, please refer to the `hebiaobuaa/verl `_ repository; the latest version is ``hebiaobuaa/verl:app-verl0.5-sglang0.4.9.post6-mcore0.12.2-te2.2``, which is provided by the SGLang RL Group.

For the latest vLLM with Megatron, please refer to the `iseekyan/verl `_ repository; the latest version is ``iseekyan/verl:megatron0.13_vllm0.11``.

See the files under ``docker/`` for NGC-based images or if you want to build your own.
Note that for AWS instances with an EFA network interface (SageMaker AI Pod), you need to install the EFA driver as shown in ``docker/Dockerfile.extenstion.awsefa``.

Installation from Docker
::::::::::::::::::::::::

After pulling the desired Docker image and installing the desired inference and training frameworks, you can run it with the following steps:

1. Launch the desired Docker image and attach into it:

.. code:: bash

    # replace <image:tag> with the image you pulled
    docker create --runtime=nvidia --gpus all --net=host --shm-size="10g" --cap-add=SYS_ADMIN -v .:/workspace/verl --name verl <image:tag> sleep infinity
    docker start verl
    docker exec -it verl bash

2. If you use the images provided, you only need to install verl itself without dependencies:

.. code:: bash

    # install the nightly version (recommended)
    git clone https://github.com/volcengine/verl && cd verl
    pip3 install --no-deps -e .

[Optional] If you hope to switch between different frameworks, you can install verl with the following command:

.. code:: bash

    # install the nightly version (recommended)
    git clone https://github.com/volcengine/verl && cd verl
    pip3 install -e .[vllm]
    pip3 install -e .[sglang]

Install from custom environment
---------------------------------------------

We recommend using docker images for convenience. However, if your environment is not compatible with the docker images, you can also install verl in a python environment.

.. note::

    - The Dockerfiles provide more details than these installation instructions. You can find examples in each Dockerfile, for example the `verl0.6-cu128-torch2.8.0-fa2.7.4 Dockerfile.base `_ .

Pre-requisites
::::::::::::::

For training and inference engines to utilize better and faster hardware support, CUDA/cuDNN and other dependencies are required. Some of these dependencies are easily overridden when installing other packages, so we put them in the :ref:`Post-installation` step.

.. note::

    - The installation steps below are the recommended configurations for the latest version of verl. If you are trying to customize your own environment, you can ignore the strict constraints.

We need to install the following pre-requisites:

- **CUDA**: Version >= 12.8
- **cuDNN**: Version >= 9.10.0
- **Apex**

CUDA 12.8 or above is recommended, matching the docker images; please refer to `NVIDIA's official website `_ for other versions of CUDA.

.. code:: bash

    # change directory to anywhere you like; installing inside the verl source directory is not recommended
    wget https://developer.download.nvidia.com/compute/cuda/12.8.1/local_installers/cuda-repo-ubuntu2204-12-8-local_12.8.1-570.124.06-1_amd64.deb
    dpkg -i cuda-repo-ubuntu2204-12-8-local_12.8.1-570.124.06-1_amd64.deb
    cp /var/cuda-repo-ubuntu2204-12-8-local/cuda-*-keyring.gpg /usr/share/keyrings/
    apt-get update
    apt-get -y install cuda-toolkit-12-8
    update-alternatives --set cuda /usr/local/cuda-12-8

cuDNN can be installed via the following command; please refer to `NVIDIA's official website `_ for other versions of cuDNN.

.. code:: bash

    # change directory to anywhere you like; installing inside the verl source directory is not recommended
    wget https://developer.download.nvidia.com/compute/cudnn/9.10.2/local_installers/cudnn-local-repo-ubuntu2204-9.10.2_1.0-1_amd64.deb
    dpkg -i cudnn-local-repo-ubuntu2204-9.10.2_1.0-1_amd64.deb
    cp /var/cudnn-local-repo-ubuntu2204-9.10.2/cudnn-*-keyring.gpg /usr/share/keyrings/
    apt-get update
    apt-get -y install cudnn-cuda-12

Install dependencies
::::::::::::::::::::

.. note::

    We recommend using a fresh conda environment to install verl and its dependencies.
**Notice that the inference frameworks often strictly pin your pytorch version and will directly override your installed pytorch if you are not careful.**

As a countermeasure, it is recommended to install the inference frameworks first, with the pytorch they need. For vLLM, if you hope to use your existing pytorch, please follow their official instructions `Use an existing PyTorch installation `_ .

1. First of all, to manage the environment, we recommend using conda:

.. code:: bash

    conda create -n distill python==3.12
    conda activate distill

2. Then, execute the ``install.sh`` script that we provide in verl:

.. code:: bash

    # Make sure you have activated the distill conda env
    # If you need to run with megatron
    bash scripts/install_vllm_sglang_mcore.sh
    # Or if you simply need to run with FSDP
    USE_MEGATRON=0 bash scripts/install_vllm_sglang_mcore.sh

If you encounter errors in this step, please check the script and manually follow the steps in the script.

[Optional] NVIDIA Apex is recommended for Megatron-LM training, but it's not needed if you only use the FSDP backend. You can install it via the following command, but notice that this step can take a very long time. It is recommended to set the ``MAX_JOBS`` environment variable to accelerate the installation process, but do not set it too large, otherwise the memory will be overloaded and your machine may hang.

.. code:: bash

    # change directory to anywhere you like; installing inside the verl source directory is not recommended
    git clone https://github.com/NVIDIA/apex.git && \
    cd apex && \
    MAX_JOBS=32 pip install -v --disable-pip-version-check --no-cache-dir --no-build-isolation --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" ./

Install verl
::::::::::::

For installing the latest version of verl, the best way is to clone and install it from source. Then you can modify our code to customize your own post-training jobs.

.. code:: bash

    git clone https://github.com/volcengine/verl.git
    cd verl
    pip install --no-deps -e .

Post-installation
:::::::::::::::::

Please make sure that the installed packages are not overridden during the installation of other packages.

The packages worth checking are:

- **torch** and the torch series
- **vLLM**
- **SGLang**
- **pyarrow**
- **tensordict**
- **nvidia-cudnn-cu12**: for the Megatron backend

If you encounter issues with package versions while running verl, please update the outdated ones.

Install with AMD GPUs - ROCM kernel support
------------------------------------------------------------------

When you run on AMD GPUs (MI300) with the ROCM platform, you cannot use the previous quickstart to run verl. You should follow the steps below to build a docker image and run it.

If you encounter any issues in using AMD GPUs to run verl, feel free to contact me - `Yusheng Su `_.

Find the docker for AMD ROCm: `docker/Dockerfile.rocm `_
::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::

.. code-block:: bash

    # Build the docker in the repo dir:
    # docker build -f docker/Dockerfile.rocm -t verl-rocm:03.04.2015 .
    # docker images # you can find your built docker

    FROM rocm/vllm:rocm6.2_mi300_ubuntu20.04_py3.9_vllm_0.6.4

    # Set working directory
    # WORKDIR $PWD/app

    # Set environment variables
    ENV PYTORCH_ROCM_ARCH="gfx90a;gfx942"

    # Install vllm
    RUN pip uninstall -y vllm && \
        rm -rf vllm && \
        git clone -b v0.6.3 https://github.com/vllm-project/vllm.git && \
        cd vllm && \
        MAX_JOBS=$(nproc) python3 setup.py install && \
        cd .. && \
        rm -rf vllm

    # Copy the entire project directory
    COPY . .

    # Install dependencies
    RUN pip install "tensordict<0.6" --no-deps && \
        pip install accelerate \
        codetiming \
        datasets \
        dill \
        hydra-core \
        liger-kernel \
        numpy \
        pandas \
        peft \
        "pyarrow>=15.0.0" \
        pylatexenc \
        "ray[data,train,tune,serve]" \
        torchdata \
        transformers \
        wandb \
        orjson \
        pybind11 && \
        pip install -e . --no-deps

Build the image
::::::::::::::::::::::::

.. code-block:: bash

    docker build -t verl-rocm .

Launch the container
::::::::::::::::::::::::::::

.. code-block:: bash

    docker run --rm -it \
      --device /dev/dri \
      --device /dev/kfd \
      -p 8265:8265 \
      --group-add video \
      --cap-add SYS_PTRACE \
      --security-opt seccomp=unconfined \
      --privileged \
      -v $HOME/.ssh:/root/.ssh \
      -v $HOME:$HOME \
      --shm-size 128G \
      -w $PWD \
      verl-rocm \
      /bin/bash

If you do not want to run in root mode and want to assign yourself as the user, please add ``-e HOST_UID=$(id -u)`` and ``-e HOST_GID=$(id -g)`` to the above docker launch script.

verl with AMD GPUs currently supports FSDP as the training engine, and vLLM and SGLang as the inference engines. We will support Megatron in the future.

================================================
FILE: verl_distillation/docs/start/more_resources.rst
================================================

More Resources
==============

Last updated: 06/30/2025.

- Introduction to verl (`Slides `_)
- verl Code Walkthrough (`Slides `_, `Talk in Chinese `_)

================================================
FILE: verl_distillation/docs/start/multinode.rst
================================================

Multinode Training
==================

Last updated: 06/10/2025.

.. _wuxibin89: https://github.com/wuxibin89

Author: `Xibin Wu `_, `Yusheng Su `_.

Option 1: Launch Manually
------------------------------

Set up multinode ray cluster
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

1. Start the head node with ``ray start --head --dashboard-host=0.0.0.0``; there are two addresses you should care about:

- GCS address: ``ray start --address=
``, where the worker nodes should connect to.
- Dashboard address: ``
:8265``, where you should submit jobs to the cluster.

.. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/ray/head.png?raw=true

2. Start the worker node with ``ray start --address=
`` you get above.

.. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/ray/worker.png?raw=true

3. Now you should see that the cluster has 2 nodes with ``ray status``.

.. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/ray/status.png?raw=true

4. Additionally, you can access the dashboard in the browser with the address you get above. *Firewall rules may need to be configured to access the dashboard; if there's any trouble, please contact your network administrator.*

.. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/ray/overview.png?raw=true

Submit job to ray cluster
~~~~~~~~~~~~~~~~~~~~~~~~~

1. Submit a ray job to the cluster with the dashboard address you get above.

.. code-block:: bash

    ray job submit --address="http://127.0.0.1:8265" \
        --runtime-env=verl/trainer/runtime_env.yaml \
        --no-wait \
        -- \
        python3 -m verl.trainer.main_ppo \
        trainer.n_gpus_per_node=8 \
        trainer.nnodes=2 \
        ...

.. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/ray/submit.png?raw=true

2. Then you can check the job status with the following commands:

- ``ray job list``: list all jobs submitted to the cluster.
- ``ray job logs``: query the logs of the job.
- ``ray job status``: query the status of the job.
- ``ray job stop``: request the job to be stopped.
- ``ray job list | grep submission_id | grep JobStatus | grep RUNNING | grep -oP 'raysubmit_[^'\''"]+' | head -n 1``: get the latest submission ID of the running job.
- ``ray job logs --follow``: add the ``--follow`` parameter to the ``ray job logs`` command to enable continuous log streaming.

3. You can also access driver/task/actor logs in ``/tmp/ray/session_latest/logs/``; the driver log is ``job-driver-raysubmit_.log``.

4. We strongly recommend viewing job details from the dashboard in multinode training, because it provides a more structured way to view job information.

.. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/ray/job.png?raw=true

.. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/ray/job_detail.png?raw=true

Option 2: Launch via SkyPilot on Kubernetes or clouds
------------------------------------------------------

.. note::

    Ready-to-use SkyPilot example configurations are available in the `examples/skypilot/ `_ directory:

    - ``verl-ppo.yaml`` - PPO training with GSM8K dataset
    - ``verl-grpo.yaml`` - GRPO training with MATH dataset
    - ``verl-multiturn-tools.yaml`` - Multi-turn tool usage training

    See the `SkyPilot examples README `_ for detailed usage instructions.

Step 1: Setup SkyPilot
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

SkyPilot supports different clouds; here we use GCP as an example.

`install skypilot `_

.. code-block:: bash

    conda create -y -n sky python=3.10
    conda activate sky
    pip install "skypilot[gcp]"
    conda install -c conda-forge google-cloud-sdk
    gcloud init

    # Run this if you don't have a credential file.
    # This will generate ~/.config/gcloud/application_default_credentials.json.
    gcloud auth application-default login

    # Check if the GCP credential is correctly setup.
    sky check gcp

.. image:: https://github.com/yottalabsai/open-source/blob/main/static/verl/setup_skypilot.png?raw=true

Step 2: Prepare dataset
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. code-block:: bash

    git clone https://github.com/volcengine/verl.git
    cd verl/examples/data_preprocess
    python3 gsm8k.py --local_save_dir ~/data/gsm8k

Step 3: Submit a job with SkyPilot
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

1. Create a SkyPilot YAML ``verl-cluster.yml`` with the following content:
.. note::

    ``workdir: .`` will sync all the data in the current dir to the remote cluster.

.. code-block:: yaml

    resources:
      accelerators: L4:1  # every node has 1 L4 GPU
      image_id: docker:verlai/verl:base-verl0.5-cu126-cudnn9.8-torch2.7.0-fa2.7.4
      memory: 64+  # every node has 64 GB memory
      ports: 8265  # expose port for ray dashboard

    num_nodes: 2  # cluster size

    # --------------- Work Directory Synchronization (workdir) ---------------
    # Defines the local working directory to be synchronized to the remote cluster.
    # Here, '.' means synchronizing the directory where the sky submit command is currently run.
    workdir: .

    # --------------- (secrets) ---------------
    secrets:
      ## your wandb api key ##
      WANDB_API_KEY: null

    # --------------- File Mounts/Data Upload (file_mounts) ---------------
    # If your dataset (gsm8k folder) is local, it needs to be uploaded to the remote cluster.
    file_mounts:
      # Remote path (relative to remote user's home directory): Local path
      # /remote/dir1/file: /local/dir1/file
      data/gsm8k: ~/data/gsm8k

    # --------------- Environment Setup (setup) ---------------
    # Commands run on each node of the remote cluster to set up the environment
    # (e.g., install dependencies). These are run directly inside Docker.
    setup: |
      rm -rf verl
      git clone https://github.com/volcengine/verl.git
      cd verl
      pip3 install -v -e .[vllm]

    # --------------- Run Command (run) ---------------
    # The actual task commands to be executed on the remote cluster.
    # This script will first start the Ray cluster (different ray start commands are executed on Head and Worker nodes).
    # Then, your training script will only be run on the Head node (SKYPILOT_NODE_RANK == 0).
    run: |
      # Get the Head node's IP and total number of nodes (environment variables injected by SkyPilot).
      head_ip=`echo "$SKYPILOT_NODE_IPS" | head -n1`
      num_nodes=`echo "$SKYPILOT_NODE_IPS" | wc -l`  # Here num_nodes should be equal to 2.

      # login wandb
      python3 -c "import wandb; wandb.login(relogin=True, key='$WANDB_API_KEY')"

      # Start Ray based on node role (Head=0, Worker>0).
      # This logic is a standard Ray cluster startup script.
      if [ "$SKYPILOT_NODE_RANK" == "0" ]; then
        # Head node starts Ray Head.
        echo "Starting Ray head node..."
        # Check if a Ray Head is already running to avoid duplicate starts.
        ps aux | grep ray | grep 6379 &> /dev/null || ray start --head --disable-usage-stats \
          --port=6379 \
          --dashboard-host=0.0.0.0 \
          --dashboard-port=8265

        # Wait for all worker nodes to join the cluster.
        while [ $(ray nodes | grep NODE_ID | wc -l) -lt $num_nodes ]; do
          echo "Waiting for all nodes to join... ($(ray nodes | grep NODE_ID | wc -l)/$num_nodes)"
          sleep 5
        done

        # Head node executes the training script.
        echo "Executing training script on head node..."
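        # PPO entrypoint with Hydra-style overrides; tune model paths and batch sizes for your hardware.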
        python3 -m verl.trainer.main_ppo \
          data.train_files=data/gsm8k/train.parquet \
          data.val_files=data/gsm8k/test.parquet \
          data.train_batch_size=256 \
          data.max_prompt_length=512 \
          data.max_response_length=256 \
          actor_rollout_ref.model.path=Qwen/Qwen2.5-0.5B-Instruct \
          actor_rollout_ref.actor.optim.lr=1e-6 \
          actor_rollout_ref.actor.ppo_mini_batch_size=64 \
          actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \
          actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=8 \
          actor_rollout_ref.rollout.tensor_model_parallel_size=1 \
          actor_rollout_ref.rollout.name=vllm \
          actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \
          actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \
          critic.optim.lr=1e-5 \
          critic.model.path=Qwen/Qwen2.5-0.5B-Instruct \
          critic.ppo_micro_batch_size_per_gpu=4 \
          algorithm.kl_ctrl.kl_coef=0.001 \
          trainer.logger=['console','wandb'] \
          trainer.val_before_train=False \
          trainer.default_hdfs_dir=null \
          trainer.n_gpus_per_node=1 \
          trainer.nnodes=2 \
          trainer.save_freq=20 \
          trainer.test_freq=20 \
          trainer.total_epochs=2 \
          trainer.project_name=verl_examples \
          trainer.experiment_name=experiment_name_gsm8k
      else
        # Wait for Ray Head to start.
        sleep 10  # Increase waiting time to ensure Head finishes starting.

        # Worker node starts Ray Worker.
        echo "Starting Ray worker node..."
        # Check if a Ray Worker is already running to avoid duplicate starts.
        ps aux | grep ray | grep $head_ip:6379 &> /dev/null || ray start --address $head_ip:6379 --disable-usage-stats
        # Add a sleep after `ray start` to give ray enough time to daemonize
        sleep 5  # Ensure Worker successfully connects to Head.
      fi
      # No commands are added to the Worker node here; the Worker's main task is to start Ray and wait for the Head node to assign tasks.
      echo "Node setup and Ray start script finished for rank $SKYPILOT_NODE_RANK."

.. code-block:: bash

    export WANDB_API_KEY=
    sky launch -c verl --secret WANDB_API_KEY verl-cluster.yml

.. image:: https://github.com/yottalabsai/open-source/blob/main/static/verl/running_job.png?raw=true

.. image:: https://github.com/yottalabsai/open-source/blob/main/static/verl/running_job_1.png?raw=true

.. image:: https://github.com/yottalabsai/open-source/blob/main/static/verl/finished.png?raw=true

**Check the cluster on GCP**

.. image:: https://github.com/yottalabsai/open-source/blob/main/static/verl/gcp_instances.png?raw=true

**Check Ray Dashboard**

We can see the cluster on the Ray Dashboard with the GCP head node:

.. code-block:: console

    $ sky status --endpoint 8265 verl
    1.2.3.4:8265

.. image:: https://github.com/yottalabsai/open-source/blob/main/static/verl/ray_dashboard_overview.png?raw=true

.. image:: https://github.com/yottalabsai/open-source/blob/main/static/verl/ray_dashboard_jobs.png?raw=true

.. image:: https://github.com/yottalabsai/open-source/blob/main/static/verl/ray_dashboard_cluster.png?raw=true

**Check the checkpoint of the model**

.. code-block:: bash

    # login the head node
    ssh verl
    # The global step will vary. Find the correct path from the training logs.
    cd ~/sky_workdir/checkpoints/verl_examples/gsm8k/
    # Then list contents to find the checkpoint, e.g.:
    ls -R .

.. image:: https://github.com/yottalabsai/open-source/blob/main/static/verl/saved_model.png?raw=true

Option 3: Launch via Slurm
------------------------------

Ray provides users with `this `_ official tutorial to start a Ray cluster on top of Slurm. We have verified the :doc:`GSM8K example<../examples/gsm8k_example>` on a Slurm cluster under a multi-node setting with the following steps.
1. [Optional] If your cluster supports `Apptainer or Singularity `_ and you wish to use it, convert verl's Docker image to an Apptainer image. Alternatively, set up the environment with the package manager available on your cluster or use other container runtimes (e.g. through `Slurm's OCI support `_) available to you.

.. code:: bash

    apptainer pull /your/dest/dir/vemlp-th2.4.0-cu124-vllm0.6.3-ray2.10-te1.7-v0.0.3.sif docker://verlai/verl:vemlp-th2.4.0-cu124-vllm0.6.3-ray2.10-te1.7-v0.0.3

2. Follow the :doc:`GSM8K example<../examples/gsm8k_example>` to prepare the dataset and model checkpoints.

3. Modify `examples/slurm/ray_on_slurm.slurm `_ with your cluster's own information.

4. Submit the job script to the Slurm cluster with `sbatch`.

Please note that Slurm cluster setup may vary. If you encounter any issues, please refer to Ray's `Slurm user guide `_ for common caveats. If you changed the Slurm resource specifications, please make sure to update the environment variables in the job script if necessary.

Option 4: Launch via dstack
------------------------------

`dstackai/dstack `_ is an open-source container orchestrator that simplifies distributed training across cloud providers and on-premises environments without the need to use K8S or Slurm.

Prerequisite
~~~~~~~~~~~~

Once dstack is `installed `_, initialize the directory as a repo with ``dstack init``.

.. code-block:: bash

    mkdir myproject && cd myproject
    dstack init

**Create a fleet**

Before submitting distributed training jobs, create a `dstack` `fleet `_.

Run a Ray cluster task
~~~~~~~~~~~~~~~~~~~~~~

Once the fleet is created, define a Ray cluster task, e.g. in ``ray-cluster.dstack.yml``:

.. code-block:: yaml

    type: task
    name: ray-verl-cluster

    nodes: 2

    env:
      - WANDB_API_KEY
      - PYTHONUNBUFFERED=1
      - CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7

    image: verlai/verl:app-verl0.6-transformers4.56.1-sglang0.5.2-mcore0.13.0-te2.2

    commands:
      - git clone https://github.com/volcengine/verl
      - cd verl
      - pip install --no-deps -e .
      - pip install hf_transfer hf_xet
      - |
        if [ $DSTACK_NODE_RANK = 0 ]; then
          python3 examples/data_preprocess/gsm8k.py --local_save_dir ~/data/gsm8k
          python3 -c "import transformers; transformers.pipeline('text-generation', model='Qwen/Qwen2.5-7B-Instruct')"
          ray start --head --port=6379;
        else
          ray start --address=$DSTACK_MASTER_NODE_IP:6379
        fi

    # Expose Ray dashboard port
    ports:
      - 8265

    resources:
      gpu: 80GB:8
      shm_size: 128GB

    # Save checkpoints on the instance
    volumes:
      - /checkpoints:/checkpoints

Now, if you run this task via `dstack apply`, it will automatically forward Ray's dashboard port to `localhost:8265`.

.. code-block:: bash

    dstack apply -f ray-cluster.dstack.yml

As long as `dstack apply` remains attached, you can use `localhost:8265` to submit Ray jobs for execution.

Submit Ray jobs
~~~~~~~~~~~~~~~

Before you can submit Ray jobs, make sure `ray` is installed locally:

.. code-block:: shell

    pip install ray

Now you can submit the training job to the Ray cluster, which is available at ``localhost:8265``:

.. code-block:: shell
    $ export RAY_ADDRESS=http://localhost:8265
    $ ray job submit \
        -- python3 -m verl.trainer.main_ppo \
        data.train_files=/root/data/gsm8k/train.parquet \
        data.val_files=/root/data/gsm8k/test.parquet \
        data.train_batch_size=256 \
        data.max_prompt_length=512 \
        data.max_response_length=256 \
        actor_rollout_ref.model.path=Qwen/Qwen2.5-7B-Instruct \
        actor_rollout_ref.actor.optim.lr=1e-6 \
        actor_rollout_ref.actor.ppo_mini_batch_size=64 \
        actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \
        actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=8 \
        actor_rollout_ref.rollout.tensor_model_parallel_size=1 \
        actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \
        actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \
        critic.optim.lr=1e-5 \
        critic.model.path=Qwen/Qwen2.5-7B-Instruct \
        critic.ppo_micro_batch_size_per_gpu=4 \
        algorithm.kl_ctrl.kl_coef=0.001 \
        trainer.project_name=ppo_training \
        trainer.experiment_name=qwen-2.5-7B \
        trainer.val_before_train=False \
        trainer.n_gpus_per_node=8 \
        trainer.nnodes=2 \
        trainer.default_local_dir=/checkpoints \
        trainer.save_freq=10 \
        trainer.test_freq=10 \
        trainer.total_epochs=15 \
        trainer.resume_mode=disable 2>&1 | tee verl_demo.log

For more details on how `dstack` works, check out its `documentation `_.

How to debug?
---------------------

Ray Distributed Debugger VSCode Extension (Recommended)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

1. Starting with Ray 2.39, Anyscale has introduced the `Ray Distributed Debugger `_ VSCode extension. Follow the extension’s installation instructions, then add your cluster using the dashboard URL you obtained earlier.

.. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/ray/debugger.png?raw=true
   :alt: Ray Distributed Debugger VSCode extension screenshot

2. Prerequisites. Ensure the following are installed (see the extension README for more detail):

- Visual Studio Code
- `ray[default]` >= 2.9.1
- `debugpy` >= 1.8.0

.. image:: https://github.com/aoshen524/verl/blob/main/docs/start/c7098b755ff689859837773a916c857.png?raw=true
   :alt: VSCode with Ray prerequisites

3. Environment Variables. To enable post‑mortem debugging, set:

.. code-block:: bash

    export RAY_DEBUG_POST_MORTEM=1

.. admonition:: Note
   :class: important

   Be sure to remove any legacy flags before starting Ray:

   - `RAY_DEBUG=legacy`
   - `--ray-debugger-external`

4. Configuring Breakpoints. Set up ``breakpoint()`` in your code and submit the job to the cluster; the extension will then show the breakpoint information.

1. Insert `breakpoint()` calls into your remote functions.
2. Submit your job to the cluster. The extension will detect active breakpoints and display them in VSCode.

.. image:: https://github.com/aoshen524/verl/blob/main/docs/start/4ddad74395c79a1402331c0ce73316f.png?raw=true
   :alt: Detected breakpoint in VSCode

**Note:** Breakpoints are only supported inside functions decorated with `@ray.remote`.

5. Launching the Debugger. Run your job directly from the command line (do not use a `launch.json`):

.. code-block:: bash

    python job.py

6. Attaching to a Breakpoint. Once the process hits the first `breakpoint()`, click the Ray Distributed Debugger icon in the VSCode sidebar to attach the debugger.

.. image:: https://github.com/aoshen524/verl/blob/main/docs/start/4ddad74395c79a1402331c0ce73316f.png?raw=true
   :alt: Attaching VSCode debugger to Ray process

7. Debugging With Multiple breakpoint(). For each subsequent task, first disconnect the current debugger session, then click the extension icon again to attach to the next breakpoint.
.. image:: https://github.com/aoshen524/verl/blob/main/docs/start/6e83c910a62c82fecb89c6619e001cd.png?raw=true
   :alt: Disconnecting and reconnecting the debugger

Legacy Ray Debugger
~~~~~~~~~~~~~~~~~~~

1. Ray has a builtin legacy `debugger `_ that allows you to debug your distributed applications. To enable the debugger, start the ray cluster with ``RAY_DEBUG=legacy`` and ``--ray-debugger-external``.

.. code-block:: bash

    # start head node
    RAY_DEBUG=legacy ray start --head --dashboard-host=0.0.0.0 --ray-debugger-external
    # start worker node
    RAY_DEBUG=legacy ray start --address='10.124.46.192:6379' --ray-debugger-external

2. Set up breakpoints in your code, and submit the job to the cluster. Then run ``ray debug`` to wait for breakpoints:

.. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/ray/legacy.png?raw=true

Multi-node training on AMD clusters
---------------------------------------------------------------------------------------

If you want to run multi-node training with Slurm using a Docker/Podman container on an AMD cluster, you can use the following script. If you encounter any issues in using AMD GPUs to run verl, please contact `Yusheng Su `_.

.. note::

    1. You need to use ``podman`` or ``docker`` in the following script. We will release the apptainer script later.
    2. If you want to use ``podman``, simply replace ``docker`` with ``podman`` in the following script.

The script includes the following steps:

1. SLURM Configuration
2. Environment Setup
3. Docker/Podman Container Setup
4. Ray Cluster Initialization
5. Data Preprocessing
6. Model Setup
7. Training Launch

slurm_script.sh
~~~~~~~~~~~~~~~~~~~~

.. code-block:: bash

    #!/bin/bash

    #SBATCH --job-name=verl-ray-on-slurm
    #SBATCH --nodes=2
    #SBATCH --ntasks-per-node=2
    #SBATCH --mem=200G
    #SBATCH --time=30-00:00:00
    #SBATCH --gpus-per-node=8
    #SBATCH --cpus-per-task=28
    #SBATCH --output=../verl_log/slurm-%j.out
    #SBATCH --error=../verl_log/slurm-%j.err
    #SBATCH --nodelist=gpu-[0,1]

    # load necessary modules
    ### Run this setup
    # [Cluster]: Use docker
    # docker pull docker.io/rocm/vllm:rocm6.2_mi300_ubuntu20.04_py3.9_vllm_0.6.4

    ##########################################################################
    ###The following setting should be set in different project and cluster###
    ##########################################################################
    ### Project
    CONTAINER_NAME="multinode_verl_training"
    IMG="verl.rocm"
    DOCKERFILE="docker/Dockerfile.rocm"
    # echo $PWD
    verl_workdir="${HOME}/projects/verl_upstream"
    export TRANSFORMERS_CACHE="${HOME}/.cache/huggingface"
    export HF_HOME=$TRANSFORMERS_CACHE

    ### Cluster Network Setting
    export NCCL_DEBUG=TRACE
    export GPU_MAX_HW_QUEUES=2
    export TORCH_NCCL_HIGH_PRIORITY=1
    export NCCL_CHECKS_DISABLE=1
    # export NCCL_IB_HCA=rdma0,rdma1,rdma2,rdma3,rdma4,rdma5,rdma6,rdma7
    export NCCL_IB_HCA=mlx5_0,mlx5_1,mlx5_2,mlx5_3,mlx5_4,mlx5_5,mlx5_8,mlx5_9
    export NCCL_IB_GID_INDEX=3
    export NCCL_CROSS_NIC=0
    export CUDA_DEVICE_MAX_CONNECTIONS=1
    export NCCL_PROTO=Simple
    export RCCL_MSCCL_ENABLE=0
    export TOKENIZERS_PARALLELISM=false
    export HSA_NO_SCRATCH_RECLAIM=1
    ##########################################################################

    ### For rocm and training script
    export HIP_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
    export ROCR_VISIBLE_DEVICES=$HIP_VISIBLE_DEVICES
    export CUDA_VISIBLE_DEVICES=$HIP_VISIBLE_DEVICES

    # Build and launch the Docker container
    srun bash -c "
        # Exit on any error
        set -e

        # Clean up dangling images (images with tag)
        docker image prune -f

        # Need to pull the docker first
        docker pull docker.io/rocm/vllm:rocm6.2_mi300_ubuntu20.04_py3.9_vllm_0.6.4

        if ! docker images --format "{{.Repository}}:{{.Tag}}" | grep -q "${IMG}"; then
            echo \"Building ${IMG} image...\"
            docker build -f \"${DOCKERFILE}\" -t \"${IMG}\" .
        else
            echo \"${IMG} image already exists, skipping build\"
        fi

        # Removing old container if exists
        docker rm \"${CONTAINER_NAME}\" 2>/dev/null || true

        # Checking network devices
        ibdev2netdev

        # Launch the docker
        docker run --rm -d \
          -e HYDRA_FULL_ERROR=1 \
          -e HIP_VISIBLE_DEVICES=${HIP_VISIBLE_DEVICES} \
          -e ROCR_VISIBLE_DEVICES=${ROCR_VISIBLE_DEVICES} \
          -e CUDA_VISIBLE_DEVICES=${CUDA_VISIBLE_DEVICES} \
          -e NCCL_DEBUG=${NCCL_DEBUG} \
          -e GPU_MAX_HW_QUEUES=${GPU_MAX_HW_QUEUES} \
          -e TORCH_NCCL_HIGH_PRIORITY=${TORCH_NCCL_HIGH_PRIORITY} \
          -e NCCL_CHECKS_DISABLE=${NCCL_CHECKS_DISABLE} \
          -e NCCL_IB_HCA=${NCCL_IB_HCA} \
          -e NCCL_IB_GID_INDEX=${NCCL_IB_GID_INDEX} \
          -e NCCL_CROSS_NIC=${NCCL_CROSS_NIC} \
          -e CUDA_DEVICE_MAX_CONNECTIONS=${CUDA_DEVICE_MAX_CONNECTIONS} \
          -e NCCL_PROTO=${NCCL_PROTO} \
          -e RCCL_MSCCL_ENABLE=${RCCL_MSCCL_ENABLE} \
          -e TOKENIZERS_PARALLELISM=${TOKENIZERS_PARALLELISM} \
          -e HSA_NO_SCRATCH_RECLAIM=${HSA_NO_SCRATCH_RECLAIM} \
          -e TRANSFORMERS_CACHE=${TRANSFORMERS_CACHE} \
          -e HF_HOME=${HF_HOME} \
          --network host \
          --device /dev/dri \
          --device /dev/kfd \
          --device /dev/infiniband \
          --group-add video \
          --cap-add SYS_PTRACE \
          --security-opt seccomp=unconfined \
          --privileged \
          -v \${HOME}:\${HOME} \
          -v \${HOME}/.ssh:/root/.ssh \
          -w "${verl_workdir}" \
          --shm-size 128G \
          --name \"${CONTAINER_NAME}\" \
          \"${IMG}\" \
          tail -f /dev/null

        echo \"Container setup completed\"
    "

    # (Optional): If you do not want to run in root mode and want to assign yourself as the user,
    # please add `-e HOST_UID=$(id -u)` and `-e HOST_GID=$(id -g)` to the above docker launch script.

    ### Ray launch the nodes before training

    # Getting the node names
    nodes_array=($(scontrol show hostnames "$SLURM_JOB_NODELIST" | tr '\n' ' '))

    head_node=${nodes_array[0]}
    head_node_ip=$(srun --nodes=1 --ntasks=1 -w "$head_node" hostname --ip-address)

    # if we detect a space character in the head node IP, we'll
    # convert it to an ipv4 address. This step is optional.
    if [[ "$head_node_ip" == *" "* ]]; then
        IFS=' ' read -ra ADDR <<<"$head_node_ip"
        if [[ ${#ADDR[0]} -gt 16 ]]; then
            head_node_ip=${ADDR[1]}
        else
            head_node_ip=${ADDR[0]}
        fi
        echo "IPV6 address detected. We split the IPV4 address as $head_node_ip"
    fi

    port=6379
    ip_head=$head_node_ip:$port
    export ip_head
    echo "IP Head: $ip_head"

    # make sure we set environment variables before Ray initialization
    # Print out all env variables
    printenv

    echo "Starting HEAD at $head_node"
    srun --nodes=1 --ntasks=1 -w "$head_node" \
        docker exec "${CONTAINER_NAME}" \
        ray start --head --node-ip-address="$head_node_ip" --port=$port \
        --dashboard-port=8266 \
        --num-cpus "${SLURM_CPUS_PER_TASK}" --num-gpus "${SLURM_GPUS_PER_NODE}" --block &

    # optional, though may be useful in certain versions of Ray < 1.0.
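    # Give the Ray head daemon a moment to come up before launching the workers.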
    sleep 10

    # number of nodes other than the head node
    worker_num=$((SLURM_JOB_NUM_NODES - 1))

    for ((i = 1; i <= worker_num; i++)); do
        node_i=${nodes_array[$i]}
        echo "Debug: Starting worker on node_i = ${node_i}"
        if [ -z "$node_i" ]; then
            echo "Error: Empty node name for worker $i"
            continue
        fi
        echo "Starting WORKER $i at $node_i"
        srun --nodes=1 --ntasks=1 -w "$node_i" \
            docker exec "${CONTAINER_NAME}" \
            ray start --address "$ip_head" --num-cpus "${SLURM_CPUS_PER_TASK}" --num-gpus "${SLURM_GPUS_PER_NODE}" --block &
        sleep 5
    done

    # Ray initialization test (see whether any error occurred in the above execution)
    echo "Testing Ray initialization in the slurm nodes..."
    docker exec "${CONTAINER_NAME}" python3 -c '
    import ray
    try:
        ray.init(address="auto")
        print("\n=== Ray Cluster Status ===")
        print(f"Number of nodes: {len(ray.nodes())}")
        for node in ray.nodes():
            print("Node: {}, Status: {}".format(node["NodeManagerHostname"], node["Alive"]))
            # print(f"Node: {node}")
        ray.shutdown()
        print("Ray initialization successful!")
    except Exception as e:
        print(f"Ray initialization failed: {str(e)}")
    '
    echo "=== Ray test completed ==="

    ######
    # Run data preprocessing
    echo "Starting data preprocessing..."
    docker exec "${CONTAINER_NAME}" \
        python3 "examples/data_preprocess/gsm8k.py" "--local_save_dir" "../data/gsm8k"

    echo "Starting data preprocessing..."
    docker exec "${CONTAINER_NAME}" \
        python3 "examples/data_preprocess/math_dataset.py" "--local_dir" "../data/math"

    train_files="../data/gsm8k/train.parquet"
    val_files="../data/gsm8k/test.parquet"

    # Download and test model
    echo "Loading model..."
    docker exec "${CONTAINER_NAME}" \
        python3 -c "import transformers; transformers.pipeline('text-generation', model='Qwen/Qwen2-7B-Instruct')"
    # Set model path after pipeline test
    MODEL_PATH="Qwen/Qwen2-7B-Instruct"

    echo "== Data and model loading Done =="
    echo "Start to train..."
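    # Launch the trainer from the head node; srun --overlap lets this step share the allocation with the Ray daemons started above.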
PYTHONUNBUFFERED=1 srun --overlap --nodes=${SLURM_NNODES} --ntasks=1 -w "$head_node" \
    docker exec "${CONTAINER_NAME}" \
    python3 -m verl.trainer.main_ppo \
    data.train_files=$train_files \
    data.val_files=$val_files \
    data.train_batch_size=1024 \
    data.max_prompt_length=1024 \
    data.max_response_length=1024 \
    actor_rollout_ref.model.path=$MODEL_PATH \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.model.use_remove_padding=True \
    actor_rollout_ref.actor.ppo_mini_batch_size=256 \
    actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=8 \
    actor_rollout_ref.model.enable_gradient_checkpointing=True \
    actor_rollout_ref.actor.fsdp_config.param_offload=False \
    actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
    actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \
    actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
    actor_rollout_ref.rollout.name=vllm \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.9 \
    actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=16 \
    actor_rollout_ref.ref.fsdp_config.param_offload=True \
    critic.optim.lr=1e-5 \
    critic.model.use_remove_padding=True \
    critic.model.path=$MODEL_PATH \
    critic.model.enable_gradient_checkpointing=False \
    critic.ppo_micro_batch_size_per_gpu=8 \
    critic.model.fsdp_config.param_offload=False \
    critic.model.fsdp_config.optimizer_offload=False \
    algorithm.kl_ctrl.kl_coef=0.0001 \
    trainer.critic_warmup=0 \
    trainer.logger='["console","wandb"]' \
    trainer.project_name='verl_example' \
    trainer.experiment_name='Qwen2-7B-Instruct_function_rm' \
    trainer.n_gpus_per_node=${SLURM_GPUS_PER_NODE} \
    trainer.val_before_train=False \
    trainer.nnodes=${SLURM_NNODES} \
    trainer.save_freq=-1 \
    trainer.test_freq=10 \
    trainer.total_epochs=15

Run multi-node training with the above slurm_script.sh
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Simply ``sbatch`` your slurm_script.sh:

.. code-block:: bash

    sbatch slurm_script.sh



================================================
FILE: verl_distillation/docs/start/quickstart.rst
================================================

.. _quickstart:

=========================================================
Quickstart: PPO training on GSM8K dataset
=========================================================

Post-train an LLM using the GSM8K dataset.

Introduction
------------

.. _hf_dataset_gsm8k: https://huggingface.co/datasets/gsm8k

In this example, we train an LLM to tackle the `GSM8k <https://huggingface.co/datasets/gsm8k>`_ task with function-based rewards. [1]_

Prerequisite:

- the latest version of ``verl`` and its dependencies installed following the installation guide. Using the docker image is recommended.

- a GPU with at least 24 GB HBM

Dataset Introduction
--------------------

GSM8k is a math problem dataset. The prompt is an elementary school problem. The LLM is asked to solve the math problem. Below is an example:

Prompt

   Katy makes coffee using teaspoons of sugar and cups of water in the ratio of 7:13. If she used a total of 120 teaspoons of sugar and cups of water, calculate the number of teaspoonfuls of sugar she used.
Solution

   The total ratio representing the ingredients she used to make the coffee is 7+13 = <<7+13=20>>20. Since the fraction representing the number of teaspoons she used is 7/20, she used 7/20*120 = <<7/20*120=42>>42 #### 42

Step 1: Prepare the dataset
----------------------------

We preprocess the dataset in parquet format so that (1) it contains the necessary fields for computing RL rewards and (2) it is faster to read.

.. code-block:: bash

   python3 examples/data_preprocess/gsm8k.py --local_save_dir ~/data/gsm8k

Step 2: Download a model for post-training
-------------------------------------------

In this example, we start with the ``Qwen2.5-0.5B-Instruct`` model.

If you want to perform SFT before RL, refer to the :doc:`Complete GSM8K Example<../examples/gsm8k_example>`, the `sft directory `_ and `SFT Trainer `_ for further details.

.. code-block:: bash

   python3 -c "import transformers; transformers.pipeline('text-generation', model='Qwen/Qwen2.5-0.5B-Instruct')"

Step 3: Perform PPO training with the instruct model
----------------------------------------------------------------------

**Reward Model/Function**

We use a pre-defined rule-based reward model. We force the model to produce a final answer following four "#" characters, as shown in the solution. We extract the final answer from both the solution and the model's output using regular expression matching, and assign a reward of 1 to a correct answer and 0 to an incorrect or missing answer. For more details, please refer to `verl/utils/reward_score/gsm8k.py `_.

**Training Script**

Now let's run PPO training with the dataset and model above. [2]_

Set ``data.train_files``, ``data.val_files``, ``actor_rollout_ref.model.path`` and ``critic.model.path`` based on your dataset and model names or paths. You may set ``VERL_USE_MODELSCOPE=True`` to download models from `modelscope `_ instead of `huggingface `_.

.. code-block:: bash

   PYTHONUNBUFFERED=1 python3 -m verl.trainer.main_ppo \
    data.train_files=$HOME/data/gsm8k/train.parquet \
    data.val_files=$HOME/data/gsm8k/test.parquet \
    data.train_batch_size=256 \
    data.max_prompt_length=512 \
    data.max_response_length=256 \
    actor_rollout_ref.model.path=Qwen/Qwen2.5-0.5B-Instruct \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.actor.ppo_mini_batch_size=64 \
    actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \
    actor_rollout_ref.rollout.name=vllm \
    actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=8 \
    actor_rollout_ref.rollout.tensor_model_parallel_size=1 \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \
    actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \
    critic.optim.lr=1e-5 \
    critic.model.path=Qwen/Qwen2.5-0.5B-Instruct \
    critic.ppo_micro_batch_size_per_gpu=4 \
    algorithm.kl_ctrl.kl_coef=0.001 \
    trainer.logger=console \
    trainer.val_before_train=False \
    trainer.n_gpus_per_node=1 \
    trainer.nnodes=1 \
    trainer.save_freq=10 \
    trainer.test_freq=10 \
    trainer.total_epochs=15 2>&1 | tee verl_demo.log

You are expected to see the following logs, indicating training in progress. The key metric ``val/test_score/openai/gsm8k`` is computed every ``trainer.test_freq`` steps:
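Concretely, the rule-based reward boils down to answer extraction plus a string comparison. Below is a minimal sketch of that idea; the function names and exact regex here are illustrative assumptions, not the authoritative implementation in ``verl/utils/reward_score/gsm8k.py``:

.. code-block:: python

   import re

   # Illustrative pattern: grab whatever number follows a "####" marker.
   ANSWER_RE = re.compile(r"####\s*(-?[\d.,]+)")

   def extract_answer(text: str):
       """Return the last '#### <number>' in the text, or None if absent."""
       matches = ANSWER_RE.findall(text)
       return matches[-1].replace(",", "") if matches else None

   def gsm8k_reward(solution: str, model_output: str) -> float:
       """Reward 1.0 when the extracted answers match, 0.0 otherwise."""
       ground_truth = extract_answer(solution)
       prediction = extract_answer(model_output)
       return 1.0 if prediction is not None and prediction == ground_truth else 0.0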
.. code-block:: bash

    step:0 - timing/gen:21.470 - timing/ref:4.360 - timing/values:5.800 - actor/reward_kl_penalty:0.000 - actor/reward_kl_penalty_coeff:0.001 - timing/adv:0.109 - timing/update_critic:15.664 - critic/vf_loss:14.947 - critic/vf_clipfrac:0.000 - critic/vpred_mean:-2.056 - critic/grad_norm:1023.278 - critic/lr(1e-4):0.100 - timing/update_actor:20.314 - actor/entropy_loss:0.433 - actor/pg_loss:-0.005 - actor/pg_clipfrac:0.000 - actor/ppo_kl:0.000 - actor/grad_norm:1.992 - actor/lr(1e-4):0.010 - critic/score/mean:0.004 - critic/score/max:1.000 - critic/score/min:0.000 - critic/rewards/mean:0.004 - critic/rewards/max:1.000 - critic/rewards/min:0.000 - critic/advantages/mean:-0.000 - critic/advantages/max:2.360 - critic/advantages/min:-2.280 - critic/returns/mean:0.003 - critic/returns/max:0.000 - critic/returns/min:0.000 - critic/values/mean:-2.045 - critic/values/max:9.500 - critic/values/min:-14.000 - response_length/mean:239.133 - response_length/max:256.000 - response_length/min:77.000 - prompt_length/mean:104.883 - prompt_length/max:175.000 - prompt_length/min:68.000
    step:1 - timing/gen:23.020 - timing/ref:4.322 - timing/values:5.953 - actor/reward_kl_penalty:0.000 - actor/reward_kl_penalty_coeff:0.001 - timing/adv:0.118 - timing/update_critic:15.646 - critic/vf_loss:18.472 - critic/vf_clipfrac:0.384 - critic/vpred_mean:1.038 - critic/grad_norm:942.924 - critic/lr(1e-4):0.100 - timing/update_actor:20.526 - actor/entropy_loss:0.440 - actor/pg_loss:0.000 - actor/pg_clipfrac:0.002 - actor/ppo_kl:0.000 - actor/grad_norm:2.060 - actor/lr(1e-4):0.010 - critic/score/mean:0.000 - critic/score/max:0.000 - critic/score/min:0.000 - critic/rewards/mean:0.000 - critic/rewards/max:0.000 - critic/rewards/min:0.000 - critic/advantages/mean:0.000 - critic/advantages/max:2.702 - critic/advantages/min:-2.616 - critic/returns/mean:0.000 - critic/returns/max:0.000 - critic/returns/min:0.000 - critic/values/mean:-2.280 - critic/values/max:11.000 - critic/values/min:-16.000 - response_length/mean:232.242 - response_length/max:256.000 - response_length/min:91.000 - prompt_length/mean:102.398 - prompt_length/max:185.000 - prompt_length/min:70.000

Check out the ``Algorithm Baselines`` page for full training and validation logs for reference.

The checkpoint is saved at the following dir by default: ``checkpoints/${trainer.project_name}/${trainer.experiment_name}``.

You can merge the saved checkpoints into a huggingface model using the ``verl.model_merger`` module, for example:

.. code-block:: bash

    python3 -m verl.model_merger merge \
        --backend fsdp \
        --local_dir checkpoints/${trainer.project_name}/${trainer.experiment_name}/global_step_1/actor \
        --target_dir checkpoints/${trainer.project_name}/${trainer.experiment_name}/global_step_1/actor/huggingface

For more details about checkpointing and model merging, please refer to :ref:`checkpoint-page`.

To enable ``wandb`` for experiment tracking, set the following configs:

.. code-block:: bash

    trainer.logger='["console","wandb"]' \
    trainer.project_name=$YOUR_PROJECT_NAME \
    trainer.experiment_name=$YOUR_RUN_NAME \

If you encounter out-of-memory issues on GPUs with less than 32 GB of HBM, enabling the following configs can help:

.. code-block:: bash

    actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=1 \
    critic.ppo_micro_batch_size_per_gpu=1 \

For the full set of configs, please refer to :ref:`config-explain-page` for detailed explanation and performance tuning.
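As a quick sanity check (not part of the official quickstart), the merged directory can be loaded with plain ``transformers``. The checkpoint path below is a placeholder mirroring the merge command above; substitute your own project and experiment names:

.. code-block:: python

    from transformers import AutoModelForCausalLM, AutoTokenizer

    # Placeholder path: output of the verl.model_merger command above.
    ckpt = "checkpoints/verl_example/my_run/global_step_1/actor/huggingface"

    tokenizer = AutoTokenizer.from_pretrained(ckpt)
    model = AutoModelForCausalLM.from_pretrained(ckpt)

    inputs = tokenizer("Janet has 3 apples and buys 2 more. How many apples does she have?", return_tensors="pt")
    output = model.generate(**inputs, max_new_tokens=64)
    print(tokenizer.decode(output[0], skip_special_tokens=True))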
.. [1] The original paper (https://arxiv.org/pdf/2110.14168) mainly focuses on training a verifier (a reward model) to solve math problems via Best-of-N sampling. In this example, we train an RL agent using a rule-based reward model.

.. [2] More training script examples for the FSDP and Megatron-LM backends are stored in the `examples/ppo_trainer `_ directory.



================================================
FILE: verl_distillation/docs/start/ray_debug_tutorial.rst
================================================

Ray Debug Tutorial
==================

Last updated: 04/23/2025.

.. _wuxibin89: https://github.com/wuxibin89

Author: `Ao Shen `_.

How to debug?
---------------------

Ray Distributed Debugger VSCode Extension (Recommended)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

1. Starting with Ray 2.39, Anyscale has introduced the `Ray Distributed Debugger `_ VSCode extension. Follow the extension's installation instructions, then add your cluster using the dashboard URL you obtained earlier.

.. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/ray/debugger.png?raw=true
   :alt: Ray Distributed Debugger VSCode extension screenshot

2. Prerequisites. Ensure the following are installed (see the extension README for more detail):

- Visual Studio Code

- `ray[default]` >= 2.9.1

- `debugpy` >= 1.8.0

.. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/ray/readme.png?raw=true
   :alt: VSCode with Ray prerequisites

3. Environment Variables. To enable post-mortem debugging, set:

.. code-block:: bash

    export RAY_DEBUG_POST_MORTEM=1

.. admonition:: Note
   :class: important

   Be sure to remove any legacy flags before starting Ray:

   - `RAY_DEBUG=legacy`

   - `--ray-debugger-external`

4. Configuring Breakpoints. Set up ``breakpoint()`` in your code and submit the job to the cluster; the extension will then show the breakpoint information.

   1. Insert `breakpoint()` calls into your remote functions.

   2. Submit your job to the cluster.

   The extension will detect active breakpoints and display them in VSCode. **Note:** Breakpoints are only supported inside functions decorated with `@ray.remote`.

5. Launching the Debugger. Run your job directly from the command line (do not use a `launch.json`):

.. code-block:: bash

    python job.py

6. Attaching to a Breakpoint. Once the process hits the first `breakpoint()`, click the Ray Distributed Debugger icon in the VSCode sidebar to attach the debugger.

.. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/ray/launch.png?raw=true
   :alt: Attaching VSCode debugger to Ray process

7. Debugging With Multiple breakpoint() Calls. For each subsequent task, first disconnect the current debugger session, then click the extension icon again to attach to the next breakpoint.

.. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/ray/disconnect.png?raw=true
   :alt: Disconnecting and reconnecting the debugger

Legacy Ray Debugger
~~~~~~~~~~~~~~~~~~~

1. Ray has a built-in legacy `debugger `_ that allows you to debug your distributed applications. To enable the debugger, start the Ray cluster with ``RAY_DEBUG=legacy`` and ``--ray-debugger-external``.

.. code-block:: bash

    # start head node
    RAY_DEBUG=legacy ray start --head --dashboard-host=0.0.0.0 --ray-debugger-external
    # start worker node
    RAY_DEBUG=legacy ray start --address='10.124.46.192:6379' --ray-debugger-external

2. Set up a breakpoint in your code and submit the job to the cluster. Then run ``ray debug`` to wait for the breakpoint:
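For example, a minimal job with a breakpoint inside a remote function (an illustrative script, not part of verl) looks like this:

.. code-block:: python

    import ray

    ray.init()

    @ray.remote
    def square(x):
        y = x * x
        breakpoint()  # `ray debug` (or the VSCode extension) stops here for each task
        return y

    # Each task pauses at the breakpoint until a debugger attaches.
    print(ray.get([square.remote(i) for i in range(2)]))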
.. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/ray/legacy.png?raw=true



================================================
FILE: verl_distillation/docs/workers/fsdp_workers.rst
================================================

PyTorch FSDP Backend
======================

Last updated: 02/12/2025.

We support the PyTorch FSDP backend by implementing various workers for the actor, critic, reference, rollout and reward models. We also implement the ``FSDPVLLMShardingManager`` that reshards weights between FSDP and vLLM in `fsdp_vllm.py `_.

**Pros**

- Readily supports various models.

- Users only need to implement the corresponding ``dtensor_weight_loader`` for weight synchronization between FSDP and vLLM. With ``hf_weight_loader``, users can directly use any model supported by both HF and vLLM without any code change.

- Easy to organize the forward and backward computation for each model.

**Cons**

- Poor scalability when it comes to large-scale models (e.g. Llama 70B and 405B).

- The resharding overhead between actor and rollout can be larger than with the Megatron-LM backend.

Due to its simplicity, we recommend using the FSDP backend for algorithm research and prototyping.

FSDP Workers
--------------

ActorRolloutRefWorker
^^^^^^^^^^^^^^^^^^^^^

Actor/Rollout HybridEngine
''''''''''''''''''''''''''

1. HybridEngine, Actor and Rollout initialization API.

.. code:: python

   @register(dispatch_mode=Dispatch.ONE_TO_ALL)
   def init_model(self):

``ONE_TO_ALL``: when calling the ``init_model`` function from the driver process, each worker (on a GPU) will execute the following model initialization process.

The initialization details of HybridEngine, Actor and Rollout are highlighted below:

1. ``DataParallelPPOActor`` implements the simple PPO computation logic when the model is built with FSDP, including computing log probs and updating the model.

2. ``vLLMRollout`` supports generation with vLLM. We modify the vLLM Engine and make it execute under SPMD to fit into our ``WorkerGroup`` design.

3. ``FSDPVLLMShardingManager`` is a context manager that performs the actual resharding between actor and rollout. See the `source code `_ for more information.

2. Generate sequences and recompute log probs

.. code:: python

   @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO)
   def generate_sequences(self, prompts: DataProto):

- ``Dispatch.DP_COMPUTE_PROTO``: The data will be dispatched and collected along the DP dimension.

- In this function, the rollout model performs auto-regressive generation and the actor model recomputes the old log probs for the generated responses.

3. Update actor model

.. code:: python

   @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO)
   def update_actor(self, data: DataProto):

- Update the actor model weights using the PPO & entropy loss.

ReferenceModel
''''''''''''''

1. Reference model initialization

The reference model is initialized using the same function as the actor model, without initializing the HybridEngine and Optimizer. The reference model is then also wrapped by ``DataParallelPPOActor``.

2. Compute reference log prob

.. code:: python

   @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO)
   def compute_ref_log_prob(self, data: DataProto):

- In this function, the reference model calls the compute-log-prob function in ``DataParallelPPOActor`` to compute the reference log prob.

CriticWorker and RewardWorker
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

1. Model initialization

Quite similar to the reference model. The CriticWorker additionally initializes the Optimizer.

2. Compute Values for CriticWorker
.. code:: python

   @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO)
   def compute_values(self, data: DataProto):

3. Update Critic

.. code:: python

   @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO)
   def update_critic(self, data: DataProto):

4. Compute Reward

.. code:: python

   @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO)
   def compute_rm_score(self, data: DataProto):

HybridShard
------------

We do not yet support FSDP `HybridShard`. To support it, we may need to construct a 2D device mesh and test the corresponding ``dtensor_weight_loader`` and ``hf_weight_loader`` for each model.



================================================
FILE: verl_distillation/docs/workers/megatron_workers.rst
================================================

Megatron-LM Backend
===================

Last updated: 06/24/2025.

We support the Megatron backend by implementing various workers for the actor, critic, reference, rollout and reward models. We also implement the ``3DHybridEngine`` using Megatron-LM and vLLM/SGLang in `megatron_vllm.py `_ and `megatron_sglang.py `_.

**Pros**

- Supports 5D parallelism (TP, EP, CP, DP, PP) and sequence parallelism for the best scalability and throughput.

- The 3D HybridEngine can significantly reduce peak memory usage and the weight synchronization overhead between actor and rollout.

**Cons**

- Huggingface models and Megatron checkpoints need tools for conversion.

Development Progress
--------------------

Note that [Deprecated] means that the feature is not supported in the latest version of verl. [To-Optimize] means that the feature is implemented but not yet optimized. [WIP] means that the feature is work in progress. [In-Release] means that the feature is ready and in the review process, coming at any time.

+---------------+-----------------------------------------------------------+
| [Deprecated]  | Megatron 3D Parallelism with custom models                |
+---------------+-----------------------------------------------------------+
| [Done]        | Megatron 0.11.0 ``GPTModel`` support                      |
+---------------+-----------------------------------------------------------+
| [Done]        | Megatron GRPO support                                     |
+---------------+-----------------------------------------------------------+
| [Done]        | Megatron with vLLM 0.8.2, with per-tensor weights loading |
+---------------+-----------------------------------------------------------+
| [Done]        | Megatron with Context Parallel                            |
+---------------+-----------------------------------------------------------+
| [Done]        | Qwen2MoE model support                                    |
+---------------+-----------------------------------------------------------+
| [To-Optimize] | Megatron dist Checkpoint                                  |
+---------------+-----------------------------------------------------------+
| [To-Optimize] | Huggingface and Megatron Checkpoint Converter             |
+---------------+-----------------------------------------------------------+
| [To-Optimize] | Efficient fused linear, entropy and cross entropy         |
+---------------+-----------------------------------------------------------+
| [Done]        | Megatron offload (param, grad, optimizer)                 |
+---------------+-----------------------------------------------------------+
| [Done]        | Megatron Profiler                                         |
+---------------+-----------------------------------------------------------+
| [In-Release]  | Megatron 0.12.0, TE 2.2 with vLLM 0.8.3 and Fused Attn    |
+---------------+-----------------------------------------------------------+
| [WIP]         | Moonlight/DeepSeek-V3 model support                       |
+---------------+-----------------------------------------------------------+
| [WIP]         | Expert Parallel support                                   |
+---------------+-----------------------------------------------------------+
| [WIP]         | Megatron support for dynamic batch size                  |
+---------------+-----------------------------------------------------------+
| [To-Do]       | Performance tuning                                        |
+---------------+-----------------------------------------------------------+
| [MileStone]   | Runnable with DeepSeek-V3 671B post-training              |
+---------------+-----------------------------------------------------------+

Utils of Megatron Workers
-------------------------

MegatronWorker
^^^^^^^^^^^^^^

``MegatronWorker`` is the base class of the different Megatron worker classes. In this class, the ``get_megatron_global_info`` and ``get_megatron_rank_info`` functions retrieve the 3D parallel world size and rank of each ``Worker`` running on a specific GPU. This information is used in the transfer protocol for the Megatron backend.

The following ``Worker`` classes for the different models are utilized to construct the ``WorkerGroup``.

We implement various APIs for each ``Worker`` class, decorated by ``@register(dispatch_mode=)``. These APIs can be called by the ray driver process. The data is correctly collected and dispatched following the ``dispatch_mode`` of each function. The supported dispatch modes (i.e., transfer protocols) can be found in `decorator.py `_.

ActorRolloutRefWorker
^^^^^^^^^^^^^^^^^^^^^

This class is implemented for the Actor/Rollout HybridEngine or for the reference model to initialize the model and perform computation.

Actor/Rollout HybridEngine
''''''''''''''''''''''''''

1. HybridEngine, Actor and Rollout initialization API.

.. code:: python

   @register(dispatch_mode=Dispatch.ONE_TO_ALL)
   def init_model(self):

``ONE_TO_ALL``: when calling the ``init_model`` function from the driver process, each worker (on a GPU) will execute the following model initialization process.

The initialization details of HybridEngine, Actor and Rollout are highlighted below:

1. ``MegatronPPOActor`` implements the simple PPO computation logic when the model is built with Megatron, including computing log probs and updating the model.

2. ``vLLMRollout`` supports generation with vLLM. We modify the vLLM Engine and make it execute under SPMD to fit into our ``WorkerGroup`` design.

3. ``MegatronVLLMShardingManager`` is a context manager that performs the actual resharding between actor and rollout. See the `source code `_ for more information.

.. code:: python

   # build actor model
   self.actor = MegatronPPOActor(config=self.config.actor,
                                 model_config=self.actor_model_config,
                                 megatron_config=megatron_config,
                                 actor_module=self.actor_module,
                                 actor_optimizer=self.actor_optimizer,
                                 actor_optimizer_config=self.actor_optim_config)

   # build rollout
   # rollout initialization
   rollout = vLLMRollout(actor_module=params,
                         config=self.config.rollout,
                         tokenizer=self.tokenizer,
                         model_hf_config=self.actor_model_config,
                         train_tp=mpu.get_tensor_model_parallel_world_size())

   # perform weight resharding between actor and rollout
   sharding_manager = MegatronVLLMShardingManager(module=self.hybrid_engine,
                                                  inference_engine=rollout.inference_engine,
                                                  model_config=self.actor_model_config,
                                                  layer_name_mapping=layer_name_mapping)
   ...

2. Generate sequences and recompute log probs

.. code:: python

   @register(dispatch_mode=Dispatch.MEGATRON_PP_AS_DP_PROTO)
   def generate_sequences(self, prompts: DataProto):

- ``Dispatch.MEGATRON_PP_AS_DP_PROTO``: The PP dimension of the actor model will be regarded as the DP dimension. The driver process will then dispatch and collect the data according to this reorganization.
This is because, in the HybridEngine, the actor weights, which usually use larger 3D parallel sizes, are gathered along the PP and TP dimensions. Therefore, the corresponding data should be dispatched and collected through the 3D parallel group of the rollout model, rather than the actor model. However, the world_size and rank information can only be retrieved from ``get_megatron_global_info`` and ``get_megatron_rank_info``, which record the 3D information for the actor model. Moreover, the data resharding inside the TP dimension is processed within the HybridEngine.

- In this function, the rollout model performs auto-regressive generation and the actor model recomputes the old log probs for the generated responses.

3. Update actor model

.. code:: python

   @register(dispatch_mode=Dispatch.MEGATRON_COMPUTE_PROTO)
   def update_actor(self, data: DataProto):

- ``Dispatch.MEGATRON_COMPUTE_PROTO``: The user passes data partitioned by the DP dimension. The data is dispatched to all tp/pp ranks within the same dp group, and output data is ultimately collected only from tp=0 and the last pp stage.

- Update the actor model weights using the PPO & entropy loss.

.. note:: Currently, the training Tensor Parallel Size can differ from the inference Tensor Parallel Size.

ReferenceModel
''''''''''''''

1. Reference model initialization

The reference model is initialized using the same function as the actor model, without initializing the HybridEngine and Optimizer. The reference model is then also wrapped by ``MegatronPPOActor``.

2. Compute reference log prob

.. code:: python

   @register(dispatch_mode=Dispatch.MEGATRON_COMPUTE_PROTO)
   def compute_ref_log_prob(self, data: DataProto):

- In this function, the reference model calls the compute-log-prob function in ``MegatronPPOActor`` to compute the reference log prob.

CriticWorker and RewardWorker
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

1. Model initialization

Quite similar to the reference model. The CriticWorker additionally initializes the Optimizer.

2. Compute Values for CriticWorker

.. code:: python

   @register(dispatch_mode=Dispatch.MEGATRON_COMPUTE_PROTO)
   def compute_values(self, data: DataProto):

3. Update Critic

.. code:: python

   @register(dispatch_mode=Dispatch.MEGATRON_COMPUTE_PROTO)
   def update_critic(self, data: DataProto):

4. Compute Reward

.. code:: python

   @register(dispatch_mode=Dispatch.MEGATRON_COMPUTE_PROTO)
   def compute_rm_score(self, data: DataProto):

Utils of Train Optimization
---------------------------

Offload
^^^^^^^

When resources are tight, the offload method can lower GPU memory usage, helping training and inference frameworks work well under verl. It moves parameters, gradients, and optimizer states to CPU memory and only loads them back to the GPU when needed (see the sketch at the end of this file).

If you want to use offload, you can add the following parameters for the actor and ref separately.

.. code:: python

   # For the actor
   actor_rollout_ref.actor.megatron.param_offload=True \
   actor_rollout_ref.actor.megatron.grad_offload=True \
   actor_rollout_ref.actor.megatron.optimizer_offload=True \

   # For the ref w/o grad and optimizer
   actor_rollout_ref.ref.megatron.param_offload=True \

For the critic, you can include these parameters.

.. code:: python

   # For the critic
   critic.megatron.param_offload=True \
   critic.megatron.grad_offload=True \
   critic.megatron.optimizer_offload=True \

Related MCore Document
----------------------

There is also a detailed document on using MCore to train different kinds of models; please refer to the `MCore Document `_.
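To make the offload mechanism above concrete, here is a minimal, framework-agnostic sketch of parameter offload in plain PyTorch. This only illustrates the idea; it is not verl's actual Megatron offload implementation:

.. code:: python

   import torch

   def offload_params_to_cpu(module: torch.nn.Module):
       """Move parameter storage to CPU to free GPU memory between uses."""
       for p in module.parameters():
           p.data = p.data.to("cpu", non_blocking=True)
       torch.cuda.empty_cache()

   def load_params_to_gpu(module: torch.nn.Module, device: str = "cuda"):
       """Bring parameters back to the GPU right before they are needed."""
       for p in module.parameters():
           p.data = p.data.to(device, non_blocking=True)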
================================================
FILE: verl_distillation/docs/workers/model_engine.rst
================================================

Model Engine
============

.. _vermouth: https://github.com/vermouth1992

Author: `Chi Zhang `_

Last updated: 09/25/2025.

Current Support Matrix
----------------------

+----------+-----------+--------------+-------------+--------------------------+
| Backends | Model     | Scalability  | Model       | Pain points              |
|          | Supported |              | Definition  |                          |
+==========+===========+==============+=============+==========================+
| FSDP     | Day 1     | - Dense is OK| Huggingface | Monkey patch can be      |
| +        | support   |              | + monkey    | easily impacted by the   |
| ulysses  | HF model  | - MoE is bad | patch       | transformers version     |
+----------+-----------+--------------+-------------+--------------------------+
| MCore    | Limited   | Best         | GPTModel    | Supporting new models is |
|          |           |              | (One model  | difficult                |
|          |           |              | for all)    |                          |
+----------+-----------+--------------+-------------+--------------------------+

- We monkey patch the attention function to support ulysses.

- We monkey patch VLM models to support FSDP with mixed data with and without images.

Class Hierarchy
---------------

Note that all the workers and trainers run in **SPMD** mode. The SFT/DPO/RM trainer is directly invoked by ``torchrun``. The Actor/Critic worker can also be invoked by a RayWorkerGroup and provides APIs to a single controller.

- Base engine level: implements model init, optimizer init, lr scheduler init, sharding, and the checkpoint manager.

- Full engine level: subclasses the base engine and implements ``forward_step``.

- Worker/SPMD trainer level: **engine agnostic**; implements training logic using abstract engine APIs.

The RL trainer utilizes workers to construct the HybridFlow program. This is out of the scope of the model engine.

Existing Model Types
--------------------

========== ====================== ======================
Model type Language model         Value model
========== ====================== ======================
Input      text/image/video/audio text/image/video/audio
Output     logits for next token  logits as value
========== ====================== ======================

Currently, we have two model types: language model and value model. We expect to expand the category to include the Qwen-Omni family (output both text and audio) and VLA models.

Data Format
-----------

Currently, verl adopts a left-right padding data format in the RL trainer. This creates massive padding when the discrepancy between response lengths is large. We will start to implement a no-padding format throughout the whole system.

.. image:: https://github.com/vermouth1992/verl-data/blob/master/images/data_format.png?raw=true
   :alt: Data Format

Here is the migration plan:

- Implement the no-padding format in the engine.

- Add a transformation layer in the Actor/Critic worker.

- Replace the Actor/Critic worker in the RL trainer.

- Implement no-padding throughout the whole system.

Checkpoint System
-----------------

.. image:: https://github.com/vermouth1992/verl-data/blob/master/images/verl-ckpt.png?raw=true
   :alt: Model Engine Checkpoint System

The engine constructs the model using the huggingface config, then loads weights from the huggingface checkpoint. If the engine directly uses the huggingface model definition, it can use the functions provided by ``transformers``. Otherwise, each engine has to write its own checkpoint load logic (e.g., `mbridge `__).
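Taken together with the save/load/merge duties described in the next paragraph, the engine-side checkpoint contract amounts to an interface roughly like the sketch below. The class and method names here are illustrative assumptions, not verl's actual API; the tentative real API is linked in the API section:

.. code:: python

   from abc import ABC, abstractmethod

   class BaseEngine(ABC):
       """Illustrative sketch of an engine checkpoint contract (not verl's actual API)."""

       @abstractmethod
       def init_model(self, hf_model_path: str):
           """Construct the model from the huggingface config, then load HF weights."""

       @abstractmethod
       def save_checkpoint(self, local_path: str):
           """Save an intermediate sharded checkpoint: model, optimizer, lr scheduler."""

       @abstractmethod
       def load_checkpoint(self, local_path: str):
           """Restore the sharded state written by save_checkpoint."""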
During model training, each engine has to implement ``save_checkpoint`` and ``load_checkpoint``, which save/load an intermediate sharded checkpoint including the model, optimizer and lr scheduler states. Each engine also has to implement a checkpoint merge script that merges the intermediate sharded checkpoint back to huggingface format.

API
---

A tentative model engine API can be found at: https://github.com/volcengine/verl/blob/main/verl/workers/engine/base.py#L24

Extension
---------

Add a new backend
~~~~~~~~~~~~~~~~~

- Start a new folder under ``verl/workers/engine``. Then, implement ``transformer_impl.py``. If you want to implement a non-transformer model, please contact us in advance.

- Add the engine config to the GSM8k SFT trainer script: https://github.com/volcengine/verl/blob/main/tests/special_e2e/sft/run_sft_engine_gsm8k.sh

- Invoke the tests with your backend: https://github.com/volcengine/verl/blob/main/tests/special_e2e/sft/test_sft_engine_all.sh. This test script runs various backends and configurations, and compares the loss and grad norm of the first step to make sure they are close.

Add a new model type
~~~~~~~~~~~~~~~~~~~~

- This is mainly reserved for models whose output is not just text (e.g., Qwen3-Omni). Please discuss with us before you proceed.



================================================
FILE: verl_distillation/docs/workers/ray_trainer.rst
================================================

PPO Ray Trainer
===============

Last updated: 02/12/2025.

We implement the ``RayPPOTrainer``, a trainer that runs on the driver process on a single CPU/GPU node (CPU by default). The ``RayPPOTrainer`` includes three core functions for data preparation, WorkerGroup initialization and the PPO training loop.

Data Preparation
----------------

The ``RayPPOTrainer``, as a single process, is responsible for loading a complete batch of samples (prompts) from the dataset and then dispatching them to the different worker_groups running on different GPUs.

To generalize the data loading, we implement the ``RLHFDataset`` class to load the preprocessed parquet files, apply chat templates to the prompts, add padding, truncate prompts that exceed the max prompt length, and then tokenize.

.. code:: python

   self.train_dataset = RLHFDataset(data_files=self.config.data.train_files,
                                    tokenizer=self.tokenizer,
                                    config=self.config.data)

Then, the dataloader will iterate over the dataset with the PPO mini batch size.

WorkerGroup Initialization
--------------------------

We first introduce a basic implementation of initializing the ``WorkerGroup`` of the actor model on a given set of GPUs.

.. code:: python

   # max_colocate_count means the number of WorkerGroups (i.e. processes) in each RayResourcePool
   # For FSDP backend, we recommend using max_colocate_count=1 to merge all WorkerGroups into one.
   # For Megatron backend, we recommend using max_colocate_count>1, which can utilize different WorkerGroups for different models
   resource_pool = RayResourcePool(process_on_nodes=[config.trainer.n_gpus_per_node] * config.trainer.nnodes,
                                   use_gpu=True,
                                   max_colocate_count=1)

   # define actor rollout cls to be init on remote
   actor_rollout_cls = RayClassWithInitArgs(cls=ActorRolloutWorker)

   # define actor_rollout worker group
   actor_rollout_worker_group = MegatronRayWorkerGroup(resource_pool=resource_pool,
                                                       ray_cls_with_init=actor_rollout_cls,
                                                       default_megatron_kwargs=config.actor_rollout.megatron)

Different WorkerGroups, like ``actor_rollout_worker_group``, ``critic_worker_group`` and ``ref_worker_group``, lie on separate processes in the above implementation.
The driver process can then call the distributed compute functions within the ``actor_rollout_worker_group`` and other roles to construct the RL training loop.

For models colocated on the same set of GPUs, we further provide a fine-grained optimization that merges the ``worker_group`` of different roles into the same process. This optimization can save the redundant CUDA/distributed context in different processes.

.. code:: python

   # initialize WorkerGroup
   # NOTE: if you want to use a different resource pool for each role, which can support different parallel sizes,
   # you should not use `create_colocated_worker_cls`. Instead, directly pass different resource pools to different worker groups.
   # See TODO(url) for more information.
   all_wg = {}
   for resource_pool, class_dict in self.resource_pool_to_cls.items():
       worker_dict_cls = create_colocated_worker_cls(class_dict=class_dict)
       wg_dict = self.ray_worker_group_cls(resource_pool=resource_pool, ray_cls_with_init=worker_dict_cls)
       spawn_wg = wg_dict.spawn(prefix_set=class_dict.keys())
       all_wg.update(spawn_wg)

   if self.use_critic:
       self.critic_wg = all_wg['critic']
       self.critic_wg.init_model()

   if self.use_reference_policy:
       self.ref_policy_wg = all_wg['ref']
       self.ref_policy_wg.init_model()

   if self.use_rm:
       self.rm_wg = all_wg['rm']
       self.rm_wg.init_model()

   # we should create rollout at the end so that vllm can have a better estimation of kv cache memory
   self.actor_rollout_wg = all_wg['actor_rollout']
   self.actor_rollout_wg.init_model()

.. note:: For the Megatron backend, if we merge the ``worker_groups`` into the same processes, all the roles will utilize the same 3D parallel size. To optimize this, we may need to maintain several 3D process groups for each role in the same distributed context. If you want to use different 3D parallel sizes for different roles, please follow the similar architecture of the first code block to initialize each role's ``worker_group``.

PPO Training Loop
-----------------

We implement the PPO training loop by calling the functions in the worker_group of each role. The input and output data of each function is a ``DataProto`` object implemented in `protocol.py `_. In the training loop, the trainer will dispatch/collect data to/from the different GPUs following the transfer protocols wrapped in the workers' functions. The computation of PPO micro batches is processed in the ``update_actor`` and ``update_critic`` functions.

To extend to other RLHF algorithms, such as DPO and GRPO, please refer to :doc:`../advance/dpo_extension`.

.. code:: python

   def fit(self):
       """
       The training loop of PPO.
       The driver process only needs to call the compute functions of the worker group through RPC to construct the PPO dataflow.
       The lightweight advantage computation is done on the driver process.
       """
       from verl.utils.tracking import Tracking
       from omegaconf import OmegaConf

       logger = Tracking(project_name=self.config.trainer.project_name,
                         experiment_name=self.config.trainer.experiment_name,
                         default_backend=self.config.trainer.logger,
                         config=OmegaConf.to_container(self.config, resolve=True))

       global_steps = 0

       # perform validation before training
       # currently, we only support validation using the reward_function.
       if self.val_reward_fn is not None:
           val_metrics = self._validate()
           pprint(f'Initial validation metrics: {val_metrics}')

       for epoch in range(self.config.trainer.total_epochs):
           for batch_dict in self.train_dataloader:
               metrics = {}

               batch: DataProto = DataProto.from_single_dict(batch_dict)
               # batch = batch.to('cuda')

               # pop those keys for generation
               gen_batch = batch.pop(batch_keys=['input_ids', 'attention_mask', 'position_ids'])

               # generate a batch
               with Timer(name='gen', logger=None) as timer:
                   gen_batch_output = self.actor_rollout_wg.generate_sequences(gen_batch)
               metrics['timing/gen'] = timer.last

               batch = batch.union(gen_batch_output)

               if self.use_reference_policy:
                   # compute reference log_prob
                   with Timer(name='ref', logger=None) as timer:
                       ref_log_prob = self.ref_policy_wg.compute_ref_log_prob(batch)
                       batch = batch.union(ref_log_prob)
                   metrics['timing/ref'] = timer.last

               # compute values
               with Timer(name='values', logger=None) as timer:
                   values = self.critic_wg.compute_values(batch)
                   batch = batch.union(values)
               metrics['timing/values'] = timer.last

               with Timer(name='adv', logger=None) as timer:
                   # compute scores. Support both model and function-based.
                   # We first compute the scores using reward model. Then, we call reward_fn to combine
                   # the results from reward model and rule-based results.
                   if self.use_rm:
                       # we first compute reward model score
                       reward_tensor = self.rm_wg.compute_rm_score(batch)
                       batch = batch.union(reward_tensor)

                   # we combine with rule-based rm
                   reward_tensor = self.reward_fn(batch)
                   batch.batch['token_level_scores'] = reward_tensor

                   # compute rewards. apply_kl_penalty if available
                   batch, kl_metrics = apply_kl_penalty(batch,
                                                        kl_ctrl=self.kl_ctrl_in_reward,
                                                        kl_penalty=self.config.algorithm.kl_penalty)
                   metrics.update(kl_metrics)

                   # compute advantages, executed on the driver process
                   batch = compute_advantage(batch,
                                             self.config.algorithm.gamma,
                                             self.config.algorithm.lam,
                                             adv_estimator=self.config.algorithm.adv_estimator)
               metrics['timing/adv'] = timer.last

               # update critic
               if self.use_critic:
                   with Timer(name='update_critic', logger=None) as timer:
                       critic_output = self.critic_wg.update_critic(batch)
                   metrics['timing/update_critic'] = timer.last
                   critic_output_metrics = reduce_metrics(critic_output.meta_info['metrics'])
                   metrics.update(critic_output_metrics)

               # implement critic warmup
               if self.config.trainer.critic_warmup <= global_steps:
                   # update actor
                   with Timer(name='update_actor', logger=None) as timer:
                       actor_output = self.actor_rollout_wg.update_actor(batch)
                   metrics['timing/update_actor'] = timer.last
                   actor_output_metrics = reduce_metrics(actor_output.meta_info['metrics'])
                   metrics.update(actor_output_metrics)

               # validate
               if self.val_reward_fn is not None and (global_steps + 1) % self.config.trainer.test_freq == 0:
                   with Timer(name='testing', logger=None) as timer:
                       val_metrics: dict = self._validate()
                       val_metrics = {f'val/{key}': val for key, val in val_metrics.items()}
                   metrics['timing/testing'] = timer.last
                   metrics.update(val_metrics)

               # collect metrics
               data_metrics = compute_data_metrics(batch=batch)
               metrics.update(data_metrics)

               # TODO: make a canonical logger that supports various backends
               logger.log(data=metrics, step=global_steps)

               if self.config.trainer.save_freq > 0 and (global_steps + 1) % self.config.trainer.save_freq == 0:
                   actor_local_path = os.path.join(self.config.trainer.default_local_dir, 'actor', f'global_step_{global_steps}')
                   actor_remote_path = os.path.join(self.config.trainer.default_hdfs_dir, 'actor')
                   self.actor_rollout_wg.save_checkpoint(actor_local_path, actor_remote_path)
               if self.use_critic:
                   critic_local_path = os.path.join(self.config.trainer.default_local_dir, 'critic', f'global_step_{global_steps}')
                   critic_remote_path = os.path.join(self.config.trainer.default_hdfs_dir, 'critic')
                   self.critic_wg.save_checkpoint(critic_local_path, critic_remote_path)

               global_steps += 1

       # perform validation after training
       if self.val_reward_fn is not None:
           val_metrics = self._validate()
           pprint(f'Final validation metrics: {val_metrics}')



================================================
FILE: verl_distillation/docs/workers/sglang_worker.rst
================================================

SGLang Backend
==============

Last updated: 05/31/2025.

**Authored by the SGLang RL Team, listed alphabetically by last name**

`Jingyi Chen `_, `Yitong Guan `_, `Zhuobin Huang `_, `Jiajun Li `_, `Ji Li `_, `Shenggui Li `_, `Junrong Lin `_, `Xiang Long `_, `Rui Lu `_, `Jin Pan `_, `Shuai Shi `_, `Yushen Su `_, `Xinyuan Tong `_, `Chendong Wang `_, `Hanchen Zhang `_, `Haoran Wang `_, `Yongan Xiang `_, `Chengxing Xie `_, `Yuhao Yang `_, `Jinwei Yao `_, `Qiaolin Yu `_, `Yuzhen Zhou `_, `Chenyang Zhao `_

Introduction
------------

`SGLang `_ is an open-source, state-of-the-art inference engine, fully adopted by xAI to support all inference needs of Grok during research and serving. Currently, verl fully supports using SGLang as the inference engine during the rollout phase. As a rollout engine, SGLang provides the same feature coverage as vLLM, including memory-saving and multi-node rollout features. After installing verl and SGLang, simply add ``actor_rollout_ref.rollout.name=sglang`` to the startup script to seamlessly switch between the two inference frameworks.

In addition, the SGLang team is actively working on supporting features such as Multi-Turn Agentic RL, VLM RLHF, Server-Based RLHF, and Partial Rollout. You can track the related development progress in the `Tracking Roadmap `_.

Installation
------------

Always use the following commands to install SGLang with verl.

.. code-block:: bash

    pip install --upgrade pip
    # Currently 0.4.8, subject to updates at any time, please refer to the latest version specified in `setup.py`
    pip install -e ".[sglang]"

You can check that the following dependencies are in your environment:

.. note::

   - **PyTorch**: 2.6.0+cu124

   - **CUDA**: 12.4

   - **flashinfer-python**: 0.2.5+cu124torch2.6

   - **SGLang**: 0.4.6.post5

   - **sgl-kernel**: 0.1.4

Using SGLang as the Inference Backend for PPO Training on a Single Machine
---------------------------------------------------------------------------

We use Qwen/Qwen2-7B-Instruct on the gsm8k dataset for a simple test.

1. Run the following command to prepare the gsm8k dataset:

.. code-block:: bash

    python3 examples/data_preprocess/gsm8k.py

2. Run the following script to conduct a PPO experiment on a single machine with 4 GPUs:
.. code-block:: bash

    export SGL_DISABLE_TP_MEMORY_INBALANCE_CHECK=True
    PYTHONUNBUFFERED=1 python3 -m verl.trainer.main_ppo \
        data.train_files=$HOME/data/gsm8k/train.parquet \
        data.val_files=$HOME/data/gsm8k/test.parquet \
        data.train_batch_size=4096 \
        data.max_prompt_length=4096 \
        data.max_response_length=4096 \
        actor_rollout_ref.rollout.name=sglang \
        actor_rollout_ref.model.path=Qwen/Qwen2-7B-Instruct \
        actor_rollout_ref.actor.optim.lr=1e-6 \
        actor_rollout_ref.actor.ppo_mini_batch_size=64 \
        actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \
        actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=8 \
        actor_rollout_ref.model.enable_gradient_checkpointing=True \
        actor_rollout_ref.actor.fsdp_config.param_offload=True \
        actor_rollout_ref.actor.fsdp_config.optimizer_offload=True \
        actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
        actor_rollout_ref.rollout.gpu_memory_utilization=0.8 \
        actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \
        critic.optim.lr=1e-5 \
        critic.model.path=Qwen/Qwen2-7B-Instruct \
        critic.ppo_micro_batch_size_per_gpu=4 \
        critic.model.fsdp_config.param_offload=True \
        critic.model.fsdp_config.optimizer_offload=True \
        algorithm.kl_ctrl.kl_coef=0.001 \
        trainer.logger=console \
        trainer.val_before_train=False \
        trainer.n_gpus_per_node=4 \
        trainer.nnodes=1 \
        trainer.save_freq=-1 \
        trainer.test_freq=10 \
        trainer.total_epochs=15 2>&1 | tee verl_demo.log

Why export SGL_DISABLE_TP_MEMORY_INBALANCE_CHECK?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

1. ``verl`` initializes a ``SGLangRollout`` module during rollout, which is used to evaluate/generate samples.

2. ``SGLangRollout`` will initialize ``Engine``, and further initialize a ``torch.distributed.DeviceMesh``, used to support Tensor Parallelism (TP).

3. ``DeviceMesh.init()`` internally checks the free GPU memory of all participating devices. If the difference is too large (more than ~10%), it directly reports an error to avoid initialization failures or deadlocks.

Why might there be inconsistent GPU memory?
"""""""""""""""""""""""""""""""""""""""""""

**1. Ray Distributed Actors load the model at different times**

``verl`` uses Ray-based multi-process, multi-GPU concurrent training. Each ``WorkerDict`` may be called at different times:

.. code-block:: python

    self.rollout = SGLangRollout(...)

Different workers initialize the model at different times → different memory usage.

**2. Delayed initialization causes memory bias**

Some workers start model loading/inference (e.g., ``generate_sequences()``, ``compute_log_prob()``) earlier than others. Early workers have already used up GPU memory while late workers still have empty memory → a memory difference appears.

**3. SGLang's TP init uses "all-device broadcast", but there's no uniform release timing**

Although ``SGLangRollout`` may only involve a subset of GPUs, its ``Engine`` initialization calls ``torch.distributed.init_process_group()`` and broadcasts weights, so:

- Non-rollout GPUs also join the communication.

- Later on, the ``DeviceMesh`` init will fail due to "inconsistent memory".

**4. Different FSDP/TP loading behaviors also lead to mismatch**

If using:

.. code-block:: bash

    actor.fsdp_config.param_offload=True
    ref.fsdp_config.param_offload=True

Then some workers keep params on CPU while others have already sharded them to GPU → this leads to an asymmetric memory layout.
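For intuition, the check that trips is essentially a cross-rank comparison of free GPU memory. The following is only an illustrative sketch of that kind of check, not SGLang's or PyTorch's actual code:

.. code-block:: python

    import torch
    import torch.distributed as dist

    def check_memory_balance(tolerance: float = 0.10):
        """Abort if free GPU memory differs by more than ~10% across ranks (illustrative)."""
        free_bytes, _total = torch.cuda.mem_get_info()
        gathered = [None] * dist.get_world_size()
        dist.all_gather_object(gathered, free_bytes)
        lo, hi = min(gathered), max(gathered)
        if hi > 0 and (hi - lo) / hi > tolerance:
            raise RuntimeError(f"Free GPU memory is imbalanced across ranks: {gathered}")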
Using SGLang as the Inference Backend for PPO Training Across Multiple Machines
--------------------------------------------------------------------------------

SGLang also supports running verl's Ray-based cross-machine inference in IPv4 and IPv6 scenarios. In the script below, we use TP=16 for cross-machine inference. Suppose we have two interconnected machines: node0 with IP 10.94.16.4 and node1 with IP 10.94.16.5.

1. Start Ray on node0:

.. code-block:: bash

    ray start --head --dashboard-host=0.0.0.0

You will see the following prompt:

.. code-block:: bash

    Usage stats collection is enabled. To disable this, add `--disable-usage-stats` to the command that starts the cluster, or run the following command: `ray disable-usage-stats` before starting the cluster. See https://docs.ray.io/en/master/cluster/usage-stats.html for more details.

    Local node IP: 10.94.16.4

    --------------------
    Ray runtime started.
    --------------------

    Next steps
      To add another node to this Ray cluster, run
        ray start --address='10.94.16.4:6379'

2. Have node1 join the Ray cluster:

Run the following command on node1:

.. code-block:: bash

    ray start --address='10.94.16.4:6379'

Run the following command to confirm that the Ray cluster now has two nodes:

.. code-block:: bash

    ray status

You can see that the cluster has two nodes with 16 GPUs:

.. code-block:: bash

    ======== Autoscaler status: 2025-04-09 09:25:37.694016 ========
    Node status
    ---------------------------------------------------------------
    Active:
     1 node_ef382ffd687d8f6b060c1b68e63ada7341b936fe5b1901dd04de1027
     1 node_1eb4d7d07e793114c23a89d1a41f1f76acf6ef5b35af844a4ee8e4ba
    Pending:
     (no pending nodes)
    Recent failures:
     (no failures)

    Resources
    ---------------------------------------------------------------
    Usage:
     0.0/360.0 CPU
     0.0/16.0 GPU
     0B/3.39TiB memory
     0B/372.53GiB object_store_memory

3. Run the following script to train meta-llama/Llama-3.1-8B-Instruct with TP=16 across 2 machines using 16 GPUs:
.. code-block:: bash

    DATA_DIR=$HOME/data/gsm8k

    python3 -m verl.trainer.main_ppo \
        actor_rollout_ref.rollout.name=sglang \
        data.train_files=$DATA_DIR/train.parquet \
        data.val_files=$DATA_DIR/test.parquet \
        data.train_batch_size=4096 \
        data.max_prompt_length=4096 \
        data.max_response_length=4096 \
        actor_rollout_ref.model.path=meta-llama/Llama-3.1-8B-Instruct \
        actor_rollout_ref.actor.optim.lr=1e-6 \
        actor_rollout_ref.model.use_remove_padding=True \
        actor_rollout_ref.actor.ppo_mini_batch_size=64 \
        actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=16 \
        actor_rollout_ref.model.enable_gradient_checkpointing=True \
        actor_rollout_ref.actor.fsdp_config.param_offload=True \
        actor_rollout_ref.actor.fsdp_config.optimizer_offload=True \
        actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \
        actor_rollout_ref.rollout.tensor_model_parallel_size=16 \
        actor_rollout_ref.rollout.gpu_memory_utilization=0.8 \
        actor_rollout_ref.rollout.free_cache_engine=True \
        actor_rollout_ref.ref.log_prob_micro_batch_size=16 \
        actor_rollout_ref.ref.fsdp_config.param_offload=True \
        critic.optim.lr=1e-5 \
        critic.model.use_remove_padding=True \
        critic.model.path=meta-llama/Llama-3.1-8B-Instruct \
        critic.model.enable_gradient_checkpointing=True \
        critic.ppo_micro_batch_size=16 \
        critic.model.fsdp_config.param_offload=True \
        critic.model.fsdp_config.optimizer_offload=True \
        algorithm.kl_ctrl.kl_coef=0.001 \
        trainer.critic_warmup=0 \
        trainer.logger=console \
        trainer.val_before_train=True \
        trainer.n_gpus_per_node=8 \
        trainer.nnodes=2 \
        trainer.save_freq=-1 \
        trainer.test_freq=10 \
        trainer.total_epochs=15 2>&1 | tee verl_demo.log



================================================
FILE: verl_distillation/examples/data_preprocess/aime2024_multiturn_w_tool.py
================================================

# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023-2024 SGLang Team
# Copyright 2025 ModelBest Inc. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Preprocess the AIME 2024 dataset to multiturn format
"""

import argparse
import os

import datasets

from verl.utils.hdfs_io import copy, makedirs

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--local_dir", default=None, help="The save directory for the preprocessed dataset.")
    parser.add_argument("--hdfs_dir", default=None)
    parser.add_argument("--local_dataset_path", default=None, help="The local path to the raw dataset, if it exists.")
    parser.add_argument(
        "--local_save_dir", default="~/data/retool_aime2024", help="The save directory for the preprocessed dataset."
    )
    args = parser.parse_args()

    local_dataset_path = args.local_dataset_path
    data_path = "BytedTsinghua-SIA/AIME-2024"

    if local_dataset_path is not None:
        dataset = datasets.load_dataset(local_dataset_path, "default")
    else:
        dataset = datasets.load_dataset(data_path, "default")

    train_dataset = dataset["train"]

    # attach tool kwargs (including the ground truth) to each data item's extra_info
    def make_map_fn(split):
        def process_fn(example, idx):
            orig_extra_info = example.pop("extra_info")
            extra_info = orig_extra_info.copy()
            extra_info["need_tools_kwargs"] = True
            extra_info["tools_kwargs"] = {
                "code_interpreter": {
                    "create_kwargs": {
                        "ground_truth": example["reward_model"]["ground_truth"],
                    },
                },
            }
            example["extra_info"] = extra_info
            return example

        return process_fn

    train_dataset = train_dataset.map(function=make_map_fn("train"), with_indices=True)

    hdfs_dir = args.hdfs_dir
    local_save_dir = args.local_dir
    if local_save_dir is not None:
        print("Warning: Argument 'local_dir' is deprecated. Please use 'local_save_dir' instead.")
    else:
        local_save_dir = args.local_save_dir

    train_dataset.to_parquet(os.path.join(local_save_dir, "train.parquet"))

    if hdfs_dir is not None:
        makedirs(hdfs_dir)
        copy(src=local_save_dir, dst=hdfs_dir)



================================================
FILE: verl_distillation/examples/data_preprocess/dapo_multiturn_w_tool.py
================================================

# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023-2024 SGLang Team
# Copyright 2025 ModelBest Inc. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Preprocess the DAPO-Math-17k dataset to multiturn format
"""

import argparse
import os

import datasets

from verl.utils.hdfs_io import copy, makedirs

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--local_dir", default=None, help="The save directory for the preprocessed dataset.")
    parser.add_argument("--hdfs_dir", default=None)
    parser.add_argument("--local_dataset_path", default=None, help="The local path to the raw dataset, if it exists.")
    parser.add_argument(
        "--local_save_dir", default="~/data/retool_dapo", help="The save directory for the preprocessed dataset."
    )
    args = parser.parse_args()

    local_dataset_path = args.local_dataset_path
    data_path = "BytedTsinghua-SIA/DAPO-Math-17k"

    if local_dataset_path is not None:
        dataset = datasets.load_dataset(local_dataset_path, "default")
    else:
        dataset = datasets.load_dataset(data_path, "default")

    train_dataset = dataset["train"]

    # attach tool kwargs (including the ground truth) to each data item's extra_info
    def make_map_fn(split):
        def process_fn(example, idx):
            orig_extra_info = example.pop("extra_info")
            extra_info = orig_extra_info.copy()
            extra_info["need_tools_kwargs"] = True
            extra_info["tools_kwargs"] = {
                "code_interpreter": {
                    "create_kwargs": {
                        "ground_truth": example["reward_model"]["ground_truth"],
                    },
                },
            }
            example["extra_info"] = extra_info
            return example

        return process_fn

    train_dataset = train_dataset.map(function=make_map_fn("train"), with_indices=True)

    hdfs_dir = args.hdfs_dir
    local_save_dir = args.local_dir
    if local_save_dir is not None:
        print("Warning: Argument 'local_dir' is deprecated. Please use 'local_save_dir' instead.")
    else:
        local_save_dir = args.local_save_dir

    train_dataset.to_parquet(os.path.join(local_save_dir, "train.parquet"))

    if hdfs_dir is not None:
        makedirs(hdfs_dir)
        copy(src=local_save_dir, dst=hdfs_dir)



================================================
FILE: verl_distillation/examples/data_preprocess/full_hh_rlhf.py
================================================

# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
- Preprocess data and split the training set into 75% for training the RM and 25% for validating the RM.
- All the training data is used to train SFT and RL.
- Both chosen and rejected is used to train SFT """ import argparse import os import pandas as pd from datasets import load_dataset from tqdm.auto import tqdm from verl.utils.fs import copy, makedirs def generate_sft_dataset(target_hdfs_path_dir, local_dir="~/data/full_hh_rlh/sft", local_dataset_path=None): if local_dataset_path is not None: dataset = load_dataset(local_dataset_path) else: dataset = load_dataset("Dahoas/full-hh-rlhf") output = {"prompt": [], "response": []} for data in tqdm(dataset["train"]): # add chosen output["prompt"].append(data["prompt"]) output["response"].append(data["chosen"]) # add rejection output["prompt"].append(data["prompt"]) output["response"].append(data["rejected"]) df = pd.DataFrame(output) local_dir = os.path.expanduser(local_dir) os.makedirs(local_dir, exist_ok=True) local_path = os.path.join(local_dir, "train.parquet") df.to_parquet(path=local_path) if target_hdfs_path_dir is not None: hdfs_dir = target_hdfs_path_dir + "/" + "train.parquet" makedirs(hdfs_dir) copy(local_path, hdfs_dir) def generate_rm_dataset(target_hdfs_path_dir, local_dir="~/data/full_hh_rlh/rm", local_dataset_path=None): if local_dataset_path is not None: train_dataset = load_dataset(local_dataset_path, split="train[:75%]") test_dataset = load_dataset(local_dataset_path, split="train[-25%:]") else: train_dataset = load_dataset("Dahoas/full-hh-rlhf", split="train[:75%]") test_dataset = load_dataset("Dahoas/full-hh-rlhf", split="train[-25%:]") local_dir = os.path.expanduser(local_dir) os.makedirs(local_dir, exist_ok=True) for dataset, name in zip([train_dataset, test_dataset], ["train", "test"], strict=True): output = {"prompt": [], "chosen": [], "rejected": []} for data in tqdm(dataset): # add chosen output["prompt"].append(data["prompt"]) output["chosen"].append(data["chosen"]) output["rejected"].append(data["rejected"]) df = pd.DataFrame(output) local_path = os.path.join(local_dir, name + ".parquet") df.to_parquet(path=local_path) if target_hdfs_path_dir is not None: hdfs_dir = target_hdfs_path_dir + "/" + name + ".parquet" makedirs(hdfs_dir) copy(local_path, hdfs_dir) def generate_rl_dataset(target_hdfs_path_dir, local_dir="~/data/full_hh_rlhf/rl", local_dataset_path=None): if local_dataset_path is not None: dataset = load_dataset(local_dataset_path) else: dataset = load_dataset("Dahoas/full-hh-rlhf") train_dataset = dataset["train"] data_source = "Dahoas/full-hh-rlhf" # add a row to each data item that represents a unique id def make_map_fn(split): def process_fn(example, idx): prompt = example.pop("prompt") response = example.pop("response") data = { "data_source": data_source, "prompt": [{"role": "user", "content": prompt}], "ability": "alignment", "reward_model": { "style": "model", "ground_truth": response, # should not be used }, "extra_info": {"split": split, "index": idx}, } return data return process_fn train_dataset = train_dataset.map(function=make_map_fn("train"), with_indices=True) local_dir = os.path.expanduser(local_dir) local_path = os.path.join(local_dir, "train.parquet") train_dataset.to_parquet(local_path) if target_hdfs_path_dir is not None: hdfs_dir = target_hdfs_path_dir + "/" + "train.parquet" makedirs(hdfs_dir) copy(local_path, hdfs_dir) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--split", type=str, choices=["sft", "rm", "rl"], required=True) parser.add_argument("--local_dir", default=None, help="The save directory for the preprocessed dataset.") parser.add_argument("--hdfs_dir", type=str, required=False, 
default=None) parser.add_argument("--local_dataset_path", default=None, help="The local path to the raw dataset, if it exists.") parser.add_argument( "--local_save_dir", type=str, default="~/data/full_hh_rlhf", help="The save directory for the preprocessed dataset.", ) args = parser.parse_args() local_save_dir = args.local_dir if local_save_dir is not None: print("Warning: Argument 'local_dir' is deprecated. Please use 'local_save_dir' instead.") else: local_save_dir = args.local_save_dir if args.split == "sft": generate_sft_dataset(args.hdfs_dir, os.path.join(local_save_dir, args.split), args.local_dataset_path) elif args.split == "rm": generate_rm_dataset(args.hdfs_dir, os.path.join(local_save_dir, args.split), args.local_dataset_path) elif args.split == "rl": generate_rl_dataset(args.hdfs_dir, os.path.join(local_save_dir, args.split), args.local_dataset_path) else: raise NotImplementedError ================================================ FILE: verl_distillation/examples/data_preprocess/geo3k.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Preprocess the Geometry3k dataset to parquet format """ import argparse import os import datasets from verl.utils.hdfs_io import copy, makedirs if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--local_dir", default=None) parser.add_argument("--hdfs_dir", default=None) parser.add_argument("--local_dataset_path", default=None, help="The local path to the raw dataset, if it exists.") parser.add_argument( "--local_save_dir", default="~/data/geo3k", help="The save directory for the preprocessed dataset." ) args = parser.parse_args() local_dataset_path = args.local_dataset_path data_source = "hiyouga/geometry3k" if local_dataset_path is not None: dataset = datasets.load_dataset( local_dataset_path, ) else: dataset = datasets.load_dataset( data_source, ) train_dataset = dataset["train"] test_dataset = dataset["test"] instruction_following = ( r"You FIRST think about the reasoning process as an internal monologue and then provide the final answer. " r"The reasoning process MUST BE enclosed within tags. " r"The final answer MUST BE put in \boxed{}." 
) # add a row to each data item that represents a unique id def make_map_fn(split): def process_fn(example, idx): problem = example.pop("problem") prompt = problem + " " + instruction_following answer = example.pop("answer") images = example.pop("images") data = { "data_source": data_source, "prompt": [ { "role": "user", "content": prompt, } ], "images": images, "ability": "math", "reward_model": {"style": "rule", "ground_truth": answer}, "extra_info": { "split": split, "index": idx, "answer": answer, "question": problem, }, } return data return process_fn train_dataset = train_dataset.map(function=make_map_fn("train"), with_indices=True, num_proc=8) test_dataset = test_dataset.map(function=make_map_fn("test"), with_indices=True, num_proc=8) hdfs_dir = args.hdfs_dir local_save_dir = args.local_dir if local_save_dir is not None: print("Warning: Argument 'local_dir' is deprecated. Please use 'local_save_dir' instead.") else: local_save_dir = args.local_save_dir train_dataset.to_parquet(os.path.join(local_save_dir, "train.parquet")) test_dataset.to_parquet(os.path.join(local_save_dir, "test.parquet")) if hdfs_dir is not None: makedirs(hdfs_dir) copy(src=local_save_dir, dst=hdfs_dir) ================================================ FILE: verl_distillation/examples/data_preprocess/geo3k_multiturn_w_tool.py ================================================ # Copyright 2023-2025 SGLang Team # Copyright Amazon.com, Inc. or its affiliates. # Copyright 2025 Reallm Labs Ltd. or its affiliates # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Preprocess the Geometry3k dataset to parquet format """ import argparse import os import datasets from verl.utils.hdfs_io import copy, makedirs if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--local_dir", default=None, help="The save directory for the preprocessed dataset.") parser.add_argument("--hdfs_dir", default=None) parser.add_argument("--local_dataset_path", default=None, help="The local path to the raw dataset, if it exists.") parser.add_argument( "--local_save_dir", default="~/data/geo3k_multiturn_w_tool", help="The save directory for the preprocessed dataset.", ) args = parser.parse_args() local_dataset_path = args.local_dataset_path data_source = "hiyouga/geometry3k" if local_dataset_path is not None: dataset = datasets.load_dataset(local_dataset_path) else: dataset = datasets.load_dataset(data_source) train_dataset = dataset["train"] test_dataset = dataset["test"] instruction_following = ( r"You FIRST think about the reasoning process as an internal monologue and then provide the final answer. " r"The reasoning process MUST BE enclosed within tags. " r"The final answer MUST BE put in \boxed{}." 
) # add a row to each data item that represents a unique id def make_map_fn(split): def process_fn(example, idx): problem = example.pop("problem") prompt = problem + " " + instruction_following answer = example.pop("answer") images = example.pop("images") data = { "data_source": data_source, "prompt": [ { "role": "system", "content": ( "You are a math expert. You are given a question and you need to solve it step by step. " "Reasoning step by step before any tool call. " "You should use the `calc_geo3k_reward` tool after step by step solving the question, " "before generate final answer at least once and refine your answer if necessary. " ), }, { "role": "user", "content": prompt, }, ], "images": images, "ability": "math", "reward_model": {"style": "rule", "ground_truth": answer}, "extra_info": { "split": split, "index": idx, "answer": answer, "question": problem, "need_tools_kwargs": True, "tools_kwargs": { "calc_geo3k_reward": { "create_kwargs": {"ground_truth": answer}, # "execute_kwargs": {}, # "calc_reward_kwargs": {}, # "release_kwargs": {}, }, }, }, } return data return process_fn train_dataset = train_dataset.map(function=make_map_fn("train"), with_indices=True, num_proc=8) test_dataset = test_dataset.map(function=make_map_fn("test"), with_indices=True, num_proc=8) hdfs_dir = args.hdfs_dir local_save_dir = args.local_dir if local_save_dir is not None: print("Warning: Argument 'local_dir' is deprecated. Please use 'local_save_dir' instead.") else: local_save_dir = args.local_save_dir train_dataset.to_parquet(os.path.join(local_save_dir, "train.parquet")) test_dataset.to_parquet(os.path.join(local_save_dir, "test.parquet")) if hdfs_dir is not None: makedirs(hdfs_dir) copy(src=local_save_dir, dst=hdfs_dir) ================================================ FILE: verl_distillation/examples/data_preprocess/gsm8k.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Preprocess the GSM8k dataset to parquet format """ import argparse import os import re import datasets from verl.utils.hdfs_io import copy, makedirs def extract_solution(solution_str): solution = re.search("#### (\\-?[0-9\\.\\,]+)", solution_str) assert solution is not None final_solution = solution.group(0) final_solution = final_solution.split("#### ")[1].replace(",", "") return final_solution if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--local_dir", default=None, help="The save directory for the preprocessed dataset.") parser.add_argument("--hdfs_dir", default=None) parser.add_argument("--local_dataset_path", default=None, help="The local path to the raw dataset, if it exists.") parser.add_argument( "--local_save_dir", default="~/data/gsm8k", help="The save directory for the preprocessed dataset." 
) args = parser.parse_args() local_dataset_path = args.local_dataset_path data_source = "openai/gsm8k" if local_dataset_path is not None: dataset = datasets.load_dataset(local_dataset_path, "main") else: dataset = datasets.load_dataset(data_source, "main") train_dataset = dataset["train"] test_dataset = dataset["test"] instruction_following = 'Let\'s think step by step and output the final answer after "####".' # add a row to each data item that represents a unique id def make_map_fn(split): def process_fn(example, idx): question_raw = example.pop("question") question = question_raw + " " + instruction_following answer_raw = example.pop("answer") solution = extract_solution(answer_raw) data = { "data_source": data_source, "prompt": [ { "role": "user", "content": question, } ], "ability": "math", "reward_model": {"style": "rule", "ground_truth": solution}, "extra_info": { "split": split, "index": idx, "answer": answer_raw, "question": question_raw, }, } return data return process_fn train_dataset = train_dataset.map(function=make_map_fn("train"), with_indices=True) test_dataset = test_dataset.map(function=make_map_fn("test"), with_indices=True) hdfs_dir = args.hdfs_dir local_save_dir = args.local_dir if local_save_dir is not None: print("Warning: Argument 'local_dir' is deprecated. Please use 'local_save_dir' instead.") else: local_save_dir = args.local_save_dir train_dataset.to_parquet(os.path.join(local_save_dir, "train.parquet")) test_dataset.to_parquet(os.path.join(local_save_dir, "test.parquet")) if hdfs_dir is not None: makedirs(hdfs_dir) copy(src=local_save_dir, dst=hdfs_dir) ================================================ FILE: verl_distillation/examples/data_preprocess/gsm8k_multiturn_sft.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Preprocess the GSM8k dataset to parquet format """ import argparse import os import re import datasets from verl.utils.hdfs_io import copy, makedirs def extract_solution(solution_str): solution = re.search("#### (\\-?[0-9\\.\\,]+)", solution_str) assert solution is not None final_solution = solution.group(0) final_solution = final_solution.split("#### ")[1].replace(",", "") return final_solution if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--local_dir", default=None) parser.add_argument("--local_dataset_path", default=None, help="The local path to the raw dataset, if it exists.") parser.add_argument( "--local_save_dir", default="~/data/gsm8k_sft", help="The save directory for the preprocessed dataset." 
) parser.add_argument("--hdfs_dir", default=None) args = parser.parse_args() local_dataset_path = args.local_dataset_path data_source = "openai/gsm8k" if local_dataset_path is not None: dataset = datasets.load_dataset(local_dataset_path, "main") else: dataset = datasets.load_dataset(data_source, "main") train_dataset = dataset["train"] test_dataset = dataset["test"] instruction_following = 'Let\'s think step by step and output the final answer after "####".' # add a row to each data item that represents a unique id def make_map_fn(split): def process_fn(example, idx): question_raw = example.pop("question") question = question_raw + " " + instruction_following answer_raw = example.pop("answer") data = { "messages": [ { "role": "user", "content": question, }, { "role": "assistant", "content": answer_raw, }, ], } return data return process_fn train_dataset = train_dataset.map(function=make_map_fn("train"), with_indices=True) test_dataset = test_dataset.map(function=make_map_fn("test"), with_indices=True) hdfs_dir = args.hdfs_dir local_save_dir = args.local_dir if local_save_dir is not None: print("Warning: Argument 'local_dir' is deprecated. Please use 'local_save_dir' instead.") else: local_save_dir = args.local_save_dir local_save_dir = os.path.expanduser(local_save_dir) train_dataset.to_parquet(os.path.join(local_save_dir, "train.parquet")) test_dataset.to_parquet(os.path.join(local_save_dir, "test.parquet")) if hdfs_dir is not None: makedirs(hdfs_dir) copy(src=local_save_dir, dst=hdfs_dir) ================================================ FILE: verl_distillation/examples/data_preprocess/gsm8k_multiturn_w_interaction.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Preprocess the GSM8k dataset to parquet format """ import argparse import os import re import datasets from verl.utils.hdfs_io import copy, makedirs def extract_solution(solution_str): solution = re.search("#### (\\-?[0-9\\.\\,]+)", solution_str) assert solution is not None final_solution = solution.group(0) final_solution = final_solution.split("#### ")[1].replace(",", "") return final_solution if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--local_dir", default=None, help="The save directory for the preprocessed dataset.") parser.add_argument("--hdfs_dir", default=None) parser.add_argument("--local_dataset_path", default=None, help="The local path to the raw dataset, if it exists.") parser.add_argument( "--local_save_dir", default="~/data/gsm8k", help="The save directory for the preprocessed dataset." 
) args = parser.parse_args() local_dataset_path = args.local_dataset_path data_source = "openai/gsm8k" if local_dataset_path is not None: dataset = datasets.load_dataset(local_dataset_path, "main") else: dataset = datasets.load_dataset(data_source, "main") train_dataset = dataset["train"] test_dataset = dataset["test"] instruction_following = "Let's think step by step and output the final answer after `####`." # add a row to each data item that represents a unique id def make_map_fn(split): def process_fn(example, idx): question_raw = example.pop("question") question = question_raw + " " + instruction_following answer_raw = example.pop("answer") solution = extract_solution(answer_raw) data = { "data_source": data_source, "prompt": [ { "role": "system", "content": ( "You are a math expert. You are given a question and you need to solve it step by step. " "You should rethinking carefully if user point out your answer is wrong. " "Put your final answer in the format of `#### `." ), }, { "role": "user", "content": question, }, ], "ability": "math", "reward_model": {"style": "rule", "ground_truth": solution}, "extra_info": { "split": split, "index": idx, "answer": answer_raw, "question": question_raw, "interaction_kwargs": { "name": "gsm8k", "query": question, "ground_truth": solution, }, }, } return data return process_fn train_dataset = train_dataset.map(function=make_map_fn("train"), with_indices=True) test_dataset = test_dataset.map(function=make_map_fn("test"), with_indices=True) hdfs_dir = args.hdfs_dir local_save_dir = args.local_dir if local_save_dir is not None: print("Warning: Argument 'local_dir' is deprecated. Please use 'local_save_dir' instead.") else: local_save_dir = args.local_save_dir train_dataset.to_parquet(os.path.join(local_save_dir, "train.parquet")) test_dataset.to_parquet(os.path.join(local_save_dir, "test.parquet")) if hdfs_dir is not None: makedirs(hdfs_dir) copy(src=local_save_dir, dst=hdfs_dir) ================================================ FILE: verl_distillation/examples/data_preprocess/gsm8k_multiturn_w_tool.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" Preprocess the GSM8k dataset to parquet format """ import argparse import os import re import datasets from verl.utils.hdfs_io import copy, makedirs def extract_solution(solution_str): solution = re.search("#### (\\-?[0-9\\.\\,]+)", solution_str) assert solution is not None final_solution = solution.group(0) final_solution = final_solution.split("#### ")[1].replace(",", "") return final_solution if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--local_dir", default=None, help="The save directory for the preprocessed dataset.") parser.add_argument("--hdfs_dir", default=None) parser.add_argument("--local_dataset_path", default=None, help="The local path to the raw dataset, if it exists.") parser.add_argument( "--local_save_dir", default="~/data/gsm8k", help="The save directory for the preprocessed dataset." ) args = parser.parse_args() local_dataset_path = args.local_dataset_path data_source = "openai/gsm8k" if local_dataset_path is not None: dataset = datasets.load_dataset(local_dataset_path, "main") else: dataset = datasets.load_dataset(data_source, "main") train_dataset = dataset["train"] test_dataset = dataset["test"] instruction_following = "Let's think step by step and output the final answer after `####`." # add a row to each data item that represents a unique id def make_map_fn(split): def process_fn(example, idx): question_raw = example.pop("question") question = question_raw + " " + instruction_following answer_raw = example.pop("answer") solution = extract_solution(answer_raw) data = { "data_source": data_source, "prompt": [ { "role": "system", "content": ( "You are a math expert. You are given a question and you need to solve it step by step. " "Reasoning step by step before any tool call. " "You should use the `calc_gsm8k_reward` tool after step by step solving the question, " "before generate final answer at least once and refine your answer if necessary. " "Put your final answer in the format of `#### `." ), }, { "role": "user", "content": question, }, ], "ability": "math", "reward_model": {"style": "rule", "ground_truth": solution}, "extra_info": { "split": split, "index": idx, "answer": answer_raw, "question": question_raw, "need_tools_kwargs": True, "tools_kwargs": { "calc_gsm8k_reward": { "create_kwargs": {"ground_truth": solution}, # "execute_kwargs": {}, # "calc_reward_kwargs": {}, # "release_kwargs": {}, }, }, "interaction_kwargs": { "query": question, "ground_truth": solution, }, }, } return data return process_fn train_dataset = train_dataset.map(function=make_map_fn("train"), with_indices=True) test_dataset = test_dataset.map(function=make_map_fn("test"), with_indices=True) hdfs_dir = args.hdfs_dir local_save_dir = args.local_dir if local_save_dir is not None: print("Warning: Argument 'local_dir' is deprecated. Please use 'local_save_dir' instead.") else: local_save_dir = args.local_save_dir train_dataset.to_parquet(os.path.join(local_save_dir, "train.parquet")) test_dataset.to_parquet(os.path.join(local_save_dir, "test.parquet")) if hdfs_dir is not None: makedirs(hdfs_dir) copy(src=local_save_dir, dst=hdfs_dir) ================================================ FILE: verl_distillation/examples/data_preprocess/gsm8k_tool_agent_loop.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. 
and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Preprocess the GSM8k dataset to parquet format """ import argparse import os import re import datasets from verl.utils.hdfs_io import copy, makedirs def extract_solution(solution_str): solution = re.search("#### (\\-?[0-9\\.\\,]+)", solution_str) assert solution is not None final_solution = solution.group(0) final_solution = final_solution.split("#### ")[1].replace(",", "") return final_solution if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--local_dir", default=None, help="The save directory for the preprocessed dataset.") parser.add_argument("--hdfs_dir", default=None) parser.add_argument("--local_dataset_path", default=None, help="The local path to the raw dataset, if it exists.") parser.add_argument( "--local_save_dir", default="~/data/gsm8k", help="The save directory for the preprocessed dataset." ) args = parser.parse_args() local_dataset_path = args.local_dataset_path data_source = "openai/gsm8k" if local_dataset_path is not None: dataset = datasets.load_dataset(local_dataset_path, "main") else: dataset = datasets.load_dataset(data_source, "main") train_dataset = dataset["train"] test_dataset = dataset["test"] instruction_following = "Let's think step by step and output the final answer after `####`." # add a row to each data item that represents a unique id def make_map_fn(split): def process_fn(example, idx): question_raw = example.pop("question") question = question_raw + " " + instruction_following answer_raw = example.pop("answer") solution = extract_solution(answer_raw) data = { "data_source": data_source, "agent_name": "tool_agent", "prompt": [ { "role": "system", "content": ( "You are a math expert. You are given a question and you need to solve it step by step. " "Reasoning step by step before any tool call. " "You should use the `calc_gsm8k_reward` tool after step by step solving the question, " "before generate final answer at least once and refine your answer if necessary. " "Put your final answer in the format of `#### `." ), }, { "role": "user", "content": question, }, ], "ability": "math", "reward_model": {"style": "rule", "ground_truth": solution}, "extra_info": { "split": split, "index": idx, "answer": answer_raw, "question": question_raw, "need_tools_kwargs": True, "tools_kwargs": { "calc_gsm8k_reward": { "create_kwargs": {"ground_truth": solution}, # "execute_kwargs": {}, # "calc_reward_kwargs": {}, # "release_kwargs": {}, }, }, "interaction_kwargs": { "query": question, "ground_truth": solution, }, }, } return data return process_fn train_dataset = train_dataset.map(function=make_map_fn("train"), with_indices=True) test_dataset = test_dataset.map(function=make_map_fn("test"), with_indices=True) hdfs_dir = args.hdfs_dir local_save_dir = args.local_dir if local_save_dir is not None: print("Warning: Argument 'local_dir' is deprecated. 
Please use 'local_save_dir' instead.") else: local_save_dir = args.local_save_dir train_dataset.to_parquet(os.path.join(local_save_dir, "train.parquet")) test_dataset.to_parquet(os.path.join(local_save_dir, "test.parquet")) if hdfs_dir is not None: makedirs(hdfs_dir) copy(src=local_save_dir, dst=hdfs_dir) ================================================ FILE: verl_distillation/examples/data_preprocess/hellaswag.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Preprocess Hellaswag dataset. """ import argparse import os import re import datasets from verl.utils.hdfs_io import copy, makedirs def preprocess(text): text = text.strip() # NOTE: Brackets are artifacts of the WikiHow dataset portion of HellaSwag. text = text.replace(" [title]", ". ") text = re.sub("\\[.*?\\]", "", text) text = text.replace(" ", " ") return text if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--local_dir", default=None, help="The save directory for the preprocessed dataset.") parser.add_argument("--hdfs_dir", default=None) parser.add_argument("--local_dataset_path", default=None, help="The local path to the raw dataset, if it exists.") parser.add_argument( "--local_save_dir", default="~/data/hellaswag", help="The save directory for the preprocessed dataset." ) args = parser.parse_args() local_dataset_path = args.local_dataset_path data_source = "Rowan/hellaswag" if local_dataset_path is not None: dataset = datasets.load_dataset(local_dataset_path) else: dataset = datasets.load_dataset(data_source, trust_remote_code=True) train_dataset = dataset["train"] val_dataset = dataset["validation"] test_dataset = dataset["test"] instruction = "Please complete the following sentence.\n" def make_map_fn(split): def process_fn(doc, idx): ctx = doc["ctx_a"] + " " + doc["ctx_b"].capitalize() query = preprocess(doc["activity_label"] + ": " + ctx) choices = [preprocess(ending) for ending in doc["endings"]] gold = int(doc["label"]) data = { "data_source": data_source, "prompt": [{"role": "user", "content": query}], "ability": "nlp", "reward_model": { "style": "model", "eval": "multiple_choice", # using loglikelihood "ground_truth": gold, "choices": choices, }, "extra_info": {"split": split, "index": idx}, } return data return process_fn # filter data that doesn't have a label train_dataset = train_dataset.filter(lambda x: len(x["label"]) > 0) val_dataset = val_dataset.filter(lambda x: len(x["label"]) > 0) test_dataset = test_dataset.filter(lambda x: len(x["label"]) > 0) train_dataset = train_dataset.map(function=make_map_fn("train"), with_indices=True) val_dataset = val_dataset.map(function=make_map_fn("validation"), with_indices=True) test_dataset = test_dataset.map(function=make_map_fn("test"), with_indices=True) hdfs_dir = args.hdfs_dir local_save_dir = args.local_dir if local_save_dir is not None: print("Warning: Argument 'local_dir' is deprecated. 
Please use 'local_save_dir' instead.") else: local_save_dir = args.local_save_dir train_dataset.to_parquet(os.path.join(local_save_dir, "train.parquet")) val_dataset.to_parquet(os.path.join(local_save_dir, "validation.parquet")) test_dataset.to_parquet(os.path.join(local_save_dir, "test.parquet")) if hdfs_dir is not None: makedirs(hdfs_dir) copy(src=local_save_dir, dst=hdfs_dir) ================================================ FILE: verl_distillation/examples/data_preprocess/math_dataset.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Preprocess the MATH-lighteval dataset to parquet format """ import argparse import json import os import datasets from verl.utils.hdfs_io import copy, makedirs from verl.utils.reward_score.math_reward import last_boxed_only_string, remove_boxed def extract_solution(solution_str): return remove_boxed(last_boxed_only_string(solution_str)) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--local_dir", default=None) parser.add_argument("--hdfs_dir", default=None) parser.add_argument("--local_dataset_path", default=None, help="The local path to the raw dataset, if it exists.") parser.add_argument( "--local_save_dir", default="~/data/math", help="The save directory for the preprocessed dataset." ) args = parser.parse_args() local_dataset_path = args.local_dataset_path # 'lighteval/MATH' is no longer available on huggingface. # Use mirror repo: DigitalLearningGmbH/MATH-lighteval data_source = "DigitalLearningGmbH/MATH-lighteval" print(f"Loading the {data_source} dataset from huggingface...", flush=True) if local_dataset_path is not None: dataset = datasets.load_dataset( local_dataset_path, ) else: dataset = datasets.load_dataset( data_source, ) train_dataset = dataset["train"] test_dataset = dataset["test"] instruction_following = "Let's think step by step and output the final answer within \\boxed{}." # add a row to each data item that represents a unique id def make_map_fn(split): def process_fn(example, idx): question = example.pop("problem") question = question + " " + instruction_following answer = example.pop("solution") solution = extract_solution(answer) data = { "data_source": data_source, "prompt": [{"role": "user", "content": question}], "ability": "math", "reward_model": {"style": "rule", "ground_truth": solution}, "extra_info": {"split": split, "index": idx}, } return data return process_fn train_dataset = train_dataset.map(function=make_map_fn("train"), with_indices=True) test_dataset = test_dataset.map(function=make_map_fn("test"), with_indices=True) local_save_dir = args.local_dir if local_save_dir is not None: print("Warning: Argument 'local_dir' is deprecated. 
Please use 'local_save_dir' instead.") else: local_save_dir = args.local_save_dir local_dir = os.path.expanduser(local_save_dir) hdfs_dir = args.hdfs_dir train_dataset.to_parquet(os.path.join(local_dir, "train.parquet")) test_dataset.to_parquet(os.path.join(local_dir, "test.parquet")) # Save one example as JSON for reference example = train_dataset[0] with open(os.path.join(local_dir, "train_example.json"), "w") as f: json.dump(example, f, indent=2) example = test_dataset[0] with open(os.path.join(local_dir, "test_example.json"), "w") as f: json.dump(example, f, indent=2) if hdfs_dir is not None: makedirs(hdfs_dir) copy(src=local_dir, dst=hdfs_dir) ================================================ FILE: verl_distillation/examples/data_preprocess/multiturn.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Create a simple multi-turn dataset for testing """ import argparse import os import pandas as pd def main(): parser = argparse.ArgumentParser() parser.add_argument("--local_dir", default="~/data/multiturn") parser.add_argument("--hdfs_dir", default=None) args = parser.parse_args() # Create example conversations conversations = [] # Conversation 1 conversations.append( { "messages": [ {"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "What is the capital of France?"}, {"role": "assistant", "content": "The capital of France is Paris."}, {"role": "user", "content": "And what about Germany?"}, {"role": "assistant", "content": "The capital of Germany is Berlin."}, ] } ) # Conversation 2 conversations.append( { "messages": [ {"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "Can you explain quantum computing?"}, { "role": "assistant", "content": "Quantum computing is a type of computing that uses quantum-mechanical phenomena, " "such as superposition and entanglement, to perform operations on data.", }, {"role": "user", "content": "How is it different from classical computing?"}, { "role": "assistant", "content": "Classical computing uses bits that are either 0 or 1, while quantum computing uses " "quantum bits or qubits that can exist in multiple states simultaneously due to superposition.", }, ] } ) # Conversation 3 conversations.append( { "messages": [ {"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "Write a simple Python function to calculate factorial."}, { "role": "assistant", "content": ( "```python\ndef factorial(n):\n if n == 0 or n == 1:\n return 1\n else:\n " "return n * factorial(n-1)\n```\n\nThis is a recursive function to calculate the " "factorial of a number." ), }, {"role": "user", "content": "Can you make it iterative instead?"}, { "role": "assistant", "content": ( "```python\ndef factorial(n):\n result = 1\n for i in range(1, n+1):\n " "result *= i\n return result\n```\n\nThis is an iterative version of the factorial function." 
), }, ] } ) # Create train and test datasets train_data = conversations[:2] # First 2 conversations for training test_data = conversations[2:] # Last conversation for testing # Create output directory local_dir = os.path.expanduser(args.local_dir) os.makedirs(local_dir, exist_ok=True) # Save to parquet files train_df = pd.DataFrame(train_data) test_df = pd.DataFrame(test_data) train_df.to_parquet(os.path.join(local_dir, "train.parquet")) test_df.to_parquet(os.path.join(local_dir, "test.parquet")) # Handle HDFS if specified if args.hdfs_dir is not None: try: from verl.utils.hdfs_io import copy, makedirs makedirs(args.hdfs_dir) copy(src=local_dir, dst=args.hdfs_dir) except ImportError: print("Warning: HDFS support not available. Skipping HDFS copy.") # Print statistics print(f"Train dataset size: {len(train_df)}") print(f"Test dataset size: {len(test_df)}") print(f"Data saved to {local_dir}") if __name__ == "__main__": main() ================================================ FILE: verl_distillation/examples/data_preprocess/preprocess_search_r1_dataset.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2023-2024 SGLang Team # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import logging import os import tempfile import pandas as pd from huggingface_hub import hf_hub_download from huggingface_hub.utils import EntryNotFoundError from verl.utils.hdfs_io import copy, makedirs # Setup logging logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s") logger = logging.getLogger(__name__) # Configuration constants DEFAULT_SYSTEM_CONTENT = "You are a helpful and harmless assistant." DEFAULT_USER_CONTENT_PREFIX = ( "Answer the given question. You must conduct reasoning inside <think> and </think> " "first every time you get new information. After reasoning, if you find you lack " "some knowledge, you can call a search engine by <search> query </search> " "and it will return the top searched results between <information> and </information>" ". You can search as many times as you want. If you find no " "further external knowledge needed, you can directly provide the answer inside " "<answer> and </answer>, without detailed illustrations. For example, " "<answer> Beijing </answer>. Question: " ) def process_single_row(row, current_split_name, row_index): """ Process a single row of data for SearchR1-like format.
Args: row: DataFrame row containing the original data current_split_name: Name of the current split (train/test) row_index: Index of the row in the DataFrame Returns: pd.Series: Processed row data in the required format """ question = row.get("question", "") # Build prompt structure user_content = user_content_prefix.rstrip("\n") + question prompt = [{"role": "system", "content": system_content}, {"role": "user", "content": user_content}] # Extract ground truth from reward_model or fallback to golden_answers reward_model_data = row.get("reward_model") if isinstance(reward_model_data, dict) and "ground_truth" in reward_model_data: ground_truth = reward_model_data.get("ground_truth") else: ground_truth = row.get("golden_answers", []) # Process data source data_source_tagged = "searchR1_" + str(row.get("data_source", "")) # Build tools kwargs structure tools_kwargs = { "search": { "create_kwargs": {"ground_truth": ground_truth, "question": question, "data_source": data_source_tagged} } } # Build complete extra_info structure extra_info = { "index": row_index, "need_tools_kwargs": True, "question": question, "split": current_split_name, "tools_kwargs": tools_kwargs, } return pd.Series( { "data_source": data_source_tagged, "prompt": prompt, "ability": row.get("ability"), "reward_model": reward_model_data, "extra_info": extra_info, "metadata": row.get("metadata"), } ) def main(): local_save_dir = os.path.expanduser(args.local_dir) os.makedirs(local_save_dir, exist_ok=True) processed_files = [] # Download and process files using temporary directory with tempfile.TemporaryDirectory() as tmp_download_dir: for split in ["train", "test"]: parquet_filename = f"{split}.parquet" logger.info(f"Processing {split} split...") try: # Download Parquet file from HuggingFace logger.info(f"Downloading {parquet_filename} from {args.hf_repo_id}") local_parquet_filepath = hf_hub_download( repo_id=args.hf_repo_id, filename=parquet_filename, repo_type="dataset", local_dir=tmp_download_dir, local_dir_use_symlinks=False, ) # Load and process Parquet file df_raw = pd.read_parquet(local_parquet_filepath) logger.info(f"Loaded {len(df_raw)} rows from {parquet_filename}") def apply_process_row(row, split_name=split): return process_single_row(row, current_split_name=split_name, row_index=row.name) df_processed = df_raw.apply(apply_process_row, axis=1) # Save processed DataFrame output_file_path = os.path.join(local_save_dir, f"{split}.parquet") df_processed.to_parquet(output_file_path, index=False) logger.info(f"Saved {len(df_processed)} processed rows to {output_file_path}") processed_files.append(output_file_path) except EntryNotFoundError: logger.warning(f"{parquet_filename} not found in repository {args.hf_repo_id}") except Exception as e: logger.error(f"Error processing {split} split: {e}") if not processed_files: logger.warning("No data was processed or saved") return logger.info(f"Successfully processed {len(processed_files)} files to {local_save_dir}") # Copy to HDFS if specified if args.hdfs_dir: try: makedirs(args.hdfs_dir) copy(src=local_save_dir, dst=args.hdfs_dir) logger.info(f"Successfully copied files to HDFS: {args.hdfs_dir}") except Exception as e: logger.error(f"Error copying files to HDFS: {e}") if __name__ == "__main__": parser = argparse.ArgumentParser(description="Download Search-R1 from HuggingFace, process, and save to Parquet.") parser.add_argument( "--hf_repo_id", default="PeterJinGo/nq_hotpotqa_train", help="HuggingFace dataset repository ID." 
) parser.add_argument( "--local_dir", default="~/data/searchR1_processed_direct", help="Local directory to save the processed Parquet files.", ) parser.add_argument("--hdfs_dir", default=None, help="Optional HDFS directory to copy the Parquet files to.") args = parser.parse_args() # System and user content configuration system_content = DEFAULT_SYSTEM_CONTENT user_content_prefix = DEFAULT_USER_CONTENT_PREFIX main() ================================================ FILE: verl_distillation/examples/generation/run_deepseek7b_mutli_node.sh ================================================ set -x data_path=$HOME/data/rlhf/gsm8k/test.parquet save_path=$HOME/data/rlhf/math/deepseek_v2_lite_gen_test.parquet model_path=deepseek-ai/deepseek-llm-7b-chat python3 -m verl.trainer.main_generation \ trainer.nnodes=2 \ trainer.n_gpus_per_node=8 \ data.path=$data_path \ data.prompt_key=prompt \ data.n_samples=1 \ data.output_path=$save_path \ model.path=$model_path\ +model.trust_remote_code=True \ rollout.temperature=1.0 \ rollout.top_k=50 \ rollout.top_p=0.7 \ rollout.prompt_length=2048 \ rollout.response_length=1024 \ rollout.tensor_model_parallel_size=16 \ rollout.gpu_memory_utilization=0.8 ================================================ FILE: verl_distillation/examples/generation/run_deepseek_v2_lite_math.sh ================================================ set -x data_path=$HOME/data/gsm8k/test.parquet save_path=$HOME/data/gsm8k/deepseek_v2_lite_gen_test.parquet model_path=deepseek-ai/deepseek-llm-7b-chat python3 -m verl.trainer.main_generation \ trainer.nnodes=1 \ trainer.n_gpus_per_node=8 \ data.path=$data_path \ data.prompt_key=prompt \ data.n_samples=1 \ data.output_path=$save_path \ model.path=$model_path \ +model.trust_remote_code=True \ rollout.temperature=1.0 \ rollout.top_k=50 \ rollout.top_p=0.7 \ rollout.prompt_length=2048 \ rollout.response_length=1024 \ rollout.tensor_model_parallel_size=2 \ rollout.gpu_memory_utilization=0.8 ================================================ FILE: verl_distillation/examples/gmpo_trainer/README.md ================================================
# Geometric-Mean Policy Optimization
This is the official implementation of the paper [***Geometric-Mean Policy Optimization***](https://arxiv.org/abs/2507.20673).
## 1. Contents

- Geometric-Mean Policy Optimization
  - [1. Contents](#1-contents)
  - [2. Introduction](#2-introduction)
  - [3. Code Usage](#3-code-usage)
  - [4. Contacts](#4-contacts)
  - [5. Citation](#5-citation)

## 2. Introduction

Group Relative Policy Optimization (GRPO) has significantly enhanced the reasoning capability of large language models by optimizing the arithmetic mean of token-level rewards. However, GRPO suffers from unstable policy updates when it encounters tokens with outlier importance-weighted rewards, which manifest as extreme importance sampling ratios during training. In this work, we propose Geometric-Mean Policy Optimization (GMPO), which improves the stability of GRPO by suppressing token-level reward outliers: instead of optimizing the arithmetic mean, GMPO maximizes the geometric mean of token-level rewards, which is inherently less sensitive to outliers and keeps the importance sampling ratio within a more stable range. GMPO is plug-and-play, simply replacing GRPO's arithmetic mean with the geometric mean of token-level rewards. It is also theoretically well-founded: analysis reveals that both GMPO and GRPO are weighted forms of the policy gradient, while the former enjoys more stable weights, which in turn benefits policy optimization and performance. Experiments on multiple mathematical reasoning benchmarks show that GMPO-7B improves the average Pass@1 of GRPO by up to 4.1%, outperforming many state-of-the-art approaches.

## 3. Code Usage

The key configurations are:

```
clip_ratio_low=0.4
clip_ratio_high=0.4
loss_mode=geo_mean
```

We observed that using a large clip ratio during Mixture-of-Experts (MoE) model training often leads to optimization instability; when training MoE models, consider lowering the clip ratio to achieve more stable convergence.

To get started quickly, run:

```
bash examples/gmpo_trainer/run_qwen2_5-7b_math.sh
```

GMPO can be combined with other methods such as DAPO (experimental, not fully tested):

```
bash examples/gmpo_trainer/test_dapo_7b_math.sh
bash examples/gmpo_trainer/test_dapo_qwen3_30b_math.sh
```

The sketch below illustrates what `loss_mode=geo_mean` changes relative to GRPO's arithmetic-mean aggregation.
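As a minimal illustration only (not verl's actual loss implementation: the function names and toy numbers are ours, and PPO-style clipping is omitted), compare how the two aggregations respond to a single outlier importance sampling ratio:

```
import torch

def grpo_token_mean(ratio: torch.Tensor, adv: torch.Tensor) -> torch.Tensor:
    # GRPO-style: arithmetic mean of token-level importance-weighted advantages.
    return (ratio * adv).mean()

def gmpo_geo_mean(ratio: torch.Tensor, adv: torch.Tensor) -> torch.Tensor:
    # GMPO-style: geometric mean of the sequence's importance ratios, scaled
    # by the sequence-level advantage; exp(mean(log r_t)) == (prod r_t)^(1/T).
    return ratio.log().mean().exp() * adv

ratio = torch.tensor([1.02, 0.97, 1.01, 9.0])  # one outlier token ratio
adv = torch.tensor(1.0)                        # group-relative advantage
print(grpo_token_mean(ratio, adv))  # ~3.00: dominated by the outlier
print(gmpo_geo_mean(ratio, adv))    # ~1.73: the log damps the outlier
```

Because each ratio enters the geometric mean only through its logarithm, a single extreme token shifts the update weight far less than under the arithmetic mean, which is the stability property described in Section 2.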
## 4. Contacts

If you have any questions about our work or this repository, please don't hesitate to contact us by email or open an issue under this project.

- [zhaoyuzhong20@mails.ucas.ac.cn](zhaoyuzhong20@mails.ucas.ac.cn)
- [liuyue171@mails.ucas.ac.cn](liuyue171@mails.ucas.ac.cn)
- [lecu@microsoft.com](lecu@microsoft.com)
- [wanfang@ucas.ac.cn](wanfang@ucas.ac.cn)

## 5. Citation

```
@article{zhao2025geometric,
  title={Geometric-mean policy optimization},
  author={Zhao, Yuzhong and Liu, Yue and Liu, Junpeng and Chen, Jingye and Wu, Xun and Hao, Yaru and Lv, Tengchao and Huang, Shaohan and Cui, Lei and Ye, Qixiang and others},
  journal={arXiv preprint arXiv:2507.20673},
  year={2025}
}
```

================================================
FILE: verl_distillation/examples/gmpo_trainer/run_qwen2_5-7b_math.sh
================================================
set -x

gsm8k_train_path=$HOME/data/gsm8k/train.parquet
gsm8k_test_path=$HOME/data/gsm8k/test.parquet
math_train_path=$HOME/data/math/train.parquet
math_test_path=$HOME/data/math/test.parquet

train_files="['$gsm8k_train_path', '$math_train_path']"
test_files="['$gsm8k_test_path', '$math_test_path']"

use_kl_loss=False
loss_mode=geo_mean
clip_ratio=0.4

save_contents="['model', 'optimizer', 'extra']"
export WANDB_MODE=offline
save_contents="['hf_model']"

python3 -m verl.trainer.main_ppo \
    algorithm.adv_estimator=grpo \
    data.train_files="$train_files" \
    data.val_files="$test_files" \
    data.train_batch_size=1024 \
    data.max_prompt_length=1024 \
    data.max_response_length=1024 \
    data.filter_overlong_prompts=True \
    data.truncation='error' \
    actor_rollout_ref.model.path=Qwen/Qwen2.5-Math-7B \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.model.use_remove_padding=True \
    actor_rollout_ref.actor.ppo_mini_batch_size=256 \
    actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=16 \
    actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \
    actor_rollout_ref.actor.kl_loss_coef=0.001 \
    actor_rollout_ref.actor.kl_loss_type=low_var_kl \
    actor_rollout_ref.actor.policy_loss.loss_mode=${loss_mode} \
    actor_rollout_ref.actor.clip_ratio_low=${clip_ratio} \
    actor_rollout_ref.actor.clip_ratio_high=${clip_ratio} \
    actor_rollout_ref.actor.entropy_coeff=0 \
    actor_rollout_ref.model.enable_gradient_checkpointing=True \
    actor_rollout_ref.actor.fsdp_config.param_offload=False \
    actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
    actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \
    actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
    actor_rollout_ref.rollout.name=vllm \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \
    actor_rollout_ref.rollout.n=5 \
    actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=16 \
    actor_rollout_ref.ref.fsdp_config.param_offload=True \
    actor_rollout_ref.actor.checkpoint.save_contents=${save_contents} \
    algorithm.use_kl_in_reward=False \
    trainer.critic_warmup=0 \
    trainer.logger='["console","wandb"]' \
    trainer.project_name='verl_gmpo_example_gsm8k_math' \
    trainer.experiment_name='qwen2_5_7b_function_rm' \
    trainer.n_gpus_per_node=8 \
    trainer.nnodes=1 \
    trainer.save_freq=20 \
    trainer.test_freq=5 \
    trainer.total_epochs=15 $@

================================================
FILE: verl_distillation/examples/gmpo_trainer/test_dapo_7b_math.sh
================================================
#!/usr/bin/env bash
set -xeuo pipefail

project_name='DAPO'
exp_name='DAPO-Qwen2.5-7b-MATH-0527a1'

adv_estimator=grpo

use_kl_in_reward=False
kl_coef=0.0
use_kl_loss=False
kl_loss_coef=0.0

clip_ratio_low=0.4
clip_ratio_high=0.4

max_prompt_length=$((1024 * 2))
max_response_length=$((1024 * 8))
enable_overlong_buffer=True
overlong_buffer_len=$((1024 * 4))
overlong_penalty_factor=1.0

loss_agg_mode="token-mean"

train_prompt_bsz=512
n_resp_per_prompt=16
train_prompt_mini_bsz=32

# Ray
# RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"}
# WORKING_DIR=${WORKING_DIR:-"${PWD}"}
# RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"}
NNODES=${NNODES:-8}
NGPUS_PER_NODE=${NGPUS_PER_NODE:-8}

# Paths
RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"}
# very important! please modify the max_position_embeddings in config.json to 32768 after downloading from huggingface
MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen2.5-Math-7B"}
CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"}
TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"}
TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"}

# Algorithm
temperature=1.0
top_p=1.0
top_k=-1 # 0 for HF rollout, -1 for vLLM rollout
val_top_p=0.7

# Performance Related Parameter
sp_size=4
use_dynamic_bsz=True
actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 2))
infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 3))
offload=True
gen_tp=4
fsdp_size=32
loss_mode=geo_mean

# export WANDB_MODE=offline
save_contents="['model', 'optimizer', 'extra']"
# save_contents="['hf_model']"

# reference run wandb: https://wandb.ai/verl-org/DAPO%20Reproduction%20on%20verl/runs/ow47vvon?nw=nwusertongyuxuan361
python3 -m verl.trainer.main_ppo \
    data.train_files="${TRAIN_FILE}" \
    data.val_files="${TEST_FILE}" \
    data.prompt_key=prompt \
    data.truncation='left' \
    data.max_prompt_length=${max_prompt_length} \
    data.max_response_length=${max_response_length} \
    data.train_batch_size=${train_prompt_bsz} \
    actor_rollout_ref.rollout.n=${n_resp_per_prompt} \
    algorithm.adv_estimator=${adv_estimator} \
    algorithm.use_kl_in_reward=${use_kl_in_reward} \
    algorithm.kl_ctrl.kl_coef=${kl_coef} \
    actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \
    actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \
    actor_rollout_ref.actor.policy_loss.loss_mode=${loss_mode} \
    actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \
    actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \
    actor_rollout_ref.model.use_remove_padding=True \
    +actor_rollout_ref.model.override_config.max_position_embeddings=32768 \
    actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \
    actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \
    actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \
    actor_rollout_ref.rollout.name=vllm \
    actor_rollout_ref.model.path="${MODEL_PATH}" \
    actor_rollout_ref.model.enable_gradient_checkpointing=True \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.actor.optim.lr_warmup_steps=10 \
    actor_rollout_ref.actor.optim.weight_decay=0.1 \
    actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \
    actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \
    actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \
    actor_rollout_ref.actor.entropy_coeff=0 \
    actor_rollout_ref.actor.grad_clip=1.0 \
    actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \
    actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.80 \
    actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \
    actor_rollout_ref.rollout.enable_chunked_prefill=True \
    actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \
    actor_rollout_ref.rollout.temperature=${temperature} \
    actor_rollout_ref.rollout.top_p=${top_p} \
    actor_rollout_ref.rollout.top_k=${top_k} \
    actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \
    actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \
    actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \
    actor_rollout_ref.rollout.val_kwargs.do_sample=True \
    actor_rollout_ref.rollout.val_kwargs.n=1 \
    actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \
    actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \
    actor_rollout_ref.actor.fsdp_config.fsdp_size=${fsdp_size} \
    actor_rollout_ref.actor.checkpoint.save_contents="${save_contents}" \
    reward_model.reward_manager=dapo \
    +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \
    +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \
    +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \
    +reward_model.reward_kwargs.overlong_buffer_cfg.log=False \
    +reward_model.reward_kwargs.max_resp_len=${max_response_length} \
    trainer.logger='["console","wandb"]' \
    trainer.project_name="${project_name}" \
    trainer.experiment_name="${exp_name}" \
    trainer.n_gpus_per_node="${NGPUS_PER_NODE}" \
    trainer.nnodes="${NNODES}" \
    trainer.val_before_train=True \
    trainer.test_freq=10 \
    trainer.save_freq=10 \
    trainer.total_epochs=10 \
    trainer.total_training_steps=200 \
    trainer.default_local_dir="${CKPTS_DIR}" \
    trainer.resume_mode=auto \
    trainer.log_val_generations=10

================================================
FILE: verl_distillation/examples/gmpo_trainer/test_dapo_qwen3_30b_math.sh
================================================
#!/usr/bin/env bash
set -xeuo pipefail

project_name='DAPO'
exp_name='DAPO-Qwen3-30B-A3B-Base-MATH-0527a1'

adv_estimator=grpo

use_kl_in_reward=False
kl_coef=0.0
use_kl_loss=False
kl_loss_coef=0.0

clip_ratio_low=0.4
clip_ratio_high=0.4

max_prompt_length=$((1024 * 2))
max_response_length=$((1024 * 8))
enable_overlong_buffer=True
overlong_buffer_len=$((1024 * 4))
overlong_penalty_factor=1.0

loss_agg_mode="token-mean"

train_prompt_bsz=512
n_resp_per_prompt=16
train_prompt_mini_bsz=32

loss_mode=geo_mean
# export WANDB_MODE=offline
save_contents="['model', 'optimizer', 'extra']"
# save_contents="['hf_model']"

# Ray
# RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"}
# WORKING_DIR=${WORKING_DIR:-"${PWD}"}
# RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"}
NNODES=${NNODES:-8}
NGPUS_PER_NODE=${NGPUS_PER_NODE:-8}

# Paths
RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"}
MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen3-30B-A3B-Base"}
CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"}
TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"}
TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"}

# Algorithm
temperature=1.0
top_p=1.0
top_k=-1 # 0 for HF rollout, -1 for vLLM rollout
val_top_p=0.7

# Performance Related Parameter
sp_size=4
use_dynamic_bsz=True
actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 2))
infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 3))
offload=True
gen_tp=4
fsdp_size=32

python3 -m verl.trainer.main_ppo \
    data.train_files="${TRAIN_FILE}" \
    data.val_files="${TEST_FILE}" \
    data.prompt_key=prompt \
    data.truncation='left' \
    data.max_prompt_length=${max_prompt_length} \
    data.max_response_length=${max_response_length} \
    data.train_batch_size=${train_prompt_bsz} \
    actor_rollout_ref.rollout.n=${n_resp_per_prompt} \
    algorithm.adv_estimator=${adv_estimator}
\ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.policy_loss.loss_mode=${loss_mode} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.rollout.gpu_memory_utilization=0.80 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=True \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.actor.fsdp_config.fsdp_size=${fsdp_size} \ actor_rollout_ref.actor.checkpoint.save_contents="${save_contents}" \ reward_model.reward_manager=dapo \ +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \ +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \ +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \ +reward_model.reward_kwargs.overlong_buffer_cfg.log=False \ +reward_model.reward_kwargs.max_resp_len=${max_response_length} \ trainer.logger='["console","wandb"]' \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node="${NGPUS_PER_NODE}" \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=True \ trainer.test_freq=10 \ trainer.save_freq=10 \ trainer.total_epochs=10 \ trainer.total_training_steps=300 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=auto \ trainer.log_val_generations=10 ================================================ FILE: verl_distillation/examples/gpg_trainer/gpg.md ================================================ # GPG: Group Policy Gradient Group 
Policy Gradient (GPG) is a minimalist reinforcement learning (RL) method that enhances the reasoning ability of large language models without relying on supervised fine-tuning or complex tricks. GPG revisits traditional policy gradients and directly optimizes the RL objective: no surrogate losses, no KL penalties, no critic, and no reference model. Compared to GRPO, GPG is simpler, more efficient, and achieves better results on many tasks. For more details, please refer to the original paper [GPG: A Simple and Strong Reinforcement Learning Baseline for Model Reasoning](https://arxiv.org/abs/2504.02546).

## Key Components

- Uses a corrected advantage function to improve policy gradient accuracy and training efficiency.
- Eliminates the critic and reference models and avoids KL divergence constraints, which significantly simplifies the training process compared to Group Relative Policy Optimization (GRPO).

## Configuration

To configure GPG within the framework, use the following YAML settings.

```yaml
algorithm:
  adv_estimator: gpg
actor_rollout_ref:
  actor:
    policy_loss:
      loss_mode: "gpg"
```

## Advanced Extensions

GPG is a simple and strong baseline for model reasoning. Although it avoids using a KL loss in its original form, you can still add a KL loss to further improve performance.

```yaml
algorithm:
  adv_estimator: gpg
actor_rollout_ref:
  actor:
    use_kl_loss: True # enable kl regularization
    kl_loss_coef: 0.01
    policy_loss:
      loss_mode: "gpg"
```

================================================ FILE: verl_distillation/examples/gpg_trainer/run_qwen2-7b_math.sh ================================================ set -x # If you are using vllm<=0.6.3, you might need to set the following environment variable to avoid bugs: # export VLLM_ATTENTION_BACKEND=XFORMERS gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet math_train_path=$HOME/data/math/train.parquet math_test_path=$HOME/data/math/test.parquet train_files="['$gsm8k_train_path', '$math_train_path']" test_files="['$gsm8k_test_path', '$math_test_path']" python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=gpg \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2-7B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=16 \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.actor.policy_loss.loss_mode=gpg \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=16 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_gpg_example_gsm8k_math' \
trainer.experiment_name='qwen2_7b_function_rm' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/gpg_trainer/run_qwen2-7b_math_megatron.sh ================================================ set -x # If you are using vllm<=0.6.3, you might need to set the following environment variable to avoid bugs: # export VLLM_ATTENTION_BACKEND=XFORMERS export CUDA_DEVICE_MAX_CONNECTIONS=1 # For megatron communication/computation overlapping gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet math_train_path=$HOME/data/math/train.parquet math_test_path=$HOME/data/math/test.parquet train_files="['$gsm8k_train_path', '$math_train_path']" test_files="['$gsm8k_test_path', '$math_test_path']" python3 -m verl.trainer.main_ppo --config-path=config \ --config-name='ppo_megatron_trainer.yaml'\ algorithm.adv_estimator=gpg \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2-7B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=2 \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=2 \ actor_rollout_ref.actor.policy_loss.loss_mode=gpg \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=2 \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=2 \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_gpg_example_gsm8k_math' \ trainer.experiment_name='qwen2_7b_megatron' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/grpo_trainer/README.md ================================================ # Group Relative Policy Optimization (GRPO) In reinforcement learning, classic algorithms like PPO rely on a "critic" model to estimate the value of actions, guiding the learning process. However, training this critic model can be resource-intensive. GRPO simplifies this process by eliminating the need for a separate critic model. Instead, it operates as follows: - Group Sampling: For a given problem, the model generates multiple possible solutions, forming a "group" of outputs. - Reward Assignment: Each solution is evaluated and assigned a reward based on its correctness or quality. - Baseline Calculation: The average reward of the group serves as a baseline. - Policy Update: The model updates its parameters by comparing each solution's reward to the group baseline, reinforcing better-than-average solutions and discouraging worse-than-average ones. 
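For concreteness, here is a minimal sketch of the baseline and update signal described in the steps above. It illustrates the idea only and is not verl's actual implementation; the tensor shape convention and the `eps` constant are assumptions made for this sketch:

```python
import torch

def grpo_advantages(rewards: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
    """Group-relative advantages.

    rewards: (num_prompts, group_size), one scalar reward per sampled solution.
    Each solution is compared against the mean reward of its own group and
    normalized by the group's standard deviation.
    """
    baseline = rewards.mean(dim=-1, keepdim=True)    # group mean as baseline
    scale = rewards.std(dim=-1, keepdim=True) + eps  # guard against zero std
    return (rewards - baseline) / scale

# Example: two prompts with four sampled solutions each.
rewards = torch.tensor([[1.0, 0.0, 0.0, 1.0],
                        [0.0, 0.0, 0.0, 1.0]])
print(grpo_advantages(rewards))  # positive for better-than-average solutions
```

Solutions with positive advantage are reinforced and those with negative advantage are discouraged, which is the policy-update step listed above.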
This approach reduces computational overhead by avoiding the training of a separate value estimation model, making the learning process more efficient. For more details, refer to the original paper [DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models](https://arxiv.org/pdf/2402.03300)

## Key Components

- No Value Function (Critic-less): unlike PPO, GRPO does not train a separate value network (critic)
- Group Sampling (Grouped Rollouts): instead of evaluating one rollout per input, GRPO generates multiple completions (responses) from the current policy for each prompt. This set of completions is referred to as a group.
- Relative Rewards: within each group, completions are scored (e.g., based on correctness), and rewards are normalized relative to the group.

## Configuration

Note that all configs containing `micro_batch_size` are used to configure the maximum sample or token count per forward or backward pass to avoid GPU OOMs; their values should not change algorithmic/convergence behavior. Although many configurations start with the `ppo_` prefix, they work across different RL algorithms in verl, as the GRPO training loop is similar to that of PPO (without critic).

![image](https://github.com/user-attachments/assets/16aebad1-0da6-4eb3-806d-54a74e712c2d)

- `actor_rollout_ref.rollout.n`: For each prompt, sample n times. Defaults to 1. For GRPO, please set it to a value larger than 1 for group sampling.
- `data.train_batch_size`: The global batch size of prompts used to generate a set of sampled trajectories/rollouts. The number of responses/trajectories is `data.train_batch_size * actor_rollout_ref.rollout.n`
- `actor_rollout_ref.actor.ppo_mini_batch_size`: The set of sampled trajectories is split into multiple mini-batches with batch_size=ppo_mini_batch_size for PPO actor updates. The ppo_mini_batch_size is a global size across all workers.
- `actor_rollout_ref.actor.ppo_epochs`: Number of epochs for GRPO updates on one set of sampled trajectories for the actor
- `actor_rollout_ref.actor.clip_ratio`: The GRPO clip range. Defaults to 0.2
- `algorithm.adv_estimator`: Default is gae. Please set it to grpo instead
- `actor_rollout_ref.actor.loss_agg_mode`: Default is "token-mean". Options include "token-mean", "seq-mean-token-sum", "seq-mean-token-mean". The original GRPO paper takes the sample-level loss (seq-mean-token-mean), which may be unstable in long-CoT scenarios. All GRPO example scripts provided in verl use the default configuration "token-mean" for loss aggregation instead.

Instead of adding a KL penalty to the reward, GRPO regularizes by directly adding the KL divergence between the trained policy and the reference policy to the loss:

- `actor_rollout_ref.actor.use_kl_loss`: Whether to use a KL loss in the actor. When enabled, KL is not applied in the reward function. Default is False. Please set it to True for GRPO.
- `actor_rollout_ref.actor.kl_loss_coef`: The coefficient of the KL loss. Default is 0.001.
- `actor_rollout_ref.actor.kl_loss_type`: How to calculate the KL divergence between the actor and the reference policy. Supports kl (k1), abs, mse (k2), low_var_kl (k3), and full. Appending "+" at the end (e.g., 'k1+' and 'k3+') applies a straight-through trick that uses k2 for the gradient, giving an unbiased gradient estimate regardless of which KL value estimator is used (see https://github.com/volcengine/verl/pull/2953#issuecomment-3162113848 for more details). A minimal sketch of these estimators follows.
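As a quick reading aid, here is a minimal per-token sketch of these estimators, assuming `logprob` and `ref_logprob` are per-token log-probabilities from the actor and the reference policy. It is an illustration only, not verl's actual implementation (which also handles masking and the straight-through "+" variants):

```python
import torch

def kl_penalty(logprob: torch.Tensor, ref_logprob: torch.Tensor, kl_type: str) -> torch.Tensor:
    """Per-token estimators of KL(actor || ref)."""
    log_ratio = logprob - ref_logprob  # log(pi_actor / pi_ref) per token
    if kl_type == "kl":  # k1: unbiased but high variance
        return log_ratio
    if kl_type == "abs":
        return log_ratio.abs()
    if kl_type == "mse":  # k2: 0.5 * (log ratio)^2, biased but low variance
        return 0.5 * log_ratio.square()
    if kl_type == "low_var_kl":  # k3: exp(-x) + x - 1, non-negative, low variance
        return torch.expm1(-log_ratio) + log_ratio
    raise ValueError(f"unsupported kl_loss_type: {kl_type}")
```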
See this blog post for detailed analysis: http://joschu.net/blog/kl-approx.html

## Advanced Extensions

### DrGRPO

The work [Understanding R1-Zero-Like Training: A Critical Perspective](https://arxiv.org/pdf/2503.20783) claims there is an optimization bias in GRPO that leads to artificially longer responses, especially for incorrect outputs. This inefficiency stems from the way GRPO calculates advantages using group-based reward normalization, which can inadvertently favor longer, less accurate responses. Instead, DrGRPO aggregates token-level losses by normalizing with a global constant to eliminate length bias. Configure the following to enable DrGRPO, with all other parameters the same as GRPO's:

- `actor_rollout_ref.actor.loss_agg_mode`: "seq-mean-token-sum-norm", which turns off seq-dim averaging
- `actor_rollout_ref.actor.use_kl_loss`: Please set it to False for DrGRPO
- `algorithm.norm_adv_by_std_in_grpo`: False, which turns off standard deviation norm

## Reference Example

Qwen2.5 GRPO training log and commands: [link](https://github.com/eric-haibin-lin/verl-data/blob/experiments/gsm8k/qwen2-7b-fsdp2.log)

```bash
bash examples/grpo_trainer/run_qwen3-8b.sh
```

For more reference performance, please see https://verl.readthedocs.io/en/latest/algo/baseline.html

================================================ FILE: verl_distillation/examples/grpo_trainer/run_deepseek671b_math_megatron_80gb.sh ================================================ set -x # # 0. download HF checkpoint # # remove the `quantization_config` in the `config.json` # # set `num_nextn_predict_layers=0` to disable MTP, which is not currently supported # huggingface-cli download deepseek-ai/DeepSeek-V3-0324 # no offline dist checkpoint needed, now with mbridge>=0.13.0, we can directly init model from huggingface downloaded fp8 weights # tested on docker://verlai/verl:app-verl0.5-transformers4.55.4-vllm0.10.0-mcore0.13.0-te2.2 LLM="" # 1. set LLM to the local path of the downloaded checkpoint # 2. run the script gsm8k_train_path=/root/data/gsm8k/train.parquet gsm8k_test_path=/root/data/gsm8k/test.parquet train_files=$gsm8k_train_path test_files=$gsm8k_test_path ALL_OFFLOAD=${ALL_OFFLOAD:-True} COMMON_PARAM_OFFLOAD=${COMMON_PARAM_OFFLOAD:-$ALL_OFFLOAD} COMMON_GRAD_OFFLOAD=${COMMON_GRAD_OFFLOAD:-$ALL_OFFLOAD} COMMON_OPTIMIZER_OFFLOAD=${COMMON_OPTIMIZER_OFFLOAD:-$ALL_OFFLOAD} ACTOR_PARAM_OFFLOAD=${ACTOR_PARAM_OFFLOAD:-$COMMON_PARAM_OFFLOAD} ACTOR_GRAD_OFFLOAD=${ACTOR_GRAD_OFFLOAD:-$COMMON_GRAD_OFFLOAD} ACTOR_OPTIMIZER_OFFLOAD=${ACTOR_OPTIMIZER_OFFLOAD:-$COMMON_OPTIMIZER_OFFLOAD} REF_PARAM_OFFLOAD=${REF_PARAM_OFFLOAD:-$COMMON_PARAM_OFFLOAD} CRITIC_PARAM_OFFLOAD=${CRITIC_PARAM_OFFLOAD:-$COMMON_PARAM_OFFLOAD} CRITIC_GRAD_OFFLOAD=${CRITIC_GRAD_OFFLOAD:-$COMMON_GRAD_OFFLOAD} CRITIC_OPTIMIZER_OFFLOAD=${CRITIC_OPTIMIZER_OFFLOAD:-$COMMON_OPTIMIZER_OFFLOAD} RM_PARAM_OFFLOAD=${RM_PARAM_OFFLOAD:-$COMMON_PARAM_OFFLOAD} # 256 H100(80GB) NODES=32 PP=16 TP=1 EP=16 ETP=1 INFER_TP=32 # consider TP/ETP, and enable recompute if short of memory # full recompute n_resp_per_prompt=4 max_prompt_length=2048 max_response_length=4096 use_dynamic_bsz=True actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 1)) infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 3)) use_kl_in_reward=False kl_coef=0.0 use_kl_loss=True kl_loss_coef=0.001 # RAY_ADDRESS='auto' ray job submit --working-dir . 
-- python3 -m verl.trainer.main_ppo --config-path=./config --config-name='ppo_megatron_trainer'\ algorithm.adv_estimator=grpo \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=512 \ data.max_prompt_length=$max_prompt_length \ data.max_response_length=$max_response_length \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=$LLM \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=128 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=1 \ actor_rollout_ref.actor.use_torch_compile=False \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=1 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.8 \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ actor_rollout_ref.rollout.temperature=1.0 \ actor_rollout_ref.rollout.top_p=1.0 \ actor_rollout_ref.rollout.top_k=-1 \ actor_rollout_ref.rollout.tensor_model_parallel_size=$INFER_TP \ trainer.logger='["console","tensorboard"]' \ trainer.project_name='verl_megatron_gsm8k_examples' \ trainer.experiment_name='dsv3-32nodes' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=$NODES \ trainer.save_freq=-1 \ trainer.test_freq=5 \ actor_rollout_ref.model.use_fused_kernels=True \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.actor.megatron.override_transformer_config.attention_backend='fused' \ +actor_rollout_ref.actor.megatron.override_transformer_config.num_layers_in_first_pipeline_stage=4 \ +actor_rollout_ref.actor.megatron.override_transformer_config.num_layers_in_last_pipeline_stage=1 \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=$PP \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=$PP \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=$TP \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=$TP \ actor_rollout_ref.actor.megatron.expert_model_parallel_size=$EP \ actor_rollout_ref.ref.megatron.expert_model_parallel_size=$EP \ actor_rollout_ref.actor.megatron.expert_tensor_parallel_size=$ETP \ actor_rollout_ref.ref.megatron.expert_tensor_parallel_size=$ETP \ actor_rollout_ref.actor.megatron.param_offload=${ACTOR_PARAM_OFFLOAD} \ actor_rollout_ref.actor.megatron.optimizer_offload=${ACTOR_OPTIMIZER_OFFLOAD} \ actor_rollout_ref.actor.megatron.grad_offload=${ACTOR_GRAD_OFFLOAD} \ actor_rollout_ref.ref.megatron.param_offload=${REF_PARAM_OFFLOAD} \ +actor_rollout_ref.actor.megatron.override_transformer_config.recompute_method=uniform \ +actor_rollout_ref.actor.megatron.override_transformer_config.recompute_granularity=full \ +actor_rollout_ref.actor.megatron.override_transformer_config.recompute_num_layers=1 \ actor_rollout_ref.actor.megatron.use_mbridge=True \ trainer.default_local_dir=$CKPT_DIR \ trainer.val_before_train=False \ trainer.total_epochs=100 $@ ================================================ FILE: 
verl_distillation/examples/grpo_trainer/run_deepseek671b_math_megatron_96gb.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail ## !!!!!!!important!!!!!! # 1. set the following environment variables on all your nodes # env_vars: # CUDA_DEVICE_MAX_CONNECTIONS: "1" # NCCL_NVLS_ENABLE: "0" # VLLM_USE_V1: 1 # 2. install mbridge=0.1.13 on all your nodes with the following command: # pip3 install git+https://github.com/ISEEKYAN/mbridge # 3. remove the `quantization_config` in the DeepSeek-V3's `config.json` and # set `num_nextn_predict_layers=0` to disable MTP, which is not currently supported SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" [ -f "${SCRIPT_DIR}/env.sh" ] && source "${SCRIPT_DIR}/env.sh" adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=True kl_loss_coef=0.001 clip_ratio_low=0.2 clip_ratio_high=0.28 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 8)) enable_overlong_buffer=True overlong_buffer_len=$((1024 * 4)) overlong_penalty_factor=1.0 loss_agg_mode="token-mean" train_prompt_bsz=96 n_resp_per_prompt=8 train_prompt_mini_bsz=32 # minimum nodes for DeepSeek-V3: 12 nodes NNODES=${NNODES:-12} RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} MODEL_PATH=$RAY_DATA_HOME/models/DeepSeek-V3-config-verl TRAIN_FILE=$RAY_DATA_HOME/dataset/dapo-math-17k.parquet TEST_FILE=$RAY_DATA_HOME/dataset/aime-2024.parquet # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_top_p=0.7 # Performance Related Parameter use_dynamic_bsz=True actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 10 / 10)) infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 1)) offload=True optim_offload=${OFFLOAD_OPTIM:-True} gen_tp=32 train_tp=${TP:-8} train_pp=${PP:-12} EP=${EP:-8} ETP=1 CP=1 optimizer_offload_fraction=${OFFLOAD_FRACTION:-1.} LAST_LAYER=${LAST_LAYER:-6} project_name='verl-deepseek-v3' exp_name="671B-${NNODES}-pp${train_pp}-tp${train_tp}-ep${EP}-actor-length${actor_ppo_max_token_len}" CKPTS_DIR=$RAY_DATA_HOME/ckpt/${project_name}/${exp_name} python3 -m verl.trainer.main_ppo \ --config-path=config \ --config-name='ppo_megatron_trainer.yaml' \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.train_batch_size=${train_prompt_bsz} \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ actor_rollout_ref.rollout.name=vllm \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.model.use_fused_kernels=True \ actor_rollout_ref.actor.megatron.use_mbridge=True \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=2 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ 
actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ +actor_rollout_ref.actor.optim.override_optimizer_config.optimizer_offload_fraction=${optimizer_offload_fraction} \ +actor_rollout_ref.actor.optim.override_optimizer_config.overlap_cpu_optimizer_d2h_h2d=True \ +actor_rollout_ref.actor.optim.override_optimizer_config.use_precision_aware_optimizer=True \ +actor_rollout_ref.actor.optim.override_optimizer_config.optimizer_cpu_offload=True \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.megatron.param_offload=${offload} \ actor_rollout_ref.actor.megatron.optimizer_offload=${optim_offload} \ actor_rollout_ref.actor.megatron.grad_offload=${offload} \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=${train_pp} \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=${train_tp} \ actor_rollout_ref.actor.megatron.expert_model_parallel_size=$EP \ actor_rollout_ref.actor.megatron.expert_tensor_parallel_size=$ETP \ actor_rollout_ref.actor.megatron.context_parallel_size=${CP} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.optim.clip_grad=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k=${top_k} \ actor_rollout_ref.nccl_timeout=1200 \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=True \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.rollout.enforce_eager=True \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=${train_pp} \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=${train_tp} \ actor_rollout_ref.ref.megatron.expert_model_parallel_size=$EP \ actor_rollout_ref.ref.megatron.expert_tensor_parallel_size=$ETP \ actor_rollout_ref.ref.megatron.context_parallel_size=${CP} \ actor_rollout_ref.ref.megatron.param_offload=${offload} \ +actor_rollout_ref.actor.megatron.override_transformer_config.apply_rope_fusion=False \ +actor_rollout_ref.actor.megatron.override_transformer_config.moe_router_dtype=fp32 \ +actor_rollout_ref.actor.megatron.override_transformer_config.moe_shared_expert_overlap=False \ +actor_rollout_ref.actor.megatron.override_transformer_config.moe_enable_deepep=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.moe_token_dispatcher_type=flex \ +actor_rollout_ref.actor.megatron.override_transformer_config.recompute_method=uniform \ +actor_rollout_ref.actor.megatron.override_transformer_config.recompute_granularity=full \ +actor_rollout_ref.actor.megatron.override_transformer_config.recompute_num_layers=1 \ +actor_rollout_ref.actor.megatron.override_transformer_config.gradient_accumulation_fusion=True \ 
+actor_rollout_ref.actor.megatron.override_transformer_config.moe_permute_fusion=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.account_for_embedding_in_pipeline_split=False \ +actor_rollout_ref.actor.megatron.override_transformer_config.account_for_loss_in_pipeline_split=False \ +actor_rollout_ref.actor.megatron.override_transformer_config.num_layers_in_last_pipeline_stage=${LAST_LAYER} \ reward_model.reward_manager=dapo \ +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \ +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \ +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \ +reward_model.reward_kwargs.overlong_buffer_cfg.log=False \ +reward_model.reward_kwargs.max_resp_len=${max_response_length} \ trainer.logger=['console','wandb'] \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node=8 \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=False \ trainer.test_freq=10 \ trainer.save_freq=100 \ trainer.total_epochs=10 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=auto \ trainer.log_val_generations=10 ================================================ FILE: verl_distillation/examples/grpo_trainer/run_deepseek7b_llm.sh ================================================ set -x python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=1024 \ data.max_prompt_length=512 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=deepseek-ai/deepseek-llm-7b-chat \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=80 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=160 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=160 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger=console \ trainer.project_name='verl_grpo_example_gsm8k' \ trainer.experiment_name='deepseek_llm_7b_function_rm' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/grpo_trainer/run_deepseek7b_llm_math.sh ================================================ set -x gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet math_train_path=$HOME/data/math/train.parquet math_test_path=$HOME/data/math/test.parquet train_files="['$gsm8k_train_path', '$math_train_path']" test_files="['$gsm8k_test_path', '$math_test_path']" python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ 
data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=deepseek-ai/deepseek-llm-7b-chat \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=40 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=40 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=40 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_gsm8k_math' \ trainer.experiment_name='deepseek_llm_7b_function_rm_math' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/grpo_trainer/run_deepseek7b_llm_math_megatron.sh ================================================ set -x export CUDA_DEVICE_MAX_CONNECTIONS=1 # For megatron communication/computation overlapping gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet math_train_path=$HOME/data/math/train.parquet math_test_path=$HOME/data/math/test.parquet train_files="['$gsm8k_train_path', '$math_train_path']" test_files="['$gsm8k_test_path', '$math_test_path']" python3 -m verl.trainer.main_ppo --config-path=config \ --config-name='ppo_megatron_trainer.yaml'\ algorithm.adv_estimator=grpo \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=deepseek-ai/deepseek-llm-7b-chat \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=2 \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=2 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=2 \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=2 \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ 
trainer.project_name='verl_grpo_example_gsm8k_math' \ trainer.experiment_name='deepseek_llm_7b_math_megatron' \ trainer.n_gpus_per_node=16 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/grpo_trainer/run_deepseek7b_llm_seq_balance.sh ================================================ set -x python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=1024 \ data.max_prompt_length=512 \ data.max_response_length=512 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=deepseek-ai/deepseek-llm-7b-chat \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.use_dynamic_bsz=True \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=24000 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_gsm8k' \ trainer.experiment_name='deepseek_llm_7b_function_rm_seq_packing' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/grpo_trainer/run_glm41v_9b.sh ================================================ set -x ENGINE=${1:-vllm} python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/geo3k/train.parquet \ data.val_files=$HOME/data/geo3k/test.parquet \ data.train_batch_size=512 \ data.max_prompt_length=1024 \ data.max_response_length=2048 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.image_key=images \ actor_rollout_ref.model.path=zai-org/GLM-4.1V-9B-Thinking \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=128 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=10 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.01 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=20 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=$ENGINE \ +actor_rollout_ref.rollout.engine_kwargs.vllm.disable_mm_preprocessor_cache=True \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.enable_chunked_prefill=False \ actor_rollout_ref.rollout.enforce_eager=False \ 
actor_rollout_ref.rollout.free_cache_engine=True \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=20 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_geo3k' \ trainer.experiment_name='glm41v_9b_function_rm' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/grpo_trainer/run_gptoss_20b.sh ================================================ #!/bin/bash cat > get_model.py << EOF import torch from transformers import AutoModelForCausalLM, AutoTokenizer, Mxfp4Config model_id = "openai/gpt-oss-20b" output_dir = "$HOME/models/gpt-oss-20b-bf16" quantization_config = Mxfp4Config(dequantize=True) model_kwargs = dict( attn_implementation="eager", torch_dtype=torch.bfloat16, quantization_config=quantization_config, use_cache=False, device_map="auto", ) model = AutoModelForCausalLM.from_pretrained(model_id, **model_kwargs) # Patch config with custom attribute before saving model.config.attn_implementation = "eager" model.save_pretrained(output_dir) tokenizer = AutoTokenizer.from_pretrained(model_id) tokenizer.save_pretrained(output_dir) EOF python get_model.py # or you can use lmsys/gpt-oss-20b-bf16 # recommended: use the same value for train_batch_size and ppo_mini_batch_size # to avoid MoE training instability # use a large max_response_length if you want to use a high reasoning effort. model_dir=$HOME/models/gpt-oss-20b-bf16 gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files="$gsm8k_train_path" \ data.val_files="$gsm8k_test_path" \ data.train_batch_size=256 \ data.max_prompt_length=512 \ data.max_response_length=8192 \ data.filter_overlong_prompts=True \ data.truncation='error' \ +data.apply_chat_template_kwargs.reasoning_effort=medium \ actor_rollout_ref.model.path=${model_dir} \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=32 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ +actor_rollout_ref.actor.fsdp_config.model_dtype=bfloat16 \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=sglang \ actor_rollout_ref.rollout.mode=sync \ actor_rollout_ref.rollout.engine_kwargs.sglang.attention_backend=triton \ actor_rollout_ref.rollout.gpu_memory_utilization=0.7 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.rollout.load_format=safetensors \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_gsm8k_math' \ trainer.experiment_name='oai_oss_20b_function_rm' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=50 \ trainer.test_freq=10 \ trainer.total_epochs=15 $@ 
================================================ FILE: verl_distillation/examples/grpo_trainer/run_minicpmo2_6.sh ================================================ set -x python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/geo3k/train.parquet \ data.val_files=$HOME/data/geo3k/test.parquet \ data.train_batch_size=128 \ data.max_prompt_length=1024 \ data.max_response_length=2048 \ data.filter_overlong_prompts=False \ data.truncation='error' \ data.image_key=images \ data.trust_remote_code=True \ data.custom_cls.path=recipe/minicpmo/rl_dataset.py \ data.custom_cls.name=RLHFDataset \ actor_rollout_ref.model.path=openbmb/MiniCPM-o-2_6 \ actor_rollout_ref.model.trust_remote_code=True \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=32 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=16 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.use_dynamic_bsz=False \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.actor.fsdp_config.use_orig_params=True \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.enable_chunked_prefill=False \ actor_rollout_ref.rollout.enforce_eager=False \ actor_rollout_ref.rollout.free_cache_engine=False \ actor_rollout_ref.rollout.n=8 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=16 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.kl_ctrl.kl_coef=0.001 \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_geo3k' \ trainer.experiment_name='minicpmo2_6_function_rm' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/grpo_trainer/run_mistral13b_skyworkrm_hhrlhf.sh ================================================ train_files=data/full_hh_rlhf/rl/train.parquet test_files=data/full_hh_rlhf/rl/train.parquet # not used: validation is disabled (trainer.test_freq=-1) max_prompt_length=4096 max_response_length=2048 gen_tp=4 n_per_prompt=5 adv_estimator="grpo" project_name=verl_full_hh_rlhf_examples exp_name="grpo_mistral13B-skyworkLlama8b-hhrlhf" python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=$adv_estimator \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=512 \ data.prompt_key="prompt" \ data.return_raw_chat=True \ data.max_prompt_length=$max_prompt_length \ data.max_response_length=$max_response_length \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=mistralai/Mistral-Nemo-Instruct-2407 \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=128 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=10 \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=10 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.n=$n_per_prompt \ actor_rollout_ref.rollout.gpu_memory_utilization=0.7 \ 
actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ reward_model.enable=True \ reward_model.model.fsdp_config.param_offload=True \ reward_model.model.path=Skywork/Skywork-Reward-Llama-3.1-8B \ reward_model.model.input_tokenizer=mistralai/Mistral-Nemo-Instruct-2407 \ reward_model.micro_batch_size_per_gpu=4 \ algorithm.use_kl_in_reward=False \ trainer.logger='["console","wandb"]' \ trainer.val_before_train=False \ trainer.project_name=$project_name \ trainer.experiment_name=$exp_name \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=10 \ trainer.test_freq=-1 \ trainer.total_epochs=5 $@ ================================================ FILE: verl_distillation/examples/grpo_trainer/run_moonlight16b_math_megatron.sh ================================================ set -x export CUDA_DEVICE_MAX_CONNECTIONS=1 # For megatron communication/computation overlapping HF_MODEL_PATH=moonshotai/Moonlight-16B-A3B DIST_CKPT_PATH=${DIST_CKPT_PATH} train_path=$HOME/data/gsm8k/train.parquet test_path=$HOME/data/gsm8k/test.parquet python3 -m verl.trainer.main_ppo --config-path=config \ --config-name='ppo_megatron_trainer.yaml'\ algorithm.adv_estimator=grpo \ data.train_files="$train_path" \ data.val_files="$test_path" \ data.train_batch_size=192 \ data.max_prompt_length=1024 \ data.max_response_length=2048 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.trust_remote_code=True \ actor_rollout_ref.model.path=$HF_MODEL_PATH \ actor_rollout_ref.model.trust_remote_code=True \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=64 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=16 \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=3 \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=4 \ actor_rollout_ref.actor.megatron.expert_model_parallel_size=4 \ actor_rollout_ref.actor.megatron.expert_tensor_parallel_size=1 \ actor_rollout_ref.actor.megatron.use_dist_checkpointing=True \ actor_rollout_ref.actor.megatron.dist_checkpointing_path=$DIST_CKPT_PATH \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \ actor_rollout_ref.rollout.tensor_model_parallel_size=4 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=16 \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=3 \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=4 \ actor_rollout_ref.ref.megatron.expert_model_parallel_size=4 \ actor_rollout_ref.ref.megatron.expert_tensor_parallel_size=1 \ actor_rollout_ref.ref.megatron.use_dist_checkpointing=True \ actor_rollout_ref.ref.megatron.dist_checkpointing_path=$DIST_CKPT_PATH \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_gsm8k_math' \ trainer.experiment_name='moonlight_megatron_ep' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=3 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/grpo_trainer/run_qwen2-7b.sh ================================================ set -x python3 -m verl.trainer.main_ppo \ 
algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=1024 \ data.max_prompt_length=512 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2-7B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=40 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=40 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=40 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_gsm8k' \ trainer.experiment_name='qwen2_7b_function_rm' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/grpo_trainer/run_qwen2-7b_math.sh ================================================ set -x gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet math_train_path=$HOME/data/math/train.parquet math_test_path=$HOME/data/math/test.parquet train_files="['$gsm8k_train_path', '$math_train_path']" test_files="['$gsm8k_test_path', '$math_test_path']" python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2-7B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=16 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=16 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_gsm8k_math' \ trainer.experiment_name='qwen2_7b_function_rm' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ 
trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/grpo_trainer/run_qwen2-7b_math_megatron.sh ================================================ set -x export CUDA_DEVICE_MAX_CONNECTIONS=1 # For megatron communication/computation overlapping rollout_mode="sync" return_raw_chat="False" if [ "$rollout_mode" = "async" ]; then export VLLM_USE_V1=1 return_raw_chat="True" fi gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet math_train_path=$HOME/data/math/train.parquet math_test_path=$HOME/data/math/test.parquet train_files="['$gsm8k_train_path', '$math_train_path']" test_files="['$gsm8k_test_path', '$math_test_path']" USE_FUSED_KERNELS=True python3 -m verl.trainer.main_ppo --config-path=config \ --config-name='ppo_megatron_trainer.yaml'\ algorithm.adv_estimator=grpo \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.return_raw_chat=$return_raw_chat \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2-7B-Instruct \ actor_rollout_ref.model.use_fused_kernels=$USE_FUSED_KERNELS \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=2 \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=2 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.mode=$rollout_mode \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=2 \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=2 \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_gsm8k_math' \ trainer.experiment_name='qwen2_7b_megatron' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/grpo_trainer/run_qwen2-7b_seq_balance.sh ================================================ set -x # For async rollout mode, dataset should return raw chat. 
rollout_mode="async" rollout_name="sglang" # sglang or vllm if [ "$rollout_mode" = "async" ]; then export VLLM_USE_V1=1 return_raw_chat="True" fi python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.return_raw_chat=$return_raw_chat \ data.train_batch_size=1024 \ data.max_prompt_length=512 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2-7B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.use_dynamic_bsz=True \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=24000 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=$rollout_name \ actor_rollout_ref.rollout.mode=$rollout_mode \ actor_rollout_ref.rollout.multi_turn.format=hermes \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_gsm8k' \ trainer.experiment_name='qwen2_7b_function_rm_kl1e-3' \ trainer.val_before_train=False \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/grpo_trainer/run_qwen2-7b_seq_balance_math_megatron.sh ================================================ set -x export CUDA_DEVICE_MAX_CONNECTIONS=1 # For megatron communication/computation overlapping gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet math_train_path=$HOME/data/math/train.parquet math_test_path=$HOME/data/math/test.parquet train_files="['$gsm8k_train_path', '$math_train_path']" test_files="['$gsm8k_test_path', '$math_test_path']" offload=True python3 -m verl.trainer.main_ppo --config-path=config \ --config-name='ppo_megatron_trainer.yaml'\ algorithm.adv_estimator=grpo \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2-7B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.use_dynamic_bsz=True \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=12000 \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=2 \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=2 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.megatron.param_offload=${offload} \ actor_rollout_ref.actor.megatron.optimizer_offload=${offload} \ actor_rollout_ref.actor.megatron.grad_offload=${offload} \ 
actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.ref.megatron.param_offload=${offload} \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=2 \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=2 \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_gsm8k_math' \ trainer.experiment_name='qwen2_7b_megatron' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/grpo_trainer/run_qwen2-7b_sgl_megatron.sh ================================================ set -x gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet math_train_path=$HOME/data/math/train.parquet math_test_path=$HOME/data/math/test.parquet train_files="['$gsm8k_train_path', '$math_train_path']" test_files="['$gsm8k_test_path', '$math_test_path']" python3 -m verl.trainer.main_ppo --config-path=config \ --config-name='ppo_megatron_trainer.yaml'\ algorithm.adv_estimator=grpo \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2-7B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=2 \ actor_rollout_ref.actor.megatron.virtual_pipeline_model_parallel_size=2 \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=4 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=sglang \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_gsm8k' \ trainer.experiment_name='qwen2_7b_function_rm_megatron' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/grpo_trainer/run_qwen2_5-3b_gsm8k_grpo_lora.sh ================================================ set -x python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ trainer.val_before_train=False \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=16 \ data.max_prompt_length=512 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.shuffle=False \ actor_rollout_ref.model.path=Qwen/Qwen2.5-3B-Instruct \ actor_rollout_ref.model.lora_rank=64 \ actor_rollout_ref.model.lora_alpha=32 \ 
actor_rollout_ref.actor.optim.lr=3e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=16 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=40 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=40 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.rollout.load_format=safetensors \ actor_rollout_ref.rollout.layered_summon=True \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=40 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_gsm8k' \ trainer.experiment_name='qwen2.5_3b_grpo_lora' \ trainer.n_gpus_per_node=2 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ # actor_rollout_ref.actor.ppo_mini_batch_size=256 \ # data.train_batch_size=1024 \ # trainer.n_gpus_per_node=8 \ # actor_rollout_ref.model.use_shm=True \ ================================================ FILE: verl_distillation/examples/grpo_trainer/run_qwen2_5-3b_gsm8k_grpo_lora_from_adapter.sh ================================================ set -x lora_adapter_path=${lora_adapter_path:-/path/saved/lora_adapter} python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=1024 \ data.max_prompt_length=512 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.shuffle=False \ actor_rollout_ref.model.path=Qwen/Qwen2.5-3B-Instruct \ actor_rollout_ref.model.use_shm=True \ actor_rollout_ref.model.lora_adapter_path=${lora_adapter_path} \ actor_rollout_ref.actor.optim.lr=3e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=40 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=40 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.rollout.load_format=safetensors \ actor_rollout_ref.rollout.layered_summon=True \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=40 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_gsm8k' \ trainer.experiment_name='qwen2.5_3b_grpo_lora' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ 
trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/grpo_trainer/run_qwen2_5-7b_math_megatron_diff_tp.sh ================================================ set -x export CUDA_DEVICE_MAX_CONNECTIONS=1 # For megatron communication/computation overlapping gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet math_train_path=$HOME/data/math/train.parquet math_test_path=$HOME/data/math/test.parquet train_files="['$gsm8k_train_path', '$math_train_path']" test_files="['$gsm8k_test_path', '$math_test_path']" python3 -m verl.trainer.main_ppo --config-path=config \ --config-name='ppo_megatron_trainer.yaml'\ algorithm.adv_estimator=grpo \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2.5-7B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=2 \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=2 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.rollout.tensor_model_parallel_size=4 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=2 \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=2 \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_gsm8k_math' \ trainer.experiment_name='qwen2_7b_megatron' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/grpo_trainer/run_qwen2_5_32b_grpo_npu.sh ================================================ set -x python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2.5-32B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6\ actor_rollout_ref.model.use_remove_padding=False \ actor_rollout_ref.actor.ppo_mini_batch_size=128 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=2 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=2 \ actor_rollout_ref.rollout.tensor_model_parallel_size=8 \ actor_rollout_ref.rollout.name=vllm \ 
actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=2 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger=console \ trainer.project_name='verl_grpo_example_gsm8k' \ trainer.experiment_name='qwen2_5_32b_function_rm' \ trainer.n_gpus_per_node=16 \ trainer.nnodes=2 \ trainer.save_freq=-1 \ trainer.test_freq=10 \ trainer.total_epochs=15 \ trainer.device=npu $@ ================================================ FILE: verl_distillation/examples/grpo_trainer/run_qwen2_5_7b_grpo_discrete_prof_npu.sh ================================================ set -x # profiling configuration PROFILE_STEPS="[2,4]" PROFILE_RANKS_ALL=False DISCRETE=True PROFILE_RANKS="[1,2]" # profiling NPU options SAVE_PATH="$HOME/profile_data" LEVEL="level1" CONTENTS=['npu','cpu'] ANALYSIS=True python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=32 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2.5-7B-Instruct \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.model.use_remove_padding=False \ actor_rollout_ref.actor.optim.lr=5e-8 \ actor_rollout_ref.actor.ppo_mini_batch_size=2 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=1 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.actor.profiler.enable=True \ actor_rollout_ref.actor.profiler.ranks=$PROFILE_RANKS \ actor_rollout_ref.actor.profiler.all_ranks=$PROFILE_RANKS_ALL \ actor_rollout_ref.actor.profiler.tool_config.npu.discrete=$DISCRETE \ actor_rollout_ref.actor.profiler.tool_config.npu.contents=$CONTENTS \ actor_rollout_ref.actor.profiler.tool_config.npu.level=$LEVEL \ actor_rollout_ref.actor.profiler.tool_config.npu.analysis=$ANALYSIS \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=1 \ actor_rollout_ref.rollout.tensor_model_parallel_size=4 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.3 \ actor_rollout_ref.rollout.n=4 \ actor_rollout_ref.rollout.enable_chunked_prefill=False \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=1 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ actor_rollout_ref.ref.profiler.enable=True \ actor_rollout_ref.ref.profiler.ranks=$PROFILE_RANKS \ actor_rollout_ref.ref.profiler.all_ranks=$PROFILE_RANKS_ALL \ actor_rollout_ref.ref.profiler.tool_config.npu.discrete=$DISCRETE \ actor_rollout_ref.ref.profiler.tool_config.npu.contents=$CONTENTS \ actor_rollout_ref.ref.profiler.tool_config.npu.level=$LEVEL \ actor_rollout_ref.ref.profiler.tool_config.npu.analysis=$ANALYSIS \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger=console \ trainer.project_name='verl_grpo_example_gsm8k' \ trainer.experiment_name='qwen2_5_7b_function_rm' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=5 \ trainer.total_epochs=5 \ trainer.device=npu \ global_profiler.tool=npu \ global_profiler.steps=$PROFILE_STEPS \ 
global_profiler.save_path=$SAVE_PATH $@ ================================================ FILE: verl_distillation/examples/grpo_trainer/run_qwen2_5_7b_grpo_e2e_prof_npu.sh ================================================ set -x # profiling configuration PROFILE_STEPS="[2,4]" PROFILE_RANKS_ALL=True DISCRETE=False # profiling NPU options SAVE_PATH="$HOME/profile_data" LEVEL="level1" CONTENTS=['npu','cpu'] ANALYSIS=True python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=32 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2.5-7B-Instruct \ actor_rollout_ref.actor.optim.lr=5e-8 \ actor_rollout_ref.model.use_remove_padding=False \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.ppo_mini_batch_size=2 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=1 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.profiler.enable=True \ actor_rollout_ref.actor.profiler.all_ranks=$PROFILE_RANKS_ALL \ actor_rollout_ref.actor.profiler.tool_config.npu.discrete=$DISCRETE \ actor_rollout_ref.actor.profiler.tool_config.npu.contents=$CONTENTS \ actor_rollout_ref.actor.profiler.tool_config.npu.level=$LEVEL \ actor_rollout_ref.actor.profiler.tool_config.npu.analysis=$ANALYSIS \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=1 \ actor_rollout_ref.rollout.tensor_model_parallel_size=4 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.3 \ actor_rollout_ref.rollout.n=4 \ actor_rollout_ref.rollout.enable_chunked_prefill=False \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=1 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ actor_rollout_ref.ref.profiler.enable=True \ actor_rollout_ref.ref.profiler.all_ranks=$PROFILE_RANKS_ALL \ actor_rollout_ref.ref.profiler.tool_config.npu.discrete=$DISCRETE \ actor_rollout_ref.ref.profiler.tool_config.npu.contents=$CONTENTS \ actor_rollout_ref.ref.profiler.tool_config.npu.level=$LEVEL \ actor_rollout_ref.ref.profiler.tool_config.npu.analysis=$ANALYSIS \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger=console \ trainer.project_name='verl_grpo_example_gsm8k' \ trainer.experiment_name='qwen2_5_7b_function_rm' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=5 \ trainer.total_epochs=5 \ trainer.device=npu \ global_profiler.tool=npu \ global_profiler.steps=$PROFILE_STEPS \ global_profiler.save_path=$SAVE_PATH $@ ================================================ FILE: verl_distillation/examples/grpo_trainer/run_qwen2_5_7b_grpo_npu.sh ================================================ set -x python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2.5-7B-Instruct \ actor_rollout_ref.actor.optim.lr=5e-8 \ 
actor_rollout_ref.model.use_remove_padding=False \ actor_rollout_ref.actor.ppo_mini_batch_size=32 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=2 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=2 \ actor_rollout_ref.rollout.tensor_model_parallel_size=4 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.3 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.rollout.enable_chunked_prefill=False \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=2 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger=console \ trainer.project_name='verl_grpo_example_gsm8k' \ trainer.experiment_name='qwen2_5_7b_function_rm' \ trainer.n_gpus_per_node=16 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=5 \ trainer.total_epochs=5 \ trainer.device=npu $@ ================================================ FILE: verl_distillation/examples/grpo_trainer/run_qwen2_5_vl-7b-megatron.sh ================================================ set -x ENGINE=${1:-vllm} export CUDA_DEVICE_MAX_CONNECTIONS=1 # For megatron communication/computation overlapping HF_MODEL_PATH=Qwen/Qwen2.5-VL-7B-Instruct DIST_CKPT_PATH=${DIST_CKPT_PATH} # convert the HF model to Megatron format offline # python scripts/converter_hf_to_mcore.py --hf_model_path $HF_MODEL_PATH --output_path $DIST_CKPT_PATH # megatron tuning guide: # 1. it is recommended to offload all states by setting ALL_OFFLOAD=True # 2. enable dynamic batch size by setting actor_rollout_ref.actor.use_dynamic_bsz=True ref.log_prob_use_dynamic_bsz=True rollout.log_prob_use_dynamic_bsz=True # 3. set ppo_max_token_len_per_gpu and log_prob_max_token_len_per_gpu as large as possible for better MFU (limited by GPU memory). Make sure ppo_max_token_len_per_gpu > max_prompt_length + max_response_length; if the sequence length is too long, increase the TP/PP size # 4.
if memory is very limited, enable full recompute, but MFU will be about 30% lower (a worked check of the token budget rule follows this script) # full recompute settings: # +actor_rollout_ref.actor.megatron.override_transformer_config.recompute_method=uniform \ # +actor_rollout_ref.actor.megatron.override_transformer_config.recompute_granularity=full \ # +actor_rollout_ref.actor.megatron.override_transformer_config.recompute_num_layers=1 \ ALL_OFFLOAD=${ALL_OFFLOAD:-True} COMMON_PARAM_OFFLOAD=${COMMON_PARAM_OFFLOAD:-$ALL_OFFLOAD} COMMON_GRAD_OFFLOAD=${COMMON_GRAD_OFFLOAD:-$ALL_OFFLOAD} COMMON_OPTIMIZER_OFFLOAD=${COMMON_OPTIMIZER_OFFLOAD:-$ALL_OFFLOAD} ACTOR_PARAM_OFFLOAD=${ACTOR_PARAM_OFFLOAD:-$COMMON_PARAM_OFFLOAD} ACTOR_GRAD_OFFLOAD=${ACTOR_GRAD_OFFLOAD:-$COMMON_GRAD_OFFLOAD} ACTOR_OPTIMIZER_OFFLOAD=${ACTOR_OPTIMIZER_OFFLOAD:-$COMMON_OPTIMIZER_OFFLOAD} REF_PARAM_OFFLOAD=${REF_PARAM_OFFLOAD:-$COMMON_PARAM_OFFLOAD} train_path=$HOME/data/geo3k/train.parquet test_path=$HOME/data/geo3k/test.parquet python3 -m verl.trainer.main_ppo --config-path=config \ --config-name='ppo_megatron_trainer.yaml'\ algorithm.adv_estimator=grpo \ data.train_files="$train_path" \ data.val_files="$test_path" \ data.train_batch_size=512 \ data.max_prompt_length=1024 \ data.max_response_length=2048 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=$HF_MODEL_PATH \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=128 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=1 \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=2 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.01 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=20 \ actor_rollout_ref.rollout.tensor_model_parallel_size=1 \ actor_rollout_ref.actor.use_dynamic_bsz=True \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=5120 \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=True \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=20480 \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=True \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=20480 \ actor_rollout_ref.rollout.name=$ENGINE \ +actor_rollout_ref.rollout.engine_kwargs.vllm.disable_mm_preprocessor_cache=True \ actor_rollout_ref.rollout.gpu_memory_utilization=0.8 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=20 \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=1 \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=2 \ actor_rollout_ref.actor.megatron.use_dist_checkpointing=True \ actor_rollout_ref.ref.megatron.use_dist_checkpointing=True \ actor_rollout_ref.actor.megatron.dist_checkpointing_path=$DIST_CKPT_PATH \ actor_rollout_ref.ref.megatron.dist_checkpointing_path=$DIST_CKPT_PATH \ actor_rollout_ref.actor.megatron.param_offload=${ACTOR_PARAM_OFFLOAD} \ actor_rollout_ref.actor.megatron.optimizer_offload=${ACTOR_OPTIMIZER_OFFLOAD} \ actor_rollout_ref.actor.megatron.grad_offload=${ACTOR_GRAD_OFFLOAD} \ actor_rollout_ref.ref.megatron.param_offload=${REF_PARAM_OFFLOAD} \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_geo3k' \ trainer.experiment_name='qwen2_5_vl_7b_megatron' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@
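# Sanity check for the dynamic-batch budgets above (illustrative arithmetic,
# assuming the packing rule from the tuning guide in the header):
#   ppo_max_token_len_per_gpu > max_prompt_length + max_response_length
#   5120 > 1024 + 2048 = 3072   -> holds, with room for one packed sequence
# and the gradient-free log-prob passes can afford a 4x larger budget:
#   log_prob_max_token_len_per_gpu = 20480 = 4 * 5120 (~6 packed sequences)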
================================================ FILE: verl_distillation/examples/grpo_trainer/run_qwen2_5_vl-7b-sglang.sh ================================================ set -x # python examples/data_preprocess/geo3k.py --local_dir ~/data/geo3k python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/geo3k/train.parquet \ data.val_files=$HOME/data/geo3k/test.parquet \ data.train_batch_size=512 \ data.max_prompt_length=1024 \ data.max_response_length=2048 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.image_key=images \ actor_rollout_ref.model.path=Qwen/Qwen2.5-VL-7B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=128 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=10 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.01 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=20 \ actor_rollout_ref.rollout.tensor_model_parallel_size=1 \ actor_rollout_ref.rollout.name=sglang \ +actor_rollout_ref.rollout.engine_kwargs.vllm.disable_mm_preprocessor_cache=True \ actor_rollout_ref.rollout.gpu_memory_utilization=0.85 \ actor_rollout_ref.rollout.multi_stage_wake_up=True \ global_profiler.tool=torch_memory \ global_profiler.save_path=./mem_snapshots \ global_profiler.global_tool_config.torch_memory.trace_alloc_max_entries=100000 \ global_profiler.global_tool_config.torch_memory.stack_depth=32 \ actor_rollout_ref.rollout.enable_chunked_prefill=False \ actor_rollout_ref.rollout.enforce_eager=False \ actor_rollout_ref.rollout.free_cache_engine=True \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=20 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ actor_rollout_ref.rollout.mode=sync \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_geo3k' \ trainer.experiment_name='qwen2_5_vl_7b_function_rm' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/grpo_trainer/run_qwen2_5_vl-7b.sh ================================================ set -x ENGINE=${1:-vllm} python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/geo3k/train.parquet \ data.val_files=$HOME/data/geo3k/test.parquet \ data.train_batch_size=512 \ data.max_prompt_length=1024 \ data.max_response_length=2048 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.image_key=images \ actor_rollout_ref.model.path=Qwen/Qwen2.5-VL-7B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.model.use_fused_kernels=True \ actor_rollout_ref.actor.ppo_mini_batch_size=128 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=10 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.01 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ 
actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=20 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=$ENGINE \ +actor_rollout_ref.rollout.engine_kwargs.vllm.disable_mm_preprocessor_cache=True \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.enable_chunked_prefill=False \ actor_rollout_ref.rollout.enforce_eager=False \ actor_rollout_ref.rollout.free_cache_engine=True \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=20 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_geo3k' \ trainer.experiment_name='qwen2_5_vl_7b_function_rm' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/grpo_trainer/run_qwen2_5_vl-7b_freeze_vision.sh ================================================ set -x ENGINE=${1:-vllm} python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/geo3k/train.parquet \ data.val_files=$HOME/data/geo3k/test.parquet \ data.train_batch_size=512 \ data.max_prompt_length=1024 \ data.max_response_length=2048 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.image_key=images \ actor_rollout_ref.model.path=Qwen/Qwen2.5-VL-7B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.freeze_vision_tower=True \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=128 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=10 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.01 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=20 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=$ENGINE \ +actor_rollout_ref.rollout.engine_kwargs.vllm.disable_mm_preprocessor_cache=True \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.enable_chunked_prefill=False \ actor_rollout_ref.rollout.enforce_eager=False \ actor_rollout_ref.rollout.free_cache_engine=True \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=20 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_geo3k' \ trainer.experiment_name='qwen2_5_vl_7b_function_rm' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/grpo_trainer/run_qwen2_5_vl-7b_lora.sh ================================================ set -x ENGINE=${1:-vllm} # If you are using vllm<=0.6.3, you might need to set the following environment variable to avoid bugs: # export VLLM_ATTENTION_BACKEND=XFORMERS python3 -m 
verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/geo3k/train.parquet \ data.val_files=$HOME/data/geo3k/test.parquet \ data.train_batch_size=512 \ data.max_prompt_length=1024 \ data.max_response_length=2048 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.image_key=images \ actor_rollout_ref.model.path=Qwen/Qwen2.5-VL-7B-Instruct \ actor_rollout_ref.actor.optim.lr=3e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=128 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=10 \ actor_rollout_ref.model.lora_rank=64 \ actor_rollout_ref.model.lora_alpha=32 \ actor_rollout_ref.model.target_modules=all-linear \ actor_rollout_ref.model.exclude_modules='.*visual.*' \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.01 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=20 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=$ENGINE \ +actor_rollout_ref.rollout.engine_kwargs.vllm.disable_mm_preprocessor_cache=True \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.enable_chunked_prefill=False \ actor_rollout_ref.rollout.enforce_eager=False \ actor_rollout_ref.rollout.free_cache_engine=False \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=20 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_geo3k' \ trainer.experiment_name='qwen2_5_vl_7b_function_rm' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/grpo_trainer/run_qwen2_5_vl-7b_seq_balance.sh ================================================ set -x ENGINE=${1:-vllm} python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/geo3k/train.parquet \ data.val_files=$HOME/data/geo3k/test.parquet \ data.train_batch_size=512 \ data.max_prompt_length=1024 \ data.max_response_length=2048 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.image_key=images \ actor_rollout_ref.model.path=Qwen/Qwen2.5-VL-7B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=128 \ actor_rollout_ref.actor.use_dynamic_bsz=True \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=6144 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.01 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=$ENGINE \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.enable_chunked_prefill=False \ actor_rollout_ref.rollout.enforce_eager=False \ 
actor_rollout_ref.rollout.free_cache_engine=False \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=6144 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_geo3k' \ trainer.experiment_name='qwen2_5_vl_7b_function_rm' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/grpo_trainer/run_qwen2_5_vl_32b_npu.sh ================================================ set -x ENGINE=${1:-vllm} # Some models are optimized by vllm ascend, but in some cases (e.g. RLHF training) # the optimized model may not be suitable. Set this value to 0 to disable the optimized model. export USE_OPTIMIZED_MODEL=0 python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/geo3k/train.parquet \ data.val_files=$HOME/data/geo3k/test.parquet \ data.train_batch_size=512 \ data.max_prompt_length=1024 \ data.max_response_length=2048 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.image_key=images \ actor_rollout_ref.model.path=Qwen/Qwen2.5-VL-32B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=32 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=1 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.01 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.use_torch_compile=False \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=1 \ actor_rollout_ref.rollout.tensor_model_parallel_size=8 \ actor_rollout_ref.rollout.name=$ENGINE \ +actor_rollout_ref.rollout.engine_kwargs.vllm.disable_mm_preprocessor_cache=True \ actor_rollout_ref.rollout.gpu_memory_utilization=0.3 \ actor_rollout_ref.rollout.enable_chunked_prefill=False \ actor_rollout_ref.rollout.enforce_eager=True \ actor_rollout_ref.rollout.free_cache_engine=True \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=1 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger=console \ trainer.project_name='verl_grpo_example_geo3k' \ trainer.experiment_name='qwen2_5_vl_32b_function_rm' \ trainer.n_gpus_per_node=16 \ trainer.nnodes=2 \ trainer.save_freq=-1 \ trainer.test_freq=-1 \ trainer.total_epochs=15 \ trainer.device=npu $@ ================================================ FILE: verl_distillation/examples/grpo_trainer/run_qwen2_5_vl_3b_npu.sh ================================================ set -x ENGINE=${1:-vllm} # Some models are optimized by vllm ascend, but in some cases (e.g. RLHF training) # the optimized model may not be suitable. Set this value to 0 to disable the optimized model.
export USE_OPTIMIZED_MODEL=0 python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/geo3k/train.parquet \ data.val_files=$HOME/data/geo3k/test.parquet \ data.train_batch_size=512 \ data.max_prompt_length=1024 \ data.max_response_length=2048 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.image_key=images \ actor_rollout_ref.model.path=Qwen/Qwen2.5-VL-3B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=16 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=2 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.01 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.use_torch_compile=False \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=$ENGINE \ +actor_rollout_ref.rollout.engine_kwargs.vllm.disable_mm_preprocessor_cache=True \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.enable_chunked_prefill=False \ actor_rollout_ref.rollout.enforce_eager=True \ actor_rollout_ref.rollout.free_cache_engine=True \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger=console \ trainer.project_name='verl_grpo_example_geo3k' \ trainer.experiment_name='qwen2_5_vl_3b_function_rm' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=-1 \ trainer.total_epochs=15 \ trainer.device=npu $@ ================================================ FILE: verl_distillation/examples/grpo_trainer/run_qwen2_5_vl_7b_npu.sh ================================================ set -x ENGINE=${1:-vllm} # Some models are optimized by vllm ascend, but in some cases (e.g. RLHF training) # the optimized model may not be suitable. Set this value to 0 to disable the optimized model.
export USE_OPTIMIZED_MODEL=0 python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/geo3k/train.parquet \ data.val_files=$HOME/data/geo3k/test.parquet \ data.train_batch_size=512 \ data.max_prompt_length=1024 \ data.max_response_length=2048 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.image_key=images \ actor_rollout_ref.model.path=Qwen/Qwen2.5-VL-7B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=32 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=2 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.01 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.use_torch_compile=False \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.rollout.tensor_model_parallel_size=4 \ actor_rollout_ref.rollout.name=$ENGINE \ +actor_rollout_ref.rollout.engine_kwargs.vllm.disable_mm_preprocessor_cache=True \ actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \ actor_rollout_ref.rollout.enable_chunked_prefill=False \ actor_rollout_ref.rollout.enforce_eager=True \ actor_rollout_ref.rollout.free_cache_engine=True \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger=console \ trainer.project_name='verl_grpo_example_geo3k' \ trainer.experiment_name='qwen2_5_vl_7b_function_rm' \ trainer.n_gpus_per_node=16 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=-1 \ trainer.total_epochs=15 \ trainer.device=npu $@ ================================================ FILE: verl_distillation/examples/grpo_trainer/run_qwen3-235b_megatron_96gb.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail ## !!!!!!!important!!!!!! 
## set the following environment variables on all your nodes # env_vars: # CUDA_DEVICE_MAX_CONNECTIONS: "1" # NCCL_NVLS_ENABLE: "0" # VLLM_USE_V1: 1 # install mbridge 0.1.13 on all your nodes with the following command: # pip3 install git+https://github.com/ISEEKYAN/mbridge SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" [ -f "${SCRIPT_DIR}/env.sh" ] && source "${SCRIPT_DIR}/env.sh" adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=True kl_loss_coef=0.001 clip_ratio_low=0.2 clip_ratio_high=0.28 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 8)) enable_overlong_buffer=True overlong_buffer_len=$((1024 * 1)) overlong_penalty_factor=1.0 loss_agg_mode="token-mean" train_prompt_bsz=${TRAIN_BS:-32} n_resp_per_prompt=8 train_prompt_mini_bsz=16 # minimum number of nodes needed for Qwen3-235B-A22B NNODES=${NNODES:-4} # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} MODEL_PATH=$RAY_DATA_HOME/models/Qwen3-235B-A22B TRAIN_FILE=$RAY_DATA_HOME/dataset/dapo-math-17k.parquet TEST_FILE=$RAY_DATA_HOME/dataset/aime-2024.parquet # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_top_p=0.7 # Performance Related Parameters use_dynamic_bsz=True actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 10 / 10)) infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 1)) offload=True OPTIM_OFFLOAD=${OPTIM_OFFLOAD:-True} gen_tp=8 train_tp=${TP:-4} train_pp=${PP:-8} EP=${EP:-4} ETP=1 CP=1 optimizer_offload_fraction=${OFFLOAD_FRACTION:-1.} last_layer=${LAST_LAYER:-10} project_name='verl-qwen3' exp_name="235B-${NNODES}-pp${train_pp}-tp${train_tp}-ep${EP}-actor-length${actor_ppo_max_token_len}" CKPTS_DIR=$RAY_DATA_HOME/ckpt/${project_name}/${exp_name} # TODO: support cuda graph for rollout by setting the following config # actor_rollout_ref.rollout.cudagraph_capture_sizes=[1,2,4,8,16,32] # actor_rollout_ref.rollout.enforce_eager=False python3 -m verl.trainer.main_ppo \ --config-path=config \ --config-name='ppo_megatron_trainer.yaml' \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.train_batch_size=${train_prompt_bsz} \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.enforce_eager=True \ actor_rollout_ref.rollout.free_cache_engine=True \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.model.use_fused_kernels=True \ actor_rollout_ref.actor.megatron.use_mbridge=True \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=2 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \
actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ +actor_rollout_ref.actor.optim.override_optimizer_config.optimizer_offload_fraction=${optimizer_offload_fraction} \ +actor_rollout_ref.actor.optim.override_optimizer_config.overlap_cpu_optimizer_d2h_h2d=True \ +actor_rollout_ref.actor.optim.override_optimizer_config.use_precision_aware_optimizer=True \ +actor_rollout_ref.actor.optim.override_optimizer_config.optimizer_cpu_offload=True \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.megatron.param_offload=${offload} \ actor_rollout_ref.actor.megatron.optimizer_offload=${OPTIM_OFFLOAD} \ actor_rollout_ref.actor.megatron.grad_offload=${offload} \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=${train_pp} \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=${train_tp} \ actor_rollout_ref.actor.megatron.expert_model_parallel_size=$EP \ actor_rollout_ref.actor.megatron.expert_tensor_parallel_size=$ETP \ actor_rollout_ref.actor.megatron.context_parallel_size=${CP} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.optim.clip_grad=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.rollout.gpu_memory_utilization=0.85 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k=${top_k} \ actor_rollout_ref.nccl_timeout=1200 \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=True \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=${train_pp} \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=${train_tp} \ actor_rollout_ref.ref.megatron.expert_model_parallel_size=$EP \ actor_rollout_ref.ref.megatron.expert_tensor_parallel_size=$ETP \ actor_rollout_ref.ref.megatron.context_parallel_size=${CP} \ actor_rollout_ref.ref.megatron.param_offload=${offload} \ +actor_rollout_ref.actor.megatron.override_transformer_config.apply_rope_fusion=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.masked_softmax_fusion=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.bias_activation_fusion=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.bias_dropout_fusion=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.gradient_accumulation_fusion=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.deallocate_pipeline_outputs=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.persist_layer_norm=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.moe_grouped_gemm=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.moe_permute_fusion=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.moe_token_dispatcher_type="flex" \ +actor_rollout_ref.actor.megatron.override_transformer_config.moe_router_dtype=fp32 \ 
+actor_rollout_ref.actor.megatron.override_transformer_config.moe_enable_deepep=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.account_for_loss_in_pipeline_split=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.account_for_embedding_in_pipeline_split=True \ reward_model.reward_manager=dapo \ +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \ +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \ +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \ +reward_model.reward_kwargs.overlong_buffer_cfg.log=False \ +reward_model.reward_kwargs.max_resp_len=${max_response_length} \ trainer.logger=['console','wandb'] \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node=8 \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=False \ trainer.test_freq=10 \ trainer.save_freq=100 \ trainer.total_epochs=10 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=auto \ trainer.log_val_generations=10 ================================================ FILE: verl_distillation/examples/grpo_trainer/run_qwen3-32b_npu.sh ================================================ set -x project_name='GRPO-Qwen3' exp_name='GRPO-Qwen3-32b-npu' gen_tp=4 RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen3-32B"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/gsm8k/train.parquet"} TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/gsm8k/test.parquet"} python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.train_batch_size=1024 \ data.max_prompt_length=2048 \ data.max_response_length=2048 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.shuffle=False \ actor_rollout_ref.model.path=${MODEL_PATH} \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=4 \ +actor_rollout_ref.actor.fsdp_config.mixed_precision.param_dtype=bf16 \ +actor_rollout_ref.actor.fsdp_config.mixed_precision.reduce_dtype=bf16 \ +actor_rollout_ref.actor.fsdp_config.mixed_precision.buffer_dtype=fp32 \ actor_rollout_ref.actor.ppo_mini_batch_size=64 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=8 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=True \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=8 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.7 \ actor_rollout_ref.rollout.n=4 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=8 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ actor_rollout_ref.actor.use_torch_compile=False \ actor_rollout_ref.ref.use_torch_compile=False \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=32768 \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger=['console','tensorboard'] \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node=8 
\ trainer.nnodes=4 \ trainer.resume_from_path=checkpoints/ \ trainer.save_freq=500 \ trainer.test_freq=50 \ trainer.total_epochs=50 \ trainer.device=npu $@ ================================================ FILE: verl_distillation/examples/grpo_trainer/run_qwen3-8b.sh ================================================ # Tested successfully on the hiyouga/verl:ngc-th2.6.0-cu126-vllm0.8.4-flashinfer0.2.2-cxx11abi0 image. # It outperforms the Qwen2 7B base model by two percentage points on the test set of GSM8K. set -x python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=1024 \ data.max_prompt_length=512 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen3-8B \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=32 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_gsm8k' \ trainer.experiment_name='qwen3_8b_function_rm' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/grpo_trainer/run_qwen3-8b_npu.sh ================================================ set -x project_name='GRPO-Qwen3' exp_name='GRPO-Qwen3-8B-npu' gen_tp=2 RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen3-8B"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"} TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"} python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.train_batch_size=256 \ data.max_prompt_length=512 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=${MODEL_PATH} \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=64 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=10 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.use_torch_compile=False \ actor_rollout_ref.ref.use_torch_compile=False \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ 
actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.default_local_dir=${CKPTS_DIR} \ trainer.device=npu \ trainer.resume_mode=auto \ actor_rollout_ref.actor.fsdp_config.forward_prefetch=True \ actor_rollout_ref.ref.fsdp_config.forward_prefetch=True \ ++actor_rollout_ref.actor.entropy_from_logits_with_chunking=True \ ++actor_rollout_ref.ref.entropy_from_logits_with_chunking=True \ trainer.val_before_train=True \ trainer.save_freq=5 \ trainer.test_freq=5 \ trainer.total_epochs=15 ================================================ FILE: verl_distillation/examples/grpo_trainer/run_qwen3_8b_grpo_sglang_1k_spmd_npu.sh ================================================ set -x export HCCL_CONNECT_TIMEOUT=1500 export HCCL_HOST_SOCKET_PORT_RANGE=60000-60050 export HCCL_NPU_SOCKET_PORT_RANGE=61000-61050 # WORKSPACE_HOME and DATA_HOME support custom path configuration. WORKSPACE_HOME=$PWD DATA_HOME=$PWD sp_size=4 num_npu=4 tp_size=4 train_prompt_bsz=16 train_prompt_mini_bsz=16 max_prompt_length=512 max_response_length=1024 CKPTS_DIR=$WORKSPACE_HOME/logs/ckpt/qwen3_8b model_path=$DATA_HOME/models/Qwen3-8B train_data=$DATA_HOME/datasets/processed_gsm8k/train.parquet valid_data=$DATA_HOME/datasets/processed_gsm8k/test.parquet python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=$train_data \ data.val_files=$valid_data \ data.train_batch_size=$train_prompt_bsz \ data.max_prompt_length=$max_prompt_length \ data.max_response_length=$max_response_length \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=$model_path \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=$train_prompt_mini_bsz \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=1 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.use_torch_compile=False \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=True \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=True \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=2 \ actor_rollout_ref.rollout.tensor_model_parallel_size=$tp_size \ actor_rollout_ref.rollout.name=sglang \ actor_rollout_ref.rollout.gpu_memory_utilization=0.3 \ actor_rollout_ref.rollout.n=5 \ +actor_rollout_ref.rollout.engine_kwargs.sglang.attention_backend="ascend" \ actor_rollout_ref.rollout.enable_chunked_prefill=False \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=2 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ actor_rollout_ref.nccl_timeout=1800 \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \
trainer.logger=console \ trainer.val_before_train=False \ trainer.project_name='verl_grpo_example_512_1024_gsm8k' \ trainer.experiment_name='qwen3_8b_function_rm' \ trainer.n_gpus_per_node=$num_npu \ trainer.nnodes=1 \ trainer.save_freq=1000 \ trainer.test_freq=10000 \ trainer.total_epochs=5 \ trainer.default_local_dir="${CKPTS_DIR}" \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \ trainer.device=npu $@ ================================================ FILE: verl_distillation/examples/grpo_trainer/run_qwen3_8b_grpo_sglang_32k_spmd_npu.sh ================================================ set -x export HCCL_CONNECT_TIMEOUT=1500 export HCCL_HOST_SOCKET_PORT_RANGE=60000-60050 export HCCL_NPU_SOCKET_PORT_RANGE=61000-61050 # WORKSPACE_HOME and DATA_HOME support custom path configuration. WORKSPACE_HOME=$PWD DATA_HOME=$PWD sp_size=4 num_gpu=8 tp_size=4 train_prompt_bsz=16 train_prompt_mini_bsz=16 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 32)) CKPTS_DIR=$WORKSPACE_HOME/logs/ckpt/qwen3_8b model_path=$DATA_HOME/models/Qwen3-8B train_data=$DATA_HOME/datasets/dapo/dapo-math-17k.parquet valid_data=$DATA_HOME/datasets/dapo/aime-2024.parquet python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=$train_data \ data.val_files=$valid_data \ data.train_batch_size=$train_prompt_bsz \ data.max_prompt_length=$max_prompt_length \ data.max_response_length=$max_response_length \ data.filter_overlong_prompts=False \ data.truncation='error' \ actor_rollout_ref.model.path=$model_path \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=$train_prompt_mini_bsz \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=1 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.use_torch_compile=False \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=True \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=True \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=1 \ actor_rollout_ref.rollout.tensor_model_parallel_size=$tp_size \ actor_rollout_ref.rollout.name=sglang \ actor_rollout_ref.rollout.gpu_memory_utilization=0.3 \ actor_rollout_ref.rollout.n=5 \ +actor_rollout_ref.rollout.engine_kwargs.sglang.attention_backend="ascend" \ actor_rollout_ref.rollout.enable_chunked_prefill=False \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=1 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ actor_rollout_ref.nccl_timeout=3600 \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger=console \ trainer.val_before_train=False \ trainer.project_name='verl_grpo_example_2k_32k' \ trainer.experiment_name='qwen3_8b_function_rm' \ trainer.n_gpus_per_node=$num_gpu \ trainer.nnodes=1 \ trainer.save_freq=1000 \ trainer.test_freq=10000 \ trainer.total_epochs=5 \ trainer.default_local_dir="${CKPTS_DIR}" \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \ trainer.device=npu $@ ================================================ FILE: verl_distillation/examples/grpo_trainer/run_qwen3_vl-235b-megatron.sh
================================================ set -x ENGINE=${1:-vllm} export CUDA_DEVICE_MAX_CONNECTIONS=1 # For megatron communication/computation overlapping # dependency: vllm>=0.11.0, megatron-lm>=0.13, mbridge with qwen3vl_cp branch # environment option1: use a stable container newer than docker://verlai/verl:vllm011.dev6 # and install mbridge in it by following the instructions in the container # pip uninstall mbridge if you have installed it # pip install git+https://github.com/ISEEKYAN/mbridge.git@qwen3vl_cp # for correct mbridge # environment option2: use container docker://verlai/verl:vllm011.dev_qwenvl_cp export VLLM_ALLREDUCE_USE_SYMM_MEM=0 # for vllm0.11.0 with TP HF_MODEL_PATH=${HF_MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen3-VL-235B-A22B-Instruct"} GEN_TP=${GEN_TP:-16} CP=${CP:-2} TP=${TP:-4} PP=${PP:-8} EP=${EP:-8} ETP=${ETP:-1} train_path=$HOME/data/geo3k/train.parquet test_path=$HOME/data/geo3k/test.parquet python3 -m verl.trainer.main_ppo --config-path=config \ --config-name='ppo_megatron_trainer.yaml'\ algorithm.adv_estimator=grpo \ data.train_files="$train_path" \ data.val_files="$test_path" \ data.train_batch_size=512 \ data.max_prompt_length=1024 \ data.max_response_length=2048 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=$HF_MODEL_PATH \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=128 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=1 \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=$PP \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=$TP \ actor_rollout_ref.actor.megatron.context_parallel_size=$CP \ actor_rollout_ref.actor.megatron.expert_model_parallel_size=$EP \ actor_rollout_ref.actor.megatron.expert_tensor_parallel_size=$ETP \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.01 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=1 \ actor_rollout_ref.rollout.tensor_model_parallel_size=$GEN_TP \ actor_rollout_ref.actor.use_dynamic_bsz=True \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=4096 \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=True \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=4096 \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=True \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=4096 \ actor_rollout_ref.rollout.name=$ENGINE \ +actor_rollout_ref.rollout.engine_kwargs.vllm.disable_mm_preprocessor_cache=True \ actor_rollout_ref.rollout.gpu_memory_utilization=0.7 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=1 \ actor_rollout_ref.actor.megatron.use_mbridge=True \ actor_rollout_ref.actor.megatron.param_offload=True \ actor_rollout_ref.actor.megatron.optimizer_offload=True \ actor_rollout_ref.actor.megatron.grad_offload=True \ actor_rollout_ref.ref.megatron.param_offload=True \ +actor_rollout_ref.actor.optim.override_optimizer_config.optimizer_offload_fraction=1 \ +actor_rollout_ref.actor.optim.override_optimizer_config.overlap_cpu_optimizer_d2h_h2d=True \ +actor_rollout_ref.actor.optim.override_optimizer_config.use_precision_aware_optimizer=True \ +actor_rollout_ref.actor.optim.override_optimizer_config.optimizer_cpu_offload=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.moe_router_dtype=fp32 \ +actor_rollout_ref.actor.megatron.override_transformer_config.moe_enable_deepep=True \
+actor_rollout_ref.actor.megatron.override_transformer_config.moe_token_dispatcher_type=flex \ +actor_rollout_ref.actor.megatron.override_transformer_config.recompute_method=uniform \ +actor_rollout_ref.actor.megatron.override_transformer_config.recompute_granularity=full \ +actor_rollout_ref.actor.megatron.override_transformer_config.recompute_num_layers=1 \ +actor_rollout_ref.actor.megatron.override_transformer_config.gradient_accumulation_fusion=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.moe_permute_fusion=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.account_for_loss_in_pipeline_split=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.account_for_embedding_in_pipeline_split=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_geo3k' \ trainer.experiment_name='qwen3_vl_235b_megatron' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=8 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/grpo_trainer/run_qwen3_vl-30b-megatron.sh ================================================ set -x ENGINE=${1:-vllm} export CUDA_DEVICE_MAX_CONNECTIONS=1 # For megatron communication/computation overlapping # dependency: vllm>=0.11.0, megatron-lm>=0.13, mbridge with qwen3vl_cp branch # environment option1: use a stable container newer than docker://verlai/verl:vllm011.dev6 # and install mbridge in it by following the instructions in the container # pip uninstall mbridge if you have installed it # pip install git+https://github.com/ISEEKYAN/mbridge.git@qwen3vl_cp # for correct mbridge # environment option2: use container docker://verlai/verl:vllm011.dev_qwenvl_cp export VLLM_ALLREDUCE_USE_SYMM_MEM=0 # for vllm0.11.0 with TP HF_MODEL_PATH=${HF_MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen3-VL-30B-A3B-Instruct"} GEN_TP=${GEN_TP:-4} CP=${CP:-2} TP=${TP:-2} PP=${PP:-1} EP=${EP:-8} ETP=${ETP:-1} train_path=$HOME/data/geo3k/train.parquet test_path=$HOME/data/geo3k/test.parquet python3 -m verl.trainer.main_ppo --config-path=config \ --config-name='ppo_megatron_trainer.yaml'\ algorithm.adv_estimator=grpo \ data.train_files="$train_path" \ data.val_files="$test_path" \ data.train_batch_size=512 \ data.max_prompt_length=1024 \ data.max_response_length=2048 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=$HF_MODEL_PATH \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=128 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=1 \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=$PP \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=$TP \ actor_rollout_ref.actor.megatron.context_parallel_size=$CP \ actor_rollout_ref.actor.megatron.expert_model_parallel_size=$EP \ actor_rollout_ref.actor.megatron.expert_tensor_parallel_size=$ETP \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.01 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=1 \ actor_rollout_ref.rollout.tensor_model_parallel_size=$GEN_TP \ actor_rollout_ref.actor.use_dynamic_bsz=True \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=4096 \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=True \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=4096 \
actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=True \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=4096 \ actor_rollout_ref.rollout.name=$ENGINE \ +actor_rollout_ref.rollout.engine_kwargs.vllm.disable_mm_preprocessor_cache=True \ actor_rollout_ref.rollout.gpu_memory_utilization=0.7 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=1 \ actor_rollout_ref.actor.megatron.use_mbridge=True \ actor_rollout_ref.actor.megatron.param_offload=True \ actor_rollout_ref.actor.megatron.optimizer_offload=True \ actor_rollout_ref.actor.megatron.grad_offload=True \ actor_rollout_ref.ref.megatron.param_offload=True \ +actor_rollout_ref.actor.optim.override_optimizer_config.optimizer_offload_fraction=1 \ +actor_rollout_ref.actor.optim.override_optimizer_config.overlap_cpu_optimizer_d2h_h2d=True \ +actor_rollout_ref.actor.optim.override_optimizer_config.use_precision_aware_optimizer=True \ +actor_rollout_ref.actor.optim.override_optimizer_config.optimizer_cpu_offload=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.moe_router_dtype=fp32 \ +actor_rollout_ref.actor.megatron.override_transformer_config.moe_enable_deepep=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.moe_token_dispatcher_type=flex \ +actor_rollout_ref.actor.megatron.override_transformer_config.recompute_method=uniform \ +actor_rollout_ref.actor.megatron.override_transformer_config.recompute_granularity=full \ +actor_rollout_ref.actor.megatron.override_transformer_config.recompute_num_layers=1 \ +actor_rollout_ref.actor.megatron.override_transformer_config.gradient_accumulation_fusion=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.moe_permute_fusion=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_geo3k' \ trainer.experiment_name='qwen3_vl_30b_megatron' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/grpo_trainer/run_qwen3_vl-8b-megatron.sh ================================================ set -x ENGINE=${1:-vllm} export CUDA_DEVICE_MAX_CONNECTIONS=1 # For megatron communication/computation overlapping # dependency: vllm>=0.11.0, megatron-lm>=0.13, mbridge with qwen3vl_cp branch # environment option1: use a stable container newer than docker://verlai/verl:vllm011.dev6 # and install mbridge in it by following the instructions in the container # pip uninstall mbridge if you have installed it # pip install git+https://github.com/ISEEKYAN/mbridge.git@qwen3vl_cp # for correct mbridge # environment option2: use container docker://verlai/verl:vllm011.dev_qwenvl_cp export VLLM_ALLREDUCE_USE_SYMM_MEM=0 # for vllm0.11.0 with TP HF_MODEL_PATH=${HF_MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen3-VL-8B-Instruct"} GEN_TP=${GEN_TP:-4} CP=${CP:-2} TP=${TP:-2} PP=${PP:-2} train_path=$HOME/data/geo3k/train.parquet test_path=$HOME/data/geo3k/test.parquet python3 -m verl.trainer.main_ppo --config-path=config \ --config-name='ppo_megatron_trainer.yaml'\ algorithm.adv_estimator=grpo \ data.train_files="$train_path" \ data.val_files="$test_path" \ data.train_batch_size=512 \ data.max_prompt_length=1024 \ data.max_response_length=2048 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=$HF_MODEL_PATH \ actor_rollout_ref.actor.optim.lr=1e-6 \
actor_rollout_ref.actor.ppo_mini_batch_size=128 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=1 \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=$PP \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=$TP \ actor_rollout_ref.actor.megatron.context_parallel_size=$CP \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.01 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=1 \ actor_rollout_ref.rollout.tensor_model_parallel_size=$GEN_TP \ actor_rollout_ref.actor.use_dynamic_bsz=True \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=4096 \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=True \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=4096 \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=True \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=4096 \ actor_rollout_ref.rollout.name=$ENGINE \ +actor_rollout_ref.rollout.engine_kwargs.vllm.disable_mm_preprocessor_cache=True \ actor_rollout_ref.rollout.gpu_memory_utilization=0.7 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=1 \ actor_rollout_ref.actor.megatron.use_mbridge=True \ actor_rollout_ref.actor.megatron.param_offload=True \ actor_rollout_ref.actor.megatron.optimizer_offload=True \ actor_rollout_ref.actor.megatron.grad_offload=True \ actor_rollout_ref.ref.megatron.param_offload=True \ +actor_rollout_ref.actor.optim.override_optimizer_config.optimizer_offload_fraction=1 \ +actor_rollout_ref.actor.optim.override_optimizer_config.overlap_cpu_optimizer_d2h_h2d=True \ +actor_rollout_ref.actor.optim.override_optimizer_config.use_precision_aware_optimizer=True \ +actor_rollout_ref.actor.optim.override_optimizer_config.optimizer_cpu_offload=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.moe_router_dtype=fp32 \ +actor_rollout_ref.actor.megatron.override_transformer_config.moe_enable_deepep=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.moe_token_dispatcher_type=flex \ +actor_rollout_ref.actor.megatron.override_transformer_config.recompute_method=uniform \ +actor_rollout_ref.actor.megatron.override_transformer_config.recompute_granularity=full \ +actor_rollout_ref.actor.megatron.override_transformer_config.recompute_num_layers=1 \ +actor_rollout_ref.actor.megatron.override_transformer_config.gradient_accumulation_fusion=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.moe_permute_fusion=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_geo3k' \ trainer.experiment_name='qwen3_vl_8b_megatron' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/grpo_trainer/run_qwen3moe-30b_megatron_96gb.sh ================================================ set -x # tested in NNODES=1~4 * 96G H20 GPU NNODES=${NNODES:-1} NGPUS_PER_NODES=${NGPUS_PER_NODES:-8} project_name='DAPO-Qwen3-30b-MATH' exp_name='DAPO-Qwen3-30b-MATH-megatron' adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 8)) enable_overlong_buffer=True overlong_buffer_len=$((1024 * 4)) overlong_penalty_factor=1.0 loss_agg_mode="token-mean" 
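# Note (illustrative, based on the DAPO-style soft overlong penalty; see verl's dapo reward
# manager for the exact code): with reward_model.reward_manager=dapo below, a response longer
# than max_response_length - overlong_buffer_len is expected to receive a penalty that ramps
# roughly linearly from 0 down to -overlong_penalty_factor across the buffer:
#   exceed_len = response_len - (max_response_length - overlong_buffer_len)
#   penalty    = min(-exceed_len / overlong_buffer_len * overlong_penalty_factor, 0)
# loss_agg_mode="token-mean" averages the policy loss over all tokens in the batch rather
# than first averaging within each sequence.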
train_prompt_bsz=512 n_resp_per_prompt=16 train_prompt_mini_bsz=128 train_ppo_micro_batch_size_per_gpu=2 infer_ppo_micro_batch_size_per_gpu=2 # Paths MODEL_PATH=Qwen/Qwen3-30B-A3B RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} TRAIN_FILE=$RAY_DATA_HOME/dataset/dapo-math-17k.parquet TEST_FILE=$RAY_DATA_HOME/dataset/aime-2024.parquet # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_top_p=0.7 # Performance Related Parameter use_dynamic_bsz=True actor_ppo_max_token_len=$(((max_prompt_length + max_response_length))) infer_ppo_max_token_len=$(((max_prompt_length + max_response_length))) offload=True optimizer_offload_fraction=${OFFLOAD_FRACTION:-1.} COMMON_PP=${COMMON_PP:-1} COMMON_VPP=${COMMON_VPP:-null} COMMON_CP=${COMMON_CP:-1} COMMON_TP=${COMMON_TP:-1} COMMON_EP=${COMMON_EP:-8} COMMON_ETP=${COMMON_ETP:-1} TRAIN_TP=${TRAIN_TP:-$COMMON_TP} INFER_TP=${INFER_TP:-4} ACTOR_PP=${ACTOR_PP:-$COMMON_PP} ACTOR_VPP=${ACTOR_VPP:-$COMMON_VPP} ACTOR_CP=${ACTOR_CP:-$COMMON_CP} ACTOR_TP=${ACTOR_TP:-$TRAIN_TP} ACTOR_EP=${ACTOR_EP:-$COMMON_EP} ACTOR_ETP=${ACTOR_ETP:-$COMMON_ETP} ROLLOUT_TP=${ROLLOUT_TP:-$INFER_TP} REF_PP=${REF_PP:-$COMMON_PP} REF_VPP=${REF_VPP:-$COMMON_VPP} REF_CP=${REF_CP:-$COMMON_CP} REF_TP=${REF_TP:-$TRAIN_TP} REF_EP=${REF_EP:-$COMMON_EP} REF_ETP=${REF_ETP:-$COMMON_ETP} CRITIC_PP=${CRITIC_PP:-$COMMON_PP} CRITIC_VPP=${CRITIC_VPP:-$COMMON_VPP} CRITIC_CP=${CRITIC_CP:-$COMMON_CP} CRITIC_TP=${CRITIC_TP:-$TRAIN_TP} CRITIC_EP=${CRITIC_EP:-$COMMON_EP} CRITIC_ETP=${CRITIC_ETP:-$COMMON_ETP} RM_PP=${RM_PP:-$COMMON_PP} RM_VPP=${RM_VPP:-$COMMON_VPP} RM_CP=${RM_CP:-$COMMON_CP} RM_TP=${RM_TP:-$TRAIN_TP} RM_EP=${RM_EP:-$COMMON_EP} RM_ETP=${RM_ETP:-$COMMON_ETP} # install mbridge # pip3 install git+https://github.com/ISEEKYAN/mbridge USE_MBRIDGE=True USE_DIST_CKPT=False python3 -m verl.trainer.main_ppo --config-path=./config --config-name='ppo_megatron_trainer'\ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.train_batch_size=${train_prompt_bsz} \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ +actor_rollout_ref.model.override_config.model_config.max_position_embeddings=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.model.use_fused_kernels=False \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=${train_ppo_micro_batch_size_per_gpu} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.lr_decay_style='constant' \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ +actor_rollout_ref.actor.optim.override_optimizer_config.optimizer_offload_fraction=${optimizer_offload_fraction} \
+actor_rollout_ref.actor.optim.override_optimizer_config.overlap_cpu_optimizer_d2h_h2d=True \ +actor_rollout_ref.actor.optim.override_optimizer_config.use_precision_aware_optimizer=True \ +actor_rollout_ref.actor.optim.override_optimizer_config.optimizer_cpu_offload=True \ actor_rollout_ref.actor.megatron.use_mbridge=$USE_MBRIDGE \ actor_rollout_ref.actor.megatron.use_dist_checkpointing=$USE_DIST_CKPT \ actor_rollout_ref.actor.megatron.param_offload=${offload} \ actor_rollout_ref.actor.megatron.grad_offload=${offload} \ actor_rollout_ref.actor.megatron.optimizer_offload=${offload} \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=${ACTOR_TP} \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=${ACTOR_PP} \ actor_rollout_ref.actor.megatron.virtual_pipeline_model_parallel_size=${ACTOR_VPP} \ actor_rollout_ref.actor.megatron.context_parallel_size=${ACTOR_CP} \ actor_rollout_ref.actor.megatron.expert_model_parallel_size=${ACTOR_EP} \ actor_rollout_ref.actor.megatron.expert_tensor_parallel_size=${ACTOR_ETP} \ +actor_rollout_ref.actor.megatron.override_transformer_config.apply_rope_fusion=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.masked_softmax_fusion=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.bias_activation_fusion=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.bias_dropout_fusion=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.gradient_accumulation_fusion=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.deallocate_pipeline_outputs=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.persist_layer_norm=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.moe_grouped_gemm=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.moe_permute_fusion=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.moe_token_dispatcher_type="flex" \ +actor_rollout_ref.actor.megatron.override_transformer_config.moe_router_dtype=fp32 \ +actor_rollout_ref.actor.megatron.override_transformer_config.moe_enable_deepep=True \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=${infer_ppo_micro_batch_size_per_gpu} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.gpu_memory_utilization=0.7 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${INFER_TP} \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=True \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.enforce_eager=True \ actor_rollout_ref.rollout.free_cache_engine=True \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=${infer_ppo_micro_batch_size_per_gpu} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.ref.megatron.use_dist_checkpointing=${USE_DIST_CKPT} \ actor_rollout_ref.ref.megatron.param_offload=${offload} \ 
actor_rollout_ref.ref.megatron.tensor_model_parallel_size=${REF_TP} \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=${REF_PP} \ actor_rollout_ref.ref.megatron.virtual_pipeline_model_parallel_size=${REF_VPP} \ actor_rollout_ref.ref.megatron.context_parallel_size=${REF_CP} \ actor_rollout_ref.ref.megatron.expert_model_parallel_size=${REF_EP} \ actor_rollout_ref.ref.megatron.expert_tensor_parallel_size=${REF_ETP} \ reward_model.reward_manager=dapo \ +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \ +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \ +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \ +reward_model.reward_kwargs.overlong_buffer_cfg.log=False \ +reward_model.reward_kwargs.max_resp_len=${max_response_length} \ trainer.logger=['console','wandb'] \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node="${NGPUS_PER_NODES}" \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=False \ trainer.test_freq=10 \ trainer.save_freq=100 \ trainer.total_epochs=10 \ trainer.resume_mode=auto \ trainer.log_val_generations=10 ================================================ FILE: verl_distillation/examples/grpo_trainer/run_seed_oss_36b.sh ================================================ set -x python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=64 \ data.max_prompt_length=512 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=ByteDance-Seed/Seed-OSS-36B-Base \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.model.use_fused_kernels=True \ actor_rollout_ref.actor.ppo_mini_batch_size=8 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=2 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.use_dynamic_bsz=True \ actor_rollout_ref.actor.strategy=fsdp2 \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=True \ actor_rollout_ref.actor.fsdp_config.param_offload=True \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=2 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=2 \ actor_rollout_ref.rollout.free_cache_engine=True \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=True \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=2 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ actor_rollout_ref.ref.strategy=fsdp2 \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console"]' \ trainer.project_name='verl_grpo_seed_oss_36b' \ trainer.experiment_name='seed_oss_36b' \ trainer.val_before_train=False \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/ppo_trainer/README.md ================================================ # Proximal Policy Optimization (PPO) Proximal Policy Optimization
(PPO) is a family of policy gradient methods for reinforcement learning, proposed by OpenAI in 2017. PPO strikes a balance between simplicity, stability, and performance, making it one of the most widely used algorithms in modern RL applications, including large-scale language model fine-tuning. Traditional policy gradient methods like REINFORCE or Vanilla Policy Gradient suffer from: - High variance and sample inefficiency. - Instability due to large policy updates. PPO addresses these problems using a clipped surrogate objective that avoids overly large updates without requiring second-order derivatives. For more technical details regarding PPO, we suggest reading the introduction in the [OpenAI spinning up tutorial](https://spinningup.openai.com/en/latest/algorithms/ppo.html), and the paper [Proximal Policy Optimization Algorithms](https://arxiv.org/abs/1707.06347). ## Key Components - Actor-Critic Architecture: PPO requires both an actor model (policy) and a critic model (value function). This differs from other algorithms like GRPO and RLOO that don't require a critic model. - Generalized Advantage Estimation (GAE): PPO uses GAE for computing advantage values, which helps reduce variance in policy gradient estimates while maintaining low bias. - Clipped Surrogate Objective: The core of PPO is implemented through the clipped surrogate objective function that limits policy updates. ## Configuration Note that all configs containing `micro_batch_size` set the maximum sample or token count per forward or backward pass to avoid GPU OOMs; changing their values should not affect algorithmic/convergence behavior. Most critic configs are similar to those of actors. Note that the critic model is omitted from the figure below. ![image](https://github.com/user-attachments/assets/16aebad1-0da6-4eb3-806d-54a74e712c2d) - `data.train_batch_size`: The global batch size of prompts used to generate a set of sampled trajectories/rollouts. The number of responses/trajectories is `data.train_batch_size * actor_rollout_ref.rollout.n`. - `actor_rollout_ref.actor.ppo_mini_batch_size`: The set of sampled trajectories is split into multiple mini-batches with batch_size=ppo_mini_batch_size for PPO actor updates. The ppo_mini_batch_size is a global size across all workers. - `critic.ppo_mini_batch_size`: The set of sampled trajectories is split into multiple mini-batches with batch_size=ppo_mini_batch_size for PPO critic updates. The ppo_mini_batch_size is a global size across all workers. - `actor_rollout_ref.actor.clip_ratio`: The PPO clip range. Defaults to 0.2. - `actor_rollout_ref.actor.ppo_epochs`: Number of epochs for PPO updates on one set of sampled trajectories for the actor. - `critic.ppo_epochs`: Number of epochs for PPO updates on one set of sampled trajectories for the critic. Defaults to `actor_rollout_ref.actor.ppo_epochs`. - `algorithm.gamma`: Discount factor. - `algorithm.lam`: The lambda term that trades off between bias and variance in the GAE estimator. - `algorithm.adv_estimator`: Supported estimators: gae, grpo, reinforce_plus_plus, reinforce_plus_plus_baseline, rloo, rloo_vectorized. ## Advanced Extensions ### KL Divergence Control These options prevent the policy from diverging too far from a reference policy. Two mechanisms are available: KL reward penalty and KL loss.
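Both mechanisms rely on a per-token estimate of the KL divergence between the actor and the reference policy, built from the two models' log-probabilities. As a minimal illustrative sketch (not necessarily verl's exact code), the k1/k2/k3 estimators referenced in the options below can be written as:

```python
import torch

def kl_estimate(logprob, ref_logprob, kind="low_var_kl"):
    """Per-token estimators of KL(pi_theta || pi_ref); see http://joschu.net/blog/kl-approx.html."""
    log_ratio = logprob - ref_logprob  # log pi_theta - log pi_ref, per token
    if kind == "kl":          # k1: unbiased but high variance
        return log_ratio
    if kind == "abs":         # absolute value of the log ratio
        return log_ratio.abs()
    if kind == "mse":         # k2: 0.5 * (log ratio)^2, low variance but biased
        return 0.5 * log_ratio.square()
    if kind == "low_var_kl":  # k3: exp(-x) + x - 1 >= 0, unbiased and low variance
        return torch.exp(-log_ratio) + log_ratio - 1
    raise ValueError(f"unknown estimator: {kind}")
```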
For more technical details, see [Training language models to follow instructions with human feedback](https://arxiv.org/abs/2203.02155). Options to use KL loss for KL divergence control: - `actor_rollout_ref.actor.use_kl_loss`: Whether to use a KL loss in the actor. When enabled, KL is not applied in the reward function. Default is False. - `actor_rollout_ref.actor.kl_loss_coef`: The coefficient of the KL loss. Default is 0.001. - `actor_rollout_ref.actor.kl_loss_type`: Supported types: kl (k1), abs, mse (k2), low_var_kl (k3), and full. Appending "+" at the end (e.g., 'k1+' and 'k3+') applies a straight-through trick that uses k2 for unbiased gradient estimation, regardless of the KL value estimator (see https://github.com/volcengine/verl/pull/2953#issuecomment-3162113848 for more details). This option controls how the KL divergence between the actor and reference policy is calculated. See this blog post for a detailed analysis: http://joschu.net/blog/kl-approx.html Options to use KL penalty in the reward: - `algorithm.use_kl_in_reward`: Whether to enable the in-reward KL penalty. Default is False. - `algorithm.kl_penalty`: Supported values: kl (k1), abs, mse (k2), low_var_kl (k3), and full. This defines how the KL divergence between the actor and reference policy is calculated. For the specific options, refer to `kl_penalty` in core_algos.py. See this blog post for a detailed analysis: http://joschu.net/blog/kl-approx.html - `algorithm.kl_ctrl.kl_coef`: The (initial) coefficient of the in-reward KL penalty. Default is 0.001. - `algorithm.kl_ctrl.type`: 'fixed' for FixedKLController and 'adaptive' for AdaptiveKLController. - `algorithm.kl_ctrl.horizon`: See the source code of AdaptiveKLController for details. - `algorithm.kl_ctrl.target_kl`: See the source code of AdaptiveKLController for details. ### Dual-clip PPO Dual-clip PPO extends the clipped surrogate objective for the case where the advantage is less than zero: it applies an additional lower bound so that the objective, even when multiplied by a very large ratio, does not drop below a specified bound. A minimal sketch follows.
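As an illustrative sketch (under the standard dual-clip formulation; not necessarily verl's exact implementation), the per-token loss can be written as:

```python
import torch

def dual_clip_ppo_loss(log_ratio, advantages, clip_ratio=0.2, clip_ratio_c=3.0):
    """Illustrative dual-clip PPO loss (to be minimized), computed per token."""
    ratio = torch.exp(log_ratio)  # pi_theta(a|s) / pi_old(a|s)
    # Standard PPO clipped surrogate, expressed as a loss.
    loss_unclipped = -ratio * advantages
    loss_clipped = -torch.clamp(ratio, 1.0 - clip_ratio, 1.0 + clip_ratio) * advantages
    ppo_loss = torch.max(loss_unclipped, loss_clipped)
    # Dual clip: for negative advantages, additionally bound the loss by
    # -clip_ratio_c * A, so a very large ratio cannot drive an unbounded update.
    dual_clip_loss = torch.min(ppo_loss, -clip_ratio_c * advantages)
    return torch.where(advantages < 0, dual_clip_loss, ppo_loss).mean()
```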
![image](https://github.com/user-attachments/assets/fc232181-d8b0-4307-8dd2-4dc0a4c1c139) - `actor_rollout_ref.actor.clip_ratio_c`: lower bound of the value for Dual-clip PPO, defaults to 3.0 ## Reference Example Qwen2.5 training log and commands: [link](https://github.com/eric-haibin-lin/verl-data/blob/experiments/gsm8k/Qwen2.5-0.5B-bsz256_2-prompt1024-resp512-0.567.log) ```bash bash run_gemma.sh trainer.n_gpus_per_node=1 \ actor_rollout_ref.rollout.tensor_model_parallel_size=1 \ trainer.logger=console \ critic.model.path=Qwen/Qwen2.5-0.5B-Instruct \ actor_rollout_ref.model.path=Qwen/Qwen2.5-0.5B-Instruct \ data.train_batch_size=256 \ actor_rollout_ref.actor.ppo_mini_batch_size=64 \ actor_rollout_ref.actor.ppo_micro_batch_size=2 \ critic.ppo_micro_batch_size=2 ``` Reference performance with verl v0.2: | Model | Method | Score | Link | |-------------------------------|------------------|-------|------------------------------------------------------------------------------------------------| | Qwen/Qwen2.5-0.5B-Instruct | pretrained model | 36.4 | [Qwen Blog](https://qwenlm.github.io/blog/qwen2.5-llm/) | | Qwen/Qwen2.5-0.5B-Instruct | PPO | 56.7 | [PPO Command and Logs](https://github.com/eric-haibin-lin/verl-data/blob/experiments/gsm8k/Qwen2.5-0.5B-bsz256_2-prompt1024-resp512-0.567.log) | ================================================ FILE: verl_distillation/examples/ppo_trainer/run_deepseek7b_llm.sh ================================================ set -x python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=gae \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=1024 \ data.max_prompt_length=512 \ data.max_response_length=512 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=deepseek-ai/deepseek-llm-7b-chat \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=16 \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.rollout.tensor_model_parallel_size=4 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \ critic.optim.lr=1e-5 \ critic.model.use_remove_padding=True \ critic.model.path=deepseek-ai/deepseek-llm-7b-chat \ critic.model.enable_gradient_checkpointing=True \ critic.ppo_micro_batch_size_per_gpu=32 \ critic.model.fsdp_config.param_offload=False \ critic.model.fsdp_config.optimizer_offload=False \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_example_gsm8k' \ trainer.experiment_name='deepseek_llm_7b_function_rm' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=1 \ trainer.use_legacy_worker_impl=auto \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/ppo_trainer/run_deepseek7b_llm_modelscope.sh ================================================ set -x VERL_USE_MODELSCOPE=True \ python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=gae \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ 
data.train_batch_size=1024 \ data.max_prompt_length=512 \ data.max_response_length=512 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=deepseek-ai/deepseek-llm-7b-chat \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=16 \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.rollout.tensor_model_parallel_size=4 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \ critic.optim.lr=1e-5 \ critic.model.use_remove_padding=True \ critic.model.path=deepseek-ai/deepseek-llm-7b-chat \ critic.model.enable_gradient_checkpointing=True \ critic.ppo_micro_batch_size_per_gpu=32 \ critic.model.fsdp_config.param_offload=False \ critic.model.fsdp_config.optimizer_offload=False \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_example_gsm8k' \ trainer.experiment_name='deepseek_llm_7b_function_rm' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=1 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/ppo_trainer/run_deepseek7b_llm_pfppo.sh ================================================ set -x # pf_ppo.reweight_method options: ["pow", "max_min", "max_random"] python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=gae \ algorithm.use_pf_ppo=True \ algorithm.pf_ppo.reweight_method=pow \ algorithm.pf_ppo.weight_pow=2.0 \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=1024 \ data.max_prompt_length=512 \ data.max_response_length=512 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=deepseek-ai/deepseek-llm-7b-chat \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=16 \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.rollout.tensor_model_parallel_size=4 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \ actor_rollout_ref.rollout.n=5 \ critic.optim.lr=1e-5 \ critic.model.use_remove_padding=True \ critic.model.path=deepseek-ai/deepseek-llm-7b-chat \ critic.model.enable_gradient_checkpointing=True \ critic.ppo_micro_batch_size_per_gpu=32 \ critic.model.fsdp_config.param_offload=False \ critic.model.fsdp_config.optimizer_offload=False \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_example_gsm8k' \ trainer.experiment_name='deepseek_llm_7b_function_rm' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=1 \ trainer.total_epochs=15 $@ ================================================ FILE:
verl_distillation/examples/ppo_trainer/run_deepseek7b_llm_sandbox_fusion.sh ================================================ set -x python3 -m verl.trainer.main_ppo \ reward_model.sandbox_fusion.url='https://xxxxxxxxx.apigateway-cn-beijing.volceapi.com/run_code' \ reward_model.sandbox_fusion.max_concurrent=128 \ reward_model.reward_manager=prime \ algorithm.adv_estimator=gae \ data.train_files=$HOME/data/Eurus-2-RL-Data/train.parquet \ data.val_files=$HOME/data/Eurus-2-RL-Data/validation.parquet \ data.train_batch_size=1024 \ data.max_prompt_length=512 \ data.max_response_length=512 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=deepseek-ai/deepseek-llm-7b-chat \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=16 \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.rollout.tensor_model_parallel_size=4 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \ critic.optim.lr=1e-5 \ critic.model.use_remove_padding=True \ critic.model.path=deepseek-ai/deepseek-llm-7b-chat \ critic.model.enable_gradient_checkpointing=True \ critic.ppo_micro_batch_size_per_gpu=32 \ critic.model.fsdp_config.param_offload=False \ critic.model.fsdp_config.optimizer_offload=False \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_example_sandbox_fusion' \ trainer.experiment_name='deepseek_llm_7b_function_sandbox_fusion' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=1 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/ppo_trainer/run_deepseek7b_llm_sp2.sh ================================================ set -x python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=gae \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=1024 \ data.max_prompt_length=512 \ data.max_response_length=512 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=deepseek-ai/deepseek-llm-7b-chat \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=32 \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=2 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=64 \ actor_rollout_ref.rollout.tensor_model_parallel_size=4 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ critic.optim.lr=1e-5 \ critic.ulysses_sequence_parallel_size=2 \ critic.model.use_remove_padding=True \ critic.model.path=deepseek-ai/deepseek-llm-7b-chat \ critic.model.enable_gradient_checkpointing=True \ critic.ppo_micro_batch_size_per_gpu=64 \ critic.model.fsdp_config.param_offload=False \ 
critic.model.fsdp_config.optimizer_offload=False \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_example_gsm8k' \ trainer.experiment_name='deepseek_llm_7b_function_rm_sp2' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/ppo_trainer/run_deepseek_full_hh_rlhf.sh ================================================ set -x train_files=$HOME/data/full_hh_rlhf/rl/train.parquet test_files=$HOME/data/full_hh_rlhf/rl/train.parquet # no use python3 -m verl.trainer.main_ppo --config-path=./config --config-name='ppo_megatron_trainer'\ algorithm.adv_estimator=gae \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=512 \ data.max_prompt_length=128 \ data.max_response_length=128 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=deepseek-ai/deepseek-llm-7b-chat \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=128 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.rollout.tensor_model_parallel_size=4 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \ critic.optim.lr=1e-5 \ critic.model.path=deepseek-ai/deepseek-llm-7b-chat \ critic.ppo_micro_batch_size_per_gpu=4 \ reward_model.enable=True \ reward_model.megatron.tensor_model_parallel_size=4 \ reward_model.model.path=deepseek-ai/deepseek-llm-7b-chat \ reward_model.micro_batch_size_per_gpu=4 \ reward_model.param_offload=False \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_megatron_full_hh_rlhf_examples' \ trainer.experiment_name='deepseek_llm_7b_model_rm' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=100 $@ ================================================ FILE: verl_distillation/examples/ppo_trainer/run_deepseek_math_gsm8k_megatron.sh ================================================ set -x # Example runnable on H20 * 8 export CUDA_DEVICE_MAX_CONNECTIONS=1 # For megatron communication/computation overlapping gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet math_train_path=$HOME/data/math/train.parquet math_test_path=$HOME/data/math/test.parquet train_files="['$gsm8k_train_path', '$math_train_path']" test_files="['$gsm8k_test_path', '$math_test_path']" python3 -m verl.trainer.main_ppo --config-path=./config --config-name='ppo_megatron_trainer'\ algorithm.adv_estimator=gae \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=512 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=deepseek-ai/deepseek-llm-7b-chat \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=2 \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=2 \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \ 
actor_rollout_ref.rollout.tensor_model_parallel_size=4 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=2 \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=2 \ critic.optim.lr=1e-5 \ critic.model.path=deepseek-ai/deepseek-llm-7b-chat \ critic.ppo_micro_batch_size_per_gpu=4 \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_ppo_gsm8k_math_examples' \ trainer.experiment_name='deepseek_llm_7b_megatron' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=100 $@ ================================================ FILE: verl_distillation/examples/ppo_trainer/run_deepseek_math_gsm8k_megatron_nsys.sh ================================================ set -x # Example runnable on H20 * 8 export CUDA_DEVICE_MAX_CONNECTIONS=1 # For megatron communication/computation overlapping gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet math_train_path=$HOME/data/math/train.parquet math_test_path=$HOME/data/math/test.parquet train_files=${train_files:-"$gsm8k_train_path"} test_files=${test_files:-"$gsm8k_test_path"} # Nsight profiling configuration PROFILE_STEPS="[1]" # or [] or null PROFILE_RANKS_ALL=False # or True PROFILE_RANKS=[0,4] DISCRETE=True # or False python3 -m verl.trainer.main_ppo --config-path=./config --config-name='ppo_megatron_trainer'\ algorithm.adv_estimator=gae \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=256 \ data.max_prompt_length=1024 \ data.max_response_length=512 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=deepseek-ai/deepseek-llm-7b-chat \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=64 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=2 \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=2 \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.actor.profiler.enable=True \ actor_rollout_ref.actor.profiler.ranks=$PROFILE_RANKS \ actor_rollout_ref.actor.profiler.all_ranks=$PROFILE_RANKS_ALL \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.8 \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=2 \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=2 \ critic.optim.lr=1e-5 \ critic.model.path=deepseek-ai/deepseek-llm-7b-chat \ critic.ppo_micro_batch_size_per_gpu=4 \ critic.profiler.enable=True \ critic.profiler.ranks=$PROFILE_RANKS \ critic.profiler.all_ranks=$PROFILE_RANKS_ALL \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_ppo_gsm8k_math_examples' \ trainer.experiment_name='deepseek_llm_7b_megatron' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=-1 \ trainer.total_epochs=100 \ trainer.total_training_steps=1 \ global_profiler.tool=nsys \ global_profiler.steps=$PROFILE_STEPS \ global_profiler.global_tool_config.nsys.discrete=$DISCRETE $@ ================================================ FILE: verl_distillation/examples/ppo_trainer/run_gemma.sh
================================================ set -x python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=gae \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=512 \ data.max_prompt_length=1024 \ data.max_response_length=512 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=google/gemma-2-2b-it \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=False \ actor_rollout_ref.actor.ppo_mini_batch_size=128 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \ critic.optim.lr=1e-5 \ critic.model.use_remove_padding=False \ critic.model.path=google/gemma-2-2b-it \ critic.model.enable_gradient_checkpointing=False \ critic.ppo_micro_batch_size_per_gpu=4 \ critic.model.fsdp_config.param_offload=False \ critic.model.fsdp_config.optimizer_offload=False \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_example' \ trainer.experiment_name='gemma2b_function_rm' \ trainer.n_gpus_per_node=2 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=10 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/ppo_trainer/run_moonlight16b_a3b_gsm8k_megatron.sh ================================================ set -x export CUDA_DEVICE_MAX_CONNECTIONS=1 # For megatron communication/computation overlapping # 0. download the model huggingface-cli download moonshotai/Moonlight-16B-A3B-Instruct # 1. convert the model to mcore format # change the HF_MODEL_PATH and DIST_CKPT_PATH to your own path HF_MODEL_PATH=/data/models/moonshotai/Moonlight-16B-A3B-Instruct DIST_CKPT_PATH=/data/mcore_ckpt/Moonlight-16B-A3B-Instruct python scripts/converter_hf_to_mcore.py --hf_model_path $HF_MODEL_PATH --output_path $DIST_CKPT_PATH # 2. run the script gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet train_files=$gsm8k_train_path test_files=$gsm8k_test_path ALL_OFFLOAD=${ALL_OFFLOAD:-False} COMMON_PARAM_OFFLOAD=${COMMON_PARAM_OFFLOAD:-$ALL_OFFLOAD} COMMON_GRAD_OFFLOAD=${COMMON_GRAD_OFFLOAD:-$ALL_OFFLOAD} COMMON_OPTIMIZER_OFFLOAD=${COMMON_OPTIMIZER_OFFLOAD:-$ALL_OFFLOAD} ACTOR_PARAM_OFFLOAD=${ACTOR_PARAM_OFFLOAD:-$COMMON_PARAM_OFFLOAD} ACTOR_GRAD_OFFLOAD=${ACTOR_GRAD_OFFLOAD:-$COMMON_GRAD_OFFLOAD} ACTOR_OPTIMIZER_OFFLOAD=${ACTOR_OPTIMIZER_OFFLOAD:-$COMMON_OPTIMIZER_OFFLOAD} REF_PARAM_OFFLOAD=${REF_PARAM_OFFLOAD:-$COMMON_PARAM_OFFLOAD} CRITIC_PARAM_OFFLOAD=${CRITIC_PARAM_OFFLOAD:-$COMMON_PARAM_OFFLOAD} CRITIC_GRAD_OFFLOAD=${CRITIC_GRAD_OFFLOAD:-$COMMON_GRAD_OFFLOAD} CRITIC_OPTIMIZER_OFFLOAD=${CRITIC_OPTIMIZER_OFFLOAD:-$COMMON_OPTIMIZER_OFFLOAD} RM_PARAM_OFFLOAD=${RM_PARAM_OFFLOAD:-$COMMON_PARAM_OFFLOAD} NODES=4 PP=2 TP=8 EP=8 ETP=1 VLLM_TP=4 # RAY_ADDRESS='auto' ray job submit --working-dir . 
-- python3 -m verl.trainer.main_ppo --config-path=./config --config-name='ppo_megatron_trainer'\ algorithm.adv_estimator=gae \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=512 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.trust_remote_code=True \ actor_rollout_ref.model.path=$LLM \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.7 \ critic.optim.lr=1e-5 \ critic.model.path=$LLM \ critic.ppo_micro_batch_size_per_gpu=4 \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_megatron_gsm8k_examples' \ trainer.experiment_name='moonlight_16b_a3b_instruct_1node' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=$NODES \ trainer.save_freq=-1 \ trainer.test_freq=5 \ actor_rollout_ref.model.trust_remote_code=True \ critic.model.trust_remote_code=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.num_layers_in_last_pipeline_stage=13 \ actor_rollout_ref.rollout.tensor_model_parallel_size=$VLLM_TP \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=$PP \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=$PP \ critic.megatron.pipeline_model_parallel_size=$PP \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=$TP \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=$TP \ critic.megatron.tensor_model_parallel_size=$TP \ actor_rollout_ref.actor.megatron.expert_model_parallel_size=$EP \ actor_rollout_ref.ref.megatron.expert_model_parallel_size=$EP \ critic.megatron.expert_model_parallel_size=$EP \ actor_rollout_ref.actor.megatron.expert_tensor_parallel_size=$ETP \ actor_rollout_ref.ref.megatron.expert_tensor_parallel_size=$ETP \ critic.megatron.expert_tensor_parallel_size=$ETP \ actor_rollout_ref.actor.megatron.param_offload=${ACTOR_PARAM_OFFLOAD} \ actor_rollout_ref.actor.megatron.optimizer_offload=${ACTOR_OPTIMIZER_OFFLOAD} \ actor_rollout_ref.actor.megatron.grad_offload=${ACTOR_GRAD_OFFLOAD} \ actor_rollout_ref.ref.megatron.param_offload=${REF_PARAM_OFFLOAD} \ critic.megatron.param_offload=${CRITIC_PARAM_OFFLOAD} \ critic.megatron.optimizer_offload=${CRITIC_OPTIMIZER_OFFLOAD} \ critic.megatron.grad_offload=${CRITIC_GRAD_OFFLOAD} \ actor_rollout_ref.actor.megatron.use_dist_checkpointing=True \ actor_rollout_ref.ref.megatron.use_dist_checkpointing=True \ critic.megatron.use_dist_checkpointing=True \ actor_rollout_ref.actor.megatron.dist_checkpointing_path=$DIST_CKPT_PATH \ actor_rollout_ref.ref.megatron.dist_checkpointing_path=$DIST_CKPT_PATH \ critic.megatron.dist_checkpointing_path=$DIST_CKPT_PATH \ trainer.val_before_train=False \ trainer.total_epochs=100 $@ ================================================ FILE: verl_distillation/examples/ppo_trainer/run_qwen1.5_moe_a2.7b-gsm8k_megatron.sh ================================================ set -x export CUDA_DEVICE_MAX_CONNECTIONS=1 # For megatron communication/computation overlapping # 0. download the model #huggingface-cli download Qwen/Qwen1.5-MoE-A2.7B-Chat # 1. 
convert the model to mcore format # change the HF_MODEL_PATH and DIST_CKPT_PATH to your own path HF_MODEL_PATH=/data/models/Qwen/Qwen1.5-MoE-A2.7B-Chat DIST_CKPT_PATH=/data/mcore_ckpt/Qwen1.5-MoE-A2.7B-Chat python scripts/converter_hf_to_mcore.py --hf_model_path $HF_MODEL_PATH --output_path $DIST_CKPT_PATH # 2. run the script gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet train_files=$gsm8k_train_path test_files=$gsm8k_test_path NODES=4 PP=2 TP=4 CP=1 VLLM_TP=4 # RAY_ADDRESS='auto' ray job submit --working-dir . -- python3 -m verl.trainer.main_ppo --config-path=./config --config-name='ppo_megatron_trainer'\ algorithm.adv_estimator=gae \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=512 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=$HF_MODEL_PATH \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=$TP \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=$PP \ actor_rollout_ref.actor.megatron.context_parallel_size=$CP \ actor_rollout_ref.actor.megatron.use_dist_checkpointing=True \ actor_rollout_ref.actor.megatron.dist_checkpointing_path=$DIST_CKPT_PATH \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=$TP \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=$PP \ actor_rollout_ref.ref.megatron.context_parallel_size=$CP \ actor_rollout_ref.ref.megatron.use_dist_checkpointing=True \ actor_rollout_ref.ref.megatron.dist_checkpointing_path=$DIST_CKPT_PATH \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=2 \ actor_rollout_ref.rollout.gpu_memory_utilization=0.7 \ actor_rollout_ref.rollout.tensor_model_parallel_size=$VLLM_TP \ critic.optim.lr=1e-5 \ critic.model.path=$HF_MODEL_PATH \ critic.ppo_micro_batch_size_per_gpu=4 \ critic.megatron.tensor_model_parallel_size=$TP \ critic.megatron.pipeline_model_parallel_size=$PP \ critic.megatron.context_parallel_size=$CP \ critic.megatron.use_dist_checkpointing=True \ critic.megatron.dist_checkpointing_path=$DIST_CKPT_PATH \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_megatron_gsm8k_examples' \ trainer.experiment_name='qwen1.5_moe_nochat' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=$NODES \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=100 $@ ================================================ FILE: verl_distillation/examples/ppo_trainer/run_qwen2-7b_math_gsm8k_megatron.sh ================================================ set -x export CUDA_DEVICE_MAX_CONNECTIONS=1 # For megatron communication/computation overlapping gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet math_train_path=$HOME/data/math/train.parquet math_test_path=$HOME/data/math/test.parquet train_files="['$gsm8k_train_path', '$math_train_path']" test_files="['$gsm8k_test_path', '$math_test_path']" python3 -m verl.trainer.main_ppo --config-path=./config --config-name='ppo_megatron_trainer'\ algorithm.adv_estimator=gae \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=512 \ 
data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2-7B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=2 \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=2 \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.rollout.tensor_model_parallel_size=4 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=2 \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=2 \ critic.optim.lr=1e-5 \ critic.model.path=Qwen/Qwen2-7B-Instruct \ critic.ppo_micro_batch_size_per_gpu=4 \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_ppo_gsm8k_math_examples' \ trainer.experiment_name='qwen2_7b_megatron' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=100 $@ ================================================ FILE: verl_distillation/examples/ppo_trainer/run_qwen2-7b_rm.sh ================================================ # Disclaimer: the model used in the script is only for academic purposes. set -x # Data preparation scripts are available in ``examples/data_preprocess``. # Example usage: # # python3 examples/data_preprocess/math_dataset.py --local_dir ~/data/math # python3 examples/data_preprocess/gsm8k.py --local_save_dir ~/data/gsm8k gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet math_train_path=$HOME/data/math/train.parquet math_test_path=$HOME/data/math/test.parquet train_files="['$gsm8k_train_path', '$math_train_path']" test_files="['$gsm8k_test_path', '$math_test_path']" # prepare model ckpt huggingface-cli download Qwen/Qwen2-7B-Instruct --local-dir $HOME/models/Qwen2-7B-Instruct & huggingface-cli download sfairXC/FsfairX-LLaMA3-RM-v0.1 --local-dir $HOME/models/FsfairX-LLaMA3-RM-v0.1 & wait python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=gae \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=512 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.return_raw_chat=True \ actor_rollout_ref.model.path="$HOME/models/Qwen2-7B-Instruct" \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.optim.lr_warmup_steps_ratio=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=16 \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \ actor_rollout_ref.rollout.tensor_model_parallel_size=1 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ critic.optim.lr=1e-5 \ critic.model.use_remove_padding=True \ critic.optim.lr_warmup_steps_ratio=0.05 \ critic.model.path="$HOME/models/Qwen2-7B-Instruct" \ critic.model.enable_gradient_checkpointing=True \ critic.ppo_micro_batch_size_per_gpu=32 \
critic.model.fsdp_config.param_offload=False \ critic.model.fsdp_config.optimizer_offload=False \ reward_model.enable=True \ reward_model.model.path="$HOME/models/FsfairX-LLaMA3-RM-v0.1" \ reward_model.model.use_remove_padding=True \ reward_model.model.fsdp_config.param_offload=True \ reward_model.micro_batch_size_per_gpu=32 \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_example' \ trainer.val_before_train=False \ trainer.experiment_name='Qwen2-7B-Instruct_hybrid_rm' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/ppo_trainer/run_qwen2-7b_rm_seq_balance.sh ================================================ set -x gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet math_train_path=$HOME/data/math/train.parquet math_test_path=$HOME/data/math/test.parquet train_files="['$gsm8k_train_path', '$math_train_path']" test_files="['$gsm8k_test_path', '$math_test_path']" python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=gae \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=4096 \ data.max_prompt_length=4096 \ data.max_response_length=4096 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.return_raw_chat=True \ actor_rollout_ref.model.path=Qwen/Qwen2-7B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.ppo_mini_batch_size=512 \ actor_rollout_ref.actor.use_dynamic_bsz=True \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=24000 \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=24000 \ critic.optim.lr=1e-5 \ critic.model.use_remove_padding=True \ critic.model.path=Qwen/Qwen2-7B-Instruct \ critic.model.enable_gradient_checkpointing=True \ critic.use_dynamic_bsz=True \ critic.ppo_max_token_len_per_gpu=98304 \ critic.model.fsdp_config.param_offload=False \ critic.model.fsdp_config.optimizer_offload=False \ reward_model.enable=True \ reward_model.model.path=sfairXC/FsfairX-LLaMA3-RM-v0.1 \ reward_model.model.use_remove_padding=True \ reward_model.model.fsdp_config.param_offload=True \ reward_model.micro_batch_size_per_gpu=32 \ reward_model.use_dynamic_bsz=True \ reward_model.forward_max_token_len_per_gpu=98304 \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_example_gsm8k' \ trainer.experiment_name='qwen2-7b_hybrid_rm_bsz8k_p4k_r4k_seq_packing' \ trainer.n_gpus_per_node=8 \ trainer.val_before_train=False \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/ppo_trainer/run_qwen2-7b_rm_seq_balance_fused_kernels.sh ================================================ set -x gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet math_train_path=$HOME/data/math/train.parquet
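# Note on the token budgets used in these seq_balance scripts (a sketch of the
# arithmetic, not an official formula): with use_dynamic_bsz=True the trainer packs
# variable-length sequences into each forward/backward pass until the per-GPU token
# budget is reached, instead of using a fixed ppo_micro_batch_size_per_gpu. At
# max_prompt_length=4096 and max_response_length=4096, a worst-case sample is 8192
# tokens, so a 24000-token actor budget (ppo_max_token_len_per_gpu) fits at most 2
# full-length samples per pass, while the 98304-token critic/reward budgets fit up
# to 12; shorter samples pack proportionally denser.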
math_test_path=$HOME/data/math/test.parquet train_files="['$gsm8k_train_path', '$math_train_path']" test_files="['$gsm8k_test_path', '$math_test_path']" FUSED_KERNEL_BACKEND=triton # or 'torch' for torch backend python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=gae \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=4096 \ data.max_prompt_length=4096 \ data.max_response_length=4096 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.return_raw_chat=True \ actor_rollout_ref.model.path=Qwen/Qwen2-7B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.model.use_fused_kernels=True \ actor_rollout_ref.model.fused_kernel_options.impl_backend=$FUSED_KERNEL_BACKEND \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.ppo_mini_batch_size=512 \ actor_rollout_ref.actor.use_dynamic_bsz=True \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=24000 \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=24000 \ critic.optim.lr=1e-5 \ critic.model.use_remove_padding=True \ critic.model.path=Qwen/Qwen2-7B-Instruct \ critic.model.enable_gradient_checkpointing=True \ critic.use_dynamic_bsz=True \ critic.ppo_max_token_len_per_gpu=98304 \ critic.model.fsdp_config.param_offload=False \ critic.model.fsdp_config.optimizer_offload=False \ reward_model.enable=True \ reward_model.model.path=sfairXC/FsfairX-LLaMA3-RM-v0.1 \ reward_model.model.use_remove_padding=True \ reward_model.model.fsdp_config.param_offload=True \ reward_model.micro_batch_size_per_gpu=32 \ reward_model.use_dynamic_bsz=True \ reward_model.forward_max_token_len_per_gpu=98304 \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_example_gsm8k' \ trainer.experiment_name='qwen2-7b_hybrid_rm_bsz8k_p4k_r4k_seq_packing_fused_kernel' \ trainer.n_gpus_per_node=8 \ trainer.val_before_train=False \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/ppo_trainer/run_qwen2-7b_rm_seq_balance_nsys.sh ================================================ set -x gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet math_train_path=$HOME/data/math/train.parquet math_test_path=$HOME/data/math/test.parquet train_files=${train_files:-"$gsm8k_train_path"} test_files=${test_files:-"$gsm8k_test_path"} PROFILE_STEPS="[1,2,5]" # or [] or null PROFILE_RANKS_ALL=False # or True PROFILE_RANKS=[0,4] DISCRETE=True # or False python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=gae \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=4096 \ data.max_prompt_length=4096 \ data.max_response_length=4096 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.return_raw_chat=True \ actor_rollout_ref.model.path=Qwen/Qwen2-7B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.ppo_mini_batch_size=512
\ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=2 \ actor_rollout_ref.actor.use_dynamic_bsz=True \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=12000 \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.actor.profiler.enable=True \ actor_rollout_ref.actor.profiler.ranks=$PROFILE_RANKS \ actor_rollout_ref.actor.profiler.all_ranks=$PROFILE_RANKS_ALL \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=24000 \ critic.optim.lr=1e-5 \ critic.model.use_remove_padding=True \ critic.model.path=Qwen/Qwen2-7B-Instruct \ critic.model.enable_gradient_checkpointing=True \ critic.ppo_micro_batch_size_per_gpu=2 \ critic.use_dynamic_bsz=True \ critic.ppo_max_token_len_per_gpu=98304 \ critic.model.fsdp_config.param_offload=False \ critic.model.fsdp_config.optimizer_offload=False \ critic.profiler.enable=True \ critic.profiler.ranks=$PROFILE_RANKS \ critic.profiler.all_ranks=$PROFILE_RANKS_ALL \ reward_model.enable=True \ reward_model.model.path=sfairXC/FsfairX-LLaMA3-RM-v0.1 \ reward_model.model.use_remove_padding=True \ reward_model.model.fsdp_config.param_offload=True \ reward_model.micro_batch_size_per_gpu=32 \ reward_model.use_dynamic_bsz=True \ reward_model.forward_max_token_len_per_gpu=98304 \ reward_model.profiler.enable=True \ reward_model.profiler.ranks=$PROFILE_RANKS \ reward_model.profiler.all_ranks=$PROFILE_RANKS_ALL \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_example_gsm8k' \ trainer.experiment_name='qwen2-7b_hybrid_rm_bsz8k_p4k_r4k_seq_packing' \ trainer.n_gpus_per_node=8 \ trainer.val_before_train=False \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=-1 \ trainer.total_epochs=15 \ trainer.total_training_steps=6 \ global_profiler.profile_continuous_steps=True \ global_profiler.tool=nsys \ global_profiler.steps=$PROFILE_STEPS \ global_profiler.global_tool_config.nsys.discrete=$DISCRETE $@ ================================================ FILE: verl_distillation/examples/ppo_trainer/run_qwen2-7b_seq_balance.sh ================================================ set -x gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet math_train_path=$HOME/data/math/train.parquet math_test_path=$HOME/data/math/test.parquet train_files="['$gsm8k_train_path', '$math_train_path']" test_files="['$gsm8k_test_path', '$math_test_path']" # For async rollout mode, dataset should return raw chat.
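# A sketch of the sync/async toggle (assumption: flipping only this variable is
# enough for a basic async run; any extra async-specific overrides are not shown):
#   rollout_mode="async"
# The guard below then sets return_raw_chat="True", so data.return_raw_chat makes
# the dataset hand raw chat messages to the async rollout instead of pre-templated
# prompts.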
rollout_mode="sync" if [ "$rollout_mode" = "async" ]; then return_raw_chat="True" fi python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=gae \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.return_raw_chat=$return_raw_chat \ data.train_batch_size=4096 \ data.max_prompt_length=4096 \ data.max_response_length=4096 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2-7B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.ppo_mini_batch_size=512 \ actor_rollout_ref.actor.use_dynamic_bsz=True \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=24000 \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.mode=$rollout_mode \ actor_rollout_ref.rollout.multi_turn.format=hermes \ actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=24000 \ critic.optim.lr=1e-5 \ critic.model.use_remove_padding=True \ critic.model.path=Qwen/Qwen2-7B-Instruct \ critic.model.enable_gradient_checkpointing=True \ critic.ppo_max_token_len_per_gpu=98304 \ critic.model.fsdp_config.param_offload=False \ critic.model.fsdp_config.optimizer_offload=False \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_example_gsm8k' \ trainer.experiment_name='qwen2-7b_function_rm_bsz8k_p4k_r4k_seq_packing' \ trainer.n_gpus_per_node=8 \ trainer.val_before_train=False \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/ppo_trainer/run_qwen2-7b_sglang_seq_balance.sh ================================================ set -x gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet math_train_path=$HOME/data/math/train.parquet math_test_path=$HOME/data/math/test.parquet train_files="['$gsm8k_train_path', '$math_train_path']" test_files="['$gsm8k_test_path', '$math_test_path']" python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=gae \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=4096 \ data.max_prompt_length=4096 \ data.max_response_length=4096 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2-7B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.ppo_mini_batch_size=512 \ actor_rollout_ref.actor.use_dynamic_bsz=True \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=24000 \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=sglang \ actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=24000 \ critic.optim.lr=1e-5 \ critic.model.use_remove_padding=True \ critic.model.path=Qwen/Qwen2-7B-Instruct \ 
critic.model.enable_gradient_checkpointing=True \ critic.ppo_max_token_len_per_gpu=98304 \ critic.model.fsdp_config.param_offload=False \ critic.model.fsdp_config.optimizer_offload=False \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_example_gsm8k' \ trainer.experiment_name='qwen2-7b_function_rm_bsz8k_p4k_r4k_seq_packing' \ trainer.n_gpus_per_node=8 \ trainer.val_before_train=False \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/ppo_trainer/run_qwen2.5-32b.sh ================================================ set -x gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet math_train_path=$HOME/data/math/train.parquet math_test_path=$HOME/data/math/test.parquet train_files="['$gsm8k_train_path', '$math_train_path']" test_files="['$gsm8k_test_path', '$math_test_path']" python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=gae \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2.5-32B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=8 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \ actor_rollout_ref.rollout.tensor_model_parallel_size=4 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \ critic.optim.lr=1e-5 \ critic.model.use_remove_padding=True \ critic.model.path=Qwen/Qwen2.5-32B-Instruct \ critic.model.enable_gradient_checkpointing=False \ critic.ppo_micro_batch_size_per_gpu=8 \ critic.model.fsdp_config.param_offload=False \ critic.model.fsdp_config.optimizer_offload=False \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_example' \ trainer.experiment_name='Qwen2.5-32B-Instruct_function_rm' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=4 \ trainer.save_freq=20 \ trainer.test_freq=10 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/ppo_trainer/run_qwen3-8b_npu.sh ================================================ set -x export VLLM_USE_V1=1 python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=gae \ data.train_files=$HOME/data/dapo-math-17k.parquet \ data.val_files=$HOME/data/dapo-math-17k.parquet \ data.train_batch_size=256 \ data.max_prompt_length=2000 \ data.max_response_length=12000 \ data.shuffle=False \ actor_rollout_ref.model.path=Qwen/Qwen3-8B \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=64 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=1 \ actor_rollout_ref.actor.fsdp_config.param_offload=True \
actor_rollout_ref.actor.fsdp_config.optimizer_offload=True \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=2 \ actor_rollout_ref.actor.use_dynamic_bsz=True \ actor_rollout_ref.actor.use_torch_compile=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=1 \ actor_rollout_ref.rollout.tensor_model_parallel_size=1 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.9 \ actor_rollout_ref.rollout.max_num_batched_tokens=14000 \ actor_rollout_ref.rollout.max_num_seqs=64 \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=True \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.enforce_eager=False \ critic.optim.lr=1e-5 \ critic.model.use_remove_padding=True \ critic.model.path=Qwen/Qwen3-8B \ critic.model.enable_gradient_checkpointing=True \ critic.ppo_micro_batch_size_per_gpu=1 \ critic.ulysses_sequence_parallel_size=2 \ critic.model.fsdp_config.param_offload=True \ critic.model.fsdp_config.optimizer_offload=True \ critic.use_dynamic_bsz=True \ trainer.critic_warmup=0 \ trainer.logger=console \ trainer.project_name='verl_example_dapo_math_17k' \ trainer.experiment_name='qwen3_8b_fsdp' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=-1 \ trainer.val_before_train=False \ trainer.device=npu \ trainer.max_actor_ckpt_to_keep=1 \ trainer.max_critic_ckpt_to_keep=1 \ trainer.total_training_steps=100 $@ ================================================ FILE: verl_distillation/examples/ray/tutorial.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "0ddc582b", "metadata": {}, "source": [ "# VeRL Ray API Tutorial" ] }, { "cell_type": "markdown", "id": "71fe3b94", "metadata": {}, "source": [ "## Chapter 1: Ray Basics" ] }, { "cell_type": "code", "execution_count": 144, "id": "1347d381", "metadata": { "tags": [] }, "outputs": [], "source": [ "import os" ] }, { "cell_type": "code", "execution_count": 145, "id": "e75b9d44", "metadata": { "tags": [] }, "outputs": [], "source": [ "import warnings\n", "\n", "import ray\n", "import torch\n", "\n", "warnings.filterwarnings(\"ignore\")" ] }, { "cell_type": "code", "execution_count": 146, "id": "2e90ae00", "metadata": { "tags": [] }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "2024-11-01 17:27:19,132\tINFO worker.py:1752 -- Started a local Ray instance.\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "9cc9d2ccbdfb48918c8fd6cd13a0807a", "version_major": 2, "version_minor": 0 }, "text/html": [ "
\n", "
\n", "
\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
Python version:3.9.2
Ray version:2.10.0
\n", "\n", "
\n", "
\n" ], "text/plain": [ "RayContext(dashboard_url='', python_version='3.9.2', ray_version='2.10.0', ray_commit='09abba26b5bf2707639bb637c208d062a47b46f6')" ] }, "execution_count": 146, "metadata": {}, "output_type": "execute_result" }, { "name": "stdout", "output_type": "stream", "text": [ "\u001b[36m(GPUAccumulator pid=224400)\u001b[0m rank 0, value: tensor([1.], device='cuda:0')\n", "\u001b[36m(GPUAccumulator pid=225234)\u001b[0m rank 2, value: tensor([3.], device='cuda:0')\n", "\u001b[36m(GPUAccumulator pid=225607)\u001b[0m rank 0, value: tensor([2.], device='cuda:0')\n", "\u001b[36m(GPUAccumulator pid=226423)\u001b[0m rank 1, value: tensor([3.], device='cuda:0')\n", "\u001b[36m(GPUAccumulator pid=226857)\u001b[0m rank 3, value: tensor([6.], device='cuda:0')\n", "\u001b[36m(GPUAccumulatorDecorator pid=227475)\u001b[0m 10\n", "\u001b[36m(GPUAccumulatorDecorator pid=227475)\u001b[0m rank 0, value: tensor([10.], device='cuda:0')\n", "\u001b[36m(GPUAccumulatorDecorator pid=227655)\u001b[0m rank 1, value: tensor([11.], device='cuda:0')\n" ] } ], "source": [ "# Build a local ray cluster. The head node and worker node are on this machine\n", "ray.init()" ] }, { "cell_type": "markdown", "id": "a127e4e4", "metadata": {}, "source": [ "Implement an Accumulator class." ] }, { "cell_type": "code", "execution_count": 147, "id": "20e7b9a3", "metadata": { "tags": [] }, "outputs": [], "source": [ "@ray.remote\n", "class Accumulator:\n", " def __init__(self):\n", " self.value = 0\n", "\n", " def add(self, x):\n", " self.value += x\n", "\n", " def get_value(self):\n", " return self.value" ] }, { "cell_type": "code", "execution_count": 148, "id": "3b80098c", "metadata": { "tags": [] }, "outputs": [], "source": [ "# Instantiate an accumulator. Accumulator can be viewed as a process, acting as an RPC service.\n", "accumulator = Accumulator.remote()" ] }, { "cell_type": "code", "execution_count": 149, "id": "b14b1009", "metadata": { "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "0\n" ] } ], "source": [ "value_ref = accumulator.get_value.remote() # Check the current value. Note that this function returns immediately and does not actually wait for the remote execution to complete.\n", "# Get the value\n", "value = ray.get(value_ref)\n", "print(value)" ] }, { "cell_type": "code", "execution_count": 150, "id": "513a84b3", "metadata": { "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "10\n" ] } ], "source": [ "# Accumulate, then check the result.\n", "accumulator.add.remote(10) # Similarly, the 'add' here will return immediately.\n", "new_value = ray.get(accumulator.get_value.remote())\n", "print(new_value)" ] }, { "cell_type": "markdown", "id": "3c332fe0", "metadata": {}, "source": [ "## Chapter 2: Resource Pool and RayWorkerGroup\n", "In the previous example, it was a simple single-process worker. \n", "In this example, we implement a worker with a GPU and form a RayWorkerGroup. Within this RayWorkerGroup, we implement a simple operation of an accumulator." 
] }, { "cell_type": "code", "execution_count": 151, "id": "04229afb", "metadata": { "tags": [] }, "outputs": [], "source": [ "from verl.single_controller.base import Worker\n", "from verl.single_controller.ray.base import RayClassWithInitArgs, RayResourcePool, RayWorkerGroup, merge_resource_pool" ] }, { "cell_type": "code", "execution_count": 152, "id": "0d0dbd58", "metadata": { "tags": [] }, "outputs": [], "source": [ "resource_pool = RayResourcePool([4], use_gpu=True)" ] }, { "cell_type": "code", "execution_count": 153, "id": "68f6838a", "metadata": { "tags": [] }, "outputs": [], "source": [ "@ray.remote\n", "class GPUAccumulator(Worker):\n", " def __init__(self) -> None:\n", " super().__init__()\n", " # The initial value of each rank is the same as the rank\n", " self.value = torch.zeros(size=(1,), device=\"cuda\") + self.rank\n", "\n", " def add(self, x):\n", " self.value += x\n", " print(f\"rank {self.rank}, value: {self.value}\")\n", " return self.value.cpu()" ] }, { "cell_type": "code", "execution_count": 154, "id": "23aad8fe", "metadata": { "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "[tensor([1.]), tensor([2.]), tensor([3.]), tensor([4.])]\n" ] } ], "source": [ "# Each worker's initial value is its rank, and then each rank's value is incremented by 1, so the values obtained on each rank are [1, 2, 3, 4]\n", "class_with_args = RayClassWithInitArgs(cls=GPUAccumulator)\n", "worker_group = RayWorkerGroup(resource_pool, class_with_args)\n", "print(worker_group.execute_all_sync(\"add\", x=[1, 1, 1, 1]))" ] }, { "cell_type": "markdown", "id": "e6705284", "metadata": {}, "source": [ "The principle of parameter passing: The input parameter is a list of length world_size, where each element in the list is dispatched respectively to each worker in the RayWorkerGroup. \n", "The return parameter is also a list, corresponding to the return value of each worker." ] }, { "cell_type": "markdown", "id": "d25c2412", "metadata": {}, "source": [ "### GPU Resource Sharing" ] }, { "cell_type": "markdown", "id": "f74f6d24", "metadata": {}, "source": [ "RayWorkerGroups mapped to the same resource pool share the GPU. In this example, we implement three resource pools: the first occupies 4 GPUs, the second also occupies 4 GPUs, and the last occupies all 8 GPUs. Among them, the first resource pool reuses the resource pool mentioned above." 
] }, { "cell_type": "code", "execution_count": 155, "id": "49f9c06f", "metadata": { "tags": [] }, "outputs": [], "source": [ "# Create a new resource pool and then merge the newly created resource pool with the previous one.\n", "resource_pool_1 = RayResourcePool([4], use_gpu=True, name_prefix=\"a\")\n", "resource_pool_merge = merge_resource_pool(resource_pool, resource_pool_1)" ] }, { "cell_type": "code", "execution_count": 156, "id": "05c2e305", "metadata": { "tags": [] }, "outputs": [], "source": [ "# Establish a RayWorkerGroup on the newly created resource pool.\n", "worker_group_1 = RayWorkerGroup(resource_pool_1, class_with_args)\n", "worker_group_merge = RayWorkerGroup(resource_pool_merge, class_with_args)" ] }, { "cell_type": "code", "execution_count": 157, "id": "6b9b13f4", "metadata": { "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "[tensor([2.]), tensor([3.]), tensor([4.]), tensor([5.])]\n" ] } ], "source": [ "# Run 'add' on the second set of 4 GPUs; the result should be [2, 3, 4, 5].\n", "output_1 = worker_group_1.execute_all_sync(\"add\", x=[2, 2, 2, 2])\n", "print(output_1)" ] }, { "cell_type": "code", "execution_count": 158, "id": "d856d030", "metadata": { "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "[tensor([3.]), tensor([4.]), tensor([5.]), tensor([6.]), tensor([7.]), tensor([8.]), tensor([9.]), tensor([10.])]\n" ] } ], "source": [ "# Run 'add' on the merged set of 8 GPUs; the result should be [3, 4, 5, 6, 7, 8, 9, 10].\n", "output_merge = worker_group_merge.execute_all_sync(\"add\", x=[3, 3, 3, 3, 3, 3, 3, 3])\n", "print(output_merge)" ] }, { "cell_type": "code", "execution_count": 159, "id": "33a4628c", "metadata": { "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "4 4 8\n" ] } ], "source": [ "print(worker_group.world_size, worker_group_1.world_size, worker_group_merge.world_size)" ] }, { "cell_type": "markdown", "id": "3df19d13", "metadata": {}, "source": [ "## Chapter 3: Data Dispatch, Execution and Collection" ] }, { "cell_type": "markdown", "id": "acb22d9d", "metadata": {}, "source": [ "In the above example, we used the `execute_all_sync` function in the RayWorkerGroup to dispatch data from the driver to each worker. This is very inconvenient for coding. \n", "In this chapter, we use the form of function decorators to allow RayWorkerGroup to directly call functions written in the Worker, and to greatly simplify parameter passing." 
] }, { "cell_type": "code", "execution_count": 160, "id": "35237432", "metadata": { "tags": [] }, "outputs": [], "source": [ "from verl.single_controller.base.decorator import Dispatch, Execute, register" ] }, { "cell_type": "code", "execution_count": 161, "id": "88b8ba3b", "metadata": { "tags": [] }, "outputs": [], "source": [ "@ray.remote\n", "class GPUAccumulatorDecorator(Worker):\n", " def __init__(self) -> None:\n", " super().__init__()\n", " # The initial value of each rank is the same as the rank\n", " self.value = torch.zeros(size=(1,), device=\"cuda\") + self.rank\n", "\n", " # map from a single input to all the worker\n", " @register(Dispatch.ONE_TO_ALL)\n", " def add(self, x):\n", " print(x)\n", " self.value = self.value + x\n", " print(f\"rank {self.rank}, value: {self.value}\")\n", " return self.value.cpu()" ] }, { "cell_type": "code", "execution_count": 162, "id": "eddaa043", "metadata": { "tags": [] }, "outputs": [], "source": [ "class_with_args = RayClassWithInitArgs(cls=GPUAccumulatorDecorator)\n", "gpu_accumulator_decorator = RayWorkerGroup(resource_pool_merge, class_with_args)" ] }, { "cell_type": "code", "execution_count": 163, "id": "10087c91", "metadata": { "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "[tensor([10.]), tensor([11.]), tensor([12.]), tensor([13.]), tensor([14.]), tensor([15.]), tensor([16.]), tensor([17.])]\n" ] } ], "source": [ "# As we can see, 10 is automatically dispatched to each Worker in this RayWorkerGroup.\n", "print(gpu_accumulator_decorator.add(x=10))" ] }, { "cell_type": "markdown", "id": "540ee6ad", "metadata": {}, "source": [ "### Custom Dispatch, Collection\n", "Users can customize `dispatch` and `collection` function. You only need to write the `dispatch_fn` and `collect_fn` functions yourself. We also support executing RPC only on rank_zero, with specific examples provided below." ] }, { "cell_type": "code", "execution_count": 164, "id": "8e041270", "metadata": { "tags": [] }, "outputs": [], "source": [ "from verl.single_controller.base.decorator import Dispatch, collect_all_to_all, register" ] }, { "cell_type": "code", "execution_count": 165, "id": "43b5be31", "metadata": { "tags": [] }, "outputs": [], "source": [ "def two_to_all_dispatch_fn(worker_group, *args, **kwargs):\n", " \"\"\"\n", " Assume the input is a list of 2. 
Duplicate the input interleaved and pass to each worker.\n", " \"\"\"\n", " for arg in args:\n", " assert len(arg) == 2\n", " for i in range(worker_group.world_size - 2):\n", " arg.append(arg[i % 2])\n", " for k, v in kwargs.items():\n", " assert len(v) == 2\n", " for i in range(worker_group.world_size - 2):\n", " v.append(v[i % 2])\n", " return args, kwargs\n", "\n", "\n", "@ray.remote\n", "class TestActor(Worker):\n", " # TODO: pass *args and **kwargs is bug prone and not very convincing\n", " def __init__(self, x) -> None:\n", " super().__init__()\n", " self._x = x\n", "\n", " def foo(self, y):\n", " return self._x + y\n", "\n", " @register(dispatch_mode=Dispatch.ALL_TO_ALL, execute_mode=Execute.RANK_ZERO)\n", " def foo_rank_zero(self, x, y):\n", " return self._x + y + x\n", "\n", " @register(dispatch_mode={\"dispatch_fn\": two_to_all_dispatch_fn, \"collect_fn\": collect_all_to_all})\n", " def foo_custom(self, x, y):\n", " return self._x + y + x" ] }, { "cell_type": "code", "execution_count": 166, "id": "83ec6609", "metadata": { "tags": [] }, "outputs": [], "source": [ "class_with_args = RayClassWithInitArgs(cls=TestActor, x=2)\n", "worker_group = RayWorkerGroup(resource_pool, class_with_args)" ] }, { "cell_type": "code", "execution_count": 167, "id": "62c58d8a", "metadata": { "tags": [] }, "outputs": [], "source": [ "output_ref = worker_group.foo_custom(x=[1, 2], y=[5, 6])\n", "assert output_ref == [8, 10, 8, 10]\n", "\n", "output_ref = worker_group.foo_rank_zero(x=1, y=2)\n", "assert output_ref == 5" ] }, { "cell_type": "code", "execution_count": 168, "id": "14689353", "metadata": { "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "8\n" ] } ], "source": [ "print(gpu_accumulator_decorator.world_size)" ] }, { "cell_type": "code", "execution_count": 169, "id": "2c80bbf4", "metadata": { "tags": [] }, "outputs": [], "source": [ "# Shutdown ray cluster\n", "ray.shutdown()" ] }, { "cell_type": "markdown", "id": "a5c8151c", "metadata": {}, "source": [ "## Chapter 4: NVMegatronRayWorkerGroup" ] }, { "cell_type": "markdown", "id": "cd5680e9", "metadata": {}, "source": [ "Due to the Ray issue, we can only support max_colocate_count=1 in RayResourcePool for now. \n", "This means that each GPU can only have one process.\n", "We can support max_colocate > 1 when applying this pull request: https://github.com/ray-project/ray/pull/44385" ] }, { "cell_type": "markdown", "id": "92724419", "metadata": {}, "source": [ "Therefore, we need to restart the ray and initialize a new resource_pool to demonstrate the **NVMegatronRayWorkerGroup**" ] }, { "cell_type": "code", "execution_count": null, "id": "9b038538", "metadata": { "tags": [] }, "outputs": [], "source": [ "# Build a local ray cluster. The head node and worker node are on this machine\n", "ray.init()" ] }, { "cell_type": "markdown", "id": "ebfd8798", "metadata": {}, "source": [ "Finally, we implement a `NVMegatronRayWorkerGroup`, within which we create a Megatron and then run a tensor parallel (tp) split Llama mlp layer. Here, we use a complex dispatch mode, `Megatron_COMPUTE`. This dispatch mode assumes that user passes the data partitioned by DP dimension. The data is dispatched to all tp/pp ranks within the same dp group, and ultimately only collects output data from tp=0 and the last pp. In this way, for users that only write code on the driver, the Megatron behind the RPC becomes transparent." 
] }, { "cell_type": "code", "execution_count": 171, "id": "5a032154", "metadata": { "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "/opt/tiger/Megatron-LM\n", "/opt/tiger/Megatron-LM/megatron/__init__.py\n" ] } ], "source": [ "import sys\n", "\n", "current_pythonpath = os.environ.get(\"PYTHONPATH\", \"\")\n", "\n", "new_path = \"/opt/tiger/Megatron-LM\"\n", "\n", "new_pythonpath = f\"{new_path}:{current_pythonpath}\" if current_pythonpath else new_path\n", "\n", "os.environ[\"PYTHONPATH\"] = new_pythonpath\n", "\n", "print(new_path)\n", "sys.path.append(new_path)\n", "\n", "import megatron\n", "\n", "print(megatron.__file__)" ] }, { "cell_type": "code", "execution_count": 172, "id": "8c84cd5a", "metadata": { "tags": [] }, "outputs": [], "source": [ "from megatron.core import parallel_state as mpu\n", "from omegaconf import OmegaConf\n", "\n", "from verl.single_controller.base.decorator import Dispatch, Execute, register\n", "from verl.single_controller.base.megatron.worker import MegatronWorker\n", "from verl.single_controller.ray.base import RayClassWithInitArgs, RayResourcePool, RayWorkerGroup\n", "from verl.single_controller.ray.megatron import NVMegatronRayWorkerGroup" ] }, { "cell_type": "code", "execution_count": 173, "id": "1b1debcc", "metadata": { "tags": [] }, "outputs": [], "source": [ "resource_pool = RayResourcePool([4], use_gpu=True, max_colocate_count=1)" ] }, { "cell_type": "code", "execution_count": 174, "id": "bccbe081", "metadata": { "tags": [] }, "outputs": [], "source": [ "@ray.remote\n", "class MLPLayerWorker(MegatronWorker):\n", " def __init__(self):\n", " super().__init__()\n", " rank = int(os.environ[\"LOCAL_RANK\"])\n", " torch.distributed.init_process_group(backend=\"nccl\")\n", " torch.cuda.set_device(rank)\n", "\n", " mpu.initialize_model_parallel(\n", " tensor_model_parallel_size=4,\n", " pipeline_model_parallel_size=1,\n", " virtual_pipeline_model_parallel_size=None,\n", " pipeline_model_parallel_split_rank=None,\n", " use_sharp=False,\n", " context_parallel_size=1,\n", " expert_model_parallel_size=1,\n", " nccl_communicator_config_path=None,\n", " )\n", " from megatron.core import tensor_parallel\n", "\n", " tensor_parallel.model_parallel_cuda_manual_seed(10)\n", "\n", " @register(Dispatch.ONE_TO_ALL)\n", " def init_model(self, config):\n", " from omegaconf import OmegaConf\n", "\n", " from verl.models.llama.megatron.layers import ParallelLlamaMLP\n", " from verl.utils.megatron_utils import init_model_parallel_config\n", "\n", " megatron_config = OmegaConf.create(\n", " {\n", " \"sequence_parallel\": False,\n", " \"param_dtype\": \"fp32\",\n", " \"tensor_model_parallel_size\": mpu.get_tensor_model_parallel_world_size(),\n", " \"pipeline_model_parallel_rank\": mpu.get_pipeline_model_parallel_rank(),\n", " \"pipeline_model_parallel_size\": mpu.get_pipeline_model_parallel_world_size(),\n", " \"virtual_pipeline_model_parallel_rank\": mpu.get_virtual_pipeline_model_parallel_rank(),\n", " \"virtual_pipeline_model_parallel_size\": mpu.get_virtual_pipeline_model_parallel_world_size(),\n", " }\n", " )\n", "\n", " megatron_config = init_model_parallel_config(megatron_config)\n", " self.parallel_layer = ParallelLlamaMLP(config=config, megatron_config=megatron_config)\n", "\n", " @register(Dispatch.ONE_TO_ALL)\n", " def get_weights(self):\n", " output = {}\n", " for key, val in self.parallel_layer.named_parameters():\n", " output[key] = val\n", " return output\n", "\n", " @register(Dispatch.MEGATRON_COMPUTE)\n", " def run_layer(self, 
x):\n", " x = x.to(\"cuda\")\n", " y = self.parallel_layer(x)\n", " return y" ] }, { "cell_type": "code", "execution_count": 175, "id": "a655271d", "metadata": { "tags": [] }, "outputs": [], "source": [ "layer_cls = RayClassWithInitArgs(cls=MLPLayerWorker)\n", "layer_worker_group = NVMegatronRayWorkerGroup(\n", " resource_pool=resource_pool,\n", " ray_cls_with_init=layer_cls,\n", ")" ] }, { "cell_type": "code", "execution_count": 176, "id": "f105ebee", "metadata": { "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "4 4 1 1\n" ] } ], "source": [ "print(layer_worker_group.world_size, layer_worker_group.tp_size, layer_worker_group.pp_size, layer_worker_group.dp_size)" ] }, { "cell_type": "code", "execution_count": 177, "id": "38655091", "metadata": { "tags": [] }, "outputs": [], "source": [ "ffn_hidden_size = 11008\n", "batch_size = 16\n", "seq_len = 2048\n", "hidden_size = 4096\n", "\n", "config = OmegaConf.create(\n", " {\n", " \"hidden_size\": hidden_size,\n", " \"intermediate_size\": ffn_hidden_size,\n", " \"hidden_act\": \"silu\",\n", " \"pretraining_tp\": 1,\n", " \"tp\": layer_worker_group.tp_size,\n", " }\n", ")" ] }, { "cell_type": "code", "execution_count": 178, "id": "a026efca", "metadata": { "tags": [] }, "outputs": [], "source": [ "x = torch.rand(size=(seq_len, batch_size, hidden_size), dtype=torch.float32)" ] }, { "cell_type": "code", "execution_count": 179, "id": "f5fcaf13", "metadata": { "tags": [] }, "outputs": [ { "data": { "text/plain": [ "[None, None, None, None]" ] }, "execution_count": 179, "metadata": {}, "output_type": "execute_result" } ], "source": [ "layer_worker_group.init_model(config)" ] }, { "cell_type": "code", "execution_count": 180, "id": "3f5cc9b4", "metadata": { "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "torch.Size([2048, 16, 4096])\n" ] } ], "source": [ "output = layer_worker_group.run_layer(\n", " [x]\n", ") # This must be a list of size 1, ensuring that the input equals the data parallel (dp).\n", "print(output[0].shape)" ] }, { "cell_type": "code", "execution_count": 181, "id": "49792210", "metadata": { "tags": [] }, "outputs": [], "source": [ "# Shutdown ray cluster\n", "ray.shutdown()" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.2" } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: verl_distillation/examples/reinforce_plus_plus_trainer/run_qwen2-7b_math_rf.sh ================================================ set -x gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet math_train_path=$HOME/data/math/train.parquet math_test_path=$HOME/data/math/test.parquet train_files="['$gsm8k_train_path', '$math_train_path']" test_files="['$gsm8k_test_path', '$math_test_path']" python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=reinforce_plus_plus \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2-7B-Instruct \ actor_rollout_ref.actor.optim.lr=3e-6 \ actor_rollout_ref.model.use_remove_padding=True \ 
actor_rollout_ref.actor.ppo_mini_batch_size=1024 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=16 \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=mse \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=8 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=16 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=True \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_gsm8k' \ trainer.experiment_name='qwen2_7b_function_rm' \ trainer.n_gpus_per_node=16 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/reinforce_plus_plus_trainer/run_qwen2-7b_math_rf_baseline.sh ================================================ set -x gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet math_train_path=$HOME/data/math/train.parquet math_test_path=$HOME/data/math/test.parquet train_files="['$gsm8k_train_path', '$math_train_path']" test_files="['$gsm8k_test_path', '$math_test_path']" python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=reinforce_plus_plus_baseline \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2-7B-Instruct \ actor_rollout_ref.actor.optim.lr=3e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=1024 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=16 \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=mse \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=8 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=16 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=True \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_gsm8k' \ trainer.experiment_name='qwen2_7b_function_rm' \ trainer.n_gpus_per_node=16 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/remax_trainer/run_qwen2.5-3b_seq_balance.sh ================================================ set -x export HF_DATASETS_OFFLINE=1 export TRANSFORMERS_OFFLINE=1 python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=remax \ 
data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=512 \ data.max_prompt_length=512 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2.5-3B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=128 \ actor_rollout_ref.actor.use_dynamic_bsz=True \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=30000 \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.8 \ actor_rollout_ref.rollout.n=4 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=True \ algorithm.kl_penalty=kl \ algorithm.kl_ctrl.kl_coef=0.001 \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_remax_example_gsm8k' \ trainer.experiment_name='qwen2.5_3b_function_rm_kl1e-3' \ trainer.val_before_train=False \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=5 \ trainer.total_epochs=5 $@ ================================================ FILE: verl_distillation/examples/remax_trainer/run_qwen2.5-7b_seq_balance.sh ================================================ set -x export HF_DATASETS_OFFLINE=1 export TRANSFORMERS_OFFLINE=1 python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=remax \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=1024 \ data.max_prompt_length=512 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2.5-7B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.use_dynamic_bsz=True \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=24000 \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.8 \ actor_rollout_ref.rollout.n=4 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=True \ algorithm.kl_penalty=kl \ algorithm.kl_ctrl.kl_coef=0.001 \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_remax_example_gsm8k' \ trainer.experiment_name='qwen2.5_7b_function_rm_kl1e-3' \ trainer.val_before_train=False \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=5 \ trainer.total_epochs=10 $@ ================================================ FILE: verl_distillation/examples/rloo_trainer/run_qwen2-7b.sh ================================================ set -x python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=rloo \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=1024 \ data.max_prompt_length=512 
\ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2-7B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=80 \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=160 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=160 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=True \ algorithm.kl_penalty=kl \ algorithm.kl_ctrl.kl_coef=0.001 \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_rloo_example_gsm8k' \ trainer.experiment_name='qwen2_7b_function_rm' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/rollout_importance_sampling/README.md ================================================ # Rollout Importance Sampling (IS) Examples This directory contains examples and documentation for using Rollout Importance Sampling to correct the distribution mismatch between rollout and training policies. **References:** - When Speed Kills Stability: https://yingru.notion.site/When-Speed-Kills-Stability-271211a558b7808d8b12d403fd15edda - Off-policy RL: https://fengyao.notion.site/off-policy-rl ## Overview Rollout Importance Sampling corrects for distribution mismatch when: 1. **Rollout generation** uses one policy (e.g., vLLM with BFloat16) 2. **Training** uses another policy (e.g., FSDP with FP32) Left uncorrected, this mismatch leads to biased gradient estimates. ## Quick Start ### Basic Configuration ```yaml algorithm: # Main control: set threshold to enable (null = disabled) rollout_is_threshold: 2.0 # Whether to apply weights to policy loss (true) or just compute metrics (false) rollout_is: true rollout_is_level: token rollout_is_mode: truncate # IMPORTANT: Must enable log prob calculation actor_rollout_ref: rollout: calculate_log_probs: true ``` ### Running the Example ```bash # Basic example with token-level truncate bash examples/rollout_importance_sampling/run_with_rollout_is.sh ``` ## Configuration Options ### Aggregation Levels (`rollout_is_level`) | Level | Properties | Threshold Range | |-------|-----------|-----------------| | **token** | Per-token | 1.5 - 5.0 | | **sequence** | Per-sequence | 2.0 - 10.0 | | **geometric** | Geometric mean | 1.0002 - 1.001 | ### Bounding Modes (`rollout_is_mode`) | Mode | Behavior | |------|----------| | **truncate** | Cap weights at upper threshold only | | **mask** | Zero out weights outside [lower, upper] |
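To make the two tables above concrete, here is a minimal sketch of how the IS weights can be derived from per-token log-probabilities, staying in log space until the final exponentiation. It is an illustration only, not the verl implementation (that lives in `verl/trainer/ppo/mismatch_helper.py`); the function and argument names below are hypothetical.

```python
import torch

def rollout_is_weights(train_logp, rollout_logp, mask,
                       level="token", mode="truncate",
                       upper=2.0, lower=None):
    """Hedged sketch of rollout IS weights; not the verl implementation.

    All inputs are float tensors of shape [batch, seq_len]; mask is 1.0
    on response tokens and 0.0 on padding.
    """
    # Auto-reciprocal lower bound, mirroring rollout_is_threshold_lower=null.
    lower = 1.0 / upper if lower is None else lower
    log_ratio = (train_logp - rollout_logp) * mask

    if level == "token":        # independent per-token ratios
        log_w = log_ratio
    elif level == "sequence":   # product of ratios over the whole response
        log_w = log_ratio.sum(-1, keepdim=True).expand_as(log_ratio)
    else:                       # geometric mean of per-token ratios
        n_tok = mask.sum(-1, keepdim=True).clamp(min=1.0)
        log_w = (log_ratio.sum(-1, keepdim=True) / n_tok).expand_as(log_ratio)

    w = log_w.exp()             # exponentiate last to avoid overflow
    if mode == "truncate":      # cap at the upper threshold only
        w = w.clamp(max=upper)
    else:                       # mask: zero out weights outside [lower, upper]
        w = w * ((w >= lower) & (w <= upper)).float()
    return (w * mask).detach()  # weights act as constants in the policy loss
```

The asymmetry this makes visible: `truncate` keeps a capped contribution from every token, while `mask` discards out-of-range tokens entirely, which is why the troubleshooting advice further down suggests switching to `truncate` when too much data is being masked out.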
### Key Parameters - `rollout_is_threshold`: Upper threshold for IS weights (null = disabled, float = enabled). **Main on/off switch.** - `rollout_is`: Whether to apply weights to loss (true) or just compute metrics (false). Default: false. - `rollout_is_threshold_lower`: Lower threshold (null = auto 1/upper) - `rollout_is_veto_threshold`: Catastrophic outlier threshold (default: null, disabled) ## Configuration Examples ### Example 1: Full IS Correction (Apply Weights) ```yaml algorithm: rollout_is_threshold: 2.0 rollout_is: true # Apply to loss rollout_is_level: token rollout_is_mode: truncate rollout_is_veto_threshold: null # Disabled by default ``` ### Example 2: Metrics Only (No Weight Application) ```yaml algorithm: rollout_is_threshold: 2.0 rollout_is: false # Compute metrics only, don't apply to loss rollout_is_level: token rollout_is_mode: truncate ``` ### Example 3: Geometric Mean with Mask ```yaml algorithm: rollout_is_threshold: 1.0002 rollout_is: true rollout_is_threshold_lower: 0.9998 rollout_is_level: geometric rollout_is_mode: mask rollout_is_veto_threshold: 1e-4 # Enable veto for this example ``` ### Example 4: Sequence-level with Truncate ```yaml algorithm: rollout_is_threshold: 5.0 rollout_is: true rollout_is_threshold_lower: null # Auto-reciprocal: 0.2 rollout_is_level: sequence rollout_is_mode: truncate rollout_is_veto_threshold: 1e-4 # Enable veto for this example ``` ### Example 5: Asymmetric Thresholds ```yaml algorithm: rollout_is_threshold: 5.0 rollout_is: true rollout_is_threshold_lower: 0.8 rollout_is_level: token rollout_is_mode: mask ``` ## Monitoring Metrics Key metrics to watch (all prefixed with `mismatch/` in logs): ### Health Indicators - `rollout_is_mean`: Mean IS weight across sequences - `rollout_is_eff_sample_size`: Effective sample size after weighting - `rollout_is_veto_fraction`: Fraction of sequences vetoed ### Distribution Metrics - `rollout_is_max`, `rollout_is_min`: Weight extremes - `rollout_is_std`: Standard deviation ### Diagnostic Metrics - `rollout_is_ratio_fraction_high`: Fraction exceeding upper threshold - `rollout_is_ratio_fraction_low`: Fraction below lower threshold - `rollout_is_catastrophic_token_fraction`: Catastrophic tokens detected ### Mismatch Metrics (Training vs Rollout Policy) These metrics help diagnose the distribution mismatch between rollout and training policies: **Perplexity Metrics:** - `mismatch_training_ppl`: Perplexity of training policy - `mismatch_rollout_ppl`: Perplexity of rollout policy - `mismatch_ppl_ratio`: Ratio of training PPL to rollout PPL - `mismatch_log_ppl_diff`: Log perplexity difference **KL Divergence Metrics:** - `mismatch_kl`: KL divergence KL(π_rollout || π_training) - `mismatch_k3_kl`: K3 KL estimator ## Troubleshooting ### Issue: High Variance in IS Weights **Symptoms**: `rollout_is_std` > 1.0, `rollout_is_eff_sample_size` < 0.3 **Solutions**: 1. Switch from `sequence` to `geometric` level 2. Tighten thresholds 3. Check if rollout and training are too different ### Issue: Too Many Sequences Vetoed **Symptoms**: `rollout_is_veto_fraction` > 0.1 **Solutions**: 1. Relax veto threshold: `rollout_is_veto_threshold: 1e-3` 2. Check for numerical issues in log prob computation 3. Verify rollout and training policies aren't completely different ### Issue: Mean IS Weight Far from 1.0 **Symptoms**: `rollout_is_mean` < 0.5 or > 2.0 **Solutions**: 1. Check that `calculate_log_probs=True` is set 2. Verify rollout_log_probs are correctly passed 3. Check for systematic bias in rollout vs training ### Issue: Too Much Data Discarded (Mask Mode) **Symptoms**: `rollout_is_masked_fraction` > 0.5 **Solutions**: 1. Widen thresholds 2. Switch to `truncate` mode 3. Use `geometric` level for better stability
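Several of the symptoms above are phrased in terms of `rollout_is_eff_sample_size`. As a hedged illustration of what that health indicator measures (not the verl metric code; the function name is hypothetical), the normalized effective sample size can be computed from per-sequence weights like this:

```python
import torch

def effective_sample_size(w: torch.Tensor) -> float:
    """Normalized ESS in (0, 1]: 1.0 means uniform weights; values near 0
    mean a handful of sequences dominate the weighted batch."""
    return (w.sum() ** 2 / (w.pow(2).sum() * w.numel())).item()

print(effective_sample_size(torch.ones(8)))                     # 1.0
print(effective_sample_size(torch.tensor([10.0] + [0.1] * 7)))  # ~0.14
```

In the second batch, a single outlier weight makes eight sequences behave like roughly one effective sample, which is exactly the regime the `rollout_is_eff_sample_size < 0.3` troubleshooting threshold is meant to flag.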
## Performance Considerations ### Memory Usage - Rollout IS adds minimal memory overhead (~1% of model memory) - Log-space computation prevents numerical overflow ### Computational Cost - Token-level: ~1-2% overhead - Sequence-level: ~2-3% overhead - Geometric: ~2-3% overhead ## Advanced Topics ### Dual Thresholds Specify both upper and lower explicitly: ```yaml rollout_is_threshold: 2.0 # Upper rollout_is_threshold_lower: 0.5 # Lower, set explicitly (need not equal 1/upper) ``` Or use auto-reciprocal: ```yaml rollout_is_threshold: 2.0 # Upper = 2.0, Lower = 0.5 (auto) rollout_is_threshold_lower: null ``` ### Veto Mechanism The veto mechanism zeros out entire sequences containing catastrophic outliers: - If any token has ratio < `rollout_is_veto_threshold`, the entire sequence is rejected - This prevents extreme outliers from dominating training - Default: `null` (disabled by default) - Set to `1e-4` to enable (catches ratios 10,000x off)
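A hedged sketch of this rule (names are hypothetical; see `verl/trainer/ppo/mismatch_helper.py` for the real logic): the veto is a whole-sequence mask applied on top of the already-bounded weights.

```python
import torch

def apply_veto(weights, token_ratio, mask, veto_threshold=1e-4):
    """Zero out all weights of any sequence that contains at least one
    catastrophic token, i.e., a ratio below the veto threshold."""
    catastrophic = (token_ratio < veto_threshold) & mask.bool()
    vetoed = catastrophic.any(dim=-1, keepdim=True)   # [batch, 1]
    return weights * (~vetoed).float()
```

The fraction of sequences zeroed out this way is what the `mismatch/rollout_is_veto_fraction` metric reports, so a rising veto fraction points at either a too-strict threshold or genuinely divergent policies.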
## Examples See the script in this directory: - `run_with_rollout_is.sh`: Basic example with token-level truncate mode ## References - Implementation: `verl/trainer/ppo/mismatch_helper.py` - Core algorithm: `verl/trainer/ppo/core_algos.py` - Paper: "Your Efficient RL Framework Secretly Brings You Off-Policy RL Training" ================================================ FILE: verl_distillation/examples/rollout_importance_sampling/run_with_rollout_is.sh ================================================ #!/usr/bin/env bash # Example: Basic PPO training with Rollout Importance Sampling # This demonstrates the standard setup for correcting distribution mismatch set -xeuo pipefail # ============================================================================== # Rollout Importance Sampling Configuration # ============================================================================== # Main control: Upper threshold for IS weights (null = disabled, float = enabled) rollout_is_threshold=2.0 # Whether to apply IS weights to policy loss # true = apply weights to loss, false = compute metrics only rollout_is=true # Lower threshold (null = auto-reciprocal, i.e., 1/upper = 0.5) rollout_is_threshold_lower=null # Aggregation level: token | sequence | geometric (experimental) rollout_is_level=token # Bounding mode: truncate (cap upper) | mask (zero outside bounds) rollout_is_mode=truncate # Catastrophic outlier veto threshold (set to null to disable, or e.g., 1e-4 to enable) rollout_is_veto_threshold=null # ============================================================================== # Model and Data Configuration # ============================================================================== MODEL_PATH=${MODEL_PATH:-"Qwen/Qwen2.5-7B"} TRAIN_FILE=${TRAIN_FILE:-"data/train.parquet"} TEST_FILE=${TEST_FILE:-"data/test.parquet"} max_prompt_length=512 max_response_length=1024 # ============================================================================== # Training Configuration # ============================================================================== train_batch_size=128 ppo_mini_batch_size=32 ppo_epochs=1 learning_rate=5e-7 # ============================================================================== # Algorithm Configuration # ============================================================================== adv_estimator=gae gamma=1.0 lam=0.95 # ============================================================================== # Launch Training # ============================================================================== python3 -m verl.trainer.main_ppo \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.train_batch_size=${train_batch_size} \ algorithm.adv_estimator=${adv_estimator} \ algorithm.gamma=${gamma} \ algorithm.lam=${lam} \ algorithm.rollout_is=${rollout_is} \ algorithm.rollout_is_threshold=${rollout_is_threshold} \ algorithm.rollout_is_threshold_lower=${rollout_is_threshold_lower} \ algorithm.rollout_is_level=${rollout_is_level} \ algorithm.rollout_is_mode=${rollout_is_mode} \ algorithm.rollout_is_veto_threshold=${rollout_is_veto_threshold} \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.actor.optim.lr=${learning_rate} \ actor_rollout_ref.actor.ppo_mini_batch_size=${ppo_mini_batch_size} \ actor_rollout_ref.actor.ppo_epochs=${ppo_epochs} \ actor_rollout_ref.rollout.calculate_log_probs=True \ actor_rollout_ref.rollout.name=vllm \ trainer.logger='["console","wandb"]' \ trainer.project_name="rollout_is_example" \ trainer.experiment_name="basic_token_truncate" \ trainer.total_epochs=10 echo "Training completed!" echo "" echo "Rollout IS Configuration:" echo " - Threshold: ${rollout_is_threshold}" echo " - Apply to loss: ${rollout_is}" echo " - Level: ${rollout_is_level}" echo " - Mode: ${rollout_is_mode}" echo "" echo "Monitor these key metrics in wandb:" echo " - mismatch/rollout_is_mean (should be ~1.0)" echo " - mismatch/rollout_is_eff_sample_size (should be >0.5)" echo " - mismatch/rollout_is_veto_fraction (should be <0.1)" ================================================ FILE: verl_distillation/examples/sft/gsm8k/run_deepseek_6b7.sh ================================================ set -x if [ "$#" -lt 2 ]; then echo "Usage: run_deepseek_6b7.sh [other_configs...]" exit 1 fi nproc_per_node=$1 save_path=$2 # Shift the arguments so $@ refers to the rest shift 2 torchrun --standalone --nnodes=1 --nproc_per_node=$nproc_per_node \ -m verl.trainer.fsdp_sft_trainer \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.prompt_key=extra_info \ data.response_key=extra_info \ data.prompt_dict_keys=['question'] \ +data.response_dict_keys=['answer'] \ data.micro_batch_size_per_gpu=4 \ model.partial_pretrain=deepseek-ai/deepseek-coder-6.7b-instruct \ trainer.default_local_dir=$save_path \ trainer.project_name=gsm8k-sft \ trainer.experiment_name=gsm8k-sft-deepseek-coder-6.7b-instruct \ trainer.total_epochs=4 \ trainer.logger='["console","wandb"]' $@ ================================================ FILE: verl_distillation/examples/sft/gsm8k/run_gemma_2b.sh ================================================ # Tested with 2 & 4 GPUs set -x if [ "$#" -lt 2 ]; then echo "Usage: run_gemma_2b.sh [other_configs...]" exit 1 fi nproc_per_node=$1 save_path=$2 # Shift the arguments so $@ refers to the rest shift 2 torchrun --standalone --nnodes=1 --nproc_per_node=$nproc_per_node \ -m verl.trainer.fsdp_sft_trainer \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.prompt_key=extra_info \ data.response_key=extra_info \ data.prompt_dict_keys=['question'] \ +data.response_dict_keys=['answer'] \ data.micro_batch_size_per_gpu=4 \ model.partial_pretrain=google/gemma-2b-it \ trainer.default_local_dir=$save_path \ trainer.project_name=gsm8k-sft \ trainer.experiment_name=gsm8k-sft-gemma-2b-it \ trainer.total_epochs=2 \ trainer.logger='["console","wandb"]' $@ ================================================
FILE: verl_distillation/examples/sft/gsm8k/run_gemma_7b.sh ================================================ set -x if [ "$#" -lt 2 ]; then echo "Usage: run_gemma_7b.sh [other_configs...]" exit 1 fi nproc_per_node=$1 save_path=$2 # Shift the arguments so $@ refers to the rest shift 2 torchrun --standalone --nnodes=1 --nproc_per_node=$nproc_per_node \ -m verl.trainer.fsdp_sft_trainer \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.prompt_key=extra_info \ data.response_key=extra_info \ data.prompt_dict_keys=['question'] \ +data.response_dict_keys=['answer'] \ data.micro_batch_size_per_gpu=4 \ model.partial_pretrain=google/gemma-1.1-7b-it \ trainer.default_local_dir=$save_path \ trainer.project_name=gsm8k-sft \ trainer.experiment_name=gsm8k-sft-gemma-1.1-7b-it \ trainer.total_epochs=4 \ trainer.logger='["console","wandb"]' $@ ================================================ FILE: verl_distillation/examples/sft/gsm8k/run_qwen3_8b_sft_peft_sp2_npu.sh ================================================ set -x if [ "$#" -lt 2 ]; then echo "Usage: run_qwen3_8b_sft_peft_sp2_npu.sh [other_configs...]" exit 1 fi nproc_per_node=$1 save_path=$2 # Shift the arguments so $@ refers to the rest shift 2 torchrun --standalone --nnodes=1 --nproc_per_node=$nproc_per_node \ -m verl.trainer.fsdp_sft_trainer \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.prompt_key=extra_info \ data.response_key=extra_info \ optim.lr=1e-4 \ data.prompt_dict_keys=['question'] \ +data.response_dict_keys=['answer'] \ data.micro_batch_size_per_gpu=64 \ model.partial_pretrain=Qwen/Qwen3-8B \ trainer.default_local_dir=$save_path \ trainer.project_name=gsm8k-sft \ trainer.experiment_name=gsm8k-sft-qwen3-8b-instruct \ trainer.logger=console \ trainer.total_epochs=2 $@ \ model.lora_rank=32 \ model.lora_alpha=16 \ model.target_modules=all-linear \ model.strategy=fsdp \ ulysses_sequence_parallel_size=2 \ use_remove_padding=true \ trainer.device=npu ================================================ FILE: verl_distillation/examples/sft/gsm8k/run_qwen_05_peft.sh ================================================ # Tested with 2 & 4 GPUs set -x if [ "$#" -lt 2 ]; then echo "Usage: run_qwen_05_peft.sh [other_configs...]" exit 1 fi nproc_per_node=$1 save_path=$2 # Shift the arguments so $@ refers to the rest shift 2 torchrun --standalone --nnodes=1 --nproc_per_node=$nproc_per_node \ -m verl.trainer.fsdp_sft_trainer \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.prompt_key=extra_info \ data.response_key=extra_info \ optim.lr=1e-4 \ data.prompt_dict_keys=['question'] \ +data.response_dict_keys=['answer'] \ data.micro_batch_size_per_gpu=4 \ model.partial_pretrain=Qwen/Qwen2.5-0.5B-Instruct \ trainer.default_local_dir=$save_path \ trainer.project_name=gsm8k-sft \ trainer.experiment_name=gsm8k-sft-qwen-2.5-0.5b-instruct \ trainer.logger=console \ trainer.total_epochs=1 $@ \ model.lora_rank=32 \ model.lora_alpha=16 \ model.target_modules=all-linear # Or you can do this: # model.target_modules=[q_proj,v_proj] \ ================================================ FILE: verl_distillation/examples/sft/gsm8k/run_qwen_05_sp2.sh ================================================ set -x if [ "$#" -lt 2 ]; then echo "Usage: run_qwen_05_sp2.sh [other_configs...]" exit 1 fi nproc_per_node=$1 save_path=$2 # Shift the arguments so $@ refers to the rest shift 2 torchrun --standalone --nnodes=1
--nproc_per_node=$nproc_per_node \ -m verl.trainer.fsdp_sft_trainer \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.prompt_key=extra_info \ data.response_key=extra_info \ optim.lr=1e-4 \ data.prompt_dict_keys=['question'] \ +data.response_dict_keys=['answer'] \ data.micro_batch_size=4 \ model.partial_pretrain=Qwen/Qwen2.5-0.5B-Instruct \ trainer.default_local_dir=$save_path \ trainer.project_name=gsm8k-sft \ trainer.experiment_name=gsm8k-sft-qwen-2.5-0.5b-instruct-sp2 \ trainer.logger=console \ trainer.total_training_steps=1 $@ \ ulysses_sequence_parallel_size=2 \ use_remove_padding=true ================================================ FILE: verl_distillation/examples/sft/gsm8k/run_qwen_05_sp2_liger.sh ================================================ set -x if [ "$#" -lt 2 ]; then echo "Usage: run_qwen_05_sp2_liger.sh [other_configs...]" exit 1 fi nproc_per_node=$1 save_path=$2 # Shift the arguments so $@ refers to the rest shift 2 torchrun --standalone --nnodes=1 --nproc_per_node=$nproc_per_node \ -m verl.trainer.fsdp_sft_trainer \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.prompt_key=extra_info \ data.response_key=extra_info \ optim.lr=1e-4 \ data.prompt_dict_keys=['question'] \ +data.response_dict_keys=['answer'] \ data.micro_batch_size=4 \ model.partial_pretrain=Qwen/Qwen2.5-0.5B-Instruct \ model.use_liger=True \ trainer.default_local_dir=$save_path \ trainer.project_name=gsm8k-sft \ trainer.experiment_name=gsm8k-sft-qwen-2.5-0.5b-instruct-sp2-liger \ trainer.logger=console $@ \ ulysses_sequence_parallel_size=2 \ use_remove_padding=true ================================================ FILE: verl_distillation/examples/sft/gsm8k/run_seed_oss_36b_sft.sh ================================================ set -x if [ "$#" -lt 2 ]; then echo "Usage: run_seed_oss_36b_sft.sh [other_configs...]" exit 1 fi nproc_per_node=$1 save_path=$2 # Shift the arguments so $@ refers to the rest shift 2 torchrun --standalone --nnodes=1 --nproc_per_node=$nproc_per_node \ -m verl.trainer.fsdp_sft_trainer \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.prompt_key=extra_info \ data.response_key=extra_info \ optim.lr=1e-4 \ data.prompt_dict_keys=['question'] \ +data.response_dict_keys=['answer'] \ data.micro_batch_size=4 \ model.partial_pretrain=ByteDance-Seed/Seed-OSS-36B-Base \ trainer.default_local_dir=$save_path \ trainer.project_name=gsm8k-sft \ trainer.experiment_name=gsm8k-sft-seed-oss-36b \ trainer.logger=console \ trainer.total_training_steps=1 \ ulysses_sequence_parallel_size=2 \ use_remove_padding=true $@ ================================================ FILE: verl_distillation/examples/sft/multiturn/run_qwen_05_sp2.sh ================================================ #!/bin/bash set -x if [ "$#" -lt 2 ]; then echo "Usage: run_qwen_05_sp2.sh [other_configs...]" exit 1 fi nproc_per_node=$1 save_path=$2 # Shift the arguments so $@ refers to the rest shift 2 torchrun --nnodes=1 --nproc_per_node=$nproc_per_node \ -m verl.trainer.fsdp_sft_trainer \ data.train_files=$HOME/data/multiturn/train.parquet \ data.val_files=$HOME/data/multiturn/test.parquet \ data.multiturn.enable=true \ data.multiturn.messages_key=messages \ data.micro_batch_size=4 \ model.partial_pretrain=Qwen/Qwen2.5-0.5B-Instruct \ trainer.default_local_dir=$save_path \ trainer.project_name=multiturn-sft \ trainer.experiment_name=multiturn-sft-qwen-2.5-0.5b-instruct-sp2 \
trainer.logger=console \ trainer.total_training_steps=1 $@ \ ulysses_sequence_parallel_size=2 \ use_remove_padding=true ================================================ FILE: verl_distillation/examples/sglang_multiturn/README.md ================================================ # Multi-Turn Rollout Example (GSM8K) This example demonstrates how to perform **multi-turn rollout** using SGLang with a tool-calling capable model (e.g., Qwen2.5-3B) on the GSM8K dataset. ## Usage ### Step 1: Download GSM8K Dataset ```bash cd examples/data_preprocess python3 gsm8k_multiturn_w_tool.py ``` This will download and preprocess the GSM8K dataset into ~/data/gsm8k/. ### Step 2: Run Multi-Turn Rollout If you have 8 GPUs, use the standard 8-GPU script: ```bash cd your_verl_root_dir bash examples/sglang_multiturn/run_qwen2.5-3b_gsm8k_multiturn.sh ``` If you have only 4 GPUs, use the fallback 4-GPU script: ```bash cd your_verl_root_dir bash examples/sglang_multiturn/run_qwen2.5-3b_gsm8k_multiturn_4xgpu.sh ``` ## Notes - The rollout supports multi-turn conversations with tool-calling capabilities. - Current tools are used for GSM8K answer evaluation. - Future versions may extend to search and code interpreter tools. ================================================ FILE: verl_distillation/examples/sglang_multiturn/config/geo3k_multiturn_grpo.yaml ================================================ hydra: searchpath: - file://verl/trainer/config defaults: - ppo_trainer - _self_ data: max_prompt_length: 2048 max_response_length: 2048 train_batch_size: 256 return_raw_chat: True return_multi_modal_inputs: False actor_rollout_ref: hybrid_engine: True model: custom_chat_template: "{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{%- if tools %}{{- '<|im_start|>system\\n' }}{%- if messages[0]['role'] == 'system' %}{{- messages[0]['content'] }}{%- else %}{{- 'You are a helpful assistant.'
}}{%- endif %}{{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within XML tags:\\n\" }}{%- for tool in tools %}{{- \"\\n\" }}{{- tool | tojson }}{%- endfor %}{{- \"\\n\\n\\nFor each function call, return a json object with function name and arguments within XML tags:\\n\\n{\\\"name\\\": , \\\"arguments\\\": }\\n<|im_end|>\\n\" }}{% for message in messages %}{% if message['role'] != 'system' or loop.first == false %}{%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{%- elif message.role == \"assistant\" %}{{- '<|im_start|>' + message.role }}{%- if message.content %}{{- '\\n' + message.content }}{%- endif %}{%- for tool_call in message.tool_calls %}{%- if tool_call.function is defined %}{%- set tool_call = tool_call.function %}{%- endif %}{{- '\\n\\n{\"name\": \"' }}{{- tool_call.name }}{{- '\", \"arguments\": ' }}{{- tool_call.arguments | tojson }}{{- '}\\n' }}{%- endfor %}{{- '<|im_end|>\\n' }}{%- elif message.role == \"tool\" %}{%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}{{- '<|im_start|>user' }}{%- endif %}{{- '\\n\\n' }}{% if message['content'] is string %}{{ message.content }}{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif content['type'] == 'text' or 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}{% endif %}{{- '\\n' }}{%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}{{- '<|im_end|>\\n' }}{%- endif %}{%- endif %}{% endif %}{% endfor %}{%- else %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n{% endif %}{%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' 
in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{%- elif message.role == \"assistant\" %}{{- '<|im_start|>' + message.role }}{%- if message.content %}{{- '\\n' + message.content }}{%- endif %}{%- for tool_call in message.tool_calls %}{%- if tool_call.function is defined %}{%- set tool_call = tool_call.function %}{%- endif %}{{- '\\n\\n{\"name\": \"' }}{{- tool_call.name }}{{- '\", \"arguments\": ' }}{{- tool_call.arguments | tojson }}{{- '}\\n' }}{%- endfor %}{{- '<|im_end|>\\n' }}{%- elif message.role == \"tool\" %}{%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}{{- '<|im_start|>user' }}{%- endif %}{{- '\\n\\n' }}{% if message['content'] is string %}{{ message.content }}{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif content['type'] == 'text' or 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}{% endif %}{{- '\\n' }}{%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}{{- '<|im_end|>\\n' }}{%- endif %}{%- endif %}{% endfor %}{%- endif %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}" rollout: name: sglang multi_turn: enable: True max_assistant_turns: 5 # tool_config_path: "./config/tool_config/gsm8k_tool_config.yaml" ================================================ FILE: verl_distillation/examples/sglang_multiturn/config/geo3k_multiturn_megatron_grpo.yaml ================================================ hydra: searchpath: - file://verl/trainer/config defaults: - ppo_megatron_trainer - _self_ data: max_prompt_length: 2048 max_response_length: 2048 train_batch_size: 256 return_raw_chat: True return_multi_modal_inputs: False actor_rollout_ref: hybrid_engine: True model: custom_chat_template: "{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{%- if tools %}{{- '<|im_start|>system\\n' }}{%- if messages[0]['role'] == 'system' %}{{- messages[0]['content'] }}{%- else %}{{- 'You are a helpful assistant.' 
}}{%- endif %}{{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within XML tags:\\n\" }}{%- for tool in tools %}{{- \"\\n\" }}{{- tool | tojson }}{%- endfor %}{{- \"\\n\\n\\nFor each function call, return a json object with function name and arguments within XML tags:\\n\\n{\\\"name\\\": , \\\"arguments\\\": }\\n<|im_end|>\\n\" }}{% for message in messages %}{% if message['role'] != 'system' or loop.first == false %}{%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{%- elif message.role == \"assistant\" %}{{- '<|im_start|>' + message.role }}{%- if message.content %}{{- '\\n' + message.content }}{%- endif %}{%- for tool_call in message.tool_calls %}{%- if tool_call.function is defined %}{%- set tool_call = tool_call.function %}{%- endif %}{{- '\\n\\n{\"name\": \"' }}{{- tool_call.name }}{{- '\", \"arguments\": ' }}{{- tool_call.arguments | tojson }}{{- '}\\n' }}{%- endfor %}{{- '<|im_end|>\\n' }}{%- elif message.role == \"tool\" %}{%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}{{- '<|im_start|>user' }}{%- endif %}{{- '\\n\\n' }}{% if message['content'] is string %}{{ message.content }}{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif content['type'] == 'text' or 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}{% endif %}{{- '\\n' }}{%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}{{- '<|im_end|>\\n' }}{%- endif %}{%- endif %}{% endif %}{% endfor %}{%- else %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n{% endif %}{%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' 
in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{%- elif message.role == \"assistant\" %}{{- '<|im_start|>' + message.role }}{%- if message.content %}{{- '\\n' + message.content }}{%- endif %}{%- for tool_call in message.tool_calls %}{%- if tool_call.function is defined %}{%- set tool_call = tool_call.function %}{%- endif %}{{- '\\n\\n{\"name\": \"' }}{{- tool_call.name }}{{- '\", \"arguments\": ' }}{{- tool_call.arguments | tojson }}{{- '}\\n' }}{%- endfor %}{{- '<|im_end|>\\n' }}{%- elif message.role == \"tool\" %}{%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}{{- '<|im_start|>user' }}{%- endif %}{{- '\\n\\n' }}{% if message['content'] is string %}{{ message.content }}{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif content['type'] == 'text' or 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}{% endif %}{{- '\\n' }}{%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}{{- '<|im_end|>\\n' }}{%- endif %}{%- endif %}{% endfor %}{%- endif %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}" rollout: name: sglang multi_turn: enable: True max_assistant_turns: 5 # tool_config_path: "./config/tool_config/gsm8k_tool_config.yaml" ================================================ FILE: verl_distillation/examples/sglang_multiturn/config/gsm8k_multiturn_grpo.yaml ================================================ hydra: searchpath: - file://verl/trainer/config defaults: - ppo_trainer - _self_ data: max_prompt_length: 1024 max_response_length: 1024 train_batch_size: 256 return_raw_chat: True actor_rollout_ref: hybrid_engine: True rollout: name: sglang multi_turn: enable: True max_assistant_turns: 5 ================================================ FILE: verl_distillation/examples/sglang_multiturn/config/gsm8k_multiturn_grpo_server.yaml ================================================ hydra: searchpath: - file://verl/trainer/config defaults: - ppo_trainer - _self_ data: max_prompt_length: 1024 max_response_length: 1024 train_batch_size: 256 return_raw_chat: True actor_rollout_ref: hybrid_engine: True rollout: name: sglang multi_turn: enable: True max_assistant_turns: 5 sglang_rollout_mode: server server: timeout: 60 max_attempts: 3 retry_delay: 2 max_connections: 1000 max_start_wait_time: 300.0 ================================================ FILE: verl_distillation/examples/sglang_multiturn/config/gsm8k_multiturn_grpo_w_interaction.yaml ================================================ hydra: searchpath: - file://verl/trainer/config defaults: - ppo_trainer - _self_ data: max_prompt_length: 1024 max_response_length: 1024 train_batch_size: 256 return_raw_chat: True actor_rollout_ref: hybrid_engine: True rollout: name: sglang multi_turn: enable: True max_user_turns: 5 ================================================ FILE: 
verl_distillation/examples/sglang_multiturn/config/gsm8k_multiturn_megatron_grpo.yaml ================================================ hydra: searchpath: - file://verl/trainer/config defaults: - ppo_megatron_trainer - _self_ data: max_prompt_length: 1024 max_response_length: 1024 train_batch_size: 256 return_raw_chat: True actor_rollout_ref: hybrid_engine: True rollout: name: sglang multi_turn: enable: True max_assistant_turns: 5 ================================================ FILE: verl_distillation/examples/sglang_multiturn/config/interaction_config/gsm8k_interaction_config.yaml ================================================ interaction: - name: "gsm8k" class_name: "verl.interactions.gsm8k_interaction.Gsm8kInteraction" config: {} ================================================ FILE: verl_distillation/examples/sglang_multiturn/config/retool_multiturn_grpo.yaml ================================================ hydra: searchpath: - file://verl/trainer/config defaults: - ppo_trainer - _self_ data: max_prompt_length: 1024 max_response_length: 1024 train_batch_size: 256 return_raw_chat: True actor_rollout_ref: hybrid_engine: True rollout: name: sglang multi_turn: enable: True max_assistant_turns: 5 tool_config_path: "./config/tool_config/sandbox_fusion_tool_config.yaml" ================================================ FILE: verl_distillation/examples/sglang_multiturn/config/search_multiturn_grpo.yaml ================================================ hydra: searchpath: - file://verl/trainer/config defaults: - ppo_trainer - _self_ data: max_prompt_length: 1024 max_response_length: 1024 train_batch_size: 256 return_raw_chat: True shuffle: False actor_rollout_ref: hybrid_engine: True rollout: name: sglang multi_turn: enable: True max_assistant_turns: 2 format: qwen ================================================ FILE: verl_distillation/examples/sglang_multiturn/config/search_multiturn_grpo_one_step_off.yaml ================================================ hydra: searchpath: - file://verl/trainer/config defaults: - ppo_trainer - _self_ data: max_prompt_length: 1024 max_response_length: 1024 train_batch_size: 256 return_raw_chat: True shuffle: False actor_rollout_ref: hybrid_engine: True rollout: name: sglang multi_turn: enable: True max_assistant_turns: 2 format: qwen ================================================ FILE: verl_distillation/examples/sglang_multiturn/config/tool_config/geo3k_tool_config.yaml ================================================ tools: - class_name: "verl.tools.geo3k_tool.Geo3kTool" config: type: native tool_schema: type: "function" function: name: "calc_geo3k_reward" description: "A tool for calculating the reward of geo3k. (1.0 if parsed answer is correct, 0.0 if parsed answer is incorrect or not correctly parsed)" parameters: type: "object" properties: answer: type: "string" description: "The model's answer to the geo3k problem, must be digits" required: ["answer"] ================================================ FILE: verl_distillation/examples/sglang_multiturn/config/tool_config/gsm8k_tool_config.yaml ================================================ tools: - class_name: "verl.tools.gsm8k_tool.Gsm8kTool" config: type: native tool_schema: type: "function" function: name: "calc_gsm8k_reward" description: "A tool for calculating the reward of gsm8k.
(1.0 if parsed answer is correct, 0.0 if parsed answer is incorrect or not correctly parsed)" parameters: type: "object" properties: answer: type: "string" description: "The model's answer to the GSM8K math problem, must be digits" required: ["answer"] ================================================ FILE: verl_distillation/examples/sglang_multiturn/config/tool_config/mcp_server.json ================================================ { "mcpServers": { "Tavily Expert": { "url": "your_tavily_expert_url", "auth_token": "your_tavily_api_token" } } } ================================================ FILE: verl_distillation/examples/sglang_multiturn/config/tool_config/mcp_tool_config.yaml ================================================ tools: - class_name: verl.tools.mcp_search_tool.MCPSearchTool config: rate_limit: 120 timeout: 120 type: mcp mcp: mcp_servers_config_path: ./mcp_server.json # optional tool_selected_list: - tavily_search_tool ================================================ FILE: verl_distillation/examples/sglang_multiturn/config/tool_config/sandbox_fusion_tool_config.yaml ================================================ tools: - class_name: "verl.tools.sandbox_fusion_tools.SandboxFusionTool" config: sandbox_fusion_url: "https://xxx.apigateway-cn-beijing.volceapi.com/run_code" num_workers: 10 enable_global_rate_limit: true rate_limit: 10 default_timeout: 30 default_language: "python" memory_limit_mb: 1024 type: native tool_schema: type: "function" function: name: "code_interpreter" description: "A tool for executing code." parameters: type: "object" properties: code: type: "string" description: "The code to execute." required: ["code"] ================================================ FILE: verl_distillation/examples/sglang_multiturn/config/tool_config/search_tool_config.yaml ================================================ tools: - class_name: verl.tools.search_tool.SearchTool config: retrieval_service_url: http://127.0.0.1:8000/retrieve num_workers: 120 rate_limit: 120 timeout: 30 type: native tool_schema: type: function function: name: search description: Searches the web for relevant information based on the given query. parameters: type: object properties: query_list: type: array items: type: string description: A list of fully-formed semantic queries. The tool will return search results for each query.
required: - query_list ================================================ FILE: verl_distillation/examples/sglang_multiturn/geo3k/run_qwen2.5-3b_geo3k_multiturn.sh ================================================ # run on 8xH100 # make sure your current working directory is the root of the project set -x ulimit -n 65535 PROJECT_DIR="$(pwd)" CONFIG_PATH="$PROJECT_DIR/examples/sglang_multiturn/config" python3 -m verl.trainer.main_ppo \ --config-path="$CONFIG_PATH" \ --config-name='geo3k_multiturn_grpo' \ algorithm.adv_estimator=grpo \ data.train_batch_size=256 \ data.max_prompt_length=2048 \ data.max_response_length=2048 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.return_raw_chat=True \ actor_rollout_ref.model.path=Qwen/Qwen2.5-VL-3B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=32 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=sglang \ actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \ actor_rollout_ref.rollout.n=16 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='geo3k_async_rl' \ trainer.experiment_name='qwen2.5-3b_function_rm-geo3k-sgl-multi-w-tool-verify-n16' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=20 \ data.train_files=$HOME/data/geo3k_multiturn_w_tool/train.parquet \ data.val_files=$HOME/data/geo3k_multiturn_w_tool/test.parquet \ actor_rollout_ref.rollout.multi_turn.tool_config_path="$PROJECT_DIR/examples/sglang_multiturn/config/tool_config/geo3k_tool_config.yaml" \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/sglang_multiturn/geo3k/run_qwen2.5-3b_geo3k_multiturn_4xgpu.sh ================================================ # run on 4xH100 # make sure your current working directory is the root of the project set -x export HYDRA_FULL_ERROR=1 ulimit -n 65535 PROJECT_DIR="$(pwd)" CONFIG_PATH="$PROJECT_DIR/examples/sglang_multiturn/config" python3 -m verl.trainer.main_ppo \ --config-path="$CONFIG_PATH" \ --config-name='geo3k_multiturn_grpo' \ algorithm.adv_estimator=grpo \ data.train_batch_size=256 \ data.max_prompt_length=2048 \ data.max_response_length=2048 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.return_raw_chat=True \ actor_rollout_ref.model.path=Qwen/Qwen2.5-VL-3B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=32 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ 
actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=sglang \ actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \ actor_rollout_ref.rollout.n=16 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='geo3k_async_rl' \ trainer.experiment_name='qwen2.5-3b_function_rm-geo3k-async-sgl-multi-w-tool-verify-n16-4cards' \ trainer.n_gpus_per_node=4 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=20 \ trainer.total_epochs=15 \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=8192 \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=8192 \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=8192 \ critic.ppo_max_token_len_per_gpu=8192 \ critic.forward_max_token_len_per_gpu=8192 \ data.train_files=$HOME/data/geo3k/train.parquet \ data.val_files=$HOME/data/geo3k/test.parquet \ actor_rollout_ref.rollout.multi_turn.tool_config_path="$PROJECT_DIR/examples/sglang_multiturn/config/tool_config/geo3k_tool_config.yaml" \ $@ ================================================ FILE: verl_distillation/examples/sglang_multiturn/geo3k/run_qwen2.5-3b_megatron_geo3k_multiturn.sh ================================================ # run on 8xH100 # make sure your current working directory is the root of the project # this is a verification training script, the parallel setting should be tuned to your model set -x export PYTHONUNBUFFERED=1 export RAY_DEDUP_LOGS=0 export RUST_BACKTRACE=1 export HYDRA_FULL_ERROR=1 export CUDA_DEVICE_MAX_CONNECTIONS=1 ulimit -n 65535 PROJECT_DIR="$(pwd)" CONFIG_PATH="$PROJECT_DIR/examples/sglang_multiturn/config" python3 -m verl.trainer.main_ppo \ --config-path="$CONFIG_PATH" \ --config-name='geo3k_multiturn_megatron_grpo' \ algorithm.adv_estimator=grpo \ data.train_batch_size=256 \ data.max_prompt_length=2048 \ data.max_response_length=2048 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.return_raw_chat=True \ actor_rollout_ref.model.path=Qwen/Qwen2.5-VL-3B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=32 \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=2 \ actor_rollout_ref.actor.megatron.virtual_pipeline_model_parallel_size=2 \ actor_rollout_ref.actor.megatron.context_parallel_size=2 \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=2 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.megatron.seed=42 \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=2 \ actor_rollout_ref.ref.megatron.virtual_pipeline_model_parallel_size=2 \ actor_rollout_ref.ref.megatron.context_parallel_size=2 \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=sglang \ actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \ actor_rollout_ref.rollout.n=8 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=32 \ 
algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='geo3k_async_rl' \ trainer.experiment_name='qwen2.5-3b_function_rm-geo3k-sgl-multi-w-tool-n8-mcore-v2505201745_seed42' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=20 \ data.train_files=$HOME/data/geo3k_multiturn_w_tool/train.parquet \ data.val_files=$HOME/data/geo3k_multiturn_w_tool/test.parquet \ actor_rollout_ref.rollout.multi_turn.tool_config_path="$PROJECT_DIR/examples/sglang_multiturn/config/tool_config/geo3k_tool_config.yaml" \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/sglang_multiturn/run_qwen0.5b_gsm8k_multiturn_curriculum.sh ================================================ # run on 8xH100 # make sure your current working directory is the root of the project set -x ulimit -n 65535 PROJECT_DIR="$(pwd)" CONFIG_PATH="$PROJECT_DIR/examples/sglang_multiturn/config" python3 -m verl.trainer.main_ppo \ --config-path="$CONFIG_PATH" \ --config-name='gsm8k_multiturn_grpo' \ algorithm.adv_estimator=grpo \ data.sampler.class_name="RandomCurriculumSampler" \ data.sampler.class_path="pkg://tests.utils.dataset.test_create_rl_sampler_on_cpu" \ data.dataloader_num_workers=0 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.train_batch_size=256 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.return_raw_chat=True \ actor_rollout_ref.model.path=Qwen/Qwen2.5-0.5B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=32 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=sglang \ actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \ actor_rollout_ref.rollout.n=16 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='gsm8k_async_rl' \ trainer.experiment_name='qwen3-4b_function_rm-gsm8k-sgl-multi-w-tool-verify-n16' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=20 \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ actor_rollout_ref.rollout.multi_turn.tool_config_path="$PROJECT_DIR/examples/sglang_multiturn/config/tool_config/gsm8k_tool_config.yaml" \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/sglang_multiturn/run_qwen2.5-0.5b_gsm8k_multiturn_w_interaction.sh ================================================ # run on 8xH100 # make sure your current working directory is the root of the project set -x ulimit -n 65535 PROJECT_DIR="$(pwd)" CONFIG_PATH="$PROJECT_DIR/examples/sglang_multiturn/config" TRAIN_BATCH_SIZE=${TRAIN_BATCH_SIZE:-512} MICRO_BATCH_SIZE=${MICRO_BATCH_SIZE:-8} OFFLOAD=${OFFLOAD:-False} 
python3 -m verl.trainer.main_ppo \ --config-path="$CONFIG_PATH" \ --config-name='gsm8k_multiturn_grpo_w_interaction' \ algorithm.adv_estimator=grpo \ data.train_batch_size=$TRAIN_BATCH_SIZE \ data.max_prompt_length=1024 \ data.max_response_length=$((1024 * 3)) \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.return_raw_chat=True \ actor_rollout_ref.model.path=Qwen/Qwen2.5-0.5B-Instruct \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ +actor_rollout_ref.model.enable_activation_offloading=True \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=$TRAIN_BATCH_SIZE \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=$MICRO_BATCH_SIZE \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.fsdp_config.param_offload=$OFFLOAD \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=$OFFLOAD \ actor_rollout_ref.actor.fsdp_config.model_dtype=bfloat16 \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=$MICRO_BATCH_SIZE \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=sglang \ actor_rollout_ref.rollout.gpu_memory_utilization=0.7 \ actor_rollout_ref.rollout.n=8 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=$MICRO_BATCH_SIZE \ actor_rollout_ref.ref.fsdp_config.param_offload=$OFFLOAD \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='gsm8k_async_rl' \ trainer.experiment_name='qwen2.5-0.5b_function_rm-gsm8k-sgl-multi-w-interaction-n8' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=20 \ data.train_files=$HOME/data/gsm8k_verl_sgl_multi_turn_w_interaction/train.parquet \ data.val_files=$HOME/data/gsm8k_verl_sgl_multi_turn_w_interaction/test.parquet \ actor_rollout_ref.rollout.multi_turn.interaction_config_path="$PROJECT_DIR/examples/sglang_multiturn/config/interaction_config/gsm8k_interaction_config.yaml" \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/sglang_multiturn/run_qwen2.5-3b_gsm8k_multiturn.sh ================================================ # run on 8xH100 # make sure your current working directory is the root of the project set -x ulimit -n 65535 PROJECT_DIR="$(pwd)" CONFIG_PATH="$PROJECT_DIR/examples/sglang_multiturn/config" function now() { date '+%d-%H-%M' } EXPERIMENT_NAME="qwen2.5-3b_baseline_$(now)" python3 -m verl.trainer.main_ppo \ --config-path="$CONFIG_PATH" \ --config-name='gsm8k_multiturn_grpo' \ algorithm.adv_estimator=grpo \ data.train_batch_size=256 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.return_raw_chat=True \ actor_rollout_ref.model.path=Qwen/Qwen2.5-3B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=32 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ 
actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ global_profiler.tool=torch_memory \ global_profiler.save_path=./mem_snapshots \ global_profiler.global_tool_config.torch_memory.trace_alloc_max_entries=100000 \ global_profiler.global_tool_config.torch_memory.stack_depth=32 \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.rollout.tensor_model_parallel_size=1 \ actor_rollout_ref.rollout.name=sglang \ actor_rollout_ref.rollout.gpu_memory_utilization=0.85 \ actor_rollout_ref.rollout.multi_stage_wake_up=True \ actor_rollout_ref.rollout.n=16 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ actor_rollout_ref.rollout.over_sample_rate=0.1 \ actor_rollout_ref.rollout.mode=sync \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='multi-turn-grpo-qwen2.5-3b-sglang' \ trainer.experiment_name=$EXPERIMENT_NAME \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=20 \ trainer.val_before_train=True \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ actor_rollout_ref.rollout.multi_turn.tool_config_path="$PROJECT_DIR/examples/sglang_multiturn/config/tool_config/gsm8k_tool_config.yaml" \ trainer.total_epochs=15 \ actor_rollout_ref.rollout.update_weights_bucket_megabytes=512 $@ ================================================ FILE: verl_distillation/examples/sglang_multiturn/run_qwen2.5-3b_gsm8k_multiturn_4xgpu.sh ================================================ # run on 4xH100 # make sure your current working directory is the root of the project set -x export HYDRA_FULL_ERROR=1 ulimit -n 65535 PROJECT_DIR="$(pwd)" CONFIG_PATH="$PROJECT_DIR/examples/sglang_multiturn/config" python3 -m verl.trainer.main_ppo \ --config-path="$CONFIG_PATH" \ --config-name='gsm8k_multiturn_grpo' \ algorithm.adv_estimator=grpo \ data.train_batch_size=256 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.return_raw_chat=True \ actor_rollout_ref.model.path=Qwen/Qwen2.5-3B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=32 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=sglang \ actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \ actor_rollout_ref.rollout.n=16 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='gsm8k_async_rl' \ trainer.experiment_name='qwen2.5-3b_function_rm-gsm8k-async-sgl-multi-w-tool-verify-n16-4cards' \ trainer.n_gpus_per_node=4 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=20 \ trainer.total_epochs=15 \ 
actor_rollout_ref.actor.ppo_max_token_len_per_gpu=8192 \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=8192 \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=8192 \ critic.ppo_max_token_len_per_gpu=8192 \ critic.forward_max_token_len_per_gpu=8192 \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ actor_rollout_ref.rollout.multi_turn.tool_config_path="$PROJECT_DIR/examples/sglang_multiturn/config/tool_config/gsm8k_tool_config.yaml" \ actor_rollout_ref.rollout.multi_turn.interaction_config_path="$PROJECT_DIR/examples/sglang_multiturn/config/interaction_config/gsm8k_interaction_config.yaml" \ actor_rollout_ref.rollout.multi_turn.max_user_turns=1 \ $@ ================================================ FILE: verl_distillation/examples/sglang_multiturn/run_qwen2.5-3b_gsm8k_multiturn_4xgpu_server.sh ================================================ # run on 4xH100 # make sure your current working directory is the root of the project set -x export HYDRA_FULL_ERROR=1 ulimit -n 65535 PROJECT_DIR="$(pwd)" CONFIG_PATH="$PROJECT_DIR/examples/sglang_multiturn/config" python3 -m verl.trainer.main_ppo \ --config-path="$CONFIG_PATH" \ --config-name='gsm8k_multiturn_grpo_server' \ algorithm.adv_estimator=grpo \ data.train_batch_size=256 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.return_raw_chat=True \ actor_rollout_ref.model.path=Qwen/Qwen2.5-3B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=32 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.rollout.tensor_model_parallel_size=4 \ actor_rollout_ref.rollout.name=sglang \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=16 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console", "wandb"]' \ trainer.project_name='gsm8k_async_rl_server' \ trainer.experiment_name='qwen2.5-3b_function_rm-gsm8k-async-sgl-multi-w-tool-verify-n16-4cards' \ trainer.n_gpus_per_node=4 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=20 \ trainer.total_epochs=15 \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=8192 \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=8192 \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=8192 \ critic.ppo_max_token_len_per_gpu=8192 \ critic.forward_max_token_len_per_gpu=8192 \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ actor_rollout_ref.rollout.multi_turn.tool_config_path="$PROJECT_DIR/examples/sglang_multiturn/config/tool_config/gsm8k_tool_config.yaml" \ actor_rollout_ref.rollout.multi_turn.interaction_config_path="$PROJECT_DIR/examples/sglang_multiturn/config/interaction_config/gsm8k_interaction_config.yaml" \ actor_rollout_ref.rollout.multi_turn.max_user_turns=1 \ $@ 
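All of these example scripts end with `$@`, so any Hydra key can be overridden from the command line without editing the file. A minimal sketch of a quick smoke-test invocation (the override values here are illustrative, not tuned):

```bash
# Shrink the run for a quick functional check; every key below already
# appears in the scripts above, only the values are changed.
bash examples/sglang_multiturn/run_qwen2.5-3b_gsm8k_multiturn_4xgpu.sh \
    data.train_batch_size=64 \
    actor_rollout_ref.actor.ppo_mini_batch_size=64 \
    actor_rollout_ref.rollout.n=4 \
    trainer.total_epochs=1 \
    trainer.test_freq=-1
```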
================================================ FILE: verl_distillation/examples/sglang_multiturn/run_qwen2.5-3b_gsm8k_multiturn_server.sh ================================================ # run on 8xH100 # make sure your current working directory is the root of the project set -x ulimit -n 65535 PROJECT_DIR="$(pwd)" CONFIG_PATH="$PROJECT_DIR/examples/sglang_multiturn/config" function now() { date '+%d-%H-%M' } EXPERIMENT_NAME="qwen2.5-3b_baseline_$(now)" python3 -m verl.trainer.main_ppo \ --config-path="$CONFIG_PATH" \ --config-name='gsm8k_multiturn_grpo_server' \ algorithm.adv_estimator=grpo \ data.train_batch_size=256 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.return_raw_chat=True \ actor_rollout_ref.model.path=Qwen/Qwen2.5-3B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=32 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.rollout.tensor_model_parallel_size=1 \ actor_rollout_ref.rollout.name=sglang \ actor_rollout_ref.rollout.gpu_memory_utilization=0.85 \ actor_rollout_ref.rollout.multi_stage_wake_up=True \ actor_rollout_ref.rollout.n=16 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ actor_rollout_ref.rollout.over_sample_rate=0 \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='multi-turn-grpo-qwen2.5-3b-sglang' \ trainer.experiment_name=$EXPERIMENT_NAME \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=20 \ trainer.val_before_train=True \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ actor_rollout_ref.rollout.multi_turn.tool_config_path="$PROJECT_DIR/examples/sglang_multiturn/config/tool_config/gsm8k_tool_config.yaml" \ trainer.total_epochs=15 \ actor_rollout_ref.rollout.update_weights_bucket_megabytes=512 $@ ================================================ FILE: verl_distillation/examples/sglang_multiturn/run_qwen2.5-3b_gsm8k_multiturn_vllm_fsdp.sh ================================================ # run on Ascend 910 # make sure your current working directory is the root of the project set -x ulimit -n 65535 #set vllm v1 env export VLLM_USE_V1=1 PROJECT_DIR="$(pwd)" CONFIG_PATH="$PROJECT_DIR/examples/sglang_multiturn/config" TRAIN_BATCH_SIZE=32 MICRO_BATCH_SIZE=8 python3 -m verl.trainer.main_ppo \ --config-path="$CONFIG_PATH" \ --config-name='gsm8k_multiturn_grpo' \ actor_rollout_ref.rollout.name=vllm \ algorithm.adv_estimator=grpo \ data.train_batch_size=${TRAIN_BATCH_SIZE} \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.return_raw_chat=True \ actor_rollout_ref.model.path="Qwen/Qwen2.5-3B-Instruct" \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=${TRAIN_BATCH_SIZE} \ 
actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=${MICRO_BATCH_SIZE} \
    actor_rollout_ref.actor.use_kl_loss=True \
    actor_rollout_ref.actor.kl_loss_coef=0.001 \
    actor_rollout_ref.actor.kl_loss_type=low_var_kl \
    actor_rollout_ref.actor.entropy_coeff=0 \
    actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=${MICRO_BATCH_SIZE} \
    actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
    actor_rollout_ref.rollout.n=8 \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.9 \
    actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=${MICRO_BATCH_SIZE} \
    algorithm.use_kl_in_reward=False \
    trainer.critic_warmup=0 \
    trainer.project_name='gsm8k_async_rl' \
    trainer.experiment_name='qwen2.5-3b_function_rm-gsm8k-sgl-multi-w-tool-verify-n16' \
    trainer.device=npu \
    trainer.n_gpus_per_node=16 \
    trainer.nnodes=1 \
    trainer.save_freq=-1 \
    trainer.test_freq=20 \
    trainer.logger='["console"]' \
    data.train_files=$HOME/data/gsm8k/train.parquet \
    data.val_files=$HOME/data/gsm8k/test.parquet \
    trainer.total_epochs=15 \
    actor_rollout_ref.rollout.update_weights_bucket_megabytes=512 \
    actor_rollout_ref.rollout.trace.token2text=False \
    actor_rollout_ref.rollout.mode=async \
    actor_rollout_ref.rollout.multi_turn.enable=true \
    actor_rollout_ref.rollout.enforce_eager=True \
    actor_rollout_ref.actor.use_torch_compile=False \
    actor_rollout_ref.rollout.multi_turn.tool_config_path="$PROJECT_DIR/examples/sglang_multiturn/config/tool_config/gsm8k_tool_config.yaml" \
    actor_rollout_ref.rollout.free_cache_engine=True


================================================
FILE: verl_distillation/examples/sglang_multiturn/run_qwen2.5-3b_gsm8k_tool_agent_mlflow.sh
================================================
# run on 8xH100
# make sure your current working directory is the root of the project

set -x

ulimit -n 65535

PROJECT_DIR="$(pwd)"
CONFIG_PATH="$PROJECT_DIR/examples/sglang_multiturn/config"

python3 -m verl.trainer.main_ppo \
    --config-path="$CONFIG_PATH" \
    --config-name='gsm8k_multiturn_grpo' \
    algorithm.adv_estimator=grpo \
    data.train_batch_size=256 \
    data.max_prompt_length=1024 \
    data.max_response_length=1024 \
    data.filter_overlong_prompts=True \
    data.truncation='error' \
    data.return_raw_chat=True \
    actor_rollout_ref.model.path=Qwen/Qwen2.5-3B-Instruct \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.model.use_remove_padding=True \
    actor_rollout_ref.actor.ppo_mini_batch_size=256 \
    actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=32 \
    actor_rollout_ref.actor.use_kl_loss=True \
    actor_rollout_ref.actor.kl_loss_coef=0.001 \
    actor_rollout_ref.actor.kl_loss_type=low_var_kl \
    actor_rollout_ref.actor.entropy_coeff=0 \
    actor_rollout_ref.model.enable_gradient_checkpointing=True \
    actor_rollout_ref.actor.fsdp_config.param_offload=False \
    actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
    actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=32 \
    actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
    actor_rollout_ref.rollout.name=sglang \
    actor_rollout_ref.rollout.mode=async \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \
    actor_rollout_ref.rollout.n=16 \
    actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=32 \
    actor_rollout_ref.ref.fsdp_config.param_offload=True \
    actor_rollout_ref.rollout.trace.backend=mlflow \
    actor_rollout_ref.rollout.trace.token2text=True \
    algorithm.use_kl_in_reward=False \
    trainer.critic_warmup=0 \
    trainer.logger='["console","mlflow"]' \
    trainer.project_name='gsm8k_tool-agent' \
trainer.experiment_name='qwen2.5-3b_function_rm-gsm8k-sgl-tool-agent-verify-n16' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=20 \ trainer.total_training_steps=2 \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ actor_rollout_ref.rollout.multi_turn.tool_config_path="$PROJECT_DIR/examples/sglang_multiturn/config/tool_config/gsm8k_tool_config.yaml" \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/sglang_multiturn/run_qwen2.5-3b_megatron_gsm8k_multiturn.sh ================================================ # run on 8xH100 # make sure your current working directory is the root of the project # this is a verification training script, the parallel setting should be tuned to your model set -x export PYTHONUNBUFFERED=1 export RAY_DEDUP_LOGS=0 export RUST_BACKTRACE=1 export HYDRA_FULL_ERROR=1 export CUDA_DEVICE_MAX_CONNECTIONS=1 ulimit -n 65535 PROJECT_DIR="$(pwd)" CONFIG_PATH="$PROJECT_DIR/examples/sglang_multiturn/config" python3 -m verl.trainer.main_ppo \ --config-path="$CONFIG_PATH" \ --config-name='gsm8k_multiturn_megatron_grpo' \ algorithm.adv_estimator=grpo \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.return_raw_chat=True \ actor_rollout_ref.model.path=/user/longxiang1/models/Qwen/Qwen2.5-3B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=16 \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=2 \ actor_rollout_ref.actor.megatron.virtual_pipeline_model_parallel_size=2 \ actor_rollout_ref.actor.megatron.context_parallel_size=2 \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=2 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.megatron.seed=42 \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=2 \ actor_rollout_ref.ref.megatron.virtual_pipeline_model_parallel_size=2 \ actor_rollout_ref.ref.megatron.context_parallel_size=2 \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=sglang \ actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \ actor_rollout_ref.rollout.n=8 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=16 \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='gsm8k_async_rl' \ trainer.experiment_name='qwen2.5-3b_function_rm-gsm8k-sgl-multi-w-tool-n8-mcore-v2505201745_seed42' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=20 \ data.train_files=/user/longxiang1/data/gsm8k_verl_sgl_multi_turn_preprocessed_v2/train.parquet \ data.val_files=/user/longxiang1/data/gsm8k_verl_sgl_multi_turn_preprocessed_v2/test.parquet \ actor_rollout_ref.rollout.multi_turn.tool_config_path="$PROJECT_DIR/examples/sglang_multiturn/config/tool_config/gsm8k_tool_config.yaml" \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/sglang_multiturn/run_qwen3-4b_gsm8k_multiturn.sh 
================================================ # run on 8xH100 # make sure your current working directory is the root of the project set -x ulimit -n 65535 PROJECT_DIR="$(pwd)" CONFIG_PATH="$PROJECT_DIR/examples/sglang_multiturn/config" python3 -m verl.trainer.main_ppo \ --config-path="$CONFIG_PATH" \ --config-name='gsm8k_multiturn_grpo' \ algorithm.adv_estimator=grpo \ data.train_batch_size=256 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.return_raw_chat=True \ actor_rollout_ref.model.path=Qwen/Qwen3-4B \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=32 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=sglang \ actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \ actor_rollout_ref.rollout.n=16 \ actor_rollout_ref.rollout.over_sample_rate=0.1 \ actor_rollout_ref.rollout.mode=sync \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='gsm8k_async_rl' \ trainer.experiment_name='qwen3-4b_function_rm-gsm8k-sgl-multi-w-tool-verify-n16' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=20 \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ actor_rollout_ref.rollout.multi_turn.tool_config_path="$PROJECT_DIR/examples/sglang_multiturn/config/tool_config/gsm8k_tool_config.yaml" \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/sglang_multiturn/run_qwen3_4b_dapo_multiturn.sh ================================================ set -x ulimit -n 65535 PROJECT_DIR="$(pwd)" CONFIG_PATH="$PROJECT_DIR/examples/sglang_multiturn/config" pip install --upgrade "huggingface-hub>=0.34.0" hf download \ BytedTsinghua-SIA/DAPO-Math-17k \ --repo-type dataset \ --local-dir $HOME/data/BytedTsinghua-SIA/DAPO-Math-17k hf download \ Maxwell-Jia/AIME_2024 \ --repo-type dataset \ --local-dir $HOME/data/Maxwell-Jia/AIME_2024 # Note: # 1. # a sandbox fusion server is needed to run the code interpreter tool. # docker run -it -p 8080:8080 volcengine/sandbox-fusion:server-20250609 # 2. # The model located at font-info/qwen3-4b-sft-SGLang-RL (https://huggingface.co/font-info/qwen3-4b-sft-SGLang-RL) # is a fine-tuned version provided by the SGLang RL team. Without supervised fine-tuning (SFT) # on the Retool dataset, Dapo training will not converge. # If you still wish to perform SFT from scratch, follow the steps below: # Step 1: Download the SFT dataset #huggingface-cli download JoeYing/ReTool-SFT --repo-type dataset --local-dir ./ReTool-SFT # Step 2: Preprocess the data for SFT #python3 recipe/retool/retool_sft_preprocess.py # Step 3: Run SFT training #bash recipe/retool/run_qwen2-32b_sft.sh # having trouble setup? 
# see https://github.com/zhaochenyang20/Awesome-ML-SYS-Tutorial/blob/main/rlhf/verl/multi-turn/release_log/latest_sglang.md for more details.

python3 -m verl.trainer.main_ppo \
    algorithm.adv_estimator=grpo \
    algorithm.use_kl_in_reward=False \
    algorithm.kl_ctrl.kl_coef=0.0 \
    data.train_files=$HOME/data/BytedTsinghua-SIA/DAPO-Math-17k \
    data.val_files=$HOME/data/Maxwell-Jia/AIME_2024 \
    data.return_raw_chat=True \
    data.train_batch_size=32 \
    data.max_prompt_length=2048 \
    data.max_response_length=16384 \
    data.filter_overlong_prompts=True \
    data.truncation='error' \
    data.custom_cls.path=$PROJECT_DIR/recipe/retool/retool.py \
    data.custom_cls.name=CustomRLHFDataset \
    custom_reward_function.path=$PROJECT_DIR/recipe/retool/retool.py \
    custom_reward_function.name=compute_score \
    actor_rollout_ref.model.path=font-info/qwen3-4b-sft-SGLang-RL \
    actor_rollout_ref.model.use_remove_padding=True \
    actor_rollout_ref.model.enable_gradient_checkpointing=True \
    actor_rollout_ref.actor.use_kl_loss=False \
    actor_rollout_ref.actor.kl_loss_coef=0.0 \
    actor_rollout_ref.actor.clip_ratio_low=0.2 \
    actor_rollout_ref.actor.clip_ratio_high=0.28 \
    actor_rollout_ref.actor.clip_ratio_c=10.0 \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.actor.use_dynamic_bsz=False \
    actor_rollout_ref.actor.ppo_mini_batch_size=32 \
    actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=8 \
    actor_rollout_ref.actor.ppo_max_token_len_per_gpu=32768 \
    actor_rollout_ref.rollout.name=sglang \
    actor_rollout_ref.rollout.mode=async \
    actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \
    actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \
    actor_rollout_ref.rollout.tensor_model_parallel_size=1 \
    actor_rollout_ref.rollout.update_weights_bucket_megabytes=512 \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.85 \
    actor_rollout_ref.rollout.multi_stage_wake_up=True \
    actor_rollout_ref.rollout.multi_turn.enable=True \
    actor_rollout_ref.rollout.multi_turn.max_user_turns=16 \
    actor_rollout_ref.rollout.multi_turn.max_assistant_turns=16 \
    actor_rollout_ref.rollout.multi_turn.tool_config_path=$PROJECT_DIR/recipe/retool/sandbox_fusion_tool_config.yaml \
    actor_rollout_ref.rollout.multi_turn.format=hermes \
    actor_rollout_ref.rollout.n=8 \
    actor_rollout_ref.rollout.val_kwargs.top_p=0.6 \
    actor_rollout_ref.rollout.val_kwargs.temperature=1.0 \
    actor_rollout_ref.rollout.val_kwargs.n=30 \
    trainer.logger=['console','wandb'] \
    trainer.project_name=sglang-dapo-multiturn \
    trainer.experiment_name=qwen3_4b_sft_dapo_multiturn \
    trainer.n_gpus_per_node=8 \
    trainer.log_val_generations=20 \
    trainer.val_before_train=True \
    trainer.nnodes=1 \
    trainer.save_freq=-1 \
    trainer.test_freq=20 \
    trainer.total_epochs=15 \
    $@


================================================
FILE: verl_distillation/examples/sglang_multiturn/search_r1_like/local_dense_retriever/download.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023-2024 SGLang Team
# Copyright 2025 Search-R1 Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Adapted from https://github.com/PeterGriffinJin/Search-R1/blob/main/scripts/download.py

import argparse

from huggingface_hub import hf_hub_download

parser = argparse.ArgumentParser(description="Download files from a Hugging Face dataset repository.")
parser.add_argument("--repo_id", type=str, default="PeterJinGo/wiki-18-e5-index", help="Hugging Face repository ID")
parser.add_argument("--save_path", type=str, required=True, help="Local directory to save files")
args = parser.parse_args()

# Download the sharded FAISS index from the repo given on the command line
# (defaults to PeterJinGo/wiki-18-e5-index).
repo_id = args.repo_id
for file in ["part_aa", "part_ab"]:
    hf_hub_download(
        repo_id=repo_id,
        filename=file,  # e.g., "e5_Flat.index"
        repo_type="dataset",
        local_dir=args.save_path,
    )

# The corpus always comes from the fixed wiki-18 dataset repo.
repo_id = "PeterJinGo/wiki-18-corpus"
hf_hub_download(
    repo_id=repo_id,
    filename="wiki-18.jsonl.gz",
    repo_type="dataset",
    local_dir=args.save_path,
)


================================================
FILE: verl_distillation/examples/sglang_multiturn/search_r1_like/local_dense_retriever/retrieval_server.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023-2024 SGLang Team
# Copyright 2025 Search-R1 Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
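# Example client call (a sketch; assumes the server below is running locally on
# port 8000, matching the `/retrieve` endpoint and uvicorn launch at the bottom):
#
#   import requests
#   resp = requests.post(
#       "http://127.0.0.1:8000/retrieve",
#       json={"queries": ["What is Python?"], "topk": 3, "return_scores": True},
#   )
#   print(resp.json()["result"])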
# Adapted from https://github.com/PeterGriffinJin/Search-R1/blob/main/search_r1/search/retrieval_server.py import argparse import json import warnings from typing import Optional import datasets import faiss import numpy as np import torch import uvicorn from fastapi import FastAPI from pydantic import BaseModel from tqdm import tqdm from transformers import AutoModel, AutoTokenizer def load_corpus(corpus_path: str): corpus = datasets.load_dataset("json", data_files=corpus_path, split="train", num_proc=4) return corpus def load_docs(corpus, doc_idxs): results = [corpus[int(idx)] for idx in doc_idxs] return results def load_model(model_path: str, use_fp16: bool = False): model = AutoModel.from_pretrained(model_path, trust_remote_code=True) model.eval() model.cuda() if use_fp16: model = model.half() tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True, trust_remote_code=True) return model, tokenizer def pooling(pooler_output, last_hidden_state, attention_mask=None, pooling_method="mean"): if pooling_method == "mean": last_hidden = last_hidden_state.masked_fill(~attention_mask[..., None].bool(), 0.0) return last_hidden.sum(dim=1) / attention_mask.sum(dim=1)[..., None] elif pooling_method == "cls": return last_hidden_state[:, 0] elif pooling_method == "pooler": return pooler_output else: raise NotImplementedError("Pooling method not implemented!") class Encoder: def __init__(self, model_name, model_path, pooling_method, max_length, use_fp16): self.model_name = model_name self.model_path = model_path self.pooling_method = pooling_method self.max_length = max_length self.use_fp16 = use_fp16 self.model, self.tokenizer = load_model(model_path=model_path, use_fp16=use_fp16) self.model.eval() @torch.no_grad() def encode(self, query_list: list[str], is_query=True) -> np.ndarray: # processing query for different encoders if isinstance(query_list, str): query_list = [query_list] if "e5" in self.model_name.lower(): if is_query: query_list = [f"query: {query}" for query in query_list] else: query_list = [f"passage: {query}" for query in query_list] if "bge" in self.model_name.lower(): if is_query: query_list = [ f"Represent this sentence for searching relevant passages: {query}" for query in query_list ] inputs = self.tokenizer( query_list, max_length=self.max_length, padding=True, truncation=True, return_tensors="pt" ) inputs = {k: v.cuda() for k, v in inputs.items()} if "T5" in type(self.model).__name__: # T5-based retrieval model decoder_input_ids = torch.zeros((inputs["input_ids"].shape[0], 1), dtype=torch.long).to( inputs["input_ids"].device ) output = self.model(**inputs, decoder_input_ids=decoder_input_ids, return_dict=True) query_emb = output.last_hidden_state[:, 0, :] else: output = self.model(**inputs, return_dict=True) query_emb = pooling( output.pooler_output, output.last_hidden_state, inputs["attention_mask"], self.pooling_method ) if "dpr" not in self.model_name.lower(): query_emb = torch.nn.functional.normalize(query_emb, dim=-1) query_emb = query_emb.detach().cpu().numpy() query_emb = query_emb.astype(np.float32, order="C") del inputs, output torch.cuda.empty_cache() return query_emb class BaseRetriever: def __init__(self, config): self.config = config self.retrieval_method = config.retrieval_method self.topk = config.retrieval_topk self.index_path = config.index_path self.corpus_path = config.corpus_path def _search(self, query: str, num: int, return_score: bool): raise NotImplementedError def _batch_search(self, query_list: list[str], num: int, return_score: bool): raise 
NotImplementedError def search(self, query: str, num: int = None, return_score: bool = False): return self._search(query, num, return_score) def batch_search(self, query_list: list[str], num: int = None, return_score: bool = False): return self._batch_search(query_list, num, return_score) class BM25Retriever(BaseRetriever): def __init__(self, config): super().__init__(config) from pyserini.search.lucene import LuceneSearcher self.searcher = LuceneSearcher(self.index_path) self.contain_doc = self._check_contain_doc() if not self.contain_doc: self.corpus = load_corpus(self.corpus_path) self.max_process_num = 8 def _check_contain_doc(self): return self.searcher.doc(0).raw() is not None def _search(self, query: str, num: int = None, return_score: bool = False): if num is None: num = self.topk hits = self.searcher.search(query, num) if len(hits) < 1: if return_score: return [], [] else: return [] scores = [hit.score for hit in hits] if len(hits) < num: warnings.warn("Not enough documents retrieved!", stacklevel=2) else: hits = hits[:num] if self.contain_doc: all_contents = [json.loads(self.searcher.doc(hit.docid).raw())["contents"] for hit in hits] results = [ { "title": content.split("\n")[0].strip('"'), "text": "\n".join(content.split("\n")[1:]), "contents": content, } for content in all_contents ] else: results = load_docs(self.corpus, [hit.docid for hit in hits]) if return_score: return results, scores else: return results def _batch_search(self, query_list: list[str], num: int = None, return_score: bool = False): results = [] scores = [] for query in query_list: item_result, item_score = self._search(query, num, True) results.append(item_result) scores.append(item_score) if return_score: return results, scores else: return results class DenseRetriever(BaseRetriever): def __init__(self, config): super().__init__(config) self.index = faiss.read_index(self.index_path) if config.faiss_gpu: co = faiss.GpuMultipleClonerOptions() co.useFloat16 = True co.shard = True self.index = faiss.index_cpu_to_all_gpus(self.index, co=co) self.corpus = load_corpus(self.corpus_path) self.encoder = Encoder( model_name=self.retrieval_method, model_path=config.retrieval_model_path, pooling_method=config.retrieval_pooling_method, max_length=config.retrieval_query_max_length, use_fp16=config.retrieval_use_fp16, ) self.topk = config.retrieval_topk self.batch_size = config.retrieval_batch_size def _search(self, query: str, num: int = None, return_score: bool = False): if num is None: num = self.topk query_emb = self.encoder.encode(query) scores, idxs = self.index.search(query_emb, k=num) idxs = idxs[0] scores = scores[0] results = load_docs(self.corpus, idxs) if return_score: return results, scores.tolist() else: return results def _batch_search(self, query_list: list[str], num: int = None, return_score: bool = False): if isinstance(query_list, str): query_list = [query_list] if num is None: num = self.topk results = [] scores = [] for start_idx in tqdm(range(0, len(query_list), self.batch_size), desc="Retrieval process: "): query_batch = query_list[start_idx : start_idx + self.batch_size] batch_emb = self.encoder.encode(query_batch) batch_scores, batch_idxs = self.index.search(batch_emb, k=num) batch_scores = batch_scores.tolist() batch_idxs = batch_idxs.tolist() # load_docs is not vectorized, but is a python list approach flat_idxs = sum(batch_idxs, []) batch_results = load_docs(self.corpus, flat_idxs) # chunk them back batch_results = [batch_results[i * num : (i + 1) * num] for i in range(len(batch_idxs))] 
            results.extend(batch_results)
            scores.extend(batch_scores)

            del batch_emb, batch_scores, batch_idxs, query_batch, flat_idxs, batch_results
            torch.cuda.empty_cache()

        if return_score:
            return results, scores
        else:
            return results


def get_retriever(config):
    if config.retrieval_method == "bm25":
        return BM25Retriever(config)
    else:
        return DenseRetriever(config)


#####################################
# FastAPI server below
#####################################


class Config:
    """
    Minimal config class (simulating your argparse)
    Replace this with your real arguments or load them dynamically.
    """

    def __init__(
        self,
        retrieval_method: str = "bm25",
        retrieval_topk: int = 10,
        index_path: str = "./index/bm25",
        corpus_path: str = "./data/corpus.jsonl",
        dataset_path: str = "./data",
        data_split: str = "train",
        faiss_gpu: bool = True,
        retrieval_model_path: str = "./model",
        retrieval_pooling_method: str = "mean",
        retrieval_query_max_length: int = 256,
        retrieval_use_fp16: bool = False,
        retrieval_batch_size: int = 128,
    ):
        self.retrieval_method = retrieval_method
        self.retrieval_topk = retrieval_topk
        self.index_path = index_path
        self.corpus_path = corpus_path
        self.dataset_path = dataset_path
        self.data_split = data_split
        self.faiss_gpu = faiss_gpu
        self.retrieval_model_path = retrieval_model_path
        self.retrieval_pooling_method = retrieval_pooling_method
        self.retrieval_query_max_length = retrieval_query_max_length
        self.retrieval_use_fp16 = retrieval_use_fp16
        self.retrieval_batch_size = retrieval_batch_size


class QueryRequest(BaseModel):
    queries: list[str]
    topk: Optional[int] = None
    return_scores: bool = False


app = FastAPI()


@app.post("/retrieve")
def retrieve_endpoint(request: QueryRequest):
    """
    Endpoint that accepts queries and performs retrieval.

    Input format:
    {
      "queries": ["What is Python?", "Tell me about neural networks."],
      "topk": 3,
      "return_scores": true
    }

    Output format (when return_scores=True, similarity scores are returned):
    {
      "result": [
        [  # Results for each query
          {"document": doc, "score": score},
          # ... more documents
        ],
        # ... results for other queries
      ]
    }
    """
    if not request.topk:
        request.topk = config.retrieval_topk  # fallback to default

    # Always request scores so batch_search returns a (results, scores) tuple;
    # the scores are only attached to the response if the caller asked for them.
    results, scores = retriever.batch_search(query_list=request.queries, num=request.topk, return_score=True)

    # Format response
    resp = []
    for i, single_result in enumerate(results):
        if request.return_scores:
            # If scores are returned, combine them with results
            combined = []
            for doc, score in zip(single_result, scores[i], strict=True):
                combined.append({"document": doc, "score": score})
            resp.append(combined)
        else:
            resp.append(single_result)
    return {"result": resp}


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Launch the local faiss retriever.")
    parser.add_argument(
        "--index_path", type=str, default="/home/peterjin/mnt/index/wiki-18/e5_Flat.index", help="Corpus indexing file."
    )
    parser.add_argument(
        "--corpus_path",
        type=str,
        default="/home/peterjin/mnt/data/retrieval-corpus/wiki-18.jsonl",
        help="Local corpus file.",
    )
    parser.add_argument("--topk", type=int, default=3, help="Number of retrieved passages for one query.")
    parser.add_argument("--retriever_name", type=str, default="e5", help="Name of the retriever model.")
    parser.add_argument(
        "--retriever_model", type=str, default="intfloat/e5-base-v2", help="Path of the retriever model."
    )
    parser.add_argument("--faiss_gpu", action="store_true", help="Use GPU for computation")
    args = parser.parse_args()

    # 1) Build a config (could also parse from arguments).
# In real usage, you'd parse your CLI arguments or environment variables. config = Config( retrieval_method=args.retriever_name, # or "dense" index_path=args.index_path, corpus_path=args.corpus_path, retrieval_topk=args.topk, faiss_gpu=args.faiss_gpu, retrieval_model_path=args.retriever_model, retrieval_pooling_method="mean", retrieval_query_max_length=256, retrieval_use_fp16=True, retrieval_batch_size=512, ) # 2) Instantiate a global retriever so it is loaded once and reused. retriever = get_retriever(config) # 3) Launch the server. By default, it listens on http://127.0.0.1:8000 uvicorn.run(app, host="0.0.0.0", port=8000) ================================================ FILE: verl_distillation/examples/sglang_multiturn/search_r1_like/run_qwen2.5-3b_instruct_search_multiturn.sh ================================================ # run on 8xH20 # make sure your current working directory is the root of the project set -x ulimit -n 65535 PROJECT_DIR="$(pwd)" CONFIG_PATH="$PROJECT_DIR/examples/sglang_multiturn/config" TRAIN_DATA="$HOME/data/searchR1_processed_direct/train.parquet" VAL_DATA="$HOME/data/searchR1_processed_direct/test.parquet" TOOL_CONFIG="$CONFIG_PATH/tool_config/search_tool_config.yaml" python3 -m verl.trainer.main_ppo \ --config-path="$CONFIG_PATH" \ --config-name='search_multiturn_grpo' \ algorithm.adv_estimator=grpo \ data.train_batch_size=512 \ data.val_batch_size=256 \ data.max_prompt_length=4096 \ data.max_response_length=3000 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.return_raw_chat=True \ actor_rollout_ref.model.path=Qwen/Qwen2.5-3B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps_ratio=0.285 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=8 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.max_model_len=15000 \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=8 \ actor_rollout_ref.rollout.tensor_model_parallel_size=1 \ actor_rollout_ref.rollout.name=sglang \ actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.rollout.multi_turn.max_assistant_turns=2 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=8 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.val_before_train=False \ trainer.logger='["console","wandb"]' \ trainer.project_name='search_r1_like_async_rl' \ trainer.experiment_name='qwen2.5-3b-instruct_function_rm-search-async-sgl-multi-w-searchtool-verify-n16' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=100 \ trainer.test_freq=50 \ data.train_files="$TRAIN_DATA" \ data.val_files="$VAL_DATA" \ actor_rollout_ref.rollout.multi_turn.tool_config_path="$TOOL_CONFIG" \ trainer.total_epochs=1 $@ ================================================ FILE: verl_distillation/examples/skypilot/README.md ================================================ # verl with SkyPilot Run verl reinforcement learning training jobs on Kubernetes clusters or cloud platforms with GPU nodes using 
[SkyPilot](https://github.com/skypilot-org/skypilot). ## Installation and Configuration ### Step 1: Install SkyPilot Choose the installation based on your target platform: ```bash # For Kubernetes only pip install "skypilot[kubernetes]" # For AWS pip install "skypilot[aws]" # For Google Cloud Platform pip install "skypilot[gcp]" # For Azure pip install "skypilot[azure]" # For multiple platforms pip install "skypilot[kubernetes,aws,gcp,azure]" ``` ### Step 2: Configure Your Platform See https://docs.skypilot.co/en/latest/getting-started/installation.html ### Step 3: Set Up Environment Variables Export necessary API keys for experiment tracking: ```bash # For Weights & Biases tracking export WANDB_API_KEY="your-wandb-api-key" # For HuggingFace gated models (if needed) export HF_TOKEN="your-huggingface-token" ``` ## Examples ### PPO Training ```bash sky launch -c verl-ppo verl-ppo.yaml --secret WANDB_API_KEY -y ``` Runs PPO training on GSM8K dataset using Qwen2.5-0.5B-Instruct model across 2 nodes with H100 GPUs. Based on examples in [`../ppo_trainer/`](../ppo_trainer/). ### GRPO Training ```bash sky launch -c verl-grpo verl-grpo.yaml --secret WANDB_API_KEY -y ``` Runs GRPO (Group Relative Policy Optimization) training on MATH dataset using Qwen2.5-7B-Instruct model. Memory-optimized configuration for 2 nodes. Based on examples in [`../grpo_trainer/`](../grpo_trainer/). ### Multi-turn Tool Usage Training ```bash sky launch -c verl-multiturn verl-multiturn-tools.yaml --secret WANDB_API_KEY --secret HF_TOKEN -y ``` Single-node training with 8xH100 GPUs for multi-turn tool usage with Qwen2.5-3B-Instruct. Includes tool and interaction configurations for GSM8K. Based on examples in [`../sglang_multiturn/`](../sglang_multiturn/) but uses vLLM instead of sglang. ## Configuration The example YAML files are pre-configured with: - **Infrastructure**: Kubernetes clusters (`infra: k8s`) - can be changed to `infra: aws` or `infra: gcp`, etc. - **Docker Image**: verl's official Docker image with CUDA 12.6 support - **Setup**: Automatically clones and installs verl from source - **Datasets**: Downloads required datasets during setup phase - **Ray Cluster**: Configures distributed training across nodes - **Logging**: Supports Weights & Biases via `--secret WANDB_API_KEY` - **Models**: Supports gated HuggingFace models via `--secret HF_TOKEN` ## Launch Command Options - `-c `: Cluster name for managing the job - `--secret KEY`: Pass secrets for API keys (can be used multiple times) - `-y`: Skip confirmation prompt ## Monitoring Your Jobs ### Check cluster status ```bash sky status ``` ### View logs ```bash sky logs verl-ppo # View logs for the PPO job ``` ### SSH into head node ```bash ssh verl-ppo ``` ### Access Ray dashboard ```bash sky status --endpoint 8265 verl-ppo # Get dashboard URL ``` ### Stop a cluster ```bash sky down verl-ppo ``` ================================================ FILE: verl_distillation/examples/skypilot/verl-grpo.yaml ================================================ resources: infra: k8s accelerators: H100:1 memory: 128+ image_id: docker:verlai/verl:base-verl0.5-cu126-cudnn9.8-torch2.7.0-fa2.7.4 ports: 8265 num_nodes: 2 secrets: WANDB_API_KEY: setup: | rm -rf verl git clone https://github.com/volcengine/verl.git cd verl pip3 install -v -e .[vllm] pip3 install flashinfer-python echo "Downloading Math dataset..." 
mkdir -p ~/data/math python3 "$(pwd)/examples/data_preprocess/math_dataset.py" --local_dir ~/data/math echo "Math dataset download completed" run: | HEAD_IP=$(echo "$SKYPILOT_NODE_IPS" | head -n1) NUM_NODES=$SKYPILOT_NUM_NODES NUM_GPUS_PER_NODE=$SKYPILOT_NUM_GPUS_PER_NODE if [ "$SKYPILOT_NODE_RANK" == "0" ]; then echo "Starting Ray head node..." ps aux | grep ray | grep 6379 &> /dev/null || ray start --head --disable-usage-stats \ --port=6379 \ --dashboard-host=0.0.0.0 \ --dashboard-port=8265 # Wait for all worker nodes to join retry_count=0 max_retries=30 while [ $retry_count -lt $max_retries ]; do connected_nodes=$(ray status 2>/dev/null | grep -c "node_" || echo "0") echo "Connected nodes: $connected_nodes/$NUM_NODES (attempt $((retry_count+1))/$max_retries)" if [ "$connected_nodes" -ge "$NUM_NODES" ]; then echo "All nodes connected to Ray cluster" break fi retry_count=$((retry_count+1)) sleep 10 done python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/math/train.parquet \ data.val_files=$HOME/data/math/test.parquet \ data.train_batch_size=32 \ data.max_prompt_length=256 \ data.max_response_length=256 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2.5-7B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=16 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \ actor_rollout_ref.actor.ppo_epochs=1 \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=True \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=True \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \ actor_rollout_ref.rollout.tensor_model_parallel_size=1 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \ actor_rollout_ref.rollout.n=1 \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=2048 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=16 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger=[console,wandb] \ trainer.project_name=verl_math_grpo_demo \ trainer.experiment_name=qwen25_7b_grpo \ trainer.n_gpus_per_node=$NUM_GPUS_PER_NODE \ trainer.nnodes=$NUM_NODES \ trainer.save_freq=-1 \ trainer.test_freq=-1 \ trainer.total_epochs=1 else sleep 15 echo "Starting Ray worker node..." ps aux | grep ray | grep $HEAD_IP:6379 &> /dev/null || ray start --address $HEAD_IP:6379 --disable-usage-stats sleep 10 fi echo "Node setup and Ray start script finished for rank $SKYPILOT_NODE_RANK." ================================================ FILE: verl_distillation/examples/skypilot/verl-multiturn-tools.yaml ================================================ resources: infra: k8s accelerators: H100:8 memory: 128+ image_id: docker:verlai/verl:base-verl0.5-cu126-cudnn9.8-torch2.7.0-fa2.7.4 ports: 8265 num_nodes: 1 secrets: WANDB_API_KEY: HF_TOKEN: # in case you're using gated models from the HF hub setup: | rm -rf verl git clone https://github.com/volcengine/verl.git cd verl pip3 install -v -e .[vllm] pip3 install flashinfer-python pip install "transformers<4.54.0" # https://github.com/vllm-project/vllm-ascend/issues/2046 # Download GSM8K dataset for multiturn tool training echo "Downloading GSM8K dataset..." 
mkdir -p ~/data/gsm8k python3 "$(pwd)/examples/data_preprocess/gsm8k.py" --local_dir ~/data/gsm8k echo "GSM8K dataset download completed" run: | NUM_GPUS_PER_NODE=$SKYPILOT_NUM_GPUS_PER_NODE PROJECT_DIR="$(pwd)/verl" CONFIG_PATH="$PROJECT_DIR/examples/sglang_multiturn/config" # Single node setup - no worker coordination needed echo "Starting Ray head node..." ps aux | grep ray | grep 6379 &> /dev/null || ray start --head --disable-usage-stats \ --port=6379 \ --dashboard-host=0.0.0.0 \ --dashboard-port=8265 cd verl python3 -m verl.trainer.main_ppo \ --config-path="$CONFIG_PATH" \ --config-name='gsm8k_multiturn_grpo' \ algorithm.adv_estimator=grpo \ data.train_batch_size=512 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.return_raw_chat=True \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ actor_rollout_ref.model.path=Qwen/Qwen2.5-3B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=512 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=32 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=64 \ actor_rollout_ref.rollout.tensor_model_parallel_size=4 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \ actor_rollout_ref.rollout.n=16 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=64 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger=[console,wandb] \ trainer.project_name=verl_multiturn_tools \ trainer.experiment_name=qwen25_7b_gsm8k_multiturn_tools \ trainer.n_gpus_per_node=$NUM_GPUS_PER_NODE \ trainer.nnodes=1 \ trainer.save_freq=10 \ trainer.test_freq=5 \ trainer.total_epochs=10 \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=8192 \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=8192 \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=8192 \ critic.ppo_max_token_len_per_gpu=8192 \ critic.forward_max_token_len_per_gpu=8192 \ actor_rollout_ref.rollout.multi_turn.tool_config_path="$PROJECT_DIR/examples/sglang_multiturn/config/tool_config/gsm8k_tool_config.yaml" \ actor_rollout_ref.rollout.multi_turn.interaction_config_path="$PROJECT_DIR/examples/sglang_multiturn/config/interaction_config/gsm8k_interaction_config.yaml" \ actor_rollout_ref.rollout.multi_turn.max_user_turns=1 echo "Node setup and Ray start script finished for rank $SKYPILOT_NODE_RANK." ================================================ FILE: verl_distillation/examples/skypilot/verl-ppo.yaml ================================================ resources: infra: k8s accelerators: H100:1 memory: 128+ image_id: docker:verlai/verl:base-verl0.5-cu126-cudnn9.8-torch2.7.0-fa2.7.4 ports: 8265 num_nodes: 2 secrets: WANDB_API_KEY: setup: | rm -rf verl git clone https://github.com/volcengine/verl.git cd verl pip3 install -v -e .[vllm] pip3 install flashinfer-python # Download GSM8K dataset - alternative approach echo "Downloading GSM8K dataset..." 
mkdir -p ~/data/gsm8k # Check if the script exists and use absolute path if [ -f "$(pwd)/examples/data_preprocess/gsm8k.py" ]; then python3 "$(pwd)/examples/data_preprocess/gsm8k.py" --local_dir ~/data/gsm8k else echo "Warning: gsm8k.py script not found, skipping dataset download" # You might want to download the dataset manually or use a different approach fi echo "GSM8K dataset download completed" run: | # Get the Head node's IP and total number of nodes HEAD_IP=$(echo "$SKYPILOT_NODE_IPS" | head -n1) NUM_NODES=$SKYPILOT_NUM_NODES # login wandb # python3 -c "import wandb; wandb.login(relogin=True, key='$WANDB_API_KEY')" if [ "$SKYPILOT_NODE_RANK" == "0" ]; then # Head node starts Ray Head echo "Starting Ray head node..." ps aux | grep ray | grep 6379 &> /dev/null || ray start --head --disable-usage-stats \ --port=6379 \ --dashboard-host=0.0.0.0 \ --dashboard-port=8265 # Wait for all worker nodes to join the cluster with better checking echo "Waiting for all nodes to join Ray cluster..." retry_count=0 max_retries=30 while [ $retry_count -lt $max_retries ]; do connected_nodes=$(ray status 2>/dev/null | grep -c "node_" || echo "0") echo "Connected nodes: $connected_nodes/$NUM_NODES (attempt $((retry_count+1))/$max_retries)" if [ "$connected_nodes" -ge "$NUM_NODES" ]; then echo "All nodes connected to Ray cluster" break fi retry_count=$((retry_count+1)) sleep 10 done if [ $retry_count -eq $max_retries ]; then echo "WARNING: Not all nodes connected to Ray cluster after $max_retries attempts" echo "Current Ray status:" ray status fi python3 -m verl.trainer.main_ppo \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=256 \ data.max_prompt_length=512 \ data.max_response_length=256 \ actor_rollout_ref.model.path=Qwen/Qwen2.5-0.5B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=64 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=8 \ actor_rollout_ref.rollout.tensor_model_parallel_size=1 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \ critic.optim.lr=1e-5 \ critic.model.path=Qwen/Qwen2.5-0.5B-Instruct \ critic.ppo_micro_batch_size_per_gpu=4 \ algorithm.kl_ctrl.kl_coef=0.001 \ trainer.logger=[console,wandb] \ trainer.val_before_train=False \ trainer.default_hdfs_dir=null \ trainer.n_gpus_per_node=1 \ trainer.nnodes=2 \ trainer.save_freq=20 \ trainer.test_freq=20 \ trainer.total_epochs=2 \ trainer.project_name=verl_examples \ trainer.experiment_name=experiment_name_gsm8k else # Wait for Ray Head to start sleep 15 # Worker node starts Ray Worker echo "Starting Ray worker node..." ps aux | grep ray | grep $HEAD_IP:6379 &> /dev/null || ray start --address $HEAD_IP:6379 --disable-usage-stats sleep 10 fi echo "Node setup and Ray start script finished for rank $SKYPILOT_NODE_RANK." 
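The readiness loops in the YAMLs above count `node_` lines in the text output of `ray status`, which is sensitive to formatting changes across Ray versions (and `grep -c` can emit a second "0" through the `|| echo` fallback when nothing matches). A more structural check (a sketch, not part of these examples) queries the Ray Python API directly:

```bash
# Sketch: count alive Ray nodes via the Python API instead of grepping text.
connected_nodes=$(python3 -c "
import ray
ray.init(address='auto', ignore_reinit_error=True)
print(sum(1 for node in ray.nodes() if node['Alive']))
")
```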
================================================
FILE: verl_distillation/examples/slurm/ray_on_slurm.slurm
================================================
#!/bin/bash
#SBATCH --job-name=verl-ray-on-slurm
#SBATCH --nodes=2
#SBATCH --ntasks-per-node=1
#SBATCH --mem=200G
#SBATCH --partition=your-partition
#SBATCH --time=01:00:00
#SBATCH --account=your-account
#SBATCH --gpus-per-node=4
#SBATCH --cpus-per-task=64
#SBATCH --output=slurm-%j.out
#SBATCH --error=slurm-%j.err

# load necessary modules

# replace this information with your own
verl_workdir=/path/to/verl
train_files=/path/to/gsm8k/train.parquet
val_files=/path/to/gsm8k/test.parquet
apptainer_image_path=/path/to/verl-ngc.sif
# replace this information with your own

# Getting the node names
nodes=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
nodes_array=($nodes)  # intentionally unquoted: word-splitting turns each hostname into an array element

head_node=${nodes_array[0]}
head_node_ip=$(srun --nodes=1 --ntasks=1 -w "$head_node" hostname --ip-address)

# if we detect a space character in the head node IP, we'll
# convert it to an ipv4 address. This step is optional.
if [[ "$head_node_ip" == *" "* ]]; then
    IFS=' ' read -ra ADDR <<<"$head_node_ip"
    if [[ ${#ADDR[0]} -gt 16 ]]; then
        head_node_ip=${ADDR[1]}
    else
        head_node_ip=${ADDR[0]}
    fi
    echo "IPV6 address detected. We split the IPV4 address as $head_node_ip"
fi

port=6379
ip_head=$head_node_ip:$port
export ip_head
echo "IP Head: $ip_head"

# make sure we set environment variables before Ray initialization
printenv

echo "Starting HEAD at $head_node"
srun --nodes=1 --ntasks=1 -w "$head_node" \
    apptainer run --nv --bind $verl_workdir $apptainer_image_path \
    ray start --head --node-ip-address="$head_node_ip" --port=$port \
    --num-cpus "${SLURM_CPUS_PER_TASK}" --num-gpus "${SLURM_GPUS_PER_NODE}" --block &

# optional, though may be useful in certain versions of Ray < 1.0.
sleep 10

# number of nodes other than the head node
worker_num=$((SLURM_JOB_NUM_NODES - 1))

for ((i = 1; i <= worker_num; i++)); do
    node_i=${nodes_array[$i]}
    echo "Starting WORKER $i at $node_i"
    srun --nodes=1 --ntasks=1 -w "$node_i" \
        apptainer run --nv --bind $verl_workdir $apptainer_image_path \
        ray start --address "$ip_head" --num-cpus "${SLURM_CPUS_PER_TASK}" --num-gpus "${SLURM_GPUS_PER_NODE}" --block &
    sleep 5
done

PYTHONUNBUFFERED=1 srun --overlap --nodes=1 --ntasks=1 -w "$head_node" \
    apptainer run --nv --bind $verl_workdir $apptainer_image_path \
    python3 -m verl.trainer.main_ppo \
    algorithm.adv_estimator=gae \
    data.train_files=$train_files \
    data.val_files=$val_files \
    data.train_batch_size=256 \
    data.max_prompt_length=512 \
    data.max_response_length=256 \
    actor_rollout_ref.model.path=Qwen/Qwen2.5-0.5B-Instruct \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.actor.ppo_mini_batch_size=64 \
    actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \
    actor_rollout_ref.actor.use_kl_loss=False \
    actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=8 \
    actor_rollout_ref.rollout.tensor_model_parallel_size=1 \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \
    critic.optim.lr=1e-5 \
    critic.model.path=Qwen/Qwen2.5-0.5B-Instruct \
    critic.ppo_micro_batch_size_per_gpu=4 \
    algorithm.use_kl_in_reward=False \
    trainer.logger=console \
    trainer.val_before_train=False \
    trainer.n_gpus_per_node="${SLURM_GPUS_PER_NODE}" \
    trainer.nnodes="${SLURM_NNODES}" \
    trainer.save_freq=10 \
    trainer.test_freq=10 \
    trainer.total_epochs=15 2>&1 | tee verl_demo_slurm.log


================================================
FILE: verl_distillation/examples/split_placement/README.md
================================================
# Split Placement Example

Here we introduce how to run a naive implementation of split placement for the PPO algorithm. We will release the complete version of flexible placement in the near future.

For a quick start, you only need to follow Step 2 to modify the code and then Step 4 to execute the split placement example.

### Step 1: Placing the models to different GPUs

Specify the placement and resource allocation. In the example, we place the actor and reference policy in the first half of the GPUs while mapping the critic and reward model (if any) to the second half of the GPUs.

```python
actor_rollout_ref_pool_id = 'actor_rollout_ref_pool'
critic_pool_id = 'critic_pool'
if config.trainer.nnodes // 2 == 0 and config.trainer.n_gpus_per_node // 2 > 0:
    resource_pool_spec = {
        actor_rollout_ref_pool_id: [config.trainer.n_gpus_per_node // 2] * config.trainer.nnodes,
        critic_pool_id: [config.trainer.n_gpus_per_node // 2] * config.trainer.nnodes,
    }
else:
    resource_pool_spec = {
        actor_rollout_ref_pool_id: [config.trainer.n_gpus_per_node] * (config.trainer.nnodes // 2),
        critic_pool_id: [config.trainer.n_gpus_per_node] * (config.trainer.nnodes // 2),
    }
print(f'resource_pool_spec: {resource_pool_spec}')
mapping = {
    Role.ActorRollout: actor_rollout_ref_pool_id,
    Role.Critic: critic_pool_id,
    Role.RefPolicy: actor_rollout_ref_pool_id,
}
mapping[Role.RewardModel] = critic_pool_id
```

### Step 2: Make the models execute asynchronously

Based on the model placement, we need to make the models execute asynchronously. To do so, you need to turn off the `blocking` flag (i.e., `blocking=False`) in our decorator of some model operations.
For example, if we want the actor update and critic update to execute in parallel, we make the following modification in `fsdp_workers.py`:

```python
@register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO, blocking=False)
def update_actor(self, data: DataProto):
    ...

@register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO, blocking=False)
def update_critic(self, data: DataProto):
    ...
```

We could also parallelize the computation of `ref_log_prob`, `values`, and `rewards` under the split placement. For simplicity of the tutorial, we don't do this in this example.

### Step 3: Execute these operations in parallel in the single controller process

To implement the parallel execution of the actor and critic update, the only thing we need to modify in `ray_trainer.py` is to `get` the concurrent `futures` on the single controller process.

```python
critic_output = critic_output.get()
actor_output = actor_output.get()
```

### Step 4: Run the split placement example

```bash
bash run_deepseek7b_llm.sh
```


================================================
FILE: verl_distillation/examples/split_placement/config/ppo_trainer_split.yaml
================================================
# the ppo trainer split config will override default ppo_trainer.yaml
hydra:
  searchpath:
    - file://../../verl/trainer/config

defaults:
  - ppo_trainer
  - _self_

data:
  tokenizer: null
  train_files: ~/data/rlhf/gsm8k/train.parquet
  val_files: ~/data/rlhf/gsm8k/test.parquet
  train_max_samples: -1 # set to -1 to use full dataset
  val_max_samples: -1 # set to -1 to use full dataset
  prompt_key: prompt
  max_prompt_length: 512
  max_response_length: 512
  train_batch_size: 1024
  val_batch_size: null # DEPRECATED: Validation datasets are sent to inference engines as a whole batch, which will schedule the memory themselves
  return_raw_input_ids: False # This should be set to true when the tokenizer between policy and rm differs
  return_raw_chat: False
  return_full_prompt: False
  shuffle: True
  seed: 42

actor_rollout_ref:
  hybrid_engine: True
  model:
    path: ~/models/deepseek-llm-7b-chat
    external_lib: null
    override_config: { }
    enable_gradient_checkpointing: True
    use_remove_padding: False
  actor:
    strategy: fsdp # This is for backward-compatibility
    ppo_mini_batch_size: 256
    ppo_micro_batch_size: null # will be deprecated, use ppo_micro_batch_size_per_gpu
    ppo_micro_batch_size_per_gpu: null
    use_dynamic_bsz: False
    ppo_max_token_len_per_gpu: 16384 # n * ${data.max_prompt_length} + ${data.max_response_length}
    grad_clip: 1.0
    clip_ratio: 0.2
    entropy_coeff: 0.0
    use_kl_loss: False # True for GRPO
    kl_loss_coef: 0.001 # for grpo
    kl_loss_type: low_var_kl # for grpo
    ppo_epochs: 1
    shuffle: False
    ulysses_sequence_parallel_size: 1 # sp size
    optim:
      lr: 1e-6
      lr_warmup_steps: -1 # Prioritized. Negative values mean delegating to lr_warmup_steps_ratio.
      lr_warmup_steps_ratio: 0.
      # the total steps will be injected during runtime
      min_lr_ratio: null # only useful for warmup with cosine
      lr_scheduler_type: constant # select from constant/cosine
      total_training_steps: -1 # must be overridden by the program
    fsdp_config:
      wrap_policy:
        # transformer_layer_cls_to_wrap: None
        min_num_params: 0
      param_offload: False
      optimizer_offload: False
      fsdp_size: -1
  ref:
    fsdp_config:
      param_offload: False
      wrap_policy:
        # transformer_layer_cls_to_wrap: None
        min_num_params: 0
    log_prob_micro_batch_size: null # will be deprecated, use log_prob_micro_batch_size_per_gpu
    log_prob_micro_batch_size_per_gpu: null
    log_prob_use_dynamic_bsz: ${actor_rollout_ref.actor.use_dynamic_bsz}
    log_prob_max_token_len_per_gpu: ${actor_rollout_ref.actor.ppo_max_token_len_per_gpu}
    ulysses_sequence_parallel_size: ${actor_rollout_ref.actor.ulysses_sequence_parallel_size} # sp size
  rollout:
    name: vllm
    temperature: 1.0
    top_k: -1 # 0 for hf rollout, -1 for vllm rollout
    top_p: 1
    prompt_length: ${data.max_prompt_length} # not used in the open-source version
    response_length: ${data.max_response_length}
    # for vllm rollout
    dtype: bfloat16 # should align with FSDP
    gpu_memory_utilization: 0.5
    ignore_eos: False
    enforce_eager: True
    free_cache_engine: True
    load_format: dummy_dtensor
    tensor_model_parallel_size: 2
    max_num_batched_tokens: 8192
    max_num_seqs: 1024
    log_prob_micro_batch_size: null # will be deprecated, use log_prob_micro_batch_size_per_gpu
    log_prob_micro_batch_size_per_gpu: null
    log_prob_use_dynamic_bsz: ${actor_rollout_ref.actor.use_dynamic_bsz}
    log_prob_max_token_len_per_gpu: ${actor_rollout_ref.actor.ppo_max_token_len_per_gpu}
    disable_log_stats: True
    enable_chunked_prefill: True # could get higher throughput
    # for hf rollout
    do_sample: True
    # number of responses (i.e. num sample times)
    n: 1 # > 1 for grpo

critic:
  strategy: fsdp
  optim:
    lr: 1e-5
    lr_warmup_steps_ratio: 0.
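    # note: the critic uses a 10x higher learning rate (1e-5) than the actor (1e-6) in this config.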
    # the total steps will be injected during runtime
    min_lr_ratio: null # only useful for warmup with cosine
    lr_scheduler_type: constant # select from constant/cosine
    total_training_steps: -1 # must be overridden by the program
  model:
    path: ~/models/deepseek-llm-7b-chat
    tokenizer_path: ${actor_rollout_ref.model.path}
    override_config: { }
    external_lib: ${actor_rollout_ref.model.external_lib}
    enable_gradient_checkpointing: True
    use_remove_padding: False
    fsdp_config:
      param_offload: False
      optimizer_offload: False
      wrap_policy:
        # transformer_layer_cls_to_wrap: None
        min_num_params: 0
      fsdp_size: -1
  ppo_mini_batch_size: ${actor_rollout_ref.actor.ppo_mini_batch_size}
  ppo_micro_batch_size: null # will be deprecated, use ppo_micro_batch_size_per_gpu
  ppo_micro_batch_size_per_gpu: null
  forward_micro_batch_size: ${critic.ppo_micro_batch_size}
  forward_micro_batch_size_per_gpu: ${critic.ppo_micro_batch_size_per_gpu}
  use_dynamic_bsz: ${actor_rollout_ref.actor.use_dynamic_bsz}
  ppo_max_token_len_per_gpu: 32768 # (${actor_rollout_ref.actor.ppo_max_token_len_per_gpu}) * 2
  forward_max_token_len_per_gpu: ${critic.ppo_max_token_len_per_gpu}
  ulysses_sequence_parallel_size: 1 # sp size
  ppo_epochs: ${actor_rollout_ref.actor.ppo_epochs}
  shuffle: ${actor_rollout_ref.actor.shuffle}
  grad_clip: 1.0
  cliprange_value: 0.5

reward_model:
  enable: False
  strategy: fsdp
  model:
    input_tokenizer: ${actor_rollout_ref.model.path} # set this to null if the chat template is identical
    path: ~/models/FsfairX-LLaMA3-RM-v0.1
    external_lib: ${actor_rollout_ref.model.external_lib}
    use_remove_padding: False
    fsdp_config:
      min_num_params: 0
      param_offload: False
      fsdp_size: -1
  micro_batch_size: null # will be deprecated, use micro_batch_size_per_gpu
  micro_batch_size_per_gpu: null # set a number
  max_length: null
  ulysses_sequence_parallel_size: 1 # sp size
  use_dynamic_bsz: ${critic.use_dynamic_bsz}
  forward_max_token_len_per_gpu: ${critic.forward_max_token_len_per_gpu}
  reward_manager: naive

algorithm:
  gamma: 1.0
  lam: 1.0
  adv_estimator: gae
  use_kl_in_reward: False
  kl_penalty: kl # how to estimate kl divergence
  kl_ctrl:
    type: fixed
    kl_coef: 0.001

trainer:
  total_epochs: 30
  total_training_steps: null
  project_name: verl_examples
  experiment_name: gsm8k
  logger: [ 'console', 'wandb' ]
  log_val_generations: 0
  nnodes: 1
  n_gpus_per_node: 8
  save_freq: -1
  # auto: find the last ckpt to resume. If can't find, start from scratch
  resume_mode: auto # or disable, or resume_path if resume_from_path is set
  resume_from_path: null
  test_freq: -1
  critic_warmup: 0
  default_hdfs_dir: null
  default_local_dir: checkpoints/${trainer.project_name}/${trainer.experiment_name}

ray_kwargs:
  ray_init:
    num_cpus: null # `None` means using all CPUs, which might cause a hang in systems like SLURM where CPUs are limited. Please set it to an allowed number instead.


================================================
FILE: verl_distillation/examples/split_placement/main_ppo_split.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
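# Illustrative resource splits implied by the pool logic in main_task() below (an
# added sketch, not from the original file):
#   nnodes=1, n_gpus_per_node=8 -> nnodes // 2 == 0, so each pool takes half the GPUs
#   on every node: actor_rollout_ref_pool=[4], critic_pool=[4]
#   nnodes=2, n_gpus_per_node=8 -> each pool takes all GPUs on half of the nodes:
#   actor_rollout_ref_pool=[8], critic_pool=[8]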
""" Note that we don't combine the main with ray_trainer as ray_trainer is used by other main. """ import hydra import ray import torch from omegaconf import OmegaConf from split_monkey_patch import fit from verl import DataProto from verl.trainer.ppo.ray_trainer import RayPPOTrainer from verl.utils.reward_score import gsm8k, math_reward def _select_rm_score_fn(data_source): if data_source == "openai/gsm8k": return gsm8k.compute_score elif data_source == "lighteval/MATH": return math_reward.compute_score else: raise NotImplementedError class RewardManager: def __init__(self, tokenizer, num_examine) -> None: self.tokenizer = tokenizer self.num_examine = num_examine # the number of batches of decoded responses to print to the console def __call__(self, data: DataProto, return_dict: bool = False): """We will expand this function gradually based on the available datasets""" # If there is rm score, we directly return rm score. Otherwise, we compute via rm_score_fn if "rm_scores" in data.batch.keys(): return data.batch["rm_scores"] reward_tensor = torch.zeros_like(data.batch["responses"], dtype=torch.float32) already_print_data_sources = {} for i in range(len(data)): data_item = data[i] # DataProtoItem prompt_ids = data_item.batch["prompts"] prompt_length = prompt_ids.shape[-1] valid_prompt_length = data_item.batch["attention_mask"][:prompt_length].sum() valid_prompt_ids = prompt_ids[-valid_prompt_length:] response_ids = data_item.batch["responses"] valid_response_length = data_item.batch["attention_mask"][prompt_length:].sum() valid_response_ids = response_ids[:valid_response_length] # decode sequences = torch.cat((valid_prompt_ids, valid_response_ids)) sequences_str = self.tokenizer.decode(sequences) ground_truth = data_item.non_tensor_batch["reward_model"]["ground_truth"] # select rm_score data_source = data_item.non_tensor_batch["data_source"] compute_score_fn = _select_rm_score_fn(data_source) score = compute_score_fn(solution_str=sequences_str, ground_truth=ground_truth) reward_tensor[i, valid_response_length - 1] = score if data_source not in already_print_data_sources: already_print_data_sources[data_source] = 0 if already_print_data_sources[data_source] < self.num_examine: already_print_data_sources[data_source] += 1 print(sequences_str) if return_dict: return {"reward_tensor": reward_tensor} else: return reward_tensor @hydra.main(config_path="config", config_name="ppo_trainer_split", version_base=None) def main(config): if not ray.is_initialized(): # this is for local ray cluster default_runtime_env = {"env_vars": {"TOKENIZERS_PARALLELISM": "true", "NCCL_DEBUG": "WARN"}} ray_init_kwargs = config.ray_kwargs.get("ray_init", {}) runtime_env_kwargs = ray_init_kwargs.get("runtime_env", {}) runtime_env = OmegaConf.merge(default_runtime_env, runtime_env_kwargs) ray_init_kwargs = OmegaConf.create({**ray_init_kwargs, "runtime_env": runtime_env}) print(f"ray init kwargs: {ray_init_kwargs}") ray.init(**OmegaConf.to_container(ray_init_kwargs)) ray.get(main_task.remote(config)) @ray.remote def main_task(config): # print initial config from pprint import pprint from omegaconf import OmegaConf from verl.utils.fs import copy_to_local pprint(OmegaConf.to_container(config, resolve=True)) # resolve=True will eval symbol values OmegaConf.resolve(config) # download the checkpoint from hdfs local_path = copy_to_local(config.actor_rollout_ref.model.path) # instantiate tokenizer from verl.utils import hf_tokenizer tokenizer = hf_tokenizer(local_path) # define worker classes if 
config.actor_rollout_ref.actor.strategy in {"fsdp", "fsdp2"}: assert config.critic.strategy in {"fsdp", "fsdp2"} from verl.single_controller.ray import RayWorkerGroup from verl.workers.fsdp_workers import ActorRolloutRefWorker, CriticWorker ray_worker_group_cls = RayWorkerGroup elif config.actor_rollout_ref.actor.strategy == "megatron": assert config.actor_rollout_ref.actor.strategy == config.critic.strategy from verl.single_controller.ray import RayWorkerGroup from verl.workers.megatron_workers import ActorRolloutRefWorker, CriticWorker ray_worker_group_cls = RayWorkerGroup else: raise NotImplementedError from verl.trainer.ppo.ray_trainer import ResourcePoolManager, Role role_worker_mapping = { Role.ActorRollout: ray.remote(ActorRolloutRefWorker), Role.Critic: ray.remote(CriticWorker), } # NOTE: initialze two resource pool actor_rollout_ref_pool_id = "actor_rollout_ref_pool" critic_pool_id = "critic_pool" if config.trainer.nnodes // 2 == 0 and config.trainer.n_gpus_per_node // 2 > 0: resource_pool_spec = { actor_rollout_ref_pool_id: [config.trainer.n_gpus_per_node // 2] * config.trainer.nnodes, critic_pool_id: [config.trainer.n_gpus_per_node // 2] * config.trainer.nnodes, } else: resource_pool_spec = { actor_rollout_ref_pool_id: [config.trainer.n_gpus_per_node] * (config.trainer.nnodes // 2), critic_pool_id: [config.trainer.n_gpus_per_node] * (config.trainer.nnodes // 2), } print(f"resource_pool_spec: {resource_pool_spec}") mapping = { Role.ActorRollout: actor_rollout_ref_pool_id, Role.Critic: critic_pool_id, } # use reference model if config.algorithm.use_kl_in_reward or config.actor_rollout_ref.actor.use_kl_loss: role_worker_mapping[Role.RefPolicy] = ray.remote(ActorRolloutRefWorker) mapping[Role.RefPolicy] = actor_rollout_ref_pool_id # we should adopt a multi-source reward function here # - for rule-based rm, we directly call a reward score # - for model-based rm, we call a model # - for code related prompt, we send to a sandbox if there are test cases # - finally, we combine all the rewards together # - The reward type depends on the tag of the data if config.reward_model.enable: if config.reward_model.strategy in {"fsdp", "fsdp2"}: from verl.workers.fsdp_workers import RewardModelWorker elif config.reward_model.strategy == "megatron": from verl.workers.megatron_workers import RewardModelWorker else: raise NotImplementedError role_worker_mapping[Role.RewardModel] = ray.remote(RewardModelWorker) mapping[Role.RewardModel] = critic_pool_id reward_fn = RewardManager(tokenizer=tokenizer, num_examine=0) # Note that we always use function-based RM for validation val_reward_fn = RewardManager(tokenizer=tokenizer, num_examine=1) resource_pool_manager = ResourcePoolManager(resource_pool_spec=resource_pool_spec, mapping=mapping) RayPPOTrainer.fit = fit trainer = RayPPOTrainer( config=config, tokenizer=tokenizer, role_worker_mapping=role_worker_mapping, resource_pool_manager=resource_pool_manager, ray_worker_group_cls=ray_worker_group_cls, reward_fn=reward_fn, val_reward_fn=val_reward_fn, ) trainer.init_workers() trainer.fit() if __name__ == "__main__": main() ================================================ FILE: verl_distillation/examples/split_placement/run_deepseek7b_llm.sh ================================================ set -x python3 main_ppo_split.py \ algorithm.adv_estimator=gae \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=1024 \ data.max_prompt_length=512 \ data.max_response_length=512 \ 
    data.filter_overlong_prompts=True \
    data.truncation='error' \
    actor_rollout_ref.model.path=deepseek-ai/deepseek-llm-7b-chat \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.actor.ppo_mini_batch_size=256 \
    actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=8 \
    actor_rollout_ref.actor.fsdp_config.param_offload=False \
    actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
    actor_rollout_ref.actor.use_kl_loss=False \
    actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=8 \
    actor_rollout_ref.rollout.tensor_model_parallel_size=4 \
    actor_rollout_ref.rollout.name=vllm \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \
    critic.optim.lr=1e-5 \
    critic.model.path=deepseek-ai/deepseek-llm-7b-chat \
    critic.model.enable_gradient_checkpointing=False \
    critic.ppo_micro_batch_size_per_gpu=8 \
    critic.model.fsdp_config.param_offload=False \
    critic.model.fsdp_config.optimizer_offload=False \
    algorithm.use_kl_in_reward=False \
    trainer.critic_warmup=0 \
    trainer.logger='["console","wandb"]' \
    trainer.project_name='verl_example_gsm8k' \
    trainer.experiment_name='deepseek_llm_7b_function_rm' \
    trainer.n_gpus_per_node=8 \
    trainer.nnodes=1 \
    trainer.save_freq=-1 \
    trainer.total_epochs=15 $@


================================================
FILE: verl_distillation/examples/split_placement/split_monkey_patch.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A naive implementation of the split placement example
"""

import uuid
from copy import deepcopy
from pprint import pprint

import numpy as np
import torch

from verl import DataProto
from verl.trainer.ppo.ray_trainer import (
    AdvantageEstimator,
    apply_kl_penalty,
    compute_advantage,
    compute_data_metrics,
    compute_timing_metrics,
    marked_timer,
)
from verl.trainer.ppo.reward import compute_reward
from verl.utils.metric import reduce_metrics


def fit(self):
    """
    The training loop of PPO.
    The driver process only needs to call the compute functions of the worker group through RPC
    to construct the PPO dataflow.
    The light-weight advantage computation is done on the driver process.
    """
    from omegaconf import OmegaConf

    from verl.utils.tracking import Tracking

    logger = Tracking(
        project_name=self.config.trainer.project_name,
        experiment_name=self.config.trainer.experiment_name,
        default_backend=self.config.trainer.logger,
        config=OmegaConf.to_container(self.config, resolve=True),
    )

    self.global_steps = 0

    # load checkpoint before doing anything
    self._load_checkpoint()

    # perform validation before training
    # currently, we only support validation using the reward_function.
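    # note: the block below runs only when a val reward function is configured and
    # trainer.val_before_train is left at its default of True; when trainer.val_only
    # is set, fit() returns immediately after this initial validation pass.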
    if self.val_reward_fn is not None and self.config.trainer.get("val_before_train", True):
        val_metrics = self._validate()
        pprint(f"Initial validation metrics: {val_metrics}")
        logger.log(data=val_metrics, step=self.global_steps)
        if self.config.trainer.get("val_only", False):
            return

    # we start from step 1
    self.global_steps += 1
    last_val_metrics = None

    for epoch in range(self.config.trainer.total_epochs):
        for batch_dict in self.train_dataloader:
            metrics = {}
            timing_raw = {}
            batch: DataProto = DataProto.from_single_dict(batch_dict)

            # pop those keys for generation
            gen_batch = batch.pop(batch_keys=["input_ids", "attention_mask", "position_ids"])
            is_last_step = self.global_steps >= self.total_training_steps

            with marked_timer("step", timing_raw):
                # generate a batch
                with marked_timer("gen", timing_raw):
                    gen_batch_output = self.actor_rollout_wg.generate_sequences(gen_batch)
                    timing_raw.update(gen_batch_output.meta_info["timing"])
                    gen_batch_output.meta_info.pop("timing", None)

                if self.config.algorithm.adv_estimator == AdvantageEstimator.REMAX:
                    with marked_timer("gen_max", timing_raw):
                        gen_baseline_batch = deepcopy(gen_batch)
                        gen_baseline_batch.meta_info["do_sample"] = False
                        gen_baseline_output = self.actor_rollout_wg.generate_sequences(gen_baseline_batch)
                        batch = batch.union(gen_baseline_output)

                        # compute reward model score on batch
                        rm_scores = None
                        if self.use_rm and "rm_scores" not in batch.batch.keys():
                            rm_scores = self.rm_wg.compute_rm_score(batch)
                            batch = batch.union(rm_scores)
                        reward_baseline_tensor, _ = compute_reward(batch, self.reward_fn)
                        reward_baseline_tensor = reward_baseline_tensor.sum(dim=-1)

                        keys_to_pop = set(gen_baseline_output.batch.keys())
                        if rm_scores is not None:
                            keys_to_pop.update(rm_scores.batch.keys())
                        batch.pop(batch_keys=list(keys_to_pop))

                        batch.batch["reward_baselines"] = reward_baseline_tensor

                        del rm_scores, gen_baseline_batch, gen_baseline_output

                batch.non_tensor_batch["uid"] = np.array(
                    [str(uuid.uuid4()) for _ in range(len(batch.batch))], dtype=object
                )
                # repeat to align with repeated responses in rollout
                batch = batch.repeat(repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True)
                batch = batch.union(gen_batch_output)

                # Balance the number of valid tokens across DP ranks.
                # NOTE: This usually changes the order of data in the `batch`,
                # which won't affect the advantage calculation (since it's based on uid),
                # but might affect the loss calculation (due to the change of mini-batching).
                # TODO: Decouple the DP balancing and mini-batching.
                self._balance_batch(batch, metrics=metrics)

                # compute global_valid tokens
                batch.meta_info["global_token_num"] = torch.sum(batch.batch["attention_mask"], dim=-1).tolist()

                # recompute old_log_probs
                with marked_timer("old_log_prob", timing_raw):
                    old_log_prob = self.actor_rollout_wg.compute_log_prob(batch)
                    batch = batch.union(old_log_prob)

                if self.use_reference_policy:
                    # compute reference log_prob
                    with marked_timer("ref", timing_raw):
                        ref_log_prob = self.ref_policy_wg.compute_ref_log_prob(batch)
                        batch = batch.union(ref_log_prob)

                # compute values
                if self.use_critic:
                    with marked_timer("values", timing_raw):
                        values = self.critic_wg.compute_values(batch)
                        batch = batch.union(values)

                with marked_timer("adv", timing_raw):
                    # compute scores. Support both model and function-based.
                    # We first compute the scores using reward model. Then, we call reward_fn to combine
                    # the results from reward model and rule-based results.
                    if self.use_rm and "rm_scores" not in batch.batch.keys():
                        # we first compute reward model score
                        reward_tensor = self.rm_wg.compute_rm_score(batch)
                        batch = batch.union(reward_tensor)

                    # we combine with rule-based rm
                    reward_tensor, _ = compute_reward(batch, self.reward_fn)
                    batch.batch["token_level_scores"] = reward_tensor

                    # compute rewards. apply_kl_penalty if available
                    if self.config.algorithm.use_kl_in_reward:
                        batch, kl_metrics = apply_kl_penalty(
                            batch, kl_ctrl=self.kl_ctrl_in_reward, kl_penalty=self.config.algorithm.kl_penalty
                        )
                        metrics.update(kl_metrics)
                    else:
                        batch.batch["token_level_rewards"] = batch.batch["token_level_scores"]

                    # compute advantages, executed on the driver process
                    norm_adv_by_std_in_grpo = self.config.algorithm.get("norm_adv_by_std_in_grpo", True)
                    batch = compute_advantage(
                        batch,
                        adv_estimator=self.config.algorithm.adv_estimator,
                        gamma=self.config.algorithm.gamma,
                        lam=self.config.algorithm.lam,
                        num_repeat=self.config.actor_rollout_ref.rollout.n,
                        norm_adv_by_std_in_grpo=norm_adv_by_std_in_grpo,
                        config=self.config.algorithm,
                    )

                # implement critic warmup
                if self.config.trainer.critic_warmup <= self.global_steps:
                    # update actor
                    with marked_timer("update_actor_call", timing_raw):
                        actor_output = self.actor_rollout_wg.update_actor(batch)
                else:
                    actor_output = None

                # update critic
                if self.use_critic:
                    with marked_timer("update_critic_call", timing_raw):
                        critic_output = self.critic_wg.update_critic(batch)

                # NOTE: make sure you set blocking=False in update_actor and update_critic in the worker class
                with marked_timer("update_actor_critic", timing_raw):
                    critic_output = critic_output.get()
                    critic_output_metrics = reduce_metrics(critic_output.meta_info["metrics"])
                    metrics.update(critic_output_metrics)

                    if actor_output is not None:
                        actor_output = actor_output.get()
                        actor_output_metrics = reduce_metrics(actor_output.meta_info["metrics"])
                        metrics.update(actor_output_metrics)

                # validate
                if (
                    self.val_reward_fn is not None
                    and self.config.trainer.test_freq > 0
                    and (is_last_step or self.global_steps % self.config.trainer.test_freq == 0)
                ):
                    with marked_timer("testing", timing_raw):
                        val_metrics: dict = self._validate()
                    if is_last_step:
                        last_val_metrics = val_metrics
                    metrics.update(val_metrics)

                if self.config.trainer.save_freq > 0 and (
                    is_last_step or self.global_steps % self.config.trainer.save_freq == 0
                ):
                    with marked_timer("save_checkpoint", timing_raw):
                        self._save_checkpoint()

            # collect metrics
            metrics.update(compute_data_metrics(batch=batch, use_critic=self.use_critic))
            metrics.update(compute_timing_metrics(batch=batch, timing_raw=timing_raw))

            # TODO: make a canonical logger that supports various backends
            logger.log(data=metrics, step=self.global_steps)

            if self.global_steps >= self.total_training_steps:
                pprint(f"Final validation metrics: {last_val_metrics}")
                return

            self.global_steps += 1


================================================
FILE: verl_distillation/examples/tuning/0.5b/qwen2-0.5b_grpo-lora_1_h100_fsdp_vllm.sh
================================================
# -*- coding: utf-8 -*-
export CUDA_VISIBLE_DEVICES=4
NOW=$(date +%Y%m%d)
export WANDB_DIR=gsm8k-grpo-lora-qwen2.5-0.5b-${NOW}
export WANDB_PROJECT=${WANDB_DIR}
export WANDB_EXP=0.5b-${NOW}

MODEL_PATH=Qwen/Qwen2.5-0.5B-Instruct

set -x

nproc_per_gpu=1
nnodes=1
ngpu_per_node=1
total_procs=$(( nproc_per_gpu * nnodes * ngpu_per_node ))
mini_batch_size=$(( total_procs ))

python3 -m verl.trainer.main_ppo \
    algorithm.adv_estimator=grpo \
    trainer.val_before_train=False \
    data.train_files=$HOME/data/gsm8k/train.parquet \
data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=${total_procs} \ data.val_batch_size=${total_procs} \ data.max_prompt_length=512 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.shuffle=False \ actor_rollout_ref.model.path=$MODEL_PATH \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.model.lora_rank=32 \ actor_rollout_ref.model.lora_alpha=32 \ actor_rollout_ref.model.target_modules=all-linear \ actor_rollout_ref.actor.optim.lr=3e-5 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=${mini_batch_size} \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=${mini_batch_size} \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.fsdp_config.fsdp_size=-1 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=True \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=True \ actor_rollout_ref.rollout.log_prob_micro_batch_size=${mini_batch_size} \ actor_rollout_ref.rollout.tensor_model_parallel_size=1 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.1 \ actor_rollout_ref.rollout.n=1 \ actor_rollout_ref.rollout.max_num_seqs=512 \ actor_rollout_ref.rollout.max_model_len=1536 \ actor_rollout_ref.rollout.max_num_batched_tokens=1536 \ actor_rollout_ref.rollout.enable_chunked_prefill=False \ actor_rollout_ref.rollout.load_format=safetensors \ actor_rollout_ref.rollout.layered_summon=True \ actor_rollout_ref.ref.log_prob_micro_batch_size=${mini_batch_size} \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=1 \ actor_rollout_ref.actor.entropy_coeff=0.001 \ algorithm.kl_ctrl.kl_coef=0.001 \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name=${WANDB_PROJECT} \ trainer.experiment_name=${WANDB_EXP} \ trainer.n_gpus_per_node=1 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=1 $@ 2>&1 | tee ${WANDB_PROJECT}.log ================================================ FILE: verl_distillation/examples/tuning/1.5b/qwen2-1.5b_grpo-lora_1_h100_fsdp_vllm.sh ================================================ # -*- coding: utf-8 -*- export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 NOW=$(date +%Y%m%d) export WANDB_DIR=gsm8k-grpo-lora-qwen2.5-1.5b-${NOW} export WANDB_PROJECT=${WANDB_DIR} export WANDB_EXP=1.5b-${NOW} MODEL_PATH=Qwen/Qwen2.5-1.5B-Instruct set -x nproc_per_gpu=128 nnodes=1 ngpu_per_node=1 total_procs=$(( nproc_per_gpu * nnodes * ngpu_per_node )) mini_batch_size=$(( total_procs )) python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=data/gsm8k/train.parquet \ data.val_files=data/gsm8k/test.parquet \ data.train_batch_size=${total_procs} \ data.val_batch_size=${total_procs} \ data.max_prompt_length=512 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.shuffle=False \ actor_rollout_ref.model.path=$MODEL_PATH \ actor_rollout_ref.model.use_shm=True \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.model.lora_rank=32 \ actor_rollout_ref.model.lora_alpha=32 \ actor_rollout_ref.model.target_modules=all-linear \ actor_rollout_ref.actor.optim.lr=3e-5 \ actor_rollout_ref.model.use_remove_padding=True \ 
actor_rollout_ref.actor.ppo_mini_batch_size=${mini_batch_size} \ actor_rollout_ref.actor.ppo_micro_batch_size=${mini_batch_size} \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.fsdp_config.fsdp_size=-1 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=True \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=True \ actor_rollout_ref.rollout.log_prob_micro_batch_size=${mini_batch_size} \ actor_rollout_ref.rollout.tensor_model_parallel_size=1 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.1 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.rollout.max_num_seqs=512 \ actor_rollout_ref.rollout.max_model_len=1536 \ actor_rollout_ref.rollout.max_num_batched_tokens=1536 \ actor_rollout_ref.rollout.enable_chunked_prefill=False \ actor_rollout_ref.rollout.load_format=safetensors \ actor_rollout_ref.rollout.layered_summon=True \ actor_rollout_ref.ref.log_prob_micro_batch_size=${mini_batch_size} \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=1 \ actor_rollout_ref.actor.entropy_coeff=0.001 \ algorithm.kl_ctrl.kl_coef=0.001 \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name=${WANDB_PROJECT} \ trainer.experiment_name=${WANDB_EXP} \ trainer.n_gpus_per_node=1 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=1 $@ 2>&1 | tee ${WANDB_PROJECT}.log ================================================ FILE: verl_distillation/examples/tuning/14b/qwen2-14b_grpo-lora_2_h100_fsdp_vllm.sh ================================================ # -*- coding: utf-8 -*- export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 NOW=$(date +%Y%m%d) export WANDB_DIR=gsm8k-grpo-lora-qwen2.5-14b-${NOW} export WANDB_PROJECT=${WANDB_DIR} export WANDB_EXP=14b-${NOW} MODEL_PATH=Qwen/Qwen2.5-14B-Instruct set -x nproc_per_gpu=58 # 32√ → 64× → 48√ → 56√ → 60× → 58√ → 59× nnodes=1 ngpu_per_node=2 total_procs=$(( nproc_per_gpu * nnodes * ngpu_per_node )) mini_batch_size=$(( total_procs )) python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=data/gsm8k/train.parquet \ data.val_files=data/gsm8k/test.parquet \ data.train_batch_size=${total_procs} \ data.val_batch_size=${total_procs} \ data.max_prompt_length=512 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.shuffle=False \ actor_rollout_ref.model.path=$MODEL_PATH \ actor_rollout_ref.model.use_shm=True \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.model.lora_rank=32 \ actor_rollout_ref.model.lora_alpha=32 \ actor_rollout_ref.model.target_modules=all-linear \ actor_rollout_ref.actor.optim.lr=3e-5 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=${mini_batch_size} \ actor_rollout_ref.actor.ppo_micro_batch_size=${mini_batch_size} \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.fsdp_config.fsdp_size=-1 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=True \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=True \ 
actor_rollout_ref.rollout.log_prob_micro_batch_size=${mini_batch_size} \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.25 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.rollout.max_num_seqs=512 \ actor_rollout_ref.rollout.max_model_len=1536 \ actor_rollout_ref.rollout.max_num_batched_tokens=1536 \ actor_rollout_ref.rollout.enable_chunked_prefill=False \ actor_rollout_ref.rollout.load_format=safetensors \ actor_rollout_ref.rollout.layered_summon=True \ actor_rollout_ref.ref.log_prob_micro_batch_size=${mini_batch_size} \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=2 \ actor_rollout_ref.actor.entropy_coeff=0.001 \ algorithm.kl_ctrl.kl_coef=0.001 \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name=${WANDB_PROJECT} \ trainer.experiment_name=${WANDB_EXP} \ trainer.n_gpus_per_node=2 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=1 $@ 2>&1 | tee ${WANDB_PROJECT}.log ================================================ FILE: verl_distillation/examples/tuning/14b/qwen2_14b_grpo_4_h800_fsdp_vllm.sh ================================================ set -x gsm8k_train_path=$HOME/data/rlhf/gsm8k/train.parquet gsm8k_test_path=$HOME/data/rlhf/math/test.parquet model_path=Qwen/Qwen2.5-Coder-14B-Instruct train_files="['$gsm8k_train_path']" test_files="['$gsm8k_test_path']" PYTHONPATH=/opt/tiger/open_verl python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=$model_path \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=16 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=True \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=True \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \ actor_rollout_ref.rollout.tensor_model_parallel_size=4 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=16 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_gsm8k' \ trainer.experiment_name='qwen2_14b_function_rm' \ trainer.n_gpus_per_node=4 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=5 \ trainer.total_epochs=1 $@ ================================================ FILE: verl_distillation/examples/tuning/32b/qwen2-32b_grpo-lora_4_h100_fsdp_vllm.sh ================================================ # -*- coding: utf-8 -*- export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 NOW=$(date +%Y%m%d) export WANDB_DIR=gsm8k-grpo-lora-qwen2.5-32b-${NOW} export WANDB_PROJECT=${WANDB_DIR} export WANDB_EXP=32b-${NOW} 
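# note on the "N√ → N×" annotations next to nproc_per_gpu in these tuning scripts:
# they appear to record a manual search for the largest per-GPU sample count that
# fits in memory (√ = ran successfully, × = failed), with the final value kept.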
MODEL_PATH=Qwen/Qwen2.5-32B-Instruct set -x nproc_per_gpu=45 # 32√ → 64× → 48× → 40√ → 44√ → 46× → 45× nnodes=1 ngpu_per_node=4 total_procs=$(( nproc_per_gpu * nnodes * ngpu_per_node )) mini_batch_size=$(( total_procs )) python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=data/gsm8k/train.parquet \ data.val_files=data/gsm8k/test.parquet \ data.train_batch_size=${total_procs} \ data.val_batch_size=${total_procs} \ data.max_prompt_length=512 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.shuffle=False \ actor_rollout_ref.model.path=$MODEL_PATH \ actor_rollout_ref.model.use_shm=True \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.model.lora_rank=32 \ actor_rollout_ref.model.lora_alpha=32 \ actor_rollout_ref.model.target_modules=all-linear \ actor_rollout_ref.actor.optim.lr=3e-5 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=${mini_batch_size} \ actor_rollout_ref.actor.ppo_micro_batch_size=${mini_batch_size} \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.fsdp_config.fsdp_size=-1 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=True \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=True \ actor_rollout_ref.rollout.log_prob_micro_batch_size=${mini_batch_size} \ actor_rollout_ref.rollout.tensor_model_parallel_size=4 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.3 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.rollout.max_num_seqs=512 \ actor_rollout_ref.rollout.max_model_len=1536 \ actor_rollout_ref.rollout.max_num_batched_tokens=1536 \ actor_rollout_ref.rollout.enable_chunked_prefill=False \ actor_rollout_ref.rollout.load_format=safetensors \ actor_rollout_ref.rollout.layered_summon=True \ actor_rollout_ref.ref.log_prob_micro_batch_size=${mini_batch_size} \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=2 \ actor_rollout_ref.actor.entropy_coeff=0.001 \ algorithm.kl_ctrl.kl_coef=0.001 \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name=${WANDB_PROJECT} \ trainer.experiment_name=${WANDB_EXP} \ trainer.n_gpus_per_node=4 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=1 $@ 2>&1 | tee ${WANDB_PROJECT}.log ================================================ FILE: verl_distillation/examples/tuning/32b/qwen2_32B_grpo_8_h20_megatron_vllm.sh ================================================ set -x # we need this to avoid fragmentation of GPU memory export PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:256 gsm8k_train_path=$HOME/data/rlhf/gsm8k/train.parquet gsm8k_test_path=$HOME/data/rlhf/math/test.parquet train_files="['$gsm8k_train_path']" test_files="['$gsm8k_test_path']" model_path=Qwen/Qwen2.5-32B python3 -m verl.trainer.main_ppo --config-path=config \ --config-name='ppo_megatron_trainer.yaml'\ algorithm.adv_estimator=grpo \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=512 \ data.max_prompt_length=2048 \ data.max_response_length=6144 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=$model_path \ actor_rollout_ref.actor.optim.lr=1e-6 \ 
actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=1 \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=8 \ actor_rollout_ref.actor.megatron.param_offload=True \ actor_rollout_ref.actor.megatron.grad_offload=True \ actor_rollout_ref.actor.megatron.optimizer_offload=True \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=1 \ actor_rollout_ref.rollout.tensor_model_parallel_size=8 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=1 \ actor_rollout_ref.ref.megatron.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger=console \ trainer.project_name='megatron_vllm_qwen2_32b' \ trainer.experiment_name='qwen2_32b_grpo_8_h20' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/tuning/3b/qwen2-3b_grpo-lora_1_h100_fsdp_vllm.sh ================================================ # -*- coding: utf-8 -*- export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 NOW=$(date +%Y%m%d) export WANDB_DIR=gsm8k-grpo-lora-qwen2.5-3b-${NOW} export WANDB_PROJECT=${WANDB_DIR} export WANDB_EXP=3b-${NOW} MODEL_PATH=Qwen/Qwen2.5-3B-Instruct set -x nproc_per_gpu=62 nnodes=1 ngpu_per_node=1 total_procs=$(( nproc_per_gpu * nnodes * ngpu_per_node )) mini_batch_size=$(( total_procs )) python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=data/gsm8k/train.parquet \ data.val_files=data/gsm8k/test.parquet \ data.train_batch_size=${total_procs} \ data.val_batch_size=${total_procs} \ data.max_prompt_length=512 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.shuffle=False \ actor_rollout_ref.model.path=$MODEL_PATH \ actor_rollout_ref.model.use_shm=True \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.model.lora_rank=32 \ actor_rollout_ref.model.lora_alpha=32 \ actor_rollout_ref.model.target_modules=all-linear \ actor_rollout_ref.actor.optim.lr=3e-5 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=${mini_batch_size} \ actor_rollout_ref.actor.ppo_micro_batch_size=${mini_batch_size} \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.fsdp_config.fsdp_size=-1 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=True \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=True \ actor_rollout_ref.rollout.log_prob_micro_batch_size=${mini_batch_size} \ actor_rollout_ref.rollout.tensor_model_parallel_size=1 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.1 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.rollout.max_num_seqs=512 \ actor_rollout_ref.rollout.max_model_len=1536 \ actor_rollout_ref.rollout.max_num_batched_tokens=1536 \ actor_rollout_ref.rollout.enable_chunked_prefill=False \ actor_rollout_ref.rollout.load_format=safetensors \ actor_rollout_ref.rollout.layered_summon=True \ 
    actor_rollout_ref.ref.log_prob_micro_batch_size=${mini_batch_size} \
    actor_rollout_ref.ref.fsdp_config.param_offload=True \
    actor_rollout_ref.actor.ulysses_sequence_parallel_size=1 \
    actor_rollout_ref.actor.entropy_coeff=0.001 \
    algorithm.kl_ctrl.kl_coef=0.001 \
    algorithm.use_kl_in_reward=False \
    trainer.critic_warmup=0 \
    trainer.logger='["console","wandb"]' \
    trainer.project_name=${WANDB_PROJECT} \
    trainer.experiment_name=${WANDB_EXP} \
    trainer.n_gpus_per_node=1 \
    trainer.nnodes=1 \
    trainer.save_freq=20 \
    trainer.test_freq=5 \
    trainer.total_epochs=1 $@ 2>&1 | tee ${WANDB_PROJECT}.log


================================================
FILE: verl_distillation/examples/tuning/70b/qwen2-70b_grpo_32_h20_fsdp_vllm.sh
================================================
set -x

gsm8k_train_path=$HOME/data/rlhf/gsm8k/train.parquet
gsm8k_val_path=$HOME/data/rlhf/math/test.parquet
model_path=Qwen/Qwen2-72B-Instruct

python3 -m verl.trainer.main_ppo \
    algorithm.adv_estimator=grpo \
    data.train_files=$gsm8k_train_path \
    data.val_files=$gsm8k_val_path \
    data.train_batch_size=1024 \
    data.max_prompt_length=512 \
    data.max_response_length=512 \
    data.filter_overlong_prompts=True \
    data.truncation='error' \
    actor_rollout_ref.model.path=$model_path \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.model.use_remove_padding=True \
    actor_rollout_ref.actor.ppo_mini_batch_size=256 \
    actor_rollout_ref.actor.use_dynamic_bsz=True \
    actor_rollout_ref.actor.ppo_max_token_len_per_gpu=24000 \
    actor_rollout_ref.actor.use_kl_loss=True \
    actor_rollout_ref.actor.kl_loss_coef=0.001 \
    actor_rollout_ref.actor.kl_loss_type=low_var_kl \
    actor_rollout_ref.actor.entropy_coeff=0 \
    actor_rollout_ref.model.enable_gradient_checkpointing=True \
    actor_rollout_ref.actor.fsdp_config.param_offload=True \
    actor_rollout_ref.actor.fsdp_config.optimizer_offload=True \
    actor_rollout_ref.rollout.tensor_model_parallel_size=16 \
    actor_rollout_ref.rollout.name=vllm \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \
    actor_rollout_ref.rollout.n=5 \
    actor_rollout_ref.ref.fsdp_config.param_offload=True \
    algorithm.use_kl_in_reward=False \
    trainer.critic_warmup=0 \
    trainer.logger='["console","wandb"]' \
    trainer.project_name='verl_grpo_example_gsm8k' \
    trainer.experiment_name='Qwen2_72B_Instruct' \
    trainer.n_gpus_per_node=8 \
    trainer.nnodes=4 \
    trainer.save_freq=-1 \
    trainer.test_freq=5 \
    trainer.total_epochs=1 $@


================================================
FILE: verl_distillation/examples/tuning/70b/qwen2-70b_grpo_32_h800_fsdp_vllm.sh
================================================
set -x

#### important: vllm version must be >= 0.8.3

gsm8k_train_path=$HOME/data/rlhf/gsm8k/train.parquet
gsm8k_val_path=$HOME/data/rlhf/math/test.parquet
model_path=Qwen/Qwen2-72B-Instruct

python3 -m verl.trainer.main_ppo \
    algorithm.adv_estimator=grpo \
    data.train_files=$gsm8k_train_path \
    data.val_files=$gsm8k_val_path \
    data.train_batch_size=1024 \
    data.max_prompt_length=512 \
    data.max_response_length=512 \
    data.filter_overlong_prompts=True \
    data.truncation='error' \
    actor_rollout_ref.model.path=$model_path \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.model.use_remove_padding=True \
    actor_rollout_ref.actor.ppo_mini_batch_size=256 \
    actor_rollout_ref.actor.use_dynamic_bsz=True \
    actor_rollout_ref.actor.ppo_max_token_len_per_gpu=24000 \
    actor_rollout_ref.actor.use_kl_loss=True \
    actor_rollout_ref.actor.kl_loss_coef=0.001 \
    actor_rollout_ref.actor.kl_loss_type=low_var_kl \
    actor_rollout_ref.actor.entropy_coeff=0 \
actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=True \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=True \ actor_rollout_ref.rollout.tensor_model_parallel_size=16 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_gsm8k' \ trainer.experiment_name='Qwen2_72B_Instruct' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=4 \ trainer.save_freq=-1 \ trainer.test_freq=5 \ trainer.total_epochs=1 $@ ================================================ FILE: verl_distillation/examples/tuning/70b/qwen2-72b_grpo-lora_8_h100_fsdp_vllm.sh ================================================ # -*- coding: utf-8 -*- export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 NOW=$(date +%Y%m%d) export WANDB_DIR=gsm8k-grpo-lora-qwen2.5-72b-${NOW} export WANDB_PROJECT=${WANDB_DIR} export WANDB_EXP=72b-${NOW} MODEL_PATH=Qwen/Qwen2.5-72B-Instruct set -x nproc_per_gpu=22 # 16√ → 32× → 24× → 20√ → 22√ → 23× nnodes=1 ngpu_per_node=8 total_procs=$(( nproc_per_gpu * nnodes * ngpu_per_node )) mini_batch_size=$(( total_procs )) python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=data/gsm8k/train.parquet \ data.val_files=data/gsm8k/test.parquet \ data.train_batch_size=${total_procs} \ data.val_batch_size=${total_procs} \ data.max_prompt_length=512 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.shuffle=False \ actor_rollout_ref.model.path=$MODEL_PATH \ actor_rollout_ref.model.use_shm=True \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.model.lora_rank=32 \ actor_rollout_ref.model.lora_alpha=32 \ actor_rollout_ref.model.target_modules=all-linear \ actor_rollout_ref.actor.optim.lr=3e-5 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=${mini_batch_size} \ actor_rollout_ref.actor.ppo_micro_batch_size=${mini_batch_size} \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.fsdp_config.fsdp_size=-1 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=True \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=True \ actor_rollout_ref.rollout.log_prob_micro_batch_size=${mini_batch_size} \ actor_rollout_ref.rollout.tensor_model_parallel_size=8 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.rollout.max_num_seqs=512 \ actor_rollout_ref.rollout.max_model_len=1536 \ actor_rollout_ref.rollout.max_num_batched_tokens=1536 \ actor_rollout_ref.rollout.enable_chunked_prefill=False \ actor_rollout_ref.rollout.load_format=safetensors \ actor_rollout_ref.rollout.layered_summon=True \ actor_rollout_ref.ref.log_prob_micro_batch_size=${mini_batch_size} \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=2 \ actor_rollout_ref.actor.entropy_coeff=0.001 \ algorithm.kl_ctrl.kl_coef=0.001 \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name=${WANDB_PROJECT} \ 
trainer.experiment_name=${WANDB_EXP} \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ 2>&1 | tee ${WANDB_PROJECT}.log ================================================ FILE: verl_distillation/examples/tuning/7b/qwen2-7b_grpo-lora_1_h100_fsdp_vllm.sh ================================================ # -*- coding: utf-8 -*- export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 NOW=$(date +%Y%m%d) export WANDB_DIR=gsm8k-grpo-lora-qwen2.5-7b-${NOW} export WANDB_PROJECT=${WANDB_DIR} export WANDB_EXP=7b-${NOW} MODEL_PATH=Qwen/Qwen2.5-7B-Instruct set -x nproc_per_gpu=16 # 64√ → 128× → 96√ → 112× → 104× → 100√ → 102× → 101× nnodes=1 ngpu_per_node=1 total_procs=$(( nproc_per_gpu * nnodes * ngpu_per_node )) mini_batch_size=$(( total_procs )) python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=data/gsm8k/train.parquet \ data.val_files=data/gsm8k/test.parquet \ data.train_batch_size=${total_procs} \ data.val_batch_size=${total_procs} \ data.max_prompt_length=512 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.shuffle=False \ actor_rollout_ref.model.path=$MODEL_PATH \ actor_rollout_ref.model.use_shm=True \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.model.lora_rank=32 \ actor_rollout_ref.model.lora_alpha=32 \ actor_rollout_ref.model.target_modules=all-linear \ actor_rollout_ref.actor.optim.lr=3e-5 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=${mini_batch_size} \ actor_rollout_ref.actor.ppo_micro_batch_size=${mini_batch_size} \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.fsdp_config.fsdp_size=-1 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=True \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=True \ actor_rollout_ref.rollout.log_prob_micro_batch_size=${mini_batch_size} \ actor_rollout_ref.rollout.tensor_model_parallel_size=1 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.2 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.rollout.max_num_seqs=512 \ actor_rollout_ref.rollout.max_model_len=1536 \ actor_rollout_ref.rollout.max_num_batched_tokens=1536 \ actor_rollout_ref.rollout.enable_chunked_prefill=False \ actor_rollout_ref.rollout.load_format=safetensors \ actor_rollout_ref.rollout.layered_summon=True \ actor_rollout_ref.ref.log_prob_micro_batch_size=${mini_batch_size} \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=1 \ actor_rollout_ref.actor.entropy_coeff=0.001 \ algorithm.kl_ctrl.kl_coef=0.001 \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name=${WANDB_PROJECT} \ trainer.experiment_name=${WANDB_EXP} \ trainer.n_gpus_per_node=1 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=1 $@ 2>&1 | tee ${WANDB_PROJECT}.log ================================================ FILE: verl_distillation/examples/tuning/7b/qwen2-7b_grpo_2_h800_fsdp_vllm.sh ================================================ set -x gsm8k_train_path=$HOME/data/rlhf/gsm8k/train.parquet gsm8k_test_path=$HOME/data/rlhf/math/test.parquet model_path=Qwen/Qwen2-7B-Instruct train_files="['$gsm8k_train_path']" 
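# the quoted "['...']" form passes a Python-style list literal through Hydra, so
# multiple parquet files could be supplied, e.g. "['$a','$b']" (illustrative).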
test_files="['$gsm8k_test_path']" PYTHONPATH=/opt/tiger/open_verl python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=$model_path \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=16 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=True \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=True \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=16 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_gsm8k' \ trainer.experiment_name='qwen2_7b_function_rm' \ trainer.n_gpus_per_node=2 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_distillation/examples/tutorial/agent_loop_get_started/agent_loop_tutorial.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Train ReAct agent with code sandbox\n", "\n", "In this tutorial, we will demonstrate how to train a [ReAct](https://arxiv.org/abs/2210.03629) agent to solve math problem with code sandbox.\n", "\n", "The agent works as follows:\n", "1. Given a math problem, the agent first query LLM to generate response and tool calls, which are python code to be executed in sandbox.\n", "2. If there is a tool call, the agent execute the python code in code sandbox.\n", "3. After code execution, the agent get the result from sandbox and append to chat history.\n", "4. The agent query LLM again until no tool call or max context length reached.\n", "\n", "\n", "
\n", " \"ReAct\"\n", "
\n", " source: LangGraph\n", "
\n", "
" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 1. Prerequisite\n", "\n", "To run the examples in this notebook, you need to install the verl package first.\n", "```bash\n", "git clone https://github.com/volcengine/verl\n", "cd verl\n", "pip install -e .\n", "```" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "2025-10-16 23:20:11,956\tINFO worker.py:2004 -- Started a local Ray instance. View the dashboard at \u001b[1m\u001b[32mhttp://127.0.0.1:8265 \u001b[39m\u001b[22m\n", "/usr/local/lib/python3.12/dist-packages/ray/_private/worker.py:2052: FutureWarning: Tip: In future versions of Ray, Ray will no longer override accelerator visible devices env var if num_gpus=0 or num_gpus=None (default). To enable this behavior and turn off this error message, set RAY_ACCEL_ENV_VAR_OVERRIDE_ON_ZERO=0\n", " warnings.warn(\n" ] } ], "source": [ "import asyncio\n", "import sys\n", "import tempfile\n", "import os\n", "import socket\n", "import json\n", "\n", "import requests\n", "import ray\n", "import fastapi\n", "import uvicorn\n", "from starlette.requests import Request\n", "from starlette.responses import JSONResponse\n", "from pprint import pprint\n", "\n", "import verl\n", "\n", "ray.init()\n", "verl_config_dir = os.path.join(os.path.dirname(verl.__file__), \"trainer/config\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "For demo purpose, we will use Qwen/Qwen3-1.7B as the LLM. First, let's download required model and dataset used in this tutorial." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import pyarrow.parquet as pq\n", "from huggingface_hub import snapshot_download\n", "\n", "snapshot_download(\n", " repo_id=\"verl-team/lighteval-MATH-preprocessed\",\n", " repo_type=\"dataset\",\n", " local_dir=os.path.expanduser(\"~/verl-team/lighteval-MATH-preprocessed\"),\n", ")\n", "snapshot_download(\n", " repo_id=\"Qwen/Qwen3-1.7B\",\n", " repo_type=\"model\",\n", " local_dir=os.path.expanduser(\"~/Qwen/Qwen3-1.7B\"),\n", ")\n", "\n", "model_path = os.path.expanduser(\"~/Qwen/Qwen3-1.7B\")\n", "train_file = os.path.expanduser(\"~/verl-team/lighteval-MATH-preprocessed/train.parquet\")\n", "test_file = os.path.expanduser(\"~/verl-team/lighteval-MATH-preprocessed/test.parquet\")\n", "\n", "test = pq.read_table(test_file)\n", "test_file = os.path.expanduser(\"~/verl-team/lighteval-MATH-preprocessed/test_100.parquet\")\n", "pq.write_table(test[:100], test_file)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "verl support both vllm and sglang rollout server for high performance inference. This tutorial has been tested on both vllm and sglang, you can choose either of them to run the tutorial." ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [], "source": [ "rollout_name = \"???\" # vllm or sglang" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 2. Basic tool call\n", "For beginning, let's see how we can do basic tool call in verl with example from [Transformer tool use](https://huggingface.co/docs/transformers/main/chat_extras#tool-use). To use tool in verl, we need to define a tool class that inherits from `BaseTool`, and implement the following methods:\n", "- `get_openai_tool_schema`: return the schema of the tool in `OpenAIFunctionToolSchema` format.\n", "- `execute`: execute the tool with the given parameters, and return the result in `ToolResponse` format." 
] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "{\n", " \"type\": \"function\",\n", " \"function\": {\n", " \"name\": \"get_current_temperature\",\n", " \"description\": \"Get current temperature at a location.\",\n", " \"parameters\": {\n", " \"type\": \"object\",\n", " \"properties\": {\n", " \"location\": {\n", " \"type\": \"string\",\n", " \"description\": \"The location to get the temperature for, in the format \\\"City, State, Country\\\".\"\n", " },\n", " \"unit\": {\n", " \"type\": \"string\",\n", " \"description\": \"The unit to return the temperature in. Defaults to \\\"celsius\\\".\",\n", " \"enum\": [\n", " \"celsius\",\n", " \"fahrenheit\"\n", " ]\n", " }\n", " },\n", " \"required\": [\n", " \"location\"\n", " ]\n", " }\n", " }\n", "}\n" ] } ], "source": [ "from transformers.utils import get_json_schema\n", "from verl.tools.base_tool import BaseTool, OpenAIFunctionToolSchema, ToolResponse\n", "\n", "\n", "class WeatherTool(BaseTool):\n", " def get_current_temperature(self, location: str, unit: str = \"celsius\"):\n", " \"\"\"Get current temperature at a location.\n", "\n", " Args:\n", " location: The location to get the temperature for, in the format \"City, State, Country\".\n", " unit: The unit to return the temperature in. Defaults to \"celsius\". (choices: [\"celsius\", \"fahrenheit\"])\n", "\n", " Returns:\n", " the temperature, the location, and the unit in a dict\n", " \"\"\"\n", " return {\n", " \"temperature\": 26.1,\n", " \"location\": location,\n", " \"unit\": unit,\n", " }\n", "\n", " def get_openai_tool_schema(self) -> OpenAIFunctionToolSchema:\n", " schema = get_json_schema(self.get_current_temperature)\n", " return OpenAIFunctionToolSchema(**schema)\n", "\n", " async def execute(self, instance_id: str, parameters: dict, **kwargs) -> tuple[ToolResponse, float, dict]:\n", " try:\n", " result = self.get_current_temperature(**parameters)\n", " return ToolResponse(text=json.dumps(result)), 0, {}\n", " except Exception as e:\n", " return ToolResponse(text=str(e)), 0, {}\n", "\n", "\n", "weather_tool = WeatherTool(config={}, tool_schema=None)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Next, let's launch a standalone rollout server without hybrid engine (which is more heavy to start) to test the basic tool call." 
] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from hydra import compose, initialize_config_dir\n", "from verl.workers.rollout.replica import get_rollout_replica_class\n", "\n", "with initialize_config_dir(config_dir=verl_config_dir):\n", " config = compose(\n", " config_name=\"ppo_trainer\",\n", " overrides=[\n", " \"actor_rollout_ref.rollout.name=\" + rollout_name,\n", " \"actor_rollout_ref.rollout.mode=async\",\n", " \"actor_rollout_ref.rollout.tensor_model_parallel_size=1\",\n", " \"actor_rollout_ref.model.path=\" + model_path,\n", " \"actor_rollout_ref.rollout.response_length=4096\",\n", " \"actor_rollout_ref.rollout.skip_tokenizer_init=False\",\n", " \"+actor_rollout_ref.rollout.engine_kwargs.vllm.enable_auto_tool_choice=True\",\n", " \"+actor_rollout_ref.rollout.engine_kwargs.vllm.tool_call_parser=hermes\",\n", " \"+actor_rollout_ref.rollout.engine_kwargs.sglang.tool_call_parser=qwen25\",\n", " ],\n", " )\n", "\n", "rollout_server_class = get_rollout_replica_class(config.actor_rollout_ref.rollout.name)\n", "rollout_server = rollout_server_class(\n", " replica_rank=0,\n", " config=config.actor_rollout_ref.rollout,\n", " model_config=config.actor_rollout_ref.model,\n", ")\n", "\n", "await rollout_server.init_standalone()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Then, we can query LLM with openai client. Note that we need to pass the tool schema to server to guide LLM generating tool calls. We can see that the LLM correctly generates a tool call to get the temperature in Paris." ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "[{'content': \"Hey, what's the temperature in Paris right now?\", 'role': 'user'},\n", " {'role': 'assistant',\n", " 'tool_calls': [{'function': {'arguments': '{\"location\": \"Paris, France\"}',\n", " 'name': 'get_current_temperature'},\n", " 'id': 'call_b10bdde504a0411690e96b55',\n", " 'index': -1,\n", " 'type': 'function'}]}]\n" ] } ], "source": [ "from openai import AsyncOpenAI\n", "\n", "client = AsyncOpenAI(\n", " api_key=\"dummy\",\n", " base_url=f\"http://{rollout_server._server_address}/v1\",\n", ")\n", "\n", "messages = [{\"role\": \"user\", \"content\": \"Hey, what's the temperature in Paris right now?\"}]\n", "completion = await client.chat.completions.create(\n", " model=config.actor_rollout_ref.model.path,\n", " messages=messages,\n", " tools=[weather_tool.tool_schema.model_dump(exclude_unset=True, exclude_none=True)],\n", " extra_body={\n", " \"chat_template_kwargs\": {\"enable_thinking\": False},\n", " },\n", ")\n", "\n", "message = completion.choices[0].message.model_dump(exclude_unset=True, exclude_none=True)\n", "messages.append(message)\n", "pprint(messages)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "We can execute the tool call with arguments generated by LLM and get the temperature in Paris." ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "text='{\"temperature\": 26.1, \"location\": \"Paris, France\", \"unit\": \"celsius\"}' image=None video=None\n" ] } ], "source": [ "args = json.loads(message[\"tool_calls\"][0][\"function\"][\"arguments\"])\n", "tool_response, _, _ = await weather_tool.execute(\"\", args)\n", "print(tool_response)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Then, we can add the tool response to chat history and query LLM again. 
With the tool response, the LLM can generate a final response to the user." ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "[{'content': \"Hey, what's the temperature in Paris right now?\", 'role': 'user'},\n", " {'role': 'assistant',\n", " 'tool_calls': [{'function': {'arguments': '{\"location\": \"Paris, France\"}',\n", " 'name': 'get_current_temperature'},\n", " 'id': 'call_b10bdde504a0411690e96b55',\n", " 'index': -1,\n", " 'type': 'function'}]},\n", " {'content': '{\"temperature\": 26.1, \"location\": \"Paris, France\", \"unit\": '\n", " '\"celsius\"}',\n", " 'role': 'tool'},\n", " {'content': 'The current temperature in Paris is 26.1°C.',\n", " 'role': 'assistant'}]\n" ] } ], "source": [ "messages.append({\"role\": \"tool\", \"content\": tool_response.text})\n", "completion = await client.chat.completions.create(\n", " model=config.actor_rollout_ref.model.path,\n", " messages=messages,\n", " tools=[weather_tool.tool_schema.model_dump(exclude_unset=True, exclude_none=True)],\n", " extra_body={\n", " \"chat_template_kwargs\": {\"enable_thinking\": False},\n", " },\n", ")\n", "\n", "message = completion.choices[0].message.model_dump(exclude_unset=True, exclude_none=True)\n", "messages.append(message)\n", "pprint(messages)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 3. Advanced tool call with code sandbox\n", "\n", "Now, let's see a more realistic example of tool calling with a code sandbox, which is widely used in real-world applications.\n", "\n", "### 3.1 Implement a naive code sandbox\n", "\n", "To execute Python code snippets generated by the LLM, we need a code sandbox environment. In this tutorial, we will implement a very naive code sandbox, which is\n", "a FastAPI HTTP server with a `/run_code` endpoint. The server works as follows:\n", "1. Receive an HTTP request and write the Python code snippet to a temp file.\n", "2. Spawn a subprocess to execute the code, and capture the stdout and stderr of the subprocess.\n", "3. Return the stdout and stderr of the subprocess as the HTTP response.\n", "\n", "> 🚨 **WARNING:** This naive code sandbox is for demonstration purposes only; do not use it in production. Please use docker/kata containers for stronger isolation and security restrictions."
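, "\n",
    "As a quick sanity check (not part of the original tutorial), once the sandbox actor defined in the next cells is running, you can hit the endpoint directly with `requests` (imported at the top of this notebook); `sandbox_address` comes from the cell below:\n",
    "\n",
    "```python\n",
    "resp = requests.post(f\"http://{sandbox_address}/run_code\", json={\"code\": \"print(1 + 1)\"})\n",
    "print(resp.json()[\"run_result\"][\"stdout\"])  # expected output: 2\n",
    "```\n"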
] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [], "source": [ "@ray.remote(num_cpus=1)\n", "class Sandbox:\n", " \"\"\"Sandbox to execute python code.\"\"\"\n", "\n", " def __init__(self):\n", " self.address = ray._private.services.get_node_ip_address()\n", " self.port = self._get_free_port()\n", " asyncio.create_task(self._start_fastapi_server())\n", "\n", " async def code_execution(self, request: Request):\n", " request_json = await request.json()\n", " code = request_json[\"code\"]\n", " # print(f\"execute code:\\n{code}\")\n", "\n", " _, temp_file = tempfile.mkstemp(suffix=\".py\", prefix=\"temp_code\", dir=None, text=True)\n", " with open(temp_file, \"w\") as f:\n", " f.write(code)\n", "\n", " try:\n", " process = await asyncio.create_subprocess_exec(\n", " sys.executable, temp_file, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE\n", " )\n", "\n", " stdout, stderr = await process.communicate()\n", "\n", " response = {\n", " \"status\": \"Success\" if process.returncode == 0 else \"Failed\",\n", " \"run_result\": {\n", " \"status\": \"Finished\",\n", " \"stdout\": stdout.decode(),\n", " \"stderr\": stderr.decode(),\n", " \"return_code\": process.returncode,\n", " },\n", " }\n", " return JSONResponse(content=response)\n", " finally:\n", " try:\n", " os.unlink(temp_file)\n", " except Exception:\n", " pass\n", "\n", " def _get_free_port(self):\n", " with socket.socket() as sock:\n", " sock.bind((\"\", 0))\n", " return sock.getsockname()[1]\n", "\n", " async def _start_fastapi_server(self):\n", " app = fastapi.FastAPI()\n", " app.router.add_api_route(\"/run_code\", self.code_execution, methods=[\"POST\"])\n", "\n", " config = uvicorn.Config(app, host=[\"::\", \"0.0.0.0\"], port=self.port, log_level=\"warning\")\n", " server = uvicorn.Server(config)\n", " await server.serve()\n", "\n", " async def get_server_address(self) -> str:\n", " \"\"\"Get FastAPI server address.\"\"\"\n", " return f\"{self.address}:{self.port}\"" ] }, { "cell_type": "code", "execution_count": 10, "metadata": {}, "outputs": [], "source": [ "sandbox = Sandbox.remote()\n", "sandbox_address = ray.get(sandbox.get_server_address.remote())" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### 2.2 Define sandbox tool\n", "\n", "As shown in the previous section, we also defined a tool for the code sandbox. In the `execute` method, we send the code snippet to code sandbox by http request and get the output." ] }, { "cell_type": "code", "execution_count": 11, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "{\n", " \"type\": \"function\",\n", " \"function\": {\n", " \"name\": \"code_interpreter\",\n", " \"description\": \"Execute the code in the sandbox.\",\n", " \"parameters\": {\n", " \"type\": \"object\",\n", " \"properties\": {\n", " \"code\": {\n", " \"type\": \"string\",\n", " \"description\": \"The code to be executed.\"\n", " }\n", " },\n", " \"required\": [\n", " \"code\"\n", " ]\n", " }\n", " }\n", "}\n" ] } ], "source": [ "import re\n", "import aiohttp\n", "\n", "\n", "class SandboxTool(BaseTool):\n", " def __init__(self, config: dict, tool_schema: OpenAIFunctionToolSchema):\n", " super().__init__(config, tool_schema)\n", " # Different model may use different code pattern, e.g. 
python, py, etc.\n", " self.code_pattern = re.compile(r\"```py(.*?)```\", re.DOTALL)\n", "\n", " async def code_interpreter(self, code: str) -> str:\n", " \"\"\"Execute the code in the sandbox.\n", "\n", " Args:\n", " code: The code to be executed.\n", "\n", " Returns:\n", " str: The output of the code execution.\n", " \"\"\"\n", " async with aiohttp.ClientSession() as session:\n", " async with session.post(\n", " self.config.get(\"sandbox_fusion_url\"),\n", " json={\"code\": code},\n", " ) as resp:\n", " resp.raise_for_status()\n", " result = await resp.json()\n", " stdout, stderr = result[\"run_result\"][\"stdout\"], result[\"run_result\"][\"stderr\"]\n", " return stdout + stderr\n", "\n", " def get_openai_tool_schema(self) -> OpenAIFunctionToolSchema:\n", " schema = get_json_schema(self.code_interpreter)\n", " return OpenAIFunctionToolSchema(**schema)\n", "\n", " async def execute(self, instance_id: str, parameters: dict, **kwargs) -> tuple[ToolResponse, float, dict]:\n", " code = parameters[\"code\"]\n", " matches = self.code_pattern.findall(code)\n", " if matches:\n", " code = matches[0].strip()\n", "\n", " # NOTE: Some scripts may not explicitly print the result, so we need to add a print statement to the end of the script.\n", " # A better way is to SFT the model to make it print the result by default; we skip the SFT stage in this tutorial.\n", " lines = code.split(\"\\n\")\n", " for i, line in reversed(list(enumerate(lines))):\n", " if line == \"\":\n", " continue\n", " if not lines[i].startswith(\"print\"):\n", " lines[i] = f\"print({line})\"\n", " break\n", " code = \"\\n\".join(lines)\n", "\n", " result = await self.code_interpreter(code)\n", " return ToolResponse(text=result), 0.0, {}\n", "\n", "\n", "sandbox_tool = SandboxTool(config={\"sandbox_fusion_url\": f\"http://{sandbox_address}/run_code\"}, tool_schema=None)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "First, let's execute a valid code snippet and check the response's stdout." ] }, { "cell_type": "code", "execution_count": 12, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "(ToolResponse(text='sqrt(3)\\n', image=None, video=None), 0.0, {})\n" ] } ], "source": [ "code = \"\"\"```py\n", "import sympy\n", "\n", "print(sympy.sqrt(3))\n", "```\"\"\"\n", "\n", "print(await sandbox_tool.execute(instance_id=\"\", parameters={\"code\": code}))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Then, let's execute an invalid code snippet and check the response's stderr. The error message is important: it informs the LLM so it can fix the code in the next generation." ] }, { "cell_type": "code", "execution_count": 13, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "(ToolResponse(text='Traceback (most recent call last):\\n File \"/tmp/temp_code3e2f638_.py\", line 2, in \\n print(sympy.sqrt(3))\\n ^^^^^\\nNameError: name \\'sympy\\' is not defined\\n', image=None, video=None), 0.0, {})\n" ] } ], "source": [ "code_invalid = \"\"\"\n", "print(sympy.sqrt(3))\n", "\"\"\"\n", "\n", "print(await sandbox_tool.execute(instance_id=\"\", parameters={\"code\": code_invalid}))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### 3.3 Test sandbox tool\n", "\n", "Now, we can test the sandbox tool with a real math problem. In this tutorial, we will use the [DigitalLearningGmbH/MATH-lighteval](https://huggingface.co/datasets/DigitalLearningGmbH/MATH-lighteval) dataset, which consists of problems from mathematics competitions, including the AMC 10, AMC 12, AIME, and more."
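, "\n",
    "A note on the data layout: we assume the preprocessed parquet follows verl's standard RL dataset schema, i.e. the same columns built by `recipe/char_count/create_dataset.py` (`prompt`, `data_source`, `ability`, `reward_model`, `extra_info`); verify this on your own copy. After the next cell loads the data, you can peek at one example:\n",
    "\n",
    "```python\n",
    "example = dataset[0]\n",
    "print(example[\"prompt\"])        # chat messages, e.g. [{\"role\": \"user\", \"content\": \"...\"}]\n",
    "print(example[\"reward_model\"])  # e.g. {\"style\": \"rule\", \"ground_truth\": \"...\"}\n",
    "```\n"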
] }, { "cell_type": "code", "execution_count": 14, "metadata": {}, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "ebd09c8816b140a59a879e5a5e218950", "version_major": 2, "version_minor": 0 }, "text/plain": [ "Generating train split: 0 examples [00:00, ? examples/s]" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "from datasets import load_dataset\n", "\n", "dataset = load_dataset(\"parquet\", data_files=test_file)[\"train\"]" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "For debug purpose, we can implement ReAct agent as a simple loop. For RL training, there are more subtle issue and corner case to deal with, we provide a built-in ReAct agent loop which will be discussed in next section." ] }, { "cell_type": "code", "execution_count": 15, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "No tool calls, finish_reason: stop\n" ] } ], "source": [ "messages = dataset[\"prompt\"][0]\n", "\n", "while True:\n", " # 1. Chat with the model\n", " completion = await client.chat.completions.create(\n", " model=config.actor_rollout_ref.model.path,\n", " messages=messages,\n", " tools=[sandbox_tool.tool_schema.model_dump(exclude_unset=True, exclude_none=True)],\n", " extra_body={\n", " \"chat_template_kwargs\": {\"enable_thinking\": False},\n", " },\n", " )\n", "\n", " message = completion.choices[0].message.model_dump(exclude_unset=True, exclude_none=True)\n", " messages.append(message)\n", "\n", " # 2. Call tools\n", " finish_reason = completion.choices[0].finish_reason\n", " if finish_reason != \"tool_calls\":\n", " print(f\"No tool calls, finish_reason: {finish_reason}\")\n", " break\n", "\n", " try:\n", " tool_calls = completion.choices[0].message.tool_calls[0]\n", " args = json.loads(tool_calls.function.arguments)\n", " result, _, _ = await sandbox_tool.execute(\"\", args)\n", " except Exception as e:\n", " print(f\"Error: {e}\")\n", "\n", " # 3. Add tool response to messages\n", " messages.append(\n", " {\n", " \"role\": \"tool\",\n", " \"content\": result.text,\n", " }\n", " )" ] }, { "cell_type": "code", "execution_count": 16, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "[{'content': \"How many vertical asymptotes does the graph of $y=\\\\frac{2}{x^2+x-6}$ have? Let's think step by step and output the final answer within \\\\boxed{}.\",\n", " 'role': 'user'},\n", " {'content': \"To determine the number of vertical asymptotes for the function $ y = \\\\frac{2}{x^2 + x - 6} $, we need to find the values of $ x $ where the denominator equals zero, as these points are where the function is undefined and potentially where it has vertical asymptotes.\\n\\nThe denominator is $ x^2 + x - 6 $. To find the vertical asymptotes, we need to solve the equation:\\n\\n$$ x^2 + x - 6 = 0 $$\\n\\nThis is a quadratic equation, and we can solve it using the quadratic formula:\\n\\n$$ x = \\\\frac{-b \\\\pm \\\\sqrt{b^2 - 4ac}}{2a} $$\\n\\nwhere $ a = 1 $, $ b = 1 $, and $ c = -6 $. 
Let's solve this equation to find the values of $ x $ where the denominator is zero, which will give us the vertical asymptotes.\",\n", " 'role': 'assistant',\n", " 'tool_calls': [{'id': 'call_4d873672ff8445159e4e5e45',\n", " 'function': {'arguments': '{\"code\": \"from sympy import symbols, solve\\\\nx = symbols(\\'x\\')\\\\nroots = solve(x**2 + x - 6, x)\\\\nroots\"}',\n", " 'name': 'code_interpreter'},\n", " 'type': 'function',\n", " 'index': -1}]},\n", " {'role': 'tool', 'content': '[-3, 2]\\n'},\n", " {'content': 'The roots of the equation $ x^2 + x - 6 = 0 $ are $ x = -3 $ and $ x = 2 $. These are the values of $ x $ where the denominator is zero, which means the function $ y = \\\\frac{2}{x^2 + x - 6} $ is undefined at these points. \\n\\nSince the denominator is zero at these values, the function has vertical asymptotes at $ x = -3 $ and $ x = 2 $. Therefore, the graph of the function has two vertical asymptotes.\\n\\nThe final answer is $\\\\boxed{2}$.',\n", " 'role': 'assistant'}]" ] }, "execution_count": 16, "metadata": {}, "output_type": "execute_result" } ], "source": [ "messages" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "We can see that the ReAct agent properly queries the LLM, executes the sandbox tool call, and finally generates the answer." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 4. End-to-end training with tool agent loop\n", "\n", "After the tool has been implemented and tested, we can do end-to-end RL training to tune the model to properly use the tool. To simplify agentic RL training, verl provides the [Agent Loop](https://verl.readthedocs.io/en/latest/advance/agent_loop.html) abstraction, which allows users to define custom agent loops:\n", "- Search agent\n", "- Math agent\n", "- SWE agent\n", "- GUI agent\n", "- ...\n", "\n", "For ease of use, verl provides two pre-defined agent loops:\n", "- SingleTurnAgentLoop: single-turn conversation without tool calling\n", "- ToolAgentLoop: multi-turn conversation with tool calling and interaction\n", "\n", "To use ToolAgentLoop, users only need to provide a tool configuration in a json/yaml file. In the configuration file, users should specify the following fields for each tool:\n", "- class_name: fully qualified class name of the tool, used to dynamically load the custom tool class\n", "- config: keyword arguments used to initialize the tool instance\n", "\n", "Let's dump our sandbox tool configuration to a json file:" ] }, { "cell_type": "code", "execution_count": 17, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "2025-10-16 23:07:16,868\tINFO worker.py:2004 -- Started a local Ray instance. 
View the dashboard at \u001b[1m\u001b[32mhttp://127.0.0.1:8265 \u001b[39m\u001b[22m\n" ] } ], "source": [ "ray.shutdown()\n", "\n", "sandbox = Sandbox.remote()\n", "sandbox_address = ray.get(sandbox.get_server_address.remote())\n", "\n", "tool_config = {\n", " \"tools\": [\n", " {\n", " \"class_name\": \"sandbox.SandboxTool\",\n", " \"config\": {\n", " \"type\": \"native\",\n", " \"sandbox_fusion_url\": f\"http://{sandbox_address}/run_code\",\n", " },\n", " },\n", " ],\n", "}\n", "\n", "tool_config_path = \"tool_config.json\"\n", "with open(tool_config_path, \"w\") as f:\n", " json.dump(tool_config, f)" ] }, { "cell_type": "code", "execution_count": 18, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/tmp/ipykernel_174199/3963810189.py:3: UserWarning: \n", "The version_base parameter is not specified.\n", "Please specify a compatability version level, or None.\n", "Will assume defaults for version 1.1\n", " with initialize_config_dir(config_dir=verl_config_dir):\n" ] } ], "source": [ "from hydra import compose, initialize_config_dir\n", "\n", "with initialize_config_dir(config_dir=verl_config_dir):\n", " config = compose(\n", " config_name=\"ppo_trainer\",\n", " overrides=[\n", " \"algorithm.adv_estimator=grpo\",\n", " \"data.train_files=\" + train_file,\n", " \"data.val_files=\" + test_file,\n", " \"data.return_raw_chat=True\",\n", " \"data.train_batch_size=32\",\n", " \"data.max_prompt_length=1024\",\n", " \"data.max_response_length=1024\",\n", " \"+data.apply_chat_template_kwargs.enable_thinking=False\",\n", " # actor related\n", " \"actor_rollout_ref.model.path=\" + model_path,\n", " \"actor_rollout_ref.actor.ppo_mini_batch_size=8\",\n", " \"actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=8\",\n", " \"actor_rollout_ref.actor.fsdp_config.param_offload=True\",\n", " \"actor_rollout_ref.actor.fsdp_config.optimizer_offload=True\",\n", " # rollout related\n", " \"actor_rollout_ref.rollout.name=\" + rollout_name,\n", " \"actor_rollout_ref.rollout.mode=async\",\n", " \"actor_rollout_ref.rollout.tensor_model_parallel_size=1\",\n", " \"actor_rollout_ref.rollout.n=8\",\n", " \"actor_rollout_ref.rollout.multi_turn.tool_config_path=\" + tool_config_path,\n", " \"actor_rollout_ref.rollout.agent.default_agent_loop=tool_agent\",\n", " \"actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=8\",\n", " # trainer related\n", " \"trainer.val_before_train=True\",\n", " \"trainer.log_val_generations=10\",\n", " \"trainer.n_gpus_per_node=8\",\n", " \"trainer.test_freq=-1\",\n", " \"trainer.total_training_steps=5\",\n", " \"trainer.logger=['console','tensorboard', 'wandb']\",\n", " \"trainer.project_name=verl\",\n", " \"trainer.experiment_name=\" + os.path.basename(model_path),\n", " ],\n", " )" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from verl.trainer.main_ppo import main\n", "\n", "main(config)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "For demo purposes, we only train for 5 steps; you can verify the training process by checking the wandb metrics:\n", "- num_turns: min/max/mean chat conversation turns in each step.\n", "- critic rewards: min/max/mean critic rewards in each step.\n", "\n", "For more realistic agentic RL training, please refer to our recipes:\n", "- [retool](https://github.com/volcengine/verl/tree/main/recipe/retool): implementation of paper [ReTool: Reinforcement Learning for Strategic Tool Use in LLMs](https://arxiv.org/abs/2504.11536)\n", "- 
[collabllm](https://github.com/volcengine/verl/tree/main/recipe/collabllm): implementation of paper [CollabLLM: From Passive Responders to Active Collaborators](https://arxiv.org/pdf/2502.00640)\n", "- [deepeyes](https://github.com/volcengine/verl/tree/main/recipe/deepeyes): implementation of paper [DeepEyes: Incentivizing \"Thinking with Images\" via Reinforcement Learning](https://arxiv.org/abs/2505.14362)" ] } ], "metadata": { "fileId": "398ea641-8a51-4a0b-b64e-6b7cd6b72164", "filePath": "/opt/tiger/open_verl/examples/agent_loop_tutorial.ipynb", "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.12.3" } }, "nbformat": 4, "nbformat_minor": 2 } ================================================ FILE: verl_distillation/examples/tutorial/agent_loop_get_started/sandbox.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re import aiohttp from transformers.utils import get_json_schema from verl.tools.base_tool import BaseTool, OpenAIFunctionToolSchema, ToolResponse class SandboxTool(BaseTool): def __init__(self, config: dict, tool_schema: OpenAIFunctionToolSchema): super().__init__(config, tool_schema) # Different models may use different code patterns, e.g. python, py, etc. self.code_pattern = re.compile(r"```py(.*?)```", re.DOTALL) async def code_interpreter(self, code: str) -> str: """Execute the code in the sandbox. Args: code: The code to be executed. Returns: str: The output of the code execution. """ async with aiohttp.ClientSession() as session: async with session.post( self.config.get("sandbox_fusion_url"), json={"code": code}, ) as resp: resp.raise_for_status() result = await resp.json() stdout, stderr = result["run_result"]["stdout"], result["run_result"]["stderr"] return stdout + stderr def get_openai_tool_schema(self) -> OpenAIFunctionToolSchema: schema = get_json_schema(self.code_interpreter) return OpenAIFunctionToolSchema(**schema) async def execute(self, instance_id: str, parameters: dict, **kwargs) -> tuple[ToolResponse, float, dict]: code = parameters["code"] matches = self.code_pattern.findall(code) if matches: code = matches[0].strip() # NOTE: Some scripts may not explicitly print the result, so we need to add a print statement to the end of the script. # A better way is to SFT the model to make it print the result by default; we skip the SFT stage in this tutorial. 
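        # Example of the rewrite performed by the loop below (mirrors the trace shown in the
        # tutorial notebook, where the model's snippet ends with a bare expression `roots`):
        #   "roots = solve(x**2 + x - 6, x)\nroots"  ->  "roots = solve(x**2 + x - 6, x)\nprint(roots)"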
lines = code.split("\n") for i, line in reversed(list(enumerate(lines))): if line == "": continue if not lines[i].startswith("print"): lines[i] = f"print({line})" break code = "\n".join(lines) result = await self.code_interpreter(code) return ToolResponse(text=result), 0.0, {} ================================================ FILE: verl_distillation/init_ray.sh ================================================ #!/bin/bash # Single Node Ray Initialization Script # Usage: bash init_ray.sh # HEAD_NODE_IP: IP address of the head node # PORT: Ray port (default: 6379) # RANK: Node rank (0 for head, >0 for workers) set -e # Parse arguments HEAD_NODE_IP=${1:-"127.0.0.1"} PORT=${2:-6379} RANK=${3:-0} # Configuration NUM_CPUS=${NUM_CPUS:-""} NUM_GPUS=${NUM_GPUS:-""} OBJECT_STORE_MEMORY=${OBJECT_STORE_MEMORY:-""} CONDA_ENV_NAME=${CONDA_ENV_NAME:-"distill"} # Colors GREEN='\033[0;32m' YELLOW='\033[1;33m' NC='\033[0m' log_info() { echo -e "${GREEN}[INFO]${NC} $(hostname): $1" } log_warn() { echo -e "${YELLOW}[WARN]${NC} $(hostname): $1" } # Activate conda environment if [ -f "/root/anaconda3/etc/profile.d/conda.sh" ]; then source "/root/anaconda3/etc/profile.d/conda.sh" elif [ -f "$HOME/anaconda3/etc/profile.d/conda.sh" ]; then source "$HOME/anaconda3/etc/profile.d/conda.sh" elif [ -f "$HOME/miniconda3/etc/profile.d/conda.sh" ]; then source "$HOME/miniconda3/etc/profile.d/conda.sh" fi if command -v conda &> /dev/null; then conda activate ${CONDA_ENV_NAME} 2>/dev/null || log_warn "Could not activate conda env: ${CONDA_ENV_NAME}" fi # Build ray start command options RAY_OPTS="" if [ -n "${NUM_CPUS}" ]; then RAY_OPTS="${RAY_OPTS} --num-cpus=${NUM_CPUS}" fi if [ -n "${NUM_GPUS}" ]; then RAY_OPTS="${RAY_OPTS} --num-gpus=${NUM_GPUS}" fi if [ -n "${OBJECT_STORE_MEMORY}" ]; then RAY_OPTS="${RAY_OPTS} --object-store-memory=${OBJECT_STORE_MEMORY}" fi # Stop existing Ray instance ray stop --force 2>/dev/null || true sleep 2 # Start Ray if [ "${RANK}" -eq 0 ]; then log_info "Starting Ray HEAD node on port ${PORT}..." ray start --head --port=${PORT} ${RAY_OPTS} else log_info "Starting Ray WORKER node, connecting to ${HEAD_NODE_IP}:${PORT}..." ray start --address=${HEAD_NODE_IP}:${PORT} ${RAY_OPTS} fi sleep 3 # Check status log_info "Ray node started. Checking status..." ray status ================================================ FILE: verl_distillation/init_ray_cluster.sh ================================================ #!/bin/bash # Multi-node Ray Cluster Initialization Script # Usage: bash init_ray_cluster.sh [--stop] # --stop: Stop Ray on all nodes instead of starting set -e SCRIPT_DIR=$(cd $(dirname $0); pwd) PROJECT_DIR=${SCRIPT_DIR} # Configuration PORT=${RAY_PORT:-6379} HOSTFILE=${HOSTFILE:-"/etc/mpi/hostfile"} CONDA_ENV_NAME=${CONDA_ENV_NAME:-"distill"} LOG_DIR="${PROJECT_DIR}/logs/ray" # Colors RED='\033[0;31m' GREEN='\033[0;32m' YELLOW='\033[1;33m' NC='\033[0m' log_info() { echo -e "${GREEN}[INFO]${NC} $1" } log_warn() { echo -e "${YELLOW}[WARN]${NC} $1" } log_error() { echo -e "${RED}[ERROR]${NC} $1" } # Function to stop Ray on all nodes stop_cluster() { log_info "Stopping Ray on all nodes..." if [ ! -f "${HOSTFILE}" ]; then log_warn "Hostfile not found, stopping local Ray only" ray stop --force 2>/dev/null || true return fi ALL_NODES=$(awk '!a[$1]++ {print $1}' ${HOSTFILE}) for node in ${ALL_NODES}; do log_info "Stopping Ray on ${node}..." 
ssh -n ${node} "source /root/anaconda3/etc/profile.d/conda.sh && conda activate ${CONDA_ENV_NAME} && ray stop --force" 2>/dev/null & done wait log_info "Ray stopped on all nodes" } # Function to start Ray cluster start_cluster() { # Check hostfile if [ ! -f "${HOSTFILE}" ]; then log_error "Hostfile not found: ${HOSTFILE}" log_info "Please create a hostfile with one IP per line" log_info "Example:" echo " 192.168.1.100" echo " 192.168.1.101" echo " 192.168.1.102" exit 1 fi # Get head node (first line) HEAD_NODE=$(awk 'NR==1 {print $1}' ${HOSTFILE}) ALL_NODES=$(awk '!a[$1]++ {print $1}' ${HOSTFILE}) log_info "Head node: ${HEAD_NODE}" log_info "Ray port: ${PORT}" log_info "Conda env: ${CONDA_ENV_NAME}" echo "" log_info "Nodes in cluster:" echo "${ALL_NODES}" echo "" # Create log directory mkdir -p "${LOG_DIR}" # Stop existing Ray instances first log_info "Stopping any existing Ray instances..." stop_cluster sleep 3 # Start head node first (synchronously) log_info "Starting Ray HEAD on ${HEAD_NODE}..." ssh -n ${HEAD_NODE} "CONDA_ENV_NAME=${CONDA_ENV_NAME} bash ${SCRIPT_DIR}/init_ray.sh ${HEAD_NODE} ${PORT} 0" \ > "${LOG_DIR}/ray_${HEAD_NODE}.log" 2>&1 if [ $? -ne 0 ]; then log_error "Failed to start Ray HEAD. Check ${LOG_DIR}/ray_${HEAD_NODE}.log" exit 1 fi log_info "Ray HEAD started successfully" # Wait for head to be ready sleep 5 # Start worker nodes (asynchronously) rank=1 for node in ${ALL_NODES}; do if [ "${node}" == "${HEAD_NODE}" ]; then continue fi log_info "Starting Ray WORKER on ${node} (rank ${rank})..." ssh -n ${node} "CONDA_ENV_NAME=${CONDA_ENV_NAME} bash ${SCRIPT_DIR}/init_ray.sh ${HEAD_NODE} ${PORT} ${rank}" \ > "${LOG_DIR}/ray_${node}.log" 2>&1 & rank=$((rank + 1)) done # Wait for all workers log_info "Waiting for all workers to join..." wait sleep 3 # Check cluster status echo "" log_info "Ray cluster initialization complete!" log_info "Logs saved to: ${LOG_DIR}/" echo "" log_info "Cluster status:" ssh -n ${HEAD_NODE} "source /root/anaconda3/etc/profile.d/conda.sh && conda activate ${CONDA_ENV_NAME} && ray status" } # Main case "${1}" in --stop) stop_cluster ;; *) start_cluster ;; esac ================================================ FILE: verl_distillation/pyproject.toml ================================================ # ------------------------------- # build-system # ------------------------------- [build-system] requires = [ "setuptools>=61.0", "wheel" ] build-backend = "setuptools.build_meta" # ------------------------------- # project (PEP 621 metadata) # ------------------------------- [project] name = "verl" # We'll mark the version as "dynamic" because it's read from the file "verl/version/version" # (PEP 621 calls this "dynamic version"). # The actual version is specified in the [tool.setuptools.dynamic] section below. dynamic = ["version", "dependencies", "optional-dependencies", "authors", "urls"] description = "verl: Volcano Engine Reinforcement Learning for LLM" license = {text = "Apache-2.0"} # Changed from file to text format readme = {file = "README.md", content-type = "text/markdown"} requires-python = ">=3.10" # ------------------------------- # tool.ruff - Linting configuration # ------------------------------- [tool.ruff] # Note: While the formatter will attempt to format lines such that they remain within the line-length, # it isn't a hard upper bound, and formatted lines may exceed the line-length. 
line-length = 120 exclude = ["tests/workers/rollout/test_sglang_async_rollout_sf_tools.py", "scripts/legacy_model_merger.py"] [tool.ruff.lint] isort = {known-first-party = ["verl"]} # c.f. https://github.com/vllm-project/vllm/blob/ce8d6b75fc0586045df75ee1568a5b5f9957251b/pyproject.toml select = [ # pycodestyle "E", # Pyflakes "F", # pyupgrade "UP", # flake8-bugbear "B", # isort "I", "G", ] ignore = [ # star imports "F405", "F403", # lambda expression assignment "E731", # Loop control variable not used within loop body "B007", # f-string format "UP032", # `.log()` statement uses f-string "G004", # X | None for type annotations "UP045", # deprecated import "UP035", ] # ------------------------------- # tool.mypy - typechecking config # ------------------------------- [tool.mypy] pretty = true ignore_missing_imports = true explicit_package_bases = true follow_imports = "skip" # Blanket silence ignore_errors = true [[tool.mypy.overrides]] module = [ "verl.trainer.config.algorithm", "verl.trainer.ppo.core_algos", "verl.trainer.ppo.reward", "verl.workers.reward_manager", "verl.workers.reward_manager.*", ] ignore_errors = false # ------------------------------- # tool.setuptools - Additional config # ------------------------------- [tool.setuptools] # True means `setuptools` will attempt to include all relevant files in package_data automatically. # This corresponds to `include_package_data=True` in setup.py. include-package-data = true # We read the version from a file in 'verl/version/version' [tool.setuptools.dynamic] version = {file = "verl/version/version"} # If you need to mimic `package_dir={'': '.'}`: [tool.setuptools.package-dir] "" = "." # If you need to include specific non-Python data (like YAML files or version file): # This is the rough equivalent of package_data={'': ['version/*'], 'verl': ['trainer/config/*.yaml']} [tool.setuptools.package-data] verl = [ "version/*", "trainer/config/*.yaml", "trainer/config/*/*.yaml", ] "recipe.onpolicy_distill" = [ "config/*.yaml", ] ================================================ FILE: verl_distillation/recipe/README.md ================================================ # Recipe The examples under `recipes/` are representative extensions to verl for specific end-to-end RL training recipes. To help the community reproduce experiments, the verl team provides a snapshot of the codebase when each recipe is initially PR'ed to verl main. You can find them via [github branches](https://github.com/volcengine/verl/branches/all?query=recipe) # Awesome work using verl - [Logic-RL](https://github.com/Unakar/Logic-RL): a reproduction of DeepSeek R1 Zero on 2K Tiny Logic Puzzle Dataset. 
![GitHub Repo stars](https://img.shields.io/github/stars/Unakar/Logic-RL) - [Seed-Coder](https://github.com/ByteDance-Seed/Seed-Coder): RL training of Seed-Coder boosts performance on competitive programming ![GitHub Repo stars](https://img.shields.io/github/stars/ByteDance-Seed/Seed-Coder) - [all-hands/openhands-lm-32b-v0.1](https://www.all-hands.dev/blog/introducing-openhands-lm-32b----a-strong-open-coding-agent-model): A strong, open coding agent model, trained with [multi-turn fine-tuning](https://github.com/volcengine/verl/pull/195) - [s3](https://github.com/pat-jj/s3) **Efficient Yet Effective** Search Agent Training via RL ![GitHub Repo stars](https://img.shields.io/github/stars/pat-jj/s3) - [Rec-R1](https://arxiv.org/pdf/2503.24289): Bridging Generative Large Language Models and Recommendation Systems via Reinforcement Learning - [Explore RL Data Scaling](https://arxiv.org/abs/2503.22230): Exploring Data Scaling Trends and Effects in Reinforcement Learning from Human Feedback - [FIRE](https://arxiv.org/abs/2410.21236): Flaming-hot initiation with regular execution sampling for large language models - [DQO](https://arxiv.org/abs/2410.09302): Enhancing multi-Step reasoning abilities of language models through direct Q-function optimization - [ProRL](https://arxiv.org/abs/2505.24864): Prolonged Reinforcement Learning Expands Reasoning Boundaries in Large Language Models - [cognition-engineering](https://github.com/gair-nlp/cognition-engineering): Test time scaling drives cognition engineering. ![GitHub Repo stars](https://img.shields.io/github/stars/gair-nlp/cognition-engineering) - [Trust Region Preference Approximation](https://github.com/XueruiSu/Trust-Region-Preference-Approximation): A simple and stable **reinforcement learning algorithm** for LLM reasoning. 
![GitHub Repo stars](https://img.shields.io/github/stars/XueruiSu/Trust-Region-Preference-Approximation) - [AdaRFT](https://github.com/uscnlp-lime/verl): Efficient Reinforcement Finetuning via **Adaptive Curriculum Learning** ![GitHub Repo stars](https://img.shields.io/github/stars/uscnlp-lime/verl) - [critic-rl](https://github.com/HKUNLP/critic-rl): LLM critics for code generation ![GitHub Repo stars](https://img.shields.io/github/stars/HKUNLP/critic-rl) - [self-rewarding-reasoning-LLM](https://arxiv.org/pdf/2502.19613): self-rewarding and correction with **generative reward models** ![GitHub Repo stars](https://img.shields.io/github/stars/RLHFlow/Self-rewarding-reasoning-LLM) - [DeepEnlighten](https://github.com/DolbyUUU/DeepEnlighten): Reproduce R1 with **social reasoning** tasks and analyze key findings ![GitHub Repo stars](https://img.shields.io/github/stars/DolbyUUU/DeepEnlighten) - [MetaSpatial](https://github.com/PzySeere/MetaSpatial): Reinforcing **3D Spatial Reasoning** in **VLMs** for the **Metaverse** ![GitHub Repo stars](https://img.shields.io/github/stars/PzySeere/MetaSpatial) - [PURE](https://github.com/CJReinforce/PURE): **Credit assignment** is the key to successful reinforcement fine-tuning using **process reward model** ![GitHub Repo stars](https://img.shields.io/github/stars/CJReinforce/PURE) - [cognitive-behaviors](https://github.com/kanishkg/cognitive-behaviors): Cognitive Behaviors that Enable Self-Improving Reasoners, or, Four Habits of Highly Effective STaRs ![GitHub Repo stars](https://img.shields.io/github/stars/kanishkg/cognitive-behaviors) - [deepscaler](https://github.com/agentica-project/rllm/tree/deepscaler): iterative context scaling with GRPO ![GitHub Repo stars](https://img.shields.io/github/stars/agentica-project/deepscaler) - [DAPO](https://dapo-sia.github.io/): the fully open source SOTA RL algorithm that beats DeepSeek-R1-zero-32B ![GitHub Repo stars](https://img.shields.io/github/stars/volcengine/verl) - [NoisyRollout](https://github.com/NUS-TRAIL/NoisyRollout): Reinforcing Visual Reasoning with Data Augmentation ![GitHub Repo stars](https://img.shields.io/github/stars/NUS-TRAIL/NoisyRollout) ================================================ FILE: verl_distillation/recipe/__init__.py ================================================ # This file makes `recipe` a regular Python package so that entrypoints like # `python -m recipe.onpolicy_distill.main_onpolicy_distill` work reliably after installation. ================================================ FILE: verl_distillation/recipe/char_count/README.md ================================================ # Char Count ## Introduction Char count is a simple NLP task. We create it for beginners to grasp the idea of RLVR. The task can be trained using a tiny model (e.g., https://huggingface.co/HuggingFaceTB/SmolLM2-135M) on a consumer GPU with only 8GB of memory. ## Problem formulation The prompt is: "How many {char} are there in {word}?". In order for the LLM to better answer this question, we create an SFT dataset with intermediate steps. For example, ```text Question: How many n are there in n-i-n-e? Answer: n = n i != n n = n e != n \boxed{2} ``` Note that - We add a dash between each individual char to make the task easier, because each individual char will be tokenized to the same token by most tokenizers. - In the SFT dataset, we create a CoT by listing all the individual chars and whether each equals the target. At the end, it outputs the final answer inside the box. - The task can be verified. 
- The word is not always meaningful. Each char is sampled uniformly from a to z. We make the total length and the answer uniformly distributed within a range. ## Scripts To create the dataset, run ```bash python3 create_dataset.py ``` We create a train set and a val set. Both of them are used for SFT and RL. You can specify the total number of examples, the min/max length, and the data path. To run SFT ```bash bash train_sft.sh ``` We train SFT for 3 epochs. After 3 epochs, the validation score is around 0.12. To run GRPO ```bash bash train_grpo.sh ``` We train GRPO for 2 epochs. After 2 epochs, the validation score is around 0.36. ================================================ FILE: verl_distillation/recipe/char_count/create_dataset.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Task description: Given a random word and a random char, count the number of occurrences of the char in the word. Create a CoT dataset that splits the word into separate chars, then lists the chars and counts the occurrences. Each word is a random string of chars sampled uniformly from a to z. """ import os.path import random prompt_template = "How many {} are there in word {}?" def generate_random_char(): return chr(97 + random.randint(0, 25)) def create_prompt_response(min_length=3, max_length=5): # randomly generate a length word_length = random.randint(min_length, max_length) # randomly generate a target count number. This makes the answer uniformly distributed within [1, word_length] target_count_number = random.randint(1, word_length) char_lst = [] # generate the word # step 1: generate the occurrences of the target char target_char = generate_random_char() for _ in range(target_count_number): char_lst.append(target_char) # step 2: generate the remaining (non-target) chars for _ in range(word_length - target_count_number): while True: char = generate_random_char() if char != target_char: char_lst.append(char) break # step 3: randomly permute char_lst random.shuffle(char_lst) word = "-".join(char_lst) prompt = prompt_template.format(target_char, word) final_answer = [] # cot number = 0 for i, char in enumerate(char_lst): cot = f"{char}" if char != target_char: cot += " != " else: cot += " = " number += 1 cot += f"{target_char}." final_answer.append(cot) conclusion = f"\\boxed{{{number}}} {target_char} in {word}."
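    # e.g. for word "n-i-n-e" with target "n": final_answer holds the per-char CoT lines
    # ("n = n.", "i != n.", ...) and conclusion is "\boxed{2} n in n-i-n-e."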
final_answer.append(conclusion) final_answer = "\n".join(final_answer) return prompt, final_answer if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument("--total_number", type=int, default=10000) parser.add_argument("--min_length", type=int, default=5) parser.add_argument("--max_length", type=int, default=20) parser.add_argument("--data_path", type=str, default="~/data/char_count") args = vars(parser.parse_args()) total_number = args["total_number"] min_length = args["min_length"] max_length = args["max_length"] data_path = args["data_path"] data_path = os.path.expanduser(data_path) full_output = [] for _ in range(total_number): output = create_prompt_response(min_length=min_length, max_length=max_length) full_output.append(output) # random reorder random.shuffle(full_output) # split for train and test train_split_len = int(0.9 * len(full_output)) train_outputs = full_output[:train_split_len] test_output = full_output[train_split_len:] sft_train_dataset = {"prompt": [], "response": []} for o in train_outputs: sft_train_dataset["prompt"].append(o[0]) sft_train_dataset["response"].append(o[1]) sft_test_dataset = {"prompt": [], "response": []} for o in test_output: sft_test_dataset["prompt"].append(o[0]) sft_test_dataset["response"].append(o[1]) import pandas as pd sft_train_dataset = pd.DataFrame(data=sft_train_dataset) sft_test_dataset = pd.DataFrame(data=sft_test_dataset) folder = os.path.join(data_path, "sft") os.makedirs(folder, exist_ok=True) sft_train_dataset.to_parquet(os.path.join(folder, "train.parquet")) sft_test_dataset.to_parquet(os.path.join(folder, "test.parquet")) # build RL dataset rl_train_dataset = {"prompt": [], "data_source": [], "ability": [], "reward_model": [], "extra_info": []} rl_test_dataset = {"prompt": [], "data_source": [], "ability": [], "reward_model": [], "extra_info": []} from verl.utils.reward_score.math_reward import last_boxed_only_string, remove_boxed for o in train_outputs: prompt = o[0] response = o[1] prompt_with_template = [ { "role": "user", "content": prompt, } ] rl_train_dataset["prompt"].append(prompt_with_template) rl_train_dataset["data_source"].append("char_count") rl_train_dataset["ability"].append("other") rl_train_dataset["reward_model"].append( {"style": "rule", "ground_truth": remove_boxed(last_boxed_only_string(response))} ) rl_train_dataset["extra_info"].append({"response": response}) for o in test_output: prompt = o[0] response = o[1] prompt_with_template = [ { "role": "user", "content": prompt, } ] rl_test_dataset["prompt"].append(prompt_with_template) rl_test_dataset["data_source"].append("char_count") rl_test_dataset["ability"].append("other") rl_test_dataset["reward_model"].append( {"style": "rule", "ground_truth": remove_boxed(last_boxed_only_string(response))} ) rl_test_dataset["extra_info"].append({"response": response}) rl_train_dataset = pd.DataFrame(data=rl_train_dataset) rl_test_dataset = pd.DataFrame(data=rl_test_dataset) folder = os.path.join(data_path, "rl") os.makedirs(folder, exist_ok=True) rl_train_dataset.to_parquet(os.path.join(folder, "train.parquet")) rl_test_dataset.to_parquet(os.path.join(folder, "test.parquet")) ================================================ FILE: verl_distillation/recipe/char_count/reward_function.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Reward function """ from verl.utils.reward_score import math_reward def char_count_reward_function(data_source, solution_str, ground_truth, extra_info=None): try: last_boxed_string = math_reward.last_boxed_only_string(solution_str) if last_boxed_string is None: return 0 solution = math_reward.remove_boxed(last_boxed_string) if solution == ground_truth: return 1 else: return 0 except Exception: print(ground_truth, solution_str) return 0 ================================================ FILE: verl_distillation/recipe/char_count/train_grpo.sh ================================================ set -x python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/char_count/rl/train.parquet \ data.val_files=$HOME/data/char_count/rl/test.parquet \ data.train_batch_size=128 \ data.max_prompt_length=128 \ data.max_response_length=128 \ data.filter_overlong_prompts=False \ data.truncation='error' \ actor_rollout_ref.model.path=./models/sft/global_step_105 \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=16 \ actor_rollout_ref.actor.use_dynamic_bsz=True \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=5000 \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.actor.kl_loss_coef=0.0 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=True \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=True \ actor_rollout_ref.rollout.tensor_model_parallel_size=1 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.7 \ actor_rollout_ref.rollout.n=8 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","tensorboard"]' \ trainer.project_name='verl_example' \ trainer.experiment_name='smol135m_grpo' \ trainer.val_before_train=True \ trainer.n_gpus_per_node=1 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=5 \ trainer.total_epochs=2 \ custom_reward_function.path=recipe/char_count/reward_function.py \ custom_reward_function.name=char_count_reward_function ================================================ FILE: verl_distillation/recipe/char_count/train_sft.sh ================================================ set -x nproc_per_node=1 save_path=./models/sft torchrun --standalone --nnodes=1 --nproc_per_node=$nproc_per_node \ -m verl.trainer.fsdp_sft_trainer \ data.train_files=$HOME/data/char_count/sft/train.parquet \ data.val_files=$HOME/data/char_count/sft/test.parquet \ data.prompt_key=prompt \ data.response_key=response \ data.micro_batch_size_per_gpu=8 \ data.max_length=256 \ data.train_batch_size=256 \ use_remove_padding=True \ model.partial_pretrain=HuggingFaceTB/SmolLM2-135M-Instruct \ trainer.default_local_dir=$save_path \ trainer.project_name=char_count-sft \ trainer.experiment_name=char_count-sft-SmolLM2-135M-Instruct \ trainer.total_epochs=3 \ trainer.logger=console 
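# NOTE: train_grpo.sh loads the SFT checkpoint from ./models/sft/global_step_105
# (actor_rollout_ref.model.path); adjust that path if your SFT run saves a different step.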
================================================ FILE: verl_distillation/recipe/collabllm/README.md ================================================ # CollabLLM This repository implements [CollabLLM](https://arxiv.org/pdf/2502.00640) (ICML 2025) using the verl framework. For the original implementation, see the [CollabLLM repository](https://github.com/Wuyxin/collabllm). CollabLLM is a method for training language models to collaborate effectively in multi-turn conversations. This implementation adapts the original implementation to work with the verl training framework. ## Quick start ### 0. Environment Make sure the required packages for `verl` are installed. Additionally, install `litellm` and export the required API keys. The API model will be used for user simulators and, optionally, LLM Judges (see the Configuration section below). ### 1. Prepare Your Dataset First, process your dataset using the provided script: ```bash python process_dataset.py --dataset <> ... --dataset_type ``` **Requirements:** - Input: A Hugging Face multiturn dataset. Existing datasets: `collabllm/collabllm-multiturn-$DATASET`, with `DATASET` in one of [`math-hard(-large)`, `medium(-large)`, `bigcodebench(-large)`] (*-large are the datasets used in the CollabLLM paper) - Example format: See [collabllm-multiturn-math-hard](https://huggingface.co/datasets/collabllm/collabllm-multiturn-math-hard) - To generate your own dataset: Use [build_dataset.py](https://github.com/Wuyxin/collabllm/blob/main/scripts/engine/build_dataset.py) from the original CollabLLM repository *Note: Check `process_dataset.py` for example commands and usage.* ### 2. Train Your Model **(Optional) For Supervised Fine-Tuning (SFT):** ```bash bash train_sft_collabllm.sh ``` **For Reinforcement Learning (RL):** ```bash bash train_rl_collabllm.sh ``` The RL script shows an example of training CollabLLM on `math-hard-large`. - The config for sampling future conversations is in `recipe/collabllm/config/collabllm_interaction_config.yaml`. - The Multiturn-aware Reward is aggregated from these three conversational-level rewards: ``` +reward_model.reward_kwargs.metric_weights.accuracy=1 \ +reward_model.reward_kwargs.metric_weights.interactivity=1 \ +reward_model.reward_kwargs.metric_weights.token_amount=-0.0001 \ ``` You can remove, add, or modify the weights depending on your task. A list of implemented metrics you can add is under `recipe/collabllm/metrics`. For example, on `medium-large`, you can replace `accuracy` with `bleu_score` via ``` +reward_model.reward_kwargs.metric_weights.bleu_score=1 ``` which will instead apply the BLEU score to the sampled future conversations. ## Configuration Read the [docs](https://verl.readthedocs.io/en/latest/) for detailed configurations. ## Citation If you find CollabLLM useful in your research, please cite the following: ```bibtex @inproceedings{collabllm2025, title={CollabLLM: From Passive Responders to Active Collaborators}, author={Shirley Wu and Michel Galley and Baolin Peng and Hao Cheng and Gavin Li and Yao Dou and Weixin Cai and James Zou and Jure Leskovec and Jianfeng Gao}, booktitle={International Conference on Machine Learning (ICML)}, year={2025} } ``` ================================================ FILE: verl_distillation/recipe/collabllm/collabllm_agent_loop.py ================================================ # Copyright 2025 CollabLLM team and/or its affiliates # Copyright 2025 Bytedance Ltd. 
and/or its affiliates # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os from copy import deepcopy from typing import Any from uuid import uuid4 from recipe.collabllm.utils import is_valid_messages from verl.experimental.agent_loop.agent_loop import AgentLoopOutput from verl.experimental.agent_loop.tool_agent_loop import AgentData, AgentState, ToolAgentLoop from verl.utils.rollout_trace import rollout_trace_op from verl.workers.rollout.schemas import Message logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) class CollabLLMAgentLoop(ToolAgentLoop): @rollout_trace_op async def run(self, sampling_params: dict[str, Any], **kwargs) -> AgentLoopOutput: messages = list(kwargs["raw_prompt"]) image_data = deepcopy(kwargs.get("multi_modal_data", {}).get("image", None)) metrics = {} request_id = uuid4().hex tools_kwargs = kwargs.get("tools_kwargs", {}) # Initialize interaction if needed interaction = None interaction_kwargs = {} if self.interaction_config_file: interaction_kwargs = kwargs["extra_info"]["interaction_kwargs"] if "name" not in interaction_kwargs: raise ValueError("'name' key is required in interaction_kwargs") interaction_name = interaction_kwargs["name"] if interaction_name not in self.interaction_map: raise ValueError( f"Interaction '{interaction_name}' not found in interaction_map. 
interaction = self.interaction_map[interaction_name] await interaction.start_interaction(request_id, **interaction_kwargs) # Create AgentData instance to encapsulate all state agent_data = AgentData( messages=messages, image_data=image_data, metrics=metrics, request_id=request_id, tools_kwargs=tools_kwargs, interaction=interaction, interaction_kwargs=interaction_kwargs, ) # for collabllm, first generate the model responses await self._handle_pending_state(agent_data, sampling_params) status = await self._handle_generating_state(agent_data, sampling_params) if status == AgentState.TERMINATED: # tell reward manager to score -1 and skip future interaction # to avoid reward hacking with incomplete messages num_repeats = 0 else: # then, collect interaction rollouts num_repeats = self.config.actor_rollout_ref.rollout.multi_turn.num_repeat_rollouts interaction_requests = [deepcopy(agent_data) for _ in range(num_repeats)] # messages are only used in collabllm reward manager messages_lst = [] for _agent_data in interaction_requests: if not is_valid_messages(_agent_data.messages[-1]): break prev_msg_len = len(_agent_data.messages) await self.run_agent_data_loop(_agent_data, sampling_params, AgentState.INTERACTING) messages_lst.append([Message(**msg) for msg in _agent_data.messages]) if interaction.config.get("enable_log"): print(f"Assistant: ...{messages_lst[-1][prev_msg_len - 1].content[-100:]}") print(f"User: {messages_lst[-1][prev_msg_len].content[:100]}...") # Finalize output response_ids = agent_data.prompt_ids[-len(agent_data.response_mask) :] prompt_ids = agent_data.prompt_ids[: len(agent_data.prompt_ids) - len(agent_data.response_mask)] multi_modal_data = {"image": agent_data.image_data} if agent_data.image_data is not None else {} output = AgentLoopOutput( prompt_ids=prompt_ids, response_ids=response_ids[: self.response_length], response_mask=agent_data.response_mask[: self.response_length], multi_modal_data=multi_modal_data, response_logprobs=agent_data.response_logprobs[: self.response_length] if agent_data.response_logprobs else None, num_turns=agent_data.user_turns + agent_data.assistant_turns + 1, metrics=agent_data.metrics, extra_fields={ "turn_scores": agent_data.turn_scores, "messages": {"messages": messages_lst}, # compatible with sglang interaction }, ) return output async def run_agent_data_loop(self, agent_data: AgentData, sampling_params: dict[str, Any], state: AgentState): """ Run the agent data loop to process the agent data. Args: agent_data (AgentData): The agent data to process. sampling_params (dict[str, Any]): The sampling parameters. state (AgentState): The state to start the loop from. """ while state != AgentState.TERMINATED: if state == AgentState.PENDING: state = await self._handle_pending_state(agent_data, sampling_params) elif state == AgentState.GENERATING: state = await self._handle_generating_state(agent_data, sampling_params) elif state == AgentState.PROCESSING_TOOLS: state = await self._handle_processing_tools_state(agent_data) elif state == AgentState.INTERACTING: state = await self._handle_interacting_state(agent_data) else: logger.error(f"Invalid state: {state}") state = AgentState.TERMINATED ================================================ FILE: verl_distillation/recipe/collabllm/collabllm_interation.py ================================================ # Copyright 2024 CollabLLM Ltd. and/or its affiliates # Copyright 2024 Bytedance Ltd. and/or its affiliates
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import asyncio import copy import logging import os from typing import Any, Optional from uuid import uuid4 from recipe.collabllm.utils import remove_think_block from verl.interactions.base import BaseInteraction from verl.utils.rollout_trace import rollout_trace_op logger = logging.getLogger(__name__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) TERMINATION_SIGNAL = "[[TERMINATE CHAT]]" USER_PROMPT_TEMPLATE = """You are role-playing as a human USER interacting with an AI collaborator to complete a specific task. Your goal is to generate realistic, natural responses that a user might give in this scenario. ## Input Information: You will be provided with: - Task Description: The type of task you are trying to accomplish. - Complete Prompt or Reference Goal: This field may include the complete user request/query or a reference answer to the user's request. Use this field to understand the user's intent, requirements, or what would count as a satisfactory outcome. - Chat History: The ongoing conversation between you (as the user) and the AI Inputs: <|The Start of Task Description (Not visible to the AI)|> {task_desc} <|The End of Task Description|> <|The Start of Complete Prompt or Reference Goal (Not visible to the AI)|> {single_turn_prompt} <|The End of Complete Prompt or Reference Goal|> <|The Start of Chat History|> {chat_history} <|The End of Chat History|> ## Guidelines: - Stay in Character: Role-play as a human USER. You are NOT an AI. Maintain a consistent personality throughout the chat. - Minimize Effort: IMPORTANT! As a user, avoid being too detailed in your responses. Provide vague or incomplete demands in the early stages of the conversation to minimize your effort. Let the AI ask for clarification rather than providing everything upfront. - Knowledge Background: Reflect the user's knowledge level in the role-playing. If the user is less knowledgeable about a task, they might not notice incorrect statements. Ask questions that demonstrate your current understanding and areas of confusion. - Occasionally Make Mistakes: Real-world users might misspell words, provide incorrect dates, give wrong information, or ask unclear questions. Simulate this behavior to reflect natural interactions. - Mention Personal Preferences: Include preferences or constraints that might influence your requests or responses. For example, "I prefer short answers," "I need this done quickly," or "I like detailed comments in code." - Goal-Oriented: Keep the chat focused on your intent. Avoid small talk or digressions. Redirect the chat back to the main objective if it starts to stray. ## Output Format: You should output a JSON object with three entries: - "current_answer" (str): Briefly summarize the AI's current solution to the task. - "thought" (str): Output your thought process as a user deciding what to say next. Consider: 1. Have you obtained a satisfactory solution from the AI? If yes, you can terminate this chat.
2. If not, what specific part of the problem or solution are you struggling with? 3. Has the AI asked you to perform a task or answer a question? If so, how should you approach it? 4. Are you noticing any patterns or potential misunderstandings that need clarification? 5. If you're stuck, how can you phrase your question to get the most helpful response while demonstrating your current understanding? - "response" (str): Based on your thought process, respond to the AI as the user you are role-playing. Stop immediately when the user's response is completed. ## Important Notes: - Respond Based on Previous Messages: Your responses should be based on the context of the current chat history. Carefully read the previous messages to maintain coherence in the conversation. - Conversation Flow: If the "Chat History" is empty, start the conversation from scratch with an initial request. Otherwise, continue based on the existing conversation. - Don't Copy Input Directly: Use the provided information for understanding context only. Avoid copying target queries or any provided information directly in your responses. - Completion Signal: Use "{termination_signal}" as your response when you believe your goal has been solved or if you determine the AI cannot help further. - Double check if the JSON object is formatted correctly. Ensure that all fields are present and properly structured. Remember to stay in character as a user throughout your response, and follow the instructions and guidelines carefully.""" # noqa: E501
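# Illustrative note (an assumption added for clarity, not part of the original file):
# a well-formed reply from the user-simulator model looks like the following, which
# `extract_json` below can parse even when it is embedded in surrounding prose:
#
#     {
#         "current_answer": "The AI's current solution is x = 4, still unverified.",
#         "thought": "The solution matches what I expected, so I can end the chat.",
#         "response": "[[TERMINATE CHAT]]"
#     }
#
# Only "response" is surfaced to the policy model; "current_answer" and "thought"
# push the simulator to ground its reply in the chat history before answering.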
""" def __init__(self, config: dict): super().__init__(config) _config = copy.deepcopy(config) _config.pop("enable_log", None) self.name = _config.pop("name") self.user_model = _config.pop("user_model") self.termination_signal = _config.pop("termination_signal", TERMINATION_SIGNAL) self.num_retries = _config.pop("num_retries", 3) self.user_model_kwargs = _config self._instance_dict = {} async def start_interaction( self, instance_id: Optional[str] = None, ground_truth: Optional[str] = None, **kwargs ) -> str: if instance_id is None: instance_id = str(uuid4()) self._instance_dict[instance_id] = { "response": "", "ground_truth": ground_truth, "reward": 0.0, } self.interaction_kwargs = kwargs assert "single_turn_prompt" in kwargs, "single_turn_prompt is required in interaction_kwargs" return instance_id @rollout_trace_op async def generate_response( self, instance_id: str, messages: list[dict[str, Any]], **kwargs ) -> tuple[bool, str, float, dict]: assert messages[-1]["role"] in ["system", "assistant"], ( "Last message input to the user model must be from system or assistant role" ) import litellm chat_history = self._parse_messages(messages, strip_sys_prompt=True) prompt = USER_PROMPT_TEMPLATE.format( task_desc=self.interaction_kwargs.get("task_desc", "general assistance task"), single_turn_prompt=self.interaction_kwargs["single_turn_prompt"], chat_history=chat_history, termination_signal=self.termination_signal, ) response = "" for i in range(self.num_retries): try: full_response = ( ( await litellm.acompletion( model=self.user_model, messages=[{"role": "user", "content": prompt}], **self.user_model_kwargs, ) ) .choices[0] .message.content ) except litellm.RateLimitError as e: logger.warning(f"[CollabLLMInteraction] hit RateLimitError: {e}. Retrying...") await asyncio.sleep(max(2**i, 60)) continue except Exception as e: logger.exception(f"An unexpected error occurred in CollabLLMAgentLoop: {e}") continue try: if isinstance(full_response, str): full_response = extract_json(full_response) except Exception as e: logger.warning(f"[CollabLLMInteraction] Error extracting JSON: {e}. Retrying...") continue if isinstance(full_response, dict): keys = full_response.keys() if {"current_answer", "thought", "response"}.issubset(keys): response = full_response.pop("response") if isinstance(response, str): break else: logger.warning( f"[CollabLLMInteraction] got an invaild response {response} full_response {full_response}. \ Retrying..." ) continue else: logger.warning(f"[CollabLLMInteraction] Keys {keys} do not match expected keys. Retrying...") continue self._instance_dict[instance_id]["response"] = response logger.debug(f"[CollabLLMInteraction] User: {response}") should_terminate_sequence = self.termination_signal in response reward = 0.0 return should_terminate_sequence, response, reward, {} async def finalize_interaction(self, instance_id: str, **kwargs) -> None: del self._instance_dict[instance_id] def _parse_messages(self, messages, strip_sys_prompt=True): if messages is None: return "" if strip_sys_prompt: messages = [msg for msg in messages if msg["role"] != "system"] messages = [remove_think_block(msg) for msg in messages] chat = "\n".join(f"**{m['role'].capitalize()}**: {m['content']}" for m in messages) return chat def extract_json(s): def convert_value(value): true_values = {"true": True, "false": False, "null": None} value_lower = value.lower() if value_lower in true_values: return true_values[value_lower] try: if "." 
in value or "e" in value.lower(): return float(value) else: return int(value) except ValueError: return value # Return as string if not a number def parse_number(s, pos): start = pos while pos < len(s) and s[pos] in "-+0123456789.eE": pos += 1 num_str = s[start:pos] try: if "." in num_str or "e" in num_str.lower(): return float(num_str), pos else: return int(num_str), pos except ValueError: logger.error(f"Invalid number at position {start}: {num_str}") raise def skip_whitespace(s, pos): while pos < len(s) and s[pos] in " \t\n\r": pos += 1 return pos def parse_string(s, pos): quote_char = s[pos] assert quote_char in ('"', "'") pos += 1 result = "" while pos < len(s): c = s[pos] if c == "\\": pos += 1 if pos >= len(s): raise ValueError("Invalid escape sequence") c = s[pos] escape_sequences = {"n": "\n", "t": "\t", "r": "\r", "\\": "\\", quote_char: quote_char} result += escape_sequences.get(c, c) elif c == quote_char: pos += 1 # Attempt to convert to a number if possible converted_value = convert_value(result) return converted_value, pos else: result += c pos += 1 raise ValueError("Unterminated string") def parse_key(s, pos): pos = skip_whitespace(s, pos) if s[pos] in ('"', "'"): key, pos = parse_string(s, pos) return key, pos else: raise ValueError(f"Expected string for key at position {pos}") def parse_object(s, pos): obj = {} assert s[pos] == "{" pos += 1 pos = skip_whitespace(s, pos) while pos < len(s) and s[pos] != "}": pos = skip_whitespace(s, pos) key, pos = parse_key(s, pos) pos = skip_whitespace(s, pos) if pos >= len(s) or s[pos] != ":": raise ValueError(f'Expected ":" at position {pos}') pos += 1 pos = skip_whitespace(s, pos) value, pos = parse_value(s, pos) obj[key] = value pos = skip_whitespace(s, pos) if pos < len(s) and s[pos] == ",": pos += 1 pos = skip_whitespace(s, pos) elif pos < len(s) and s[pos] == "}": break elif pos < len(s) and s[pos] != "}": raise ValueError(f'Expected "," or "}}" at position {pos}') if pos >= len(s) or s[pos] != "}": raise ValueError(f'Expected "}}" at position {pos}') pos += 1 return obj, pos def parse_array(s, pos): lst = [] assert s[pos] == "[" pos += 1 pos = skip_whitespace(s, pos) while pos < len(s) and s[pos] != "]": value, pos = parse_value(s, pos) lst.append(value) pos = skip_whitespace(s, pos) if pos < len(s) and s[pos] == ",": pos += 1 pos = skip_whitespace(s, pos) elif pos < len(s) and s[pos] == "]": break elif pos < len(s) and s[pos] != "]": raise ValueError(f'Expected "," or "]" at position {pos}') if pos >= len(s) or s[pos] != "]": raise ValueError(f'Expected "]" at position {pos}') pos += 1 return lst, pos def parse_triple_quoted_string(s, pos): if s[pos : pos + 3] == "'''": quote_str = "'''" elif s[pos : pos + 3] == '"""': quote_str = '"""' else: raise ValueError(f"Expected triple quotes at position {pos}") pos += 3 result = "" while pos < len(s): if s[pos : pos + 3] == quote_str: pos += 3 # Attempt to convert to a number if possible converted_value = convert_value(result) return converted_value, pos else: result += s[pos] pos += 1 raise ValueError("Unterminated triple-quoted string") def parse_value(s, pos): pos = skip_whitespace(s, pos) if pos >= len(s): raise ValueError("Unexpected end of input") if s[pos] == "{": return parse_object(s, pos) elif s[pos] == "[": return parse_array(s, pos) elif s[pos : pos + 3] in ("'''", '"""'): return parse_triple_quoted_string(s, pos) elif s[pos] in ('"', "'"): return parse_string(s, pos) elif s[pos : pos + 4].lower() == "true": return True, pos + 4 elif s[pos : pos + 5].lower() == "false": return 
False, pos + 5 elif s[pos : pos + 4].lower() == "null": return None, pos + 4 elif s[pos] in "-+0123456789.": return parse_number(s, pos) else: raise ValueError(f"Unexpected character at position {pos}: {s[pos]}") json_start = s.index("{") json_end = s.rfind("}") s = s[json_start : json_end + 1] s = s.strip() result, pos = parse_value(s, 0) pos = skip_whitespace(s, pos) if pos != len(s): raise ValueError(f"Unexpected content at position {pos}") return result ================================================ FILE: verl_distillation/recipe/collabllm/config/agent.yaml ================================================ - name: collabllm_agent _target_: recipe.collabllm.collabllm_agent_loop.CollabLLMAgentLoop ================================================ FILE: verl_distillation/recipe/collabllm/config/collabllm_interaction_config.yaml ================================================ interaction: - name: "collabllm" class_name: "recipe.collabllm.collabllm_interation.CollabLLMInteraction" config: { "user_model": "gpt-4o-mini", "num_retries": 3, "max_tokens": 512, "temperature": 1.0, "enable_log": True } ================================================ FILE: verl_distillation/recipe/collabllm/metrics/accuracy.py ================================================ # Copyright 2025 CollabLLM team and/or its affiliates # Copyright 2025 Bytedance Ltd. and/or its affiliates # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from recipe.collabllm.utils import extract_json, parse_messages ACCURACY_PROMPT = '''You are a helpful and meticulous evaluator. Your task is to \ evaluate the *accuracy* of an AI model's answer to a target question. \ You will be given the target question, the ground truth answer, and the conversation between the AI and the user. Provided Information: <|The Start of Target Question and Ground Truth Answer|> Target Question: {single_turn_prompt} Ground Truth Answer: {ground_truth} <|The End of Target Question and Ground Truth Answer|> <|The Start of The Conversation|> {chat_history} <|The End of The Conversation|> You should determine whether the model's final response to the target question is \ factually correct and consistent with the provided ground truth. Rating criteria (binary): • 1 = Correct — the response matches the ground truth. • 0 = Incorrect — the response contradicts or misses the ground truth. Output format (JSON): {{ "thought": "", "accuracy": <0 or 1> }} Double check if the JSON object is formatted correctly. Ensure that all fields are present and properly structured. \ Use " or """ to wrap up the thought and use single quotes inside the "thought" field to avoid JSON escape issues. 
Your evaluation: ''' async def compute_score(data_source, messages, ground_truth, extra_info, **kwargs): # Check if litellm is available, fallback to openai if not try: import litellm use_litellm = True except ImportError: # litellm not found, falling back to openai import openai use_litellm = False chat_history = parse_messages(messages, strip_sys_prompt=True) prompt = ACCURACY_PROMPT.format( single_turn_prompt=extra_info["interaction_kwargs"]["single_turn_prompt"], ground_truth=ground_truth, chat_history=chat_history, ) if use_litellm: full_response = ( ( await litellm.acompletion( messages=[{"role": "user", "content": prompt}], **kwargs, ) ) .choices[0] .message.content ) else: client = openai.AsyncOpenAI() # Assumes API key is set in environment full_response = ( ( await client.chat.completions.create( messages=[{"role": "user", "content": prompt}], **kwargs, ) ) .choices[0] .message.content ) full_response = extract_json(full_response) assert isinstance(full_response, dict), f"Expected a dict, got {type(full_response)}" assert {"accuracy", "thought"}.issubset(full_response.keys()), ( f"Expected keys not found from {full_response.keys()}" ) accuracy = full_response.pop("accuracy") return float(accuracy) ================================================ FILE: verl_distillation/recipe/collabllm/metrics/bleu_score.py ================================================ # Copyright 2025 CollabLLM team and/or its affiliates # Copyright 2025 Bytedance Ltd. and/or its affiliates # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from nltk.translate.bleu_score import sentence_bleu from recipe.collabllm.utils import extract_json, parse_messages EXTRACT_MULTITURN_COMPLETION_PROMPT = '''You are a thorough and diligent conversation analyzer. \ Your task is to extract the final and complete version of a document that was generated during \ a multiturn conversation between a user and a chat assistant. \ The extracted content should reflect the final and comprehensive response provided by the assistant \ based on the user’s request. You will be provided with the conversation: <|The Start of The Conversation|> {chat_history} <|The End of The Conversation|> Instructions for Extraction: 1. Identify the Most Up-to-Date Contents: Review the entire conversation to identify the most updated parts \ of the content provided by the assistant. This may include: - Different sections of text (e.g., an essay, report, or article). 2. Integrate Revisions: If the assistant made revisions, updates, or added sections throughout the conversation, \ ensure that these changes are fully integrated into the final content. The goal is to extract a single, cohesive \ output that incorporates all modifications and additions made during the conversation. For example, if the assistant \ writes an introduction at the beginning and moves on to the conclusion, the final output should include both the \ introduction and the conclusion. 3.
Focus on Completeness: - For text-based documents: Ensure that the extracted content is comprehensive and represents the full document \ or section as discussed in the conversation. You should output a JSON object with two entries: - "thought" (str): Output your thought process when extracting the final content. 1. How do different parts of the conversation contribute to the final output? 2. How do you make sure you included the most updated and complete information? 3. How do you make sure you did not include any information that is not necessary? - "final_completion" (str): The final and complete version of the document extracted from the conversation. Note: 1. If there are multiple lines, you should use triple quotes (""") to wrap the content. For example, \ "final_completion": """first line. second line.""" or "thought": """first line; second line.""". 2. In the "final_completion" entry, replace all double quotes (") with single quotes (') to prevent JSON formatting \ issues. For example, you can output "final_completion": "'Hello World' is a common phrase." Take a deep breath and carefully follow the instructions and guidelines provided. ''' async def compute_score(data_source, messages, ground_truth, extra_info, **kwargs): # Check if litellm is available, fallback to openai if not try: import litellm use_litellm = True except ImportError: # litellm not found, falling back to openai import openai use_litellm = False chat_history = parse_messages(messages, strip_sys_prompt=True) prompt = EXTRACT_MULTITURN_COMPLETION_PROMPT.format(chat_history=chat_history) if use_litellm: full_response = ( ( await litellm.acompletion( messages=[{"role": "user", "content": prompt}], **kwargs, ) ) .choices[0] .message.content ) else: client = openai.AsyncOpenAI() # Assumes API key is set in environment full_response = ( ( await client.chat.completions.create( messages=[{"role": "user", "content": prompt}], **kwargs, ) ) .choices[0] .message.content ) full_response = extract_json(full_response) assert isinstance(full_response, dict), f"Expected a dict, got {type(full_response)}" assert {"final_completion", "thought"}.issubset(full_response.keys()), ( f"Expected keys not found from {full_response.keys()}" ) final_completion = full_response.pop("final_completion") bleu = sentence_bleu([ground_truth], final_completion) return float(bleu) ================================================ FILE: verl_distillation/recipe/collabllm/metrics/interactivity.py ================================================ # Copyright 2025 CollabLLM team and/or its affiliates # Copyright 2025 Bytedance Ltd. and/or its affiliates # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from recipe.collabllm.utils import extract_json, parse_messages INTERACTIVITY_PROMPT = '''You are a helpful and meticulous conversation evaluator. 
\ Your task is to evaluate the interactivity of the responses provided by an AI assistant \ to user questions in a given conversation: <|The Start of the Conversation to be Evaluated|> {chat_history} <|The End of the Conversation to be Evaluated|> You should assess the assistant's engagement, clarity, and ability to understand the user's needs. \ Give a float number between 0 and 1. Scoring Criteria: - Let U = user understanding & response clarity ∈ [0,1] - 1.0 = Fully understands the user's intent and gives a clear answer. - 0.7 = Mostly understands and the answer is generally clear. - 0.3 = Partially misunderstands or the answer is hard to follow. - 0.0 = Misunderstands the intent and gives an unclear or irrelevant answer. - Let Q = clarification in [0,1] - 1.0 = Asks precise, necessary clarifying questions when needed. - 0.7 = Asks somewhat helpful but incomplete clarifications. - 0.3 = Only asks generic questions (e.g., “Does that help?”). - 0.0 = Asks no clarifying questions when needed. - Let S = suggestion helpfulness in [0,1] - 1.0 = Provides useful, actionable suggestions. - 0.7 = Suggestions are somewhat helpful but limited. - 0.3 = Suggestions are vague or generic. - 0.0 = No suggestions when they would clearly help. score = average([U, Q, S]) Output format (JSON): {{ "thought": "", "interactivity": }} Double check if the JSON object is formatted correctly. Ensure that all fields are present and properly structured. \ Use " or """ to wrap up the thought. You should not use other triple quotes inside the "thought" field. \ Instead you should use single quotes to avoid JSON escape issues. Your evaluation: ''' async def compute_score(data_source, messages, ground_truth, extra_info, **kwargs): # Check if litellm is available, fallback to openai if not try: import litellm use_litellm = True except ImportError: # litellm not found, falling back to openai import openai use_litellm = False chat_history = parse_messages(messages, strip_sys_prompt=True) prompt = INTERACTIVITY_PROMPT.format(chat_history=chat_history) if use_litellm: full_response = ( ( await litellm.acompletion( messages=[{"role": "user", "content": prompt}], **kwargs, ) ) .choices[0] .message.content ) else: client = openai.AsyncOpenAI() # Assumes API key is set in environment full_response = ( ( await client.chat.completions.create( messages=[{"role": "user", "content": prompt}], **kwargs, ) ) .choices[0] .message.content ) full_response = extract_json(full_response) assert isinstance(full_response, dict), f"Expected a dict, got {type(full_response)}" assert {"interactivity", "thought"}.issubset(full_response.keys()), ( f"Expected keys not found from {full_response.keys()}" ) interactivity = full_response.pop("interactivity") return float(interactivity) ================================================ FILE: verl_distillation/recipe/collabllm/metrics/pass_rate.py ================================================ # Copyright 2025 CollabLLM team and/or its affiliates # Copyright 2025 Bytedance Ltd. and/or its affiliates # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. from bigcodebench.eval import untrusted_check from recipe.collabllm.utils import extract_json, parse_messages EXTRACT_MULTITURN_COMPLETION_PROMPT = '''You are a thorough and diligent conversation analyzer. \ Your task is to extract the final and complete version of a code function {entry_point} that was generated \ during a multiturn conversation between a user and a chat assistant. \ The extracted content should reflect the final and comprehensive response provided by the \ assistant based on the user’s request. You will be provided with the task and the conversation: <|The Start of The Task|> {single_turn_prompt} <|The End of The Task|> <|The Start of The Conversation|> {chat_history} <|The End of The Conversation|> Instructions for Extraction: 1. Identify the Most Up-to-Date Contents: Review the entire conversation to identify the most updated parts of \ the content provided by the assistant. This may include: - Different parts of the code snippet, function, class, or script. 2. Integrate Revisions: If the assistant made revisions, updates, or added sections throughout the conversation, \ ensure that these changes are fully integrated into the final content. The goal is to extract a single, cohesive \ output that incorporates all modifications and additions made during the conversation. For example, if the assistant \ writes a function at the beginning and changes a part, the final output should take the modification into account. 3. Focus on Completeness: - For code: Extract a complete and functional code snippet, including all necessary components such as imports, \ functions, classes, and any other essential elements. The code should be runnable, but you do not need to \ include any testing examples, including the contents after `if __name__ == "__main__":`. Only the function code \ is required. You should output a JSON object with two entries: - "thought" (str): Output your thought process when extracting the final content. 1. How do different parts of the conversation contribute to the final output? 2. How do you make sure you included the most updated and complete information? 3. How do you make sure you did not include any information that is not necessary? - "final_completion" (str): The final and complete version of the code extracted from the conversation. \ Rename the main function for the task to {entry_point} if needed. Remove any comments wrapped by """. Note: 1. If there are multiple lines, you should use triple quotes (""") to wrap the content. For example, \ "final_completion": """first line. second line.""" or "thought": """first line; second line.""". You should not use other triple quotes inside. 2. In the "final_completion" entry, replace all double quotes (") with single quotes (') to prevent JSON formatting \ issues. For example, you can output "final_completion": "'Hello World' is a common phrase." Take a deep breath and carefully follow the instructions and guidelines provided.
''' async def compute_score(data_source, messages, ground_truth, extra_info, **kwargs): # Check if litellm is available, fallback to openai if not try: import litellm use_litellm = True except ImportError: # litellm not found, falling back to openai import openai use_litellm = False chat_history = parse_messages(messages, strip_sys_prompt=True) prompt = EXTRACT_MULTITURN_COMPLETION_PROMPT.format( chat_history=chat_history, single_turn_prompt=extra_info["interaction_kwargs"]["single_turn_prompt"], entry_point=extra_info["single_turn_metadata"]["entry_point"], ) if use_litellm: full_response = ( ( await litellm.acompletion( messages=[{"role": "user", "content": prompt}], **kwargs, ) ) .choices[0] .message.content ) else: client = openai.AsyncOpenAI() # Assumes API key is set in environment full_response = ( ( await client.chat.completions.create( messages=[{"role": "user", "content": prompt}], **kwargs, ) ) .choices[0] .message.content ) full_response = extract_json(full_response) assert isinstance(full_response, dict), f"Expected a dict, got {type(full_response)}" assert {"final_completion", "thought"}.issubset(full_response.keys()), ( f"Expected keys not found from {full_response.keys()}" ) final_completion = full_response.pop("final_completion") metadata = extra_info["single_turn_metadata"] res = untrusted_check( final_completion, metadata["test"], metadata["entry_point"], max_as_limit=300 * 1024, max_data_limit=300 * 1024, max_stack_limit=300 * 1024, min_time_limit=60, gt_time_limit=60, ) passed = res[0] == "pass" # info = res[1] # for printing extra info return float(passed) ================================================ FILE: verl_distillation/recipe/collabllm/metrics/token_amount.py ================================================ # Copyright 2025 CollabLLM team and/or its affiliates # Copyright 2025 Bytedance Ltd. and/or its affiliates # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. def compute_score(data_source, messages, ground_truth, extra_info, **kwargs): prompt = extra_info["prompt"] # Count the tokens in the future conversation, i.e., the messages beyond the original prompt; # a negative metric weight turns this count into a length penalty future_conv = messages[len(prompt) :] # simple whitespace-based length estimation total_tokens = sum(len(m.content.split()) for m in future_conv) return total_tokens ================================================ FILE: verl_distillation/recipe/collabllm/process_dataset.py ================================================ # Copyright 2025 CollabLLM team and/or its affiliates # Copyright 2025 Bytedance Ltd. and/or its affiliates # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
#!/usr/bin/env python3 """ # available datasets: # math-hard(-large), medium(-large), bigcodebench(-large) # to create your own dataset, refer to https://github.com/Wuyxin/collabllm DATASET=math-hard-large python recipe/collabllm/process_dataset.py \ --dataset collabllm/collabllm-multiturn-$DATASET \ --local_dir $HOME/data/collabllm-$DATASET \ --dataset_type sft python recipe/collabllm/process_dataset.py \ --dataset collabllm/collabllm-multiturn-$DATASET \ --local_dir $HOME/data/collabllm-$DATASET \ --dataset_type rl Preprocess collabllm/collabllm-multiturn-math-hard into (ground_truth, extra_info). - ground_truth: taken from the example's `ground_truth` field, falling back to `single_turn_completion`, then `completion` - extra_info: a shallow copy of the original example plus bookkeeping fields - reward_model: {"style": "rule", "ground_truth": ground_truth} Saves one parquet per split into --local_dir. """ import argparse import json import os import uuid from typing import Any, Optional from datasets import Dataset, concatenate_datasets, load_dataset SYSTEM_PROMPT = """The assistant is designed to be helpful, proactive, and highly interactive. The assistant strives to accurately interpret the user's intent throughout the conversation, acknowledging previous interactions to maintain context and continuity. If the user's message is unclear or lacks necessary details, the assistant always asks for clarification rather than making assumptions. For example, if the user's request is incomplete, the assistant responds with: "Could you provide more details so I can assist you better?" The assistant asks specific follow-up questions and offers suggestions based on the user's needs, avoiding vague or generic prompts. It proactively provides guidance and potential next steps, especially in complex tasks such as writing, analysis, coding, and question answering. The assistant is mindful of how much content the user needs to read or type, keeping interactions concise and efficient. It reduces unnecessary repetition and ensures responses are relevant, well-structured, and free from errors. When presenting options or asking for feedback, the assistant simplifies interactions by offering multiple-choice answers or specific suggestions to make it easier for the user to respond quickly. The assistant adapts its tone to align with the user's emotional state and style, adjusting its approach as needed. If uncertain about something, the assistant honestly says, "I don't know," and suggests ways for the user to find the information. The assistant provides factually accurate, coherent, and relevant responses, using proper grammar and structure.
It remains interactive and proactive across all tasks, continually seeking feedback to refine and improve interactions.""" # Required fields: "prompt", "ground_truth", "extra_info" # In "extra_info" dict: # (1) Required: "single_turn_prompt", which is the specific problem used to inform the user simulator, # (2) Optional: "task_desc" (a short task description), # (3) Optional: other fields for customized reward computation def collapse_example(example: dict[str, Any]) -> dict[str, Any]: if "prompt" not in example: raise ValueError("Missing required 'prompt' field.") ground_truth = ( example.get("ground_truth") or example.get("single_turn_completion") or example.get("completion") or "" ) extra_info = {} for k, v in example.items(): if k in ("prompt", "ground_truth", "extra_info"): continue extra_info.setdefault(k, v) # keep extra_info values if keys overlap # make sure extra_info has the required fields assert "single_turn_prompt" in extra_info, "Missing 'single_turn_prompt' in extra_info." # add system prompt as the beginning of the list example["prompt"] = [{"role": "system", "content": SYSTEM_PROMPT}] + example["prompt"] extra_info.setdefault("prompt", example["prompt"]) # save the original prompt extra_info.setdefault( "interaction_kwargs", { "name": "collabllm", "single_turn_prompt": extra_info.pop("single_turn_prompt"), "task_desc": extra_info.pop("task_desc", "general ask-for-assistance task"), }, ) return { "prompt": example["prompt"], "ground_truth": ground_truth, "raw_prompt": example["prompt"], # save the original prompt "extra_info": extra_info, "reward_model": {"style": "rule", "ground_truth": ground_truth}, "data_source": "collabllm", "agent_name": "collabllm_agent", "index": str(uuid.uuid4()), }
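# Illustrative sketch (an assumption added for clarity; field values are invented):
# after `collapse_example`, one RL row looks roughly like
#
#     {
#         "prompt": [{"role": "system", "content": SYSTEM_PROMPT}, {"role": "user", "content": "..."}],
#         "ground_truth": "42",
#         "raw_prompt": [...],  # same messages as "prompt"
#         "extra_info": {
#             "interaction_kwargs": {
#                 "name": "collabllm",
#                 "single_turn_prompt": "...",
#                 "task_desc": "solving math problems",
#             },
#             ...,  # remaining original columns
#         },
#         "reward_model": {"style": "rule", "ground_truth": "42"},
#         "data_source": "collabllm",
#         "agent_name": "collabllm_agent",
#         "index": "<uuid4>",
#     }
#
# "interaction_kwargs" is what the agent loop forwards to
# CollabLLMInteraction.start_interaction when sampling future conversations.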
# ---------- IO helpers ---------- def save_parquet(ds_split: Dataset, filename: str, out_dir: str) -> None: os.makedirs(out_dir, exist_ok=True) path = os.path.join(out_dir, f"{filename}.parquet") ds_split.to_parquet(path) print(f"[OK] Wrote {filename}.parquet → {path} ({len(ds_split)} rows)") def maybe_copy_to_hdfs(local_dir: str, hdfs_dir: Optional[str]) -> None: if not hdfs_dir: return try: from verl.utils.hdfs_io import copy, makedirs # type: ignore except Exception as e: print(f"[WARN] Skipping HDFS copy (verl not available): {e}") return makedirs(hdfs_dir) copy(src=local_dir, dst=hdfs_dir) print(f"[OK] Copied {local_dir} → {hdfs_dir}") # ---------- Main ---------- def main(): ap = argparse.ArgumentParser() ap.add_argument( "--dataset", default="collabllm/collabllm-multiturn-math-hard", help="HF dataset path or local dir/file." ) ap.add_argument("--task_desc", default="solving math problems", help="Task description for the dataset.") ap.add_argument("--local_dir", default="~/data/collabllm-math-hard", help="Output directory.") ap.add_argument("--hdfs_dir", default=None, help="Optional HDFS destination (requires verl).") ap.add_argument( "--validation_size", type=float, default=0.1, help="Validation split size (fraction or absolute int)." ) ap.add_argument("--seed", type=int, default=42, help="Random seed for splitting.") ap.add_argument("--num_proc", type=int, default=1, help="Parallel workers for map().") ap.add_argument("--dataset_type", default="rl", choices=["rl", "sft"], help="Type of dataset (e.g., 'rl', 'sft').") args = ap.parse_args() out_dir = os.path.expanduser(args.local_dir) os.makedirs(out_dir, exist_ok=True) print(f"[INFO] Loading dataset: {args.dataset}") ds_dict = load_dataset(args.dataset) parts = list(ds_dict.values()) ds_all: Dataset = parts[0] if len(parts) == 1 else concatenate_datasets(parts) # Dataset({ # features: ['prompt', 'completion', 'conv_id', 'score', 'single_turn_prompt', # 'single_turn_completion', 'single_turn_metadata', 'turn_id', 'sessions', 'rewards'], # num_rows: xxx # }) if args.dataset_type == "rl": # If multiple splits exist, merge them before collapsing/splitting. ds_all = ds_all.map(lambda x: {"task_desc": args.task_desc}, num_proc=args.num_proc) print(f"[INFO] Collapsing to formatted fields on {len(ds_all)} rows…") ds_all = ds_all.map( function=collapse_example, remove_columns=ds_all.column_names, num_proc=args.num_proc, ) def dedup_by_prompt(dataset): seen = set() unique_rows = [] for ex in dataset: prompt_key = json.dumps(ex["prompt"], sort_keys=True, ensure_ascii=False) if prompt_key not in seen: seen.add(prompt_key) unique_rows.append(ex) return Dataset.from_list(unique_rows) ds_all = dedup_by_prompt(ds_all) elif args.dataset_type == "sft": df = ds_all.to_pandas() # Sort so that within each conv_id the highest turn_id is first, # and if multiple rows share the same turn_id, the highest score comes first df = df.sort_values(["conv_id", "turn_id", "score"], ascending=[True, False, False]) # Keep only the top row per conv_id df = df.drop_duplicates(subset="conv_id", keep="first") # Back to HF Dataset ds_all = Dataset.from_pandas(df, preserve_index=False) # Append assistant response into prompt list def append_completion(example): example["prompt"] = ( [{"role": "system", "content": SYSTEM_PROMPT}] + example["prompt"] + [{"role": "assistant", "content": example["completion"]}] ) return example ds_all = ds_all.map(append_completion) # Keep only prompt column cols_to_remove = [col for col in ds_all.column_names if col != "prompt"] ds_all = ds_all.remove_columns(cols_to_remove) print(f"[INFO] Splitting with validation_size={args.validation_size}, seed={args.seed}") split = ds_all.train_test_split(test_size=args.validation_size, seed=args.seed, shuffle=True) train_ds, val_ds = split["train"], split["test"] print(train_ds, val_ds) save_parquet(train_ds, f"{args.dataset_type}_train", out_dir) save_parquet(val_ds, f"{args.dataset_type}_validation", out_dir) maybe_copy_to_hdfs(local_dir=out_dir, hdfs_dir=args.hdfs_dir) print(f"[DONE] {args.dataset_type}_train.parquet and {args.dataset_type}_validation.parquet written.") if __name__ == "__main__": main() ================================================ FILE: verl_distillation/recipe/collabllm/reward_function.py ================================================ # Copyright 2025 CollabLLM team and/or its affiliates # Copyright 2025 Bytedance Ltd. and/or its affiliates # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import asyncio import importlib.util import os import sys from typing import Any, Callable, Optional import litellm import torch from transformers import PreTrainedTokenizer from verl import DataProto from verl.utils.reward_score import default_compute_score from verl.workers.reward_manager import register from verl.workers.reward_manager.abstract import AbstractRewardManager TERMINATION_SIGNAL = "[[TERMINATE CHAT]]" async def conversation_level_reward_func( data_source, messages, ground_truth, extra_info, metrics, **kwargs ) -> torch.Tensor: """ Async version of conversation-level reward function. Apply conversation-level reward function to the future interactions between the user simulator and policy model, which are generated from `verl/interactions/collabllm_interation.py` """ num_retries = kwargs.get("num_retries", 6) rewards = {} for metric in metrics: current_dir = os.path.dirname(os.path.abspath(__file__)) metric_file_path = os.path.join(current_dir, f"metrics/{metric}.py") if not os.path.exists(metric_file_path): print(f"Error: Metric file '{metric_file_path}' not found. Assigning 0 to metric '{metric}'.") rewards[metric] = 0.0 continue spec = importlib.util.spec_from_file_location(f"metric_{metric}", metric_file_path) if spec is None: print(f"Error: Could not create spec for metric '{metric}'. Assigning 0 to metric '{metric}'.") rewards[metric] = 0.0 continue module = importlib.util.module_from_spec(spec) try: sys.modules[f"metric_{metric}"] = module assert spec.loader is not None spec.loader.exec_module(module) except Exception as e: print(f"Error loading metric module from '{metric_file_path}': {e}. Assigning 0 to metric '{metric}'.") rewards[metric] = 0.0 continue # Assume each metric file has a compute_score function if not hasattr(module, "compute_score"): print( f"Error: Function 'compute_score' not found in '{metric_file_path}'. Assigning 0 to metric '{metric}'." ) rewards[metric] = 0.0 continue compute_score_fn = module.compute_score # Retry mechanism for calling the metric function for attempt in range(num_retries): try: # Call the metric function (await if it's async) if asyncio.iscoroutinefunction(compute_score_fn): rewards[metric] = await compute_score_fn(data_source, messages, ground_truth, extra_info, **kwargs) else: rewards[metric] = compute_score_fn(data_source, messages, ground_truth, extra_info, **kwargs) break # Success, exit retry loop except Exception as e: if attempt == num_retries - 1: # Last attempt print( f"Error: Failed to compute metric '{metric}' after {num_retries} attempts. " f"Last error: {e}. Assigning 0 to metric '{metric}'." ) rewards[metric] = 0.0 else: print(f"Attempt {attempt + 1} failed for metric '{metric}': {e}. 
Retrying...") if isinstance(e, litellm.RateLimitError): await asyncio.sleep(max(2**attempt, 60)) # Exponential backoff # Return dict with metric names as keys return {metric: torch.tensor(reward, dtype=torch.float32) for metric, reward in rewards.items()} @register("collabllm") class CollabLLMRewardManager(AbstractRewardManager): """ The Reward Manager used in https://github.com/Wuyxin/collabllm/ """ def __init__( self, tokenizer: PreTrainedTokenizer, num_examine: int, metric_weights: dict, llm_judge_kwargs: dict, reward_fn_key: str = "data_source", compute_score: Optional[Callable] = None, normalize_by_data_source=False, ) -> None: self.tokenizer = tokenizer self.num_examine = num_examine # the number of batches of decoded responses to print to the console self.compute_score = compute_score or default_compute_score self.reward_fn_key = reward_fn_key self.metric_weights = metric_weights self.llm_judge_kwargs = llm_judge_kwargs self.normalize_by_data_source = normalize_by_data_source self.metrics = list(self.metric_weights.keys()) def __call__(self, data: DataProto, return_dict: bool = False) -> torch.Tensor | dict[str, Any]: # If there is rm score, we directly return rm score. Otherwise, we compute via rm_score_fn if "rm_scores" in data.batch.keys(): if return_dict: return {"reward_tensor": data.batch["rm_scores"]} else: return data.batch["rm_scores"] # Use thread-compatible async loop management instead of asyncio.run() loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) try: return loop.run_until_complete(self._compute_rewards_async(data, return_dict)) finally: loop.close() async def _compute_rewards_async(self, data: DataProto, return_dict: bool = False) -> torch.Tensor | dict[str, Any]: # batched scoring prompt_ids = data.batch["prompts"] prompt_length = prompt_ids.shape[-1] valid_response_length = data.batch["attention_mask"][:, prompt_length:].sum(dim=-1) data_source = data.non_tensor_batch["data_source"] ground_truth = data.non_tensor_batch["ground_truth"] extra_info = data.non_tensor_batch["extra_info"] message_lst = data.non_tensor_batch["messages"] # batch the messages into multiple num_repeat_rollouts = len(message_lst[0]["messages"]) batch_size = len(data_source) grouped_messages = [ [message_lst[i]["messages"][j] for i in range(len(message_lst))] for j in range(num_repeat_rollouts) ] # Flatten lists for all batch items across all rollouts flattened_data_sources = [data_source[i] for _ in range(num_repeat_rollouts) for i in range(batch_size)] flattened_ground_truths = [ground_truth[i] for _ in range(num_repeat_rollouts) for i in range(batch_size)] flattened_extra_infos = [extra_info[i] for _ in range(num_repeat_rollouts) for i in range(batch_size)] flattened_messages = [grouped_messages[j][i] for j in range(num_repeat_rollouts) for i in range(batch_size)] if num_repeat_rollouts > 0: tasks = [ self.compute_score( flattened_data_sources[i], flattened_messages[i], flattened_ground_truths[i], flattened_extra_infos[i], self.metrics, **self.llm_judge_kwargs, ) for i in range(len(flattened_data_sources)) ] score_dicts = await asyncio.gather(*tasks) # Aggregate scores for each metric across repeated rollouts scores_by_metrics = { metric: torch.stack([score_dict[metric] for score_dict in score_dicts]) .view(num_repeat_rollouts, -1) .sum(dim=0) for metric in self.metrics } # Apply metric-specific weights weighted_scores_by_metrics = { metric: torch.clamp( scores_by_metrics[metric] * self.metric_weights[metric] / num_repeat_rollouts, min=-1.0, max=1.0, ) for metric in 
self.metrics } # Compute mean of weighted scores for each metric mean_weighted_scores_by_metrics = { metric: weighted_scores_by_metrics[metric].mean(dim=0) for metric in self.metrics } # Combine weighted scores from all metrics into a single tensor scores = torch.stack([weighted_scores_by_metrics[metric] for metric in self.metrics]).sum(dim=0) else: score_dicts = [] scores = torch.full((batch_size,), 0.0, dtype=torch.float32, device=prompt_ids.device) mean_weighted_scores_by_metrics = {metric: 0.0 for metric in self.metrics} print("Scores:", scores, mean_weighted_scores_by_metrics) reward_tensor = torch.zeros_like(data.batch["responses"], dtype=torch.float32) for i in range(len(data)): reward_tensor[i, valid_response_length[i].item() - 1] = scores[i] if return_dict: return {"reward_tensor": reward_tensor} else: return reward_tensor ================================================ FILE: verl_distillation/recipe/collabllm/train_rl_collabllm.sh ================================================ # Usage: sh recipe/collabllm/train_rl_collabllm.sh set -x PROJECT_DIR="$(pwd)" export VLLM_USE_V1=1 RESUME_PATH="${1:-}" if [ -z "$RESUME_PATH" ]; then RESUME_PATH=null fi DATASET=math-hard-large PROJECT_DIR="$(pwd)" AGENTLOOP_CONFIG_PATH="$PROJECT_DIR/recipe/collabllm/config/agent.yaml" python3 -m verl.trainer.main_ppo \ trainer.val_before_train=False \ algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/collabllm-$DATASET/rl_train.parquet \ data.val_files=$HOME/data/collabllm-$DATASET/rl_validation.parquet \ reward_model.reward_manager=collabllm \ +reward_model.reward_kwargs.metric_weights.accuracy=1 \ +reward_model.reward_kwargs.metric_weights.interactivity=1 \ +reward_model.reward_kwargs.metric_weights.token_amount=-0.0001 \ +reward_model.reward_kwargs.llm_judge_kwargs.model=gpt-4o-mini \ +reward_model.reward_kwargs.llm_judge_kwargs.max_tokens=2048 \ +reward_model.reward_kwargs.llm_judge_kwargs.temperature=0 \ data.train_batch_size=16 \ data.max_prompt_length=8196 \ data.max_response_length=2048 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path="Qwen/Qwen2.5-7B-Instruct" \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=8 \ actor_rollout_ref.actor.use_dynamic_bsz=True \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=24000 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=True \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=True \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.mode=async \ actor_rollout_ref.rollout.gpu_memory_utilization=0.7 \ actor_rollout_ref.rollout.n=8 \ actor_rollout_ref.rollout.temperature=1.0 \ actor_rollout_ref.rollout.free_cache_engine=True \ actor_rollout_ref.rollout.multi_turn.enable=true \ actor_rollout_ref.rollout.multi_turn.format=hermes \ actor_rollout_ref.rollout.multi_turn.max_user_turns=2 \ actor_rollout_ref.rollout.multi_turn.max_assistant_turns=3 \ actor_rollout_ref.rollout.multi_turn.num_repeat_rollouts=3 \ actor_rollout_ref.rollout.agent.agent_loop_config_path=$AGENTLOOP_CONFIG_PATH \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console", "wandb"]' \ 
trainer.project_name=verlxcollabllm \ trainer.experiment_name=collabllm-qwen2.5-7B-$DATASET \ trainer.nnodes=1 \ trainer.n_gpus_per_node=8 \ actor_rollout_ref.rollout.tensor_model_parallel_size=1 \ trainer.save_freq=100 \ trainer.test_freq=10 \ trainer.total_epochs=20 \ custom_reward_function.path=recipe/collabllm/reward_function.py \ custom_reward_function.name=conversation_level_reward_func \ actor_rollout_ref.rollout.multi_turn.interaction_config_path="$PROJECT_DIR/recipe/collabllm/config/collabllm_interaction_config.yaml" \ trainer.resume_from_path=$RESUME_PATH ================================================ FILE: verl_distillation/recipe/collabllm/train_sft_collabllm.sh ================================================ #!/bin/bash set -x if [ "$#" -lt 1 ]; then echo "Usage: train_sft_collabllm.sh <nproc_per_node> [other_configs...]" exit 1 fi nproc_per_node=$1 # Shift the arguments so $@ refers to the rest shift 1 DATASET=math-hard-large torchrun --nnodes=1 --nproc_per_node=$nproc_per_node \ -m verl.trainer.fsdp_sft_trainer \ data.train_files=$HOME/data/collabllm-$DATASET/sft_train.parquet \ data.val_files=$HOME/data/collabllm-$DATASET/sft_validation.parquet \ data.multiturn.enable=true \ data.multiturn.messages_key=prompt \ optim.lr=1e-6 \ data.train_batch_size=64 \ data.micro_batch_size_per_gpu=2 \ data.max_length=8196 \ model.partial_pretrain=Qwen/Qwen2.5-7B-Instruct \ trainer.project_name=collabllm-sft-$DATASET \ trainer.experiment_name=collabllm-sft-qwen2.5-7B-$DATASET \ trainer.logger=console \ trainer.total_epochs=3 \ ulysses_sequence_parallel_size=1 \ use_remove_padding=true $@ ================================================ FILE: verl_distillation/recipe/collabllm/utils.py ================================================ # Copyright 2025 CollabLLM team and/or its affiliates # Copyright 2025 Bytedance Ltd. and/or its affiliates # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os import re logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) def parse_messages(messages, strip_sys_prompt=True): """ Args: messages: list of Message objects (or any objects with `role` and `content` attributes) Example: messages = [Message(role='user', content='Hello!'), Message(role='assistant', content='Hi!'), ...] """ if messages is None: return "" if strip_sys_prompt: messages = strip_system_prompt(messages) chat = "\n".join(f"**{m.role.capitalize()}**: {m.content}" for m in messages) return chat def strip_system_prompt(messages): """ Args: messages: list of Message objects (or any objects with `role` and `content` attributes) Example: messages = [Message(role='user', content='Hello!'), Message(role='assistant', content='Hi!'), ...] """ return [msg for msg in messages if msg.role != "system"] def extract_json(s): def convert_value(value): true_values = {"true": True, "false": False, "null": None} value_lower = value.lower() if value_lower in true_values: return true_values[value_lower] try: if "."

def extract_json(s):
    def convert_value(value):
        true_values = {"true": True, "false": False, "null": None}
        value_lower = value.lower()
        if value_lower in true_values:
            return true_values[value_lower]
        try:
            if "." in value or "e" in value.lower():
                return float(value)
            else:
                return int(value)
        except ValueError:
            return value  # Return as string if not a number

    def parse_number(s, pos):
        start = pos
        while pos < len(s) and s[pos] in "-+0123456789.eE":
            pos += 1
        num_str = s[start:pos]
        try:
            if "." in num_str or "e" in num_str.lower():
                return float(num_str), pos
            else:
                return int(num_str), pos
        except ValueError:
            logger.error(f"Invalid number at position {start}: {num_str}")
            raise

    def skip_whitespace(s, pos):
        while pos < len(s) and s[pos] in " \t\n\r":
            pos += 1
        return pos

    def parse_string(s, pos):
        quote_char = s[pos]
        assert quote_char in ('"', "'")
        pos += 1
        result = ""
        while pos < len(s):
            c = s[pos]
            if c == "\\":
                pos += 1
                if pos >= len(s):
                    raise ValueError("Invalid escape sequence")
                c = s[pos]
                escape_sequences = {"n": "\n", "t": "\t", "r": "\r", "\\": "\\", quote_char: quote_char}
                result += escape_sequences.get(c, c)
            elif c == quote_char:
                pos += 1
                # Attempt to convert to a number if possible
                converted_value = convert_value(result)
                return converted_value, pos
            else:
                result += c
            pos += 1
        raise ValueError("Unterminated string")

    def parse_key(s, pos):
        pos = skip_whitespace(s, pos)
        if s[pos] in ('"', "'"):
            key, pos = parse_string(s, pos)
            return key, pos
        else:
            raise ValueError(f"Expected string for key at position {pos}")

    def parse_object(s, pos):
        obj = {}
        assert s[pos] == "{"
        pos += 1
        pos = skip_whitespace(s, pos)
        while pos < len(s) and s[pos] != "}":
            pos = skip_whitespace(s, pos)
            key, pos = parse_key(s, pos)
            pos = skip_whitespace(s, pos)
            if pos >= len(s) or s[pos] != ":":
                raise ValueError(f'Expected ":" at position {pos}')
            pos += 1
            pos = skip_whitespace(s, pos)
            value, pos = parse_value(s, pos)
            obj[key] = value
            pos = skip_whitespace(s, pos)
            if pos < len(s) and s[pos] == ",":
                pos += 1
                pos = skip_whitespace(s, pos)
            elif pos < len(s) and s[pos] == "}":
                break
            elif pos < len(s) and s[pos] != "}":
                raise ValueError(f'Expected "," or "}}" at position {pos}')
        if pos >= len(s) or s[pos] != "}":
            raise ValueError(f'Expected "}}" at position {pos}')
        pos += 1
        return obj, pos

    def parse_array(s, pos):
        lst = []
        assert s[pos] == "["
        pos += 1
        pos = skip_whitespace(s, pos)
        while pos < len(s) and s[pos] != "]":
            value, pos = parse_value(s, pos)
            lst.append(value)
            pos = skip_whitespace(s, pos)
            if pos < len(s) and s[pos] == ",":
                pos += 1
                pos = skip_whitespace(s, pos)
            elif pos < len(s) and s[pos] == "]":
                break
            elif pos < len(s) and s[pos] != "]":
                raise ValueError(f'Expected "," or "]" at position {pos}')
        if pos >= len(s) or s[pos] != "]":
            raise ValueError(f'Expected "]" at position {pos}')
        pos += 1
        return lst, pos

    def parse_triple_quoted_string(s, pos):
        if s[pos : pos + 3] == "'''":
            quote_str = "'''"
        elif s[pos : pos + 3] == '"""':
            quote_str = '"""'
        else:
            raise ValueError(f"Expected triple quotes at position {pos}")
        pos += 3
        result = ""
        while pos < len(s):
            if s[pos : pos + 3] == quote_str:
                pos += 3
                # Attempt to convert to a number if possible
                converted_value = convert_value(result)
                return converted_value, pos
            else:
                result += s[pos]
                pos += 1
        raise ValueError("Unterminated triple-quoted string")

    def parse_value(s, pos):
        pos = skip_whitespace(s, pos)
        if pos >= len(s):
            raise ValueError("Unexpected end of input")
        if s[pos] == "{":
            return parse_object(s, pos)
        elif s[pos] == "[":
            return parse_array(s, pos)
        elif s[pos : pos + 3] in ("'''", '"""'):
            return parse_triple_quoted_string(s, pos)
        elif s[pos] in ('"', "'"):
            return parse_string(s, pos)
        elif s[pos : pos + 4].lower() == "true":
            return True, pos + 4
        elif s[pos : pos + 5].lower() == "false":
            return False, pos + 5
        elif s[pos : pos + 4].lower() == "null":
            return None, pos + 4
        elif s[pos] in "-+0123456789.":
            return parse_number(s, pos)
        else:
            raise ValueError(f"Unexpected character at position {pos}: {s[pos]}")

    json_start = s.index("{")
    json_end = s.rfind("}")
    s = s[json_start : json_end + 1]
    s = s.strip()

    result, pos = parse_value(s, 0)
    pos = skip_whitespace(s, pos)
    if pos != len(s):
        raise ValueError(f"Unexpected content at position {pos}")
    return result
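
# Lenient-parsing sketch (illustrative): extract_json pulls the outermost {...}
# span out of surrounding text and tolerates single quotes, triple-quoted
# strings, and Python-style booleans, coercing quoted numerals to numbers:
#
#     >>> extract_json("Judge output: {'score': '0.9', 'pass': True}")
#     {'score': 0.9, 'pass': True}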

def remove_think_block(msg: dict):
    """
    Remove <think>.*?</think> from content.
    """
    if "content" in msg and isinstance(msg["content"], str):
        msg["content"] = re.sub(r"<think>.*?</think>", "", msg["content"], flags=re.DOTALL).strip()
    return msg


def is_valid_messages(msg: dict) -> bool:
    """
    Check whether msg is a valid message, i.e.:
    1. <think> is paired with </think>
    2. content is not empty, either inside or outside the think block
    3. think blocks are not nested, and at most one <think> block is allowed
    4. content must not be empty after removing a trailing "<|im_end|>"
    """
    content = msg.get("content")
    if not isinstance(content, str):
        return True

    # Base case: empty or whitespace-only content is invalid.
    if not content.strip():
        return False

    num_think_open = content.count("<think>")
    num_think_close = content.count("</think>")

    # Rule 1: Check for paired tags.
    if num_think_open != num_think_close:
        return False

    # Rule 3: Allow at most one think block.
    if num_think_open > 1:
        return False

    # Case 1: No <think> blocks.
    if num_think_open == 0:
        visible_content = content
    # Case 2: Exactly one <think> block.
    else:
        # Rule 2: Check for empty content inside the think block.
        match = re.search(r"<think>(.*?)</think>", content, re.DOTALL)
        if not match or not match.group(1).strip():
            return False
        # The "visible" content is what's outside the think block.
        visible_content = re.sub(r"<think>.*?</think>", "", content, flags=re.DOTALL)

    visible_content = visible_content.strip()

    # Rule 4 & 2 (outside): Check if visible content is empty after handling <|im_end|>.
    if visible_content.endswith("<|im_end|>"):
        visible_content = visible_content[: -len("<|im_end|>")]
    if not visible_content.strip():
        return False

    return True
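
# Validity sketch (illustrative), following the <think> rules above:
#
#     >>> is_valid_messages({"content": "<think>plan</think>Answer.<|im_end|>"})
#     True
#     >>> is_valid_messages({"content": "<think></think>Answer."})  # empty think block
#     False
#     >>> is_valid_messages({"content": "<think>plan</think><|im_end|>"})  # nothing visible
#     False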


================================================
FILE: verl_distillation/recipe/dapo/README.md
================================================
# Recipe: Decoupled Clip and Dynamic Sampling Policy Optimization (DAPO)

> Open-Source Algorithm Implementation & Experiment Running: [Yuxuan Tong](https://tongyx361.github.io/), [Guangming Sheng](https://hk.linkedin.com/in/guangming-sheng-b50640211)

> [!IMPORTANT]
>
> **🔥 News!!!**
>
> - [2025/04] We reproduced the results of two versions of DAPO ([Full](./run_dapo_qwen2.5_32b.sh) & [w/o Dynamic Sampling](./run_dapo_wo_ds_qwen2.5_32b.sh)), achieving 52% and 50% on AIME 2024 respectively, based on [the latest codebase on `recipe/dapo`](https://github.com/volcengine/verl/tree/recipe/dapo/recipe/dapo). Please check the details in [W&B](https://wandb.ai/verl-org/DAPO%20Reproduction%20on%20verl/workspace?nw=wmb4qxfht0n).
> - [2025/03] We published the training record of [an early version of DAPO (w/o Token-level PG Loss & Dynamic Sampling)](./run_dapo_early_qwen2.5_32b.sh), achieving 44% on AIME 2024, in [W&B](https://wandb.ai/verl-org/DAPO%20Reproduction%20on%20verl/workspace?nw=wmb4qxfht0n).

🏠 [Homepage](https://dapo-sia.github.io/) | 📝 [Paper@arXiv](https://arxiv.org/abs/2503.14476) | 🤗 [Datasets&Models@HF](https://huggingface.co/collections/BytedTsinghua-SIA/dapo-67d7f1517ee33c8aed059da0) | 🐱 [Code@GitHub](https://github.com/volcengine/verl/tree/recipe/dapo/recipe/dapo) | 🐱 [Repo@GitHub](https://github.com/BytedTsinghua-SIA/DAPO)

> We propose the **D**ecoupled Clip and Dynamic s**A**mpling **P**olicy **O**ptimization (DAPO) algorithm. By making our work publicly available, we provide the broader research community and society with practical access to scalable reinforcement learning, enabling all to benefit from these advancements. Our system is based on the awesome [verl](https://github.com/volcengine/verl) framework. Thanks for their great work!
>
> Applying DAPO training to the Qwen2.5-32B base model outperforms the previous state-of-the-art DeepSeek-R1-Zero-Qwen-32B on AIME 2024, achieving **50%** accuracy with **50%** fewer training steps.
>
> ![dapo-main-result](https://dapo-sia.github.io/static/images/score.png)

## Quickstart

1. Prepare the datasets **on the Ray cluster**:

```bash
bash prepare_dapo_data.sh # This downloads the datasets to ${HOME}/verl/data by default
```

2. Submit the job to the Ray cluster **from any machine**:

```bash
cd verl # Repo root
export RAY_ADDRESS="http://${RAY_IP:-localhost}:8265" # The Ray cluster address to connect to
export WORKING_DIR="${PWD}" # The local directory to package to the Ray cluster
# Set the runtime environment like env vars and pip packages for the Ray cluster in yaml
export RUNTIME_ENV="./recipe/dapo/runtime_env.yaml" # This sets environment variables for the Ray cluster
bash recipe/dapo/run_dapo_qwen2.5_32b.sh # or other scripts
```

## Reproduction Runs

| Setup | AIME 2024 Acc. | Hardware | Image | Commit | Environment Variables | Training Script | Training Record |
| --- | --- | --- | --- | --- | --- | --- | --- |
| DAPO | 52% | 16x8xH800 | `hiyouga/verl:ngc-th2.6.0-cu126-vllm0.8.3-flashinfer0.2.2-cxx11abi0` | [`4f80e4`](https://github.com/volcengine/verl/tree/4f80e465c2ec79ab9c3c30ec74b9745de61d0490) | [runtime_env.yaml](https://github.com/volcengine/verl/blob/4f80e465c2ec79ab9c3c30ec74b9745de61d0490/recipe/dapo/runtime_env.yaml) | [run_dapo_qwen2.5_32b.sh](https://github.com/volcengine/verl/blob/4f80e465c2ec79ab9c3c30ec74b9745de61d0490/recipe/dapo/run_dapo_qwen2.5_32b.sh) | [W&B](https://wandb.ai/verl-org/DAPO%20Reproduction%20on%20verl/workspace?nw=wmb4qxfht0n) |
| DAPO w/o Dynamic Sampling | 50% | 16x8xH800 | `hiyouga/verl:ngc-th2.6.0-cu126-vllm0.8.3-flashinfer0.2.2-cxx11abi0` | [`4f80e4`](https://github.com/volcengine/verl/tree/4f80e465c2ec79ab9c3c30ec74b9745de61d0490) | [runtime_env.yaml](https://github.com/volcengine/verl/blob/4f80e465c2ec79ab9c3c30ec74b9745de61d0490/recipe/dapo/runtime_env.yaml) | [run_dapo_wo_ds_qwen2.5_32b.sh](https://github.com/volcengine/verl/blob/4f80e465c2ec79ab9c3c30ec74b9745de61d0490/recipe/dapo/run_dapo_wo_ds_qwen2.5_32b.sh) | [W&B](https://wandb.ai/verl-org/DAPO%20Reproduction%20on%20verl/workspace?nw=wmb4qxfht0n) |
| DAPO w/o Token-level Loss & Dynamic Sampling | 44% | 16x8xH20 | `hiyouga/verl:ngc-th2.5.1-cu120-vllm0.7.4-hotfix` | [`4f80e4`](https://github.com/volcengine/verl/tree/4f80e465c2ec79ab9c3c30ec74b9745de61d0490) | [runtime_env.yaml](https://github.com/volcengine/verl/blob/4f80e465c2ec79ab9c3c30ec74b9745de61d0490/recipe/dapo/runtime_env.yaml) | [run_dapo_early_qwen2.5_32b.sh](https://github.com/volcengine/verl/blob/4f80e465c2ec79ab9c3c30ec74b9745de61d0490/recipe/dapo/run_dapo_early_qwen2.5_32b.sh) | [W&B](https://wandb.ai/verl-org/DAPO%20Reproduction%20on%20verl/workspace?nw=wmb4qxfht0n) |

> [!IMPORTANT]
>
> **📢 Call for Contribution!**
>
> Welcome to submit your reproduction runs and setups!

## Configuration

### Separated Clip Epsilons (-> Clip-Higher)

An example configuration:

```yaml
actor_rollout_ref:
  actor:
    clip_ratio_low: 0.2
    clip_ratio_high: 0.28
```

`clip_ratio_low` and `clip_ratio_high` specify the $\varepsilon_{\text{low}}$ and $\varepsilon_{\text{high}}$ in the DAPO objective.

Core relevant code:

```python
pg_losses1 = -advantages * ratio
pg_losses2 = -advantages * torch.clamp(ratio, 1 - cliprange_low, 1 + cliprange_high)
pg_losses = torch.maximum(pg_losses1, pg_losses2)
```
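
As a sanity check, here is a minimal sketch (illustrative values, not from the paper) of how the asymmetric clipping behaves:

```python
import torch

advantages = torch.tensor([1.0, 1.0, -1.0])
ratio = torch.tensor([1.5, 0.5, 0.5])  # new_prob / old_prob
low, high = 0.2, 0.28

pg_losses1 = -advantages * ratio
pg_losses2 = -advantages * torch.clamp(ratio, 1 - low, 1 + high)
pg_losses = torch.maximum(pg_losses1, pg_losses2)
print(pg_losses)  # tensor([-1.2800, -0.5000,  0.8000])
```

The first (positive-advantage) token is clipped at the higher ceiling $1 + \varepsilon_{\text{high}} = 1.28$, leaving more room to raise the probability of low-probability exploratory tokens than PPO's symmetric clipping would.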

### Dynamic Sampling (with Group Filtering)

An example configuration:

```yaml
data:
  gen_batch_size: 1536
  train_batch_size: 512
algorithm:
  filter_groups:
    enable: True
    metric: acc # score / seq_reward / seq_final_reward / ...
    max_num_gen_batches: 10 # Non-positive values mean no upper limit
```

Setting `filter_groups.enable` to `True` will filter out groups whose outputs' `metric` values are all the same, e.g., for `acc`, groups whose outputs' accuracies are all 1 or all 0.

The trainer will repeat sampling with `gen_batch_size` until there are enough qualified groups for `train_batch_size`, or until it reaches the upper limit specified by `max_num_gen_batches`.

Core relevant code:

```python
prompt_bsz = self.config.data.train_batch_size
if num_prompt_in_batch < prompt_bsz:
    print(f'{num_prompt_in_batch=} < {prompt_bsz=}')
    num_gen_batches += 1
    max_num_gen_batches = self.config.algorithm.filter_groups.max_num_gen_batches
    if max_num_gen_batches <= 0 or num_gen_batches < max_num_gen_batches:
        print(f'{num_gen_batches=} < {max_num_gen_batches=}. Keep generating...')
        continue
    else:
        raise ValueError(
            f'{num_gen_batches=} >= {max_num_gen_batches=}. Generated too many. Please check your data.'
        )
else:
    # Align the batch
    traj_bsz = self.config.data.train_batch_size * self.config.actor_rollout_ref.rollout.n
    batch = batch[:traj_bsz]
```
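
The keep/drop decision itself is easy to replay offline. A minimal sketch with hypothetical `uid` and `acc` values:

```python
from collections import defaultdict

import numpy as np

uids = ["p0", "p0", "p1", "p1"]  # two prompts, two rollouts each
accs = [1.0, 0.0, 1.0, 1.0]      # per-rollout accuracy

groups = defaultdict(list)
for uid, acc in zip(uids, accs):
    groups[uid].append(acc)

kept = [uid for uid, vals in groups.items() if np.std(vals) > 0 or len(vals) == 1]
print(kept)  # ['p0']: 'p1' is all-correct, so it carries no GRPO learning signal
```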

### Flexible Loss Aggregation Mode (-> Token-level Loss)

An example configuration:

```yaml
actor_rollout_ref:
  actor:
    loss_agg_mode: "token-mean" # / "seq-mean-token-sum" / "seq-mean-token-mean"
    # NOTE: "token-mean" is the default behavior
```

Setting `loss_agg_mode` to `token-mean` will average the (policy gradient) loss across all the tokens in all the sequences in a mini-batch.

Core relevant code:

```python
if loss_agg_mode == "token-mean":
    loss = verl_F.masked_mean(loss_mat, loss_mask)
elif loss_agg_mode == "seq-mean-token-sum":
    seq_losses = torch.sum(loss_mat * loss_mask, dim=-1)  # token-sum
    loss = torch.mean(seq_losses)  # seq-mean
elif loss_agg_mode == "seq-mean-token-mean":
    seq_losses = torch.sum(loss_mat * loss_mask, dim=-1) / torch.sum(loss_mask, dim=-1)  # token-mean
    loss = torch.mean(seq_losses)  # seq-mean
else:
    raise ValueError(f"Invalid loss_agg_mode: {loss_agg_mode}")
```
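
The three modes only differ when sequence lengths vary. A minimal sketch with a hypothetical two-sequence mini-batch (one 1-token response with loss 4.0, one 3-token response with per-token loss 1.0):

```python
import torch

loss_mat = torch.tensor([[4.0, 0.0, 0.0],
                         [1.0, 1.0, 1.0]])
loss_mask = torch.tensor([[1.0, 0.0, 0.0],
                          [1.0, 1.0, 1.0]])

token_mean = (loss_mat * loss_mask).sum() / loss_mask.sum()          # 1.75
seq_sums = (loss_mat * loss_mask).sum(dim=-1)                        # [4., 3.]
seq_mean_token_sum = seq_sums.mean()                                 # 3.5
seq_mean_token_mean = (seq_sums / loss_mask.sum(dim=-1)).mean()      # 2.5
```

Under `token-mean`, every token contributes equally regardless of which sequence it belongs to, which is the token-level loss that DAPO advocates.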

### Overlong Reward Shaping

An example configuration:

```yaml
data:
  max_response_length: 20480 # 16384 + 4096
reward_model:
  overlong_buffer:
    enable: True
    len: 4096
    penalty_factor: 1.0
```

Setting `overlong_buffer.enable` to `True` will penalize the outputs whose lengths are overlong but still within the hard context limit.

Specifically, the penalty increases linearly from `0` to `overlong_buffer.penalty_factor` as the length of the output exceeds `max_response_length - overlong_buffer.len` by `0` to `overlong_buffer.len` tokens.

Core relevant code:

```python
if self.overlong_buffer_cfg.enable:
    overlong_buffer_len = self.overlong_buffer_cfg.len
    expected_len = self.max_resp_len - overlong_buffer_len
    exceed_len = valid_response_length - expected_len
    overlong_penalty_factor = self.overlong_buffer_cfg.penalty_factor
    overlong_reward = min(-exceed_len / overlong_buffer_len * overlong_penalty_factor, 0)
    reward += overlong_reward
```
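
As a worked example (hypothetical lengths, using the configuration above), responses up to 16384 tokens are unpenalized, and a response halfway into the buffer loses half the penalty factor:

```python
max_resp_len, buffer_len, penalty_factor = 20480, 4096, 1.0
expected_len = max_resp_len - buffer_len  # 16384

for valid_response_length in (16000, 18432, 20480):
    exceed_len = valid_response_length - expected_len
    overlong_reward = min(-exceed_len / buffer_len * penalty_factor, 0)
    print(valid_response_length, overlong_reward)
# 16000 0       (within the expected length, no penalty)
# 18432 -0.5    (halfway into the buffer)
# 20480 -1.0    (at the hard limit, full penalty)
```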
""" def compute_kl_related_metrics(self, batch: DataProto, metrics: dict, timing_raw: dict): batch.batch["response_mask"] = compute_response_mask(batch) # recompute old_log_probs with marked_timer("old_log_prob", timing_raw, "blue"): old_log_prob = self.actor_rollout_wg.compute_log_prob(batch) entropys = old_log_prob.batch["entropys"] response_masks = batch.batch["response_mask"] loss_agg_mode = self.config.actor_rollout_ref.actor.loss_agg_mode entropy_agg = agg_loss(loss_mat=entropys, loss_mask=response_masks, loss_agg_mode=loss_agg_mode) old_log_prob_metrics = {"actor/entropy": entropy_agg.detach().item()} metrics.update(old_log_prob_metrics) old_log_prob.batch.pop("entropys") batch = batch.union(old_log_prob) if self.use_reference_policy: # compute reference log_prob with marked_timer("ref", timing_raw, "olive"): if not self.ref_in_actor: ref_log_prob = self.ref_policy_wg.compute_ref_log_prob(batch) else: ref_log_prob = self.actor_rollout_wg.compute_ref_log_prob(batch) batch = batch.union(ref_log_prob) return batch def fit(self): """ The training loop of PPO. The driver process only need to call the compute functions of the worker group through RPC to construct the PPO dataflow. The light-weight advantage computation is done on the driver process. """ from omegaconf import OmegaConf from verl.utils.tracking import Tracking logger = Tracking( project_name=self.config.trainer.project_name, experiment_name=self.config.trainer.experiment_name, default_backend=self.config.trainer.logger, config=OmegaConf.to_container(self.config, resolve=True), ) self.global_steps = 0 self.gen_steps = 0 # load checkpoint before doing anything self._load_checkpoint() # perform validation before training # currently, we only support validation using the reward_function. 
        if self.val_reward_fn is not None and self.config.trainer.get("val_before_train", True):
            val_metrics = self._validate()
            assert val_metrics, f"{val_metrics=}"
            pprint(f"Initial validation metrics: {val_metrics}")
            logger.log(data=val_metrics, step=self.global_steps)
            if self.config.trainer.get("val_only", False):
                return

        if self.config.actor_rollout_ref.rollout.get("skip_rollout", False):
            rollout_skip = RolloutSkip(self.config, self.actor_rollout_wg)
            rollout_skip.wrap_generate_sequences()

        # add tqdm
        progress_bar = tqdm(total=self.total_training_steps, initial=self.global_steps, desc="Training Progress")

        # we start from step 1
        self.global_steps += 1
        self.gen_steps += 1
        last_val_metrics = None

        prev_step_profile = False
        curr_step_profile = (
            self.global_steps in self.config.global_profiler.steps
            if self.config.global_profiler.steps is not None
            else False
        )
        next_step_profile = False

        timing_raw = defaultdict(float)
        batch = None
        num_prompt_in_batch = 0
        num_gen_batches = 0
        for epoch in range(self.config.trainer.total_epochs):
            for batch_dict in self.train_dataloader:
                metrics = {}

                with marked_timer("start_profile", timing_raw):
                    self._start_profiling(
                        not prev_step_profile and curr_step_profile
                        if self.config.global_profiler.profile_continuous_steps
                        else curr_step_profile
                    )

                new_batch: DataProto = DataProto.from_single_dict(batch_dict)
                num_gen_batches += 1
                # pop those keys for generation
                if "multi_modal_data" in new_batch.non_tensor_batch.keys():
                    gen_batch = new_batch.pop(
                        batch_keys=["input_ids", "attention_mask", "position_ids"],
                        non_tensor_batch_keys=["raw_prompt_ids", "multi_modal_data"],
                    )
                else:
                    gen_batch = new_batch.pop(
                        batch_keys=["input_ids", "attention_mask", "position_ids"],
                        non_tensor_batch_keys=["raw_prompt_ids"],
                    )

                gen_batch_output = gen_batch.repeat(
                    repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True
                )

                is_last_step = self.global_steps >= self.total_training_steps

                with marked_timer("step", timing_raw):
                    # generate a batch
                    with marked_timer("gen", timing_raw, "red"):
                        gen_batch_output = self.actor_rollout_wg.generate_sequences(gen_batch_output)
                        timing_raw.update(gen_batch_output.meta_info["timing"])
                        gen_batch_output.meta_info.pop("timing", None)

                    if self.config.algorithm.adv_estimator == AdvantageEstimator.REMAX:
                        with marked_timer("gen_max", timing_raw, "red"):
                            gen_baseline_batch = deepcopy(gen_batch)
                            gen_baseline_batch.meta_info["do_sample"] = False
                            gen_baseline_output = self.actor_rollout_wg.generate_sequences(gen_baseline_batch)
                            new_batch = new_batch.union(gen_baseline_output)

                            # compute reward model score on new_batch
                            rm_scores = None
                            if self.use_rm and "rm_scores" not in new_batch.batch.keys():
                                rm_scores = self.rm_wg.compute_rm_score(new_batch)
                                new_batch = new_batch.union(rm_scores)
                            reward_baseline_tensor, _ = compute_reward(new_batch, self.reward_fn)
                            reward_baseline_tensor = reward_baseline_tensor.sum(dim=-1)

                            keys_to_pop = set(gen_baseline_output.batch.keys())
                            if rm_scores is not None:
                                keys_to_pop.update(rm_scores.batch.keys())
                            new_batch.pop(batch_keys=list(keys_to_pop))

                            new_batch.batch["reward_baselines"] = reward_baseline_tensor

                            del rm_scores, gen_baseline_batch, gen_baseline_output

                    new_batch.non_tensor_batch["uid"] = np.array(
                        [str(uuid.uuid4()) for _ in range(len(new_batch.batch))], dtype=object
                    )
                    # repeat to align with repeated responses in rollout
                    new_batch = new_batch.repeat(repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True)
                    new_batch = new_batch.union(gen_batch_output)

                    if self.config.algorithm.use_kl_in_reward:
                        # We need these metrics for apply_kl_penalty if using kl in reward
                        new_batch = self.compute_kl_related_metrics(new_batch, metrics, timing_raw)
                    # otherwise, we will compute those after dynamic sampling
                    with marked_timer("reward", timing_raw, "yellow"):
                        # compute scores. Support both model and function-based.
                        # We first compute the scores using reward model. Then, we call reward_fn to combine
                        # the results from reward model and rule-based results.
                        if self.use_rm and "rm_scores" not in new_batch.batch.keys():
                            # we first compute reward model score
                            reward_tensor = self.rm_wg.compute_rm_score(new_batch)
                            new_batch = new_batch.union(reward_tensor)

                        # we combine with rule-based rm
                        reward_tensor, reward_extra_infos_dict = compute_reward(new_batch, self.reward_fn)
                        new_batch.batch["token_level_scores"] = reward_tensor

                        if reward_extra_infos_dict:
                            new_batch.non_tensor_batch.update(
                                {k: np.array(v) for k, v in reward_extra_infos_dict.items()}
                            )

                        # compute rewards. apply_kl_penalty if available
                        if self.config.algorithm.use_kl_in_reward:
                            new_batch, kl_metrics = apply_kl_penalty(
                                new_batch, kl_ctrl=self.kl_ctrl_in_reward, kl_penalty=self.config.algorithm.kl_penalty
                            )
                            metrics.update(
                                kl_metrics
                            )  # TODO: This will be cleared if we use multiple generation batches
                        else:
                            new_batch.batch["token_level_rewards"] = new_batch.batch["token_level_scores"]

                    if not self.config.algorithm.filter_groups.enable:
                        batch = new_batch
                    else:
                        # NOTE: When the number of prompts left after filtering is less than the train batch size,
                        # we skip to the next generation batch
                        metric_name = self.config.algorithm.filter_groups.metric
                        if metric_name == "seq_final_reward":
                            # Turn to numpy for easier filtering
                            new_batch.non_tensor_batch["seq_final_reward"] = (
                                new_batch.batch["token_level_rewards"].sum(dim=-1).numpy()
                            )
                        elif metric_name == "seq_reward":
                            new_batch.non_tensor_batch["seq_reward"] = (
                                new_batch.batch["token_level_scores"].sum(dim=-1).numpy()
                            )

                        # Collect the sequence reward for each trajectory
                        prompt_uid2metric_vals = defaultdict(list)
                        for uid, metric_val in zip(
                            new_batch.non_tensor_batch["uid"], new_batch.non_tensor_batch[metric_name], strict=True
                        ):
                            prompt_uid2metric_vals[uid].append(metric_val)

                        prompt_uid2metric_std = {}
                        for prompt_uid, metric_vals in prompt_uid2metric_vals.items():
                            prompt_uid2metric_std[prompt_uid] = np.std(metric_vals)

                        kept_prompt_uids = [
                            uid
                            for uid, std in prompt_uid2metric_std.items()
                            if std > 0 or len(prompt_uid2metric_vals[uid]) == 1
                        ]
                        num_prompt_in_batch += len(kept_prompt_uids)

                        kept_traj_idxs = []
                        for idx, traj_from_prompt_uid in enumerate(new_batch.non_tensor_batch["uid"]):
                            if traj_from_prompt_uid in kept_prompt_uids:
                                kept_traj_idxs.append(idx)

                        new_batch = new_batch[kept_traj_idxs]
                        batch = new_batch if batch is None else DataProto.concat([batch, new_batch])

                        prompt_bsz = self.config.data.train_batch_size
                        if num_prompt_in_batch < prompt_bsz:
                            print(f"{num_prompt_in_batch=} < {prompt_bsz=}")
                            max_num_gen_batches = self.config.algorithm.filter_groups.max_num_gen_batches
                            if max_num_gen_batches <= 0 or num_gen_batches < max_num_gen_batches:
                                print(f"{num_gen_batches=}. Keep generating...")
                                self.gen_steps += 1
                                is_last_step = self.global_steps >= self.total_training_steps
                                continue
                            else:
                                raise ValueError(
                                    f"{num_gen_batches=} >= {max_num_gen_batches=}."
                                    + " Generated too many. Please check if your data are too difficult."
                                    + " You could also try setting max_num_gen_batches=0 to enable endless trials."
                                )
                        else:
                            # Align the batch
                            traj_bsz = self.config.data.train_batch_size * self.config.actor_rollout_ref.rollout.n
                            batch = batch[:traj_bsz]
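                    # Illustrative arithmetic (hypothetical numbers, not config
                    # defaults): with data.train_batch_size=512 and rollout.n=16,
                    # generation repeats until at least 512 prompt groups survive
                    # the filter, after which the accumulated batch is truncated
                    # to 512 * 16 = 8192 trajectories for the update below.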
                    # === Updating ===
                    # Balance the number of valid tokens across DP ranks.
                    # NOTE: This usually changes the order of data in the `batch`,
                    # which won't affect the advantage calculation (since it's based on uid),
                    # but might affect the loss calculation (due to the change of mini-batching).
                    # TODO: Decouple the DP balancing and mini-batching.
                    if self.config.trainer.balance_batch:
                        self._balance_batch(batch, metrics=metrics)

                    # compute global_valid tokens
                    batch.meta_info["global_token_num"] = torch.sum(batch.batch["attention_mask"], dim=-1).tolist()

                    if not self.config.algorithm.use_kl_in_reward:
                        batch = self.compute_kl_related_metrics(batch, metrics, timing_raw)

                    # compute values
                    if self.use_critic:
                        with marked_timer("values", timing_raw, "cyan"):
                            values = self.critic_wg.compute_values(batch)
                            batch = batch.union(values)

                    # Compute rollout IS weights and mismatch metrics (inherited from RayPPOTrainer)
                    batch, is_metrics = self.compute_rollout_importance_weights_and_add_to_batch(batch)
                    # IS and mismatch metrics already have the mismatch/ prefix
                    metrics.update(is_metrics)

                    with marked_timer("adv", timing_raw, "brown"):
                        # compute advantages, executed on the driver process
                        norm_adv_by_std_in_grpo = self.config.algorithm.get("norm_adv_by_std_in_grpo", True)
                        batch = compute_advantage(
                            batch,
                            adv_estimator=self.config.algorithm.adv_estimator,
                            gamma=self.config.algorithm.gamma,
                            lam=self.config.algorithm.lam,
                            num_repeat=self.config.actor_rollout_ref.rollout.n,
                            norm_adv_by_std_in_grpo=norm_adv_by_std_in_grpo,
                        )

                    # update critic
                    if self.use_critic:
                        with marked_timer("update_critic", timing_raw, "pink"):
                            critic_output = self.critic_wg.update_critic(batch)
                        critic_output_metrics = reduce_metrics(critic_output.meta_info["metrics"])
                        metrics.update(critic_output_metrics)

                    # implement critic warmup
                    if self.config.trainer.critic_warmup <= self.global_steps:
                        # update actor
                        with marked_timer("update_actor", timing_raw, "red"):
                            actor_output = self.actor_rollout_wg.update_actor(batch)
                        actor_output_metrics = reduce_metrics(actor_output.meta_info["metrics"])
                        metrics.update(actor_output_metrics)

                    # Log rollout generations if enabled
                    rollout_data_dir = self.config.trainer.get("rollout_data_dir", None)
                    if rollout_data_dir:
                        self._log_rollout_data(batch, reward_extra_infos_dict, timing_raw, rollout_data_dir)

                    # validate
                    if (
                        self.val_reward_fn is not None
                        and self.config.trainer.test_freq > 0
                        and (is_last_step or self.global_steps % self.config.trainer.test_freq == 0)
                    ):
                        with marked_timer("testing", timing_raw, "green"):
                            val_metrics: dict = self._validate()
                            if is_last_step:
                                last_val_metrics = val_metrics
                        metrics.update(val_metrics)

                    if self.config.trainer.save_freq > 0 and (
                        is_last_step or self.global_steps % self.config.trainer.save_freq == 0
                    ):
                        with marked_timer("save_checkpoint", timing_raw, "green"):
                            self._save_checkpoint()

                with marked_timer("stop_profile", timing_raw):
                    next_step_profile = (
                        self.global_steps + 1 in self.config.global_profiler.steps
                        if self.config.global_profiler.steps is not None
                        else False
                    )
                    self._stop_profiling(
                        curr_step_profile and not next_step_profile
                        if self.config.global_profiler.profile_continuous_steps
                        else curr_step_profile
                    )
                    prev_step_profile = curr_step_profile
                    curr_step_profile = next_step_profile

                # collect metrics
                metrics.update(compute_data_metrics(batch=batch, use_critic=self.use_critic))
                metrics.update(compute_timing_metrics(batch=batch, timing_raw=timing_raw))
                # TODO: implement actual TFLOPs and theoretical TFLOPs
                n_gpus = self.resource_pool_manager.get_n_gpus()
                metrics.update(compute_throughout_metrics(batch=batch, timing_raw=timing_raw, n_gpus=n_gpus))
                timing_raw = defaultdict(float)  # clear timing

                metrics["train/num_gen_batches"] = num_gen_batches
                batch = None
                num_prompt_in_batch = 0
                num_gen_batches = 0

                # TODO: make a canonical logger that supports various backends
                logger.log(data=metrics, step=self.global_steps)

                if is_last_step:
                    pprint(f"Final validation metrics: {last_val_metrics}")
                    progress_bar.close()
                    return

                progress_bar.update(1)
                self.global_steps += 1
                self.gen_steps += 1

        # check if the last-step checkpoint exists
        checkpoint_dir = os.path.join(self.config.trainer.default_local_dir, f"global_step_{self.global_steps}")
        if not os.path.exists(checkpoint_dir):
            # save last step checkpoint
            timing_raw = defaultdict(float)
            with marked_timer("save_checkpoint", timing_raw, "green"):
                self._save_checkpoint()
            metrics = {f"timing/{k}": v for k, v in timing_raw.items()}
            logger.log(data=metrics, step=self.global_steps)


================================================
FILE: verl_distillation/recipe/dapo/main_dapo.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Note that we don't combine the main with ray_trainer, as ray_trainer is used by other mains.
""" import os import socket import hydra import ray from omegaconf import OmegaConf from verl.trainer.ppo.reward import load_reward_manager from verl.utils.device import is_cuda_available from .dapo_ray_trainer import RayDAPOTrainer @hydra.main(config_path="config", config_name="dapo_trainer", version_base=None) def main(config): run_ppo(config) def run_ppo(config) -> None: if not ray.is_initialized(): # this is for local ray cluster default_runtime_env = { "env_vars": {"TOKENIZERS_PARALLELISM": "true", "NCCL_DEBUG": "WARN", "VLLM_LOGGING_LEVEL": "WARN"} } ray_init_kwargs = config.ray_kwargs.get("ray_init", {}) runtime_env_kwargs = ray_init_kwargs.get("runtime_env", {}) runtime_env = OmegaConf.merge(default_runtime_env, runtime_env_kwargs) ray_init_kwargs = OmegaConf.create({**ray_init_kwargs, "runtime_env": runtime_env}) print(f"ray init kwargs: {ray_init_kwargs}") ray.init(**OmegaConf.to_container(ray_init_kwargs)) try: if ( is_cuda_available and config.global_profiler.tool == "nsys" and OmegaConf.select(config.global_profiler, "steps") is not None and len(OmegaConf.select(config.global_profiler, "steps")) > 0 ): nsight_options = OmegaConf.to_container( config.global_profiler.global_tool_config.nsys.controller_nsight_options ) runner = TaskRunner.options(runtime_env={"nsight": nsight_options}).remote() else: runner = TaskRunner.remote() ray.get(runner.run.remote(config)) finally: if ray.is_initialized(): ray.shutdown() @ray.remote(num_cpus=1) # please make sure main_task is not scheduled on head class TaskRunner: def run(self, config): # print initial config from pprint import pprint from omegaconf import OmegaConf from verl.utils.fs import copy_to_local print(f"TaskRunner hostname: {socket.gethostname()}, PID: {os.getpid()}") pprint(OmegaConf.to_container(config, resolve=True)) # resolve=True will eval symbol values OmegaConf.resolve(config) # download the checkpoint from hdfs local_path = copy_to_local(config.actor_rollout_ref.model.path) # instantiate tokenizer from verl.utils import hf_processor, hf_tokenizer trust_remote_code = config.data.get("trust_remote_code", False) tokenizer = hf_tokenizer(local_path, trust_remote_code=trust_remote_code) # used for multimodal LLM, could be none processor = hf_processor(local_path, trust_remote_code=trust_remote_code, use_fast=True) from verl.single_controller.ray import RayWorkerGroup # define worker classes if config.actor_rollout_ref.actor.strategy in {"fsdp", "fsdp2"}: assert config.critic.strategy in {"fsdp", "fsdp2"} from verl.workers.fsdp_workers import ActorRolloutRefWorker, CriticWorker ray_worker_group_cls = RayWorkerGroup elif config.actor_rollout_ref.actor.strategy == "megatron": assert config.actor_rollout_ref.actor.strategy == config.critic.strategy from verl.workers.megatron_workers import ActorRolloutRefWorker, CriticWorker ray_worker_group_cls = RayWorkerGroup else: raise NotImplementedError from verl.trainer.ppo.ray_trainer import ResourcePoolManager, Role role_worker_mapping = { Role.ActorRollout: ray.remote(ActorRolloutRefWorker), Role.Critic: ray.remote(CriticWorker), } global_pool_id = "global_pool" resource_pool_spec = { global_pool_id: [config.trainer.n_gpus_per_node] * config.trainer.nnodes, } mapping = { Role.ActorRollout: global_pool_id, Role.Critic: global_pool_id, } # we should adopt a multi-source reward function here # - for rule-based rm, we directly call a reward score # - for model-based rm, we call a model # - for code related prompt, we send to a sandbox if there are test cases # - finally, we combine all the 
rewards together # - The reward type depends on the tag of the data if config.reward_model.enable: if config.reward_model.strategy in {"fsdp", "fsdp2"}: from verl.workers.fsdp_workers import RewardModelWorker elif config.reward_model.strategy == "megatron": from verl.workers.megatron_workers import RewardModelWorker else: raise NotImplementedError role_worker_mapping[Role.RewardModel] = ray.remote(RewardModelWorker) mapping[Role.RewardModel] = global_pool_id # reference model if config.algorithm.use_kl_in_reward or config.actor_rollout_ref.actor.use_kl_loss: role_worker_mapping[Role.RefPolicy] = ray.remote(ActorRolloutRefWorker) mapping[Role.RefPolicy] = global_pool_id reward_fn = load_reward_manager( config, tokenizer, 0, max_resp_len=config.data.max_response_length, overlong_buffer_cfg=config.reward_model.overlong_buffer, ) # Note that we always use function-based RM for validation val_reward_fn = load_reward_manager( config, tokenizer, 1, max_resp_len=config.data.max_response_length, overlong_buffer_cfg=config.reward_model.overlong_buffer, ) resource_pool_manager = ResourcePoolManager(resource_pool_spec=resource_pool_spec, mapping=mapping) trainer = RayDAPOTrainer( config=config, tokenizer=tokenizer, processor=processor, role_worker_mapping=role_worker_mapping, resource_pool_manager=resource_pool_manager, ray_worker_group_cls=ray_worker_group_cls, reward_fn=reward_fn, val_reward_fn=val_reward_fn, ) trainer.init_workers() trainer.fit() if __name__ == "__main__": main() ================================================ FILE: verl_distillation/recipe/dapo/prepare_dapo_data.sh ================================================ #!/usr/bin/env bash set -uxo pipefail export VERL_HOME=${VERL_HOME:-"${HOME}/verl"} export TRAIN_FILE=${TRAIN_FILE:-"${VERL_HOME}/data/dapo-math-17k.parquet"} export TEST_FILE=${TEST_FILE:-"${VERL_HOME}/data/aime-2024.parquet"} export OVERWRITE=${OVERWRITE:-0} mkdir -p "${VERL_HOME}/data" if [ ! -f "${TRAIN_FILE}" ] || [ "${OVERWRITE}" -eq 1 ]; then wget -O "${TRAIN_FILE}" "https://huggingface.co/datasets/BytedTsinghua-SIA/DAPO-Math-17k/resolve/main/data/dapo-math-17k.parquet?download=true" fi if [ ! 
-f "${TEST_FILE}" ] || [ "${OVERWRITE}" -eq 1 ]; then wget -O "${TEST_FILE}" "https://huggingface.co/datasets/BytedTsinghua-SIA/AIME-2024/resolve/main/data/aime-2024.parquet?download=true" fi ================================================ FILE: verl_distillation/recipe/dapo/run_dapo_early_qwen2.5_32b.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail project_name='DAPO' exp_name='DAPO-Early-Qwen2.5-32B' adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 20)) enable_overlong_buffer=True overlong_buffer_len=$((1024 * 4)) overlong_penalty_factor=1.0 # An early version for DAPO loss_agg_mode="seq-mean-token-mean" enable_filter_groups=False gen_prompt_bsz=512 # NOTE: no filtering here train_prompt_bsz=512 train_prompt_mini_bsz=32 n_resp_per_prompt=16 # Ray RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} WORKING_DIR=${WORKING_DIR:-"${PWD}"} RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} NNODES=${NNODES:-16} # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen2.5-32B"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"} TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"} # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_top_p=0.7 # Performance Related Parameter sp_size=8 use_dynamic_bsz=True actor_ppo_max_token_len=$((max_prompt_length + max_response_length)) infer_ppo_max_token_len=$((max_prompt_length + max_response_length)) offload=True gen_tp=4 ray job submit --no-wait --runtime-env="${RUNTIME_ENV}" \ --working-dir "${WORKING_DIR}" \ -- python3 -m recipe.dapo.main_dapo \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.gen_batch_size=${gen_prompt_bsz} \ data.train_batch_size=${train_prompt_bsz} \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ algorithm.filter_groups.enable=${enable_filter_groups} \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ 
actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.rollout.gpu_memory_utilization=0.80 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k="${top_k}" \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=True \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.actor.fsdp_config.fsdp_size=-1 \ reward_model.reward_manager=dapo \ reward_model.overlong_buffer.enable=${enable_overlong_buffer} \ reward_model.overlong_buffer.len=${overlong_buffer_len} \ reward_model.overlong_buffer.penalty_factor=${overlong_penalty_factor} \ trainer.logger='["console","wandb"]' \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node=8 \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=True \ trainer.test_freq=5 \ trainer.save_freq=5 \ trainer.total_epochs=1 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=auto ================================================ FILE: verl_distillation/recipe/dapo/run_dapo_qwen2.5_32b.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail project_name='DAPO' exp_name='DAPO-Qwen2.5-32B' adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 20)) enable_overlong_buffer=True overlong_buffer_len=$((1024 * 4)) overlong_penalty_factor=1.0 loss_agg_mode="token-mean" enable_filter_groups=True filter_groups_metric=acc max_num_gen_batches=10 train_prompt_bsz=512 gen_prompt_bsz=$((train_prompt_bsz * 3)) n_resp_per_prompt=16 train_prompt_mini_bsz=32 # Ray RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} WORKING_DIR=${WORKING_DIR:-"${PWD}"} RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} NNODES=${NNODES:-16} # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen2.5-32B"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"} TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"} # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_top_p=0.7 # Performance Related Parameter sp_size=8 use_dynamic_bsz=True actor_ppo_max_token_len=$((max_prompt_length + max_response_length)) infer_ppo_max_token_len=$((max_prompt_length + max_response_length)) offload=True gen_tp=4 ray job submit --no-wait --runtime-env="${RUNTIME_ENV}" \ --working-dir "${WORKING_DIR}" \ -- python3 -m recipe.dapo.main_dapo \ 
data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.gen_batch_size=${gen_prompt_bsz} \ data.train_batch_size=${train_prompt_bsz} \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ algorithm.filter_groups.enable=${enable_filter_groups} \ algorithm.filter_groups.max_num_gen_batches=${max_num_gen_batches} \ algorithm.filter_groups.metric=${filter_groups_metric} \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.rollout.gpu_memory_utilization=0.80 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k="${top_k}" \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=True \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.actor.fsdp_config.fsdp_size=-1 \ reward_model.reward_manager=dapo \ reward_model.overlong_buffer.enable=${enable_overlong_buffer} \ reward_model.overlong_buffer.len=${overlong_buffer_len} \ reward_model.overlong_buffer.penalty_factor=${overlong_penalty_factor} \ trainer.logger='["console","wandb"]' \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node=8 \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=True \ trainer.test_freq=5 \ trainer.save_freq=5 \ trainer.total_epochs=1 \ trainer.default_local_dir="${CKPTS_DIR}" \ 
trainer.resume_mode=auto ================================================ FILE: verl_distillation/recipe/dapo/run_dapo_qwen2.5_32b_npu.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail project_name='DAPO-Qwen2.5-32B' exp_name='Qwen2.5-32B-npu-32rank-gbs128' adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 20)) enable_overlong_buffer=True overlong_buffer_len=$((1024 * 4)) overlong_penalty_factor=1.0 loss_agg_mode="token-mean" enable_filter_groups=True filter_groups_metric=acc max_num_gen_batches=10 NNODES=2 train_prompt_bsz=128 gen_prompt_bsz=$((train_prompt_bsz * 3)) n_resp_per_prompt=16 train_prompt_mini_bsz=32 # Ray PWD=./ RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} WORKING_DIR=${WORKING_DIR:-"${PWD}"} RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen2.5-32B"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"} TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"} # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_top_p=0.7 # Performance Related Parameter sp_size=8 use_dynamic_bsz=True actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) / sp_size)) infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) / sp_size)) offload=True gen_tp=4 enable_chunked_prefill=True ray job submit --no-wait --runtime-env="${RUNTIME_ENV}" \ --working-dir "${WORKING_DIR}" \ --address "${RAY_ADDRESS}" \ -- python3 -m recipe.dapo.main_dapo \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.gen_batch_size=${gen_prompt_bsz} \ data.train_batch_size=${train_prompt_bsz} \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ algorithm.filter_groups.enable=${enable_filter_groups} \ algorithm.filter_groups.max_num_gen_batches=${max_num_gen_batches} \ algorithm.filter_groups.metric=${filter_groups_metric} \ actor_rollout_ref.actor.use_torch_compile=False \ actor_rollout_ref.ref.use_torch_compile=False \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.model.path="${MODEL_PATH}" \ +actor_rollout_ref.model.override_config.attention_dropout=0. 
\ +actor_rollout_ref.model.override_config.embd_pdrop=0. \ +actor_rollout_ref.model.override_config.resid_pdrop=0. \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.rollout.gpu_memory_utilization=0.90 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.enable_chunked_prefill=${enable_chunked_prefill} \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k="${top_k}" \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=True \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.actor.fsdp_config.fsdp_size=-1 \ reward_model.reward_manager=dapo \ reward_model.overlong_buffer.enable=${enable_overlong_buffer} \ reward_model.overlong_buffer.len=${overlong_buffer_len} \ reward_model.overlong_buffer.penalty_factor=${overlong_penalty_factor} \ trainer.logger="['console','wandb']" \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node=16 \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=True \ trainer.test_freq=5 \ trainer.save_freq=20 \ trainer.total_epochs=1 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.device=npu \ trainer.resume_mode=auto \ actor_rollout_ref.actor.fsdp_config.forward_prefetch=True \ actor_rollout_ref.ref.fsdp_config.forward_prefetch=True \ ================================================ FILE: verl_distillation/recipe/dapo/run_dapo_qwen2.5_32b_rollout_is.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail # Rollout Importance Sampling Example # References: # - When Speed Kills Stability: https://yingru.notion.site/When-Speed-Kills-Stability-271211a558b7808d8b12d403fd15edda # - Off-policy RL: https://fengyao.notion.site/off-policy-rl project_name='DAPO' exp_name='DAPO-Qwen2.5-32B-RolloutIS' # Rollout Importance Sampling adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 # Rollout Importance Sampling parameters rollout_is=True rollout_is_threshold=2.0 rollout_is_threshold_lower=null # No lower bound rollout_is_level=token # token-level rollout_is_mode=truncate # truncate mode rollout_is_veto_threshold=null # No veto clip_ratio_low=0.2 clip_ratio_high=0.28 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 20)) enable_overlong_buffer=True overlong_buffer_len=$((1024 * 4)) overlong_penalty_factor=1.0 loss_agg_mode="token-mean" enable_filter_groups=True filter_groups_metric=acc max_num_gen_batches=10 train_prompt_bsz=512 
gen_prompt_bsz=$((train_prompt_bsz * 3))
n_resp_per_prompt=16
train_prompt_mini_bsz=32

# Ray
RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"}
WORKING_DIR=${WORKING_DIR:-"${PWD}"}
RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"}
NNODES=${NNODES:-16}

# Paths
RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"}
MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen2.5-32B"}
CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"}
TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"}
TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"}

# Algorithm
temperature=1.0
top_p=1.0
top_k=-1 # 0 for HF rollout, -1 for vLLM rollout
val_top_p=0.7

# Performance Related Parameter
sp_size=8
use_dynamic_bsz=True
actor_ppo_max_token_len=$((max_prompt_length + max_response_length))
infer_ppo_max_token_len=$((max_prompt_length + max_response_length))
offload=True
gen_tp=4

# Rollout Importance Sampling (corrects the distribution mismatch between rollout and training)
#
# Note that server mode (agent loop) does not yet return rollout_log_probs,
# so server mode is currently not supported for Rollout IS.
#
# Rollout IS parameters (configured at the top of this script; see the sketch
# after the launch command below for example definitions):
#   algorithm.rollout_is=True
#   algorithm.rollout_is_threshold=2.0                    # Upper threshold (can be tuned)
#   algorithm.rollout_is_level=token                      # Aggregation level
#   algorithm.rollout_is_mode=truncate                    # Bounding mode
#   actor_rollout_ref.rollout.calculate_log_probs=True    # Required!

ray job submit --no-wait --runtime-env="${RUNTIME_ENV}" \
    --working-dir "${WORKING_DIR}" \
    -- python3 -m recipe.dapo.main_dapo \
    data.train_files="${TRAIN_FILE}" \
    data.val_files="${TEST_FILE}" \
    data.prompt_key=prompt \
    data.truncation='left' \
    data.max_prompt_length=${max_prompt_length} \
    data.max_response_length=${max_response_length} \
    data.gen_batch_size=${gen_prompt_bsz} \
    data.train_batch_size=${train_prompt_bsz} \
    actor_rollout_ref.rollout.n=${n_resp_per_prompt} \
    algorithm.adv_estimator=${adv_estimator} \
    algorithm.use_kl_in_reward=${use_kl_in_reward} \
    algorithm.kl_ctrl.kl_coef=${kl_coef} \
    actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \
    actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \
    actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \
    actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \
    actor_rollout_ref.actor.clip_ratio_c=10.0 \
    algorithm.filter_groups.enable=${enable_filter_groups} \
    algorithm.filter_groups.max_num_gen_batches=${max_num_gen_batches} \
    algorithm.filter_groups.metric=${filter_groups_metric} \
    actor_rollout_ref.model.use_remove_padding=True \
    actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \
    actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \
    actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \
    actor_rollout_ref.model.path="${MODEL_PATH}" \
    actor_rollout_ref.model.enable_gradient_checkpointing=True \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.actor.optim.lr_warmup_steps=10 \
    actor_rollout_ref.actor.optim.weight_decay=0.1 \
    actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \
    actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \
    actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \
    actor_rollout_ref.actor.entropy_coeff=0 \
    actor_rollout_ref.actor.grad_clip=1.0 \
    actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \
    actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \
    algorithm.rollout_is=${rollout_is} \
    algorithm.rollout_is_threshold=${rollout_is_threshold} \
    algorithm.rollout_is_threshold_lower=${rollout_is_threshold_lower} \
    algorithm.rollout_is_level=${rollout_is_level} \
    algorithm.rollout_is_mode=${rollout_is_mode} \
    algorithm.rollout_is_veto_threshold=${rollout_is_veto_threshold} \
    actor_rollout_ref.rollout.calculate_log_probs=True \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.80 \
    actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \
    actor_rollout_ref.rollout.enable_chunked_prefill=True \
    actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \
    actor_rollout_ref.rollout.temperature=${temperature} \
    actor_rollout_ref.rollout.top_p=${top_p} \
    actor_rollout_ref.rollout.top_k="${top_k}" \
    actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \
    actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \
    actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \
    actor_rollout_ref.rollout.val_kwargs.do_sample=True \
    actor_rollout_ref.rollout.val_kwargs.n=1 \
    actor_rollout_ref.rollout.name=vllm \
    actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \
    actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \
    actor_rollout_ref.actor.fsdp_config.fsdp_size=-1 \
    reward_model.reward_manager=dapo \
    reward_model.overlong_buffer.enable=${enable_overlong_buffer} \
    reward_model.overlong_buffer.len=${overlong_buffer_len} \
    reward_model.overlong_buffer.penalty_factor=${overlong_penalty_factor} \
    trainer.logger='["console","wandb"]' \
    trainer.project_name="${project_name}" \
    trainer.experiment_name="${exp_name}" \
    trainer.n_gpus_per_node=8 \
    trainer.nnodes="${NNODES}" \
    trainer.val_before_train=True \
    trainer.test_freq=5 \
    trainer.save_freq=5 \
    trainer.total_epochs=1 \
    trainer.default_local_dir="${CKPTS_DIR}" \
    trainer.resume_mode=auto
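
# The launch command above references rollout-IS shell variables (rollout_is,
# rollout_is_threshold, ...) presumably defined near the top of this script,
# above this excerpt. A minimal sketch of such definitions, using the values
# listed in the comment block above; the lower-bound and veto thresholds are
# illustrative placeholders only:
#
#   rollout_is=True
#   rollout_is_threshold=2.0            # upper threshold (can be tuned)
#   rollout_is_threshold_lower=null     # placeholder: no lower bound
#   rollout_is_level=token              # aggregation level
#   rollout_is_mode=truncate            # bounding mode
#   rollout_is_veto_threshold=null      # placeholder: no veto
#
# calculate_log_probs=True is already passed in the launch args, as required.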
================================================
FILE: verl_distillation/recipe/dapo/run_dapo_qwen2.5_7b_npu.sh
================================================
#!/usr/bin/env bash
set -xeuo pipefail

project_name='DAPO-Qwen2.5-7B-Instruct'
exp_name='DAPO-Qwen2.5-7B-Instruct'

adv_estimator=grpo
use_kl_in_reward=False
kl_coef=0.0
use_kl_loss=False
kl_loss_coef=0.0
clip_ratio_low=0.2
clip_ratio_high=0.28
max_prompt_length=$((1024 * 2))
max_response_length=$((1024 * 20))
enable_overlong_buffer=True
overlong_buffer_len=$((1024 * 4))
overlong_penalty_factor=1.0
loss_agg_mode="token-mean"
enable_filter_groups=True
filter_groups_metric=acc
max_num_gen_batches=10
NNODES=1
train_prompt_bsz=16
gen_prompt_bsz=$((train_prompt_bsz * 3))
n_resp_per_prompt=16
train_prompt_mini_bsz=1

# Ray
PWD=./
RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"}
WORKING_DIR=${WORKING_DIR:-"${PWD}"}
RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"}

# Paths
RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"}
MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen2.5-7B-Instruct"}
CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"}
TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"}
TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"}

# Algorithm
temperature=1.0
top_p=1.0
top_k=-1 # 0 for HF rollout, -1 for vLLM rollout

# Performance Related Parameter
sp_size=4
use_dynamic_bsz=True
actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) / sp_size))
infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) / sp_size))
offload=True
gen_tp=1

ray job submit --no-wait --runtime-env="${RUNTIME_ENV}" \
    --working-dir "${WORKING_DIR}" \
    --address "${RAY_ADDRESS}" \
    -- python3 -m recipe.dapo.main_dapo \
    data.train_files="${TRAIN_FILE}" \
    data.val_files="${TEST_FILE}" \
    data.prompt_key=prompt \
    data.truncation='left' \
    data.max_prompt_length=${max_prompt_length} \
    data.max_response_length=${max_response_length} \
    data.gen_batch_size=${gen_prompt_bsz} \
    data.train_batch_size=${train_prompt_bsz} \
    actor_rollout_ref.rollout.n=${n_resp_per_prompt} \
    algorithm.adv_estimator=${adv_estimator} \
    algorithm.use_kl_in_reward=${use_kl_in_reward} \
    algorithm.kl_ctrl.kl_coef=${kl_coef} \
    actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \
    actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \
    actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \
    actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \
    actor_rollout_ref.actor.clip_ratio_c=10.0 \
    algorithm.filter_groups.enable=${enable_filter_groups} \
    algorithm.filter_groups.max_num_gen_batches=${max_num_gen_batches} \
    algorithm.filter_groups.metric=${filter_groups_metric} \
    actor_rollout_ref.actor.use_torch_compile=False \
    actor_rollout_ref.ref.use_torch_compile=False \
    actor_rollout_ref.model.use_remove_padding=True \
    actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \
    actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \
    actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \
    actor_rollout_ref.rollout.name=vllm \
    actor_rollout_ref.model.path="${MODEL_PATH}" \
    +actor_rollout_ref.model.override_config.attention_dropout=0. \
    +actor_rollout_ref.model.override_config.embd_pdrop=0. \
    +actor_rollout_ref.model.override_config.resid_pdrop=0. \
    actor_rollout_ref.model.enable_gradient_checkpointing=True \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.actor.optim.lr_warmup_steps=10 \
    actor_rollout_ref.actor.optim.weight_decay=0.1 \
    actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \
    actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \
    actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \
    actor_rollout_ref.actor.entropy_coeff=0 \
    actor_rollout_ref.actor.grad_clip=1.0 \
    actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \
    actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.50 \
    actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \
    actor_rollout_ref.rollout.enable_chunked_prefill=True \
    actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \
    actor_rollout_ref.rollout.temperature=${temperature} \
    actor_rollout_ref.rollout.top_p=${top_p} \
    actor_rollout_ref.rollout.top_k="${top_k}" \
    actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \
    actor_rollout_ref.rollout.val_kwargs.top_p=${top_p} \
    actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \
    actor_rollout_ref.rollout.val_kwargs.do_sample=True \
    actor_rollout_ref.rollout.val_kwargs.n=1 \
    actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \
    actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \
    actor_rollout_ref.actor.fsdp_config.fsdp_size=-1 \
    reward_model.reward_manager=dapo \
    reward_model.overlong_buffer.enable=${enable_overlong_buffer} \
    reward_model.overlong_buffer.len=${overlong_buffer_len} \
    reward_model.overlong_buffer.penalty_factor=${overlong_penalty_factor} \
    trainer.logger="['console']" \
    trainer.project_name="${project_name}" \
    trainer.experiment_name="${exp_name}" \
    trainer.n_gpus_per_node=16 \
    trainer.nnodes="${NNODES}" \
    trainer.val_before_train=True \
    trainer.test_freq=5 \
    trainer.save_freq=20 \
    trainer.total_epochs=1 \
    trainer.default_local_dir="${CKPTS_DIR}" \
    trainer.device=npu \
    trainer.resume_mode=auto \
    actor_rollout_ref.actor.entropy_checkpointing=True \
    actor_rollout_ref.ref.entropy_checkpointing=True \
    actor_rollout_ref.actor.fsdp_config.forward_prefetch=True \
    actor_rollout_ref.ref.fsdp_config.forward_prefetch=True \
    actor_rollout_ref.actor.entropy_from_logits_with_chunking=True \
    actor_rollout_ref.ref.entropy_from_logits_with_chunking=True
================================================
FILE: verl_distillation/recipe/dapo/run_dapo_qwen3_14b_base_npu.sh
================================================
#!/bin/bash
project_name='DAPO'
exp_name='DAPO-Qwen3-14B-Base'

adv_estimator=grpo
use_kl_in_reward=False
kl_coef=0.0
use_kl_loss=False
kl_loss_coef=0.0
clip_ratio_low=0.2
clip_ratio_high=0.28
max_prompt_length=$((1024 * 2))
max_response_length=$((1024 * 20))
enable_overlong_buffer=True
overlong_buffer_len=$((1024 * 4))
overlong_penalty_factor=1.0
loss_agg_mode="token-mean"
enable_filter_groups=False
filter_groups_metric=acc
max_num_gen_batches=10
train_prompt_bsz=16
gen_prompt_bsz=$((train_prompt_bsz * 2))
n_resp_per_prompt=16
train_prompt_mini_bsz=1

# Ray
RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"}
WORKING_DIR=${WORKING_DIR:-"${PWD}"}
RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"}
NNODES=${NNODES:-2}

# Paths
RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"}
MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen3-14B-Base"}
CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"}
TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"}
TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"}

# Algorithm
temperature=1.0
top_p=1.0
top_k=-1 # 0 for HF rollout, -1 for vLLM rollout

# Performance Related Parameter
sp_size=2
use_dynamic_bsz=True
actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) / sp_size))
infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) / sp_size))
offload=True
gen_tp=2

ray job submit --runtime-env="${RUNTIME_ENV}" \
    --address "${RAY_ADDRESS}" \
    -- python3 -m recipe.dapo.main_dapo \
    data.train_files="${TRAIN_FILE}" \
    data.val_files="${TEST_FILE}" \
    data.prompt_key=prompt \
    data.truncation='left' \
    data.max_prompt_length=${max_prompt_length} \
    data.max_response_length=${max_response_length} \
    data.gen_batch_size=${gen_prompt_bsz} \
    data.train_batch_size=${train_prompt_bsz} \
    actor_rollout_ref.rollout.n=${n_resp_per_prompt} \
    algorithm.adv_estimator=${adv_estimator} \
    algorithm.use_kl_in_reward=${use_kl_in_reward} \
    algorithm.kl_ctrl.kl_coef=${kl_coef} \
    actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \
    actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \
    actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \
    actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \
    actor_rollout_ref.actor.clip_ratio_c=10.0 \
    algorithm.filter_groups.enable=${enable_filter_groups} \
    algorithm.filter_groups.max_num_gen_batches=${max_num_gen_batches} \
    algorithm.filter_groups.metric=${filter_groups_metric} \
    actor_rollout_ref.model.use_remove_padding=True \
    actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \
    actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \
    actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \
    actor_rollout_ref.model.path="${MODEL_PATH}" \
    +actor_rollout_ref.model.override_config.attention_dropout=0. \
    +actor_rollout_ref.model.override_config.embd_pdrop=0. \
    +actor_rollout_ref.model.override_config.resid_pdrop=0. \
    actor_rollout_ref.model.enable_gradient_checkpointing=True \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.actor.optim.lr_warmup_steps=10 \
    actor_rollout_ref.actor.optim.weight_decay=0.1 \
    actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \
    actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \
    actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \
    actor_rollout_ref.actor.entropy_coeff=0 \
    actor_rollout_ref.actor.grad_clip=1.0 \
    actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \
    actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.8 \
    actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \
    actor_rollout_ref.rollout.enable_chunked_prefill=False \
    actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \
    actor_rollout_ref.rollout.temperature=${temperature} \
    actor_rollout_ref.rollout.top_p=${top_p} \
    actor_rollout_ref.rollout.top_k="${top_k}" \
    actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \
    actor_rollout_ref.rollout.val_kwargs.top_p=${top_p} \
    actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \
    actor_rollout_ref.rollout.val_kwargs.do_sample=True \
    actor_rollout_ref.rollout.val_kwargs.n=1 \
    actor_rollout_ref.rollout.name=vllm \
    actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \
    actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \
    actor_rollout_ref.actor.fsdp_config.fsdp_size=8 \
    reward_model.reward_manager=dapo \
    reward_model.overlong_buffer.enable=${enable_overlong_buffer} \
    reward_model.overlong_buffer.len=${overlong_buffer_len} \
    reward_model.overlong_buffer.penalty_factor=${overlong_penalty_factor} \
    trainer.logger=['console'] \
    trainer.project_name="${project_name}" \
    trainer.experiment_name="${exp_name}" \
    trainer.n_gpus_per_node=16 \
    trainer.nnodes="${NNODES}" \
    trainer.val_before_train=False \
    trainer.test_freq=10 \
    trainer.save_freq=20 \
    trainer.total_epochs=1 \
    trainer.total_training_steps=100 \
    trainer.default_local_dir="${CKPTS_DIR}" \
    trainer.resume_mode=auto \
    data.shuffle=False \
    actor_rollout_ref.actor.use_torch_compile=False \
    actor_rollout_ref.ref.use_torch_compile=False \
    actor_rollout_ref.actor.entropy_checkpointing=True \
    actor_rollout_ref.ref.entropy_checkpointing=True \
    actor_rollout_ref.actor.fsdp_config.forward_prefetch=True \
    actor_rollout_ref.ref.fsdp_config.forward_prefetch=True \
    trainer.device=npu
================================================
FILE: verl_distillation/recipe/dapo/run_dapo_qwen3_8b_base_npu.sh
================================================
#!/bin/bash
project_name='DAPO'
exp_name='DAPO-Qwen3-8B-Base'

adv_estimator=grpo
use_kl_in_reward=False
kl_coef=0.0
use_kl_loss=False
kl_loss_coef=0.0
clip_ratio_low=0.2
clip_ratio_high=0.28
max_prompt_length=$((1024 * 2))
max_response_length=$((1024 * 20))
enable_overlong_buffer=True
overlong_buffer_len=$((1024 * 4))
overlong_penalty_factor=1.0
loss_agg_mode="token-mean"
enable_filter_groups=False
filter_groups_metric=acc
max_num_gen_batches=10
train_prompt_bsz=16
gen_prompt_bsz=$((train_prompt_bsz * 3))
n_resp_per_prompt=16
train_prompt_mini_bsz=1

# Ray
RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"}
WORKING_DIR=${WORKING_DIR:-"${PWD}"}
RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"}
NNODES=${NNODES:-1}

# Paths
RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"}
MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen3-8B-Base"}
CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"}
TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"}
TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"}

# Algorithm
temperature=1.0
top_p=1.0
top_k=-1 # 0 for HF rollout, -1 for vLLM rollout

# Performance Related Parameter
sp_size=2
use_dynamic_bsz=True
actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) / sp_size))
infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) / sp_size))
offload=True
gen_tp=2

ray job submit --runtime-env="${RUNTIME_ENV}" \
    -- python3 -m recipe.dapo.main_dapo \
    data.train_files="${TRAIN_FILE}" \
    data.val_files="${TEST_FILE}" \
    data.prompt_key=prompt \
    data.truncation='left' \
    data.max_prompt_length=${max_prompt_length} \
    data.max_response_length=${max_response_length} \
    data.gen_batch_size=${gen_prompt_bsz} \
    data.train_batch_size=${train_prompt_bsz} \
    actor_rollout_ref.rollout.n=${n_resp_per_prompt} \
    algorithm.adv_estimator=${adv_estimator} \
    algorithm.use_kl_in_reward=${use_kl_in_reward} \
    algorithm.kl_ctrl.kl_coef=${kl_coef} \
    actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \
    actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \
    actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \
    actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \
    actor_rollout_ref.actor.clip_ratio_c=10.0 \
    algorithm.filter_groups.enable=${enable_filter_groups} \
    algorithm.filter_groups.max_num_gen_batches=${max_num_gen_batches} \
    algorithm.filter_groups.metric=${filter_groups_metric} \
    actor_rollout_ref.model.use_remove_padding=True \
    actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \
    actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \
    actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \
    actor_rollout_ref.model.path="${MODEL_PATH}" \
    +actor_rollout_ref.model.override_config.attention_dropout=0. \
    +actor_rollout_ref.model.override_config.embd_pdrop=0. \
    +actor_rollout_ref.model.override_config.resid_pdrop=0. \
    actor_rollout_ref.model.enable_gradient_checkpointing=True \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.actor.optim.lr_warmup_steps=10 \
    actor_rollout_ref.actor.optim.weight_decay=0.1 \
    actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \
    actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \
    actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \
    actor_rollout_ref.actor.entropy_coeff=0 \
    actor_rollout_ref.actor.grad_clip=1.0 \
    actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \
    actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.90 \
    actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \
    actor_rollout_ref.rollout.enable_chunked_prefill=False \
    actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \
    actor_rollout_ref.rollout.temperature=${temperature} \
    actor_rollout_ref.rollout.top_p=${top_p} \
    actor_rollout_ref.rollout.top_k="${top_k}" \
    actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \
    actor_rollout_ref.rollout.val_kwargs.top_p=${top_p} \
    actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \
    actor_rollout_ref.rollout.val_kwargs.do_sample=True \
    actor_rollout_ref.rollout.val_kwargs.n=1 \
    actor_rollout_ref.rollout.name=vllm \
    actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \
    actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \
    actor_rollout_ref.actor.fsdp_config.fsdp_size=-1 \
    reward_model.reward_manager=dapo \
    reward_model.overlong_buffer.enable=${enable_overlong_buffer} \
    reward_model.overlong_buffer.len=${overlong_buffer_len} \
    reward_model.overlong_buffer.penalty_factor=${overlong_penalty_factor} \
    trainer.logger=['console'] \
    trainer.project_name="${project_name}" \
    trainer.experiment_name="${exp_name}" \
    trainer.n_gpus_per_node=8 \
    trainer.nnodes="${NNODES}" \
    trainer.val_before_train=False \
    trainer.test_freq=10 \
    trainer.save_freq=20 \
    trainer.total_epochs=1 \
    trainer.total_training_steps=100 \
    trainer.default_local_dir="${CKPTS_DIR}" \
    trainer.resume_mode=auto \
    data.shuffle=False \
    actor_rollout_ref.actor.use_torch_compile=False \
    actor_rollout_ref.ref.use_torch_compile=False \
    actor_rollout_ref.actor.entropy_checkpointing=True \
    actor_rollout_ref.ref.entropy_checkpointing=True \
    actor_rollout_ref.actor.fsdp_config.forward_prefetch=True \
    actor_rollout_ref.ref.fsdp_config.forward_prefetch=True \
    trainer.device=npu
================================================
FILE: verl_distillation/recipe/dapo/run_dapo_qwen3_moe_30b_base_fsdp_npu.sh
================================================
#!/usr/bin/env bash
set -euxo pipefail

project_name='DAPO'
exp_name='DAPO-Qwen3-MOE-30B-FSDP-128rank-gbs512'
NNODES=8
NPUS_PER_NODE=16

adv_estimator=grpo
use_kl_in_reward=False
kl_coef=0.0
use_kl_loss=False
kl_loss_coef=0.0
clip_ratio_low=0.2
clip_ratio_high=0.28
max_prompt_length=$((1024 * 2))
max_response_length=$((1024 * 20))
enable_overlong_buffer=True
overlong_buffer_len=$((1024 * 4))
overlong_penalty_factor=1.0
loss_agg_mode="token-mean"
ppo_mini_batch_size=32
enable_filter_groups=True
filter_groups_metric=acc
max_num_gen_batches=10
train_prompt_bsz=512
gen_prompt_bsz=$((train_prompt_bsz * 3))
n_resp_per_prompt=16

RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"}
WORKING_DIR=${WORKING_DIR:-"${PWD}"}
RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"}

# Paths
RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"}
MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen3-30B-A3B-Base"}
CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"}
TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"}
TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"}

# Algorithm
temperature=1.0
top_p=1.0
top_k=-1 # 0 for HF rollout, -1 for vLLM rollout
val_top_p=0.7

# Performance Related Parameter
sp_size=16 # For load balancing; on smaller clusters this can be set as low as 2.
use_dynamic_bsz=True
actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) / 2))
infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) / 2))
offload=True
recompute=True
max_num_seqs=128
gen_tp=2

ray job submit --no-wait --runtime-env="${RUNTIME_ENV}" \
    -- python3 -m recipe.dapo.main_dapo \
    data.train_files="${TRAIN_FILE}" \
    data.val_files="${TEST_FILE}" \
    data.prompt_key=prompt \
    data.truncation='left' \
    data.max_prompt_length=${max_prompt_length} \
    data.max_response_length=${max_response_length} \
    data.gen_batch_size=${gen_prompt_bsz} \
    data.train_batch_size=${train_prompt_bsz} \
    actor_rollout_ref.rollout.name=vllm \
    actor_rollout_ref.rollout.n=${n_resp_per_prompt} \
    actor_rollout_ref.rollout.max_num_seqs=${max_num_seqs} \
    actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \
    algorithm.adv_estimator=${adv_estimator} \
    algorithm.use_kl_in_reward=${use_kl_in_reward} \
    algorithm.kl_ctrl.kl_coef=${kl_coef} \
    actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \
    actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \
    actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \
    actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \
    actor_rollout_ref.actor.clip_ratio_c=10.0 \
    algorithm.filter_groups.enable=${enable_filter_groups} \
    algorithm.filter_groups.max_num_gen_batches=${max_num_gen_batches} \
    algorithm.filter_groups.metric=${filter_groups_metric} \
    actor_rollout_ref.model.use_remove_padding=True \
    actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \
    actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \
    actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \
    actor_rollout_ref.model.path="${MODEL_PATH}" \
    +actor_rollout_ref.model.override_config.attention_dropout=0. \
    +actor_rollout_ref.model.override_config.embd_pdrop=0. \
    +actor_rollout_ref.model.override_config.resid_pdrop=0. \
    actor_rollout_ref.model.enable_gradient_checkpointing=${recompute} \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.actor.optim.lr_warmup_steps=10 \
    actor_rollout_ref.actor.optim.weight_decay=0.1 \
    actor_rollout_ref.actor.ppo_mini_batch_size=${ppo_mini_batch_size} \
    actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \
    actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \
    actor_rollout_ref.actor.fsdp_config.forward_prefetch=False \
    actor_rollout_ref.actor.entropy_coeff=0 \
    actor_rollout_ref.actor.grad_clip=1.0 \
    actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \
    actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.8 \
    actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \
    actor_rollout_ref.rollout.enable_chunked_prefill=True \
    actor_rollout_ref.rollout.temperature=${temperature} \
    actor_rollout_ref.rollout.top_p=${top_p} \
    actor_rollout_ref.rollout.top_k=${top_k} \
    actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \
    actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \
    actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \
    actor_rollout_ref.rollout.val_kwargs.do_sample=True \
    actor_rollout_ref.rollout.val_kwargs.n=1 \
    actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \
    actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \
    actor_rollout_ref.actor.fsdp_config.fsdp_size=-1 \
    actor_rollout_ref.ref.fsdp_config.forward_prefetch=False \
    actor_rollout_ref.rollout.enforce_eager=False \
    actor_rollout_ref.rollout.free_cache_engine=True \
    reward_model.reward_manager=dapo \
    reward_model.overlong_buffer.enable=${enable_overlong_buffer} \
    reward_model.overlong_buffer.len=${overlong_buffer_len} \
    reward_model.overlong_buffer.penalty_factor=${overlong_penalty_factor} \
    trainer.logger=['console','wandb'] \
    trainer.project_name="${project_name}" \
    trainer.experiment_name="${exp_name}" \
    trainer.n_gpus_per_node="${NPUS_PER_NODE}" \
    trainer.nnodes="${NNODES}" \
    trainer.val_before_train=False \
    trainer.test_freq=5 \
    trainer.save_freq=-1 \
    trainer.total_epochs=1 \
    trainer.device="npu" \
    actor_rollout_ref.actor.use_torch_compile=False \
    actor_rollout_ref.ref.use_torch_compile=False
================================================
FILE: verl_distillation/recipe/dapo/run_dapo_qwen3_moe_30b_megatron_npu.sh
================================================
#!/bin/bash
project_name='DAPO'
exp_name='DAPO-Qwen3-30B-megatron'

adv_estimator=grpo
use_kl_in_reward=False
kl_coef=0.0
use_kl_loss=False
kl_loss_coef=0.0
clip_ratio_low=0.2
clip_ratio_high=0.28
max_prompt_length=$((1024 * 2))
max_response_length=$((1024 * 20))
enable_overlong_buffer=True
overlong_buffer_len=$((1024 * 4))
overlong_penalty_factor=1.0
loss_agg_mode="token-mean"
enable_filter_groups=True
filter_groups_metric=acc
max_num_gen_batches=10
train_prompt_bsz=16
gen_prompt_bsz=$((train_prompt_bsz * 2))
n_resp_per_prompt=16
train_prompt_mini_bsz=2

# Ray
RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"}
WORKING_DIR=${WORKING_DIR:-"${PWD}"}
RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"}
NNODES=${NNODES:-1}

# Paths
RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"}
MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen3-30B-A3B"}
# MCORE_MODEL_PATH points to the converted checkpoint.
# To avoid loading these weights, set actor_rollout_ref.actor.megatron.use_dist_checkpointing=False
# (see the sketch after the launch command below).
MCORE_MODEL_PATH=${MCORE_MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen3-30B-A3B-dist_ckpt"}
CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"}
TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"}
TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"}

# Algorithm
temperature=1.0
top_p=1.0
top_k=-1 # 0 for HF rollout, -1 for vLLM rollout

# Performance Related Parameter
sp_size=8
use_dynamic_bsz=True
actor_ppo_max_token_len=$((max_prompt_length + max_response_length))
infer_ppo_max_token_len=$((max_prompt_length + max_response_length))
offload=True
max_num_batched_tokens=$((max_prompt_length + max_response_length))
gen_tp=2 # rollout (vLLM) tensor parallel size; referenced below, adjust to your cluster

# Megatron backend
train_tp=4
train_ep=2
train_pp=2
train_cp=1

ray job submit --no-wait --runtime-env="${RUNTIME_ENV}" \
    --address "${RAY_ADDRESS}" \
    -- python3 -m recipe.dapo.main_dapo \
    --config-name="dapo_megatron_trainer" \
    data.filter_overlong_prompts=False \
    data.train_files="${TRAIN_FILE}" \
    data.val_files="${TEST_FILE}" \
    data.shuffle=False \
    data.prompt_key=prompt \
    data.truncation='left' \
    data.max_prompt_length=${max_prompt_length} \
    data.max_response_length=${max_response_length} \
    data.gen_batch_size=${gen_prompt_bsz} \
    data.train_batch_size=${train_prompt_bsz} \
    algorithm.adv_estimator=${adv_estimator} \
    algorithm.use_kl_in_reward=${use_kl_in_reward} \
    algorithm.kl_ctrl.kl_coef=${kl_coef} \
    actor_rollout_ref.rollout.n=${n_resp_per_prompt} \
    actor_rollout_ref.rollout.name=vllm \
    actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \
    actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \
    actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \
    actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \
    actor_rollout_ref.actor.clip_ratio_c=10.0 \
    actor_rollout_ref.actor.ppo_epochs=1 \
    algorithm.filter_groups.enable=${enable_filter_groups} \
    algorithm.filter_groups.max_num_gen_batches=${max_num_gen_batches} \
    algorithm.filter_groups.metric=${filter_groups_metric} \
    actor_rollout_ref.model.use_remove_padding=True \
    actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=1 \
    actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=1 \
    actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=1 \
    actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \
    actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \
    actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \
    actor_rollout_ref.model.path="${MODEL_PATH}" \
    +actor_rollout_ref.model.override_config.attention_dropout=0. \
    +actor_rollout_ref.model.override_config.embd_pdrop=0. \
    +actor_rollout_ref.model.override_config.resid_pdrop=0. \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.actor.optim.lr_warmup_steps=10 \
    actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \
    actor_rollout_ref.actor.megatron.param_offload=${offload} \
    actor_rollout_ref.actor.megatron.optimizer_offload=${offload} \
    actor_rollout_ref.actor.megatron.grad_offload=${offload} \
    actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=${train_pp} \
    actor_rollout_ref.actor.megatron.tensor_model_parallel_size=${train_tp} \
    actor_rollout_ref.actor.megatron.expert_model_parallel_size=${train_ep} \
    actor_rollout_ref.actor.megatron.context_parallel_size=${train_cp} \
    actor_rollout_ref.actor.megatron.dist_checkpointing_path=${MCORE_MODEL_PATH} \
    actor_rollout_ref.actor.megatron.use_dist_checkpointing=True \
    actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=${train_pp} \
    actor_rollout_ref.ref.megatron.tensor_model_parallel_size=${train_tp} \
    actor_rollout_ref.ref.megatron.expert_model_parallel_size=${train_ep} \
    actor_rollout_ref.ref.megatron.context_parallel_size=${train_cp} \
    actor_rollout_ref.ref.megatron.param_offload=${offload} \
    actor_rollout_ref.ref.megatron.dist_checkpointing_path=${MCORE_MODEL_PATH} \
    actor_rollout_ref.actor.entropy_coeff=0 \
    actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.7 \
    actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \
    actor_rollout_ref.rollout.enable_chunked_prefill=True \
    actor_rollout_ref.rollout.enable_prefix_caching=False \
    actor_rollout_ref.rollout.max_num_batched_tokens=${max_num_batched_tokens} \
    actor_rollout_ref.rollout.max_model_len=$((max_prompt_length + max_response_length)) \
    actor_rollout_ref.rollout.temperature=${temperature} \
    actor_rollout_ref.rollout.top_p=${top_p} \
    actor_rollout_ref.rollout.top_k=${top_k} \
    actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \
    actor_rollout_ref.rollout.val_kwargs.top_p=${top_p} \
    actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \
    actor_rollout_ref.rollout.val_kwargs.do_sample=True \
    actor_rollout_ref.rollout.val_kwargs.n=1 \
    actor_rollout_ref.rollout.enforce_eager=True \
    actor_rollout_ref.rollout.free_cache_engine=True \
    actor_rollout_ref.ref.megatron.use_dist_checkpointing=True \
    reward_model.reward_manager=dapo \
    reward_model.overlong_buffer.enable=${enable_overlong_buffer} \
    reward_model.overlong_buffer.len=${overlong_buffer_len} \
    reward_model.overlong_buffer.penalty_factor=${overlong_penalty_factor} \
    trainer.logger=['console'] \
    trainer.project_name="${project_name}" \
    trainer.experiment_name="${exp_name}" \
    trainer.n_gpus_per_node=16 \
    trainer.nnodes="${NNODES}" \
    trainer.val_before_train=False \
    trainer.test_freq=-1 \
    trainer.save_freq=-1 \
    trainer.total_epochs=1 \
    trainer.default_local_dir="${CKPTS_DIR}" \
    trainer.device="npu" \
    actor_rollout_ref.nccl_timeout=14400 \
    actor_rollout_ref.actor.use_torch_compile=False \
    actor_rollout_ref.ref.use_torch_compile=False \
    +actor_rollout_ref.actor.megatron.override_transformer_config.use_flash_attn=True \
    +actor_rollout_ref.actor.megatron.override_transformer_config.recompute_method=uniform \
    +actor_rollout_ref.actor.megatron.override_transformer_config.recompute_granularity=full \
    +actor_rollout_ref.actor.megatron.override_transformer_config.recompute_num_layers=1
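
# As the comment above MCORE_MODEL_PATH notes, loading the converted
# dist-checkpoint weights can be skipped. A minimal sketch of the corresponding
# change to the launch above (a hypothetical variant, not part of this recipe),
# replacing the two use_dist_checkpointing=True arguments:
#
#   actor_rollout_ref.actor.megatron.use_dist_checkpointing=False \
#   actor_rollout_ref.ref.megatron.use_dist_checkpointing=False \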
================================================
FILE: verl_distillation/recipe/dapo/run_dapo_wo_ds_qwen2.5_32b.sh
================================================
#!/usr/bin/env bash
set -euxo pipefail

# DAPO (w/o Dynamic Sampling)
project_name='DAPO-verl'
exp_name='DAPO-wo-DS-Qwen2.5-32B'

adv_estimator=grpo
use_kl_in_reward=False
kl_coef=0.0
use_kl_loss=False
kl_loss_coef=0.0
clip_ratio_low=0.2
clip_ratio_high=0.28
max_prompt_length=$((1024 * 2))
max_response_length=$((1024 * 20))
enable_overlong_buffer=True
overlong_buffer_len=$((1024 * 4))
overlong_penalty_factor=1.0
loss_agg_mode="token-mean"
enable_filter_groups=False
train_prompt_bsz=512
n_resp_per_prompt=16
train_prompt_mini_bsz=32

# Ray
RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"}
WORKING_DIR=${WORKING_DIR:-"${PWD}"}
RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"}
NNODES=${NNODES:-16}

# Paths
RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"}
MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen2.5-32B"}
CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"}
TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"}
TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"}

# Algorithm
temperature=1.0
top_p=1.0
top_k=-1 # 0 for HF rollout, -1 for vLLM rollout
val_top_p=0.7

# Performance Related Parameter
sp_size=8
use_dynamic_bsz=True
actor_ppo_max_token_len=$((max_prompt_length + max_response_length))
infer_ppo_max_token_len=$((max_prompt_length + max_response_length))
offload=True
gen_tp=4

ray job submit --no-wait --runtime-env="${RUNTIME_ENV}" \
    --working-dir "${WORKING_DIR}" \
    -- python3 -m recipe.dapo.main_dapo \
    data.train_files="${TRAIN_FILE}" \
    data.val_files="${TEST_FILE}" \
    data.prompt_key=prompt \
    data.truncation='left' \
    data.max_prompt_length=${max_prompt_length} \
    data.max_response_length=${max_response_length} \
    data.train_batch_size=${train_prompt_bsz} \
    actor_rollout_ref.rollout.n=${n_resp_per_prompt} \
    algorithm.adv_estimator=${adv_estimator} \
    algorithm.use_kl_in_reward=${use_kl_in_reward} \
    algorithm.kl_ctrl.kl_coef=${kl_coef} \
    actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \
    actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \
    actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \
    actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \
    actor_rollout_ref.actor.clip_ratio_c=10.0 \
    algorithm.filter_groups.enable=${enable_filter_groups} \
    actor_rollout_ref.model.use_remove_padding=True \
    actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \
    actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \
    actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \
    actor_rollout_ref.model.path="${MODEL_PATH}" \
    actor_rollout_ref.model.enable_gradient_checkpointing=True \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.actor.optim.lr_warmup_steps=10 \
    actor_rollout_ref.actor.optim.weight_decay=0.1 \
    actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \
    actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \
    actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \
    actor_rollout_ref.actor.entropy_coeff=0 \
    actor_rollout_ref.actor.grad_clip=1.0 \
    actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \
    actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.7 \
    actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \
    actor_rollout_ref.rollout.enable_chunked_prefill=True \
    actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \
    actor_rollout_ref.rollout.temperature=${temperature} \
    actor_rollout_ref.rollout.top_p=${top_p} \
    actor_rollout_ref.rollout.top_k="${top_k}" \
    actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \
    actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \
    actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \
    actor_rollout_ref.rollout.val_kwargs.do_sample=True \
    actor_rollout_ref.rollout.val_kwargs.n=1 \
    actor_rollout_ref.rollout.name=vllm \
    actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \
    actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \
    actor_rollout_ref.actor.fsdp_config.fsdp_size=-1 \
    reward_model.reward_manager=dapo \
    reward_model.overlong_buffer.enable=${enable_overlong_buffer} \
    reward_model.overlong_buffer.len=${overlong_buffer_len} \
    reward_model.overlong_buffer.penalty_factor=${overlong_penalty_factor} \
    trainer.logger='["console","wandb"]' \
    trainer.project_name="${project_name}" \
    trainer.experiment_name="${exp_name}" \
    trainer.n_gpus_per_node=8 \
    trainer.nnodes="${NNODES}" \
    trainer.val_before_train=True \
    trainer.test_freq=5 \
    trainer.save_freq=5 \
    trainer.total_epochs=1 \
    trainer.default_local_dir="${CKPTS_DIR}" \
    trainer.resume_mode=auto

================================================
FILE: verl_distillation/recipe/dapo/runtime_env.yaml
================================================
working_dir: ./
excludes: ["/.git/"]
env_vars:
  TORCH_NCCL_AVOID_RECORD_STREAMS: "1"
  VLLM_USE_V1: "1"
================================================
FILE: verl_distillation/recipe/dapo/test_dapo_7b.sh
================================================
#!/usr/bin/env bash
set -xeuo pipefail

project_name='DAPO'
exp_name='DAPO-Qwen2.5-7B-Math-Test'

adv_estimator=grpo
use_kl_in_reward=False
kl_coef=0.0
use_kl_loss=False
kl_loss_coef=0.0
clip_ratio_low=0.2
clip_ratio_high=0.28
max_prompt_length=$((1024 * 2))
max_response_length=$((1024 * 2))
enable_overlong_buffer=True
overlong_buffer_len=512
overlong_penalty_factor=1.0
loss_agg_mode="token-mean"
enable_filter_groups=True
filter_groups_metric=acc
max_num_gen_batches=10
train_prompt_bsz=512
gen_prompt_bsz=$((train_prompt_bsz * 3))
train_prompt_mini_bsz=32
n_resp_per_prompt=16

# Ray
RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"}
WORKING_DIR=${WORKING_DIR:-"${PWD}"}
RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"}
NNODES=${NNODES:-4}

# Paths
RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"}
MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen2.5-Math-7B"}
CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"}
TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"}
TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"}

# Algorithm
temperature=1.0
top_p=1.0
top_k=-1 # 0 for HF rollout, -1 for vLLM rollout

# Mathematically equivalent
use_dynamic_bsz=True
infer_micro_batch_size=null
train_micro_batch_size=null
offload=False

ray job submit --no-wait --runtime-env="${RUNTIME_ENV}" \
    --working-dir "${WORKING_DIR}" \
    -- python3 -m recipe.dapo.main_dapo \
    data.train_files="${TRAIN_FILE}" \
    data.val_files="${TEST_FILE}" \
    data.prompt_key=prompt \
    data.truncation='left' \
    data.max_prompt_length=${max_prompt_length} \
    data.max_response_length=${max_response_length} \
    data.gen_batch_size=${gen_prompt_bsz} \
    data.train_batch_size=${train_prompt_bsz} \
    actor_rollout_ref.rollout.n=${n_resp_per_prompt} \
    actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \
    actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \
    actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \
    actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \
    actor_rollout_ref.actor.clip_ratio_c=10.0 \
    algorithm.adv_estimator=${adv_estimator} \
    algorithm.use_kl_in_reward=${use_kl_in_reward} \
    algorithm.kl_ctrl.kl_coef=${kl_coef} \
    algorithm.filter_groups.enable=${enable_filter_groups} \
    algorithm.filter_groups.metric=${filter_groups_metric} \
    algorithm.filter_groups.max_num_gen_batches=${max_num_gen_batches} \
    actor_rollout_ref.model.use_remove_padding=True \
    actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.actor.ppo_max_token_len_per_gpu=$((max_prompt_length + max_response_length)) \
    actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=$((max_prompt_length + max_response_length)) \
    actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=$((max_prompt_length + max_response_length)) \
    actor_rollout_ref.model.path="${MODEL_PATH}" \
    actor_rollout_ref.model.enable_gradient_checkpointing=True \
    actor_rollout_ref.rollout.name=vllm \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.actor.optim.lr_warmup_steps=10 \
    actor_rollout_ref.actor.optim.weight_decay=0.1 \
    actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \
    actor_rollout_ref.actor.ppo_micro_batch_size=${train_micro_batch_size} \
    actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \
    actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \
    actor_rollout_ref.actor.entropy_coeff=0 \
    actor_rollout_ref.actor.grad_clip=1.0 \
    actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \
    actor_rollout_ref.actor.ulysses_sequence_parallel_size=1 \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.85 \
    actor_rollout_ref.rollout.log_prob_micro_batch_size=${infer_micro_batch_size} \
    actor_rollout_ref.rollout.tensor_model_parallel_size=1 \
    actor_rollout_ref.rollout.enable_chunked_prefill=True \
    actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \
    actor_rollout_ref.rollout.temperature=${temperature} \
    actor_rollout_ref.rollout.top_p=${top_p} \
    actor_rollout_ref.rollout.top_k="${top_k}" \
    actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \
    actor_rollout_ref.rollout.val_kwargs.top_p=${top_p} \
    actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \
    actor_rollout_ref.rollout.val_kwargs.do_sample=True \
    actor_rollout_ref.rollout.val_kwargs.n=1 \
    actor_rollout_ref.ref.log_prob_micro_batch_size=${infer_micro_batch_size} \
    actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \
    actor_rollout_ref.ref.ulysses_sequence_parallel_size=1 \
    actor_rollout_ref.actor.fsdp_config.fsdp_size=-1 \
    reward_model.reward_manager=dapo \
    reward_model.overlong_buffer.enable=${enable_overlong_buffer} \
    reward_model.overlong_buffer.len=${overlong_buffer_len} \
    reward_model.overlong_buffer.penalty_factor=${overlong_penalty_factor} \
    trainer.logger='["console","wandb"]' \
    trainer.project_name="${project_name}" \
    trainer.experiment_name="${exp_name}" \
    trainer.n_gpus_per_node=8 \
    trainer.nnodes="${NNODES}" \
    trainer.val_before_train=True \
    trainer.test_freq=2 \
    trainer.save_freq=2 \
    trainer.total_epochs=1 \
    trainer.default_local_dir="${CKPTS_DIR}" \
    trainer.resume_mode=disable
================================================
FILE: verl_distillation/recipe/dapo/test_dapo_7b_math.sh
================================================
#!/usr/bin/env bash
set -xeuo pipefail

project_name='DAPO'
exp_name='DAPO-Qwen2.5-7b-MATH-0527a1'

adv_estimator=grpo
use_kl_in_reward=False
kl_coef=0.0
use_kl_loss=False
kl_loss_coef=0.0
clip_ratio_low=0.2
clip_ratio_high=0.28
max_prompt_length=$((1024 * 2))
max_response_length=$((1024 * 8))
enable_overlong_buffer=True
overlong_buffer_len=$((1024 * 4))
overlong_penalty_factor=1.0
loss_agg_mode="token-mean"
train_prompt_bsz=512
n_resp_per_prompt=16
train_prompt_mini_bsz=32

# Ray
# RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"}
# WORKING_DIR=${WORKING_DIR:-"${PWD}"}
# RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"}
NNODES=${NNODES:-8}
NGPUS_PER_NODE=${NGPUS_PER_NODE:-8}

# Paths
RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"}
# Very important! After downloading from Hugging Face, modify max_position_embeddings
# in config.json to 32768 (see the sketch after this script).
MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen2.5-Math-7B"}
CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"}
TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"}
TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"}

# Algorithm
temperature=1.0
top_p=1.0
top_k=-1 # 0 for HF rollout, -1 for vLLM rollout
val_top_p=0.7

# Performance Related Parameter
sp_size=4
use_dynamic_bsz=True
actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 2))
infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 3))
offload=True
gen_tp=4
fsdp_size=32

# reference run wandb: https://wandb.ai/verl-org/DAPO%20Reproduction%20on%20verl/runs/ow47vvon?nw=nwusertongyuxuan361
python3 -m verl.trainer.main_ppo \
    data.train_files="${TRAIN_FILE}" \
    data.val_files="${TEST_FILE}" \
    data.prompt_key=prompt \
    data.truncation='left' \
    data.max_prompt_length=${max_prompt_length} \
    data.max_response_length=${max_response_length} \
    data.train_batch_size=${train_prompt_bsz} \
    actor_rollout_ref.rollout.n=${n_resp_per_prompt} \
    algorithm.adv_estimator=${adv_estimator} \
    algorithm.use_kl_in_reward=${use_kl_in_reward} \
    algorithm.kl_ctrl.kl_coef=${kl_coef} \
    actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \
    actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \
    actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \
    actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \
    actor_rollout_ref.actor.clip_ratio_c=10.0 \
    actor_rollout_ref.model.use_remove_padding=True \
    +actor_rollout_ref.model.override_config.max_position_embeddings=32768 \
    actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \
    actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \
    actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \
    actor_rollout_ref.rollout.name=vllm \
    actor_rollout_ref.model.path="${MODEL_PATH}" \
    actor_rollout_ref.model.enable_gradient_checkpointing=True \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.actor.optim.lr_warmup_steps=10 \
    actor_rollout_ref.actor.optim.weight_decay=0.1 \
    actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \
    actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \
    actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \
    actor_rollout_ref.actor.entropy_coeff=0 \
    actor_rollout_ref.actor.grad_clip=1.0 \
    actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \
    actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.80 \
    actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \
    actor_rollout_ref.rollout.enable_chunked_prefill=True \
    actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \
    actor_rollout_ref.rollout.temperature=${temperature} \
    actor_rollout_ref.rollout.top_p=${top_p} \
    actor_rollout_ref.rollout.top_k=${top_k} \
    actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \
    actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \
    actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \
    actor_rollout_ref.rollout.val_kwargs.do_sample=True \
    actor_rollout_ref.rollout.val_kwargs.n=1 \
    actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \
    actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \
    actor_rollout_ref.actor.fsdp_config.fsdp_size=${fsdp_size} \
    reward_model.reward_manager=dapo \
    +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \
    +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \
    +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \
    +reward_model.reward_kwargs.overlong_buffer_cfg.log=False \
    +reward_model.reward_kwargs.max_resp_len=${max_response_length} \
    trainer.logger='["console","wandb"]' \
    trainer.project_name="${project_name}" \
    trainer.experiment_name="${exp_name}" \
    trainer.n_gpus_per_node="${NGPUS_PER_NODE}" \
    trainer.nnodes="${NNODES}" \
    trainer.val_before_train=True \
    trainer.test_freq=10 \
    trainer.save_freq=10 \
    trainer.total_epochs=10 \
    trainer.total_training_steps=200 \
    trainer.default_local_dir="${CKPTS_DIR}" \
    trainer.resume_mode=auto \
    trainer.log_val_generations=10
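
# The MODEL_PATH note above says to set max_position_embeddings to 32768 in the
# downloaded config.json. A minimal sketch of one way to patch the file
# (illustrative only, not part of the original recipe; assumes MODEL_PATH points
# at the downloaded Hugging Face checkout):
#
#   python3 -c 'import json, sys; p = sys.argv[1]; c = json.load(open(p)); c["max_position_embeddings"] = 32768; json.dump(c, open(p, "w"), indent=2)' "${MODEL_PATH}/config.json"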
================================================
FILE: verl_distillation/recipe/dapo/test_dapo_7b_math_lora.sh
================================================
#!/usr/bin/env bash
set -xeuo pipefail

project_name='DAPO'
exp_name='DAPO-Qwen2.5-7b-MATH-0527a1'

adv_estimator=grpo
use_kl_in_reward=False
kl_coef=0.0
use_kl_loss=False
kl_loss_coef=0.0
clip_ratio_low=0.2
clip_ratio_high=0.28
max_prompt_length=$((1024 * 2))
max_response_length=$((1024 * 8))
enable_overlong_buffer=True
overlong_buffer_len=$((1024 * 4))
overlong_penalty_factor=1.0
loss_agg_mode="token-mean"
train_prompt_bsz=512
n_resp_per_prompt=16
train_prompt_mini_bsz=32

# Ray
# RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"}
# WORKING_DIR=${WORKING_DIR:-"${PWD}"}
# RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"}
NNODES=${NNODES:-8}
NGPUS_PER_NODE=${NGPUS_PER_NODE:-8}

# Paths
RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"}
MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen2.5-Math-7B"}
CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"}
TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"}
TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"}

# Algorithm
temperature=1.0
top_p=1.0
top_k=-1 # 0 for HF rollout, -1 for vLLM rollout
val_top_p=0.7

# Performance Related Parameter
sp_size=4
use_dynamic_bsz=True
actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 2))
infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 3))
offload=True
gen_tp=4
fsdp_size=32

# remember to set VLLM_ALLOW_LONG_MAX_MODEL_LEN=1 for this model (see the note after this script)
python3 -m verl.trainer.main_ppo \
    data.train_files="${TRAIN_FILE}" \
    data.val_files="${TEST_FILE}" \
    data.prompt_key=prompt \
    data.truncation='left' \
    data.max_prompt_length=${max_prompt_length} \
    data.max_response_length=${max_response_length} \
    data.train_batch_size=${train_prompt_bsz} \
    actor_rollout_ref.rollout.n=${n_resp_per_prompt} \
    algorithm.adv_estimator=${adv_estimator} \
    algorithm.use_kl_in_reward=${use_kl_in_reward} \
    algorithm.kl_ctrl.kl_coef=${kl_coef} \
    actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \
    actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \
    actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \
    actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \
    actor_rollout_ref.actor.clip_ratio_c=10.0 \
    actor_rollout_ref.model.use_remove_padding=True \
    +actor_rollout_ref.model.override_config.max_position_embeddings=32768 \
    actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \
    actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \
    actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \
    actor_rollout_ref.rollout.name=vllm \
    actor_rollout_ref.model.path="${MODEL_PATH}" \
    actor_rollout_ref.model.enable_gradient_checkpointing=True \
    actor_rollout_ref.model.lora_rank=8 \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.actor.optim.lr_warmup_steps=10 \
    actor_rollout_ref.actor.optim.weight_decay=0.1 \
    actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \
    actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \
    actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \
    actor_rollout_ref.actor.entropy_coeff=0 \
    actor_rollout_ref.actor.grad_clip=1.0 \
    actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \
    actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.80 \
    actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \
    actor_rollout_ref.rollout.enable_chunked_prefill=True \
    actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \
    actor_rollout_ref.rollout.temperature=${temperature} \
    actor_rollout_ref.rollout.top_p=${top_p} \
    actor_rollout_ref.rollout.top_k=${top_k} \
    actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \
    actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \
    actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \
    actor_rollout_ref.rollout.val_kwargs.do_sample=True \
    actor_rollout_ref.rollout.val_kwargs.n=1 \
    actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \
    actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \
    actor_rollout_ref.actor.fsdp_config.fsdp_size=${fsdp_size} \
    reward_model.reward_manager=dapo \
    +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \
    +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \
    +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \
    +reward_model.reward_kwargs.overlong_buffer_cfg.log=False \
    +reward_model.reward_kwargs.max_resp_len=${max_response_length} \
    trainer.logger='["console","wandb"]' \
    trainer.project_name="${project_name}" \
    trainer.experiment_name="${exp_name}" \
    trainer.n_gpus_per_node="${NGPUS_PER_NODE}" \
    trainer.nnodes="${NNODES}" \
    trainer.val_before_train=True \
    trainer.test_freq=10 \
    trainer.save_freq=10 \
    trainer.total_epochs=10 \
    trainer.total_training_steps=200 \
    trainer.default_local_dir="${CKPTS_DIR}" \
    trainer.resume_mode=auto \
    trainer.log_val_generations=10
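
# The note above the launch command says VLLM_ALLOW_LONG_MAX_MODEL_LEN=1 is
# required for this model. A minimal sketch (run in the shell before, or add
# above, the python3 command):
#
#   export VLLM_ALLOW_LONG_MAX_MODEL_LEN=1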
================================================
FILE: verl_distillation/recipe/dapo/test_dapo_7b_math_megatron.sh
================================================
#!/usr/bin/env bash
set -xeuo pipefail

project_name='DAPO'
exp_name='DAPO-Qwen2.5-7b-MATH-megatron-0519a1'

adv_estimator=grpo
use_kl_in_reward=False
kl_coef=0.0
use_kl_loss=False
kl_loss_coef=0.0
clip_ratio_low=0.2
clip_ratio_high=0.28
max_prompt_length=$((1024 * 2))
max_response_length=$((1024 * 8))
enable_overlong_buffer=True
overlong_buffer_len=$((1024 * 4))
overlong_penalty_factor=1.0
loss_agg_mode="token-mean"
train_prompt_bsz=512
n_resp_per_prompt=16
train_prompt_mini_bsz=32

# Ray
RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"}
WORKING_DIR=${WORKING_DIR:-"${PWD}"}
RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"}
NNODES=${NNODES:-4}

# Paths
RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"}
MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen2.5-Math-7B"}
CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"}
TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"}
TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"}

# Algorithm
temperature=1.0
top_p=1.0
top_k=-1 # 0 for HF rollout, -1 for vLLM rollout
val_top_p=0.7

# Performance Related Parameter
use_dynamic_bsz=True
actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 2))
infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 3))
offload=True
gen_tp=4
train_tp=4
train_pp=2

# TODO: support dynamic_bsz for megatron
# actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \
# actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \
# actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \
# actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \
# actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \
# actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \
python3 -m verl.trainer.main_ppo \
    --config-path=config \
    --config-name='ppo_megatron_trainer.yaml' \
    data.train_files="${TRAIN_FILE}" \
    data.val_files="${TEST_FILE}" \
    data.prompt_key=prompt \
    data.truncation='left' \
    data.max_prompt_length=${max_prompt_length} \
    data.max_response_length=${max_response_length} \
    data.train_batch_size=${train_prompt_bsz} \
    actor_rollout_ref.rollout.n=${n_resp_per_prompt} \
    algorithm.adv_estimator=${adv_estimator} \
    algorithm.use_kl_in_reward=${use_kl_in_reward} \
    algorithm.kl_ctrl.kl_coef=${kl_coef} \
    actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \
    actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \
    actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \
    actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \
    actor_rollout_ref.actor.clip_ratio_c=10.0 \
    actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=2 \
    actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \
    actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \
    actor_rollout_ref.model.path="${MODEL_PATH}" \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.actor.optim.lr_warmup_steps=10 \
    actor_rollout_ref.actor.optim.weight_decay=0.1 \
    actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \
    actor_rollout_ref.actor.megatron.param_offload=${offload} \
    actor_rollout_ref.actor.megatron.optimizer_offload=${offload} \
    actor_rollout_ref.actor.megatron.grad_offload=${offload} \
    actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=${train_pp} \
    actor_rollout_ref.actor.megatron.tensor_model_parallel_size=${train_tp} \
    actor_rollout_ref.actor.entropy_coeff=0 \
    actor_rollout_ref.actor.optim.clip_grad=1.0 \
    actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.80 \
    actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \
    actor_rollout_ref.rollout.enable_chunked_prefill=True \
    actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \
    actor_rollout_ref.rollout.temperature=${temperature} \
    actor_rollout_ref.rollout.top_p=${top_p} \
    actor_rollout_ref.rollout.top_k=${top_k} \
    actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \
    actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \
    actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \
    actor_rollout_ref.rollout.val_kwargs.do_sample=True \
    actor_rollout_ref.rollout.val_kwargs.n=1 \
    actor_rollout_ref.rollout.name=vllm \
    actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=${train_pp} \
    actor_rollout_ref.ref.megatron.tensor_model_parallel_size=${train_tp} \
    actor_rollout_ref.ref.megatron.param_offload=${offload} \
    reward_model.reward_manager=dapo \
    +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \
    +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \
    +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \
    +reward_model.reward_kwargs.overlong_buffer_cfg.log=False \
    +reward_model.reward_kwargs.max_resp_len=${max_response_length} \
    trainer.logger='["console","wandb"]' \
    trainer.project_name="${project_name}" \
    trainer.experiment_name="${exp_name}" \
    trainer.n_gpus_per_node=16 \
    trainer.nnodes="${NNODES}" \
    trainer.val_before_train=False \
    trainer.test_freq=10 \
    trainer.save_freq=10 \
    trainer.total_epochs=10 \
    trainer.default_local_dir="${CKPTS_DIR}" \
    trainer.resume_mode=auto \
    trainer.log_val_generations=10

================================================
FILE: verl_distillation/recipe/dapo/test_dapo_dspk_671b_megatron_96gb.sh
================================================
#!/usr/bin/env bash
set -xeuo pipefail

# 0. Download the config:
#    only configuration_deepseek.py and config.json are needed.
#    Remove `quantization_config` from config.json.
#    Set `num_nextn_predict_layers=0` to disable MTP, which is not currently supported.
huggingface-cli download deepseek-ai/DeepSeek-V3-0324 configuration_deepseek.py config.json
download the dist_ckpt format model from https://huggingface.co/BearBiscuit05/dpsk-v3-671B-BF16-dist_ckpt/tree/main # change the MODEL_PATH and MCORE_MODEL_PATH to your own path # Paths MODEL_PATH="" MCORE_MODEL_PATH="" RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"} aime24_test_path=${RAY_DATA_HOME}/data/aime-2024.parquet # TEST_FILE="['$math500_test_path', '$aime24_test_path']" TEST_FILE="['$aime24_test_path']" # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_top_p=0.7 # Performance Related Parameter use_dynamic_bsz=True actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 2)) infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 3)) offload=True gen_tp=32 train_tp=1 train_ep=32 train_pp=16 python3 -m verl.trainer.main_ppo \ --config-path=config \ --config-name='ppo_megatron_trainer.yaml' \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.train_batch_size=${train_prompt_bsz} \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=2 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=2 \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=2 \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.megatron.param_offload=${offload} \ actor_rollout_ref.actor.megatron.optimizer_offload=${offload} \ actor_rollout_ref.actor.megatron.grad_offload=${offload} \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=${train_pp} \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=${train_tp} \ actor_rollout_ref.actor.megatron.expert_model_parallel_size=${train_ep} \ actor_rollout_ref.actor.megatron.dist_checkpointing_path=${MCORE_MODEL_PATH} \ actor_rollout_ref.actor.megatron.use_dist_checkpointing=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.num_layers_in_first_pipeline_stage=3 \ +actor_rollout_ref.actor.megatron.override_transformer_config.num_layers_in_last_pipeline_stage=2 \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.optim.clip_grad=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ 
actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=True \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=${train_pp} \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=${train_tp} \ actor_rollout_ref.ref.megatron.expert_model_parallel_size=${train_ep} \ actor_rollout_ref.ref.megatron.param_offload=${offload} \ actor_rollout_ref.ref.megatron.dist_checkpointing_path=${MCORE_MODEL_PATH} \ actor_rollout_ref.ref.megatron.use_dist_checkpointing=True \ reward_model.reward_manager=dapo \ +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \ +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \ +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \ +reward_model.reward_kwargs.overlong_buffer_cfg.log=False \ +reward_model.reward_kwargs.max_resp_len=${max_response_length} \ trainer.logger='["console","wandb"]' \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node=8 \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=False \ trainer.test_freq=5 \ trainer.save_freq=5 \ trainer.total_epochs=10 \ trainer.total_training_steps=10 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=auto \ trainer.log_val_generations=10 ================================================ FILE: verl_distillation/recipe/dapo/test_dapo_glm_air_megatron.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail NNODES=${NNODES:-8} NGPUS_PER_NODES=${NGPUS_PER_NODES:-8} project_name='DAPO' exp_name='DAPO-GLM-AIR-MATH-megatron' adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 8)) enable_overlong_buffer=True overlong_buffer_len=$((1024 * 4)) overlong_penalty_factor=1.0 loss_agg_mode="token-mean" train_prompt_bsz=512 n_resp_per_prompt=16 train_prompt_mini_bsz=128 train_ppo_micro_batch_size_per_gpu=2 infer_ppo_micro_batch_size_per_gpu=2 # Paths MODEL_PATH=/models/zai-org/GLM-4.5-Air-Base # GLM Base model can use chat_template.jinja from instruct models cp /models/zai-org/GLM-4.5-Air/chat_template.jinja ${MODEL_PATH}/chat_template.jinja TRAIN_FILE=/data/dapo/dapo-math-17k.parquet aime24_test_path=/data/dapo/aime-2024.parquet # math500_test_path=/data/rlhf/math500/test.parquet # TEST_FILE="['$math500_test_path', '$aime24_test_path']" TEST_FILE="['$aime24_test_path']" # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_top_p=0.7 # Performance Related Parameter use_dynamic_bsz=True actor_ppo_max_token_len=$(((max_prompt_length + max_response_length))) infer_ppo_max_token_len=$(((max_prompt_length + max_response_length))) offload=True COMMON_PP=${COMMON_PP:-2} COMMON_VPP=${COMMON_VPP:-null} COMMON_CP=${COMMON_CP:-4} COMMON_TP=${COMMON_TP:-2} COMMON_EP=${COMMON_EP:-8} COMMON_ETP=${COMMON_ETP:-1} TRAIN_TP=${TRAIN_TP:-$COMMON_TP} INFER_TP=${INFER_TP:-8} ACTOR_PP=${ACTOR_PP:-$COMMON_PP} ACTOR_VPP=${ACTOR_VPP:-$COMMON_VPP} ACTOR_CP=${ACTOR_CP:-$COMMON_CP} ACTOR_TP=${ACTOR_TP:-$TRAIN_TP} ACTOR_EP=${ACTOR_EP:-$COMMON_EP} ACTOR_ETP=${ACTOR_ETP:-$COMMON_ETP} ROLLOUT_TP=${ROLLOUT_TP:-$INFER_TP} REF_PP=${REF_PP:-$COMMON_PP} REF_VPP=${REF_VPP:-$COMMON_VPP} 
REF_CP=${REF_CP:-$COMMON_CP} REF_TP=${REF_TP:-$TRAIN_TP} REF_EP=${REF_EP:-$COMMON_EP} REF_ETP=${REF_ETP:-$COMMON_ETP} CRITIC_PP=${CRITIC_PP:-$COMMON_PP} CRITIC_VPP=${CRITIC_VPP:-$COMMON_VPP} CRITIC_CP=${CRITIC_CP:-$COMMON_CP} CRITIC_TP=${CRITIC_TP:-$TRAIN_TP} CRITIC_EP=${CRITIC_EP:-$COMMON_EP} CRITIC_ETP=${CRITIC_ETP:-$COMMON_ETP} RM_PP=${RM_PP:-$COMMON_PP} RM_VPP=${RM_VPP:-$COMMON_VPP} RM_CP=${RM_CP:-$COMMON_CP} RM_TP=${RM_TP:-$TRAIN_TP} RM_EP=${RM_EP:-$COMMON_EP} RM_ETP=${RM_ETP:-$COMMON_ETP} USE_MBRIDGE=True USE_DIST_CKPT=False # Install the latest mbridge # pip install --no-cache-dir git+https://github.com/ISEEKYAN/mbridge.git python3 -m verl.trainer.main_ppo --config-path=./config --config-name='ppo_megatron_trainer'\ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.train_batch_size=${train_prompt_bsz} \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ +actor_rollout_ref.model.override_config.model_config.max_position_embeddings=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.model.use_fused_kernels=True \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=${train_ppo_micro_batch_size_per_gpu} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.lr_decay_style='constant' \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ actor_rollout_ref.actor.megatron.use_mbridge=$USE_MBRIDGE \ actor_rollout_ref.actor.megatron.use_dist_checkpointing=$USE_DIST_CKPT \ actor_rollout_ref.actor.megatron.param_offload=${offload} \ actor_rollout_ref.actor.megatron.grad_offload=${offload} \ actor_rollout_ref.actor.megatron.optimizer_offload=${offload} \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=${ACTOR_TP} \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=${ACTOR_PP} \ actor_rollout_ref.actor.megatron.virtual_pipeline_model_parallel_size=${ACTOR_VPP} \ actor_rollout_ref.actor.megatron.context_parallel_size=${ACTOR_CP} \ actor_rollout_ref.actor.megatron.expert_model_parallel_size=${ACTOR_EP} \ actor_rollout_ref.actor.megatron.expert_tensor_parallel_size=${ACTOR_ETP} \ actor_rollout_ref.actor.megatron.override_transformer_config.recompute_granularity="selective" \ actor_rollout_ref.actor.megatron.override_transformer_config.recompute_modules=["core_attn","moe_act","layernorm","mlp","moe"] \ +actor_rollout_ref.actor.megatron.override_transformer_config.apply_rope_fusion=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.masked_softmax_fusion=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.bias_activation_fusion=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.bias_dropout_fusion=True \ 
+actor_rollout_ref.actor.megatron.override_transformer_config.gradient_accumulation_fusion=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.deallocate_pipeline_outputs=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.persist_layer_norm=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.moe_grouped_gemm=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.moe_permute_fusion=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.moe_shared_expert_overlap=False \ +actor_rollout_ref.actor.megatron.override_transformer_config.moe_token_dispatcher_type="flex" \ +actor_rollout_ref.actor.megatron.override_transformer_config.moe_router_dtype=fp32 \ +actor_rollout_ref.actor.megatron.override_transformer_config.moe_enable_deepep=False \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.rollout.name='vllm' \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=${infer_ppo_micro_batch_size_per_gpu} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${INFER_TP} \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=True \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=${infer_ppo_micro_batch_size_per_gpu} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.ref.megatron.use_dist_checkpointing=True \ actor_rollout_ref.ref.megatron.param_offload=${offload} \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=${REF_TP} \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=${REF_PP} \ actor_rollout_ref.ref.megatron.virtual_pipeline_model_parallel_size=${REF_VPP} \ actor_rollout_ref.ref.megatron.context_parallel_size=${REF_CP} \ actor_rollout_ref.ref.megatron.expert_model_parallel_size=${REF_EP} \ actor_rollout_ref.ref.megatron.expert_tensor_parallel_size=${REF_ETP} \ reward_model.reward_manager=dapo \ +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \ +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \ +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \ +reward_model.reward_kwargs.overlong_buffer_cfg.log=False \ +reward_model.reward_kwargs.max_resp_len=${max_response_length} \ trainer.logger=['console','wandb'] \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node="${NGPUS_PER_NODES}" \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=False \ trainer.test_freq=10 \ trainer.save_freq=100 \ trainer.total_epochs=10 \ trainer.resume_mode=auto \ trainer.log_val_generations=10 ================================================ FILE: verl_distillation/recipe/dapo/test_dapo_qwen3_30b_math.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail 
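# Token-budget arithmetic (an illustrative note, derived from the defaults set
# below): with max_prompt_length = 2048 and max_response_length = 8192,
#   actor_ppo_max_token_len = (2048 + 8192) * 2 = 20480 tokens per GPU
#   infer_ppo_max_token_len = (2048 + 8192) * 3 = 30720 tokens per GPU
# so dynamic batching packs roughly two full-length samples per GPU during
# training and three during log-prob computation.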
project_name='DAPO' exp_name='DAPO-Qwen3-30B-A3B-Base-MATH-0527a1' adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 8)) enable_overlong_buffer=True overlong_buffer_len=$((1024 * 4)) overlong_penalty_factor=1.0 loss_agg_mode="token-mean" train_prompt_bsz=512 n_resp_per_prompt=16 train_prompt_mini_bsz=32 # Ray # RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} # WORKING_DIR=${WORKING_DIR:-"${PWD}"} # RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} NNODES=${NNODES:-8} NGPUS_PER_NODE=${NGPUS_PER_NODE:-8} # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen3-30B-A3B-Base"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"} TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"} # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_top_p=0.7 # Performance Related Parameter sp_size=4 use_dynamic_bsz=True actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 2)) infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 3)) offload=True gen_tp=4 fsdp_size=32 python3 -m verl.trainer.main_ppo \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.train_batch_size=${train_prompt_bsz} \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.rollout.gpu_memory_utilization=0.80 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ 
actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=True \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.actor.fsdp_config.fsdp_size=${fsdp_size} \ reward_model.reward_manager=dapo \ +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \ +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \ +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \ +reward_model.reward_kwargs.overlong_buffer_cfg.log=False \ +reward_model.reward_kwargs.max_resp_len=${max_response_length} \ trainer.logger='["console","wandb"]' \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node="${NGPUS_PER_NODE}" \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=True \ trainer.test_freq=10 \ trainer.save_freq=10 \ trainer.total_epochs=10 \ trainer.total_training_steps=300 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=auto \ trainer.log_val_generations=10 ================================================ FILE: verl_distillation/recipe/dapo/test_dapo_qwen3_30b_math_single_node.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail project_name='DAPO' exp_name='DAPO-Qwen3-30B-A3B-Base-MATH-0719a1' adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 4)) enable_overlong_buffer=False overlong_buffer_len=$((1024 * 4)) overlong_penalty_factor=0.1 loss_agg_mode="token-mean" train_prompt_bsz=64 n_resp_per_prompt=16 train_prompt_mini_bsz=16 # Ray # RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} # WORKING_DIR=${WORKING_DIR:-"${PWD}"} # RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} NNODES=${NNODES:-1} NGPUS_PER_NODE=${NGPUS_PER_NODE:-8} # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen3-30B-A3B-Base"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"} TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"} # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_top_p=0.7 # Performance Related Parameter sp_size=4 use_dynamic_bsz=True actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 1)) infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 3)) offload=True gen_tp=4 fsdp_size=8 python3 -m verl.trainer.main_ppo \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.train_batch_size=${train_prompt_bsz} \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ 
actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.rollout.gpu_memory_utilization=0.9 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=True \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.actor.fsdp_config.fsdp_size=${fsdp_size} \ reward_model.reward_manager=dapo \ +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \ +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \ +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \ +reward_model.reward_kwargs.overlong_buffer_cfg.log=False \ +reward_model.reward_kwargs.max_resp_len=${max_response_length} \ trainer.logger='["console","wandb"]' \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node="${NGPUS_PER_NODE}" \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=True \ trainer.test_freq=10 \ trainer.save_freq=-1 \ trainer.total_epochs=10 \ trainer.total_training_steps=300 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=auto \ trainer.log_val_generations=10 ================================================ FILE: verl_distillation/recipe/deepeyes/README.md ================================================ # DeepEyes: Incentivizing "Thinking with Images" via Reinforcement Learning This directory contains the implementation for reproducing the DeepEyes paper within the verl framework, supporting multi-turn visual tool 
calls. This implementation is based on the original [DeepEyes paper](https://arxiv.org/abs/2505.14362) and its [official implementation](https://github.com/Visual-Agent/DeepEyes), integrated with the multi-modal and multi-turn capabilities of the verl framework. ## Reproducing the Experiment > **Note on the 'Chart' Dataset:** > > The provided preprocessing script intentionally excludes `data_v0.8_visual_toolbox_v2.parquet`, which contains the 'Chart' data. This subset consists of very high-resolution images, often resembling large figures composed of multiple sub-plots, much like those found in academic papers. > > Consequently, even after using the zoom-in tool, the resulting cropped images remain large. This poses a significant risk of causing Out-of-Memory (OOM) errors, which can abruptly terminate the training process. > > **We strongly recommend against training on the 'Chart' dataset on a single node.** > **Note on the 'thinklite' Dataset:** > Many images in the `thinklite` dataset have a very low resolution, with either a height or width below 28 pixels. This fails to meet the minimum input size required by the Qwen-2.5VL image processor and would cause errors during data loading. > > To mitigate this, we upscale these low-resolution images to satisfy the processor's requirements. However, please be aware that because the original resolution is low, subsequent `crop` operations by the zoom-in tool might frequently trigger exceptions, which could in turn affect the model's tool-use performance. First, launch an inference service to act as a judge for reward calculation. You can use the following script as a reference: ```bash python -m sglang.launch_server --model-path /path/to/Qwen2.5-72B-Instruct \ --port 18901 \ --tp-size 8 \ --context-length 32768 \ --trust-remote-code \ --log-requests false ``` Next, you can start the training: ```bash bash recipe/deepeyes/run_deepeyes_grpo.sh ``` ## Performance ![score](https://private-user-images.githubusercontent.com/82520804/474784419-b13f4f72-bb3a-4281-a43b-1f34a9037c0c.png?jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJnaXRodWIuY29tIiwiYXVkIjoicmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSIsImtleSI6ImtleTUiLCJleHAiOjE3NTQ0NTQxMTMsIm5iZiI6MTc1NDQ1MzgxMywicGF0aCI6Ii84MjUyMDgwNC80NzQ3ODQ0MTktYjEzZjRmNzItYmIzYS00MjgxLWE0M2ItMWYzNGE5MDM3YzBjLnBuZz9YLUFtei1BbGdvcml0aG09QVdTNC1ITUFDLVNIQTI1NiZYLUFtei1DcmVkZW50aWFsPUFLSUFWQ09EWUxTQTUzUFFLNFpBJTJGMjAyNTA4MDYlMkZ1cy1lYXN0LTElMkZzMyUyRmF3czRfcmVxdWVzdCZYLUFtei1EYXRlPTIwMjUwODA2VDA0MTY1M1omWC1BbXotRXhwaXJlcz0zMDAmWC1BbXotU2lnbmF0dXJlPTJjNGMxMjhiOGM4MTNhYTEzYTE2MTYzY2ZjYWRhNmEzMmVjNjUxOGI3MTgzOGQyM2ZmOWJlYTZlNDYzYzU0ZDkmWC1BbXotU2lnbmVkSGVhZGVycz1ob3N0In0.qTDX-3fyLHWdeFh9o4b6nIAB57bT0XyLjKXhNV6k5nA) ![entropy](https://private-user-images.githubusercontent.com/82520804/474785253-752106a9-e25d-4b44-aef9-1ac98015d05c.png?jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJnaXRodWIuY29tIiwiYXVkIjoicmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSIsImtleSI6ImtleTUiLCJleHAiOjE3NTQ0NTQxMTMsIm5iZiI6MTc1NDQ1MzgxMywicGF0aCI6Ii84MjUyMDgwNC80NzQ3ODUyNTMtNzUyMTA2YTktZTI1ZC00YjQ0LWFlZjktMWFjOTgwMTVkMDVjLnBuZz9YLUFtei1BbGdvcml0aG09QVdTNC1ITUFDLVNIQTI1NiZYLUFtei1DcmVkZW50aWFsPUFLSUFWQ09EWUxTQTUzUFFLNFpBJTJGMjAyNTA4MDYlMkZ1cy1lYXN0LTElMkZzMyUyRmF3czRfcmVxdWVzdCZYLUFtei1EYXRlPTIwMjUwODA2VDA0MTY1M1omWC1BbXotRXhwaXJlcz0zMDAmWC1BbXotU2lnbmF0dXJlPTM4OGQ2ZGI3M2JlYWE4YTQyMzIxMWYxMzZhNDBmNmYxNzcwNDgxNThiZDRiMzQyYzUwZjc3OWE4YzdhYWEwMWUmWC1BbXotU2lnbmVkSGVhZGVycz1ob3N0In0.PhimMTxXXEtMLPGzejPQuw-Ul0As8ey-hyy1qkeABIQ) 
![num_turns](https://private-user-images.githubusercontent.com/82520804/474785462-c99c7952-14db-485a-acd2-14e5956ecc34.png?jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJnaXRodWIuY29tIiwiYXVkIjoicmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSIsImtleSI6ImtleTUiLCJleHAiOjE3NTQ0NTQxMTMsIm5iZiI6MTc1NDQ1MzgxMywicGF0aCI6Ii84MjUyMDgwNC80NzQ3ODU0NjItYzk5Yzc5NTItMTRkYi00ODVhLWFjZDItMTRlNTk1NmVjYzM0LnBuZz9YLUFtei1BbGdvcml0aG09QVdTNC1ITUFDLVNIQTI1NiZYLUFtei1DcmVkZW50aWFsPUFLSUFWQ09EWUxTQTUzUFFLNFpBJTJGMjAyNTA4MDYlMkZ1cy1lYXN0LTElMkZzMyUyRmF3czRfcmVxdWVzdCZYLUFtei1EYXRlPTIwMjUwODA2VDA0MTY1M1omWC1BbXotRXhwaXJlcz0zMDAmWC1BbXotU2lnbmF0dXJlPTJkNWYwMGVjOWM4NDVhZTkzZWI5NWMzMGVjZTcyZGM2NDExY2FmYTBlYWJmZTk5YTU5MzM3NmNkYWI4Y2U4Y2YmWC1BbXotU2lnbmVkSGVhZGVycz1ob3N0In0.Ieakk_ttMsNygVzpZZqGs1507j2GC-rqHSYH9iQQ71Q) See [Comment](https://github.com/volcengine/verl/pull/2398#issuecomment-3157142856) for more details. Note: AgentLoop does not directly record num_tool_calls, but records num_turns. In our scenario, you can calculate the number of tool calls by num_tool_calls = num_turns / 2 - 1. ## References and Acknowledgements - [DeepEyes Paper](https://arxiv.org/abs/2505.14362) - [DeepEyes Official Implementation](https://github.com/Visual-Agent/DeepEyes) --- If you need further details for reproduction or encounter any issues, feel free to open an issue or contact the maintainers. ================================================ FILE: verl_distillation/recipe/deepeyes/configs/deepeyes_multiturn_grpo.yaml ================================================ hydra: searchpath: - file://verl/trainer/config defaults: - ppo_trainer - _self_ data: max_prompt_length: 2048 max_response_length: 2048 train_batch_size: 256 return_raw_chat: True return_multi_modal_inputs: False custom_cls: path: "recipe/deepeyes/deepeyes.py" name: CustomRLHFDataset actor_rollout_ref: hybrid_engine: True model: custom_chat_template: "{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{%- if tools %}{{- '<|im_start|>system\\n' }}{%- if messages[0]['role'] == 'system' %}{%- if messages[0]['content'] is string %}{{- messages[0]['content'] }}{%- else %}{{- messages[0]['content'][0]['text'] }}{%- endif %}{%- else %}{{- 'You are a helpful assistant.' 
}}{%- endif %}{{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within XML tags:\\n\" }}{%- for tool in tools %}{{- \"\\n\" }}{{- tool | tojson }}{%- endfor %}{{- \"\\n\\n\\nFor each function call, return a json object with function name and arguments within XML tags:\\n\\n{\\\"name\\\": , \\\"arguments\\\": }\\n<|im_end|>\\n\" }}{% for message in messages %}{% if message['role'] != 'system' or loop.first == false %}{%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{%- elif message.role == \"assistant\" %}{{- '<|im_start|>' + message.role }}{%- if message.content %}{{- '\\n' + message.content }}{%- endif %}{%- for tool_call in message.tool_calls %}{%- if tool_call.function is defined %}{%- set tool_call = tool_call.function %}{%- endif %}{{- '\\n\\n{\"name\": \"' }}{{- tool_call.name }}{{- '\", \"arguments\": ' }}{{- tool_call.arguments | tojson }}{{- '}\\n' }}{%- endfor %}{{- '<|im_end|>\\n' }}{%- elif message.role == \"tool\" %}{%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}{{- '<|im_start|>user' }}{%- endif %}{{- '\\n\\n' }}{% if message['content'] is string %}{{ message.content }}{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif content['type'] == 'text' or 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}{% endif %}{{- '\\n' }}{%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}{{- '<|im_end|>\\n' }}{%- endif %}{%- endif %}{% endif %}{% endfor %}{%- else %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n{% endif %}{%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' 
in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{%- elif message.role == \"assistant\" %}{{- '<|im_start|>' + message.role }}{%- if message.content %}{{- '\\n' + message.content }}{%- endif %}{%- for tool_call in message.tool_calls %}{%- if tool_call.function is defined %}{%- set tool_call = tool_call.function %}{%- endif %}{{- '\\n\\n{\"name\": \"' }}{{- tool_call.name }}{{- '\", \"arguments\": ' }}{{- tool_call.arguments | tojson }}{{- '}\\n' }}{%- endfor %}{{- '<|im_end|>\\n' }}{%- elif message.role == \"tool\" %}{%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}{{- '<|im_start|>user' }}{%- endif %}{{- '\\n\\n' }}{% if message['content'] is string %}{{ message.content }}{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif content['type'] == 'text' or 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}{% endif %}{{- '\\n' }}{%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}{{- '<|im_end|>\\n' }}{%- endif %}{%- endif %}{% endfor %}{%- endif %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}" rollout: name: sglang multi_turn: enable: True max_assistant_turns: 5 tool_config_path: "recipe/deepeyes/config/image_zoom_in_tool_config.yaml" custom_reward_function: path: "recipe/deepeyes/deepeyes.py" name: compute_score ================================================ FILE: verl_distillation/recipe/deepeyes/configs/image_zoom_in_tool_config.yaml ================================================ tools: - class_name: "verl.tools.image_zoom_in_tool.ImageZoomInTool" config: num_workers: 256 rate_limit: 256 timeout: 60 type: native tool_schema: type: "function" function: name: "image_zoom_in_tool" description: "Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an optional object label." parameters: type: "object" properties: bbox_2d: type: "array" items: type: "number" minItems: 4 maxItems: 4 description: "The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner." label: type: "string" description: "The name or label of the object in the specified bounding box (optional)." required: ["bbox_2d"] ================================================ FILE: verl_distillation/recipe/deepeyes/deepeyes.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and
# limitations under the License.

import io
import logging
import os
import random
import re

import requests
from openai import OpenAI
from PIL import Image

import verl.utils.torch_functional as verl_F
from verl.utils.dataset.rl_dataset import RLHFDataset
from verl.utils.model import compute_position_id_with_mask

logger = logging.getLogger(__name__)

openai_api_key = "EMPTY"
openai_api_base = os.environ.get("LLM_AS_A_JUDGE_BASE", "http://10.1.100.71:18901/v1")

client = OpenAI(
    api_key=openai_api_key,
    base_url=openai_api_base,
)

model_name = ""
if openai_api_base:
    try:
        response = requests.get(f"{openai_api_base}/models")
        response.raise_for_status()
        models = response.json()
        if models.get("data"):
            model_name = models["data"][0]["id"]
        else:
            logger.warning("No models found at the specified API base for reward scoring.")
    except (requests.exceptions.RequestException, KeyError, IndexError) as e:
        logger.warning(f"Failed to get model from {openai_api_base}: {e}. Reward scoring will be disabled.")


class CustomRLHFDataset(RLHFDataset):
    def __getitem__(self, item):
        """
        Note that we also return the raw_input_ids so that it can be combined with other chat templates
        """
        row_dict: dict = self.dataframe[item]
        row_dict[self.prompt_key] = [
            {
                "role": "system",
                # We don't need a tool description here, because custom_chat_template will add it.
                "content": (
                    "You are a helpful assistant. You can call functions to assist with the user query. "
                    "Important: You must call only one function at a time. After each function call, "
                    "wait for the execution result before making the next function call if needed."
                ),
            },
            {
                "role": "user",
                "content": row_dict[self.prompt_key][1]["content"],
            },
        ]
        messages = self._build_messages(row_dict)
        model_inputs = {}

        if self.processor is not None:
            raw_prompt = self.processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
            multi_modal_data = {}

            images = None
            row_dict_images = row_dict.pop(self.image_key, None)
            if row_dict_images:
                images = [Image.open(io.BytesIO(image["bytes"])) for image in row_dict_images]

                # Because the image key is "image" instead of "images" in vllm, we need to use "image" here
                # link: https://github.com/vllm-project/vllm/blob/3c545c0c3b98ee642373a308197d750d0e449403/vllm/multimodal/parse.py#L205 # noqa: E501
                multi_modal_data["image"] = images

            model_inputs = self.processor(text=[raw_prompt], images=images, return_tensors="pt")
            input_ids = model_inputs.pop("input_ids")
            attention_mask = model_inputs.pop("attention_mask")

            if "second_per_grid_ts" in model_inputs:
                model_inputs.pop("second_per_grid_ts")

            # There's a trap here: multi_modal_inputs has to be a dict, not BatchFeature
            row_dict["multi_modal_data"] = multi_modal_data

            # We will do batch.union() in the trainer,
            # so we cannot have "multi_modal_inputs" in row_dict if rollout generates new multi_modal_inputs
            if self.return_multi_modal_inputs:
                row_dict["multi_modal_inputs"] = dict(model_inputs)
                # second_per_grid_ts isn't used for training, just for mrope
                row_dict["multi_modal_inputs"].pop("second_per_grid_ts", None)
        else:
            raw_prompt = self.tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
            model_inputs = self.tokenizer(raw_prompt, return_tensors="pt", add_special_tokens=False)
            input_ids = model_inputs.pop("input_ids")
            attention_mask = model_inputs.pop("attention_mask")

        input_ids, attention_mask = verl_F.postprocess_data(
            input_ids=input_ids,
            attention_mask=attention_mask,
            max_length=self.max_prompt_length,
            pad_token_id=self.tokenizer.pad_token_id,
            left_pad=True,
            truncation=self.truncation,
        )
        if self.processor is not None and "Qwen2VLImageProcessor" in self.processor.image_processor.__class__.__name__:
            from verl.models.transformers.qwen2_vl import get_rope_index

            position_ids = [
                get_rope_index(
                    self.processor,
                    input_ids=input_ids[0],
                    image_grid_thw=model_inputs.get("image_grid_thw"),
                    video_grid_thw=model_inputs.get("video_grid_thw"),
                    second_per_grid_ts=model_inputs.get("second_per_grid_ts"),
                    attention_mask=attention_mask[0],
                )
            ]  # (1, 3, seq_len)
        else:
            position_ids = compute_position_id_with_mask(attention_mask)

        row_dict["input_ids"] = input_ids[0]
        row_dict["attention_mask"] = attention_mask[0]
        row_dict["position_ids"] = position_ids[0]

        raw_prompt_ids = self.tokenizer.encode(raw_prompt, add_special_tokens=False)
        if len(raw_prompt_ids) > self.max_prompt_length:
            if self.truncation == "left":
                raw_prompt_ids = raw_prompt_ids[-self.max_prompt_length :]
            elif self.truncation == "right":
                raw_prompt_ids = raw_prompt_ids[: self.max_prompt_length]
            elif self.truncation == "middle":
                left_half = self.max_prompt_length // 2
                right_half = self.max_prompt_length - left_half
                raw_prompt_ids = raw_prompt_ids[:left_half] + raw_prompt_ids[-right_half:]
            elif self.truncation == "error":
                raise RuntimeError(f"Prompt length {len(raw_prompt_ids)} is longer than {self.max_prompt_length}.")

        row_dict["raw_prompt_ids"] = raw_prompt_ids  # encode prompts without chat template
        if self.return_raw_chat:
            row_dict["raw_prompt"] = messages  # get prompts with chat template
        if self.return_full_prompt:
            row_dict["full_prompts"] = raw_prompt  # array of strings

        # add index for each prompt
        index = row_dict.get("extra_info", {}).get("index", 0)
        tools_kwargs = {
            "image_zoom_in_tool": {
                "create_kwargs": {"image": images[0]},
                # "execute_kwargs": {},
                # "calc_reward_kwargs": {},
                # "release_kwargs": {},
            }
        }
        row_dict["index"] = index
        row_dict["tools_kwargs"] = tools_kwargs
        row_dict["agent_name"] = "tool_agent"
        return row_dict
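
# Reward arithmetic (illustrative sketch of the weighting in compute_score below):
# a correct answer produced with tool calls and clean formatting scores
# 0.8 * 1.0 + 0.2 * 0.0 + 1.2 * 1.0 = 2.0; a correct answer without any tool call
# scores 0.8; unbalanced or missing tags contribute 0.2 * -1.0 = -0.2.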

def compute_score(data_source: str, solution_str: str, ground_truth: str, extra_info=None) -> float:
    """
    Compute reward score for model solutions with robust handling of various formats.

    Returns a weighted combination of:
    - Accuracy reward (0.8 weight): Whether the answer is semantically correct
    - Format reward (0.2 weight): Whether the output follows the expected format
    - Tool reward (1.2 weight): Whether tools were used when the answer is correct
    """
    # Initialize tracking variables
    is_format_error = False

    # 1. Check <think> tag format
    count_think_1 = solution_str.count("<think>")
    count_think_2 = solution_str.count("</think>")
    if count_think_1 != count_think_2:
        is_format_error = True

    # 2. Check vision tokens (skip this since the tokenizer removes special tokens)
    # We'll use <tool_call> and <tool_response> instead to detect tool usage

    # 3. Extract answer text with multiple fallback strategies
    answer_text = ""

    # Strategy 1: Try to extract from <answer> tags first
    predict_no_think = (
        solution_str.split("</think>")[-1].strip() if "</think>" in solution_str else solution_str.strip()
    )

    # Check <answer> tag format
    count_answer_1 = predict_no_think.count("<answer>")
    count_answer_2 = predict_no_think.count("</answer>")
    if count_answer_1 != count_answer_2:
        is_format_error = True

    # Try to extract from <answer> tags
    answer_match = re.search(r"<answer>(.*?)</answer>", predict_no_think, re.DOTALL)
    if answer_match:
        answer_text = answer_match.group(1).strip()
    else:
        # No proper <answer> tags found - this is a format error
        is_format_error = True

        # Strategy 2: If no <answer> tags, extract content after tool responses
        # Look for pattern: </tool_response>...assistant\n[actual_answer]
        tool_response_match = re.search(
            r"</tool_response>\s*assistant\s*\n(.*?)$", predict_no_think, re.DOTALL | re.MULTILINE
        )
        if tool_response_match:
            answer_text = tool_response_match.group(1).strip()
        else:
            # Strategy 3: If no tool responses, look for content after </think>
            if "</think>" in solution_str:
                # Remove any remaining tool-related tags and extract meaningful content
                remaining_content = predict_no_think
                # Remove tool calls and responses
                remaining_content = re.sub(r"<tool_call>.*?</tool_call>", "", remaining_content, flags=re.DOTALL)
                remaining_content = re.sub(
                    r"<tool_response>.*?</tool_response>", "", remaining_content, flags=re.DOTALL
                )
                # Remove user/assistant markers
                remaining_content = re.sub(r"\b(user|assistant)\b", "", remaining_content)
                answer_text = remaining_content.strip()
            else:
                # Strategy 4: Use the entire solution_str as a fallback
                answer_text = solution_str.strip()

    # Clean up answer text
    answer_text = answer_text.strip()

    # If the answer is still empty after all strategies, mark it as a format error
    if not answer_text:
        is_format_error = True
        answer_text = solution_str.strip()  # Use full text as last resort
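
    # Extraction walk-through (illustrative): a rollout like
    # "<think>looks tan</think><answer>tan</answer>" is handled by Strategy 1 and
    # yields answer_text == "tan"; a rollout that ends with
    # "</tool_response>\nassistant\nThe van is at the bottom." but has no <answer>
    # tags falls through to Strategy 2, which still recovers the final assistant
    # text while is_format_error records the missing tags.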

    # 4. Evaluate correctness using the LLM judge
    question_text = extra_info.get("question", "") if extra_info else ""

    if not client or not model_name:
        logger.warning("Reward function client not initialized or model name not found.")
        return 0.0

    system_prompt = (
        "You are an expert evaluator. Your task is to determine if a model's answer is semantically equivalent to a "
        "provided standard answer, given a specific question.\n"
        "Your evaluation must be strict. The model's answer is only correct if it fully matches the meaning of the "
        "standard answer.\n"
        'You must provide your final judgement as a single word: either "CORRECT" or "INCORRECT". Do not provide '
        "any explanation or other text."
    )
    user_prompt = (
        f"I will provide a question, a standard answer, and a model's answer. You must evaluate if the model's "
        f"answer is correct.\n\n"
        f"---\n"
        f"**Example 1:**\n"
        f"[Question]: Is the countertop tan or blue?\n"
        f"[Standard Answer]: The countertop is tan.\n"
        f"[Model's Answer]: tan\n"
        f"[Your Judgement]: CORRECT\n"
        f"---\n"
        f"**Example 2:**\n"
        f"[Question]: Is the man phone both blue and closed?\n"
        f"[Standard Answer]: Yes, the man phone is both blue and closed.\n"
        f"[Model's Answer]: No.\n"
        f"[Your Judgement]: INCORRECT\n"
        f"---\n"
        f"**Task:**\n"
        f"[Question]: {question_text}\n"
        f"[Standard Answer]: {ground_truth}\n"
        f"[Model's Answer]: {answer_text}\n"
        f"[Your Judgement]:"
    )

    try:
        chat_response = client.chat.completions.create(
            model=model_name,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_prompt},
            ],
            seed=random.randint(0, 1000000),
            temperature=0.1,  # Lower temperature for more deterministic judgement
            extra_body={
                "chat_template_kwargs": {"enable_thinking": False},
            },
        )
        response = chat_response.choices[0].message.content.strip()
    except Exception as e:
        logger.warning(f" [WARNING] Chat completion request failed: {e}")
        return 0.0

    # Parse the LLM judge response
    if re.search(r"\bCORRECT\b", response, re.IGNORECASE):
        acc_reward = 1.0
    elif re.search(r"\bINCORRECT\b", response, re.IGNORECASE):
        acc_reward = 0.0
    else:
        logger.warning(
            f" [WARNING] Judgement format error. Expected 'CORRECT' or 'INCORRECT'.\n"
            f"Response: '{response}'\n"
            f"Model Answer: '{answer_text}'\n"
            f"Ground Truth: '{ground_truth}'"
        )
        acc_reward = 0.0

    # Penalize excessively long answers (potential judge hacking)
    if len(answer_text) >= 1000:
        acc_reward = 0.0
        is_format_error = True

    # 5. Check tool usage - look for <tool_call>/<tool_response> patterns instead of vision tokens
    has_tool_usage = bool(
        re.search(r"<tool_call>.*?</tool_call>", solution_str, re.DOTALL)
        or re.search(r"<tool_response>.*?</tool_response>", solution_str, re.DOTALL)
    )

    # Tool reward: only given if tools were used AND the answer is correct
    tool_reward = 1.0 if has_tool_usage and acc_reward > 0.5 else 0.0
    # Format reward: penalty for format errors
    format_reward = -1.0 if is_format_error else 0.0

    # Log debug information for problematic cases
    if is_format_error or not answer_text:
        logger.debug(
            f"Format issue detected:\n"
            f"Solution: {solution_str[:200]}...\n"
            f"Extracted answer: '{answer_text}'\n"
            f"Format error: {is_format_error}\n"
            f"Tool usage: {has_tool_usage}"
        )

    # Final weighted score
    final_score = 0.8 * acc_reward + 0.2 * format_reward + 1.2 * tool_reward
    return final_score


if __name__ == "__main__":
    # Test case 1: Original test case
    predict_str = "The answer is 2 + 2 = 4 right left "
    ground_truth = "left"
    extra_info = {
        "answer": "The woman is to the left of the man who is holding the camera.",
        "id": 0,
        "image": "/cpfs/user/honglingyi/DATA/LLM/Vstar/gqa/images/713270.jpg",
        "pred_ans": "The woman is to the right of the man who is holding the camera.",
        "question": "Is the woman to the left or to the right of the man who is holding the camera?",
    }

    print("=== Test Case 1: Original test ===")
    import time

    time_start = time.time()
    score = compute_score("common_reasoning", predict_str, ground_truth, extra_info)
    print(f"Score: {score}")
    time_end = time.time()
    print(f"Time: {time_end - time_start}")

    # Test case 2: Problematic case mentioned by user
    problematic_solution = """<tool_call>
{"name": "image_zoom_in_tool", "arguments": {"bbox_2d": [226, 399, 265, 464], "label": "white van"}}
</tool_call>
user
<tool_response>
Zoomed in on the image to the region [226, 399, 265, 464] with label white van.
</tool_response>
assistant
The white van is visible in the lower section of the image, near the diagonal road."""
    problematic_ground_truth = "Yes, the white van is indeed situated in the bottom part of the picture."
    problematic_extra_info = {
        "question": "Is the white van in the bottom part of the picture?",
    }

    print("\n=== Test Case 2: Problematic case (no answer tags) ===")
    print(f"Solution: {problematic_solution}")
    print(f"Ground truth: {problematic_ground_truth}")
    time_start = time.time()
    score2 = compute_score("common_reasoning", problematic_solution, problematic_ground_truth, problematic_extra_info)
    print(f"Score: {score2}")
    time_end = time.time()
    print(f"Time: {time_end - time_start}")

    # Test case 3: Well-formatted case with tools
    well_formatted_solution = """<think>
I need to use the image zoom tool to get a better look at the specific area.
</think>

<tool_call>
{"name": "image_zoom_in_tool", "arguments": {"bbox_2d": [226, 399, 265, 464], "label": "white van"}}
</tool_call>

<tool_response>
Zoomed in on the image to the region [226, 399, 265, 464] with label white van.
</tool_response>

<answer>
Yes, the white van is indeed situated in the bottom part of the picture.
</answer>"""

    print("\n=== Test Case 3: Well-formatted case ===")
    time_start = time.time()
    score3 = compute_score(
        "common_reasoning", well_formatted_solution, problematic_ground_truth, problematic_extra_info
    )
    print(f"Score: {score3}")
    time_end = time.time()
    print(f"Time: {time_end - time_start}")

================================================
FILE: verl_distillation/recipe/deepeyes/run_deepeyes_grpo.sh
================================================
#!/bin/bash
set -x

export LLM_AS_A_JUDGE_BASE="your llm-as-a-judge server/v1"
export WANDB_API_KEY="your wandb key"

PROJECT_NAME="your_project_name"
EXPERIMENT_NAME="your_experiment_name"
BASEDIR=base_dir
SAVE_CHECKPOINT_DIR=${BASEDIR}/verl_checkpoints
DATASET_TRAIN=${BASEDIR}/dataset/train.parquet
DATASET_VAL=${BASEDIR}/dataset/val.parquet
REF_MODEL_PATH=ref_model_path

PYTHONUNBUFFERED=1 python3 -m verl.trainer.main_ppo \
    --config-path=${BASEDIR}/recipe/deepeyes/configs \
    --config-name='deepeyes_multiturn_grpo' \
    data.train_files=${DATASET_TRAIN} \
    data.val_files=[${DATASET_VAL}] \
    data.train_batch_size=128 \
    data.max_prompt_length=8192 \
    data.max_response_length=16384 \
    data.return_raw_chat=True \
    data.filter_overlong_prompts=True \
    algorithm.adv_estimator=grpo \
    algorithm.kl_ctrl.kl_coef=0.0 \
    actor_rollout_ref.model.path=${REF_MODEL_PATH} \
    actor_rollout_ref.model.use_remove_padding=True \
    actor_rollout_ref.model.use_fused_kernels=True \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.actor.ppo_mini_batch_size=128 \
    actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=1 \
    actor_rollout_ref.actor.use_kl_loss=False \
    actor_rollout_ref.actor.kl_loss_coef=0.0 \
    actor_rollout_ref.actor.kl_loss_type=low_var_kl \
    actor_rollout_ref.actor.entropy_coeff=0.0 \
    actor_rollout_ref.actor.checkpoint.save_contents=['model','hf_model','optimizer','extra'] \
    actor_rollout_ref.actor.ulysses_sequence_parallel_size=1 \
    actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=1 \
    actor_rollout_ref.rollout.tensor_model_parallel_size=1 \
    actor_rollout_ref.rollout.name=sglang \
    actor_rollout_ref.rollout.mode=async \
    actor_rollout_ref.rollout.n=8 \
    actor_rollout_ref.rollout.max_num_batched_tokens=32768 \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.8 \
    actor_rollout_ref.rollout.enforce_eager=True \
    actor_rollout_ref.rollout.free_cache_engine=True \
    actor_rollout_ref.rollout.enable_chunked_prefill=True \
    actor_rollout_ref.actor.fsdp_config.param_offload=True \
actor_rollout_ref.actor.fsdp_config.optimizer_offload=True \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=1 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ actor_rollout_ref.rollout.multi_turn.enable=True \ actor_rollout_ref.rollout.multi_turn.max_assistant_turns=5 \ actor_rollout_ref.rollout.multi_turn.max_user_turns=5 \ actor_rollout_ref.rollout.multi_turn.max_parallel_calls=1 \ actor_rollout_ref.rollout.multi_turn.tool_config_path=recipe/deepeyes/configs/image_zoom_in_tool_config.yaml \ trainer.critic_warmup=0 \ trainer.logger=['console','wandb','tensorboard'] \ trainer.val_before_train=False \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=8 \ trainer.test_freq=80 \ trainer.project_name=${PROJECT_NAME} \ trainer.experiment_name=${EXPERIMENT_NAME} \ trainer.default_local_dir=${SAVE_CHECKPOINT_DIR}/${PROJECT_NAME}/${EXPERIMENT_NAME} \ +trainer.tensorboard_dir=${SAVE_CHECKPOINT_DIR}/logs/tensorboard \ +trainer.rl_logging_board_dir=${SAVE_CHECKPOINT_DIR}/logs/rl_logging_board \ trainer.total_epochs=1 2>&1 | tee ./logs/${EXPERIMENT_NAME}.log ================================================ FILE: verl_distillation/recipe/entropy/32b_clip_cov.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail export WANDB_API_KEY=YOUR_WANDB_API_KEY # export VLLM_USE_V1=1 project_name='Qwen2.5-32B' exp_name='clipcov' adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=1 clip_ratio_high=1 clip_cov_ratio=0.0002 clip_cov_lb=1.0 clip_cov_ub=5.0 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 8)) enable_overlong_buffer=False overlong_buffer_len=$((1024 * 2)) overlong_penalty_factor=1.0 loss_agg_mode="token-mean" loss_mode="clip_cov" enable_filter_groups=True filter_groups_metric=acc max_num_gen_batches=10 train_prompt_bsz=256 gen_prompt_bsz=$((train_prompt_bsz * 3)) train_prompt_mini_bsz=32 n_resp_per_prompt=8 max_token=20480 # Ray RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} WORKING_DIR=${WORKING_DIR:-"${PWD}"} RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} NNODES=${NNODES:-4} # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} MODEL_PATH=${MODEL_PATH:-"/YOUR_MODELPATH"} CKPTS_DIR=${CKPTS_DIR:-"/YOUR_CKPTS_PATH"} TRAIN_FILE=${TRAIN_FILE:-"/YOUR_TRAIN_FILE_PATH"} TEST_FILE=${TEST_FILE:-["/YOUR_TRAIN_FILE_PATH"]} # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout ppo_kl_coef=1 kl_cov_ratio=0.02 # Mathematically equivalent use_dynamic_bsz=True infer_micro_batch_size=null train_micro_batch_size=null offload=False HYDRA_FULL_ERROR=1 python -m recipe.entropy.main_entropy \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.filter_overlong_prompts=False \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.gen_batch_size=${gen_prompt_bsz} \ data.train_batch_size=${train_prompt_bsz} \ data.return_raw_chat=True \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.actor.policy_loss.loss_mode=${loss_mode} \ actor_rollout_ref.actor.policy_loss.clip_cov_ratio=${clip_cov_ratio} \ 
actor_rollout_ref.actor.policy_loss.clip_cov_lb=${clip_cov_lb} \ actor_rollout_ref.actor.policy_loss.clip_cov_ub=${clip_cov_ub} \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=8 \ actor_rollout_ref.rollout.mode=sync \ actor_rollout_ref.rollout.name=vllm \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ algorithm.filter_groups.enable=${enable_filter_groups} \ algorithm.filter_groups.metric=${filter_groups_metric} \ algorithm.filter_groups.max_num_gen_batches=${max_num_gen_batches} \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${max_token} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${max_token} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${max_token} \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.weight_decay=0 \ actor_rollout_ref.actor.optim.lr_scheduler_type=constant \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.ppo_micro_batch_size=${train_micro_batch_size} \ actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=1 \ actor_rollout_ref.actor.clip_cov_ratio=${clip_cov_ratio} \ actor_rollout_ref.actor.clip_cov_lb=${clip_cov_lb} \ actor_rollout_ref.actor.clip_cov_ub=${clip_cov_ub} \ actor_rollout_ref.rollout.gpu_memory_utilization=0.85 \ actor_rollout_ref.rollout.log_prob_micro_batch_size=${infer_micro_batch_size} \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=${max_token} \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k="${top_k}" \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=False \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.ref.log_prob_micro_batch_size=${infer_micro_batch_size} \ actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=1 \ actor_rollout_ref.actor.fsdp_config.fsdp_size=-1 \ reward_model.reward_manager=dapo \ reward_model.overlong_buffer.enable=${enable_overlong_buffer} \ reward_model.overlong_buffer.len=${overlong_buffer_len} \ reward_model.overlong_buffer.penalty_factor=${overlong_penalty_factor} \ trainer.logger='["console","wandb"]' \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node=8 \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=False \ trainer.test_freq=4 \ trainer.save_freq=32 \ trainer.total_epochs=1000 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=disable ================================================ FILE: 
verl_distillation/recipe/entropy/32b_kl_cov.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail export WANDB_API_KEY=YOUR_WANDB_API_KEY # export VLLM_USE_V1=1 project_name='Qwen2.5-32B' exp_name='klcov' adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.2 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 8)) enable_overlong_buffer=False overlong_buffer_len=$((1024 * 2)) overlong_penalty_factor=1.0 loss_agg_mode="token-mean" loss_mode="kl_cov" enable_filter_groups=True filter_groups_metric=acc max_num_gen_batches=10 train_prompt_bsz=256 gen_prompt_bsz=$((train_prompt_bsz * 3)) train_prompt_mini_bsz=32 n_resp_per_prompt=8 max_token=20480 # Ray RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} WORKING_DIR=${WORKING_DIR:-"${PWD}"} RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} NNODES=${NNODES:-4} # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} MODEL_PATH=${MODEL_PATH:-"/YOUR_MODELPATH"} CKPTS_DIR=${CKPTS_DIR:-"/YOUR_CKPTS_PATH"} TRAIN_FILE=${TRAIN_FILE:-"/YOUR_TRAIN_FILE_PATH"} TEST_FILE=${TEST_FILE:-["/YOUR_TRAIN_FILE_PATH"]} # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout ppo_kl_coef=1 kl_cov_ratio=0.0002 # Mathematically equivalent use_dynamic_bsz=True infer_micro_batch_size=null train_micro_batch_size=null offload=False HYDRA_FULL_ERROR=1 python -m recipe.entropy.main_entropy \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.filter_overlong_prompts=False \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.gen_batch_size=${gen_prompt_bsz} \ data.train_batch_size=${train_prompt_bsz} \ data.return_raw_chat=True \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.actor.loss_mode=${loss_mode} \ actor_rollout_ref.actor.policy_loss.loss_mode=${loss_mode} \ actor_rollout_ref.actor.policy_loss.kl_cov_ratio=${kl_cov_ratio} \ actor_rollout_ref.actor.policy_loss.ppo_kl_coef=${ppo_kl_coef} \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=8 \ actor_rollout_ref.rollout.mode=sync \ actor_rollout_ref.rollout.name=vllm \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ algorithm.filter_groups.enable=${enable_filter_groups} \ algorithm.filter_groups.metric=${filter_groups_metric} \ algorithm.filter_groups.max_num_gen_batches=${max_num_gen_batches} \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${max_token} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${max_token} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${max_token} \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.weight_decay=0 \ 
actor_rollout_ref.actor.optim.lr_scheduler_type=constant \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.ppo_micro_batch_size=${train_micro_batch_size} \ actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=1 \ actor_rollout_ref.rollout.gpu_memory_utilization=0.85 \ actor_rollout_ref.rollout.log_prob_micro_batch_size=${infer_micro_batch_size} \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=${max_token} \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k="${top_k}" \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=False \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.ref.log_prob_micro_batch_size=${infer_micro_batch_size} \ actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=1 \ actor_rollout_ref.actor.fsdp_config.fsdp_size=-1 \ reward_model.reward_manager=dapo \ reward_model.overlong_buffer.enable=${enable_overlong_buffer} \ reward_model.overlong_buffer.len=${overlong_buffer_len} \ reward_model.overlong_buffer.penalty_factor=${overlong_penalty_factor} \ trainer.logger='["console","wandb"]' \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node=8 \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=False \ trainer.test_freq=4 \ trainer.save_freq=32 \ trainer.total_epochs=1000 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=disable ================================================ FILE: verl_distillation/recipe/entropy/32b_kl_cov_mininbsz.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail export WANDB_API_KEY=YOUR_WANDB_API_KEY # export VLLM_USE_V1=1 project_name='Qwen2.5-32B' exp_name='klcov' adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.2 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 8)) enable_overlong_buffer=False overlong_buffer_len=$((1024 * 2)) overlong_penalty_factor=1.0 loss_agg_mode="token-mean" loss_mode="kl_cov" enable_filter_groups=True filter_groups_metric=acc max_num_gen_batches=10 train_prompt_bsz=256 gen_prompt_bsz=$((train_prompt_bsz * 3)) train_prompt_mini_bsz=16 n_resp_per_prompt=8 max_token=20480 # Ray RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} WORKING_DIR=${WORKING_DIR:-"${PWD}"} RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} NNODES=${NNODES:-4} # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} MODEL_PATH=${MODEL_PATH:-"/YOUR_MODELPATH"} CKPTS_DIR=${CKPTS_DIR:-"/YOUR_CKPTS_PATH"} TRAIN_FILE=${TRAIN_FILE:-"/YOUR_TRAIN_FILE_PATH"} TEST_FILE=${TEST_FILE:-["/YOUR_TRAIN_FILE_PATH"]} # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout ppo_kl_coef=1 kl_cov_ratio=0.0002 # Mathematically equivalent use_dynamic_bsz=True infer_micro_batch_size=null 
train_micro_batch_size=null offload=False HYDRA_FULL_ERROR=1 python -m recipe.entropy.main_entropy \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.filter_overlong_prompts=False \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.gen_batch_size=${gen_prompt_bsz} \ data.train_batch_size=${train_prompt_bsz} \ data.return_raw_chat=True \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.actor.policy_loss.loss_mode=${loss_mode} \ actor_rollout_ref.actor.policy_loss.kl_cov_ratio=${kl_cov_ratio} \ actor_rollout_ref.actor.policy_loss.ppo_kl_coef=${ppo_kl_coef} \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=8 \ actor_rollout_ref.rollout.mode=sync \ actor_rollout_ref.rollout.name=vllm \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ algorithm.filter_groups.enable=${enable_filter_groups} \ algorithm.filter_groups.metric=${filter_groups_metric} \ algorithm.filter_groups.max_num_gen_batches=${max_num_gen_batches} \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${max_token} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${max_token} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${max_token} \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.weight_decay=0 \ actor_rollout_ref.actor.optim.lr_scheduler_type=constant \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.ppo_micro_batch_size=${train_micro_batch_size} \ actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=1 \ actor_rollout_ref.rollout.gpu_memory_utilization=0.85 \ actor_rollout_ref.rollout.log_prob_micro_batch_size=${infer_micro_batch_size} \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=${max_token} \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k="${top_k}" \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=False \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.ref.log_prob_micro_batch_size=${infer_micro_batch_size} \ actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=1 \ 
actor_rollout_ref.actor.fsdp_config.fsdp_size=-1 \ reward_model.reward_manager=dapo \ reward_model.overlong_buffer.enable=${enable_overlong_buffer} \ reward_model.overlong_buffer.len=${overlong_buffer_len} \ reward_model.overlong_buffer.penalty_factor=${overlong_penalty_factor} \ trainer.logger='["console","wandb"]' \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node=8 \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=False \ trainer.test_freq=4 \ trainer.save_freq=32 \ trainer.total_epochs=1000 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=disable ================================================ FILE: verl_distillation/recipe/entropy/7b_clip_cov.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail export WANDB_API_KEY=YOUR_WANDB_API_KEY # export VLLM_USE_V1=1 project_name='Qwen2.5-7B' exp_name='clipcov' adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=1 clip_ratio_high=1 clip_cov_ratio=0.0002 clip_cov_lb=1.0 clip_cov_ub=5.0 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 8)) enable_overlong_buffer=False overlong_buffer_len=$((1024 * 2)) overlong_penalty_factor=1.0 loss_agg_mode="token-mean" loss_mode="clip_cov" enable_filter_groups=True filter_groups_metric=acc max_num_gen_batches=10 train_prompt_bsz=256 gen_prompt_bsz=$((train_prompt_bsz * 3)) train_prompt_mini_bsz=32 n_resp_per_prompt=8 max_token=30720 # Ray RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} WORKING_DIR=${WORKING_DIR:-"${PWD}"} RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} NNODES=${NNODES:-4} # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} MODEL_PATH=${MODEL_PATH:-"/YOUR_MODELPATH"} CKPTS_DIR=${CKPTS_DIR:-"/YOUR_CKPTS_PATH"} TRAIN_FILE=${TRAIN_FILE:-"/YOUR_TRAIN_FILE_PATH"} TEST_FILE=${TEST_FILE:-["/YOUR_TRAIN_FILE_PATH"]} # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout ppo_kl_coef=1 kl_cov_ratio=0.2 # Mathematically equivalent use_dynamic_bsz=True infer_micro_batch_size=null train_micro_batch_size=null offload=False HYDRA_FULL_ERROR=1 python -m recipe.entropy.main_entropy \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.filter_overlong_prompts=False \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.gen_batch_size=${gen_prompt_bsz} \ data.train_batch_size=${train_prompt_bsz} \ data.return_raw_chat=True \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.actor.policy_loss.loss_mode=${loss_mode} \ actor_rollout_ref.actor.policy_loss.clip_cov_ratio=${clip_cov_ratio} \ actor_rollout_ref.actor.policy_loss.clip_cov_lb=${clip_cov_lb} \ actor_rollout_ref.actor.policy_loss.clip_cov_ub=${clip_cov_ub} \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=8 \ actor_rollout_ref.rollout.mode=sync \ actor_rollout_ref.rollout.name=vllm \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ algorithm.filter_groups.enable=${enable_filter_groups} \ 
algorithm.filter_groups.metric=${filter_groups_metric} \ algorithm.filter_groups.max_num_gen_batches=${max_num_gen_batches} \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${max_token} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${max_token} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${max_token} \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.weight_decay=0 \ actor_rollout_ref.actor.optim.lr_scheduler_type=constant \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.ppo_micro_batch_size=${train_micro_batch_size} \ actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=1 \ actor_rollout_ref.rollout.gpu_memory_utilization=0.85 \ actor_rollout_ref.rollout.log_prob_micro_batch_size=${infer_micro_batch_size} \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=${max_token} \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k="${top_k}" \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=False \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.ref.log_prob_micro_batch_size=${infer_micro_batch_size} \ actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=1 \ actor_rollout_ref.actor.fsdp_config.fsdp_size=-1 \ reward_model.reward_manager=dapo \ reward_model.overlong_buffer.enable=${enable_overlong_buffer} \ reward_model.overlong_buffer.len=${overlong_buffer_len} \ reward_model.overlong_buffer.penalty_factor=${overlong_penalty_factor} \ trainer.logger='["console","wandb"]' \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node=8 \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=False \ trainer.test_freq=4 \ trainer.save_freq=32 \ trainer.total_epochs=1000 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=disable ================================================ FILE: verl_distillation/recipe/entropy/7b_kl_cov.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail export WANDB_API_KEY=YOUR_WANDB_API_KEY # export VLLM_USE_V1=1 project_name='Qwen2.5-7B' exp_name='klcov' adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.2 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 8)) enable_overlong_buffer=False overlong_buffer_len=$((1024 * 2)) overlong_penalty_factor=1.0 loss_agg_mode="token-mean" loss_mode="kl_cov" enable_filter_groups=True filter_groups_metric=acc 
max_num_gen_batches=10 train_prompt_bsz=256 gen_prompt_bsz=$((train_prompt_bsz * 3)) train_prompt_mini_bsz=32 n_resp_per_prompt=8 max_token=30720 # Ray RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} WORKING_DIR=${WORKING_DIR:-"${PWD}"} RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} NNODES=${NNODES:-4} # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} MODEL_PATH=${MODEL_PATH:-"/YOUR_MODELPATH"} CKPTS_DIR=${CKPTS_DIR:-"/YOUR_CKPTS_PATH"} TRAIN_FILE=${TRAIN_FILE:-"/YOUR_TRAIN_FILE_PATH"} TEST_FILE=${TEST_FILE:-["/YOUR_TRAIN_FILE_PATH"]} # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout ppo_kl_coef=1 kl_cov_ratio=0.002 # Mathematically equivalent use_dynamic_bsz=True infer_micro_batch_size=null train_micro_batch_size=null offload=False HYDRA_FULL_ERROR=1 python -m recipe.entropy.main_entropy \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.filter_overlong_prompts=False \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.gen_batch_size=${gen_prompt_bsz} \ data.train_batch_size=${train_prompt_bsz} \ data.return_raw_chat=True \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.actor.policy_loss.loss_mode=${loss_mode} \ actor_rollout_ref.actor.policy_loss.kl_cov_ratio=${kl_cov_ratio} \ actor_rollout_ref.actor.policy_loss.ppo_kl_coef=${ppo_kl_coef} \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=8 \ actor_rollout_ref.rollout.mode=sync \ actor_rollout_ref.rollout.name=vllm \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ algorithm.filter_groups.enable=${enable_filter_groups} \ algorithm.filter_groups.metric=${filter_groups_metric} \ algorithm.filter_groups.max_num_gen_batches=${max_num_gen_batches} \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${max_token} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${max_token} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${max_token} \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.weight_decay=0 \ actor_rollout_ref.actor.optim.lr_scheduler_type=constant \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.ppo_micro_batch_size=${train_micro_batch_size} \ actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=1 \ actor_rollout_ref.rollout.gpu_memory_utilization=0.85 \ actor_rollout_ref.rollout.log_prob_micro_batch_size=${infer_micro_batch_size} \ 
actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=${max_token} \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k="${top_k}" \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=False \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.ref.log_prob_micro_batch_size=${infer_micro_batch_size} \ actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=1 \ actor_rollout_ref.actor.fsdp_config.fsdp_size=-1 \ reward_model.reward_manager=dapo \ reward_model.overlong_buffer.enable=${enable_overlong_buffer} \ reward_model.overlong_buffer.len=${overlong_buffer_len} \ reward_model.overlong_buffer.penalty_factor=${overlong_penalty_factor} \ trainer.logger='["console","wandb"]' \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node=8 \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=False \ trainer.test_freq=4 \ trainer.save_freq=32 \ trainer.total_epochs=1000 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=disable ================================================ FILE: verl_distillation/recipe/entropy/README.md ================================================
# The Entropy Mechanism of Reinforcement Learning for Large Language Model Reasoning

[![Paper](https://img.shields.io/badge/paper-A42C25?style=for-the-badge&logo=arxiv&logoColor=white)](https://arxiv.org/pdf/2505.22617) [![Github](https://img.shields.io/badge/PRIME-000000?style=for-the-badge&logo=github&logoColor=000&logoColor=white)](https://github.com/PRIME-RL/Entropy-Mechanism-of-RL) [![alphaXiv](https://img.shields.io/badge/discussion-A42C25?style=for-the-badge&logo=arxiv&logoColor=white&color=blue)](https://www.alphaxiv.org/abs/2505.22617) [![Twitter](https://img.shields.io/badge/Twitter-%23000000.svg?style=for-the-badge&logo=twitter&logoColor=white)](https://x.com/stingning/status/1928088554166505667) [![Twitter](https://img.shields.io/badge/Twitter-%23000000.svg?style=for-the-badge&logo=twitter&logoColor=white)](https://x.com/charlesfornlp/status/1928089451080585283) [![Twitter-ak](https://img.shields.io/badge/Twitter-%23000000.svg?style=for-the-badge&logo=twitter&logoColor=white)](https://x.com/_akhaliq/status/1928077929105268861)
# 🎉News

- **[2025/05/29]** 🎉 Ranked **#1** of the day on [Huggingface Daily Papers](https://huggingface.co/papers?date=2025-05-29).
- **[2025/05/29]** Released our paper on arXiv. See [here](https://arxiv.org/pdf/2505.22617). We provide insights into the entropy mechanism of RL for LLMs and propose two simple yet effective strategies to alleviate entropy collapse.

# ✨Getting started

After preparing the training data, you can train Qwen2.5-7B on a single node, taking the KL-Cov approach as an example, by simply running:

```
cd verl
conda activate your_env
bash recipe/entropy/7b_kl_cov.sh
```

To train Qwen2.5-32B on multiple nodes, run:

```
cd verl
conda activate your_env
bash recipe/entropy/32b_kl_cov.sh
```

# 📖Introduction
This paper addresses the entropy collapse issue in scaling reinforcement learning (RL) for large language models (LLMs): policy entropy drops sharply during training, leading to overconfidence and performance saturation. We empirically establish a relationship between entropy ($H$) and performance ($R$), $R = -a\exp(H) + b$, showing that performance is bottlenecked by entropy exhaustion.
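Since this fit is linear in $\exp(H)$, recovering $a$ and $b$ from logged $(H, R)$ pairs is a one-line least-squares problem. The sketch below is illustrative only (the data points are made up, not results from the paper); it also evaluates the implied ceiling $R = b - a$ at full entropy exhaustion ($H \to 0$).

```python
# Illustrative fit of R = -a * exp(H) + b (synthetic data, not paper results).
import numpy as np

H = np.array([1.8, 1.2, 0.8, 0.5, 0.3, 0.15])        # hypothetical policy entropies
R = np.array([0.18, 0.29, 0.34, 0.37, 0.385, 0.392])  # hypothetical validation scores

# The model is linear in x = exp(H): R = slope * x + intercept,
# with a = -slope and b = intercept, so ordinary least squares suffices.
slope, intercept = np.polyfit(np.exp(H), R, deg=1)
a, b = -slope, intercept
print(f"a = {a:.4f}, b = {b:.4f}")
print(f"predicted ceiling at H -> 0: {b - a:.4f}")    # performance once entropy is exhausted
```

In other words, once entropy is nearly exhausted the fit predicts no further gains, which is why the recipes in this directory try to keep entropy from collapsing.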
Theoretically, we find that entropy changes are driven by the covariance between an action's probability and its logit update, which in policy-gradient methods correlates with the advantage. High-probability, high-advantage actions reduce entropy, while rare, high-advantage actions increase it. Empirically, this covariance term remains positive, explaining entropy's monotonic decline. To mitigate this, we propose **Clip-Cov** and **KL-Cov**, which restrict updates for high-covariance tokens. Both methods effectively prevent entropy collapse and improve performance.
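To make the mechanism concrete, here is a minimal, self-contained sketch of the KL-Cov rule, not the exact verl implementation (the real one is selected via `actor_rollout_ref.actor.policy_loss.loss_mode=kl_cov` in the scripts above): rank response tokens by a per-token covariance proxy between log-probability and advantage, and on the top `kl_cov_ratio` fraction replace the policy-gradient loss with a KL penalty weighted by `ppo_kl_coef`. The tensor names and the exact penalty form are assumptions for illustration.

```python
# Illustrative KL-Cov sketch (assumed shapes/names; not the repo's exact code).
import torch

def kl_cov_policy_loss(logp, old_logp, adv, mask, kl_cov_ratio=0.0002, ppo_kl_coef=1.0):
    ratio = torch.exp(logp - old_logp)
    pg_loss = -adv * ratio                           # vanilla PG surrogate, per token
    kl_pen = ppo_kl_coef * (logp - old_logp).abs()   # simple penalty toward the old policy

    valid = mask.reshape(-1).bool()                  # 0/1 response mask, flattened
    idx = valid.nonzero(as_tuple=True)[0]
    lp = logp.reshape(-1)[idx].detach()              # detached: used only for selection
    a = adv.reshape(-1)[idx]
    cov = (lp - lp.mean()) * (a - a.mean())          # per-token covariance proxy

    k = max(1, int(kl_cov_ratio * idx.numel()))      # top fraction of valid tokens
    top = idx[torch.topk(cov, k).indices]

    loss = pg_loss.reshape(-1).clone()
    loss[top] = kl_pen.reshape(-1)[top]              # KL penalty on high-covariance tokens
    return loss[valid].mean()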
# 📃Evaluation
Our method maintains considerably higher entropy throughout training. For example, when the baseline's entropy has already plateaued and can no longer be consumed, KL-Cov still sustains an entropy level more than 10 times higher. Meanwhile, the policy model's response length steadily increases, and its test-set performance consistently surpasses the baseline's. This indicates that the model explores more freely during training, learning a better policy through RL.

| **Method** | **AIME24** | **AIME25** | **AMC** | **MATH-500** | **OMNI-MATH** | **OlympiadBench** | **Minerva** | **Avg.** |
| ----------------- | ---------: | ---------: | -------: | -----------: | ------------: | ----------------: | ----------: | -------: |
| *Qwen2.5-7B* | | | | | | | | |
| GRPO | 21.2 | 9.6 | 58.7 | 78.8 | 27.9 | 40.7 | 36.7 | 38.6 |
| w. Clip-higher | 18.1 | 11.5 | 56.6 | 79.2 | 29.8 | 43.3 | 40.4 | 38.8 |
| w. **`CLIP-Cov`** | 22.1 | **15.8** | 58.2 | 80.4 | **30.5** | **44.1** | **41.1** | 40.4 |
| w. **`KL-Cov`** | **22.6** | 12.9 | **61.4** | **80.8** | 29.1 | 42.6 | 38.2 | **40.6** |
| *Qwen2.5-32B* | | | | | | | | |
| GRPO | 21.8 | 16.2 | 69.7 | 84.2 | 35.2 | 43.6 | 45.5 | 45.8 |
| w. Clip-higher | 35.6 | 22.3 | 69.5 | 77.2 | 35.1 | 42.5 | 43.0 | 47.2 |
| w. **`CLIP-Cov`** | 32.3 | 22.7 | 67.2 | **87.0** | **42.0** | **57.2** | 46.0 | 50.3 |
| w. **`KL-Cov`** | **36.8** | **30.8** | **74.5** | 84.6 | 39.1 | 49.0 | **46.3** | **52.2** |

Both approaches achieve non-trivial improvements across all benchmarks. On average, our method outperforms GRPO by 2.0% for the 7B model and by 6.4% for the 32B model, with the larger Qwen2.5-32B seeing the more substantial gains: on the most challenging benchmarks, AIME24 and AIME25, our method improves over GRPO by 15.0% and 14.6%, respectively.

# 🎈Citation

If you find this paper or repo helpful, please cite us.

```bibtex
@article{cui2025entropy,
  title={The Entropy Mechanism of Reinforcement Learning for Reasoning Language Models},
  author={Cui, Ganqu and Zhang, Yuchen and Chen, Jiacheng and Yuan, Lifan and Wang, Zhi and Zuo, Yuxin and Li, Haozhan and Fan, Yuchen and Chen, Huayu and Chen, Weize and others},
  journal={arXiv preprint arXiv:2505.22617},
  year={2025}
}
```

# 🌻Acknowledgement

We implement our reinforcement learning algorithm by extending [verl](https://github.com/volcengine/verl) and use [vLLM](https://github.com/vllm-project/vllm) for inference. Our models are trained primarily from the [Qwen2.5 family](https://github.com/QwenLM/Qwen2.5), and our training data is built from [DAPO-MATH](https://huggingface.co/datasets/BytedTsinghua-SIA/DAPO-Math-17k). Thanks for their great contributions!
# 📬 Contact

For questions, discussion, or collaboration opportunities, feel free to contact:

- Ganqu Cui: cuiganqu@pjlab.org.cn
- Yuchen Zhang: yuchen.zhang2003@gmail.com
- Jiacheng Chen: jackchan9345@gmail.com
- Ning Ding: ningding.cs@gmail.com

================================================
FILE: verl_distillation/recipe/entropy/config/entropy_trainer.yaml
================================================
hydra:
  searchpath:
    - file://verl/trainer/config

defaults:
  - ppo_trainer
  - _self_

data:
  gen_batch_size: ${data.train_batch_size}

reward_model:
  reward_kwargs:
    overlong_buffer_cfg: ${reward_model.overlong_buffer}
  reward_manager: dapo
  overlong_buffer:
    enable: False
    len: 0
    penalty_factor: 0.0
    log: False

algorithm:
  filter_groups:
    enable: False  # We try to avoid forgetting to set enable
    metric: null  # acc / score / seq_reward / seq_final_reward / ...
    max_num_gen_batches: 0  # Non-positive values mean no upper limit

trainer:
  project_name: verl-entropy

actor_rollout_ref:
  actor:
    policy_loss:
      loss_mode: "vanilla"  # vanilla / clip_cov / kl_cov, from https://arxiv.org/abs/2505.22617
      clip_cov_ratio: 0.0002  # for clip-cov loss
      clip_cov_lb: 1.0  # for clip-cov loss
      clip_cov_ub: 5.0  # for clip-cov loss
      kl_cov_ratio: 0.0002  # for kl-cov loss
      ppo_kl_coef: 0.1  # for kl-cov loss

================================================
FILE: verl_distillation/recipe/entropy/entropy_ray_trainer.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
FSDP PPO Trainer with Ray-based single controller.
This trainer supports model-agnostic model initialization with huggingface
"""

import uuid
from collections import defaultdict
from copy import deepcopy
from pprint import pprint

import numpy as np
import torch
from tqdm import tqdm

from verl import DataProto
from verl.trainer.ppo.metric_utils import compute_data_metrics, compute_throughout_metrics, compute_timing_metrics
from verl.trainer.ppo.ray_trainer import (
    AdvantageEstimator,
    RayPPOTrainer,
    apply_kl_penalty,
    compute_advantage,
    compute_response_mask,
)
from verl.trainer.ppo.reward import compute_reward
from verl.utils.metric import reduce_metrics
from verl.utils.profiler import simple_timer


class RayEntropyTrainer(RayPPOTrainer):
    """
    Note that this trainer runs on the driver process on a single CPU/GPU node.
    """

    def compute_kl_related_metrics(self, batch: DataProto, timing_raw: dict):
        batch.batch["response_mask"] = compute_response_mask(batch)

        # recompute old_log_probs
        with simple_timer("old_log_prob", timing_raw):
            old_log_prob = self.actor_rollout_wg.compute_log_prob(batch)
            batch = batch.union(old_log_prob)

        if self.use_reference_policy:
            # compute reference log_prob
            with simple_timer("ref", timing_raw):
                if not self.ref_in_actor:
                    ref_log_prob = self.ref_policy_wg.compute_ref_log_prob(batch)
                else:
                    ref_log_prob = self.actor_rollout_wg.compute_ref_log_prob(batch)
                batch = batch.union(ref_log_prob)

        return batch

    def fit(self):
        """
        The training loop of PPO.
The driver process only need to call the compute functions of the worker group through RPC to construct the PPO dataflow. The light-weight advantage computation is done on the driver process. """ from omegaconf import OmegaConf from verl.utils.tracking import Tracking logger = Tracking( project_name=self.config.trainer.project_name, experiment_name=self.config.trainer.experiment_name, default_backend=self.config.trainer.logger, config=OmegaConf.to_container(self.config, resolve=True), ) self.global_steps = 0 # load checkpoint before doing anything self._load_checkpoint() # perform validation before training # currently, we only support validation using the reward_function. if self.val_reward_fn is not None and self.config.trainer.get("val_before_train", True): val_metrics = self._validate() assert val_metrics, f"{val_metrics=}" pprint(f"Initial validation metrics: {val_metrics}") logger.log(data=val_metrics, step=self.global_steps) if self.config.trainer.get("val_only", False): return # add tqdm progress_bar = tqdm(total=self.total_training_steps, initial=self.global_steps, desc="Training Progress") # we start from step 1 self.global_steps += 1 last_val_metrics = None timing_raw = defaultdict(float) batch = None num_prompt_in_batch = 0 num_gen_batches = 0 for epoch in range(self.config.trainer.total_epochs): for batch_dict in self.train_dataloader: metrics = {} new_batch: DataProto = DataProto.from_single_dict(batch_dict) num_gen_batches += 1 # pop those keys for generation if "multi_modal_inputs" in new_batch.non_tensor_batch.keys(): gen_batch = new_batch.pop( batch_keys=["input_ids", "attention_mask", "position_ids"], non_tensor_batch_keys=["raw_prompt_ids", "multi_modal_data", "multi_modal_inputs"], ) else: gen_batch = new_batch.pop( batch_keys=["input_ids", "attention_mask", "position_ids"], non_tensor_batch_keys=["raw_prompt_ids"], ) gen_batch_output = gen_batch.repeat( repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True ) is_last_step = self.global_steps >= self.total_training_steps with simple_timer("step", timing_raw): # generate a batch with simple_timer("gen", timing_raw): if not self.async_rollout_mode: gen_batch_output = self.actor_rollout_wg.generate_sequences(gen_batch_output) else: gen_batch_output = self.async_rollout_manager.generate_sequences(gen_batch_output) if self.config.algorithm.adv_estimator == AdvantageEstimator.REMAX: with simple_timer("gen_max", timing_raw): gen_baseline_batch = deepcopy(gen_batch) gen_baseline_batch.meta_info["do_sample"] = False gen_baseline_output = self.actor_rollout_wg.generate_sequences(gen_baseline_batch) new_batch = new_batch.union(gen_baseline_output) # compute reward model score on new_batch rm_scores = None if self.use_rm and "rm_scores" not in new_batch.batch.keys(): rm_scores = self.rm_wg.compute_rm_score(new_batch) new_batch = new_batch.union(rm_scores) reward_baseline_tensor, _ = compute_reward(new_batch, self.reward_fn) reward_baseline_tensor = reward_baseline_tensor.sum(dim=-1) keys_to_pop = set(gen_baseline_output.batch.keys()) if rm_scores is not None: keys_to_pop.update(rm_scores.batch.keys()) new_batch.pop(batch_keys=list(keys_to_pop)) new_batch.batch["reward_baselines"] = reward_baseline_tensor del rm_scores, gen_baseline_batch, gen_baseline_output new_batch.non_tensor_batch["uid"] = np.array( [str(uuid.uuid4()) for _ in range(len(new_batch.batch))], dtype=object ) # repeat to align with repeated responses in rollout new_batch = new_batch.repeat(repeat_times=self.config.actor_rollout_ref.rollout.n, 
interleave=True) new_batch = new_batch.union(gen_batch_output) if self.config.algorithm.use_kl_in_reward: # We need these metrics for apply_kl_penalty if using kl in reward new_batch = self.compute_kl_related_metrics(new_batch, timing_raw) # otherwise, we will compute those after dynamic sampling with simple_timer("reward", timing_raw): # compute scores. Support both model and function-based. # We first compute the scores using reward model. Then, we call reward_fn to combine # the results from reward model and rule-based results. if self.use_rm and "rm_scores" not in new_batch.batch.keys(): # we first compute reward model score reward_tensor = self.rm_wg.compute_rm_score(new_batch) new_batch = new_batch.union(reward_tensor) # we combine with rule-based rm reward_tensor, reward_extra_infos_dict = compute_reward(new_batch, self.reward_fn) new_batch.batch["token_level_scores"] = reward_tensor print(f"{list(reward_extra_infos_dict.keys())=}") if reward_extra_infos_dict: new_batch.non_tensor_batch.update( {k: np.array(v) for k, v in reward_extra_infos_dict.items()} ) # compute rewards. apply_kl_penalty if available if self.config.algorithm.use_kl_in_reward: new_batch, kl_metrics = apply_kl_penalty( new_batch, kl_ctrl=self.kl_ctrl_in_reward, kl_penalty=self.config.algorithm.kl_penalty ) metrics.update( kl_metrics ) # TODO: This will be cleared if we use multiple genenration batches else: new_batch.batch["token_level_rewards"] = new_batch.batch["token_level_scores"] if not self.config.algorithm.filter_groups.enable: batch = new_batch else: # NOTE: When prompts after filtering is less than train batch size, # we skip to the next generation batch metric_name = self.config.algorithm.filter_groups.metric if metric_name == "seq_final_reward": # Turn to numpy for easier filtering new_batch.non_tensor_batch["seq_final_reward"] = ( new_batch.batch["token_level_rewards"].sum(dim=-1).numpy() ) elif metric_name == "seq_reward": new_batch.non_tensor_batch["seq_reward"] = ( new_batch.batch["token_level_scores"].sum(dim=-1).numpy() ) # Collect the sequence reward for each trajectory prompt_uid2metric_vals = defaultdict(list) for uid, metric_val in zip( new_batch.non_tensor_batch["uid"], new_batch.non_tensor_batch[metric_name], strict=True ): prompt_uid2metric_vals[uid].append(metric_val) prompt_uid2metric_std = {} for prompt_uid, metric_vals in prompt_uid2metric_vals.items(): prompt_uid2metric_std[prompt_uid] = np.std(metric_vals) kept_prompt_uids = [ uid for uid, std in prompt_uid2metric_std.items() if std > 0 or len(prompt_uid2metric_vals[uid]) == 1 ] num_prompt_in_batch += len(kept_prompt_uids) kept_traj_idxs = [] for idx, traj_from_prompt_uid in enumerate(new_batch.non_tensor_batch["uid"]): if traj_from_prompt_uid in kept_prompt_uids: kept_traj_idxs.append(idx) new_batch = new_batch[kept_traj_idxs] batch = new_batch if batch is None else DataProto.concat([batch, new_batch]) prompt_bsz = self.config.data.train_batch_size if num_prompt_in_batch < prompt_bsz: print(f"{num_prompt_in_batch=} < {prompt_bsz=}") max_num_gen_batches = self.config.algorithm.filter_groups.max_num_gen_batches if max_num_gen_batches <= 0 or num_gen_batches < max_num_gen_batches: print(f"{num_gen_batches=}. Keep generating...") continue else: raise ValueError( f"{num_gen_batches=} >= {max_num_gen_batches=}." + " Generated too many. Please check if your data are too difficult." + " You could also try set max_num_gen_batches=0 to enable endless trials." 
) else: # Align the batch traj_bsz = self.config.data.train_batch_size * self.config.actor_rollout_ref.rollout.n print( f"Collected {num_prompt_in_batch} / {self.config.data.train_batch_size} prompt. " f"Collecting finished." ) batch = batch[:traj_bsz] # === Updating === # balance the number of valid tokens on each dp rank. # Note that this breaks the order of data inside the batch. # Please take care when you implement group based adv computation such as GRPO and rloo if self.config.trainer.balance_batch: self._balance_batch(batch, metrics=metrics) # compute global_valid tokens batch.meta_info["global_token_num"] = torch.sum(batch.batch["attention_mask"], dim=-1).tolist() if not self.config.algorithm.use_kl_in_reward: batch = self.compute_kl_related_metrics(batch, timing_raw) # compute values if self.use_critic: with simple_timer("values", timing_raw): values = self.critic_wg.compute_values(batch) batch = batch.union(values) with simple_timer("adv", timing_raw): # compute advantages, executed on the driver process norm_adv_by_std_in_grpo = self.config.algorithm.get("norm_adv_by_std_in_grpo", True) batch = compute_advantage( batch, adv_estimator=self.config.algorithm.adv_estimator, gamma=self.config.algorithm.gamma, lam=self.config.algorithm.lam, num_repeat=self.config.actor_rollout_ref.rollout.n, norm_adv_by_std_in_grpo=norm_adv_by_std_in_grpo, ) # update critic if self.use_critic: with simple_timer("update_critic", timing_raw): critic_output = self.critic_wg.update_critic(batch) critic_output_metrics = reduce_metrics(critic_output.meta_info["metrics"]) metrics.update(critic_output_metrics) # implement critic warmup if self.config.trainer.critic_warmup <= self.global_steps: # update actor with simple_timer("update_actor", timing_raw): actor_output = self.actor_rollout_wg.update_actor(batch) actor_output_metrics = reduce_metrics(actor_output.meta_info["metrics"]) metrics.update(actor_output_metrics) # validate if ( self.val_reward_fn is not None and self.config.trainer.test_freq > 0 and (is_last_step or self.global_steps % self.config.trainer.test_freq == 0) ): with simple_timer("testing", timing_raw): val_metrics: dict = self._validate() if is_last_step: last_val_metrics = val_metrics metrics.update(val_metrics) if self.config.trainer.save_freq > 0 and ( is_last_step or self.global_steps % self.config.trainer.save_freq == 0 ): with simple_timer("save_checkpoint", timing_raw): self._save_checkpoint() # collect metrics metrics.update(compute_data_metrics(batch=batch, use_critic=self.use_critic)) metrics.update(compute_timing_metrics(batch=batch, timing_raw=timing_raw)) # TODO: implement actual tflpo and theoretical tflpo n_gpus = self.resource_pool_manager.get_n_gpus() metrics.update(compute_throughout_metrics(batch=batch, timing_raw=timing_raw, n_gpus=n_gpus)) timing_raw = defaultdict(float) # clear timing metrics["train/num_gen_batches"] = num_gen_batches batch = None num_prompt_in_batch = 0 num_gen_batches = 0 # TODO: make a canonical logger that supports various backend logger.log(data=metrics, step=self.global_steps) if is_last_step: pprint(f"Final validation metrics: {last_val_metrics}") progress_bar.close() return progress_bar.update(1) self.global_steps += 1 ================================================ FILE: verl_distillation/recipe/entropy/main_entropy.py ================================================ # Copyright 2024 Bytedance Ltd. 
and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Note that we don't combine the main with ray_trainer as ray_trainer is used by other main. """ import hydra import ray from omegaconf import OmegaConf from .entropy_ray_trainer import RayEntropyTrainer from .reward import load_reward_manager @hydra.main(config_path="config", config_name="entropy_trainer", version_base=None) def main(config): run_ppo(config) def run_ppo(config) -> None: if not ray.is_initialized(): # this is for local ray cluster default_runtime_env = { "env_vars": { "TOKENIZERS_PARALLELISM": "true", "NCCL_DEBUG": "WARN", "VLLM_LOGGING_LEVEL": "WARN", } } ray_init_kwargs = config.ray_kwargs.get("ray_init", {}) runtime_env_kwargs = ray_init_kwargs.get("runtime_env", {}) runtime_env = OmegaConf.merge(default_runtime_env, runtime_env_kwargs) ray_init_kwargs = OmegaConf.create({**ray_init_kwargs, "runtime_env": runtime_env}) print(f"ray init kwargs: {ray_init_kwargs}") ray.init(**OmegaConf.to_container(ray_init_kwargs)) runner = TaskRunner.remote() ray.get(runner.run.remote(config)) def merge_dict(a: dict, b: dict) -> dict: """Return a new dict that has `a` updated with `b` (b wins on conflicts). Example:: >>> d1 = {"x": 1, "y": 2} >>> d2 = {"y": 20, "z": 3} >>> new_dict = merge_dict(d1, d2) >>> print(new_dict) # {'x': 1, 'y': 20, 'z': 3} >>> print(d1) # {"x": 1, "y": 2} (unchanged) >>> print(d2) # {"y": 20, "z": 3} (unchanged) """ return a | b @ray.remote(num_cpus=1) # please make sure main_task is not scheduled on head class TaskRunner: def run(self, config): # print initial config from pprint import pprint from omegaconf import OmegaConf from verl.utils.fs import copy_to_local pprint(OmegaConf.to_container(config, resolve=True)) # resolve=True will eval symbol values OmegaConf.resolve(config) # download the checkpoint from hdfs local_path = copy_to_local(config.actor_rollout_ref.model.path) print(f"{config.actor_rollout_ref.model.path}") # instantiate tokenizer from verl.utils import hf_processor, hf_tokenizer trust_remote_code = config.data.get("trust_remote_code", False) tokenizer = hf_tokenizer(local_path, trust_remote_code=trust_remote_code) processor = hf_processor(local_path, use_fast=True) # used for multimodal LLM, could be none # define worker classes if config.actor_rollout_ref.actor.strategy in {"fsdp", "fsdp2"}: assert config.critic.strategy in {"fsdp", "fsdp2"} from verl.single_controller.ray import RayWorkerGroup from verl.workers.fsdp_workers import ActorRolloutRefWorker, AsyncActorRolloutRefWorker, CriticWorker actor_rollout_cls = ( AsyncActorRolloutRefWorker if config.actor_rollout_ref.rollout.mode == "async" else ActorRolloutRefWorker ) ray_worker_group_cls = RayWorkerGroup elif config.actor_rollout_ref.actor.strategy == "megatron": assert config.actor_rollout_ref.actor.strategy == config.critic.strategy from verl.single_controller.ray import RayWorkerGroup from verl.workers.megatron_workers import ActorRolloutRefWorker, CriticWorker actor_rollout_cls = ActorRolloutRefWorker ray_worker_group_cls = 
RayWorkerGroup else: raise NotImplementedError from verl.trainer.ppo.ray_trainer import ResourcePoolManager, Role role_worker_mapping = { Role.ActorRollout: ray.remote(actor_rollout_cls), Role.Critic: ray.remote(CriticWorker), } global_pool_id = "global_pool" resource_pool_spec = { global_pool_id: [config.trainer.n_gpus_per_node] * config.trainer.nnodes, } mapping = { Role.ActorRollout: global_pool_id, Role.Critic: global_pool_id, } # we should adopt a multi-source reward function here # - for rule-based rm, we directly call a reward score # - for model-based rm, we call a model # - for code related prompt, we send to a sandbox if there are test cases # - finally, we combine all the rewards together # - The reward type depends on the tag of the data if config.reward_model.enable: if config.reward_model.strategy in {"fsdp", "fsdp2"}: from verl.workers.fsdp_workers import RewardModelWorker elif config.reward_model.strategy == "megatron": from verl.workers.megatron_workers import RewardModelWorker else: raise NotImplementedError role_worker_mapping[Role.RewardModel] = ray.remote(RewardModelWorker) mapping[Role.RewardModel] = global_pool_id # use reference model if config.algorithm.use_kl_in_reward or config.actor_rollout_ref.actor.use_kl_loss: role_worker_mapping[Role.RefPolicy] = ray.remote(ActorRolloutRefWorker) mapping[Role.RefPolicy] = global_pool_id reward_kwargs = { "max_resp_len": config.data.max_response_length, "overlong_buffer_cfg": config.reward_model.overlong_buffer, } cfg_reward_kwargs = config.reward_model.get("reward_kwargs", {}) reward_fn = load_reward_manager( config, tokenizer, num_examine=0, **OmegaConf.merge(OmegaConf.create(reward_kwargs), cfg_reward_kwargs) ) val_reward_fn = load_reward_manager(config, tokenizer, num_examine=1, **reward_kwargs) resource_pool_manager = ResourcePoolManager(resource_pool_spec=resource_pool_spec, mapping=mapping) from verl.utils.dataset.rl_dataset import collate_fn train_dataset = create_rl_dataset( config.data.train_files, config.data, tokenizer, processor, max_samples=config.data.get("train_max_samples", -1), ) val_dataset = create_rl_dataset( config.data.val_files, config.data, tokenizer, processor, max_samples=config.data.get("val_max_samples", -1) ) train_sampler = create_rl_sampler(config.data, train_dataset) trainer = RayEntropyTrainer( config=config, tokenizer=tokenizer, processor=processor, role_worker_mapping=role_worker_mapping, resource_pool_manager=resource_pool_manager, ray_worker_group_cls=ray_worker_group_cls, reward_fn=reward_fn, val_reward_fn=val_reward_fn, train_dataset=train_dataset, val_dataset=val_dataset, collate_fn=collate_fn, train_sampler=train_sampler, ) trainer.init_workers() trainer.fit() def create_rl_dataset(data_paths, data_config, tokenizer, processor, max_samples: int = -1): """Create a dataset. Arguments: data_config: The data config. tokenizer (Tokenizer): The tokenizer. processor (Processor): The processor. Returns: dataset (Dataset): The dataset. 
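    Example (hypothetical path; mirrors the call in ``TaskRunner.run`` above)::

        >>> train_dataset = create_rl_dataset(
        ...     "/path/to/train.parquet",  # hypothetical data file
        ...     config.data, tokenizer, processor, max_samples=-1,
        ... )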
""" from torch.utils.data import Dataset from verl.utils.dataset.rl_dataset import RLHFDataset if "custom_cls" in data_config and data_config.custom_cls.get("path", None) is not None: from verl.utils.import_utils import load_extern_type dataset_cls = load_extern_type(data_config.custom_cls.path, data_config.custom_cls.name) if not issubclass(dataset_cls, Dataset): raise TypeError( f"The custom dataset class '{data_config.custom_cls.name}' from '{data_config.custom_cls.path}' " f"must inherit from torch.utils.data.Dataset" ) else: dataset_cls = RLHFDataset print(f"Using dataset class: {dataset_cls.__name__}") dataset = dataset_cls( data_files=data_paths, tokenizer=tokenizer, processor=processor, config=data_config, max_samples=max_samples, ) return dataset def create_rl_sampler(data_config, dataset): """Create a sampler for the dataset. Arguments: data_config: The data config. dataset (Dataset): The dataset. Returns: sampler (Sampler): The sampler. """ import torch from torch.utils.data import RandomSampler, SequentialSampler # use sampler for better ckpt resume if data_config.shuffle: train_dataloader_generator = torch.Generator() seed = data_config.get("seed") if seed is not None: train_dataloader_generator.manual_seed(seed) sampler = RandomSampler(data_source=dataset, generator=train_dataloader_generator) else: sampler = SequentialSampler(data_source=dataset) return sampler if __name__ == "__main__": main() ================================================ FILE: verl_distillation/recipe/entropy/reward.py ================================================ # Copyright 2025 Individual Contributor: Thibaut Barroyer # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import multiprocessing from functools import partial import ray from verl import DataProto from verl.trainer.ppo.reward import compute_reward, get_custom_reward_fn from .reward_score import _default_compute_score def load_reward_manager(config, tokenizer, num_examine, **reward_kwargs): """ Load and initialize a reward manager based on the configuration. Args: config: PPO trainer configuration object containing reward_model fields. tokenizer: Tokenizer object used for processing text. num_examine: Number of samples to examine. **reward_kwargs: Additional keyword arguments for the reward manager. Returns: An instance of the specified reward manager class. 
""" from verl.workers.reward_manager import get_reward_manager_cls # The list of pre-defined reward managers are defined in `verl/workers/reward_manager/`: # naive: NaiveRewardManager # prime: PrimeRewardManager # batch: BatchRewardManager # dapo: DAPORewardManager # Note(haibin.lin): For custom reward managers, please make sure they are imported and # registered via `verl.workers.reward_manager.register` # By default reward_manager is set to naive (NaiveRewardManager) reward_manager_name = config.reward_model.get("reward_manager", "naive") reward_manager_cls = get_reward_manager_cls(reward_manager_name) # Try to get a custom reward function based on the configuration compute_score = get_custom_reward_fn(config) final_compute_score = compute_score if compute_score is None: sandbox_config = config.reward_model.get("sandbox_fusion") sandbox_url = sandbox_config.get("url") if sandbox_config else None if sandbox_url: sandbox_manager = multiprocessing.Manager() # Create a semaphore to control concurrent access to the sandbox _concurrent_semaphore = sandbox_manager.Semaphore(sandbox_config.get("max_concurrent", 64)) final_compute_score = partial( _default_compute_score, sandbox_fusion_url=sandbox_url, concurrent_semaphore=_concurrent_semaphore ) else: final_compute_score = _default_compute_score # Instantiate and return the reward manager with the specified parameters return reward_manager_cls( tokenizer=tokenizer, num_examine=num_examine, compute_score=final_compute_score, reward_fn_key=config.data.reward_fn_key, **reward_kwargs, ) @ray.remote(num_cpus=1) def compute_reward_async(data: DataProto, config, tokenizer): """ Load the reward manager and compute the reward for a batch of data. This is meant to be run in a separate Ray worker. """ reward_fn = load_reward_manager(config, tokenizer, num_examine=0, **config.reward_model.get("reward_kwargs", {})) return compute_reward(data, reward_fn) ================================================ FILE: verl_distillation/recipe/entropy/reward_score/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from . import gsm8k, math, prime_math, prime_code import traceback from . 
import entropy_math def _default_compute_score( data_source, solution_str, ground_truth, extra_info=None, sandbox_fusion_url=None, concurrent_semaphore=None ): try: res = entropy_math.compute_score(solution_str, str(ground_truth)) # print(f"data_source: {data_source}") # raise NotImplementedError(f"Reward function is not implemented for {data_source=}") if isinstance(res, dict): return res elif isinstance(res, int | float | bool): return float(res) else: return float(res[0]) except Exception as e: print(f"[ERROR] Error in process_completion for task : {str(e)}") traceback.print_exc() # print the full stack trace raise # re-raise so the caller can handle it ================================================ FILE: verl_distillation/recipe/entropy/reward_score/entropy_math/__init__.py ================================================ # Copyright 2024 PRIME team and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Provides a math answer grading function with high recall. Based on HF math_verify, verl, open reasoner zero, etc. """ import os import re import signal from itertools import islice, zip_longest from math import isclose from typing import Optional import sympy from latex2sympy2_extended import latex2sympy from math_verify import ExprExtractionConfig, LatexExtractionConfig, parse, verify from pylatexenc import latex2text from sympy import N, simplify from sympy.parsing import sympy_parser from sympy.parsing.latex import parse_latex from sympy.parsing.sympy_parser import parse_expr """ This code is adapted from: Dr. GRPO (https://github.com/sail-sg/understand-r1-zero/blob/main/understand_r1_zero/math_grader.py). """ def timeout_ours(timeout_seconds: int = 8): if os.name == "posix": import signal def decorator(func): def handler(signum, frame): raise TimeoutError("Operation timed out!") def wrapper(*args, **kwargs): old_handler = signal.getsignal(signal.SIGALRM) signal.signal(signal.SIGALRM, handler) signal.alarm(timeout_seconds) try: return func(*args, **kwargs) finally: signal.alarm(0) signal.signal(signal.SIGALRM, old_handler) return wrapper return decorator else: raise NotImplementedError(f"Unsupported OS: {os.name}") # Dan Hendrycks' code def mathd_normalize_answer(answer: Optional[str]) -> Optional[str]: if answer is None: return None answer = answer.strip() try: # Remove enclosing `\text{}`. m = re.search(r"^\\text\{(?P<text>.+?)\}$", answer) if m is not None: answer = m.group("text").strip() return _strip_string(answer) except Exception: return answer # units mainly from MathQA unit_texts = [ "east", "degree", "mph", "kmph", "ft", "m sqaure", " m east", "sq m", "deg", "mile", "q .", "monkey", "prime", "ratio", "profit of rs", "rd", "o", "gm", "p . m", "lb", "tile", "per", "dm", "lt", "gain", "ab", "way", "west", "a .", "b .", "c .", "d .", "e .", "f .", "g .", "h .", "t", "a", "h", "no change", "men", "soldier", "pie", "bc", "excess", "st", "inches", "noon", "percent", "by", "gal", "kmh", "c", "acre", "rise", "a . m", "th", "π r 2", "sq", "mark", "l", "toy", "coin", "sq . 
m", "gallon", "° f", "profit", "minw", "yr", "women", "feet", "am", "pm", "hr", "cu cm", "square", "v â € ™", "are", "rupee", "rounds", "cubic", "cc", "mtr", "s", "ohm", "number", "kmph", "day", "hour", "minute", "min", "second", "man", "woman", "sec", "cube", "mt", "sq inch", "mp", "∏ cm ³", "hectare", "more", "sec", "unit", "cu . m", "cm 2", "rs .", "rs", "kg", "g", "month", "km", "m", "cm", "mm", "apple", "liter", "loss", "yard", "pure", "year", "increase", "decrease", "d", "less", "Surface", "litre", "pi sq m", "s .", "metre", "meter", "inch", ] unit_texts.extend([t + "s" for t in unit_texts]) def _strip_string(string): def _fix_fracs(string): substrs = string.split("\\frac") new_str = substrs[0] if len(substrs) > 1: substrs = substrs[1:] for substr in substrs: new_str += "\\frac" if substr[0] == "{": new_str += substr else: try: assert len(substr) >= 2 except Exception: return string a = substr[0] b = substr[1] if b != "{": if len(substr) > 2: post_substr = substr[2:] new_str += "{" + a + "}{" + b + "}" + post_substr else: new_str += "{" + a + "}{" + b + "}" else: if len(substr) > 2: post_substr = substr[2:] new_str += "{" + a + "}" + b + post_substr else: new_str += "{" + a + "}" + b string = new_str return string def _fix_a_slash_b(string): if len(string.split("/")) != 2: return string a = string.split("/")[0] b = string.split("/")[1] try: a = int(a) b = int(b) assert string == "{}/{}".format(a, b) new_string = "\\frac{" + str(a) + "}{" + str(b) + "}" return new_string except Exception: return string def _remove_right_units(string): # "\\text{ " only ever occurs (at least in the val set) when describing units if "\\text{ " in string: splits = string.split("\\text{ ") assert len(splits) == 2 return splits[0] else: return string def _fix_sqrt(string): if "\\sqrt" not in string: return string splits = string.split("\\sqrt") new_string = splits[0] for split in splits[1:]: if split[0] != "{": a = split[0] new_substr = "\\sqrt{" + a + "}" + split[1:] else: new_substr = "\\sqrt" + split new_string += new_substr return new_string # linebreaks string = string.replace("\n", "") # print(string) # remove inverse spaces string = string.replace("\\!", "") # print(string) # replace \\ with \ string = string.replace("\\\\", "\\") # print(string) # matrix string = re.sub(r"\\begin\{array\}\{.*?\}", r"\\begin{pmatrix}", string) string = re.sub(r"\\end\{array\}", r"\\end{pmatrix}", string) string = string.replace("bmatrix", "pmatrix") # replace tfrac and dfrac with frac string = string.replace("tfrac", "frac") string = string.replace("dfrac", "frac") string = string.replace("\\neq", "\\ne").replace("\\leq", "\\le").replace("\\geq", "\\ge") # print(string) # remove \left and \right string = string.replace("\\left", "") string = string.replace("\\right", "") # print(string) # Remove unit: miles, dollars if after is not none _string = re.sub(r"\\text{.*?}$", "", string).strip() if _string != "" and _string != string: # print("Warning: unit not removed: '{}' -> '{}'".format(string, _string)) string = _string # Remove unit: texts for _ in range(2): for unit_text in unit_texts: # use regex, the prefix should be either the start of the string or a non-alphanumeric character # the suffix should be either the end of the string or a non-alphanumeric character _string = re.sub(r"(^|\W)" + unit_text + r"($|\W)", r"\1\2", string) if _string != "": string = _string # Remove circ (degrees) string = string.replace("^{\\circ}", "") string = string.replace("^\\circ", "") # remove dollar signs string = 
string.replace("\\$", "") # remove units (on the right) string = _remove_right_units(string) # remove percentage string = string.replace("\\\\%", "") string = string.replace("\\%", "") # " 0." equivalent to " ." and "{0." equivalent to "{." Alternatively, add "0" if "." is the start of the string string = string.replace(" .", " 0.") string = string.replace("{.", "{0.") # if empty, return empty string if len(string) == 0: return string if string[0] == ".": string = "0" + string # to consider: get rid of e.g. "k = " or "q = " at beginning if len(string.split("=")) == 2: if len(string.split("=")[0]) <= 2: string = string.split("=")[1] # fix sqrt3 --> sqrt{3} string = _fix_sqrt(string) # remove spaces string = string.replace(" ", "") # \frac1b or \frac12 --> \frac{1}{b} and \frac{1}{2}, etc. Even works with \frac1{72} (but not \frac{72}1). # Also does a/b --> \\frac{a}{b} string = _fix_fracs(string) # manually change 0.5 --> \frac{1}{2} if string == "0.5": string = "\\frac{1}{2}" # NOTE: X/Y changed to \frac{X}{Y} in dataset, but in simple cases fix in case the model output is X/Y string = _fix_a_slash_b(string) return string SUBSTITUTIONS = [ ("an ", ""), ("a ", ""), (".$", "$"), ("\\$", ""), (r"\ ", ""), (" ", ""), ("mbox", "text"), (",\\text{and}", ","), ("\\text{and}", ","), ("\\text{m}", "\\text{}"), ] REMOVED_EXPRESSIONS = [ "square", "ways", "integers", "dollars", "mph", "inches", "ft", "hours", "km", "units", "\\ldots", "sue", "points", "feet", "minutes", "digits", "cents", "degrees", "cm", "gm", "pounds", "meters", "meals", "edges", "students", "childrentickets", "multiples", "\\text{s}", "\\text{.}", "\\text{\ns}", "\\text{}^2", "\\text{}^3", "\\text{\n}", "\\text{}", r"\mathrm{th}", r"^\circ", r"^{\circ}", r"\;", r",\!", "{,}", '"', "\\dots", ] def normalize_final_answer(final_answer: str) -> str: """ Normalize a final answer to a quantitative reasoning question. This code comes from https://arxiv.org/pdf/2206.14858.pdf, page18. """ # final_answer = final_answer.split("=")[-1] for before, after in SUBSTITUTIONS: final_answer = final_answer.replace(before, after) for expr in REMOVED_EXPRESSIONS: final_answer = final_answer.replace(expr, "") # Extract answer that is in LaTeX math, is bold, # is surrounded by a box, etc. 
final_answer = re.sub(r"(.*?)(\$)(.*?)(\$)(.*)", "$\\3$", final_answer) final_answer = re.sub(r"(\\text\{)(.*?)(\})", "\\2", final_answer) final_answer = re.sub(r"(\\textbf\{)(.*?)(\})", "\\2", final_answer) final_answer = re.sub(r"(\\overline\{)(.*?)(\})", "\\2", final_answer) final_answer = re.sub(r"(\\boxed\{)(.*)(\})", "\\2", final_answer) # Normalize shorthand TeX: # \fracab -> \frac{a}{b} # \frac{abc}{bef} -> \frac{abc}{bef} # \fracabc -> \frac{a}{b}c # \sqrta -> \sqrt{a} # \sqrtab -> sqrt{a}b final_answer = re.sub(r"(frac)([^{])(.)", "frac{\\2}{\\3}", final_answer) final_answer = re.sub(r"(sqrt)([^{])", "sqrt{\\2}", final_answer) final_answer = final_answer.replace("$", "") # Normalize 100,000 -> 100000 if final_answer.replace(",", "").isdigit(): final_answer = final_answer.replace(",", "") return final_answer def repeatness(s: str): def ranks(seq): index = {v: i for i, v in enumerate(sorted(set(seq)))} return [index[v] for v in seq] def suffixArray(s): line = ranks(s) n, k, ans, sa = len(s), 1, line, [0] * len(s) while k < n - 1: line = ranks(list(zip_longest(line, islice(line, k, None), fillvalue=-1))) ans, k = line, k << 1 for i, k in enumerate(ans): sa[k] = i return ans, sa def lcp(arr, suffixArr, inv_suff): n, ans, k = len(arr), [0] * len(arr), 0 for i in range(n): if inv_suff[i] == n - 1: k = 0 continue j = suffixArr[inv_suff[i] + 1] while i + k < n and j + k < n and arr[i + k] == arr[j + k]: k += 1 ans[inv_suff[i]] = k if k > 0: k -= 1 return ans arr = [ord(i) for i in s] n = len(arr) if n <= 1: return 0 c, sa = suffixArray(arr) cnt = sum(lcp(arr, sa, c)) return (cnt * 2 / (n * (n + 1))) > 0.2 class timeout: def __init__(self, seconds=1, error_message="Timeout"): self.seconds = seconds self.error_message = error_message def handle_timeout(self, signum, frame): raise TimeoutError(self.error_message) def __enter__(self): signal.signal(signal.SIGALRM, self.handle_timeout) signal.alarm(self.seconds) def __exit__(self, type, value, traceback): signal.alarm(0) def latex_eval(latex): sym = parse_latex(latex) val = sym.evalf() return sym, val def numeric_equal(prediction: float, reference: float): # Note that relative tolerance has significant impact # on the result of the synthesized GSM-Hard dataset # if reference.is_integer(): # return isclose(reference, round(prediction), abs_tol=1e-4) # else: # prediction = round(prediction, len(str(reference).split(".")[-1])) return isclose(reference, prediction, rel_tol=1e-4) @timeout_ours(timeout_seconds=5) def symbolic_equal(a, b): def _parse(s): for f in [parse_latex, parse_expr, latex2sympy]: try: return f(s.replace("\\\\", "\\")) except Exception: try: return f(s) except Exception: pass return s a = _parse(a) b = _parse(b) # direct equal try: if str(a) == str(b) or a == b: return True except Exception: pass # simplify equal try: if a.equals(b) or simplify(a - b) == 0: return True except Exception: pass # equation equal try: if (abs(a.lhs - a.rhs)).equals(abs(b.lhs - b.rhs)): return True except Exception: pass try: if numeric_equal(float(N(a)), float(N(b))): return True except Exception: pass # matrix try: # if a and b are matrix if a.shape == b.shape: _a = a.applyfunc(lambda x: round(x, 3)) _b = b.applyfunc(lambda x: round(x, 3)) if _a.equals(_b): return True except Exception: pass return False def _is_latex_equal(str1, str2): try: sym1, val1 = latex_eval(str1) sym2, val2 = latex_eval(str2) if sym1 == sym2 or val1 == val2: return True else: raise ValueError except Exception: try: norm1, norm2 = normalize_final_answer(str1), 
normalize_final_answer(str2) sym1, val1 = latex_eval(norm1) sym2, val2 = latex_eval(norm2) if sym1 == sym2 or val1 == val2: return True except Exception: return norm1 == norm2 return False def is_latex_equal(given_answer: str, ground_truth: str) -> bool: try: with timeout(1): try: if (len(given_answer) > 128 and repeatness(given_answer)) or ( len(ground_truth) > 128 and repeatness(ground_truth) ): return False # First conduct normalized string matching. ground_truth_normalized = _normalize(ground_truth) given_normalized = _normalize(given_answer) if ground_truth_normalized is None: return False if ground_truth_normalized == given_normalized: return True # Next call math verify. given_answer.replace("\n", "") ground_truth.replace("\n", "") if "$" not in given_answer: given_answer = f"${given_answer}$" if "$" not in ground_truth: ground_truth = f"${ground_truth}$" return verify( parse( ground_truth, extraction_config=( LatexExtractionConfig(boxed_match_priority=0), ExprExtractionConfig(), ), fallback_mode="no_fallback", extraction_mode=["first_match"], parsing_timeout=1, ), parse( given_answer, extraction_config=( LatexExtractionConfig(boxed_match_priority=0), ExprExtractionConfig(), ), fallback_mode="no_fallback", extraction_mode=["first_match"], parsing_timeout=1, ), timeout_seconds=1, ) # or symbolic_equal(ground_truth, given_answer) except Exception: return False except TimeoutError: return False def is_value_equal(given_answer: str, ground_truth: str) -> bool: assert ground_truth is not None ground_truth_normalized_mathd = mathd_normalize_answer(ground_truth) given_answer_normalized_mathd = mathd_normalize_answer(given_answer) str_equal = ground_truth_normalized_mathd == given_answer_normalized_mathd try: number_equal = float(ground_truth_normalized_mathd) == float(given_answer_normalized_mathd) return str_equal or number_equal except Exception: return str_equal # sympy might hang -- we don't care about trying to be lenient in these cases BAD_SUBSTRINGS = ["^{", "^("] BAD_REGEXES = [r"\^[0-9]+\^", r"\^[0-9][0-9]+"] TUPLE_CHARS = "()[]" def _sympy_parse(expr: str): """Parses an expression with sympy.""" py_expr = expr.replace("^", "**") return sympy_parser.parse_expr( py_expr, transformations=(sympy_parser.standard_transformations + (sympy_parser.implicit_multiplication_application,)), ) def _parse_latex(expr: str) -> str: """Attempts to parse latex to an expression sympy can read.""" expr = expr.replace("\\tfrac", "\\frac") expr = expr.replace("\\dfrac", "\\frac") expr = expr.replace("\\frac", " \\frac") # Play nice with mixed numbers. expr = latex2text.LatexNodes2Text().latex_to_text(expr) # Replace the specific characters that this parser uses. expr = expr.replace("√", "sqrt") expr = expr.replace("π", "pi") expr = expr.replace("∞", "inf") expr = expr.replace("∪", "U") expr = expr.replace("·", "*") expr = expr.replace("×", "*") return expr.strip() def _is_float(num: str) -> bool: try: float(num) return True except ValueError: return False def _is_int(x: float) -> bool: try: return abs(x - int(round(x))) <= 1e-7 except Exception: return False def _is_frac(expr: str) -> bool: return bool(re.search(r"^-?[0-9]+.?/0*[1-9][0-9]*.?$", expr)) def _str_is_int(x: str) -> bool: try: x = _strip_properly_formatted_commas(x) x = float(x) return abs(x - int(round(x))) <= 1e-7 except Exception: return False def _str_to_int(x: str) -> bool: x = x.replace(",", "") x = float(x) return int(x) def _inject_implicit_mixed_number(step: str): """ Automatically make a mixed number evalable e.g. 
7 3/4 => 7+3/4 """ p1 = re.compile("([0-9]) +([0-9])") step = p1.sub("\\1+\\2", step) ## implicit mults return step def _strip_properly_formatted_commas(expr: str): # We want to be careful because we don't want to strip tuple commas p1 = re.compile(r"(\d)(,)(\d\d\d)($|\D)") while True: next_expr = p1.sub("\\1\\3\\4", expr) if next_expr == expr: break expr = next_expr return next_expr def _normalize(expr: str) -> str: """Normalize answer expressions.""" if expr is None: return None # Remove enclosing `\text{}`. m = re.search(r"^\\text\{(?P.+?)\}$", expr) if m is not None: expr = m.group("text") expr = expr.replace("\\%", "%") expr = expr.replace("\\$", "$") expr = expr.replace("$", "") expr = expr.replace("%", "") expr = expr.replace(" or ", " , ") expr = expr.replace(" and ", " , ") expr = expr.replace("million", "*10^6") expr = expr.replace("billion", "*10^9") expr = expr.replace("trillion", "*10^12") for unit in [ "degree", "cm", "centimeter", "meter", "mile", "second", "minute", "hour", "day", "week", "month", "year", "foot", "feet", "inch", "yard", ]: expr = re.sub(f"{unit}(es)?(s)? *(\\^[0-9]+)?", "", expr) expr = re.sub(r"\^ *\\circ", "", expr) if len(expr) > 0 and expr[0] == "{" and expr[-1] == "}": expr = expr[1:-1] expr = re.sub(",\\\\! *", "", expr) if _is_float(expr) and _is_int(float(expr)): expr = str(int(round(float(expr)))) if "\\" in expr: try: expr = _parse_latex(expr) except Exception: pass # edge case with mixed numbers and negative signs expr = re.sub("- *", "-", expr) expr = _inject_implicit_mixed_number(expr) expr = expr.replace(" ", "") # if we somehow still have latex braces here, just drop them expr = expr.replace("{", "") expr = expr.replace("}", "") # don't be case sensitive for text answers expr = expr.lower() if _str_is_int(expr): expr = str(_str_to_int(expr)) return expr def count_unknown_letters_in_expr(expr: str): expr = expr.replace("sqrt", "") expr = expr.replace("frac", "") letters_in_expr = set([x for x in expr if x.isalpha()]) return len(letters_in_expr) def should_allow_eval(expr: str): # we don't want to try parsing unknown text or functions of more than two variables if count_unknown_letters_in_expr(expr) > 2: return False for bad_string in BAD_SUBSTRINGS: if bad_string in expr: return False for bad_regex in BAD_REGEXES: if re.search(bad_regex, expr) is not None: return False return True @timeout_ours(timeout_seconds=5) def are_equal_under_sympy(ground_truth_normalized: str, given_normalized: str): are_equal = False try: expr = f"({ground_truth_normalized})-({given_normalized})" if should_allow_eval(expr): sympy_diff = _sympy_parse(expr) simplified = sympy.simplify(sympy_diff) if simplified == 0: are_equal = True except Exception: pass return are_equal def split_tuple(expr: str): """ Split the elements in a tuple/interval, while handling well-formatted commas in large numbers """ expr = _strip_properly_formatted_commas(expr) if len(expr) == 0: return [] if ( len(expr) > 2 and expr[0] in TUPLE_CHARS and expr[-1] in TUPLE_CHARS and all([ch not in expr[1:-1] for ch in TUPLE_CHARS]) ): elems = [elem.strip() for elem in expr[1:-1].split(",")] else: elems = [expr] return elems def last_boxed_only_string(string): idx = string.rfind("\\boxed") if idx < 0: idx = string.rfind("\\fbox") if idx < 0: return None i = idx right_brace_idx = None num_left_braces_open = 0 while i < len(string): if string[i] == "{": num_left_braces_open += 1 if string[i] == "}": num_left_braces_open -= 1 if num_left_braces_open == 0: right_brace_idx = i break i += 1 if right_brace_idx 
is None: retval = None else: retval = string[idx : right_brace_idx + 1] return retval def remove_boxed(s): left = "\\boxed{" try: assert s[: len(left)] == left assert s[-1] == "}" return s[len(left) : -1] except Exception: return None def extract_boxed_answer(solution: str) -> str: """Extract the answer from inside a LaTeX \\boxed{} command""" solution = last_boxed_only_string(solution) solution = remove_boxed(solution) return solution def grade_answer_sympy(given_answer: str, ground_truth: str) -> bool: ground_truth_normalized = _normalize(ground_truth) given_normalized = _normalize(given_answer) if ground_truth_normalized is None: return False if ground_truth_normalized == given_normalized: return True if len(given_normalized) == 0: return False ground_truth_elems = split_tuple(ground_truth_normalized) given_elems = split_tuple(given_normalized) if len(ground_truth_elems) > 1 and ( ground_truth_normalized[0] != given_normalized[0] or ground_truth_normalized[-1] != given_normalized[-1] ): is_correct = False elif len(ground_truth_elems) != len(given_elems): is_correct = False else: for ground_truth_elem, given_elem in zip(ground_truth_elems, given_elems, strict=True): if _is_frac(ground_truth_elem) and _is_frac(given_elem): # if fractions aren't reduced, then shouldn't be marked as correct # so, we don't want to allow sympy.simplify in this case is_correct = ground_truth_elem == given_elem elif _str_is_int(ground_truth_elem) != _str_is_int(given_elem): # if the ground truth answer is an integer, we require the given answer to be a strict match # (no sympy.simplify) is_correct = False else: is_correct = are_equal_under_sympy(ground_truth_elem, given_elem) if not is_correct: break return is_correct def grade_answer_mathd(given_answer: str, ground_truth: str) -> bool: ground_truth_normalized_mathd = mathd_normalize_answer(ground_truth) given_answer_normalized_mathd = mathd_normalize_answer(given_answer) # be at least as lenient as mathd if ground_truth_normalized_mathd == given_answer_normalized_mathd: return True return False def extract_answer(passage: str) -> str: if "\\boxed" in passage: return extract_boxed_answer(passage) return None def grade(model_answer: str, gt_answer: str, fast: bool = True): if "\\boxed" in gt_answer: gt_answer = extract_answer(gt_answer) correct = grade_answer_mathd(model_answer, gt_answer) or grade_answer_sympy(model_answer, gt_answer) if not fast: # This mode further uses math_verify to recall originally false positives. # Will be a bit slower, and sensitive to bad inputs. correct = correct or is_latex_equal( model_answer, gt_answer, ) return correct def compute_score(model_response, gt_answer, fast=False): model_answer = extract_answer(model_response) if model_answer is None: return { "score": 0.0, "format_score": 0.0, "acc": False, "extracted_gt": gt_answer, # "extracted_pred": None, } # return 0.0, 0.0 # Cannot even parse anything. 
is_correct = False if isinstance(gt_answer, float) or isinstance(gt_answer, int): gt_answer = str(gt_answer) if isinstance(gt_answer, str): is_correct = grade(model_answer, gt_answer, fast) elif isinstance(gt_answer, list): is_correct = False for gt in gt_answer: is_correct |= grade(model_answer, gt, fast) if is_correct: return { "score": 1.0, "format_score": 1.0, "acc": True, "extracted_gt": gt_answer, # "extracted_pred": None, } else: return { "score": 0.0, "format_score": 1.0, "acc": False, "extracted_gt": gt_answer, # "extracted_pred": None, } ================================================ FILE: verl_distillation/recipe/entropy/reward_score/entropy_math/grader.py ================================================ # Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Copyright (c) Microsoft Corporation. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE # Copyright (c) 2023 OpenAI # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
# Copyright (c) 2021 Dan Hendrycks # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # Copyright 2024 PRIME team and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This logic is largely copied from the Hendrycks' MATH release (math_equivalence), and borrowed from: - https://github.com/microsoft/ToRA/blob/main/src/eval/grader.py - https://github.com/microsoft/ProphetNet/tree/master/CRITIC - https://github.com/openai/prm800k """ import contextlib import math import re from math import isclose # sympy related from sympy import N, simplify from sympy.parsing.latex import parse_latex from sympy.parsing.sympy_parser import parse_expr # verl related from verl.utils.py_functional import timeout_limit def is_digit(s): try: if "{,}" in str(s): num = float(str(s).replace("{,}", "")) return True, num num = float(str(s).replace(",", "")) return True, num except ValueError: return False, None def normalize(answer, pi) -> str: # checking if answer is $ and removing $ in that case to compare if isinstance(answer, str) and bool(re.match(r"\$\d+(\.\d+)?", answer)): return answer[1:] # checking if answer is % or \\% and removing % if isinstance(answer, str) and ( bool(re.match(r"^\d+(\.\d+)?%$", answer)) or bool(re.match(r"^\d+(\.\d+)?\\%$", answer)) ): return answer.replace("\\%", "").replace("%", "") # handle base answer = handle_base(answer) # handle pi answer = handle_pi(answer, pi) return answer def handle_base(x) -> str: if isinstance(x, str) and "_" in x: # Due to base x = x.split("_")[0] x = float(x) return int(x) return x def handle_pi(string, pi): if isinstance(string, str) and "\\pi" in string: # Find the first occurrence of "\pi" idx = string.find("\\pi") # Iterate over the string and find all occurrences of "\pi" with a valid previous character while idx != -1: if idx > 0 and string[idx - 1].isdigit(): # Replace "\pi" with "*math.pi" if the previous character is a digit string = string[:idx] + f"*{pi}" + string[idx + 3 :] else: # Replace "\pi" with "1*math.pi" if the previous character is not a digit 
string = string[:idx] + f"1*{pi}" + string[idx + 3 :] # Find the next occurrence of "\pi" idx = string.find("\\pi", idx + 1) # Evaluate the expression using eval() function with contextlib.suppress(Exception): string = eval(string) return string def math_equal( prediction: bool | float | str, reference: float | str, include_percentage: bool = True, tolerance: float = 1e-4, timeout: float = 10.0, pi: float = math.pi, ) -> bool: """ Exact match of math if and only if: 1. numerical equal: both can convert to float and are equal 2. symbolic equal: both can convert to sympy expression and are equal """ prediction = normalize(prediction, pi) reference = normalize(reference, pi) if isinstance(prediction, str) and len(prediction) > 1000: # handling weird corner-cases prediction = prediction[:1000] # 0. string comparison if isinstance(prediction, str) and isinstance(reference, str): if prediction.strip().lower() == reference.strip().lower(): return True if prediction.replace(" ", "") == reference.replace(" ", ""): return True try: # 1. numerical equal if is_digit(prediction)[0] and is_digit(reference)[0]: prediction = is_digit(prediction)[1] reference = is_digit(reference)[1] # number questions gt_result = [reference / 100, reference, reference * 100] if include_percentage else [reference] for item in gt_result: try: if isclose(item, prediction, rel_tol=tolerance): return True except Exception: continue return False except Exception: pass if not prediction and prediction not in [0, False]: return False # 2. symbolic equal reference = str(reference).strip() prediction = str(prediction).strip() ## deal with [], (), {} prediction = format_intervals(prediction) pred_str, ref_str = prediction, reference if (prediction.startswith("[") and prediction.endswith("]") and not reference.startswith("(")) or ( prediction.startswith("(") and prediction.endswith(")") and not reference.startswith("[") ): pred_str = pred_str.strip("[]()") ref_str = ref_str.strip("[]()") for s in ["{", "}", "(", ")"]: ref_str = ref_str.replace(s, "") pred_str = pred_str.replace(s, "") if pred_str == ref_str: return True ## [a, b] vs. 
[c, d], return a==c and b==d if ( prediction and reference and prediction[0] in "([" and prediction[-1] in ")]" and prediction[0] == reference[0] and prediction[-1] == reference[-1] ): pred_parts = prediction[1:-1].split(",") ref_parts = reference[1:-1].split(",") if len(pred_parts) == len(ref_parts) and all( [ math_equal(pred_pt, ref_pt, include_percentage, tolerance) for pred_pt, ref_pt in zip(pred_parts, ref_parts, strict=True) ] ): return True if "," in prediction and "," in reference: pred_parts = [item.strip() for item in prediction.split(",")] ref_parts = [item.strip() for item in reference.split(",")] if len(pred_parts) == len(ref_parts): return bool( all( [ math_equal(pred_parts[i], ref_parts[i], include_percentage, tolerance) for i in range(len(pred_parts)) ] ) ) # if we have point == tuple of values if prediction.startswith("Point") and reference[0] == "(" and reference[-1] == ")": pred_parts = prediction[prediction.find("(") + 1 : -1].split(",") ref_parts = reference[1:-1].split(",") if len(pred_parts) == len(ref_parts) and all( [ math_equal(pred_pt, ref_pt, include_percentage, tolerance) for pred_pt, ref_pt in zip(pred_parts, ref_parts, strict=True) ] ): return True # if reference is a matrix if r"\begin{pmatrix}" in reference and prediction.startswith("Matrix"): try: pred_matrix = parse_expr(prediction) ref_matrix_items = reference.split()[1:-1:2] if len(pred_matrix) == len(ref_matrix_items) and all( [ math_equal(pred, ref, include_percentage, tolerance) for ref, pred in zip(ref_matrix_items, pred_matrix, strict=True) ] ): return True except Exception: pass elif r"\begin{pmatrix}" in reference and prediction.startswith("[") and prediction.endswith("]"): if isinstance(eval(prediction), list): try: pred_matrix = eval(prediction) # ref_matrix_items = reference.split()[1:-1:2] ref_matrix_items = ( reference.removeprefix(r"\\begin{pmatrix}") .removeprefix(r"\begin{pmatrix}") .removesuffix(r"\\end{pmatrix}") .removesuffix(r"\end{pmatrix}") ) ref_matrix_items = ref_matrix_items.split("\\") ref_matrix_items = [row.split("&") if "&" in row else row for row in ref_matrix_items] if len(pred_matrix) == len(ref_matrix_items) and all( [ math_equal(pred, ref, include_percentage, tolerance) for ref, pred in zip(ref_matrix_items, pred_matrix, strict=True) ] ): return True except Exception: pass return symbolic_equal(prediction, reference, tolerance, timeout) def symbolic_equal(a, b, tolerance, timeout=10.0): def _parse(s): for f in [parse_expr, parse_latex]: try: with timeout_limit(seconds=timeout): return f(s) except TimeoutError: print(f"Parsing timed out for {s}") continue except Exception: continue return s a = _parse(a) b = _parse(b) try: with timeout_limit(seconds=timeout): if simplify(a - b) == 0: return True except TimeoutError: print(f"Simplification timed out for {a} - {b}") pass except Exception: pass try: with timeout_limit(seconds=timeout): if isclose(N(a), N(b), rel_tol=tolerance): return True except TimeoutError: print(f"Numerical evaluation timed out for {a}, {b}") pass except Exception: pass return False def format_intervals(prediction): patterns = { "Interval(": r"^Interval\((.*)\)$", "Interval.Ropen(": r"^Interval\.Ropen\((.*)\)$", "Interval.Lopen(": r"^Interval\.Lopen\((.*)\)$", "Interval.open(": r"^Interval\.open\((.*)\)$", } for key, pattern in patterns.items(): match = re.match(pattern, prediction) if match: inner_content = match.group(1) if key == "Interval(": # Interval(a, b) == [a, b] return f"[{inner_content}]" elif key == "Interval.Ropen(": # Interval.Ropen(a, b) 
== [a, b) return f"[{inner_content})" elif key == "Interval.Lopen(": # Intarval.Lopen(a, b) == (a, b] return f"({inner_content}]" elif key == "Interval.open(": # Intarval.open(a, b) == (a, b) return f"({inner_content})" return prediction ================================================ FILE: verl_distillation/recipe/entropy/reward_score/entropy_math/math_normalize.py ================================================ # Copyright 2024 PRIME team and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Copyright (c) 2021 Dan Hendrycks # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. """ This logic is largely copied from the Hendrycks' MATH release (math_equivalence). From: https://github.com/openai/prm800k/blob/main/prm800k/grading/math_normalize.py """ import re from typing import Optional def normalize_answer(answer: Optional[str]) -> Optional[str]: if answer is None: return None answer = answer.strip() try: # Remove enclosing `\text{}`. 
m = re.search(r"^\\text\{(?P.+?)\}$", answer) if m is not None: answer = m.group("text").strip() return _strip_string(answer) except Exception: return answer def _fix_fracs(string): substrs = string.split("\\frac") new_str = substrs[0] if len(substrs) > 1: substrs = substrs[1:] for substr in substrs: new_str += "\\frac" if substr[0] == "{": new_str += substr else: try: assert len(substr) >= 2 except Exception: return string a = substr[0] b = substr[1] if b != "{": if len(substr) > 2: post_substr = substr[2:] new_str += "{" + a + "}{" + b + "}" + post_substr else: new_str += "{" + a + "}{" + b + "}" else: if len(substr) > 2: post_substr = substr[2:] new_str += "{" + a + "}" + b + post_substr else: new_str += "{" + a + "}" + b string = new_str return string def _fix_a_slash_b(string): if len(string.split("/")) != 2: return string a = string.split("/")[0] b = string.split("/")[1] try: a = int(a) b = int(b) assert string == "{}/{}".format(a, b) new_string = "\\frac{" + str(a) + "}{" + str(b) + "}" return new_string except Exception: return string def _remove_right_units(string): # "\\text{ " only ever occurs (at least in the val set) when describing units if "\\text{ " in string: splits = string.split("\\text{ ") assert len(splits) == 2 return splits[0] else: return string def _fix_sqrt(string): if "\\sqrt" not in string: return string splits = string.split("\\sqrt") new_string = splits[0] for split in splits[1:]: if split[0] != "{": a = split[0] new_substr = "\\sqrt{" + a + "}" + split[1:] else: new_substr = "\\sqrt" + split new_string += new_substr return new_string def _strip_string(string): # linebreaks string = string.replace("\n", "") # remove inverse spaces string = string.replace("\\!", "") # replace \\ with \ string = string.replace("\\\\", "\\") # replace tfrac and dfrac with frac string = string.replace("tfrac", "frac") string = string.replace("dfrac", "frac") # remove \left and \right string = string.replace("\\left", "") string = string.replace("\\right", "") # Remove circ (degrees) string = string.replace("^{\\circ}", "") string = string.replace("^\\circ", "") # remove dollar signs string = string.replace("\\$", "") # remove units (on the right) string = _remove_right_units(string) # remove percentage string = string.replace("\\\\%", "") string = string.replace("\\%", "") # " 0." equivalent to " ." and "{0." equivalent to "{." Alternatively, add "0" if "." is the start of the string string = string.replace(" .", " 0.") string = string.replace("{.", "{0.") # if empty, return empty string if len(string) == 0: return string if string[0] == ".": string = "0" + string # to consider: get rid of e.g. "k = " or "q = " at beginning if len(string.split("=")) == 2 and len(string.split("=")[0]) <= 2: string = string.split("=")[1] # fix sqrt3 --> sqrt{3} string = _fix_sqrt(string) # remove spaces string = string.replace(" ", "") # \frac1b or \frac12 --> \frac{1}{b} and \frac{1}{2}, etc. Even works with \frac1{72} (but not \frac{72}1). # Also does a/b --> \\frac{a}{b} string = _fix_fracs(string) # manually change 0.5 --> \frac{1}{2} if string == "0.5": string = "\\frac{1}{2}" # NOTE: X/Y changed to \frac{X}{Y} in dataset, but in simple cases fix in case the model output is X/Y string = _fix_a_slash_b(string) return string ================================================ FILE: verl_distillation/recipe/fapo/README.md ================================================

# FAPO: Flawed-Aware Policy Optimization for Efficient and Reliable Reasoning


- **Algorithm Insights:** Visit our [Project Page](https://fapo-rl.github.io/) for an overview; comprehensive details are available in the [Paper]().
- **Infrastructure Design:** Refer to the [Reward Loop](https://verl.readthedocs.io/en/latest/advance/reward_loop.html) document for architectural insights.
- **Open-Source Software:** Explore the [Huggingface Collections](https://huggingface.co/collections/dyyyyyyyy/fapo) for datasets and models.

![fapo-result](https://fapo-rl.github.io/_astro/intro_main.DKe72RHX_1Us2HB.webp)

## Step 1: Train FAPO-GenRM-4B (Generative Reward Model)

We provide our training and evaluation datasets [here](https://huggingface.co/datasets/dyyyyyyyy/FAPO-Critic). Download them directly to `${RAY_DATA_HOME}/data/`. Then, submit the training job to the Ray cluster:

```bash
cd verl # Repo root
export RAY_ADDRESS="..." # The Ray cluster address to connect to
export RAY_DATA_HOME="..." # The directory to store the data
export WORKING_DIR="${PWD}" # The local directory to package to the Ray cluster
# Set the runtime environment like env vars and pip packages for the Ray cluster in yaml
export RUNTIME_ENV="./recipe/fapo/runtime_env.yaml" # This sets environment variables for the Ray cluster
bash recipe/fapo/run_fapo_genrm_train.sh
```

You can skip this step if you want to use the pre-trained FAPO-GenRM-4B model available [here](https://huggingface.co/dyyyyyyyy/FAPO-GenRM-4B).

## Step 2: Integrate the GRM into the Final Training

Our training data is identical to that of DAPO-Math-17K, except that we replace the instruction with "Put the final answer in \boxed{}", which is a common practice for current instruct models. You can construct the training and evaluation datasets by:

```bash
python recipe/fapo/prepare_fapo_data.py --local_dir ${RAY_DATA_HOME}/data/
```

Or you can directly use the data available [here](https://huggingface.co/datasets/dyyyyyyyy/FAPO-Reasoning-Dataset).

To integrate the GRM into the final training, we provide two options:

1. **Launch GRM as an external service:** Launch multiple model servers and a router in advance to handle and dispatch incoming requests. Refer to `verl/recipe/genrm_remote` for more details. The scripts are `verl/recipe/fapo/run_fapo_{7b/32b}_remote.sh`.
2. **Launch GRM in verl single controller:** Start the GRM model directly inside the verl single controller with an integrated router. (Note: this feature is still unstable for large-scale training scenarios.)

```bash
cd verl # Repo root
export RAY_ADDRESS="..." # The Ray cluster address to connect to
export WORKING_DIR="${PWD}" # The local directory to package to the Ray cluster
# Set the runtime environment like env vars and pip packages for the Ray cluster in yaml
export RUNTIME_ENV="./recipe/fapo/runtime_env.yaml" # This sets environment variables for the Ray cluster

# run Baseline Models
bash recipe/fapo/run_baseline_7b.sh # 7b baseline model
bash recipe/fapo/run_baseline_32b.sh # 32b baseline model

# run FAPO Models (with external GRM service)
# Note that you should launch the external GRM service first,
# and specify the router address in the compute_score function
bash recipe/fapo/run_fapo_7b_remote.sh # 7b fapo model
bash recipe/fapo/run_fapo_32b_remote.sh # 32b fapo model

# run FAPO Models (single controller mode)
bash recipe/fapo/run_fapo_7b.sh # 7b fapo model
bash recipe/fapo/run_fapo_32b.sh # 32b fapo model
```

## Infrastructure Design

We implement RewardLoop to enable efficient and flexible reward computation.
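As a rough illustration of how a reward function plugs into this loop, the sketch below follows the shape of `recipe/fapo/reward_fn_reasoning_remote.py` (included later in this dump): a cheap rule-based check runs first, and only correct training rollouts are sent to the external GRM, which flags flawed positives. The router address, model name, prompt wording, and the exact-match check are placeholders, not the recipe's actual implementation:

```python
import json

import aiohttp

ROUTER_ADDRESS = "127.0.0.1:30000"  # placeholder: your GRM router endpoint


async def query_grm(prompt: str) -> str:
    # One OpenAI-style chat-completion round trip to the external GRM router.
    payload = {
        "model": "FAPO-GenRM-4B",
        "messages": [{"role": "user", "content": prompt}],
        "max_tokens": 1024,
    }
    async with aiohttp.ClientSession() as session:
        async with session.post(f"http://{ROUTER_ADDRESS}/v1/chat/completions", json=payload) as resp:
            body = json.loads(await resp.text())
            return body["choices"][0]["message"]["content"]


async def compute_score(data_source, solution_str, ground_truth, extra_info, **kwargs) -> dict:
    # Cheap rule-based verification first (placeholder: exact string match;
    # the recipe extracts and normalizes the boxed answer instead).
    correct = solution_str.strip() == str(ground_truth).strip()
    score = 1.0 if correct else -1.0
    # Only correct training rollouts are escalated to the GRM.
    if correct and extra_info.get("split") == "train":
        critique = await query_grm(
            "Review this solution step by step and return the index of the "
            f"earliest flawed step in \\boxed{{}} (-1 if none):\n{solution_str}"
        )
        if "\\boxed{-1}" not in critique:
            score = 0.0  # flawed positive: withhold the reward
    return {"score": score, "acc": correct}
```

The exact penalty for flawed positives differs between the recipe variants (the remote variant zeroes the score, the single-controller variant subtracts a fixed penalty); see the reward function files later in this dump for the actual logic.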
The core implementation can be found in `verl/experimental/reward/`. Refer to [this official document](https://verl.readthedocs.io/en/latest/advance/reward_loop.html) for more implementation details. ================================================ FILE: verl_distillation/recipe/fapo/config/rm_config.yaml ================================================ hydra: searchpath: - file://verl/trainer/config defaults: - ppo_trainer - _self_ reward_model: _target_: verl.workers.config.RewardModelConfig reward_manager: dapo enable: False # Whether to deploy the model to a separate resource pool. enable_resource_pool: False n_gpus_per_node: 0 nnodes: 0 model: type: discriminative path: ~/models/FsfairX-LLaMA3-RM-v0.1 external_lib: ${actor_rollout_ref.model.external_lib} trust_remote_code: False rollout: _target_: verl.workers.config.RolloutConfig name: ??? dtype: bfloat16 gpu_memory_utilization: 0.5 enforce_eager: true cudagraph_capture_sizes: null free_cache_engine: true data_parallel_size: 1 expert_parallel_size: 1 tensor_model_parallel_size: 2 max_num_batched_tokens: 8192 max_model_len: null max_num_seqs: 1024 load_format: auto engine_kwargs: {} limit_images: null enable_chunked_prefill: true enable_prefix_caching: true disable_log_stats: true skip_tokenizer_init: true prompt_length: 512 response_length: 512 ================================================ FILE: verl_distillation/recipe/fapo/prepare_fapo_data.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Preprocess the dataset to parquet format """ import argparse import os from functools import partial from datasets import concatenate_datasets, load_dataset from verl.utils.hdfs_io import copy, makedirs def example_map_fn(example, idx, process_fn, data_source, ability, split): question, prompt, ground_truth = process_fn(example) data = { "data_source": data_source, "prompt": [{"role": "user", "content": prompt}], "ability": ability, "reward_model": {"style": "rule", "ground_truth": ground_truth}, "extra_info": {"split": split, "index": idx, "question": question}, } return data def build_aime2024_dataset(): def process_aime2024(example): question, ground_truth = example["Problem"], str(example["Answer"]) prompt = question.strip() + "\n\n" + "Please reason step by step, and put your final answer within \\boxed{}." 
return question, prompt, ground_truth data_source = "Maxwell-Jia/AIME_2024" print(f"Loading the {data_source} dataset from huggingface...", flush=True) dataset = load_dataset(data_source, split="train") map_fn = partial(example_map_fn, process_fn=process_aime2024, data_source="aime24", ability="Math", split="test") dataset = dataset.map(map_fn, with_indices=True, remove_columns=dataset.column_names) return dataset def build_aime2025_dataset(): def process_aime2025(example): question, ground_truth = example["problem"], str(example["solution"]) prompt = question.strip() + "\n\n" + "Please reason step by step, and put your final answer within \\boxed{}." return question, prompt, ground_truth data_source = "yentinglin/aime_2025" print(f"Loading the {data_source} dataset from huggingface...", flush=True) dataset = load_dataset(data_source, split="train") map_fn = partial(example_map_fn, process_fn=process_aime2025, data_source="aime25", ability="Math", split="test") dataset = dataset.map(map_fn, with_indices=True, remove_columns=dataset.column_names) return dataset def build_gpqa_diamond_dataset(): import random GPQA_QUERY_TEMPLATE = ( "{Question}\n" "A. {A}\nB. {B}\nC. {C}\nD. {D}\n\n" "Please reason step by step, and put your final answer (only the choice letter) within \\boxed{{}}." ) def process_gpqa_diamond(example): choices = [ example["Incorrect Answer 1"].strip(), example["Incorrect Answer 2"].strip(), example["Incorrect Answer 3"].strip(), ] random.shuffle(choices) gold_index = random.randint(0, 3) choices.insert(gold_index, example["Correct Answer"].strip()) question = example["Question"] query_prompt = GPQA_QUERY_TEMPLATE.format( A=choices[0], B=choices[1], C=choices[2], D=choices[3], Question=question, ) gold_choice = "ABCD"[gold_index] return question, query_prompt, gold_choice data_source = "Idavidrein/gpqa" print(f"Loading the {data_source} dataset from huggingface...", flush=True) dataset = load_dataset(data_source, "gpqa_diamond", split="train") map_fn = partial( example_map_fn, process_fn=process_gpqa_diamond, data_source="gpqa-diamond", ability="General", split="test" ) dataset = dataset.map(map_fn, with_indices=True, remove_columns=dataset.column_names) return dataset def build_dapo_train_dataset(): def process_dapo(example): question, ground_truth = example["prompt"], example["solution"] prompt = question.strip() + "\n\n" + "Please reason step by step, and put your final answer within \\boxed{}." 
return question, prompt, ground_truth data_source = "open-r1/DAPO-Math-17k-Processed" print(f"Loading the {data_source} dataset from huggingface...", flush=True) dataset = load_dataset(data_source, "all", split="train") map_fn = partial(example_map_fn, process_fn=process_dapo, data_source="math-dapo", ability="Math", split="train") dataset = dataset.map(map_fn, with_indices=True, remove_columns=dataset.column_names) return dataset if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--local_dir", default="~/data/genrm") parser.add_argument("--hdfs_dir", default=None) parser.add_argument("--tasks", default="all") args = parser.parse_args() train_dataset = build_dapo_train_dataset() train_dataset = concatenate_datasets([train_dataset for _ in range(20)]) test_datasets = [] # AIME 2024 aime24_dataset = build_aime2024_dataset() test_datasets.extend([aime24_dataset for _ in range(32)]) # AIME 2025 aime25_dataset = build_aime2025_dataset() test_datasets.extend([aime25_dataset for _ in range(32)]) # GPQA Diamond gpqa_dataset = build_gpqa_diamond_dataset() test_datasets.extend([gpqa_dataset for _ in range(4)]) test_dataset = concatenate_datasets(test_datasets) local_dir = args.local_dir hdfs_dir = args.hdfs_dir train_dataset.to_parquet(os.path.join(local_dir, "fapo-train-boxed.parquet")) test_dataset.to_parquet(os.path.join(local_dir, "fapo-test-full-boxed.parquet")) if hdfs_dir is not None: makedirs(hdfs_dir) copy(src=local_dir, dst=hdfs_dir) ================================================ FILE: verl_distillation/recipe/fapo/reward_fn_genrm.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# Adapted from https://github.com/EleutherAI/lm-evaluation-harness/blob/main/lm_eval/tasks/hendrycks_math/utils.py from verl.utils.reward_score.math_dapo import last_boxed_only_string, remove_boxed def parse_ans( solution_str: str, total_steps: int, ) -> int | None: try: boxed_answer = last_boxed_only_string(solution_str[-300:]) extracted_answer = int(remove_boxed(boxed_answer)) if extracted_answer == -1 or 0 <= extracted_answer < total_steps: return extracted_answer else: return None except Exception: return None def compute_score_fapo_genrm( solution_str: str, ground_truth: int, extra_info: dict, **kwargs, ) -> dict: # Verify the solution total_steps = extra_info["total_steps"] extracted_answer = parse_ans(solution_str, total_steps) gt = "correct" if ground_truth == -1 else "incorrect" pred = "correct" if extracted_answer == -1 else "incorrect" if extracted_answer is None: pred = "[INVALID]" acc = gt == pred # reward = 1.0 if acc else -1.0 if extracted_answer is None: reward = -1.0 elif ground_truth == -1: reward = 1.0 if extracted_answer == -1 else -1.0 else: # ground truth != -1 if extracted_answer == -1: reward = -1.0 else: # gt != -1, pred != -1 reward = 1.0 reward -= abs(extracted_answer - ground_truth) / total_steps return { "score": reward, "acc": acc, "pred": extracted_answer, "gt": ground_truth, } ================================================ FILE: verl_distillation/recipe/fapo/reward_fn_reasoning.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import asyncio import json import logging import os import aiohttp from transformers import PreTrainedTokenizer from verl.utils.reward_score.math_dapo import last_boxed_only_string, normalize_final_answer, remove_boxed logger = logging.getLogger(__name__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) def verify( solution_str: str, gt: str, ) -> tuple[bool, str]: solution_str = solution_str[-300:] boxed_answer = last_boxed_only_string(solution_str) if boxed_answer is not None: extracted_answer = remove_boxed(boxed_answer) else: extracted_answer = "[INVALID]" pred = normalize_final_answer(extracted_answer) gt = normalize_final_answer(gt) return (pred == gt), pred async def compute_score_baseline( solution_str: str, ground_truth: str, **kwargs, ): """Compute the reward score for Baseline.""" loop = asyncio.get_running_loop() correct, pred = await loop.run_in_executor(None, lambda: verify(solution_str, ground_truth)) reward_score = 1.0 if correct else -1.0 return {"score": reward_score, "acc": correct, "pred": pred} # FAPO Hyper-parameters FAPO_GENRM_TEMPLATE = ( "The following is a math problem with its ground truth answer, along with an AI solution (split into steps):\n\n" "[Math Problem]\n\n" "{problem}\n\n" "[Ground Truth]\n\n" "{ground_truth}\n\n" "[AI Solution]\n\n" "{solution}\n\n" "Your task is to review and critique the solution step by step. 
" "Once you identify an error in a step, return the index of the step where the earliest error occurs. " "Otherwise, return the index of -1 (which typically denotes 'not found').\n\n" "Please reason step by step, put your final answer (i.e., the index) in \\boxed{{}}." ) GRM_SAMPLING_PARAMS = { "max_new_tokens": 16384, } FLAWED_REWARD_PENALTY = 1.0 async def generate_aiohttp(router_address: str, prompt_ids: list[int], sampling_params: dict): payload = { "input_ids": prompt_ids, "sampling_params": sampling_params, } url = f"http://{router_address}/generate" try: session = aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=None)) async with session.post(url, json=payload) as resp: output = await resp.text() try: output = json.loads(output) return output except Exception: logger.error(f"Failed to parse JSON response: {output}") return {} finally: await session.close() async def compute_score_fapo( data_source: str, solution_str: str, ground_truth: str, extra_info: dict, reward_router_address: str, reward_model_tokenizer: PreTrainedTokenizer, ): """Compute the reward score for FAPO.""" loop = asyncio.get_running_loop() question, split = extra_info["question"], extra_info["split"] correct, pred = await loop.run_in_executor(None, lambda: verify(solution_str, ground_truth)) reward_score = 1.0 if correct else -1.0 is_flawed_positive = False # for test set or incorrect solution, directly return the reward score if split == "test" or not correct: return {"score": reward_score, "acc": correct, "pred": pred, "is_flawed_positive": is_flawed_positive} grm_prompt = FAPO_GENRM_TEMPLATE.format( problem=question, ground_truth=ground_truth, solution=solution_str, ) grm_prompt_ids = await loop.run_in_executor( None, lambda: reward_model_tokenizer.apply_chat_template( [{"role": "user", "content": grm_prompt}], tokenize=True, add_generation_prompt=True, ), ) grm_outputs = await generate_aiohttp( router_address=reward_router_address, prompt_ids=grm_prompt_ids, sampling_params=GRM_SAMPLING_PARAMS, ) grm_response_ids = grm_outputs.get("output_ids", None) if grm_response_ids is not None: grm_response = await loop.run_in_executor( None, lambda: reward_model_tokenizer.decode(grm_response_ids, skip_special_tokens=True) ) try: err_location = remove_boxed(last_boxed_only_string(grm_response)) is_flawed_positive = int(err_location) != -1 except Exception: is_flawed_positive = False if is_flawed_positive: reward_score -= FLAWED_REWARD_PENALTY return {"score": reward_score, "acc": correct, "pred": pred, "is_flawed_positive": is_flawed_positive} ================================================ FILE: verl_distillation/recipe/fapo/reward_fn_reasoning_remote.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
================================================
FILE: verl_distillation/recipe/fapo/reward_fn_reasoning_remote.py
================================================
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json

import aiohttp

from verl.utils.reward_score.math_dapo import last_boxed_only_string, normalize_final_answer, remove_boxed


def verify(
    solution_str: str,
    gt: str,
) -> tuple[bool, str]:
    boxed_answer = last_boxed_only_string(solution_str)
    if boxed_answer is not None:
        extracted_answer = remove_boxed(boxed_answer)
    else:
        extracted_answer = "[INVALID]"
    pred = normalize_final_answer(extracted_answer)
    gt = normalize_final_answer(gt)
    return (pred == gt), pred


def compute_score_baseline(
    solution_str: str,
    ground_truth: str,
    **kwargs,
) -> dict:
    # Limit solution length for efficiency
    solution_str = solution_str[-300:]  # The longest answer in MATH-500 has 159 characters

    # Verify the solution
    correct, pred = verify(solution_str, ground_truth)
    reward = 1.0 if correct else -1.0
    acc = correct

    return {
        "score": reward,
        "acc": acc,
        "pred": pred,
    }


ADDRESS = "xx.xx.xx.xx:xxxx"  # placeholder: fill in the GenRM server address
MODEL_NAME = "FAPO-4B-GenRM"

FAPO_GENRM_TEMPLATE = (
    "The following is a math problem with its ground truth answer, along with an AI solution (split into steps):\n\n"
    "[Math Problem]\n\n"
    "{problem}\n\n"
    "[Ground Truth]\n\n"
    "{ground_truth}\n\n"
    "[AI Solution]\n\n"
    "{solution}\n\n"
    "Your task is to review and critique the solution step by step. "
    "Once you identify an error in a step, return the index of the step where the earliest error occurs. "
    "Otherwise, return the index of -1 (which typically denotes 'not found').\n\n"
    "Please reason step by step, put your final answer (i.e., the index) in \\boxed{{}}."
)


async def chat_completions_aiohttp(address, **chat_complete_request):
    try:
        request_url = f"http://{address}/v1/chat/completions"
        timeout = aiohttp.ClientTimeout(total=None)
        session = aiohttp.ClientSession(timeout=timeout)
        async with session.post(
            url=request_url,
            json=chat_complete_request,
        ) as resp:
            output = await resp.text()
            try:
                output = json.loads(output)
                return output["choices"][0]["message"]["content"]
            except Exception as e:
                print(f"Error: {e}. Output: {output}")
                return ""
    finally:
        await session.close()
Output: {output}") return "" finally: await session.close() def judge_fp_process(response, return_err_step=False): try: boxed_result = last_boxed_only_string(response) result = remove_boxed(boxed_result) reward_score = int(eval(result)) != -1 if return_err_step: return reward_score, int(result) return reward_score except Exception: if return_err_step: return None, None return None async def compute_score_fapo(data_source, solution_str, ground_truth, extra_info, keep_genrm_critics=False, **kwargs): question, split = extra_info["question"], extra_info["split"] result = compute_score_baseline(solution_str, ground_truth) result["flawed_positive"] = False if split == "test" or result["acc"] == 0: if keep_genrm_critics: result["genrm_critics"] = "" return result else: prompt = FAPO_GENRM_TEMPLATE.format(problem=question, ground_truth=ground_truth, solution=solution_str) messages = [{"role": "user", "content": prompt}] response = await chat_completions_aiohttp( ADDRESS, messages=messages, model=MODEL_NAME, max_tokens=16384, ) if response is not None and judge_fp_process(response): # flawed positive result["score"] = 0.0 result["flawed_positive"] = True if keep_genrm_critics and response is not None: result["genrm_critics"] = response return result ================================================ FILE: verl_distillation/recipe/fapo/run_baseline_32b.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail project_name='FAPO-Reproduce' exp_name='Baseline-32B' adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 20)) enable_overlong_buffer=True overlong_buffer_len=$((1024 * 4)) overlong_penalty_factor=1.0 loss_agg_mode="token-mean" train_prompt_bsz=512 n_resp_per_prompt=8 train_prompt_mini_bsz=32 # Ray RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} WORKING_DIR=${WORKING_DIR:-"${PWD}"} RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} NNODES=${NNODES:-8} # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen2.5-32B"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/fapo-train-boxed.parquet"} TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/fapo-test-full-boxed.parquet"} # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_top_p=0.7 # Performance Related Parameter sp_size=8 use_dynamic_bsz=True actor_ppo_max_token_len=$((max_prompt_length + max_response_length)) infer_ppo_max_token_len=$((max_prompt_length + max_response_length)) offload=True gen_tp=4 fsdp_size=32 PROJECT_DIR="$(pwd)" CONFIG_PATH="$PROJECT_DIR/recipe/fapo/config" ray job submit --no-wait --runtime-env="${RUNTIME_ENV}" \ --address "${RAY_ADDRESS}" \ --working-dir "${WORKING_DIR}" \ -- python3 -m verl.trainer.main_ppo \ --config-path $CONFIG_PATH \ --config-name rm_config.yaml \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.train_batch_size=${train_prompt_bsz} \ data.return_raw_chat=True \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ 
algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.mode=async \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.rollout.gpu_memory_utilization=0.80 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=True \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.actor.fsdp_config.fsdp_size=${fsdp_size} \ reward_model.reward_manager=dapo \ +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \ +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \ +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \ +reward_model.reward_kwargs.overlong_buffer_cfg.log=True \ +reward_model.reward_kwargs.max_resp_len=${max_response_length} \ custom_reward_function.path=recipe/fapo/reward_fn.py \ custom_reward_function.name=compute_score_baseline \ trainer.logger='["console","wandb"]' \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node=8 \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=True \ trainer.test_freq=10 \ trainer.save_freq=-1 \ trainer.total_epochs=10 \ trainer.total_training_steps=600 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=auto \ trainer.log_val_generations=10 ================================================ FILE: verl_distillation/recipe/fapo/run_baseline_7b.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail 
project_name='FAPO-Reproduce' exp_name='Baseline-7B' adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 8)) enable_overlong_buffer=True overlong_buffer_len=$((1024 * 4)) overlong_penalty_factor=1.0 loss_agg_mode="token-mean" train_prompt_bsz=512 n_resp_per_prompt=8 train_prompt_mini_bsz=32 # Ray RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} WORKING_DIR=${WORKING_DIR:-"${PWD}"} RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} NNODES=${NNODES:-4} # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} # very important! please modify the max_position_embeddings in config.json to 32768 after downloading from huggingface MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen2.5-Math-7B"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/fapo-train-boxed.parquet"} TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/fapo-test-full-boxed.parquet"} # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_top_p=0.7 # Performance Related Parameter sp_size=1 use_dynamic_bsz=True actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 2)) infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 3)) offload=True gen_tp=1 fsdp_size=8 PROJECT_DIR="$(pwd)" CONFIG_PATH="$PROJECT_DIR/recipe/fapo/config" ray job submit --no-wait --runtime-env="${RUNTIME_ENV}" \ --address "${RAY_ADDRESS}" \ --working-dir "${WORKING_DIR}" \ -- python3 -m verl.trainer.main_ppo \ --config-path $CONFIG_PATH \ --config-name rm_config.yaml \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.train_batch_size=${train_prompt_bsz} \ data.return_raw_chat=True \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.model.use_remove_padding=True \ +actor_rollout_ref.model.override_config.max_position_embeddings=32768 \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.mode=async \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.fsdp_config.param_offload=${offload} 
\ actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.rollout.gpu_memory_utilization=0.80 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=True \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.actor.fsdp_config.fsdp_size=${fsdp_size} \ reward_model.reward_manager=dapo \ +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \ +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \ +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \ +reward_model.reward_kwargs.overlong_buffer_cfg.log=True \ +reward_model.reward_kwargs.max_resp_len=${max_response_length} \ custom_reward_function.path=recipe/fapo/reward_fn.py \ custom_reward_function.name=compute_score_baseline \ trainer.logger='["console","wandb"]' \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node=8 \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=True \ trainer.test_freq=10 \ trainer.save_freq=-1 \ trainer.total_epochs=10 \ trainer.total_training_steps=200 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=auto \ trainer.log_val_generations=10 ================================================ FILE: verl_distillation/recipe/fapo/run_fapo_32b.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail project_name='FAPO-Reproduce' exp_name='FAPO-32B' adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 20)) enable_overlong_buffer=True overlong_buffer_len=$((1024 * 4)) overlong_penalty_factor=1.0 loss_agg_mode="token-mean" train_prompt_bsz=512 n_resp_per_prompt=8 train_prompt_mini_bsz=32 # Ray RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} WORKING_DIR=${WORKING_DIR:-"${PWD}"} RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} NNODES=${NNODES:-2} RM_NODES=${RM_NODES:-2} # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen2.5-32B"} GRM_PATH=${GRM_PATH:-"${RAY_DATA_HOME}/models/FAPO-GenRM-4B"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/fapo-train-boxed.parquet"} TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/fapo-test-full-boxed.parquet"} # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_top_p=0.7 # Performance Related Parameter sp_size=8 use_dynamic_bsz=True actor_ppo_max_token_len=$((max_prompt_length + 
max_response_length)) infer_ppo_max_token_len=$((max_prompt_length + max_response_length)) offload=True gen_tp=4 fsdp_size=32 PROJECT_DIR="$(pwd)" CONFIG_PATH="$PROJECT_DIR/recipe/fapo/config" ray job submit --no-wait --runtime-env="${RUNTIME_ENV}" \ --address "${RAY_ADDRESS}" \ --working-dir "${WORKING_DIR}" \ -- python3 -m verl.trainer.main_ppo \ --config-path $CONFIG_PATH \ --config-name rm_config.yaml \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.train_batch_size=${train_prompt_bsz} \ data.return_raw_chat=True \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.mode=async \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.rollout.gpu_memory_utilization=0.80 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=True \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.actor.fsdp_config.fsdp_size=${fsdp_size} \ reward_model.enable=True \ reward_model.enable_resource_pool=True \ reward_model.n_gpus_per_node=8 \ reward_model.nnodes="${RM_NODES}" \ reward_model.model.path=${GRM_PATH} \ reward_model.rollout.name=sglang \ 
reward_model.rollout.gpu_memory_utilization=0.95 \ reward_model.rollout.tensor_model_parallel_size=1 \ reward_model.rollout.free_cache_engine=False \ reward_model.reward_manager=dapo \ +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \ +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \ +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \ +reward_model.reward_kwargs.overlong_buffer_cfg.log=True \ +reward_model.reward_kwargs.max_resp_len=${max_response_length} \ custom_reward_function.path=recipe/fapo/reward_fn.py \ custom_reward_function.name=compute_score_fapo \ trainer.logger='["console","wandb"]' \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node=8 \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=True \ trainer.test_freq=10 \ trainer.save_freq=-1 \ trainer.total_epochs=10 \ trainer.total_training_steps=600 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=auto \ trainer.log_val_generations=10 ================================================ FILE: verl_distillation/recipe/fapo/run_fapo_32b_remote.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail project_name='FAPO-Reproduce' exp_name='FAPO-32B' adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 20)) enable_overlong_buffer=True overlong_buffer_len=$((1024 * 4)) overlong_penalty_factor=1.0 loss_agg_mode="token-mean" train_prompt_bsz=512 n_resp_per_prompt=8 train_prompt_mini_bsz=32 # Ray RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} WORKING_DIR=${WORKING_DIR:-"${PWD}"} RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} NNODES=${NNODES:-4} # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen2.5-32B"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/fapo-train-boxed.parquet"} TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/fapo-test-full-boxed.parquet"} # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_top_p=0.7 # Performance Related Parameter sp_size=8 use_dynamic_bsz=True actor_ppo_max_token_len=$((max_prompt_length + max_response_length)) infer_ppo_max_token_len=$((max_prompt_length + max_response_length)) offload=True gen_tp=4 fsdp_size=32 PROJECT_DIR="$(pwd)" CONFIG_PATH="$PROJECT_DIR/recipe/fapo/config" ray job submit --no-wait --runtime-env="${RUNTIME_ENV}" \ --address "${RAY_ADDRESS}" \ --working-dir "${WORKING_DIR}" \ -- python3 -m verl.trainer.main_ppo \ --config-path $CONFIG_PATH \ --config-name rm_config.yaml \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.train_batch_size=${train_prompt_bsz} \ data.return_raw_chat=True \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ 
actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.mode=async \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.rollout.gpu_memory_utilization=0.80 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=True \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.actor.fsdp_config.fsdp_size=${fsdp_size} \ reward_model.reward_manager=dapo \ +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \ +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \ +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \ +reward_model.reward_kwargs.overlong_buffer_cfg.log=True \ +reward_model.reward_kwargs.max_resp_len=${max_response_length} \ custom_reward_function.path=recipe/fapo/reward_fn_reasoning_remote.py \ custom_reward_function.name=compute_score_fapo \ trainer.logger='["console","wandb"]' \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node=8 \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=True \ trainer.test_freq=10 \ trainer.save_freq=-1 \ trainer.total_epochs=10 \ trainer.total_training_steps=600 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=auto \ trainer.log_val_generations=10 ================================================ FILE: verl_distillation/recipe/fapo/run_fapo_7b.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail project_name='FAPO-Reproduce' exp_name='FAPO-7B' adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 
clip_ratio_low=0.2 clip_ratio_high=0.28 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 8)) enable_overlong_buffer=True overlong_buffer_len=$((1024 * 4)) overlong_penalty_factor=1.0 loss_agg_mode="token-mean" train_prompt_bsz=512 n_resp_per_prompt=8 train_prompt_mini_bsz=32 # Ray RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} WORKING_DIR=${WORKING_DIR:-"${PWD}"} RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} NNODES=${NNODES:-2} RM_NODES=${RM_NODES:-2} # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} # very important! please modify the max_position_embeddings in config.json to 32768 after downloading from huggingface MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen2.5-Math-7B"} GRM_PATH=${GRM_PATH:-"${RAY_DATA_HOME}/models/FAPO-GenRM-4B"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/fapo-train-boxed.parquet"} TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/fapo-test-full-boxed.parquet"} # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_top_p=0.7 # Performance Related Parameter sp_size=1 use_dynamic_bsz=True actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 2)) infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 3)) offload=True gen_tp=1 fsdp_size=8 PROJECT_DIR="$(pwd)" CONFIG_PATH="$PROJECT_DIR/recipe/fapo/config" ray job submit --no-wait --runtime-env="${RUNTIME_ENV}" \ --address "${RAY_ADDRESS}" \ --working-dir "${WORKING_DIR}" \ -- python3 -m verl.trainer.main_ppo \ --config-path $CONFIG_PATH \ --config-name rm_config.yaml \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.train_batch_size=${train_prompt_bsz} \ data.return_raw_chat=True \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.model.use_remove_padding=True \ +actor_rollout_ref.model.override_config.max_position_embeddings=32768 \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.mode=async \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \ 
actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.rollout.gpu_memory_utilization=0.80 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=True \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.actor.fsdp_config.fsdp_size=${fsdp_size} \ reward_model.enable=True \ reward_model.enable_resource_pool=True \ reward_model.n_gpus_per_node=8 \ reward_model.nnodes="${RM_NODES}" \ reward_model.model.path=${GRM_PATH} \ reward_model.rollout.name=sglang \ reward_model.rollout.gpu_memory_utilization=0.95 \ reward_model.rollout.tensor_model_parallel_size=1 \ reward_model.rollout.free_cache_engine=False \ reward_model.reward_manager=dapo \ +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \ +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \ +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \ +reward_model.reward_kwargs.overlong_buffer_cfg.log=True \ +reward_model.reward_kwargs.max_resp_len=${max_response_length} \ custom_reward_function.path=recipe/fapo/reward_fn.py \ custom_reward_function.name=compute_score_fapo \ trainer.logger='["console","wandb"]' \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node=8 \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=True \ trainer.test_freq=10 \ trainer.save_freq=-1 \ trainer.total_epochs=10 \ trainer.total_training_steps=200 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=auto \ trainer.log_val_generations=10 ================================================ FILE: verl_distillation/recipe/fapo/run_fapo_7b_remote.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail project_name='FAPO-Reproduce' exp_name='FAPO-7B' adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 8)) enable_overlong_buffer=True overlong_buffer_len=$((1024 * 4)) overlong_penalty_factor=1.0 loss_agg_mode="token-mean" train_prompt_bsz=512 n_resp_per_prompt=8 train_prompt_mini_bsz=32 # Ray RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} WORKING_DIR=${WORKING_DIR:-"${PWD}"} RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} NNODES=${NNODES:-2} # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} # very important! 
please modify the max_position_embeddings in config.json to 32768 after downloading from huggingface MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen2.5-Math-7B"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/fapo-train-boxed.parquet"} TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/fapo-test-full-boxed.parquet"} # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_top_p=0.7 # Performance Related Parameter sp_size=1 use_dynamic_bsz=True actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 2)) infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 3)) offload=True gen_tp=1 fsdp_size=8 PROJECT_DIR="$(pwd)" CONFIG_PATH="$PROJECT_DIR/recipe/fapo/config" ray job submit --no-wait --runtime-env="${RUNTIME_ENV}" \ --address "${RAY_ADDRESS}" \ --working-dir "${WORKING_DIR}" \ -- python3 -m verl.trainer.main_ppo \ --config-path $CONFIG_PATH \ --config-name rm_config.yaml \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.train_batch_size=${train_prompt_bsz} \ data.return_raw_chat=True \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.model.use_remove_padding=True \ +actor_rollout_ref.model.override_config.max_position_embeddings=32768 \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.mode=async \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.rollout.gpu_memory_utilization=0.80 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k=${top_k} \ 
actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=True \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.actor.fsdp_config.fsdp_size=${fsdp_size} \ reward_model.reward_manager=dapo \ +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \ +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \ +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \ +reward_model.reward_kwargs.overlong_buffer_cfg.log=True \ +reward_model.reward_kwargs.max_resp_len=${max_response_length} \ custom_reward_function.path=recipe/fapo/reward_fn_reasoning_remote.py \ custom_reward_function.name=compute_score_fapo \ trainer.logger='["console","wandb"]' \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node=8 \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=True \ trainer.test_freq=10 \ trainer.save_freq=-1 \ trainer.total_epochs=10 \ trainer.total_training_steps=200 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=auto \ trainer.log_val_generations=10 ================================================ FILE: verl_distillation/recipe/fapo/run_fapo_genrm_train.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail project_name='FAPO-Reproduce' exp_name='FAPO-GenRM-4B' adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 max_prompt_length=$((1024 * 5)) max_response_length=$((1024 * 8)) enable_overlong_buffer=True overlong_buffer_len=$((1024 * 4)) overlong_penalty_factor=1.0 loss_agg_mode="token-mean" train_prompt_bsz=512 n_resp_per_prompt=16 train_prompt_mini_bsz=32 # Ray RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} WORKING_DIR=${WORKING_DIR:-"${PWD}"} RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} NNODES=${NNODES:-4} NGPUS_PER_NODE=${NGPUS_PER_NODE:-8} # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} # very important! 
please modify the max_position_embeddings in config.json to 32768 after downloading from huggingface MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen3-4B-Instruct-2507"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/train.parquet"} TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/test.parquet"} # Algorithm temperature=1.2 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_temperature=0.6 val_top_p=0.95 # Performance Related Parameter sp_size=1 use_dynamic_bsz=True actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 2)) infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 3)) offload=True gen_tp=1 fsdp_size=8 ray job submit --no-wait --runtime-env="${RUNTIME_ENV}" \ --address "${RAY_ADDRESS}" \ --working-dir "${WORKING_DIR}" \ -- python3 -m verl.trainer.main_ppo \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.train_batch_size=${train_prompt_bsz} \ data.return_raw_chat=True \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.mode=async \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.rollout.gpu_memory_utilization=0.80 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.temperature=${val_temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ 
actor_rollout_ref.rollout.val_kwargs.do_sample=True \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.actor.fsdp_config.fsdp_size=${fsdp_size} \ reward_model.reward_manager=dapo \ +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \ +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \ +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \ +reward_model.reward_kwargs.overlong_buffer_cfg.log=True \ +reward_model.reward_kwargs.max_resp_len=${max_response_length} \ custom_reward_function.path=recipe/fapo/reward_fn_genrm.py \ custom_reward_function.name=compute_score_fapo_genrm \ trainer.logger='["console","wandb"]' \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node="${NGPUS_PER_NODE}" \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=True \ trainer.test_freq=10 \ trainer.save_freq=10 \ trainer.total_epochs=10 \ trainer.total_training_steps=500 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=auto \ trainer.log_val_generations=10

================================================
FILE: verl_distillation/recipe/fapo/runtime_env.yaml
================================================
working_dir: ./
excludes: ["/.git/"]
env_vars:
  TORCH_NCCL_AVOID_RECORD_STREAMS: "1"
  VLLM_USE_V1: "1"

================================================
FILE: verl_distillation/recipe/fully_async_policy/README.md
================================================
# Recipe: Fully Async Policy Trainer

**Author:** `https://github.com/meituan-search`

Last updated: 10/18/2025.

This document introduces a fully asynchronous PPO training system that completely decouples the Trainer and the Rollouter, supporting asynchronous sample generation and training. With this system, we achieved a 2.35x-2.67x performance improvement when training the Qwen2.5-7B model on 128 GPUs, without significantly affecting the results.

## Introduction

### Background

Compared with the colocate architecture, a separated rollout/train architecture can allocate resources and design training logic more flexibly, addressing the low GPU utilization and poor training efficiency caused by long-tail samples. The one_step_off_policy recipe alleviates long rollout times and gains some training efficiency by separating the two stages and running rollout and train one step apart asynchronously; however, it is fixed to exactly one step of asynchrony, which is inflexible and cannot fully eliminate the impact of long-tail samples on training efficiency. Other frameworks such as AReaL, Magistral, StreamRL, and AsyncFlow have implemented asynchronous and streaming training on top of a separated architecture and reported gains. We borrowed from their methods and implemented them in verl.

The fully_async_policy recipe supports asynchronous, streaming, and partial-rollout training. With reasonable settings for resource allocation, parameter-synchronization frequency, and related parameters, fully_async_policy can significantly improve training efficiency.
> Magistral https://arxiv.org/abs/2506.10910
>
> AReaL: A Large-Scale Asynchronous Reinforcement Learning System for Language Reasoning https://arxiv.org/abs/2505.24298
>
> StreamRL: Scalable, Heterogeneous, and Elastic RL for LLMs with Disaggregated Stream Generation https://arxiv.org/abs/2504.15930
>
> AsyncFlow: An Asynchronous Streaming RL Framework for Efficient LLM Post-Training https://arxiv.org/abs/2507.01663

### Core Contributions

* **Resource Isolation**: Unlike the hybrid_engine setup, the Rollouter and the Trainer use separate computing resources, and each must be assigned its resources explicitly.
* **Parallel Generation and Training**: While the Trainer is training, the Rollouter keeps generating new samples.
* **Multi-step Asynchrony**: Compared to one-step-off policy, it supports asynchrony from 0.x steps up to multiple steps, making the asynchronous schedule more flexible.
* **NCCL Parameter Synchronization**: Uses NCCL communication primitives for parameter communication between the Rollouter and the Trainer.
* **Streaming Inference and Training**: The Rollouter generates data sample by sample, and data transmission uses a single sample as the minimum transmission unit.
* **Asynchronous Training and Freshness Control**: By setting the parameter async_training.staleness_threshold, it supports training with samples generated by old parameters.
* **PartialRollout**: The Rollouter's inference process supports partial-rollout logic. During parameter synchronization, `sleep()` and `resume()` hooks save samples from ongoing rollouts and continue them in the next rollout, reducing the time spent waiting for in-flight tasks to finish.

Currently, the supported usage mode is megatron/fsdp + vllm; vllm must use the server mode based on AgentLoop.

## Design

The overall architecture of fully_async_policy is shown in the figure below. It consists of four parts: the Rollouter, the MessageQueue, the Trainer, and the ParameterSynchronizer.

![fully_async_policy_structure](https://github.com/ArronHZG/verl-community/blob/recipe/async_policy/docs/fully_async_policy_structure.svg?raw=true)

1. The Rollouter generates sequences sample by sample and puts them into the MessageQueue; its production speed is controlled by the freshness settings.
2. The MessageQueue temporarily stores the samples generated by the Rollouter.
3. The Trainer fetches samples from the MessageQueue one by one. After fetching `require_batches*ppo_mini_batch_size` samples, it performs a training step. After async_training.trigger_parameter_sync_step local updates, it triggers a parameter synchronization with the Rollouter.
4. The ParameterSynchronizer implements NCCL-based synchronous parameter synchronization.

The gain over the colocate baseline comes from the fact that, in the colocate case, giving more resources to rollout cannot remove the idleness caused by long-tail samples. After resource isolation, rollout and train may each take longer than before (fewer resources), but overlapping the two reduces the end-to-end time.

![fully_async_policy_revenue](https://github.com/ArronHZG/verl-community/blob/recipe/async_policy/docs/fully_async_policy_revenue.svg?raw=true)
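To make the data flow concrete, here is a minimal single-process sketch of the Rollouter -> MessageQueue -> Trainer pipeline using asyncio. It is illustrative only: the recipe's actual components are distributed Ray workers with a distributed queue, and names like `rollouter` and `trainer` are placeholders.

```python
# Minimal sketch of the decoupled pipeline (illustration, not the recipe's classes).
import asyncio

REQUIRE_SAMPLES = 4  # stands in for require_batches * ppo_mini_batch_size

async def rollouter(queue: asyncio.Queue, total: int):
    for i in range(total):
        await asyncio.sleep(0.01)       # generation latency
        await queue.put(f"sample-{i}")  # stream one sample at a time

async def trainer(queue: asyncio.Queue, total: int):
    consumed = 0
    while consumed < total:
        # train as soon as enough samples have streamed in; a parameter sync
        # would be triggered every trigger_parameter_sync_step local updates
        batch = [await queue.get() for _ in range(REQUIRE_SAMPLES)]
        consumed += len(batch)

async def main():
    queue: asyncio.Queue = asyncio.Queue()
    await asyncio.gather(rollouter(queue, 16), trainer(queue, 16))

asyncio.run(main())
```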
## Usage

### Parameter Description

| super params | implication |
|-----------------------------------------------|-------------|
| `trainer.nnodes` | Number of nodes for the Trainer |
| `trainer.n_gpus_per_node` | Number of GPUs per node for the Trainer |
| `rollout.nnodes` | Number of nodes for the Rollouter |
| `rollout.n_gpus_per_node` | Number of GPUs per node for the Rollouter |
| `data.train_batch_size` | Not effective in the fully async strategy (default is 0) |
| `data.gen_batch_size` | In the fully async strategy, uses streaming sample production logic (default is 1) |
| `rollout.total_rollout_steps` | Total number of rollout samples |
| `rollout.test_freq` | How many parameter updates the Rollouter performs before running a validation |
| `actor_rollout_ref.actor.ppo_mini_batch_size` | ppo_mini_batch_size is a global number across all workers/GPUs |
| `async_training.require_batches` | Number of ppo_mini_batch_size batches the FullyAsyncTrainer fetches at once |
| `async_training.trigger_parameter_sync_step` | How many local updates the FullyAsyncTrainer performs before a parameter synchronization |
| `async_training.staleness_threshold` | Freshness control |
| `async_training.partial_rollout` | Whether to perform partial_rollout |
| `async_training.use_rollout_log_probs` | Use the log_probs generated by rollout |
| `async_training.compute_prox_log_prob` | Whether to compute log_prob with the training model's parameters during the training phase |

**Further Explanation:**

* `rollout.total_rollout_steps`: to align the sample count with a colocate run, multiply train_batch_size by the number of steps: `rollout.total_rollout_steps = data.train_batch_size * step`.
* `async_training.trigger_parameter_sync_step`: in the fully async strategy, the number of local updates the Trainer performs (i.e., how many times it fetches `require_batches * ppo_mini_batch_size` samples) before a parameter synchronization with the Rollouter. Between every two parameter synchronizations, the Trainer processes `trigger_parameter_sync_step * require_batches * ppo_mini_batch_size` samples. For a fair speed comparison with colocate, set trigger_parameter_sync_step to `data.train_batch_size / (require_batches * ppo_mini_batch_size)`; for example, with train_batch_size 512, require_batches 1, and ppo_mini_batch_size 32, this gives 512 / 32 = 16.
* `async_training.staleness_threshold`: the maximum proportion of stale samples allowed in training (see the sketch after this list).
  * staleness_threshold = 0 indicates synchronous training. The Rollouter generates a fixed number of samples between two parameter updates:

    $$rollout\_num = trigger\_parameter\_sync\_step*require\_batches*ppo\_mini\_batch\_size$$

  * staleness_threshold > 0 indicates asynchronous training; it can be a decimal for more flexible asynchrony. The Rollouter generates at most the following number of samples between two parameter updates:

    $$rollout\_num = (1+staleness\_threshold)*(trigger\_parameter\_sync\_step*require\_batches*ppo\_mini\_batch\_size) - num\_staleness\_sample$$

    where num_staleness_sample is the number of stale samples generated in excess during the previous rollout. Since this is a streaming system, rollout keeps producing while the trainer keeps consuming. If the Rollouter is slower, the Trainer triggers parameter synchronization earlier, and the Rollouter will not actually produce rollout_num samples. When rollout is fast enough, setting staleness_threshold to 1 is roughly equivalent to the one-step-off policy. To prevent too many stale samples from hurting training accuracy, a value below 1 is recommended.
* `async_training.partial_rollout`: partial_rollout only actually takes effect when staleness_threshold > 0.
* `async_training.use_rollout_log_probs`: in reinforcement learning algorithms, log_probs are implicitly tied to parameter versions and tokens. Given the settings of algorithms like PPO/GRPO/DAPO, when computing importance sampling, old_log_prob must use the log_probs corresponding to the rollout parameters and tokens to keep the algorithm correct. In the fully async strategy, old_log_prob is therefore computed by rollout rather than by the trainer by default.
* `async_training.require_batches`: in streaming training, require_batches should be set to 1, meaning training starts as soon as ppo_mini_batch_size samples have been produced. In actual testing, we found that issuing fewer samples at once can, due to the order of data distribution, cause training instability and longer response lengths. require_batches is therefore provided to control how many mini-batches are distributed and trained at once.
* `async_training.compute_prox_log_prob` (experimental): during training we observed that metrics and response lengths may become unstable in the later stages. To mitigate this, the [Rollout Importance Sampling](https://verl.readthedocs.io/en/latest/advance/rollout_is.html) technique can be used; it requires computing log_prob with the training engine, which this switch enables. Additionally, when compute_prox_log_prob and Rollout Importance Sampling are enabled under mode d (async stream pipeline with partial rollout), our implementation approximates AReaL's Decoupled PPO.
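The budget formulas above can be checked with a few lines of plain Python (illustrative only; the names mirror the config keys, and the numbers are example settings):

```python
# Rollout budget between two parameter syncs, per the formulas above.
def rollout_budget(trigger_parameter_sync_step, require_batches,
                   ppo_mini_batch_size, staleness_threshold,
                   num_staleness_sample=0):
    base = trigger_parameter_sync_step * require_batches * ppo_mini_batch_size
    if staleness_threshold == 0:
        return base  # synchronous: fixed sample count
    # asynchronous: allow up to (1 + threshold) * base, minus the stale
    # surplus carried over from the previous rollout
    return (1 + staleness_threshold) * base - num_staleness_sample

print(rollout_budget(16, 1, 32, 0.0))      # 512   (stream off policy)
print(rollout_budget(4, 4, 32, 0.5))       # 768.0 (async, 0.5 staleness)
print(rollout_budget(4, 4, 32, 0.5, 128))  # 640.0 (128 stale samples carried over)
```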
### Supported Modes

1. on policy pipeline:
   1. **trigger_parameter_sync_step=1, staleness_threshold=0**
   2. The Rollouter produces `require_batches*ppo_mini_batch_size` samples at once, the Trainer fetches these samples for training, and after training completes, the Trainer and the Rollouter perform a parameter synchronization;
   3. During the rollout phase, if there are long-tail samples but few rollout samples, shorter samples cannot fill the idle resources, causing some resource waste.
   4. As shown in figure a;
2. stream off policy pipeline:
   1. **trigger_parameter_sync_step>1, staleness_threshold=0**
   2. Synchronous streaming training is performed. The Rollouter produces `require_batches*ppo_mini_batch_size*trigger_parameter_sync_step` samples at once; the Trainer performs a local training step every time it fetches `require_batches*ppo_mini_batch_size` samples, and after trigger_parameter_sync_step local updates, the Trainer and the Rollouter perform a parameter synchronization;
   3. Compared to a, since more samples are generated at once, resource idleness is lower.
   4. Within one step of training there are two periods of resource idleness: while fetching the first batch of samples, train waits for `require_batches*ppo_mini_batch_size` samples to be produced; and during the last parameter update, rollout waits for training to complete.
   5. As shown in figure b;
3. async stream pipeline with stale samples:
   1. **trigger_parameter_sync_step>=1, staleness_threshold>0, partial_rollout=False**
   2. After each parameter update, the Rollouter plans to produce at most rollout_num samples (in practice, depending on rollout speed, it may generate fewer).
   3. If the rollout process is relatively fast, the Rollouter generates some additional samples (num_stale_samples) before parameter synchronization, for immediate use by the Trainer after synchronization. When parameter synchronization is triggered, if the Rollouter has ongoing tasks, it waits for them to complete and does not add new ones;
   4. Compared to b, all steps after the first no longer wait for the first batch of rollouts to finish, but they do wait for active tasks to finish.
   5. As shown in figure c;
4. async stream pipeline with partial rollout:
   1. **trigger_parameter_sync_step>=1, staleness_threshold>0, partial_rollout=True**
   2. Compared to c, when parameter synchronization is triggered while the Rollouter still has samples in flight, the rollout process is interrupted and parameters are synchronized; the interrupted samples continue generating after synchronization. This reduces the time spent waiting for active tasks to finish.
   3. As shown in figure d;

![fully_async_policy_mode](https://github.com/ArronHZG/verl-community/blob/recipe/async_policy/docs/fully_async_policy_mode.svg?raw=true)

### Key Metrics

| metrics | implication |
|------------------------------------------------|-------------|
| `trainer/idle_ratio` | Trainer idle rate |
| `rollouter/idle_ratio` | Rollouter idle rate |
| `fully_async/count/stale_samples_processed` | Total number of stale samples used in training |
| `fully_async/count/stale_trajectory_processed` | Total number of stale trajectories used in training (one sample produces rollout.n trajectories) |
| `fully_async/partial/total_partial_num` | Number of partial samples processed by the Trainer between two parameter synchronizations |
| `fully_async/partial/partial_ratio` | Ratio of partial samples processed by the Trainer between two parameter synchronizations |
| `fully_async/partial/max_partial_span` | Maximum parameter-version span of partial samples processed by the Trainer between two parameter synchronizations |

### Parameter Tuning Recommendations

* Resource Allocation and Adjustment:
  * Reasonable resource allocation is the prerequisite for good training efficiency. Ideally, rollout time and train time should be close, minimizing pipeline bubbles, avoiding idle resources, and keeping the Trainer away from stale samples. In real training scenarios, allocation can be adjusted based on the observed idle time of rollout and train, available from rollouter/idle_ratio and trainer/idle_ratio: if rollouter/idle_ratio is high and trainer/idle_ratio is low, increase Trainer resources and reduce Rollouter resources, and vice versa.
* Key Parameters:
  * staleness_threshold: setting it too high causes more stale samples to be used, hurting model performance. A value below 1 is recommended.
### Key Metrics

| metrics | implication |
|------------------------------------------------|----------------------------------------------------------------------------------------------------------------|
| `trainer/idle_ratio` | Trainer idle rate |
| `rollouter/idle_ratio` | Rollouter idle rate |
| `fully_async/count/stale_samples_processed` | Total number of stale samples used in training |
| `fully_async/count/stale_trajectory_processed` | Total number of stale trajectories used in training (one sample produces rollout.n trajectories) |
| `fully_async/partial/total_partial_num` | Number of partial samples processed by the Trainer between two parameter synchronizations |
| `fully_async/partial/partial_ratio` | Ratio of partial samples processed by the Trainer between two parameter synchronizations |
| `fully_async/partial/max_partial_span` | Maximum parameter-version span of partial samples processed by the Trainer between two parameter synchronizations |

### Parameter Tuning Recommendations

* Resource allocation and adjustment:
    * Reasonable resource allocation is the prerequisite for good training efficiency. Ideally, rollout time and train time should be close, which minimizes pipeline bubbles in the whole training process, avoids idle resources, and keeps the Trainer off stale samples. In real training, adjust the allocation based on the observed idle time of rollout and train, available from rollouter/idle_ratio and trainer/idle_ratio: if rollouter/idle_ratio is high and trainer/idle_ratio is low, increase Trainer resources and reduce Rollouter resources, and vice versa.
* Key parameters:
    * staleness_threshold: Setting it too high causes more stale samples to be used, hurting model quality. We recommend values below 1.
    * require_batches: The closer to 1, the closer to a pure streaming process, the smaller the training bubbles, and the larger the speedup, but it affects the order in which samples are processed.
    * trigger_parameter_sync_step: Smaller values are closer to on-policy but cause frequent parameter synchronization, in which resources wasted on long-tail samples cannot be filled by short samples, lowering utilization. Larger values give higher computational efficiency, but accuracy suffers from being more off-policy.
    * rollout.test_freq: Validation occupies Rollouter resources; do not set it too small.
* Mode selection: by adjusting these parameters, the Fully Async architecture supports acceleration at different levels, suiting tasks in different scenarios.
    * For small-scale tasks that must preserve training stability and on-policy behavior, and have low speed requirements, try the on policy pipeline (mode 1).
    * For scenarios that need higher training throughput but are sensitive to staleness, try the stream off policy pipeline: set trigger_parameter_sync_step>1 to improve training efficiency while keeping the synchronous mechanism (staleness_threshold=0) (mode 2).
    * For large-scale tasks with high training-speed requirements that can tolerate a degree of off-policyness and staleness, set staleness_threshold>0 and partial_rollout=True to improve training efficiency, using the async stream pipeline (mode 3 or 4).

### Quick Start

```shell
rollout_mode="async"
rollout_name="vllm" # sglang or vllm
if [ "$rollout_mode" = "async" ]; then
    export VLLM_USE_V1=1
    return_raw_chat="True"
fi

train_prompt_bsz=0
gen_prompt_bsz=1
n_resp_per_prompt=16
train_prompt_mini_bsz=32
total_rollout_steps=$(((512*400)))
test_freq=10
staleness_threshold=0
trigger_parameter_sync_step=16
partial_rollout=False
use_dynamic_bsz=True # referenced below, so it must be defined

python -m recipe.fully_async_policy.fully_async_main \
    data.train_batch_size=${train_prompt_bsz} \
    data.gen_batch_size=${gen_prompt_bsz} \
    data.return_raw_chat=${return_raw_chat} \
    actor_rollout_ref.rollout.n=${n_resp_per_prompt} \
    actor_rollout_ref.actor.strategy=fsdp2 \
    critic.strategy=fsdp2 \
    actor_rollout_ref.hybrid_engine=False \
    actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \
    actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \
    actor_rollout_ref.rollout.name=${rollout_name} \
    actor_rollout_ref.rollout.mode=${rollout_mode} \
    actor_rollout_ref.rollout.calculate_log_probs=True \
    trainer.nnodes="${NNODES_TRAIN}" \
    trainer.n_gpus_per_node="${NGPUS_PER_NODE}" \
    rollout.nnodes="${NNODES_ROLLOUT}" \
    rollout.n_gpus_per_node="${NGPUS_PER_NODE}" \
    rollout.total_rollout_steps="${total_rollout_steps}" \
    rollout.test_freq="${test_freq}" \
    async_training.staleness_threshold="${staleness_threshold}" \
    async_training.trigger_parameter_sync_step="${trigger_parameter_sync_step}" \
    async_training.partial_rollout="${partial_rollout}"
```
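A note on the values above: with the default `async_training.require_batches=1` and `ppo_mini_batch_size=32`, a fair comparison against a colocate run with `train_batch_size=512` sets `trigger_parameter_sync_step = 512 / (1 * 32) = 16`, which is exactly what the script uses. `NNODES_TRAIN`, `NNODES_ROLLOUT`, and `NGPUS_PER_NODE` are expected to be provided as environment variables.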
## Experiments

### Asynchronous Training on 7B Model

We used Qwen2.5-Math-7B to verify the benefits of the fully async strategy with long generations at multiple resource scales. Using the `async stream pipeline with stale samples` strategy, we achieved roughly 2x speedups on 32, 64, and 128 GPUs without significantly affecting experimental results.

* Machine: H20
* Model: Qwen2.5-Math-7B
* Rollout length: max_response_length (FSDP2): 28K tokens
* Algorithm: DAPO
* Dataset: TRAIN_FILE: dapo-math-17k.parquet; TEST_FILE: aime-2024.parquet
* Engine: vllm + FSDP2
* rollout.n: 16
* ppo_mini_batch_size: 32
* test_freq: 20
* colocate sync:
    * step: 400
    * train_batch_size: 512
* fully_async_policy:
    * total_rollout_steps: 512*400
    * require_batches: 4
    * trigger_parameter_sync_step: 4
    * staleness_threshold: 0.5
    * partial_rollout: True

| training mode | resource allocation | step | gen | old_log_prob | update_actor | total time<br>100 step | total time<br>200 step | total time<br>300 step | total time<br>400 step | acc/mean@1 |
|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
| colocate sync | 32 | 790.10 | 357.41 | 107.71 | 313.81 | 13h 44m | 1d 3h 43m | 2d 9h 22m | 3d 17h 5m | max: 0.3313<br>last: 0.2448 |
| fully_async_policy | 16:16 | 294.77 | 21.26 | \ | 269.80 | 7h 58m<br>(1.72x) | 16h 21m<br>(1.70x) | 1d 0h 53m<br>(2.31x) | 1d 9h 26m<br>(2.66x) | max: 0.3302<br>last: 0.2333 |
| colocate sync | 64 | 365.28 | 150.72 | 70.26 | 133.41 | 10h 22m | 20h 45m | 1d 7h 6m | 1d 17h 32m | max: 0.3365<br>last: 0.2333 |
| fully_async_policy | 32:32 | 189.26 | 28.46 | \ | 156.98 | 4h 57m<br>(2.09x) | 10h 14m<br>(2.03x) | 16h 58m<br>(1.83x) | 21h 40m<br>(1.92x) | max: 0.3677<br>last: 0.3406 |
| colocate sync | 128 | 356.30 | 177.85 | 53.92 | 113.81 | 8h 36m | 17h 56m | 1d 5h 6m | 1d 16h 48m | max: 0.3573<br>last: 0.2958 |
| fully_async_policy | 64:64 | 150.63 | 33.14 | \ | 113.16 | 3h 13m<br>(2.67x) | 6h 46m<br>(2.65x) | 10h 53m<br>(2.67x) | 17h 22m<br>(2.35x) | max: 0.3521<br>last: 0.3094 |

> source data: https://wandb.ai/hou-zg-meituan/fully-async-policy-colocate_async?nw=nwuserhouzg
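How to read these tables (our interpretation, not stated explicitly in the source): `step`, `gen`, `old_log_prob`, and `update_actor` are average per-step times in seconds (for colocate sync they roughly sum to `step`), and the parenthesized multipliers are speedups over the colocate baseline at the same total GPU count, e.g. at 32 GPUs, 13h 44m / 7h 58m ≈ 1.72x at 100 steps. The `16:16` notation denotes the GPU split between Trainer and Rollouter.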
### 128-card 7B Asynchronous Mode Experiment

We used Qwen2.5-Math-7B to verify the effects of the various modes supported by fully async. Streaming alone brings a benefit of approximately 1.6x; after adding staleness and partial_rollout, the benefit reaches 2.35x.

| mode | step | gen | old_log_prob | update_actor | total time<br>100 step | total time<br>200 step | total time<br>300 step | total time<br>400 step | acc/mean@1 |
|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
| colocate sync | 356.30 | 177.85 | 53.92 | 113.81 | 8h 36m | 17h 56m | 1d 5h 6m | 1d 16h 48m | max: 0.3573<br>last: 0.2958 |
| `stream off policy pipeline`<br>(+fully async: trigger_parameter_sync_step=4,<br>require_batches=4) | 231.34 | 128.47 | \ | 98.77 | 4h 25m | 9h 41m | 15h 2m | 1d 1h 53m | max: 0.2844<br>last: 0.2604 |
| `async stream pipeline with stale samples`<br>(+staleness_threshold=0.5) | | | | | | | | | |
| `async stream pipeline with partial rollout`<br>(+partial_rollout=True) | 150.63 | 33.14 | \ | 113.16 | 3h 13m | 6h 46m | 10h 53m | 17h 22m | max: 0.3521<br>last: 0.3094 |

> source data: https://wandb.ai/hou-zg-meituan/fully-async-policy-stream_stale_partial?nw=nwuserhouzg
### 128-card Stale Ablation Experiment

Under the `async stream pipeline with partial rollout` mode, we verified the impact of the staleness setting on training efficiency. The larger the staleness, the more obvious the final gains. We also noticed that the times for staleness 0.3 and 0.5 are quite close: as the training steps increase, the response length changes significantly and training becomes unstable. This issue needs further analysis and optimization.

| staleness_threshold | step | gen | old_log_prob | update_actor | total time<br>100 step | total time<br>200 step | total time<br>300 step | total time<br>400 step | acc/mean@1 |
|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
| 0 | 231.34 | 128.47 | \ | 98.77 | 4h 25m | 9h 41m | 15h 2m | 1d 1h 53m | max: 0.2844<br>last: 0.2604 |
| 0.1 | 171.30 | 58.17 | \ | 109.12 | 3h 53m | 8h 37m | 14h 25m | 19h 59m | max: 0.3542<br>last: 0.2979 |
| 0.3 | 146.11 | 38.88 | \ | 103.22 | 3h 18m | 6h 49m | 11h 40m | 17h 20m | max: 0.3469<br>last: 0.2865 |
| 0.5 | 150.63 | 33.14 | \ | 113.16 | 3h 13m | 6h 46m | 10h 53m | 17h 22m | max: 0.3521<br>last: 0.3094 |

> source data: https://wandb.ai/hou-zg-meituan/fully-async-policy-ablation_stale?nw=nwuserhouzg
### 128-card 7B require_batches Ablation Experiment

Across multiple tests we found that the number of samples dispatched at a time in streaming affects the response length during training, and in turn the training time. We verified the impact on results by varying `async_training.require_batches`.

| require_batches | step | gen | old_log_prob | update_actor | total time<br>100 step | total time<br>200 step | total time<br>300 step | acc/mean@1 |
|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
| 1 | 203.47 | 30.88 | \ | 181.08 | 3h 31m | 8h 29m | 17h 36m | max: 0.349<br>last: 0.326 |
| 2 | 158.72 | 26.32 | \ | 128.08 | 3h 35m | 7h 38m | 13h 57m | max: 0.351<br>last: 0.3406 |
| 4 | 124.64 | 25.62 | \ | 95.06 | 3h 13m | 6h 46m | 10h 53m | max: 0.3521<br>last: 0.3521 |

> source data: https://wandb.ai/hou-zg-meituan/fully-async-policy-ablation_require_batches?nw=nwuserhouzg
### 30B Model Mode Experiment

TODO: The 30B experiments are still in progress.

* Machine: H20
* Model: Qwen2.5-32B
* Rollout length: max_response_length (FSDP2): 20K tokens
* Algorithm: DAPO
* Engine: vllm + FSDP2
* rollout.n: 16
* ppo_mini_batch_size: 32
* test_freq: 20
* colocate sync:
    * step: 200
    * train_batch_size: 512
* fully_async_policy:
    * total_rollout_steps: 512*200
    * trigger_parameter_sync_step: 512/32 = 16
    * staleness_threshold: 0
    * partial_rollout: False

| training mode | Resource allocation | mode | step | generate_sequences | old_log_prob | update_actor | total time | acc/best@32/mean |
|---|---|---|---|---|---|---|---|---|
| colocate sync | 128 | | | | | | | |
| fully_async_policy | 64:64 | stream off policy pipeline | | | | | | |
| fully_async_policy | 64:64 | async stream pipeline with stale samples | | | | | | |
| fully_async_policy | 64:64 | async stream pipeline with partial rollout | | | | | | |

## Future Plans

* GRPO experiments
* Megatron adaptation
* SGLang integration
* Transfer queue integration
* Asynchronous parameter synchronization
* AReaL asynchronous algorithm implementation
* TPPO algorithm implementation
* Multi-turn and Tool support
================================================
FILE: verl_distillation/recipe/fully_async_policy/agent_loop/__init__.py
================================================
# Copyright 2025 Meituan Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .agent_loop import FullyAsyncAgentLoopManager
from .partial_single_turn_agent_loop import PartialSingleTurnAgentLoop

# Keep a reference so the import (and its @register side effect) is not flagged as unused.
_ = [PartialSingleTurnAgentLoop]

__all__ = ["FullyAsyncAgentLoopManager"]
import asyncio
import logging
import os
from typing import Any, Optional

import hydra
import numpy as np
import ray
from omegaconf import DictConfig

from recipe.fully_async_policy.vllm_rollout.vllm_async_server import FullyAsyncvLLMReplica
from verl.experimental.agent_loop.agent_loop import (
    AgentLoopManager,
    AgentLoopOutput,
    AgentLoopWorkerBase,
    AsyncLLMServerManager,
    _agent_loop_registry,
    _DummyConfig,
    get_trajectory_info,
)
from verl.protocol import DataProto
from verl.single_controller.ray import RayWorkerGroup
from verl.utils.rollout_trace import rollout_trace_attr
from verl.workers.rollout.replica import TokenOutput

logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))


class FullyAsyncLLMServerManager(AsyncLLMServerManager):
    async def generate_for_partial(self, request_id, prompt_ids, sampling_params, **kwargs_extra) -> TokenOutput:
        """Generate tokens from prompt ids, with partial rollout support."""
        server = self._choose_server(request_id)
        output = await server.generate_for_partial.remote(
            request_id=request_id,
            prompt_ids=prompt_ids,
            sampling_params=sampling_params,
            **kwargs_extra,
        )
        return output


class FullyAsyncAgentLoopOutput(AgentLoopOutput):
    """Agent loop output."""

    is_cancel: bool = False
    """Indicates whether the request was interrupted"""
    log_probs: list[float] = None
    """Response token log probs including LLM generated token, tool response token."""
    param_version_start: int = 0
    """Indicate start parameter version when this response is generated"""
    param_version_end: int = 0
    """Indicate end parameter version when this response is generated, used for partial rollout"""


@ray.remote
class FullyAsyncAgentLoopWorker(AgentLoopWorkerBase):
    def __init__(
        self, config: DictConfig, server_handles: list[ray.actor.ActorHandle], reward_router_address: str = None
    ):
        self.server_manager = FullyAsyncLLMServerManager(config, server_handles)
        super().__init__(config, server_handles, reward_router_address)

    async def generate_sequences_no_post(
        self, batch: DataProto, partial_output_list: Optional[list[AgentLoopOutput]]
    ) -> list[AgentLoopOutput]:
        """Generate sequences from agent loop.

        Args:
            batch (DataProto): Input batch.
            partial_output_list: Optional[List[AgentLoopOutput]]: already rolled-out results.

        Returns:
            list[FullyAsyncAgentLoopOutput]: List of agent loop outputs, one per sample in the batch.
""" config = self.config.actor_rollout_ref.rollout sampling_params = dict( temperature=config.temperature, top_p=config.top_p, repetition_penalty=1.0, logprobs=config.calculate_log_probs, ) # override sampling params for validation if batch.meta_info.get("validate", False): sampling_params["top_p"] = config.val_kwargs.top_p sampling_params["temperature"] = config.val_kwargs.temperature # by default, we assume it's a single turn agent if "agent_name" not in batch.non_tensor_batch: batch.non_tensor_batch["agent_name"] = np.array(["single_turn_agent"] * len(batch), dtype=object) if "index" in batch.non_tensor_batch: index = batch.non_tensor_batch["index"] else: index = np.arange(len(batch)) trajectory_info = await get_trajectory_info( batch.meta_info.get("global_steps", -1), index, batch.meta_info.get("validate", False) ) if not partial_output_list: partial_output_list = [None] * len(batch) tasks = [] for i in range(len(batch)): kwargs = {k: v[i] for k, v in batch.non_tensor_batch.items()} kwargs["output"] = partial_output_list[i] tasks.append( asyncio.create_task(self._partial_run_agent_loop(sampling_params, trajectory_info[i], **kwargs)) ) return await asyncio.gather(*tasks) async def _partial_run_agent_loop( self, sampling_params: dict[str, Any], trajectory: dict[str, Any], *, agent_name: str, **kwargs, ) -> AgentLoopOutput: with rollout_trace_attr( step=trajectory["step"], sample_index=trajectory["sample_index"], rollout_n=trajectory["rollout_n"], validate=trajectory["validate"], name="agent_loop", ): assert agent_name in _agent_loop_registry, ( f"Agent loop {agent_name} not registered, registered agent loops: {_agent_loop_registry.keys()}" ) agent_loop_config = _agent_loop_registry[agent_name] agent_loop = hydra.utils.instantiate( config=agent_loop_config, trainer_config=_DummyConfig(config=self.config), server_manager=self.server_manager, tokenizer=self.tokenizer, processor=self.processor, ) return await agent_loop.run(sampling_params, **kwargs) class FullyAsyncAgentLoopManager(AgentLoopManager): def __init__(self, config: DictConfig, worker_group: RayWorkerGroup = None, rm_wg: RayWorkerGroup = None): self.config = config self.worker_group = worker_group self.reward_model_manager = None self.reward_router_address = None self.agent_loop_workers_class = FullyAsyncAgentLoopWorker self.rollout_replica_class = FullyAsyncvLLMReplica self.rm_wg = rm_wg self.rollout_replicas = None self.server_handles = None self.server_addresses = None self.agent_loop_workers = None @classmethod async def create(cls, config: DictConfig, worker_group: RayWorkerGroup = None, rm_wg: RayWorkerGroup = None): instance = cls(config, worker_group, rm_wg) await instance._async_init() return instance async def _async_init(self): if self.config.reward_model.enable and self.config.reward_model.enable_resource_pool: from verl.experimental.reward import RewardModelManager self.reward_model_manager = RewardModelManager(self.config.reward_model, self.rm_wg) self.reward_router_address = self.reward_model_manager.get_router_address() await self._initialize_llm_servers_async() self._init_agent_loop_workers() async def _initialize_llm_servers_async(self): rollout_world_size = self.config.actor_rollout_ref.rollout.tensor_model_parallel_size world_size = ( self.worker_group.world_size if self.worker_group else self.config.trainer.n_gpus_per_node * self.config.trainer.nnodes ) num_replicas = world_size // rollout_world_size rollout_config = self.config.actor_rollout_ref.rollout model_config = self.config.actor_rollout_ref.model 
        self.rollout_replicas = [
            self.rollout_replica_class(
                replica_rank=replica_rank,
                config=rollout_config,
                model_config=model_config,
                gpus_per_node=self.config.trainer.n_gpus_per_node,
            )
            for replica_rank in range(num_replicas)
        ]
        if self.worker_group:
            await asyncio.gather(*[server.init_hybrid(self.worker_group) for server in self.rollout_replicas])
        else:
            await asyncio.gather(*[server.init_standalone() for server in self.rollout_replicas])

        self.server_handles = [server._server_handle for server in self.rollout_replicas]
        self.server_addresses = [server._server_address for server in self.rollout_replicas]

    async def generate_single_sample_async(
        self,
        sample: DataProto,
        partial_output_list: Optional[list[AgentLoopOutput]],
    ) -> list[AgentLoopOutput]:
        """Asynchronously process a single sample.

        Args:
            sample: Single sample data.
            partial_output_list: Optional[List[AgentLoopOutput]]: already rolled-out results.

        Returns:
            list[AgentLoopOutput]: Processing results.
        """
        worker = self._select_best_worker()
        output_future = worker.generate_sequences_no_post.remote(sample, partial_output_list)
        return await asyncio.wrap_future(output_future.future())

    def _select_best_worker(self):
        """Select the best worker, simple round-robin load balancing."""
        if not hasattr(self, "_worker_index"):
            self._worker_index = 0
        worker = self.agent_loop_workers[self._worker_index]
        self._worker_index = (self._worker_index + 1) % len(self.agent_loop_workers)
        return worker

    async def cancel(self):
        await asyncio.gather(*[replica.cancel() for replica in self.rollout_replicas])

    async def resume(self):
        await asyncio.gather(*[replica.resume() for replica in self.rollout_replicas])

    async def wake_up(self):
        await asyncio.gather(*[replica.wake_up() for replica in self.rollout_replicas])

    async def sleep(self):
        await asyncio.gather(*[replica.sleep() for replica in self.rollout_replicas])

    async def reset_prefix_cache(self):
        await asyncio.gather(*[replica.reset_prefix_cache() for replica in self.rollout_replicas])


================================================
FILE: verl_distillation/recipe/fully_async_policy/agent_loop/partial_single_turn_agent_loop.py
================================================
# Copyright 2025 Meituan Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from typing import Any, Optional
from uuid import uuid4

from recipe.fully_async_policy.agent_loop.agent_loop import AgentLoopOutput, FullyAsyncAgentLoopOutput
from verl.experimental.agent_loop import AgentLoopBase
from verl.experimental.agent_loop.agent_loop import register
from verl.utils.profiler import simple_timer

logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))


@register("partial_single_turn_agent")
class PartialSingleTurnAgentLoop(AgentLoopBase):
    """Naive agent loop that only does single-turn chat completion."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.prompt_length = self.config.actor_rollout_ref.rollout.prompt_length
        self.response_length = self.config.actor_rollout_ref.rollout.response_length
        self.apply_chat_template_kwargs = self.config.data.get("apply_chat_template_kwargs", {})

    async def run(self, sampling_params: dict[str, Any], **kwargs) -> AgentLoopOutput:
        output: Optional[FullyAsyncAgentLoopOutput] = kwargs.get("output", None)
        messages = list(kwargs["raw_prompt"])
        param_version = kwargs.get("param_version", 0)
        metrics = {}
        request_id = uuid4().hex
        image_data = (kwargs.get("multi_modal_data") or {}).get("image", None)
        param_version_start = param_version
        param_version_end = param_version
        if not output:
            # TODO(baiyan): it is supposed to use the correct processor,
            # but I found the async training would hang if use_correct_processor=True,
            # so we use the tokenizer to tokenize the prompt for now.
            use_correct_processor = False
            if self.processor is not None and use_correct_processor:

                def get_prompt_ids():
                    raw_prompt = self.processor.apply_chat_template(
                        messages,
                        add_generation_prompt=True,
                        tokenize=False,
                        **self.apply_chat_template_kwargs,
                    )
                    model_inputs = self.processor(text=[raw_prompt], images=image_data, return_tensors="pt")
                    return model_inputs.pop("input_ids").squeeze(0).tolist()

                prompt_ids = await self.loop.run_in_executor(None, get_prompt_ids)
            else:
                prompt_ids = await self.loop.run_in_executor(
                    None,
                    lambda: self.tokenizer.apply_chat_template(
                        messages, add_generation_prompt=True, tokenize=True, **self.apply_chat_template_kwargs
                    ),
                )
        else:
            if output.is_cancel:
                # Resume the paused sample:
                # append the partial result directly after prompt_ids,
                # and restore the generate_sequences metric
                prompt_ids = output.prompt_ids + output.response_ids
                metrics["generate_sequences"] = output.metrics.generate_sequences
                param_version_start = output.param_version_start
            else:
                # In the same batch of samples,
                # some are canceled and some are not.
                # The samples without partial rollout are returned directly.
                return output

        with simple_timer("generate_sequences", metrics):
            response_ids, log_probs, is_cancel = await self.server_manager.generate_for_partial(
                request_id=request_id, prompt_ids=prompt_ids, sampling_params=sampling_params, image_data=image_data
            )
        if not output:
            response_mask = [1] * len(response_ids)
        else:
            # Pause the sample to be resumed, add the output result to response_ids, and reset response_mask
            prompt_ids = output.prompt_ids
            log_probs = output.log_probs + log_probs
            response_ids = output.response_ids + response_ids
            response_mask = [1] * len(response_ids)

        return FullyAsyncAgentLoopOutput(
            prompt_ids=prompt_ids,
            response_ids=response_ids[: self.response_length],
            response_mask=response_mask[: self.response_length],
            num_turns=2,
            metrics=metrics,
            is_cancel=is_cancel,
            log_probs=log_probs,
            param_version_start=param_version_start,
            param_version_end=param_version_end,
            # multi_modal_data={"image": image_data} if image_data is not None else {},
        )


================================================
FILE: verl_distillation/recipe/fully_async_policy/config/fully_async_ppo_megatron_trainer.yaml
================================================
hydra:
  searchpath:
    - file://verl/trainer/config

defaults:
  - ppo_megatron_trainer
  - _self_

async_training:
  # Maximum samples staleness threshold
  staleness_threshold: 0.1
  # Frequency of parameter synchronization between rollouter and trainer.
  # One step means the trainer obtains a batch of required samples.
  trigger_parameter_sync_step: 4
  # The number of ppo_mini_batches that the FullyAsyncTrainer obtains at once
  require_batches: 1
  # When synchronizing parameters, whether to interrupt the rollouter and perform partial rollout
  partial_rollout: True
  # Whether to use rollout log probs for training
  use_rollout_log_probs: True
  # compute_prox_log_prob
  compute_prox_log_prob: False

# Rollout config
rollout:
  # Number of nodes used in the rollout
  nnodes: 1
  # Number of GPUs per node
  n_gpus_per_node: 8
  # number of responses (i.e. num sample times). > 1 for grpo
  n: 4
  # total rollout samples
  # TODO rename to total_rollout_samples
  total_rollout_steps: 100
  # Number of epochs in training
  total_epochs: 10
  # Test frequency, how many parameter updates trigger a validation
  test_freq: 1

data:
  # Number of samples generated, currently only support 1
  gen_batch_size: 1

actor_rollout_ref:
  actor:
    # Whether to use rollout log probs for training
    use_rollout_log_probs: ${oc.select:async_training.use_rollout_log_probs, True}


================================================
FILE: verl_distillation/recipe/fully_async_policy/config/fully_async_ppo_trainer.yaml
================================================
hydra:
  searchpath:
    - file://verl/trainer/config

defaults:
  - ppo_trainer
  - _self_

async_training:
  # Maximum samples staleness threshold
  staleness_threshold: 0.1
  # Frequency of parameter synchronization between rollouter and trainer.
  # One step means the trainer obtains a batch of required samples.
  trigger_parameter_sync_step: 4
  # The number of ppo_mini_batches that the FullyAsyncTrainer obtains at once
  require_batches: 1
  # When synchronizing parameters, whether to interrupt the rollouter and perform partial rollout
  partial_rollout: True
  # Whether to use rollout log probs for training
  use_rollout_log_probs: True
  # compute_prox_log_prob
  compute_prox_log_prob: False

# Rollout config
rollout:
  # Number of nodes used in the rollout
  nnodes: 1
  # Number of GPUs per node
  n_gpus_per_node: 8
  # number of responses (i.e. num sample times). > 1 for grpo
  n: 4
  # total rollout samples
  # TODO rename to total_rollout_samples
  total_rollout_steps: 100
  # Number of epochs in training
  total_epochs: 10
  # Test frequency, how many parameter updates trigger a validation
  test_freq: 1

data:
  # Number of samples generated, currently only support 1
  gen_batch_size: 1

actor_rollout_ref:
  actor:
    # Whether to use rollout log probs for training
    use_rollout_log_probs: ${oc.select:async_training.use_rollout_log_probs, True}


================================================
FILE: verl_distillation/recipe/fully_async_policy/detach_utils.py
================================================
# Copyright 2025 Meituan Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import time
from collections import defaultdict
from dataclasses import dataclass
from typing import Any, Optional

import numpy as np
import torch
from tensordict import TensorDict

from verl import DataProto
from verl.experimental.agent_loop.agent_loop import AgentLoopOutput
from verl.trainer.ppo.ray_trainer import compute_response_mask
from verl.utils.model import compute_position_id_with_mask


def postprocess_agent_loop_outputs(rs: "RolloutSample", tokenizer, config, processor) -> DataProto:
    """Postprocess a list of AgentLoopOutput into a DataProto.

    Args:
        rs: RolloutSample
        tokenizer: Tokenizer instance
        config: Configuration object

    Returns:
        DataProto: Processed batch data
    """
    inputs: list[AgentLoopOutput] = rs.agent_loop_output_list
    full_batch = rs.full_batch

    # NOTE: consistent with batch version of generate_sequences in vllm_rollout_spmd.py
    # prompts: left pad
    # responses: right pad
    # input_ids: prompt + response
    # attention_mask: [0,0,0,0,1,1,1,1, | 1,1,1,0,0,0,0,0]
    # position_ids:   [0,0,0,0,0,1,2,3, | 4,5,6,7,8,9,10,11]

    # prompts
    tokenizer.padding_side = "left"
    outputs = tokenizer.pad(
        [{"input_ids": input.prompt_ids} for input in inputs],
        padding="max_length",
        max_length=config.actor_rollout_ref.rollout.prompt_length,
        return_tensors="pt",
        return_attention_mask=True,
    )
    prompt_ids, prompt_attention_mask = outputs["input_ids"], outputs["attention_mask"]

    # responses
    tokenizer.padding_side = "right"
    outputs = tokenizer.pad(
        [{"input_ids": input.response_ids} for input in inputs],
        padding="max_length",
        max_length=config.actor_rollout_ref.rollout.response_length,
        return_tensors="pt",
        return_attention_mask=True,
    )
    response_ids, response_attention_mask = outputs["input_ids"], outputs["attention_mask"]

    # response_mask
    outputs = tokenizer.pad(
        [{"input_ids": input.response_mask} for input in inputs],
        padding="max_length",
        max_length=config.actor_rollout_ref.rollout.response_length,
        return_tensors="pt",
        return_attention_mask=False,
    )
    response_mask = outputs["input_ids"]
    assert response_ids.shape == response_mask.shape, (
        f"mismatch in response_ids and response_mask shape: {response_ids.shape} vs {response_mask.shape}"
    )
    response_mask = response_mask * response_attention_mask

    # Handle multi-modal inputs and position_ids calculation.
    # Only support Qwen2VLImageProcessor for multi-modal processing currently.
    # TODO: support other multi-modal inputs
    multi_modal_inputs = None
    if processor is not None and "Qwen2VLImageProcessor" in processor.image_processor.__class__.__name__:
        # qwen-vl mrope
        if "Qwen3VLProcessor" in processor.__class__.__name__:
            pass
        else:
            pass
        images = [one.get("image", None) for one in full_batch.non_tensor_batch.get("multi_modal_data")]
        current_text = [tokenizer.decode(input.prompt_ids, skip_special_tokens=False) for input in inputs]
        multi_modal_inputs = processor(
            text=current_text,
            images=images,
            return_tensors="pt",
            max_length=config.actor_rollout_ref.rollout.prompt_length,
            padding="max_length",
            padding_side="left",
        )
        prompt_ids = multi_modal_inputs.pop("input_ids")
        prompt_attention_mask = multi_modal_inputs.pop("attention_mask")
        # TODO: megatron will calculate rope position_ids in the forward pass, so we don't need to calculate
        # it here, but for FSDP support, we need to calculate it here
        #
        # We must use dict(multi_modal_inputs) to convert BatchFeature values to a new dict
        # because np.array() only keeps the keys for BatchFeature.
        # multi_modal_inputs = dict(multi_modal_inputs)
        # image_grid_thw = multi_modal_inputs.get("image_grid_thw")
        # video_grid_thw = multi_modal_inputs.get("video_grid_thw")
        # second_per_grid_ts = multi_modal_inputs.get("second_per_grid_ts")
        # vision_position_ids = get_rope_index(
        #     processor,
        #     input_ids=input_ids.squeeze(0),
        #     image_grid_thw=image_grid_thw,
        #     video_grid_thw=video_grid_thw,
        #     second_per_grid_ts=second_per_grid_ts,
        #     attention_mask=attention_mask.squeeze(0),
        # ).unsqueeze(0)  # (1, 3, seq_len)
        # valid_mask = attention_mask[0].bool()
        # text_position_ids = torch.ones((1, len(input_ids[0])), dtype=torch.long)
        # text_position_ids[0, valid_mask] = torch.arange(valid_mask.sum().item())
        # text_position_ids = text_position_ids.unsqueeze(0)
        # position_ids = torch.cat((text_position_ids, vision_position_ids), dim=1)  # (1, 4, seq_length)
    else:
        pass

    input_ids = torch.cat([prompt_ids, response_ids], dim=1)
    attention_mask = torch.cat([prompt_attention_mask, response_attention_mask], dim=1)
    position_ids = compute_position_id_with_mask(attention_mask)  # (1, seq_len)

    batch = TensorDict(
        {
            "prompts": prompt_ids,  # [bsz, prompt_length]
            "responses": response_ids,  # [bsz, response_length]
            "response_mask": response_mask,  # [bsz, response_length]
            "input_ids": input_ids,  # [bsz, prompt_length + response_length]
            "attention_mask": attention_mask,  # [bsz, prompt_length + response_length]
            "position_ids": position_ids,  # [bsz, prompt_length + response_length]
        },
        batch_size=len(input_ids),
    )
    num_turns = np.array([input.num_turns for input in inputs], dtype=np.int32)
    metrics = [input.metrics.model_dump() for input in inputs]
    return DataProto(batch=batch, non_tensor_batch={"__num_turns__": num_turns}, meta_info={"metrics": metrics})


@dataclass
class RolloutSample:
    """Enhanced rollout sample containing both original batch info and AgentLoopOutput"""

    # Original batch information
    full_batch: Any
    # AgentLoopOutput from generation
    agent_loop_output_list: list[Any]  # AgentLoopOutput
    # Metadata
    sample_id: str
    epoch: int
    # Processing metadata
    processing_times: list[float]
    param_version: int
    param_version_start: list[int]
    param_version_end: list[int]
    rollout_status: dict[str, Any]


@dataclass
class ValidateMetrics:
    """Metrics for validation"""

    timing_raw: dict[str, Any]
    metrics: Optional[dict[str, Any]] = None
    global_steps: Optional[int] = None
    param_version: Optional[int] = None


def prepare_single_generation_data(batch_dict, global_steps, rollout_n) -> DataProto:
    """
    Similar to the logic of ray_trainer._prepare_generate_batch, but for a single sample.
    Separates the data used for generation from the original data.

    Returns:
        DataProto: the generation-ready batch, repeated rollout_n times.
    """
    full_batch = DataProto.from_single_dict(batch_dict)
    batch_keys_to_pop = ["input_ids", "attention_mask", "position_ids"]
    non_tensor_batch_keys_to_pop = ["raw_prompt_ids"]
    full_batch.pop(
        batch_keys=batch_keys_to_pop,
        non_tensor_batch_keys=non_tensor_batch_keys_to_pop,
    )
    # Set the agent to partial_single_turn_agent, which supports partial rollout
    full_batch.non_tensor_batch["agent_name"] = np.array(["partial_single_turn_agent"] * len(full_batch), dtype=object)
    # Repeat each prompt rollout_n times (one row per sampled response)
    full_batch = full_batch.repeat(repeat_times=rollout_n, interleave=True)
    return full_batch


def process_rollout_log_probs(data_proto: DataProto, rollout_log_probs: list[list[float]]) -> torch.Tensor:
    """
    Process rollout_log_probs according to the mask in DataProto.

    mask: [0,0,0,0,1,1,1,1, | 1,1,1,0,0,0,0,0]

    Args:
        data_proto: A DataProto object containing batch information
        rollout_log_probs: A two-dimensional list, each sublist containing the log_probs of one sample

    Returns:
        torch.Tensor: The processed log_probs tensor, with shape [bsz, response_length]
    """
    batch = data_proto.batch
    response_mask = batch["response_mask"]
    rollout_log_probs_tensor = torch.zeros(response_mask.shape, dtype=torch.float32) - 1
    for i, log_probs_seq in enumerate(rollout_log_probs):
        # Get the effective length of the current sample (the number of positions with 1 in the mask)
        valid_length = response_mask[i].sum().item()
        # Ensure that the length of log_probs_seq does not exceed the valid length
        actual_length = min(len(log_probs_seq), valid_length)
        # Fill log_probs into the corresponding positions
        if actual_length > 0:
            rollout_log_probs_tensor[i, :actual_length] = torch.tensor(log_probs_seq[:actual_length])
    rollout_log_probs_tensor = rollout_log_probs_tensor.to(torch.float32)
    return rollout_log_probs_tensor


def merge_rollout_sample(config, tokenizer, rs: RolloutSample, processor):
    """Supplement and refine the RolloutSample object."""
    # Step 1: Create a DataProto from the AgentLoopOutput generation results
    gen_batch_output = postprocess_agent_loop_outputs(rs, tokenizer, config, processor)
    rollout_log_probs = [x.log_probs for x in rs.agent_loop_output_list]
    rollout_log_probs = process_rollout_log_probs(gen_batch_output, rollout_log_probs)
    gen_batch_output.batch["rollout_log_probs"] = rollout_log_probs.to(torch.float32)

    # Step 2: Add uid
    rs.full_batch.non_tensor_batch["uid"] = np.array([f"uid_{rs.sample_id}"] * len(rs.full_batch), dtype=object)

    # Step 3: Merge the non_tensor_batch and meta_info of the original batch into the final batch
    for key, value in rs.full_batch.non_tensor_batch.items():
        gen_batch_output.non_tensor_batch[key] = value
    gen_batch_output.meta_info.update(rs.full_batch.meta_info)

    # Step 4: Set full_batch
    rs.full_batch = gen_batch_output
    rs.processing_times = []
    for agent_loop in rs.agent_loop_output_list:
        rs.processing_times.append(agent_loop.metrics.generate_sequences)
    rs.param_version_start = [agent_loop.param_version_start for agent_loop in rs.agent_loop_output_list]
    rs.param_version_end = [agent_loop.param_version_end for agent_loop in rs.agent_loop_output_list]

    # Step 5: Clear agent_loop_output_list
    rs.agent_loop_output_list = []
    return rs


def assemble_batch_from_rollout_samples(
    rollout_samples: list[RolloutSample], tokenizer, config, balance_batch=None
) -> DataProto:
    """
    Assemble gen_batch_output from RolloutSample objects, similar to the _post_generate_batch logic in ray_trainer.

    Args:
        rollout_samples: List of RolloutSample objects
        tokenizer: Tokenizer instance
        config: Configuration object containing trainer settings
        balance_batch: Whether to balance the batch (simplified version)

    Returns:
        DataProto: Assembled gen_batch_output

    Raises:
        ValueError: If rollout_samples is empty
    """
    start_time = time.time()
    if not rollout_samples:
        raise ValueError("Empty rollout_samples provided for batch assembly")
    print(f"[BatchUtils] Assembling batch from {len(rollout_samples)} RolloutSample objects")

    rollout_samples_batch = []
    processing_times = []
    rollout_status = rollout_samples[0].rollout_status
    # Add a prefix to all rollout_status keys
    rollout_status = {f"fully_async/{key}": value for key, value in rollout_status.items()}
    for rs in rollout_samples:
        rollout_samples_batch.append(rs.full_batch)
        processing_times.extend(rs.processing_times)
    final_batch = DataProto.concat(rollout_samples_batch)

    # Calculate response_mask (if not present)
    if "response_mask" not in final_batch.batch.keys():
        final_batch.batch["response_mask"] = compute_response_mask(final_batch)
    if balance_batch:
        balance_batch(final_batch, metrics={})
    # Calculate the global valid token number
    if "attention_mask" in final_batch.batch:
        final_batch.meta_info["global_token_num"] = torch.sum(final_batch.batch["attention_mask"], dim=-1).tolist()

    # Collect statistics
    param_versions = [rs.param_version for rs in rollout_samples]
    trajectorys_param_versions = [version for rs in rollout_samples for version in rs.param_version_end]
    processing_time_stats = {
        "processing_time/avg": np.mean(processing_times),
        "processing_time/max": np.max(processing_times),
        "processing_time/min": np.min(processing_times),
        "processing_time/tp50": np.percentile(processing_times, 50),
        "processing_time/tp99": np.percentile(processing_times, 99),
        "processing_time/tp95": np.percentile(processing_times, 95),
    }
    processing_time_stats = {f"fully_async/{key}": value for key, value in processing_time_stats.items()}

    # NOTE: aggregate per-trajectory parameter-version spans across all samples
    # (the original expression reused the leaked loop variable `rs`, so it only
    # covered the last sample of the loop above).
    param_version_diff = [
        abs(a - b)
        for rs in rollout_samples
        for a, b in zip(rs.param_version_end, rs.param_version_start, strict=False)
    ]
    num_diff0 = param_version_diff.count(0)
    partial_stats = {
        "fully_async/partial/total_partial_num": len(param_version_diff) - num_diff0,
        "fully_async/partial/partial_ratio": (len(param_version_diff) - num_diff0) / len(param_version_diff),
        "fully_async/partial/max_partial_span": max(param_version_diff),
    }

    # add meta_info
    final_batch.meta_info.update(
        {
            "rollout_param_versions": param_versions,
            "param_version_diversity": len(set(param_versions)) if param_versions else 0,
            "trajectory_param_versions": trajectorys_param_versions,
            **processing_time_stats,
            **rollout_status,
            **partial_stats,
        }
    )
    print(f"[BatchUtils] Batch assembly completed in {time.time() - start_time:.2f}s")
    return final_batch


class MetricsAggregator:
    """Metrics aggregator, used to combine metrics from multiple training steps"""

    def __init__(self, total_gpus: int):
        # Store all values for each metric
        self.metric_values: dict[str, list[float]] = defaultdict(list)
        # Store the number of samples at each step for weighted averaging
        self.sample_counts: list[int] = []
        # Store the timestamp of each step for time-related calculations
        self.timestamps: list[float] = []
        # Step count
        self.step_count = 0
        # Total number of GPUs used
        self.total_gpus = total_gpus
        # Metric aggregation rule configuration
        self.aggregation_rules = self._init_aggregation_rules()
self._init_aggregation_rules() def _init_aggregation_rules(self) -> dict[str, dict[str, list[str]]]: """Initialize metric aggregation rules""" return { # Time-based metrics; more can be added here "time_sum": ["perf/time_per_step"], "last": [ "fully_async/count/total_generated_samples", "fully_async/count/stale_samples_processed", "fully_async/count/stale_trajectory_processed", "fully_async/count/current_param_version", "fully_async/count/dropped_stale_samples", "training/global_step", # TODO change name to: total_step ], } def add_step_metrics(self, metrics: dict[str, Any], sample_count: int, timestamp: float = None): """Add metrics for a single step""" if timestamp is None: timestamp = time.time() self.sample_counts.append(sample_count) self.timestamps.append(timestamp) self.step_count += 1 # Store all metrics values for key, value in metrics.items(): if isinstance(value, int | float | np.number): self.metric_values[key].append(float(value)) elif isinstance(value, torch.Tensor): self.metric_values[key].append(float(value.item())) def _get_aggregation_type(self, metric_name: str) -> str: """Determine the aggregation type based on the metric name""" for agg_type, metric_list in self.aggregation_rules.items(): if metric_name in metric_list: return agg_type metric_lower = metric_name.lower() if any(keyword in metric_lower for keyword in ["timing_s/"]): return "time_sum" if any(keyword in metric_lower for keyword in ["mean", "avg", "average"]): return "avg" if any(keyword in metric_lower for keyword in ["max", "maximum"]): return "max" if any(keyword in metric_lower for keyword in ["min", "minimum"]): return "min" if any(keyword in metric_lower for keyword in ["sum", "total"]): return "sum" if any(keyword in metric_lower for keyword in ["weighted_avg"]): return "weighted_avg" return "avg" def _aggregate_single_metric(self, metric_name: str, values: list[float]) -> float: """Aggregate a single metric""" if not values: return 0.0 agg_type = self._get_aggregation_type(metric_name) if agg_type == "last": return values[-1] elif agg_type == "weighted_avg": # Weighted average if len(values) != len(self.sample_counts): # If the lengths do not match, use a simple average return sum(values) / len(values) total_samples = sum(self.sample_counts) if total_samples == 0: return sum(values) / len(values) weighted_sum = sum(v * c for v, c in zip(values, self.sample_counts, strict=False)) return weighted_sum / total_samples elif agg_type == "sum" or agg_type == "time_sum": return sum(values) elif agg_type == "avg": return sum(values) / len(values) elif agg_type == "max": return max(values) elif agg_type == "min": return min(values) else: # Default average return sum(values) / len(values) def get_aggregated_metrics(self) -> dict[str, Any]: """Return the aggregated metrics""" t = time.time() if self.step_count == 0: return {} aggregated = {} # Aggregate all metrics for metric_name, values in self.metric_values.items(): aggregated[metric_name] = self._aggregate_single_metric(metric_name, values) # Aggregate special metrics aggregated = self._special_metrics_aggregate(aggregated) print(f"aggregated metrics done. 
cost {time.time() - t}") return aggregated def _special_metrics_aggregate(self, aggregated: dict[str, Any]) -> dict[str, Any]: """Calculate derived special metrics""" # global_seqlen/minmax_diff if "global_seqlen/minmax_diff" in aggregated.keys(): aggregated["global_seqlen/minmax_diff"] = aggregated["global_seqlen/max"] - aggregated["global_seqlen/min"] # perf/throughput REQUIRED_PERF_KEYS = {"perf/throughput", "perf/total_num_tokens", "perf/time_per_step"} if REQUIRED_PERF_KEYS.issubset(aggregated): aggregated["perf/throughput"] = aggregated["perf/total_num_tokens"] / ( aggregated["perf/time_per_step"] * self.total_gpus ) # trainer/idle_ratio if "timing_s/gen" in aggregated.keys() and "timing_s/step" in aggregated.keys(): aggregated["trainer/idle_ratio"] = aggregated["timing_s/gen"] / aggregated["timing_s/step"] return aggregated def reset(self): """Reset the aggregator""" self.metric_values.clear() self.sample_counts.clear() self.timestamps.clear() self.step_count = 0 def get_current_stats(self) -> dict[str, Any]: """Get statistics about the current aggregation state (for debugging)""" return { "step_count": self.step_count, "metric_count": len(self.metric_values), "total_samples": sum(self.sample_counts), "metric_names": list(self.metric_values.keys()), } ================================================ FILE: verl_distillation/recipe/fully_async_policy/fsdp2_utils.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright 2025 Meituan Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional import torch import torch.distributed as dist from packaging import version from torch.distributed.tensor import DTensor from torch.distributed.tensor._dtensor_spec import DTensorSpec if version.parse(torch.__version__) < version.parse("2.6"): raise RuntimeError("PyTorch 2.6 or higher is required to use fsdp2_utils.") def fsdp2_sharded_save_to_cpu( model: torch.nn.Module, ) -> tuple[dict[str, tuple[torch.Tensor, DTensorSpec]], DTensorSpec]: """ Sharded Save: Each process only saves the local DTensor shard from its own GPU to CPU memory. Args: model: FSDP2-wrapped model whose parameters are of DTensor type. Returns: cpu_sharded_state: Dictionary of CPU shards for the current process. 
Key = parameter name, Value = (CPU shard tensor, original DTensorSpec) global_spec: DTensorSpec of the first parameter (used to verify global rules during loading) """ cpu_sharded_state = {} global_spec = None # Record global sharding rules (all parameters follow the same spec) for param_name, param in model.named_parameters(): # Only process sharded parameters of DTensor type (core parameters of FSDP2) if not isinstance(param, DTensor): # Save non-sharded parameters (e.g., running_mean of BatchNorm) as local data cpu_tensor = param.detach().cpu() cpu_sharded_state[param_name] = (cpu_tensor, None) continue # Record global sharding rules (take spec of the first DTensor to ensure consistency) if global_spec is None: global_spec = param._spec assert hasattr(global_spec, "device_mesh"), "DTensorSpec must contain 'device_mesh' attribute" assert hasattr(global_spec, "placements"), "DTensorSpec must contain 'placements' attribute" # 1. Extract local shard data from the current GPU (_local_tensor) local_gpu_tensor = param._local_tensor # the DTensor's local shard on the current GPU # 2. Move to CPU memory and detach from computation graph local_cpu_tensor = local_gpu_tensor.detach().cpu() # 3. Save CPU shard + original DTensorSpec (ensure sharding rules remain unchanged) cpu_sharded_state[param_name] = (local_cpu_tensor, param._spec) assert global_spec is not None, "No DTensor-type parameters found in the model. FSDP2 sharding may not be enabled." return cpu_sharded_state, global_spec def fsdp2_sharded_load_from_cpu( model: torch.nn.Module, cpu_sharded_state: dict[str, tuple[torch.Tensor, Optional[DTensorSpec]]], target_spec: DTensorSpec, ) -> None: """ Sharded Load: Each process only loads the CPU shard it is responsible for to the GPU, keeping sharding rules unchanged. Args: model: FSDP2 model to be restored (must have the same structure as when saved) cpu_sharded_state: Shard data read from CPU memory by the current process (from fsdp2_sharded_save_to_cpu) target_spec: Global DTensorSpec from saving (used to verify sharding rule consistency) """ # Verify device_mesh consistency (core: ensure loaded shards map to original GPUs) current_device_mesh = None for param in model.parameters(): if isinstance(param, DTensor): current_device_mesh = param._spec.device_mesh break assert current_device_mesh is not None, "DTensor parameters not initialized in the model to be loaded" assert current_device_mesh == target_spec.device_mesh, ( f"device_mesh mismatch during loading! Original: {target_spec.device_mesh}, Current: {current_device_mesh}" ) for param_name, param in model.named_parameters(): # Skip parameters not in the saved state (e.g., newly added parameters) if param_name not in cpu_sharded_state: continue # Extract CPU shard data and original Spec local_cpu_tensor, saved_spec = cpu_sharded_state[param_name] # Handle different parameter types: DTensor sharded parameters vs. regular parameters if isinstance(param, DTensor): # 1. Verify sharding rule consistency (placements must match original Spec) assert saved_spec is not None, f"DTensorSpec missing in saved state for parameter {param_name}" assert saved_spec.placements == target_spec.placements, ( f"Sharding strategy mismatch for parameter {param_name} (conflicts with global rules)!" ) # 2. Move CPU shard data to the current GPU (device of param._local_tensor) target_device = param._local_tensor.device local_gpu_tensor = local_cpu_tensor.to(target_device) # 3. 
Restore to DTensor's local shard (directly copy to _local_tensor, keep spec unchanged) param._local_tensor.copy_(local_gpu_tensor) else: # Regular parameters: load directly to original device target_device = param.device param.data.copy_(local_cpu_tensor.to(target_device)) # Process synchronization: ensure all processes complete loading before proceeding dist.barrier() ================================================ FILE: verl_distillation/recipe/fully_async_policy/fsdp_workers.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright 2025 Meituan Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os import torch import torch.distributed from omegaconf import DictConfig from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from recipe.fully_async_policy.fsdp2_utils import fsdp2_sharded_load_from_cpu, fsdp2_sharded_save_to_cpu from verl.single_controller.base.decorator import Dispatch, register from verl.utils.device import ( get_device_name, get_torch_device, ) from verl.utils.fsdp_utils import ( fsdp_version, ) from verl.workers.fsdp_workers import ActorRolloutRefWorker, AsyncActorRolloutRefWorker, CriticWorker logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) device_name = get_device_name() __all__ = ["DetachActorWorker", "DetachAsyncRolloutWorker", "CriticWorker"] def get_inference_model(rollout): """ Get the inference model according to the type of inference_engine Args: rollout: rollout object Returns: model: model object """ inference_engine = rollout.inference_engine if hasattr(inference_engine, "llm_engine"): inference_model = inference_engine.llm_engine.model_executor.driver_worker.worker.model_runner.model elif hasattr(inference_engine, "worker"): inference_model = inference_engine.worker.model_runner.model else: raise AttributeError( f"Unsupported inference_engine type: {type(inference_engine)}. " f"Expected LLM (with llm_engine attribute) or WorkerWrapperBase (with worker attribute)." 
) return inference_model class DetachNcclSync(AsyncActorRolloutRefWorker): def _get_actor_params(self): pass @register(dispatch_mode=Dispatch.ONE_TO_ALL, blocking=False) def sync_rollout_weights(self): assert (self._is_actor or self._is_rollout) and not self.config.hybrid_engine assert hasattr(self, "_weights_info") and self._weights_info is not None params = self._get_actor_params() if self._is_actor else None if self._is_rollout: inference_model = get_inference_model(self.rollout) from verl.utils.vllm.patch import patch_vllm_moe_model_weight_loader patch_vllm_moe_model_weight_loader(inference_model) for key, shape, dtype in self._weights_info: tensor = torch.empty(shape, dtype=dtype, device=get_torch_device().current_device()) if self._is_actor: assert key in params origin_data = params[key] if hasattr(origin_data, "full_tensor"): origin_data = origin_data.full_tensor() if torch.distributed.get_rank() == 0: tensor.copy_(origin_data) from ray.util.collective import collective collective.broadcast(tensor, src_rank=0, group_name="actor_rollout") if self._is_rollout: inference_model.load_weights([(key, tensor)]) get_torch_device().empty_cache() class DetachActorWorker(DetachNcclSync): def _get_actor_params(self): assert self._is_actor params = self.actor_module_fsdp.state_dict() from verl.utils.model import convert_weight_keys params = convert_weight_keys( params, getattr(self.actor_module_fsdp, "_fsdp_wrapped_module", self.actor_module_fsdp) ) return params @register(dispatch_mode=Dispatch.ONE_TO_ALL) def get_actor_weights_info(self): assert self._is_actor if hasattr(self, "_weights_info"): return self._weights_info if fsdp_version(self.actor_module_fsdp) == 1: from torch.distributed.fsdp.api import ShardedStateDictConfig, StateDictType FSDP.set_state_dict_type( self.actor_module_fsdp, state_dict_type=StateDictType.SHARDED_STATE_DICT, state_dict_config=ShardedStateDictConfig(), ) params = self._get_actor_params() ret = [] for key, tensor in params.items(): ret.append((key, tensor.size(), tensor.dtype)) self._weights_info = ret return ret @register(dispatch_mode=Dispatch.ONE_TO_ALL) def save_model_to_cpu(self, n): if not hasattr(self, "cpu_saved_models"): self.cpu_saved_models = {} self.cpu_saved_models[n] = fsdp2_sharded_save_to_cpu(self.actor_module_fsdp) @register(dispatch_mode=Dispatch.ONE_TO_ALL) def restore_model_from_cpu(self, n): if n in self.cpu_saved_models: cpu_sharded_state, global_spec = self.cpu_saved_models[n] fsdp2_sharded_load_from_cpu(self.actor_module_fsdp, cpu_sharded_state, global_spec) @register(dispatch_mode=Dispatch.ONE_TO_ALL) def clear_cpu_model(self, n): if n in self.cpu_saved_models: del self.cpu_saved_models[n] class DetachAsyncRolloutWorker(DetachNcclSync): def __init__(self, config: DictConfig, role: str): print(f"[DetachAsyncRolloutWorker] {DetachAsyncRolloutWorker.__mro__}") ActorRolloutRefWorker.__init__(self, config, role) @register(dispatch_mode=Dispatch.ONE_TO_ALL) def set_actor_weights_info(self, weights_info): assert self._is_rollout self._weights_info = weights_info ================================================ FILE: verl_distillation/recipe/fully_async_policy/fully_async_main.py ================================================ # Copyright 2025 Meituan Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import socket import threading from pprint import pprint import hydra import ray from omegaconf import OmegaConf from recipe.fully_async_policy.fully_async_rollouter import FullyAsyncRollouter from recipe.fully_async_policy.fully_async_trainer import FullyAsyncTrainer from recipe.fully_async_policy.message_queue import MessageQueue, MessageQueueClient from verl.trainer.ppo.ray_trainer import ResourcePoolManager from verl.trainer.ppo.reward import load_reward_manager from verl.trainer.ppo.utils import Role from verl.utils.fs import copy_to_local def create_resource_pool_manager(config, roles: list) -> ResourcePoolManager: """ Create resource pool manager Args: config: Configuration object roles: List of roles that need to create resource pools Returns: ResourcePoolManager: Resource pool manager """ resource_pool_spec = {} mapping = {} # Actor/Critic resource pool if any(role in roles for role in [Role.Actor, Role.Critic, Role.RefPolicy, Role.RewardModel]): assert config.trainer.n_gpus_per_node > 0, "config.trainer.n_gpus_per_node must be greater than 0" assert config.trainer.nnodes > 0, "config.trainer.nnodes must be greater than 0" trainer_pool = [config.trainer.n_gpus_per_node] * config.trainer.nnodes resource_pool_spec["trainer_pool"] = trainer_pool # Map training-related roles to the same resource pool for role in [Role.Actor, Role.Critic, Role.RefPolicy, Role.RewardModel]: if role in roles: mapping[role] = "trainer_pool" # Rollout resource pool if Role.Rollout in roles: assert config.rollout.n_gpus_per_node > 0, "config.rollout.n_gpus_per_node must be greater than 0" assert config.rollout.nnodes > 0, "config.rollout.nnodes must be greater than 0" rollout_pool = [config.rollout.n_gpus_per_node] * config.rollout.nnodes resource_pool_spec["rollout_pool"] = rollout_pool mapping[Role.Rollout] = "rollout_pool" return ResourcePoolManager(resource_pool_spec=resource_pool_spec, mapping=mapping) def create_role_worker_mapping(config): """ Create mapping from roles to worker classes Args: config: Configuration object Returns: tuple: (mapping from roles to worker classes, Ray worker group class) """ # Select worker class based on strategy if config.actor_rollout_ref.actor.strategy in ["fsdp", "fsdp2"]: assert config.actor_rollout_ref.actor.strategy == config.critic.strategy from recipe.fully_async_policy.fsdp_workers import ( CriticWorker, DetachActorWorker, DetachAsyncRolloutWorker, ) from verl.single_controller.ray import RayWorkerGroup ray_worker_group_cls = RayWorkerGroup elif config.actor_rollout_ref.actor.strategy == "megatron": assert config.critic.strategy == "megatron" from recipe.fully_async_policy.megatron_worker import CriticWorker, DetachActorWorker, DetachAsyncRolloutWorker from verl.single_controller.ray import RayWorkerGroup ray_worker_group_cls = RayWorkerGroup else: raise NotImplementedError(f"Unsupported strategy: {config.actor_rollout_ref.actor.strategy}") role_worker_mapping = { Role.Actor: ray.remote(DetachActorWorker), Role.Rollout: ray.remote(DetachAsyncRolloutWorker), Role.Critic: ray.remote(CriticWorker), } if config.reward_model.enable: if config.reward_model.strategy in ["fsdp", 
"fsdp2"]: from verl.workers.fsdp_workers import RewardModelWorker # TODO megatron support else: raise NotImplementedError(f"Unsupported reward model strategy: {config.reward_model.strategy}") role_worker_mapping[Role.RewardModel] = ray.remote(RewardModelWorker) # Add reference policy (if KL loss or reward is required) if config.algorithm.use_kl_in_reward or config.actor_rollout_ref.actor.use_kl_loss: role_worker_mapping[Role.RefPolicy] = ray.remote(DetachActorWorker) return role_worker_mapping, ray_worker_group_cls @ray.remote(num_cpus=1) class FullyAsyncTaskRunner: """ Ray remote class for executing distributed PPO training tasks. """ def __init__(self): self.running = False self.components = {} self.shutdown_event = threading.Event() def run(self, config): print("[ASYNC MAIN] Starting fully async PPO training...") self._initialize_components(config) self._run_training_loop() def _initialize_components(self, config) -> None: print(f"[ASYNC MAIN] TaskRunner hostname: {socket.gethostname()}, PID: {os.getpid()}") pprint(OmegaConf.to_container(config, resolve=True)) OmegaConf.resolve(config) print("[ASYNC MAIN] Initializing model and tokenizer...") local_path = copy_to_local( config.actor_rollout_ref.model.path, use_shm=config.actor_rollout_ref.model.get("use_shm", False) ) from verl.utils import hf_processor, hf_tokenizer trust_remote_code = config.data.get("trust_remote_code", False) tokenizer = hf_tokenizer(local_path, trust_remote_code=trust_remote_code) # Used for multimodal LLM, could be None processor = hf_processor(local_path, trust_remote_code=trust_remote_code, use_fast=True) self.components["tokenizer"] = tokenizer self.components["processor"] = processor self.components["config"] = config print("[ASYNC MAIN] Creating worker mapping and resource pools...") role_worker_mapping, ray_worker_group_cls = create_role_worker_mapping(config) self.components["role_worker_mapping"] = role_worker_mapping self.components["ray_worker_group_cls"] = ray_worker_group_cls print("[ASYNC MAIN] Loading reward functions...") reward_fn = load_reward_manager( config, tokenizer, num_examine=0, **config.reward_model.get("reward_kwargs", {}) ) val_reward_fn = load_reward_manager( config, tokenizer, num_examine=1, **config.reward_model.get("reward_kwargs", {}) ) self.components["reward_fn"] = reward_fn self.components["val_reward_fn"] = val_reward_fn print("[ASYNC MAIN] Creating FullyAsyncRollouter...") self._create_rollouter(config) print("[ASYNC MAIN] Creating FullyAsyncTrainer...") self._create_trainer(config) # sync total_train_steps between rollouter and trainer total_train_steps = ray.get(self.components["rollouter"].get_total_train_steps.remote()) print(f"total_train_steps {total_train_steps}") ray.get(self.components["trainer"].set_total_train_steps.remote(total_train_steps)) # max_queue_size max_queue_size = ray.get(self.components["rollouter"].get_max_queue_size.remote()) print(f"[ASYNC MAIN] Creating MessageQueue... 
max_queue_size {max_queue_size}") message_queue = MessageQueue.remote(config, max_queue_size) message_queue_client = MessageQueueClient(message_queue) self.components["message_queue"] = message_queue self.components["message_queue_client"] = message_queue_client ray.get(self.components["rollouter"].set_message_queue_client.remote(self.components["message_queue_client"])) ray.get(self.components["trainer"].set_message_queue_client.remote(self.components["message_queue_client"])) print("[ASYNC MAIN] Setting up parameter synchronization...") from recipe.fully_async_policy.param_sync import ParameterSynchronizer param_synchronizer = ParameterSynchronizer.remote( config=config, trainer=self.components["trainer"], rollouter=self.components["rollouter"], mq=self.components["message_queue_client"], ) ray.get(self.components["trainer"].set_parameter_synchronizer.remote(param_synchronizer)) # load checkpoint and sync parameter before doing anything val_before_train = val_reward_fn is not None and config.trainer.get("val_before_train", True) ray.get(self.components["trainer"].load_checkpoint.remote()) ray.get(param_synchronizer.sync_weights.remote(version=0, validate=val_before_train)) ray.get(param_synchronizer.wait_last_valid.remote()) self.components["param_synchronizer"] = param_synchronizer print("[ASYNC MAIN] All components initialized successfully") def _create_rollouter(self, config) -> None: rollouter = FullyAsyncRollouter.remote( config=config, tokenizer=self.components["tokenizer"], role_worker_mapping={Role.Rollout: self.components["role_worker_mapping"][Role.Rollout]}, resource_pool_manager=create_resource_pool_manager(config, roles=[Role.Rollout]), ray_worker_group_cls=self.components["ray_worker_group_cls"], processor=self.components["processor"], reward_fn=self.components["reward_fn"], val_reward_fn=self.components["val_reward_fn"], device_name=config.trainer.device, ) ray.get(rollouter.init_workers.remote()) ray.get(rollouter.set_max_required_samples.remote()) self.components["rollouter"] = rollouter print("[ASYNC MAIN] Rollouter created and initialized successfully") def _create_trainer(self, config) -> None: trainer_role_mapping = { role: worker_cls for role, worker_cls in self.components["role_worker_mapping"].items() if role != Role.Rollout } trainer = FullyAsyncTrainer.remote( config=config, tokenizer=self.components["tokenizer"], role_worker_mapping=trainer_role_mapping, resource_pool_manager=create_resource_pool_manager(config, roles=list(trainer_role_mapping.keys())), ray_worker_group_cls=self.components["ray_worker_group_cls"], processor=self.components["processor"], reward_fn=self.components["reward_fn"], val_reward_fn=self.components["val_reward_fn"], device_name=config.trainer.device, ) ray.get(trainer.init_workers.remote()) self.components["trainer"] = trainer print("[ASYNC MAIN] FullyAsyncTrainer created and initialized successfully") def _run_training_loop(self): self.running = True print("[ASYNC MAIN] Starting Rollouter and Trainer...") rollouter_future = self.components["rollouter"].fit.remote() trainer_future = self.components["trainer"].fit.remote() futures = [rollouter_future, trainer_future] try: while futures: # Use ray.wait to monitor all futures and return when any one is completed. 
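# (Illustrative aside, an assumption rather than part of the original file:
# ray.wait(refs, num_returns=1, timeout=None) blocks until at least one
# ObjectRef is ready and returns a (done, pending) pair, e.g.
#   refs = [slow_task.remote(), fast_task.remote()]   # hypothetical tasks
#   done, pending = ray.wait(refs, num_returns=1)     # returns once one finishes
#   result = ray.get(done[0])
# Here that means a crash in either the rollouter or the trainer surfaces
# immediately, so the surviving component can be cancelled below.)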
done_futures, remaining_futures = ray.wait(futures, num_returns=1, timeout=None) for future in done_futures: try: ray.get(future) print("[ASYNC MAIN] One component completed successfully") except Exception as e: print(f"[ASYNC MAIN] Component failed with error: {e}") for remaining_future in remaining_futures: ray.cancel(remaining_future) raise e futures = remaining_futures except Exception as e: print(f"[ASYNC MAIN] Training failed: {e}") for future in futures: ray.cancel(future) raise finally: self.components["message_queue_client"].clear_queue() print("[ASYNC MAIN] Training completed or interrupted") @hydra.main(config_path="config", config_name="fully_async_ppo_trainer", version_base=None) def main(config): from verl.trainer.main_ppo import run_ppo # Ensure async training config exists if not hasattr(config, "async_training"): raise RuntimeError("must set async_training config") from time import time start_time = time() run_ppo(config, task_runner_class=FullyAsyncTaskRunner) print(f"total time: {time() - start_time:.2f} seconds") if __name__ == "__main__": main() ================================================ FILE: verl_distillation/recipe/fully_async_policy/fully_async_rollouter.py ================================================ # Copyright 2025 Meituan Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
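# NOTE (illustrative, not part of the original file): the FullyAsyncRollouter
# below is organized as three cooperating coroutines around asyncio queues:
#   _feed_samples()      -> pending_queue  (reads the train dataloader)
#   _processor_worker()  -> active_tasks   (one generation task per sample;
#                                           cancelled samples re-enter via
#                                           cancel_queue)
#   _consumer_worker()   <- result_queue   (serializes results into the
#                                           MessageQueue)
# A minimal self-contained sketch of this producer/processor/consumer pattern,
# with hypothetical names:
#
#   import asyncio
#
#   async def feed(pending):
#       for i in range(3):
#           await pending.put(i)
#       await pending.put("DONE")               # end signal, as below
#
#   async def process(pending, results):
#       while (item := await pending.get()) != "DONE":
#           results.put_nowait(item * 2)        # stand-in for generation
#       results.put_nowait(None)
#
#   async def consume(results):
#       while (item := await results.get()) is not None:
#           print("consumed", item)
#
#   async def main():
#       pending, results = asyncio.Queue(), asyncio.Queue()
#       await asyncio.gather(feed(pending), process(pending, results), consume(results))
#
#   asyncio.run(main())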
import asyncio import time from pprint import pformat import ray from ray import ObjectRef from recipe.fully_async_policy.detach_utils import ( RolloutSample, ValidateMetrics, merge_rollout_sample, prepare_single_generation_data, ) from recipe.fully_async_policy.message_queue import MessageQueueClient from recipe.fully_async_policy.ray_trainer import FullyAsyncRayPPOTrainer from verl.single_controller.ray import RayClassWithInitArgs, RayWorkerGroup from verl.trainer.ppo.ray_trainer import ResourcePoolManager from verl.trainer.ppo.utils import Role, WorkerType from verl.utils.profiler import marked_timer from verl.utils.tracking import ValidationGenerationsLogger @ray.remote(num_cpus=10, max_concurrency=100) class FullyAsyncRollouter(FullyAsyncRayPPOTrainer): """ Asynchronous sample generator, responsible for continuously generating training samples and putting them into the MessageQueue. Based on and improving the OneStepOffRayTrainer implementation """ def __init__( self, config, tokenizer, role_worker_mapping: dict[Role, WorkerType], resource_pool_manager: ResourcePoolManager, ray_worker_group_cls: RayWorkerGroup = RayWorkerGroup, processor=None, reward_fn=None, val_reward_fn=None, device_name=None, ): # Store the tokenizer for text processing self.tokenizer = tokenizer self.processor = processor self.config = config self.reward_fn = reward_fn self.val_reward_fn = val_reward_fn self.hybrid_engine = config.actor_rollout_ref.hybrid_engine assert not self.hybrid_engine assert self.config.data.train_batch_size == 0, "train_batch_size must be zero" assert self.config.data.gen_batch_size == 1, "gen_batch_size must be one" assert self.config.async_training.staleness_threshold >= 0, "staleness_threshold must be >= 0" assert self.config.async_training.trigger_parameter_sync_step >= 1, ( "trigger_parameter_sync_step must be >= 1" ) self.role_worker_mapping = role_worker_mapping self.resource_pool_manager = resource_pool_manager self.ray_worker_group_cls = ray_worker_group_cls self.device_name = device_name if device_name else self.config.trainer.device self.validation_generations_logger = ValidationGenerationsLogger( project_name=self.config.trainer.project_name, experiment_name=self.config.trainer.experiment_name, ) self.ref_in_actor = False self.kl_ctrl_in_reward = False self.use_critic = False self.use_reference_policy = False self.use_rm = False print("[FullyAsyncRollouter] Creating datasets...") from verl.trainer.main_ppo import create_rl_dataset, create_rl_sampler from verl.utils.dataset.rl_dataset import collate_fn train_dataset = create_rl_dataset(config.data.train_files, config.data, tokenizer, processor) val_dataset = create_rl_dataset(config.data.val_files, config.data, tokenizer, processor) train_sampler = create_rl_sampler(config.data, train_dataset) self._validate_config() print(f"[FullyAsyncRollouter] Rollouter _create_dataloader...\n{train_dataset}\n{val_dataset}") self._create_dataloader(train_dataset, val_dataset, collate_fn, train_sampler) # ==================== fully async config ==================== self.total_rollout_steps = len(self.train_dataloader) * self.config.trainer.total_epochs if self.config.rollout.total_rollout_steps is not None: self.total_rollout_steps = min(self.config.rollout.total_rollout_steps, self.total_rollout_steps) print(f"[FullyAsyncRollouter] Total rollout steps: {self.total_rollout_steps}") self.total_train_steps = None # Rollouter parameter configuration self.message_queue_client = None # Worker groups: rollout_wg is the same as 
actor_rollout_wg self.rollout_wg = None self.actor_rollout_wg = None self.async_rollout_manager = None # Config self.staleness_threshold: float = config.async_training.get("staleness_threshold", 1) # required_samples uses ppo_mini_batch_size * require_batches as the minimum number of samples. self.require_batches = config.async_training.require_batches self.required_samples = config.actor_rollout_ref.actor.ppo_mini_batch_size * self.require_batches self.max_required_samples = None self.max_concurrent_samples = None # queue size self.max_queue_size = None # Statistics self.current_param_version = 0 self.total_generated_samples = 0 self.staleness_samples = 0 self.dropped_stale_samples = 0 self.processed_sample_count = 0 self.global_steps = 0 self.idle_start_time = None self.version_start_time = None # Concurrency control # Modified by self.pause() or self._should_pause_generation() self.paused = False self.running = True self.monitor_loop_trigger = True # Initialize async locks directly self.lock = asyncio.Lock() self.condition = asyncio.Condition(self.lock) # Initialize async queues self.pending_queue = asyncio.Queue(maxsize=128) self.active_tasks = set() self.result_queue = asyncio.Queue() self.cancel_queue = asyncio.Queue() async def set_message_queue_client(self, message_queue_client: MessageQueueClient): """Set message queue client""" async with self.lock: self.message_queue_client = message_queue_client async def set_max_required_samples(self): async with self.lock: self.max_required_samples = int( self.required_samples * (self.staleness_threshold + 1) * self.config.async_training.trigger_parameter_sync_step ) self.total_train_steps = int( self.total_rollout_steps / (self.required_samples * self.config.async_training.trigger_parameter_sync_step) ) self.max_concurrent_samples = len(self.async_rollout_manager.server_handles) * 16 self.max_concurrent_samples = min(self.max_concurrent_samples, self.max_required_samples) self.max_queue_size = self.max_required_samples print( f"[FullyAsyncRollouter] required_samples : {self.required_samples} " f"max_required_samples: {self.max_required_samples} " f"max_queue_size: {self.max_queue_size} " f"total_train_steps: {self.total_train_steps} " f"total_rollout_steps: {self.total_rollout_steps} " f"max_concurrent_samples: {self.max_concurrent_samples} " ) def get_rollout_wg(self): """Get rollout worker group""" return self.rollout_wg def get_max_queue_size(self): return self.max_queue_size def get_total_train_steps(self): return self.total_train_steps async def update_param_version(self, version: int, validate: bool = False, global_steps: int = 0): """Update current parameter version""" async with self.lock: old_version = self.current_param_version self.current_param_version = version # every time the parameter version changes, reset staleness_samples self.staleness_samples = ( len(self.active_tasks) + self.result_queue.qsize() + self.cancel_queue.qsize() + await self.message_queue_client.get_queue_size() ) timing_raw = {} idle_ratio = None if self.idle_start_time is not None and self.version_start_time is not None: rollout_active_time = self.idle_start_time - self.version_start_time rollout_version_time = time.time() - self.version_start_time idle_ratio = 1 - rollout_active_time / rollout_version_time timing_raw["rollouter/active_time"] = rollout_active_time timing_raw["rollouter/version_time"] = rollout_version_time timing_raw["rollouter/idle_ratio"] = idle_ratio self.idle_start_time = None print( f"[FullyAsyncRollouter][Public][update_param_version] " f"Parameter 
version updated from {old_version} to {version} " f", reset staleness_samples to: {self.staleness_samples}" f", idle_ratio: {idle_ratio}" ) val_metrics = None if ( self.val_reward_fn is not None and self.config.rollout.test_freq > 0 and self.current_param_version % self.config.rollout.test_freq == 0 and self.current_param_version > 0 # don't test here in the initial parameter sync ) or (validate and self.val_reward_fn is not None): with marked_timer("rollouter/validate_time", timing_raw, color="green"): val_metrics: dict = self._validate() data = ValidateMetrics( timing_raw=timing_raw, metrics=val_metrics, global_steps=global_steps, param_version=version ) await self.message_queue_client.put_validate(ray.cloudpickle.dumps(data)) self.version_start_time = time.time() def _validate_config(self): # Validate asynchronous training configuration if not hasattr(self.config, "async_training"): raise ValueError("[FullyAsyncRollouter] Missing async_training configuration") assert self.config.actor_rollout_ref.rollout.calculate_log_probs, "rollout must calculate log_probs" async def init_workers(self): """Initialize distributed training workers using Ray backend. Creates: 1. Ray resource pools from configuration 2. Worker groups for each role (actor, critic, etc.) """ self._init_resource_pools() self._create_worker_classes() self._init_worker_groups() self._init_models() await self._init_async_rollout_manager() def _create_actor_rollout_classes(self): # only create rollout for role in [Role.Rollout]: resource_pool = self.resource_pool_manager.get_resource_pool(role) role_cls = RayClassWithInitArgs( cls=self.role_worker_mapping[role], config=self.config.actor_rollout_ref, role=str(role), ) self.resource_pool_to_cls[resource_pool][str(role)] = role_cls def _init_models(self): self.rollout_wg = self.all_wg[str(Role.Rollout)] self.rollout_wg.init_model() self.actor_rollout_wg = self.rollout_wg def _create_continuous_iterator(self): """ Create a continuous data iterator across epochs """ for epoch in range(self.config.rollout.total_epochs): iterator = iter(self.train_dataloader) for batch_dict in iterator: yield epoch, batch_dict async def _init_async_rollout_manager(self): # create async rollout manager and request scheduler assert self.config.actor_rollout_ref.rollout.mode == "async" from recipe.fully_async_policy.agent_loop import FullyAsyncAgentLoopManager self.async_rollout_mode = True self.async_rollout_manager = await FullyAsyncAgentLoopManager.create( config=self.config, worker_group=self.rollout_wg, ) # Add samples to the pending_queue async def _feed_samples(self): continuous_iterator = self._create_continuous_iterator() for epoch, batch_dict in continuous_iterator: # Similar to _prepare_generate_batch: Separate data full_batch = prepare_single_generation_data( batch_dict, self.global_steps, self.config.actor_rollout_ref.rollout.n ) sample_id = f"sample_{epoch}_{self.global_steps}" rollout_sample = RolloutSample( full_batch=full_batch, agent_loop_output_list=[None] * self.config.actor_rollout_ref.rollout.n, sample_id=sample_id, epoch=epoch, param_version=0, param_version_start=[], param_version_end=[], processing_times=[], rollout_status={}, ) await self.pending_queue.put(rollout_sample) # Check if we have reached the last step if self.global_steps >= self.total_rollout_steps: print( f"[FullyAsyncRollouter][Feed] " f"Maximum count has been reached, stop adding new samples: " f"{self.global_steps} >= {self.total_rollout_steps}" ) break self.global_steps += 1 # End signal await 
self.pending_queue.put("DONE") print(f"[FullyAsyncRollouter][Feed] Sample addition is complete, {self.global_steps} samples have been added") async def _processor_worker(self): """ Streaming worker coroutine: each sample is submitted for processing without waiting for a full batch """ while True: if self.paused or await self._should_pause_generation(): print( "[FullyAsyncRollouter][Processor] Received pause signal, waiting for remaining tasks to return..." ) async with self.lock: self.paused = True while self.active_tasks: async with self.lock: # After acquiring the lock, the number of active_tasks may have changed, so it must be verified again if self.active_tasks: done_tasks, self.active_tasks = await asyncio.wait( self.active_tasks, return_when=asyncio.FIRST_COMPLETED ) for task in done_tasks: await task async with self.lock: while self.paused: self.idle_start_time = time.time() await self.condition.wait() continue sample_from_cancel_queue = False if not self.cancel_queue.empty(): rollout_sample = await self.cancel_queue.get() sample_from_cancel_queue = True else: rollout_sample = await self.pending_queue.get() self.staleness_samples += 1 if rollout_sample == "DONE": print( "[FullyAsyncRollouter][Processor] Received end signal, waiting for remaining tasks to complete..." ) while self.active_tasks: async with self.lock: if self.active_tasks: done_tasks, self.active_tasks = await asyncio.wait( self.active_tasks, return_when=asyncio.FIRST_COMPLETED ) for task in done_tasks: await task break # Check whether the number of concurrent tasks exceeds the limit while len(self.active_tasks) >= self.max_concurrent_samples: async with self.lock: if self.active_tasks: done_tasks, self.active_tasks = await asyncio.wait( self.active_tasks, return_when=asyncio.FIRST_COMPLETED ) for task in done_tasks: await task # Submit single sample processing async with self.lock: # After the pause ends and the lock is acquired, check whether we are # still in the pause phase; otherwise continue to wait while self.paused: await self.condition.wait() task = asyncio.create_task( self._process_single_sample_streaming(rollout_sample), name=rollout_sample.sample_id, ) self.active_tasks.add(task) if sample_from_cancel_queue: self.cancel_queue.task_done() else: self.pending_queue.task_done() async def _process_single_sample_streaming(self, rollout_sample: RolloutSample): """Process a single sample in streaming fashion""" # Call the asynchronous generation method rollout_sample.full_batch.non_tensor_batch["param_version"] = [self.current_param_version] * len( rollout_sample.full_batch ) agent_loop_output_list = await self.async_rollout_manager.generate_single_sample_async( rollout_sample.full_batch, rollout_sample.agent_loop_output_list ) rollout_sample.agent_loop_output_list = agent_loop_output_list is_cancel = False for agent_loop in agent_loop_output_list: if not is_cancel and agent_loop.is_cancel: is_cancel = True if is_cancel: # Put in the cancel queue and wait for the generation to resume await self.cancel_queue.put(rollout_sample) else: # put into the result_queue rollout_sample.param_version = self.current_param_version rollout_sample.rollout_status = await self.get_statistics() await self.result_queue.put(rollout_sample) self.processed_sample_count += 1 async def _consumer_worker(self): """ The consumer coroutine is responsible for obtaining the processing results from the result queue and putting them into the message queue """ while True: rollout_sample = await self.result_queue.get() rollout_sample = 
merge_rollout_sample(self.config, self.tokenizer, rollout_sample, self.processor) # Put RolloutSample into the message queue success = await self.message_queue_client.put_sample( sample=ray.cloudpickle.dumps(rollout_sample), param_version=rollout_sample.param_version, ) if success: self.total_generated_samples += 1 else: self.dropped_stale_samples += 1 self.result_queue.task_done() async def _streaming_generation_main(self): """The main entry method for stream processing""" # we start from step 1 self.global_steps += 1 if self.async_rollout_manager is None: await self._init_async_rollout_manager() # Start the streaming loop print(f"[FullyAsyncRollouter] Start streaming mode, maximum concurrent samples: {self.max_concurrent_samples}") # Start sample feed coroutine, streaming process coroutine and consumer coroutine self.feed_task = asyncio.create_task(self._feed_samples()) self.processor_task = asyncio.create_task(self._processor_worker()) self.consumer_task = asyncio.create_task(self._consumer_worker()) try: # Wait for sample feed to complete await self.feed_task print("[FullyAsyncRollouter] Sample feed completed") # Wait for streaming to complete await self.processor_task print("[FullyAsyncRollouter] Streaming process completed") # Wait for the result queue to drain await self.result_queue.join() print("[FullyAsyncRollouter] Result queue cleared") except Exception as e: print(f"[FullyAsyncRollouter] Streaming process exception: {e}") finally: if self.processor_task: self.processor_task.cancel() if self.consumer_task: self.consumer_task.cancel() await asyncio.gather(self.processor_task, self.consumer_task, return_exceptions=True) # Send a finish signal await self.message_queue_client.put_sample( sample=None, param_version=self.current_param_version, ) async with self.lock: self.running = False async def fit(self): """ Start the async rollouter - entry point that sets up and runs async tasks Main async fit method that coordinates all coroutines """ print("[FullyAsyncRollouter] Starting FullyAsyncRollouter...") if self.message_queue_client is None: raise ValueError("MessageQueue client not set. 
Call set_message_queue_client() first.") # Set the running status flag async with self.lock: self.paused = False self.running = True # Create the main asynchronous tasks generation_task = asyncio.create_task(self._streaming_generation_main()) monitor_task = asyncio.create_task(self._async_monitor_loop()) try: # Run generation and monitoring tasks concurrently await asyncio.gather(generation_task, monitor_task, return_exceptions=True) except Exception as e: print(f"[FullyAsyncRollouter] Asynchronous task execution error: {e}") finally: if not generation_task.done(): generation_task.cancel() if not monitor_task.done(): monitor_task.cancel() # Wait for the tasks to complete await asyncio.gather(generation_task, monitor_task, return_exceptions=True) print("[FullyAsyncRollouter] Rollouter fit completed") async def _async_monitor_loop(self): """ Async coroutine for monitoring: Function 1: Log information output Function 2: Trigger rollout recovery """ last_stats_time = time.time() stats_interval = 60.0 check_interval = 10.0 while True: async with self.lock: if not self.running: break await asyncio.sleep(check_interval) # Print statistics periodically current_time = time.time() if current_time - last_stats_time >= stats_interval: stats = await self.get_statistics() print(f"[FullyAsyncRollouter][MonitorLoop][Statistics] {pformat(stats)}") last_stats_time = current_time # Trigger rollout recovery if self.monitor_loop_trigger: if not await self._should_pause_generation(): async with self.lock: self.paused = False self.condition.notify_all() async def _should_pause_generation(self) -> bool: """Determine whether generation should be paused""" queue_stats = self.message_queue_client.get_statistics_sync() queue_size = queue_stats["queue_size"] if queue_size >= self.max_queue_size: if not self.paused: print( f"[FullyAsyncRollouter][ShouldPause] " f"due to full queue: size={queue_size}, max={self.max_queue_size}" ) return True if self.staleness_samples >= self.max_required_samples: if not self.paused: print( "[FullyAsyncRollouter][ShouldPause] " f"due to " f"staleness_samples {self.staleness_samples} >= max_required_samples {self.max_required_samples} " ) return True return False async def pause(self): """Pause rollout""" print("[FullyAsyncRollouter][Public][Pause]") async with self.lock: self.paused = True # Cancel all rollout tasks if self.config.async_training.partial_rollout: await self.async_rollout_manager.cancel() if self.active_tasks: await asyncio.gather(*self.active_tasks, return_exceptions=True) self.active_tasks.clear() print("[FullyAsyncRollouter][Public][Pause] All active tasks completed") await self.async_rollout_manager.reset_prefix_cache() self.monitor_loop_trigger = False async def resume(self, dependency_ref: ObjectRef = None): if dependency_ref is not None: ray.get(dependency_ref) print("[FullyAsyncRollouter][Public][Resume]") async with self.lock: self.paused = False self.monitor_loop_trigger = True self.condition.notify_all() if self.config.async_training.partial_rollout: await self.async_rollout_manager.resume() async def get_statistics(self) -> dict: queue_stats = self.message_queue_client.get_statistics_sync() stats = { # monitor stats "monitor/active_tasks_size": len(self.active_tasks), "monitor/queue/pending_queue_size": self.pending_queue.qsize(), "monitor/queue/cancel_queue_size": self.cancel_queue.qsize(), "monitor/queue/result_queue_size": self.result_queue.qsize(), "monitor/queue/mq_queue_size": queue_stats["queue_size"], # counting stats "count/current_param_version": 
self.current_param_version, "count/total_generated_samples": self.total_generated_samples, "count/staleness_samples": self.staleness_samples, "count/dropped_stale_samples": self.dropped_stale_samples, # static stats "static/max_required_samples": self.max_required_samples, "static/required_samples": self.required_samples, "static/staleness_threshold": self.staleness_threshold, "static/max_queue_size": self.max_queue_size, "static/max_concurrent_samples": self.max_concurrent_samples, } return stats ================================================ FILE: verl_distillation/recipe/fully_async_policy/fully_async_trainer.py ================================================ # Copyright 2025 Meituan Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import time from datetime import datetime from pprint import pprint from typing import Any import ray from omegaconf import OmegaConf from tqdm import tqdm from recipe.fully_async_policy.detach_utils import ( MetricsAggregator, ValidateMetrics, assemble_batch_from_rollout_samples, ) from recipe.fully_async_policy.message_queue import MessageQueueClient from recipe.fully_async_policy.ray_trainer import FullyAsyncRayPPOTrainer from verl.single_controller.ray import RayClassWithInitArgs, RayWorkerGroup from verl.trainer.ppo import core_algos from verl.trainer.ppo.ray_trainer import ResourcePoolManager from verl.trainer.ppo.utils import Role, WorkerType, need_critic, need_reference_policy, need_reward_model from verl.utils.debug import marked_timer @ray.remote(num_cpus=10) class FullyAsyncTrainer(FullyAsyncRayPPOTrainer): """ A fully asynchronous PPO trainer that obtains samples from a MessageQueue for training. 
Based on an improved implementation of OneStepOffRayTrainer """ def __init__( self, config, tokenizer, role_worker_mapping: dict[Role, WorkerType], resource_pool_manager: ResourcePoolManager, ray_worker_group_cls: RayWorkerGroup = RayWorkerGroup, processor=None, reward_fn=None, val_reward_fn=None, device_name=None, ): # Store the tokenizer for text processing self.tokenizer = tokenizer self.processor = processor self.config = config self.reward_fn = reward_fn self.val_reward_fn = val_reward_fn self.hybrid_engine = config.actor_rollout_ref.hybrid_engine assert not self.hybrid_engine self.role_worker_mapping = role_worker_mapping self.resource_pool_manager = resource_pool_manager self.use_reference_policy = need_reference_policy(self.role_worker_mapping) self.use_rm = need_reward_model(self.role_worker_mapping) self.use_critic = need_critic(self.config) self.ray_worker_group_cls = ray_worker_group_cls self.device_name = device_name if device_name else self.config.trainer.device # if ref_in_actor is True, the reference policy will be the actor without LoRA applied self.ref_in_actor = config.actor_rollout_ref.model.get("lora_rank", 0) > 0 # define in-reward KL control # kl loss control currently not supported if self.config.algorithm.use_kl_in_reward: self.kl_ctrl_in_reward = core_algos.get_kl_controller(self.config.algorithm.kl_ctrl) # ==================== fully async config ==================== self.message_queue_client = None self.param_synchronizer = None # Statistics # we start from step 1 self.global_steps = 1 self.local_trigger_step = 1 self.processed_samples = 0 self.stale_samples_processed = 0 self.stale_trajectory_processed = 0 self.current_param_version = 0 self.total_train_steps = None self.progress_bar = None self.trigger_parameter_sync_step = config.async_training.trigger_parameter_sync_step # required_samples uses ppo_mini_batch_size * require_batches as the minimum number of samples. 
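# (Illustrative worked example with assumed values: if ppo_mini_batch_size
# is 32 and require_batches is 4, then required_samples = 32 * 4 = 128, so
# each training step blocks in _get_samples_from_queue until 128
# RolloutSample objects have been drained from the MessageQueue.)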
self.require_batches = config.async_training.require_batches self.required_samples = config.actor_rollout_ref.actor.ppo_mini_batch_size * self.require_batches self.compute_prox_log_prob = self.config.async_training.compute_prox_log_prob total_gpus = ( config.trainer.nnodes * config.trainer.n_gpus_per_node + config.rollout.nnodes * config.rollout.n_gpus_per_node ) self.metrics_aggregator = MetricsAggregator(total_gpus=total_gpus) def set_message_queue_client(self, message_queue_client: MessageQueueClient): """Set message queue client""" self.message_queue_client = message_queue_client def set_parameter_synchronizer(self, param_synchronizer): """Set parameter synchronizer""" self.param_synchronizer = param_synchronizer def set_total_train_steps(self, total_train_steps): self.total_train_steps = total_train_steps self.progress_bar = tqdm(total=self.total_train_steps, initial=0, desc="Training Progress") def get_actor_wg(self): """Get actor worker group""" return self.actor_wg def _get_samples_from_queue(self) -> tuple[None, None] | tuple[int, Any]: """ Get samples from the message queue and compose gen_batch_output. Uses a loop to continuously collect samples until enough are gathered. Returns: tuple: (epoch, batch), or (None, None) if not enough samples were collected """ print( f"[FullyAsyncTrainer] Requesting {self.required_samples} samples from queue", flush=True, ) # Collect samples using a simple loop calling get_sample consumer_start = time.time() queue_samples = [] queue_len = 0 while len(queue_samples) < self.required_samples: # Get a single sample and wait until there is a sample or None is received sample, queue_len = self.message_queue_client.get_sample_sync() if sample is None: print( f"[FullyAsyncTrainer] Detected termination signal (None), stopping sample collection. " f"Collected {len(queue_samples)}/{self.required_samples} samples" ) break queue_samples.append(sample) if len(queue_samples) % 64 == 0: print( f"[FullyAsyncTrainer] Collected {len(queue_samples)}/{self.required_samples} samples. " f"mq_len: {queue_len}" ) consumer_end = time.time() if not queue_samples or len(queue_samples) < self.required_samples: print("[FullyAsyncTrainer] not enough samples collected after loop") return None, None total_wait_time = consumer_end - consumer_start print( f"[FullyAsyncTrainer] Loop collection completed: {len(queue_samples)}/{self.required_samples} samples, " f"total wait time: {total_wait_time:.2f} seconds." 
f"mq_len: {queue_len}" ) queue_samples = [ray.cloudpickle.loads(x) for x in queue_samples] # Assemble batch - now working directly with RolloutSample objects if self.config.trainer.balance_batch: batch = assemble_batch_from_rollout_samples(queue_samples, self.tokenizer, self.config, self._balance_batch) else: batch = assemble_batch_from_rollout_samples(queue_samples, self.tokenizer, self.config, None) batch.meta_info["fully_async/total_wait_time"] = total_wait_time return 0, batch def _create_actor_rollout_classes(self): # create actor for role in [Role.Actor]: resource_pool = self.resource_pool_manager.get_resource_pool(role) role_cls = RayClassWithInitArgs( cls=self.role_worker_mapping[role], config=self.config.actor_rollout_ref, role=str(role), ) self.resource_pool_to_cls[resource_pool][str(role)] = role_cls def _init_models(self): if self.use_critic: self.critic_wg = self.all_wg[str(Role.Critic)] self.critic_wg.init_model() if self.use_reference_policy and not self.ref_in_actor: self.ref_policy_wg = self.all_wg[str(Role.RefPolicy)] self.ref_policy_wg.init_model() if self.use_rm: self.rm_wg = self.all_wg[str(Role.RewardModel)] self.rm_wg.init_model() self.actor_wg = self.all_wg[str(Role.Actor)] self.actor_wg.init_model() self.actor_rollout_wg = self.actor_wg # to be compatible with the functions that not be modified def _init_async_rollout_manager(self): pass def fit(self): """ The training loop of PPO. The driver process only need to call the compute functions of the worker group through RPC to construct the PPO dataflow. The light-weight advantage computation is done on the driver process. """ print("[FullyAsyncTrainer] Starting FullyAsyncTrainer...") if self.message_queue_client is None: raise ValueError("MessageQueue client not set. Call set_message_queue_client() first.") if self.param_synchronizer is None: raise ValueError("param_synchronizer client not set. 
Call set_parameter_synchronizer() first.") from verl.utils.tracking import Tracking self.logger = Tracking( project_name=self.config.trainer.project_name, experiment_name=self.config.trainer.experiment_name, default_backend=self.config.trainer.logger, config=OmegaConf.to_container(self.config, resolve=True), ) self.max_steps_duration = 0 # get validation data before training val_data = self.message_queue_client.get_validate_sync() if val_data: val_data: ValidateMetrics = ray.cloudpickle.loads(val_data) if val_data.metrics: self.logger.log(data=val_data.metrics, step=val_data.param_version) pprint(f"[FullyAsyncTrainer] Initial validation metrics: {val_data.metrics}") self.logger.log(data=val_data.timing_raw, step=val_data.param_version) # Use queue mode, no need for a traditional dataloader iterator # Initialize to get the first batch of data while True: metrics = {} timing_raw = {} with marked_timer("step", timing_raw): with marked_timer("gen", timing_raw, color="red"): epoch, batch = self._get_samples_from_queue() if batch is None: break self._collect_metrics_from_samples(batch, metrics) batch, reward_extra_infos_dict = self._process_batch_common( batch, metrics, timing_raw, self.local_trigger_step if self.compute_prox_log_prob else None ) self._log_rollout(batch, reward_extra_infos_dict, timing_raw) self._check_save_checkpoint(False, timing_raw) self._collect_metrics(batch, 0, metrics, timing_raw) self.metrics_aggregator.add_step_metrics( metrics=metrics, sample_count=self.required_samples, timestamp=time.time() ) # Trigger parameter synchronization after training step time_str = datetime.now().strftime("%H:%M:%S.%f")[:-3] print( f"[FullyAsyncTrainer] global_steps: {self.global_steps} " f"local_trigger_step: {self.local_trigger_step} " f"trigger_parameter_sync_step: {self.trigger_parameter_sync_step} " f"{time_str}" ) self._trigger_parameter_sync_after_step(global_steps=self.global_steps) val_data = self.message_queue_client.get_validate_sync() if val_data: val_data: ValidateMetrics = ray.cloudpickle.loads(val_data) if val_data.metrics: self.logger.log(data=val_data.metrics, step=val_data.param_version) pprint( f"[FullyAsyncTrainer] parameter version: {val_data.param_version} \ Validation metrics: {val_data.metrics}" ) self.logger.log(data=val_data.timing_raw, step=val_data.param_version) self.global_steps += 1 # final parameter sync and validate if val_data is None or val_data.metrics is None: self._trigger_parameter_sync_after_step(validate=True, global_steps=self.global_steps - 1) ray.get(self.param_synchronizer.wait_last_valid.remote()) val_data = self.message_queue_client.get_validate_sync() if val_data: val_data: ValidateMetrics = ray.cloudpickle.loads(val_data) if val_data.metrics: self.logger.log(data=val_data.metrics, step=val_data.param_version) pprint(f"[FullyAsyncTrainer] Final validation metrics: {val_data.metrics}") self.logger.log(data=val_data.timing_raw, step=val_data.param_version) else: pprint(f"[FullyAsyncTrainer] Final validation metrics: {val_data.metrics}") self.progress_bar.close() self._check_save_checkpoint(True, timing_raw) # TODO: check checkpoint def load_checkpoint(self): return self._load_checkpoint() def _collect_metrics_from_samples(self, batch, metrics): """ Collect metrics from samples """ if hasattr(batch, "meta_info") and batch.meta_info: samples_param_versions = batch.meta_info["rollout_param_versions"] stale_count = sum(1 for v in samples_param_versions if self.current_param_version - v >= 1) self.stale_samples_processed += stale_count 
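# (Illustrative example of the staleness rule above, with assumed numbers:
# if current_param_version == 5 and a sample was generated at param version
# 3, then 5 - 3 = 2 >= 1 and the sample counts as stale; the same rule is
# applied per trajectory below.)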
trajectory_param_versions = batch.meta_info["trajectory_param_versions"]
stale_traj_count = sum(1 for v in trajectory_param_versions if self.current_param_version - v >= 1)
self.stale_trajectory_processed += stale_traj_count

metrics.update(
    {
        "fully_async/count/stale_samples_processed": self.stale_samples_processed,
        "fully_async/count/stale_trajectory_processed": self.stale_trajectory_processed,
        "fully_async/count/current_param_version": self.current_param_version,
    }
)
for key, value in batch.meta_info.items():
    if key.startswith("fully_async"):
        metrics[key] = value

def _trigger_parameter_sync_after_step(self, validate: bool = False, global_steps: int = None):
    """
    Trigger parameter synchronization after a training step.
    This ensures the rollouter always uses the latest trained parameters.
    """
    if self.local_trigger_step < self.trigger_parameter_sync_step and not validate:
        self.local_trigger_step += 1
        return
    self.current_param_version += 1
    self.local_trigger_step = 1
    self.logger.log(
        data=self.metrics_aggregator.get_aggregated_metrics(),
        step=self.current_param_version,
    )
    self.progress_bar.update(1)
    self.metrics_aggregator.reset()
    timing_param_sync = {}
    with marked_timer("timing_s/wait_last_valid", timing_param_sync):
        ray.get(self.param_synchronizer.wait_last_valid.remote())
    with marked_timer("timing_s/param_sync", timing_param_sync):
        ray.get(
            self.param_synchronizer.sync_weights.remote(
                self.current_param_version, validate=validate, global_steps=global_steps
            )
        )
    self.logger.log(data=timing_param_sync, step=self.current_param_version)

================================================ FILE: verl_distillation/recipe/fully_async_policy/megatron_worker.py ================================================
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright 2025 Meituan Ltd. and/or its affiliates
# Copyright 2025 NVIDIA Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
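# This module provides the detached actor/rollout workers for the fully async
# recipe: the actor streams its parameters through a per-tensor generator, rank 0
# broadcasts each tensor over the shared "actor_rollout" NCCL collective group,
# and each rollout worker loads the received tensors into its vLLM inference model.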
import logging import os import torch import torch.distributed from omegaconf import DictConfig from verl.single_controller.base.decorator import Dispatch, register from verl.utils.device import ( get_device_name, get_torch_device, ) from verl.utils.megatron_utils import per_tensor_generator from verl.workers.megatron_workers import ActorRolloutRefWorker, AsyncActorRolloutRefWorker, CriticWorker logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) device_name = get_device_name() __all__ = ["DetachActorWorker", "DetachAsyncRolloutWorker", "CriticWorker"] def get_inference_model(rollout): """ get models according to different types of inference_engine Args: rollout: rollout object Returns: model: model object """ inference_engine = rollout.inference_engine if hasattr(inference_engine, "llm_engine"): inference_model = inference_engine.llm_engine.model_executor.driver_worker.worker.model_runner.model elif hasattr(inference_engine, "worker"): inference_model = inference_engine.worker.model_runner.model else: raise AttributeError( f"Unsupported inference_engine type: {type(inference_engine)}. " f"Expected LLM (with llm_engine attribute) or WorkerWrapperBase (with worker attribute)." ) return inference_model class DetachNcclSync(AsyncActorRolloutRefWorker): def _get_actor_params(self): pass @register(dispatch_mode=Dispatch.ONE_TO_ALL, blocking=False) def sync_rollout_weights(self): assert (self._is_actor or self._is_rollout) and not self.config.hybrid_engine assert hasattr(self, "_weights_info") and self._weights_info is not None params_generator = self._get_actor_params_generator() if self._is_actor else None if self._is_rollout: inference_model = get_inference_model(self.rollout) from verl.utils.vllm.patch import patch_vllm_moe_model_weight_loader patch_vllm_moe_model_weight_loader(inference_model) for key, shape, dtype in self._weights_info: if self._is_actor: weight_key, weight = next(params_generator) assert key == weight_key assert shape == weight.size() assert dtype == weight.dtype tensor = torch.empty(shape, dtype=dtype, device=get_torch_device().current_device()) if self._is_actor and torch.distributed.get_rank() == 0: tensor.copy_(weight) from ray.util.collective import collective collective.broadcast(tensor, src_rank=0, group_name="actor_rollout") if self._is_rollout: inference_model.load_weights([(key, tensor)]) class DetachActorWorker(DetachNcclSync): def _get_actor_params_generator(self): assert self._is_actor if self.bridge is not None: generator = self.bridge.export_weights(self.actor.actor_module) else: generator = per_tensor_generator( self.actor.actor_module, self.actor_model_config, self.weight_converter, self.tf_config, self.layer_name_mapping, ) return generator @register(dispatch_mode=Dispatch.ONE_TO_ALL) def get_actor_weights_info(self): assert self._is_actor if hasattr(self, "_weights_info"): return self._weights_info params_generator = self._get_actor_params_generator() ret = [] for key, tensor in params_generator: ret.append((key, tensor.size(), tensor.dtype)) self._weights_info = ret return ret class DetachAsyncRolloutWorker(DetachNcclSync): def __init__(self, config: DictConfig, role: str): print(f"[DetachAsyncRolloutWorker] {DetachAsyncRolloutWorker.__mro__}") ActorRolloutRefWorker.__init__(self, config, role) @register(dispatch_mode=Dispatch.ONE_TO_ALL) def set_actor_weights_info(self, weights_info): assert self._is_rollout self._weights_info = weights_info ================================================ FILE: 
verl_distillation/recipe/fully_async_policy/message_queue.py ================================================
# Copyright 2025 Meituan Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import asyncio
import logging
from collections import deque
from typing import Any

import ray
from omegaconf import DictConfig

logger = logging.getLogger(__name__)


@ray.remote(num_cpus=2, max_concurrency=20)
class MessageQueue:
    """
    Simplified Ray-based asynchronous message queue for communication between Rollouter and Trainer
    """

    def __init__(self, config: DictConfig, max_queue_size: int = 1000):
        self.config = config
        if max_queue_size is None:
            raise ValueError(f"max_queue_size cannot be None, got: {max_queue_size}")
        self.max_queue_size = int(max_queue_size)
        self.queue = deque(maxlen=self.max_queue_size)
        self.current_param_version = 0
        self.val_queue = deque()
        try:
            if hasattr(config, "async_training") and config.async_training is not None:
                self.staleness_threshold = getattr(config.async_training, "staleness_threshold", 3)
            else:
                self.staleness_threshold = 3
        except (AttributeError, RecursionError):
            self.staleness_threshold = 3
        # Asyncio for message handling
        self.running = True  # async safe
        self._lock = asyncio.Lock()
        self._consumer_condition = asyncio.Condition(self._lock)
        # statistics
        self.total_produced = 0
        self.total_consumed = 0
        self.dropped_samples = 0
        print(
            f"[MessageQueue] initialized with max_queue_size={max_queue_size}, "
            f"staleness_threshold={self.staleness_threshold}"
        )

    async def put_sample(self, sample: Any, param_version: int) -> bool:
        """
        Put a batch sample into the queue

        Args:
            sample: Sample data
            param_version: Parameter version number

        Returns:
            bool: Whether the sample was successfully put into the queue
        """
        async with self._lock:
            # If queue is full, remove the oldest sample (rarely happens)
            is_drop = False
            if len(self.queue) >= self.max_queue_size:
                self.queue.popleft()
                self.dropped_samples += 1
                is_drop = True
                logger.warning("Queue full, dropped sample")
            self.queue.append(sample)
            self.total_produced += 1
            # Notify waiting consumers
            self._consumer_condition.notify_all()
            if self.total_produced % 100 == 0:
                print(f"MessageQueue stats: produced={self.total_produced}, queue_size={len(self.queue)}")
            if is_drop:
                return False
            return True

    async def get_sample(self) -> Any | None:
        """
        Get a single sample from the queue, waiting until one is available

        Returns:
            tuple[Any, int] | None: (sample, remaining queue length), or None if the queue is closed
        """
        async with self._lock:
            while len(self.queue) == 0 and self.running:
                await self._consumer_condition.wait()
            # If queue is closed and empty, return None
            if not self.running and len(self.queue) == 0:
                return None
            # Get one sample
            data = self.queue.popleft()
            self.total_consumed += 1
            return data, len(self.queue)

    async def update_param_version(self, version: int):
        """Update current parameter version"""
        async with self._lock:
            old_version = self.current_param_version
            self.current_param_version = version
            print(f"Parameter version updated from {old_version} to {version}")

    async def
get_queue_size(self) -> int: """Get current queue length""" async with self._lock: return len(self.queue) async def get_statistics(self) -> dict[str, Any]: """Get queue statistics""" async with self._lock: return { "queue_size": len(self.queue), "total_produced": self.total_produced, "total_consumed": self.total_consumed, "dropped_samples": self.dropped_samples, "current_param_version": self.current_param_version, "staleness_threshold": self.staleness_threshold, "max_queue_size": self.max_queue_size, } async def clear_queue(self): """Clear the queue""" async with self._lock: cleared_count = len(self.queue) self.queue.clear() logger.info(f"Cleared {cleared_count} samples from queue") async def shutdown(self): """Shutdown the message queue""" async with self._lock: self.running = False # Notify all waiting coroutines so they can exit self._consumer_condition.notify_all() logger.info("MessageQueue shutdown") async def get_memory_usage(self) -> dict: """Get memory usage statistics""" async with self._lock: # Estimate memory usage of samples in queue import sys total_size = 0 sample_count = len(self.queue) if sample_count > 0: # Estimate size of a single sample (simplified estimation) sample = list(self.queue)[0] try: sample_size = sys.getsizeof(sample) # Since we now store RolloutSample directly, estimate based on its components if hasattr(sample, "original_batch_dict") and sample.original_batch_dict: # Estimate batch data size batch_data = sample.original_batch_dict.get("batch", {}) sample_size += len(batch_data) * 1000 # Roughly estimate 1KB per batch entry if hasattr(sample, "agent_loop_output"): # Estimate AgentLoopOutput size sample_size += 5000 # Roughly estimate 5KB for AgentLoopOutput total_size = sample_size * sample_count except Exception: total_size = sample_count * 15000 # Roughly estimate 15KB per RolloutSample return { "queue_samples": sample_count, "estimated_memory_bytes": total_size, "estimated_memory_mb": total_size / (1024 * 1024), } async def put_validate(self, data): async with self._lock: self.val_queue.append(data) async def get_validate(self): async with self._lock: if self.val_queue: return self.val_queue.popleft() else: return None class MessageQueueClient: """Asyncio-compatible MessageQueue client for communicating with MessageQueue Actor""" def __init__(self, queue_actor: Any): self.queue_actor = queue_actor async def put_sample(self, sample: Any, param_version: int) -> bool: """Put batch into queue (async)""" future = self.queue_actor.put_sample.remote(sample, param_version) return await asyncio.wrap_future(future.future()) async def put_validate(self, data: Any) -> bool: future = self.queue_actor.put_validate.remote(data) return await asyncio.wrap_future(future.future()) def get_validate_sync(self) -> Any | None: return ray.get(self.queue_actor.get_validate.remote()) async def get_sample(self) -> Any | None: """Get single sample from queue, wait until one is available (async)""" future = self.queue_actor.get_sample.remote() return await asyncio.wrap_future(future.future()) async def get_queue_size(self) -> int: """Get queue size (async)""" future = self.queue_actor.get_queue_size.remote() return await asyncio.wrap_future(future.future()) async def get_statistics(self) -> dict[str, Any]: """Get statistics (async)""" future = self.queue_actor.get_statistics.remote() return await asyncio.wrap_future(future.future()) async def clear_queue(self): """Clear queue (async)""" future = self.queue_actor.clear_queue.remote() await asyncio.wrap_future(future.future()) async 
def shutdown(self):
    """Shutdown queue (async)"""
    future = self.queue_actor.shutdown.remote()
    await asyncio.wrap_future(future.future())

async def get_memory_usage(self) -> dict:
    """Get memory usage statistics (async)"""
    future = self.queue_actor.get_memory_usage.remote()
    return await asyncio.wrap_future(future.future())

# Synchronous versions of the methods above (deprecated)
def put_sample_sync(self, sample: Any, param_version: int) -> bool:
    """Put batch into queue (sync - deprecated, use put_sample instead)"""
    return ray.get(self.queue_actor.put_sample.remote(sample, param_version))

def get_sample_sync(self) -> Any | None:
    """Get single sample from queue (sync - deprecated, use get_sample instead)"""
    return ray.get(self.queue_actor.get_sample.remote())

def get_statistics_sync(self) -> dict[str, Any]:
    """Get statistics (sync - deprecated, use get_statistics instead)"""
    return ray.get(self.queue_actor.get_statistics.remote())

def update_param_version_sync(self, version: int):
    """Update parameter version (sync)"""
    return ray.get(self.queue_actor.update_param_version.remote(version))

================================================ FILE: verl_distillation/recipe/fully_async_policy/param_sync.py ================================================
# Copyright 2025 Meituan Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
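# ParameterSynchronizer below is a Ray actor that pauses the rollouter, bumps the
# parameter version in the MessageQueue, NCCL-broadcasts the actor weights to the
# rollout workers, and then resumes the rollouter asynchronously. A rough usage
# sketch (the actual wiring presumably lives in fully_async_main):
#
#   synchronizer = ParameterSynchronizer.remote(config, trainer, rollouter, mq_client)
#   ray.get(synchronizer.sync_weights.remote(version=1, validate=False, global_steps=0))
#   ray.get(synchronizer.wait_last_valid.remote())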
import logging import time import ray from ray.util.collective import collective logger = logging.getLogger(__name__) @ray.remote class ParameterSynchronizer: """ Unified parameter synchronizer, responsible for synchronizing model parameters between actor and rollout Based on the mature synchronization mode implementation of one_step_off_policy Merges the functions of the original multiple synchronizer classes """ def __init__(self, config, trainer, rollouter, mq): self.config = config self.trainer = trainer self.rollouter = rollouter self.mq_client = mq self.actor_wg = ray.get(trainer.get_actor_wg.remote()) self.rollout_wg = ray.get(rollouter.get_rollout_wg.remote()) # Basic attributes self.weights_info = None self.sync_group_initialized = False self.sync_group_name = "actor_rollout" self.wait_last_update = None self.wait_last_resume = None # Statistics self.current_version = 0 self._init_weights_info() self._init_sync_group() def get_current_param_version(self) -> int: """Get current parameter version number""" return self.current_version def get_weights_info(self): """Get weights info""" return self.weights_info def _init_weights_info(self): self.weights_info = self.actor_wg.get_actor_weights_info()[0] self.rollout_wg.set_actor_weights_info(self.weights_info) def _init_sync_group(self): print("[ParameterSynchronizer] Initializing parameter synchronization group...") actor_rollout_workers = self.actor_wg.workers + self.rollout_wg.workers collective.create_collective_group( actor_rollout_workers, len(actor_rollout_workers), list(range(0, len(actor_rollout_workers))), backend="nccl", group_name=self.sync_group_name, ) def sync_weights(self, version, validate=False, global_steps=0): """Sync weights between trainer and rollouter, and update parameter version""" start_time = time.time() self.current_version = version print(f"[ParameterSynchronizer] Starting weight synchronization (version {self.current_version})...") ray.get(self.rollouter.pause.remote()) # Update MQ version self.mq_client.update_param_version_sync(version) # sync weights self.actor_wg.sync_rollout_weights() ray.get(self.rollout_wg.sync_rollout_weights()) end_time = time.time() print(f"[ParameterSynchronizer] sync_weights success. cost {end_time - start_time:.2f} seconds") # Async Update rollout version & validation self.wait_last_update = self.rollouter.update_param_version.remote(version, validate, global_steps) self.wait_last_resume = self.rollouter.resume.remote(self.wait_last_update) def wait_last_valid(self): print("[ParameterSynchronizer] Waiting last sync and validate...") start_time = time.time() if self.wait_last_update: ray.get(self.wait_last_update) if self.wait_last_resume: ray.get(self.wait_last_resume) print(f"[ParameterSynchronizer] Wait last validate cost: {time.time() - start_time:.2f} seconds") ================================================ FILE: verl_distillation/recipe/fully_async_policy/ray_trainer.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
PPO Trainer with Ray-based single controller.
This trainer supports model-agnostic model initialization with HuggingFace.
"""

import uuid
from copy import deepcopy
from pprint import pprint

import numpy as np
import ray
import torch
from omegaconf import OmegaConf
from tqdm import tqdm

from verl import DataProto
from verl.experimental.dataset.sampler import AbstractCurriculumSampler
from verl.single_controller.ray import RayClassWithInitArgs
from verl.single_controller.ray.base import create_colocated_worker_cls
from verl.trainer.ppo.core_algos import AdvantageEstimator, agg_loss
from verl.trainer.ppo.metric_utils import (
    compute_data_metrics,
    compute_throughout_metrics,
    compute_timing_metrics,
)
from verl.trainer.ppo.ray_trainer import RayPPOTrainer, apply_kl_penalty, compute_advantage, compute_response_mask
from verl.trainer.ppo.reward import compute_reward, compute_reward_async
from verl.trainer.ppo.utils import Role
from verl.utils.checkpoint.checkpoint_manager import should_save_ckpt_esi
from verl.utils.config import omega_conf_to_dataclass
from verl.utils.debug import marked_timer
from verl.utils.metric import (
    reduce_metrics,
)
from verl.utils.rollout_skip import RolloutSkip


class FullyAsyncRayPPOTrainer(RayPPOTrainer):
    def init_workers(self):
        """Initialize distributed training workers using Ray backend.

        Creates:
        1. Ray resource pools from configuration
        2. Worker groups for each role (actor, critic, etc.)
""" self._init_resource_pools() self._create_worker_classes() self._init_worker_groups() self._init_models() self._init_async_rollout_manager() def _init_resource_pools(self): self.resource_pool_manager.create_resource_pool() self.resource_pool_to_cls = {pool: {} for pool in self.resource_pool_manager.resource_pool_dict.values()} def _create_worker_classes(self): self._create_actor_rollout_classes() self._create_critic_class() self._create_reference_policy_class() self._create_reward_model_class() def _create_actor_rollout_classes(self): raise NotImplementedError def _create_critic_class(self): # create critic if self.use_critic: resource_pool = self.resource_pool_manager.get_resource_pool(Role.Critic) critic_cfg = omega_conf_to_dataclass(self.config.critic) critic_cls = RayClassWithInitArgs(cls=self.role_worker_mapping[Role.Critic], config=critic_cfg) self.resource_pool_to_cls[resource_pool][str(Role.Critic)] = critic_cls def _create_reference_policy_class(self): # create reference policy if needed if self.use_reference_policy: resource_pool = self.resource_pool_manager.get_resource_pool(Role.RefPolicy) ref_policy_cls = RayClassWithInitArgs( self.role_worker_mapping[Role.RefPolicy], config=self.config.actor_rollout_ref, role=str(Role.RefPolicy), # profile_option=self.config.trainer.npu_profile.options, ) self.resource_pool_to_cls[resource_pool][str(Role.RefPolicy)] = ref_policy_cls def _create_reward_model_class(self): # create a reward model if reward_fn is None if self.use_rm: # we create a RM here resource_pool = self.resource_pool_manager.get_resource_pool(Role.RewardModel) rm_cls = RayClassWithInitArgs(self.role_worker_mapping[Role.RewardModel], config=self.config.reward_model) self.resource_pool_to_cls[resource_pool][str(Role.RewardModel)] = rm_cls def _init_worker_groups(self): # initialize WorkerGroup # NOTE: if you want to use a different resource pool for each role, which can support different parallel size, # you should not use `create_colocated_worker_cls`. # Instead, directly pass different resource pool to different worker groups. # See https://github.com/volcengine/verl/blob/master/examples/ray/tutorial.ipynb for more information. 
all_wg = {}
wg_kwargs = {}  # Setting up kwargs for RayWorkerGroup
if OmegaConf.select(self.config.trainer, "ray_wait_register_center_timeout") is not None:
    wg_kwargs["ray_wait_register_center_timeout"] = self.config.trainer.ray_wait_register_center_timeout
if OmegaConf.select(self.config.global_profiler, "steps") is not None:
    wg_kwargs["profile_steps"] = OmegaConf.select(self.config.global_profiler, "steps")
    # Only require nsight worker options when tool is nsys
    if OmegaConf.select(self.config.global_profiler, "tool") == "nsys":
        assert (
            OmegaConf.select(self.config.global_profiler.global_tool_config.nsys, "worker_nsight_options")
            is not None
        ), "worker_nsight_options must be set when using nsys with profile_steps"
        wg_kwargs["worker_nsight_options"] = OmegaConf.to_container(
            OmegaConf.select(self.config.global_profiler.global_tool_config.nsys, "worker_nsight_options")
        )
wg_kwargs["device_name"] = self.device_name

for resource_pool, class_dict in self.resource_pool_to_cls.items():
    worker_dict_cls = create_colocated_worker_cls(class_dict=class_dict)
    wg_dict = self.ray_worker_group_cls(
        resource_pool=resource_pool,
        ray_cls_with_init=worker_dict_cls,
        **wg_kwargs,
    )
    spawn_wg = wg_dict.spawn(prefix_set=class_dict.keys())
    all_wg.update(spawn_wg)
self.all_wg = all_wg

def _init_models(self):
    if self.use_critic:
        self.critic_wg = self.all_wg[str(Role.Critic)]
        self.critic_wg.init_model()
    if self.use_reference_policy and not self.ref_in_actor:
        self.ref_policy_wg = self.all_wg[str(Role.RefPolicy)]
        self.ref_policy_wg.init_model()
    if self.use_rm:
        self.rm_wg = self.all_wg[str(Role.RewardModel)]
        self.rm_wg.init_model()
    # we should create rollout at the end so that vllm can have a better estimation of kv cache memory
    self.actor_rollout_wg = self.all_wg[str(Role.ActorRollout)]
    self.actor_rollout_wg.init_model()

def _init_async_rollout_manager(self):
    pass

def fit(self):
    """
    The training loop of PPO.
    The driver process only needs to call the compute functions of the worker group
    through RPC to construct the PPO dataflow. The lightweight advantage computation
    is done on the driver process.
    """
    from omegaconf import OmegaConf

    from verl.utils.tracking import Tracking

    logger = Tracking(
        project_name=self.config.trainer.project_name,
        experiment_name=self.config.trainer.experiment_name,
        default_backend=self.config.trainer.logger,
        config=OmegaConf.to_container(self.config, resolve=True),
    )

    self.global_steps = 0

    # load checkpoint before doing anything
    self._load_checkpoint()

    # perform validation before training
    # currently, we only support validation using the reward_function.
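# Note: val_before_train defaults to True here; set trainer.val_before_train=False
# to skip this initial evaluation (as some of the shell recipes below do).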
if self.val_reward_fn is not None and self.config.trainer.get("val_before_train", True): val_metrics = self._validate() assert val_metrics, f"{val_metrics=}" pprint(f"Initial validation metrics: {val_metrics}") logger.log(data=val_metrics, step=self.global_steps) if self.config.trainer.get("val_only", False): return if self.config.actor_rollout_ref.rollout.get("skip_rollout", False): rollout_skip = RolloutSkip(self.config, self.actor_rollout_wg) rollout_skip.wrap_generate_sequences() # add tqdm progress_bar = tqdm(total=self.total_training_steps, initial=self.global_steps, desc="Training Progress") # we start from step 1 self.global_steps += 1 last_val_metrics = None self.max_steps_duration = 0 prev_step_profile = False curr_step_profile = ( self.global_steps in self.config.global_profiler.steps if self.config.global_profiler.steps is not None else False ) next_step_profile = False for epoch in range(self.config.trainer.total_epochs): for batch_dict in self.train_dataloader: metrics = {} timing_raw = {} with marked_timer("start_profile", timing_raw): self._start_profiling( not prev_step_profile and curr_step_profile if self.config.global_profiler.profile_continuous_steps else curr_step_profile ) batch, gen_batch = self._prepare_generate_batch(batch_dict) is_last_step = self.global_steps >= self.total_training_steps with marked_timer("step", timing_raw): # generate a batch with marked_timer("gen", timing_raw, color="red"): if not self.async_rollout_mode: gen_batch_output = self.actor_rollout_wg.generate_sequences(gen_batch) else: gen_batch_output = self.async_rollout_manager.generate_sequences(gen_batch) timing_raw.update(gen_batch_output.meta_info["timing"]) gen_batch_output.meta_info.pop("timing", None) if self.config.algorithm.adv_estimator == AdvantageEstimator.REMAX: if self.reward_fn is None: raise ValueError("A reward_fn is required for REMAX advantage estimation.") with marked_timer("gen_max", timing_raw, color="purple"): gen_baseline_batch = deepcopy(gen_batch) gen_baseline_batch.meta_info["do_sample"] = False if not self.async_rollout_mode: gen_baseline_output = self.actor_rollout_wg.generate_sequences(gen_baseline_batch) else: gen_baseline_output = self.async_rollout_manager.generate_sequences(gen_baseline_batch) batch = batch.union(gen_baseline_output) reward_baseline_tensor = self.reward_fn(batch) reward_baseline_tensor = reward_baseline_tensor.sum(dim=-1) batch.pop(batch_keys=list(gen_baseline_output.batch.keys())) batch.batch["reward_baselines"] = reward_baseline_tensor del gen_baseline_batch, gen_baseline_output batch = self._post_generate_batch(batch, gen_batch_output, metrics) batch, reward_extra_infos_dict = self._process_batch_common(batch, metrics, timing_raw) self._log_rollout(batch, reward_extra_infos_dict, timing_raw) last_val_metrics = self._validate_metrics(is_last_step, last_val_metrics, metrics, timing_raw) self._check_save_checkpoint(is_last_step, timing_raw) with marked_timer("stop_profile", timing_raw): next_step_profile = ( self.global_steps + 1 in self.config.global_profiler.steps if self.config.global_profiler.steps is not None else False ) self._stop_profiling( curr_step_profile and not next_step_profile if self.config.global_profiler.profile_continuous_steps else curr_step_profile ) prev_step_profile = curr_step_profile curr_step_profile = next_step_profile self._collect_metrics(batch, epoch, metrics, timing_raw) self._post_batch_processing(batch) # TODO: make a canonical logger that supports various backend logger.log(data=metrics, 
step=self.global_steps)

progress_bar.update(1)
self.global_steps += 1

if (
    hasattr(self.config.actor_rollout_ref.actor, "profiler")
    and self.config.actor_rollout_ref.actor.profiler.tool == "torch_memory"
):
    self.actor_rollout_wg.dump_memory_snapshot(
        tag=f"post_update_step{self.global_steps}", sub_dir=f"step{self.global_steps}"
    )

if is_last_step:
    pprint(f"Final validation metrics: {last_val_metrics}")
    progress_bar.close()
    return

def _prepare_generate_batch(self, batch_dict):
    batch: DataProto = DataProto.from_single_dict(batch_dict)
    # add uid to batch
    batch.non_tensor_batch["uid"] = np.array([str(uuid.uuid4()) for _ in range(len(batch.batch))], dtype=object)
    gen_batch = self._get_gen_batch(batch)
    # pass global_steps to trace
    gen_batch.meta_info["global_steps"] = self.global_steps
    gen_batch = gen_batch.repeat(repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True)
    return batch, gen_batch

def _post_generate_batch(self, batch, gen_batch_output, metrics):
    # repeat to align with repeated responses in rollout
    batch = batch.repeat(repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True)
    batch = batch.union(gen_batch_output)
    if "response_mask" not in batch.batch.keys():
        batch.batch["response_mask"] = compute_response_mask(batch)
    # Balance the number of valid tokens across DP ranks.
    # NOTE: This usually changes the order of data in the `batch`,
    # which won't affect the advantage calculation (since it's based on uid),
    # but might affect the loss calculation (due to the change of mini-batching).
    # TODO: Decouple the DP balancing and mini-batching.
    if self.config.trainer.balance_batch:
        self._balance_batch(batch, metrics=metrics)
    # compute global_valid tokens
    batch.meta_info["global_token_num"] = torch.sum(batch.batch["attention_mask"], dim=-1).tolist()
    return batch

def _process_batch_common(self, batch, metrics, timing_raw, local_trigger_step=None):
    with marked_timer("reward", timing_raw, color="yellow"):
        # compute reward model score
        if self.use_rm:
            reward_tensor = self.rm_wg.compute_rm_score(batch)
            batch = batch.union(reward_tensor)
        if self.config.reward_model.launch_reward_fn_async:
            future_reward = compute_reward_async.remote(data=batch, reward_fn=self.reward_fn)
        else:
            reward_tensor, reward_extra_infos_dict = compute_reward(batch, self.reward_fn)

    with marked_timer("old_log_prob", timing_raw, color="blue"):

        def compute_old_log_prob(batch):
            old_log_prob = self.actor_rollout_wg.compute_log_prob(batch)
            entropys = old_log_prob.batch["entropys"]
            response_masks = batch.batch["response_mask"]
            loss_agg_mode = self.config.actor_rollout_ref.actor.loss_agg_mode
            entropy_agg = agg_loss(loss_mat=entropys, loss_mask=response_masks, loss_agg_mode=loss_agg_mode)
            old_log_prob_metrics = {"actor/entropy": entropy_agg.detach().item()}
            metrics.update(old_log_prob_metrics)
            old_log_prob.batch.pop("entropys")
            batch = batch.union(old_log_prob)
            if "rollout_log_probs" in batch.batch.keys():
                # TODO: we may want to add diff of probs too.
                from verl.utils.debug.metrics import calculate_debug_metrics

                metrics.update(calculate_debug_metrics(batch))
            return batch

        async_training = self.config.get("async_training", None)
        if async_training and async_training.use_rollout_log_probs:
            # If local_trigger_step == 1, load the training engine's parameters to the CPU
            # and save a copy for subsequent MIS use.
            # If local_trigger_step == 2, 3, ..., restore the parameters of version 1 to
            # calculate the old_log_prob, then restore the parameters of the current version.
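# Example timeline of the bookkeeping above, assuming trigger_parameter_sync_step = 3:
#   local_trigger_step 1: save the current weights to CPU as the version-1 copy,
#                         then compute old_log_prob with the live weights;
#   local_trigger_step 2..3: save the current weights, restore the version-1 copy,
#                         compute old_log_prob under it, then restore the current
#                         weights from the just-saved copy and free that copy.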
if local_trigger_step == 1: self.actor_rollout_wg.save_model_to_cpu(1) batch = compute_old_log_prob(batch) elif local_trigger_step is not None: self.actor_rollout_wg.save_model_to_cpu(local_trigger_step) self.actor_rollout_wg.restore_model_from_cpu(1) batch = compute_old_log_prob(batch) self.actor_rollout_wg.restore_model_from_cpu(local_trigger_step) self.actor_rollout_wg.clear_cpu_model(local_trigger_step) else: batch.batch["old_log_probs"] = batch.batch["rollout_log_probs"] batch.meta_info["temperature"] = self.config.actor_rollout_ref.rollout.temperature else: batch = compute_old_log_prob(batch) if self.use_reference_policy: # compute reference log_prob with marked_timer("ref", timing_raw, color="olive"): if not self.ref_in_actor: ref_log_prob = self.ref_policy_wg.compute_ref_log_prob(batch) else: ref_log_prob = self.actor_rollout_wg.compute_ref_log_prob(batch) batch = batch.union(ref_log_prob) # compute values if self.use_critic: with marked_timer("values", timing_raw, color="cyan"): values = self.critic_wg.compute_values(batch) batch = batch.union(values) with marked_timer("adv", timing_raw, color="brown"): # we combine with rule-based rm reward_extra_infos_dict: dict[str, list] if self.config.reward_model.launch_reward_fn_async: reward_tensor, reward_extra_infos_dict = ray.get(future_reward) batch.batch["token_level_scores"] = reward_tensor if reward_extra_infos_dict: batch.non_tensor_batch.update({k: np.array(v) for k, v in reward_extra_infos_dict.items()}) # compute rewards. apply_kl_penalty if available if self.config.algorithm.use_kl_in_reward: batch, kl_metrics = apply_kl_penalty( batch, kl_ctrl=self.kl_ctrl_in_reward, kl_penalty=self.config.algorithm.kl_penalty ) metrics.update(kl_metrics) else: batch.batch["token_level_rewards"] = batch.batch["token_level_scores"] # Compute rollout importance sampling weights centrally (once per batch) # This corrects for mismatch between rollout policy and training policy # Also computes mismatch metrics (KL, PPL, etc.) 
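# Rollout importance weighting corrects each token's loss for the gap between the
# rollout policy and the current training policy; conceptually the per-token weight
# is roughly exp(old_log_prob - rollout_log_prob), typically clipped/truncated for
# stability (see compute_rollout_importance_weights_and_add_to_batch for details).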
batch, is_metrics = self.compute_rollout_importance_weights_and_add_to_batch(batch) # IS and mismatch metrics already have mismatch/ prefix metrics.update(is_metrics) # compute advantages, executed on the driver process norm_adv_by_std_in_grpo = self.config.algorithm.get( "norm_adv_by_std_in_grpo", True ) # GRPO adv normalization factor batch = compute_advantage( batch, adv_estimator=self.config.algorithm.adv_estimator, gamma=self.config.algorithm.gamma, lam=self.config.algorithm.lam, num_repeat=self.config.actor_rollout_ref.rollout.n, norm_adv_by_std_in_grpo=norm_adv_by_std_in_grpo, config=self.config.algorithm, ) # update critic if self.use_critic: with marked_timer("update_critic", timing_raw, color="pink"): critic_output = self.critic_wg.update_critic(batch) critic_output_metrics = reduce_metrics(critic_output.meta_info["metrics"]) metrics.update(critic_output_metrics) # implement critic warmup if self.config.trainer.critic_warmup <= self.global_steps: # update actor with marked_timer("update_actor", timing_raw, color="red"): batch.meta_info["multi_turn"] = self.config.actor_rollout_ref.rollout.multi_turn.enable actor_output = self.actor_rollout_wg.update_actor(batch) actor_output_metrics = reduce_metrics(actor_output.meta_info["metrics"]) metrics.update(actor_output_metrics) return batch, reward_extra_infos_dict def _log_rollout(self, batch, reward_extra_infos_dict, timing_raw): # Log rollout generations if enabled rollout_data_dir = self.config.trainer.get("rollout_data_dir", None) if rollout_data_dir: with marked_timer("dump_rollout_generations", timing_raw, color="green"): inputs = self.tokenizer.batch_decode(batch.batch["prompts"], skip_special_tokens=True) outputs = self.tokenizer.batch_decode(batch.batch["responses"], skip_special_tokens=True) scores = batch.batch["token_level_scores"].sum(-1).cpu().tolist() sample_gts = [item.non_tensor_batch.get("reward_model", {}).get("ground_truth", None) for item in batch] if "request_id" in batch.non_tensor_batch: reward_extra_infos_dict.setdefault( "request_id", batch.non_tensor_batch["request_id"].tolist(), ) self._dump_generations( inputs=inputs, outputs=outputs, gts=sample_gts, scores=scores, reward_extra_infos_dict=reward_extra_infos_dict, dump_path=rollout_data_dir, ) def _validate_metrics(self, is_last_step, last_val_metrics, metrics, timing_raw): if ( self.val_reward_fn is not None and self.config.trainer.test_freq > 0 and (is_last_step or self.global_steps % self.config.trainer.test_freq == 0) ): with marked_timer("testing", timing_raw, color="green"): val_metrics: dict = self._validate() if is_last_step: last_val_metrics = val_metrics metrics.update(val_metrics) return last_val_metrics def _check_save_checkpoint(self, is_last_step, timing_raw): # Check if the ESI (Elastic Server Instance)/training plan is close to expiration. esi_close_to_expiration = should_save_ckpt_esi( max_steps_duration=self.max_steps_duration, redundant_time=self.config.trainer.esi_redundant_time, ) # Check if the conditions for saving a checkpoint are met. # The conditions include a mandatory condition (1) and # one of the following optional conditions (2/3/4): # 1. The save frequency is set to a positive value. # 2. It's the last training step. # 3. The current step number is a multiple of the save frequency. # 4. The ESI(Elastic Server Instance)/training plan is close to expiration. 
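# Worked example: with trainer.save_freq=20, checkpoints are written at steps 20, 40, ...,
# on the last step, and whenever ESI expiration forces an early save; save_freq <= 0
# disables periodic checkpointing entirely.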
if self.config.trainer.save_freq > 0 and ( is_last_step or self.global_steps % self.config.trainer.save_freq == 0 or esi_close_to_expiration ): if esi_close_to_expiration: print("Force saving checkpoint: ESI instance expiration approaching.") with marked_timer("save_checkpoint", timing_raw, color="green"): self._save_checkpoint() def _collect_metrics(self, batch, epoch, metrics, timing_raw): steps_duration = timing_raw["step"] self.max_steps_duration = max(self.max_steps_duration, steps_duration) # training metrics metrics.update( { "training/global_step": self.global_steps, "training/epoch": epoch, } ) # collect metrics metrics.update(compute_data_metrics(batch=batch, use_critic=self.use_critic)) metrics.update(compute_timing_metrics(batch=batch, timing_raw=timing_raw)) # TODO: implement actual tflpo and theoretical tflpo n_gpus = self.resource_pool_manager.get_n_gpus() metrics.update(compute_throughout_metrics(batch=batch, timing_raw=timing_raw, n_gpus=n_gpus)) def _post_batch_processing(self, batch: DataProto): # this is experimental and may be changed/removed in the future in favor of a general-purpose one if isinstance(self.train_dataloader.sampler, AbstractCurriculumSampler): self.train_dataloader.sampler.update(batch=batch) # this is experimental and may be changed/removed in the future # in favor of a general-purpose data buffer pool if hasattr(self.train_dataset, "on_batch_end"): # The dataset may be changed after each training batch self.train_dataset.on_batch_end(batch=batch) ================================================ FILE: verl_distillation/recipe/fully_async_policy/shell/dapo_7b_math_fsdp2_16_16.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail project_name='DAPO' exp_name='dapo_qwen2-7B-math_28k_fsdp2_fully-async_16-16' # Ray # RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} # WORKING_DIR=${WORKING_DIR:-"${PWD}"} # RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} # very important! 
please modify the max_position_embeddings in config.json to 32768 after downloading from huggingface MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen2.5-Math-7B"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"} TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"} rollout_mode="async" rollout_name="vllm" # sglang or vllm if [ "$rollout_mode" = "async" ]; then export VLLM_USE_V1=1 return_raw_chat="True" fi # Algorithm parameters adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 # Response length parameters max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 28)) enable_overlong_buffer=True overlong_buffer_len=$((1024 * 4)) overlong_penalty_factor=1.0 # Training parameters loss_agg_mode="token-mean" # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_top_p=0.7 # Performance Related Parameter use_dynamic_bsz=True actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 2)) infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 3)) ref_offload=True actor_offload=False gen_tp=4 sp_size=4 fsdp_size=8 # Fully async specific parameters NNODES_ROLLOUT=${NNODES_ROLLOUT:-2} NNODES_TRAIN=${NNODES_TRAIN:-2} NGPUS_PER_NODE=${NGPUS_PER_NODE:-8} train_prompt_bsz=0 gen_prompt_bsz=1 n_resp_per_prompt=16 train_prompt_mini_bsz=32 total_rollout_steps=$(((512*400))) test_freq=20 staleness_threshold=0.1 trigger_parameter_sync_step=4 require_batches=4 partial_rollout=True python -m recipe.fully_async_policy.fully_async_main \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.train_batch_size=${train_prompt_bsz} \ data.gen_batch_size=${gen_prompt_bsz} \ data.return_raw_chat=${return_raw_chat} \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.actor.strategy=fsdp2 \ critic.strategy=fsdp2 \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.hybrid_engine=False \ +actor_rollout_ref.model.override_config.max_position_embeddings=32768 \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.fsdp_config.param_offload=${actor_offload} \ 
actor_rollout_ref.actor.fsdp_config.optimizer_offload=${actor_offload} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.rollout.gpu_memory_utilization=0.80 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=True \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.ref.fsdp_config.param_offload=${ref_offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.actor.fsdp_config.fsdp_size=${fsdp_size} \ actor_rollout_ref.rollout.name=${rollout_name} \ actor_rollout_ref.rollout.mode=${rollout_mode} \ actor_rollout_ref.rollout.calculate_log_probs=True \ reward_model.reward_manager=dapo \ +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \ +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \ +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \ +reward_model.reward_kwargs.overlong_buffer_cfg.log=False \ +reward_model.reward_kwargs.max_resp_len=${max_response_length} \ trainer.logger=['console','tensorboard'] \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.val_before_train=True \ trainer.save_freq=-1 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=auto \ trainer.nnodes="${NNODES_TRAIN}" \ trainer.n_gpus_per_node="${NGPUS_PER_NODE}" \ rollout.nnodes="${NNODES_ROLLOUT}" \ rollout.n_gpus_per_node="${NGPUS_PER_NODE}" \ rollout.total_rollout_steps="${total_rollout_steps}" \ rollout.total_epochs=10 \ rollout.test_freq="${test_freq}" \ async_training.staleness_threshold="${staleness_threshold}" \ async_training.trigger_parameter_sync_step="${trigger_parameter_sync_step}" \ async_training.require_batches="${require_batches}" \ async_training.partial_rollout="${partial_rollout}" \ async_training.use_rollout_log_probs=True ================================================ FILE: verl_distillation/recipe/fully_async_policy/shell/dapo_7b_math_fsdp2_32_32.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail project_name='DAPO' exp_name='dapo_qwen2-7B-math_28k_fsdp2_fully-async_32-32' # Ray # RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} # WORKING_DIR=${WORKING_DIR:-"${PWD}"} # RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} # very important! 
please modify the max_position_embeddings in config.json to 32768 after downloading from huggingface MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen2.5-Math-7B"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"} TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"} rollout_mode="async" rollout_name="vllm" # sglang or vllm if [ "$rollout_mode" = "async" ]; then export VLLM_USE_V1=1 return_raw_chat="True" fi # Algorithm parameters adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 # Response length parameters max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 28)) enable_overlong_buffer=True overlong_buffer_len=$((1024 * 4)) overlong_penalty_factor=1.0 # Training parameters loss_agg_mode="token-mean" # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_top_p=0.7 # Performance Related Parameter use_dynamic_bsz=True actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 2)) infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 3)) ref_offload=True actor_offload=False gen_tp=4 sp_size=4 fsdp_size=8 # Fully async specific parameters NNODES_ROLLOUT=${NNODES_ROLLOUT:-4} NNODES_TRAIN=${NNODES_TRAIN:-4} NGPUS_PER_NODE=${NGPUS_PER_NODE:-8} train_prompt_bsz=0 gen_prompt_bsz=1 n_resp_per_prompt=16 train_prompt_mini_bsz=32 total_rollout_steps=$(((512*400))) test_freq=20 staleness_threshold=0.1 trigger_parameter_sync_step=4 require_batches=4 partial_rollout=True python -m recipe.fully_async_policy.fully_async_main \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.train_batch_size=${train_prompt_bsz} \ data.gen_batch_size=${gen_prompt_bsz} \ data.return_raw_chat=${return_raw_chat} \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.actor.strategy=fsdp2 \ critic.strategy=fsdp2 \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.hybrid_engine=False \ +actor_rollout_ref.model.override_config.max_position_embeddings=32768 \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.fsdp_config.param_offload=${actor_offload} \ 
actor_rollout_ref.actor.fsdp_config.optimizer_offload=${actor_offload} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.rollout.gpu_memory_utilization=0.80 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=True \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.ref.fsdp_config.param_offload=${ref_offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.actor.fsdp_config.fsdp_size=${fsdp_size} \ actor_rollout_ref.rollout.name=${rollout_name} \ actor_rollout_ref.rollout.mode=${rollout_mode} \ actor_rollout_ref.rollout.calculate_log_probs=True \ reward_model.reward_manager=dapo \ +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \ +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \ +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \ +reward_model.reward_kwargs.overlong_buffer_cfg.log=False \ +reward_model.reward_kwargs.max_resp_len=${max_response_length} \ trainer.logger=['console','tensorboard'] \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.val_before_train=True \ trainer.save_freq=-1 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=auto \ trainer.nnodes="${NNODES_TRAIN}" \ trainer.n_gpus_per_node="${NGPUS_PER_NODE}" \ rollout.nnodes="${NNODES_ROLLOUT}" \ rollout.n_gpus_per_node="${NGPUS_PER_NODE}" \ rollout.total_rollout_steps="${total_rollout_steps}" \ rollout.total_epochs=10 \ rollout.test_freq="${test_freq}" \ async_training.staleness_threshold="${staleness_threshold}" \ async_training.trigger_parameter_sync_step="${trigger_parameter_sync_step}" \ async_training.require_batches="${require_batches}" \ async_training.partial_rollout="${partial_rollout}" \ async_training.use_rollout_log_probs=True ================================================ FILE: verl_distillation/recipe/fully_async_policy/shell/dapo_7b_math_fsdp2_4_12.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail project_name='DAPO' exp_name='DAPO-Qwen2.5-7b-MATH-0527a1-fsdp2-fully-async-4-12' # Ray # RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} # WORKING_DIR=${WORKING_DIR:-"${PWD}"} # RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} # very important! 
please modify the max_position_embeddings in config.json to 32768 after downloading from huggingface MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen2.5-Math-7B"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"} TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"} rollout_mode="async" rollout_name="vllm" # sglang or vllm if [ "$rollout_mode" = "async" ]; then export VLLM_USE_V1=1 return_raw_chat="True" fi # Algorithm parameters adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 # Response length parameters max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 8)) enable_overlong_buffer=True overlong_buffer_len=$((1024 * 4)) overlong_penalty_factor=1.0 # Training parameters loss_agg_mode="token-mean" # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_top_p=0.7 # Performance Related Parameter use_dynamic_bsz=True actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 2)) infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 3)) ref_offload=True actor_offload=False gen_tp=1 sp_size=1 fsdp_size=2 # Fully async specific parameters NNODES=${NNODES:-2} NGPUS_PER_NODE=${NGPUS_PER_NODE:-8} n_gpus_rollout=2 n_gpus_training=$((NGPUS_PER_NODE - n_gpus_rollout)) train_prompt_bsz=0 gen_prompt_bsz=1 n_resp_per_prompt=16 train_prompt_mini_bsz=32 total_rollout_steps=$(((512*100))) test_freq=10 staleness_threshold=0.1 trigger_parameter_sync_step=4 require_batches=4 partial_rollout=True python -m recipe.fully_async_policy.fully_async_main \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.train_batch_size=${train_prompt_bsz} \ data.gen_batch_size=${gen_prompt_bsz} \ data.return_raw_chat=${return_raw_chat} \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.actor.strategy=fsdp2 \ critic.strategy=fsdp2 \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.hybrid_engine=False \ +actor_rollout_ref.model.override_config.max_position_embeddings=32768 \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.fsdp_config.param_offload=${actor_offload} \ 
actor_rollout_ref.actor.fsdp_config.optimizer_offload=${actor_offload} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.rollout.gpu_memory_utilization=0.80 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=True \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.ref.fsdp_config.param_offload=${ref_offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.actor.fsdp_config.fsdp_size=${fsdp_size} \ actor_rollout_ref.rollout.name=${rollout_name} \ actor_rollout_ref.rollout.mode=${rollout_mode} \ actor_rollout_ref.rollout.calculate_log_probs=True \ reward_model.reward_manager=dapo \ +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \ +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \ +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \ +reward_model.reward_kwargs.overlong_buffer_cfg.log=False \ +reward_model.reward_kwargs.max_resp_len=${max_response_length} \ trainer.logger=['console','tensorboard'] \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.val_before_train=True \ trainer.test_freq="${test_freq}" \ trainer.save_freq=-1 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=auto \ trainer.nnodes="${NNODES}" \ trainer.n_gpus_per_node="${n_gpus_training}" \ rollout.nnodes="${NNODES}" \ rollout.n_gpus_per_node="${n_gpus_rollout}" \ rollout.total_rollout_steps="${total_rollout_steps}" \ rollout.total_epochs=10 \ async_training.staleness_threshold="${staleness_threshold}" \ async_training.trigger_parameter_sync_step="${trigger_parameter_sync_step}" \ async_training.require_batches="${require_batches}" \ async_training.partial_rollout="${partial_rollout}" \ async_training.use_rollout_log_probs=True ================================================ FILE: verl_distillation/recipe/fully_async_policy/shell/dapo_7b_math_fsdp2_4_4.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail project_name='DAPO' exp_name='DAPO-Qwen2.5-7b-MATH-0527a1-fsdp2-fully-async-4-4' # Ray # RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} # WORKING_DIR=${WORKING_DIR:-"${PWD}"} # RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} # very important! 
please modify the max_position_embeddings in config.json to 32768 after downloading from huggingface MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen2.5-Math-7B"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"} TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"} rollout_mode="async" rollout_name="vllm" # sglang or vllm if [ "$rollout_mode" = "async" ]; then export VLLM_USE_V1=1 return_raw_chat="True" fi # Algorithm parameters adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 # Response length parameters max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 8)) enable_overlong_buffer=True overlong_buffer_len=$((1024 * 4)) overlong_penalty_factor=1.0 # Training parameters loss_agg_mode="token-mean" # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_top_p=0.7 # Performance Related Parameter use_dynamic_bsz=True actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 2)) infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 3)) ref_offload=True actor_offload=False gen_tp=1 sp_size=1 fsdp_size=2 # Fully async specific parameters NNODES=${NNODES:-1} NGPUS_PER_NODE=${NGPUS_PER_NODE:-8} n_gpus_rollout=4 n_gpus_training=$((NGPUS_PER_NODE - n_gpus_rollout)) train_prompt_bsz=0 gen_prompt_bsz=1 n_resp_per_prompt=16 train_prompt_mini_bsz=32 total_rollout_steps=$(((512*100))) test_freq=10 staleness_threshold=0.1 trigger_parameter_sync_step=4 require_batches=4 partial_rollout=True python -m recipe.fully_async_policy.fully_async_main \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.train_batch_size=${train_prompt_bsz} \ data.gen_batch_size=${gen_prompt_bsz} \ data.return_raw_chat=${return_raw_chat} \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.actor.strategy=fsdp2 \ critic.strategy=fsdp2 \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.hybrid_engine=False \ +actor_rollout_ref.model.override_config.max_position_embeddings=32768 \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.fsdp_config.param_offload=${actor_offload} \ 
actor_rollout_ref.actor.fsdp_config.optimizer_offload=${actor_offload} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.rollout.gpu_memory_utilization=0.80 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=True \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.rollout.calculate_log_probs=True \ actor_rollout_ref.ref.fsdp_config.param_offload=${ref_offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.actor.fsdp_config.fsdp_size=${fsdp_size} \ actor_rollout_ref.rollout.name=${rollout_name} \ actor_rollout_ref.rollout.mode=${rollout_mode} \ reward_model.reward_manager=dapo \ +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \ +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \ +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \ +reward_model.reward_kwargs.overlong_buffer_cfg.log=False \ +reward_model.reward_kwargs.max_resp_len=${max_response_length} \ trainer.logger=['console','tensorboard'] \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.val_before_train=False \ trainer.save_freq=-1 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=auto \ trainer.nnodes="${NNODES}" \ trainer.n_gpus_per_node="${n_gpus_training}" \ rollout.nnodes="${NNODES}" \ rollout.n_gpus_per_node="${n_gpus_rollout}" \ rollout.total_rollout_steps="${total_rollout_steps}" \ rollout.total_epochs=10 \ rollout.test_freq="${test_freq}" \ async_training.staleness_threshold="${staleness_threshold}" \ async_training.trigger_parameter_sync_step="${trigger_parameter_sync_step}" \ async_training.require_batches="${require_batches}" \ async_training.partial_rollout="${partial_rollout}" \ async_training.use_rollout_log_probs=True ================================================ FILE: verl_distillation/recipe/fully_async_policy/shell/dapo_7b_math_fsdp2_64_64.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail project_name='DAPO' exp_name='dapo_qwen2-7B-math_28k_fsdp2_fully-async_64-64' # Ray # RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} # WORKING_DIR=${WORKING_DIR:-"${PWD}"} # RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} # very important! 
please modify the max_position_embeddings in config.json to 32768 after downloading from huggingface MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen2.5-Math-7B"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"} TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"} rollout_mode="async" rollout_name="vllm" # sglang or vllm if [ "$rollout_mode" = "async" ]; then export VLLM_USE_V1=1 return_raw_chat="True" fi # Algorithm parameters adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 # Response length parameters max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 28)) enable_overlong_buffer=True overlong_buffer_len=$((1024 * 4)) overlong_penalty_factor=1.0 # Training parameters loss_agg_mode="token-mean" # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_top_p=0.7 # Performance Related Parameter use_dynamic_bsz=True actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 2)) infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 3)) ref_offload=True actor_offload=False gen_tp=4 sp_size=4 fsdp_size=8 # Fully async specific parameters NNODES_ROLLOUT=${NNODES_ROLLOUT:-8} NNODES_TRAIN=${NNODES_TRAIN:-8} NGPUS_PER_NODE=${NGPUS_PER_NODE:-8} train_prompt_bsz=0 gen_prompt_bsz=1 n_resp_per_prompt=16 train_prompt_mini_bsz=32 total_rollout_steps=$(((512*400))) test_freq=20 staleness_threshold=0.5 trigger_parameter_sync_step=4 require_batches=4 partial_rollout=True python -m recipe.fully_async_policy.fully_async_main \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.train_batch_size=${train_prompt_bsz} \ data.gen_batch_size=${gen_prompt_bsz} \ data.return_raw_chat=${return_raw_chat} \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.actor.strategy=fsdp2 \ critic.strategy=fsdp2 \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.hybrid_engine=False \ +actor_rollout_ref.model.override_config.max_position_embeddings=32768 \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.fsdp_config.param_offload=${actor_offload} \ 
actor_rollout_ref.actor.fsdp_config.optimizer_offload=${actor_offload} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.rollout.gpu_memory_utilization=0.80 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=True \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.ref.fsdp_config.param_offload=${ref_offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.actor.fsdp_config.fsdp_size=${fsdp_size} \ actor_rollout_ref.rollout.name=${rollout_name} \ actor_rollout_ref.rollout.mode=${rollout_mode} \ actor_rollout_ref.rollout.calculate_log_probs=True \ reward_model.reward_manager=dapo \ +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \ +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \ +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \ +reward_model.reward_kwargs.overlong_buffer_cfg.log=False \ +reward_model.reward_kwargs.max_resp_len=${max_response_length} \ trainer.logger=['console','tensorboard'] \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.val_before_train=True \ trainer.save_freq=-1 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=auto \ trainer.nnodes="${NNODES_TRAIN}" \ trainer.n_gpus_per_node="${NGPUS_PER_NODE}" \ rollout.nnodes="${NNODES_ROLLOUT}" \ rollout.n_gpus_per_node="${NGPUS_PER_NODE}" \ rollout.total_rollout_steps="${total_rollout_steps}" \ rollout.total_epochs=10 \ rollout.test_freq="${test_freq}" \ async_training.staleness_threshold="${staleness_threshold}" \ async_training.trigger_parameter_sync_step="${trigger_parameter_sync_step}" \ async_training.require_batches="${require_batches}" \ async_training.partial_rollout="${partial_rollout}" \ async_training.use_rollout_log_probs=True ================================================ FILE: verl_distillation/recipe/fully_async_policy/shell/dapo_7b_math_fsdp2_64_64_mis.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail project_name='DAPO' exp_name='dapo_qwen2-7B-math_28k_fsdp2_fully-async_64-64' # Ray # RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} # WORKING_DIR=${WORKING_DIR:-"${PWD}"} # RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} # very important! 
please modify the max_position_embeddings in config.json to 32768 after downloading from huggingface MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen2.5-Math-7B"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"} TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"} rollout_mode="async" rollout_name="vllm" # sglang or vllm if [ "$rollout_mode" = "async" ]; then export VLLM_USE_V1=1 return_raw_chat="True" fi # Algorithm parameters adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 # Response length parameters max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 28)) enable_overlong_buffer=True overlong_buffer_len=$((1024 * 4)) overlong_penalty_factor=1.0 # Training parameters loss_agg_mode="token-mean" # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_top_p=0.7 # Performance Related Parameter use_dynamic_bsz=True actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 2)) infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 3)) ref_offload=True actor_offload=False gen_tp=4 sp_size=4 fsdp_size=8 # Fully async specific parameters NNODES_ROLLOUT=${NNODES_ROLLOUT:-8} NNODES_TRAIN=${NNODES_TRAIN:-8} NGPUS_PER_NODE=${NGPUS_PER_NODE:-8} train_prompt_bsz=0 gen_prompt_bsz=1 n_resp_per_prompt=16 train_prompt_mini_bsz=32 total_rollout_steps=$(((512*400))) test_freq=20 staleness_threshold=0.5 trigger_parameter_sync_step=4 require_batches=4 partial_rollout=True # Rollout Importance Sampling rollout_is_threshold=1.001 rollout_is=True rollout_is_threshold_lower=0.99 rollout_is_level=geometric rollout_is_mode=mask rollout_is_veto_threshold=1e-4 python -m recipe.fully_async_policy.fully_async_main \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.train_batch_size=${train_prompt_bsz} \ data.gen_batch_size=${gen_prompt_bsz} \ data.return_raw_chat=${return_raw_chat} \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.actor.strategy=fsdp2 \ critic.strategy=fsdp2 \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.hybrid_engine=False \ +actor_rollout_ref.model.override_config.max_position_embeddings=32768 \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ 
actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.fsdp_config.param_offload=${actor_offload} \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=${actor_offload} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.rollout.gpu_memory_utilization=0.80 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=True \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.ref.fsdp_config.param_offload=${ref_offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.actor.fsdp_config.fsdp_size=${fsdp_size} \ actor_rollout_ref.rollout.name=${rollout_name} \ actor_rollout_ref.rollout.mode=${rollout_mode} \ actor_rollout_ref.rollout.calculate_log_probs=True \ reward_model.reward_manager=dapo \ +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \ +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \ +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \ +reward_model.reward_kwargs.overlong_buffer_cfg.log=False \ +reward_model.reward_kwargs.max_resp_len=${max_response_length} \ trainer.logger=['console','tensorboard'] \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.val_before_train=True \ trainer.save_freq=-1 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=auto \ trainer.nnodes="${NNODES_TRAIN}" \ trainer.n_gpus_per_node="${NGPUS_PER_NODE}" \ rollout.nnodes="${NNODES_ROLLOUT}" \ rollout.n_gpus_per_node="${NGPUS_PER_NODE}" \ rollout.total_rollout_steps="${total_rollout_steps}" \ rollout.total_epochs=10 \ rollout.test_freq="${test_freq}" \ async_training.staleness_threshold="${staleness_threshold}" \ async_training.trigger_parameter_sync_step="${trigger_parameter_sync_step}" \ async_training.require_batches="${require_batches}" \ async_training.partial_rollout="${partial_rollout}" \ async_training.use_rollout_log_probs=True \ async_training.compute_prox_log_prob=True \ algorithm.rollout_is=${rollout_is} \ algorithm.rollout_is_threshold=${rollout_is_threshold} \ algorithm.rollout_is_threshold_lower=${rollout_is_threshold_lower} \ algorithm.rollout_is_level=${rollout_is_level} \ algorithm.rollout_is_mode=${rollout_is_mode} \ algorithm.rollout_is_veto_threshold=${rollout_is_veto_threshold} ================================================ FILE: verl_distillation/recipe/fully_async_policy/shell/dapo_7b_math_fsdp2_8_8.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail project_name='DAPO' exp_name='DAPO-Qwen2.5-7b-MATH-0527a1-fsdp2-fully-async-8-8' # Ray # RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} # WORKING_DIR=${WORKING_DIR:-"${PWD}"} # RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} # Paths 
RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} # very important! please modify the max_position_embeddings in config.json to 32768 after downloading from huggingface MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen2.5-Math-7B"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"} TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"} rollout_mode="async" rollout_name="vllm" # sglang or vllm if [ "$rollout_mode" = "async" ]; then export VLLM_USE_V1=1 return_raw_chat="True" fi # Algorithm parameters adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 # Response length parameters max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 8)) enable_overlong_buffer=True overlong_buffer_len=$((1024 * 4)) overlong_penalty_factor=1.0 # Training parameters loss_agg_mode="token-mean" # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_top_p=0.7 # Performance Related Parameter use_dynamic_bsz=True actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 2)) infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 3)) ref_offload=True actor_offload=False gen_tp=1 sp_size=1 fsdp_size=2 # Fully async specific parameters NNODES_ROLLOUT=${NNODES_ROLLOUT:-1} NNODES_TRAIN=${NNODES_TRAIN:-1} NGPUS_PER_NODE=${NGPUS_PER_NODE:-8} train_prompt_bsz=0 gen_prompt_bsz=1 n_resp_per_prompt=16 train_prompt_mini_bsz=32 total_rollout_steps=$(((512*100))) test_freq=10 staleness_threshold=0.1 trigger_parameter_sync_step=4 require_batches=4 partial_rollout=True python -m recipe.fully_async_policy.fully_async_main \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.train_batch_size=${train_prompt_bsz} \ data.gen_batch_size=${gen_prompt_bsz} \ data.return_raw_chat=${return_raw_chat} \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.actor.strategy=fsdp2 \ critic.strategy=fsdp2 \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.hybrid_engine=False \ +actor_rollout_ref.model.override_config.max_position_embeddings=32768 \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ 
actor_rollout_ref.actor.fsdp_config.param_offload=${actor_offload} \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=${actor_offload} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.rollout.gpu_memory_utilization=0.80 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=True \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.ref.fsdp_config.param_offload=${ref_offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.actor.fsdp_config.fsdp_size=${fsdp_size} \ actor_rollout_ref.rollout.name=${rollout_name} \ actor_rollout_ref.rollout.mode=${rollout_mode} \ actor_rollout_ref.rollout.calculate_log_probs=True \ reward_model.reward_manager=dapo \ +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \ +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \ +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \ +reward_model.reward_kwargs.overlong_buffer_cfg.log=False \ +reward_model.reward_kwargs.max_resp_len=${max_response_length} \ trainer.logger=['console','tensorboard'] \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.val_before_train=True \ trainer.save_freq=-1 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=auto \ trainer.nnodes="${NNODES_TRAIN}" \ trainer.n_gpus_per_node="${NGPUS_PER_NODE}" \ rollout.nnodes="${NNODES_ROLLOUT}" \ rollout.n_gpus_per_node="${NGPUS_PER_NODE}" \ rollout.total_rollout_steps="${total_rollout_steps}" \ rollout.total_epochs=10 \ rollout.test_freq="${test_freq}" \ async_training.staleness_threshold="${staleness_threshold}" \ async_training.trigger_parameter_sync_step="${trigger_parameter_sync_step}" \ async_training.require_batches="${require_batches}" \ async_training.partial_rollout="${partial_rollout}" \ async_training.use_rollout_log_probs=True ================================================ FILE: verl_distillation/recipe/fully_async_policy/shell/geo3k_qwen25vl_7b_megatron_4_4.sh ================================================ set -x ENGINE=${1:-vllm} export CUDA_DEVICE_MAX_CONNECTIONS=1 # For megatron communication/computation overlapping HF_MODEL_PATH=${HF_MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen2.5-VL-7B-Instruct"} train_path=$HOME/data/geo3k/train.parquet test_path=$HOME/data/geo3k/test.parquet rollout_mode="async" rollout_name="vllm" # sglang or vllm if [ "$rollout_mode" = "async" ]; then export VLLM_USE_V1=1 return_raw_chat="True" fi # Fully async specific parameters NNODES=${NNODES:-1} NGPUS_PER_NODE=${NGPUS_PER_NODE:-8} n_gpus_rollout=4 n_gpus_training=$((NGPUS_PER_NODE - n_gpus_rollout)) train_prompt_bsz=0 gen_prompt_bsz=1 n_resp_per_prompt=4 train_prompt_mini_bsz=128 total_rollout_steps=$(((512*100))) test_freq=5 staleness_threshold=0.1 
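Note on the repeated "very important" comment in the DAPO recipes above: the stock Qwen2.5-Math-7B `config.json` ships with a smaller `max_position_embeddings` than these scripts need, so it must be raised to 32768 after downloading. A minimal sketch of one way to apply that edit (illustrative only; `MODEL_PATH` as defined in those recipes):
```python
# Illustrative helper for the "modify max_position_embeddings" note above;
# the default path mirrors the recipes' default layout and is an assumption.
import json
import os

model_path = os.environ.get("MODEL_PATH", os.path.expanduser("~/verl/models/Qwen2.5-Math-7B"))
cfg_path = os.path.join(model_path, "config.json")

with open(cfg_path) as f:
    cfg = json.load(f)

cfg["max_position_embeddings"] = 32768  # must cover the prompt + response budgets used above

with open(cfg_path, "w") as f:
    json.dump(cfg, f, indent=2)
```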
trigger_parameter_sync_step=4 require_batches=2 partial_rollout=True total_epochs=200 python -m recipe.fully_async_policy.fully_async_main \ --config-path=config \ --config-name='fully_async_ppo_megatron_trainer.yaml'\ algorithm.adv_estimator=grpo \ data.train_files="$train_path" \ data.val_files="$test_path" \ data.train_batch_size=${train_prompt_bsz} \ data.max_prompt_length=1024 \ data.max_response_length=2048 \ actor_rollout_ref.rollout.max_model_len=32768 \ actor_rollout_ref.rollout.max_num_batched_tokens=32768 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.gen_batch_size=${gen_prompt_bsz} \ data.return_raw_chat=${return_raw_chat} \ actor_rollout_ref.model.path=$HF_MODEL_PATH \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_decay_steps=51200 \ actor_rollout_ref.hybrid_engine=False \ actor_rollout_ref.rollout.calculate_log_probs=True \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=1 \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=1 \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=2 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.01 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=1 \ actor_rollout_ref.rollout.tensor_model_parallel_size=1 \ actor_rollout_ref.actor.use_dynamic_bsz=True \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=5120 \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=True \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=5120 \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=True \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=5120 \ actor_rollout_ref.rollout.name=$ENGINE \ actor_rollout_ref.rollout.mode=${rollout_mode} \ +actor_rollout_ref.rollout.engine_kwargs.vllm.disable_mm_preprocessor_cache=True \ actor_rollout_ref.rollout.gpu_memory_utilization=0.8 \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=1 \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=1 \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=4 \ +actor_rollout_ref.actor.optim.override_optimizer_config.optimizer_offload_fraction=1 \ +actor_rollout_ref.actor.optim.override_optimizer_config.overlap_cpu_optimizer_d2h_h2d=True \ +actor_rollout_ref.actor.optim.override_optimizer_config.use_precision_aware_optimizer=True \ +actor_rollout_ref.actor.optim.override_optimizer_config.optimizer_cpu_offload=True \ actor_rollout_ref.actor.megatron.use_mbridge=True \ actor_rollout_ref.actor.megatron.param_offload=True \ actor_rollout_ref.actor.megatron.optimizer_offload=True \ actor_rollout_ref.actor.megatron.grad_offload=True \ actor_rollout_ref.ref.megatron.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_geo3k' \ trainer.experiment_name='qwen2_5_vl_7b_megatron_async' \ trainer.test_freq="${test_freq}" \ trainer.total_epochs="${total_epochs}" \ trainer.val_before_train=False \ trainer.save_freq=-1 \ trainer.resume_mode=auto \ trainer.nnodes="${NNODES}" \ trainer.n_gpus_per_node="${n_gpus_training}" \ rollout.nnodes="${NNODES}" \ rollout.n_gpus_per_node="${n_gpus_rollout}" \ rollout.total_rollout_steps="${total_rollout_steps}" \ rollout.total_epochs="${total_epochs}" \ rollout.test_freq="${test_freq}" \ 
async_training.staleness_threshold="${staleness_threshold}" \ async_training.trigger_parameter_sync_step="${trigger_parameter_sync_step}" \ async_training.require_batches="${require_batches}" \ async_training.partial_rollout="${partial_rollout}" \ async_training.use_rollout_log_probs=True ================================================ FILE: verl_distillation/recipe/fully_async_policy/shell/runtime_env.yaml ================================================ env_vars: VLLM_USE_V1: "1" NCCL_DEBUG: "INFO" HYDRA_FULL_ERROR: "1" ================================================ FILE: verl_distillation/recipe/fully_async_policy/unittest/simple_streaming_demo.py ================================================ # Copyright 2025 Meituan Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import asyncio import random import time class SimpleStreamingSystem: """Simplified streaming system demonstration""" def __init__(self, max_concurrent_tasks: int = 4): self.max_concurrent_tasks = max_concurrent_tasks self.data_queue = asyncio.Queue() self.result_queue = asyncio.Queue() self.consumer_count = 0 # Data stream coroutine async def data_stream(self): # Add initial data # Prepare test data test_data = [{"id": f"task_{i}", "content": f"data_{i}"} for i in range(8)] await self.add_data_stream(test_data) # Simulate subsequent data stream await asyncio.sleep(3) print("\nAdding second batch of data...") extra_data = [{"id": f"extra_{i}", "content": f"extra_data_{i}"} for i in range(5)] await self.add_data_stream(extra_data) # Send termination signal await asyncio.sleep(1) await self.data_queue.put("DONE") print("Sending termination signal") async def add_data_stream(self, data_list: list[dict]): """Simulate data stream""" print("Starting to add data stream...") for i, data_item in enumerate(data_list): await self.data_queue.put(data_item) print(f"Data {data_item['id']} added to pending queue") # Simulate interval between data streams if i < len(data_list) - 1: # Don't wait after the last item await asyncio.sleep(0.8) print("Initial data stream added successfully") async def _process_data_async(self, data_item: dict): """Asynchronously process a single data item""" data_id = data_item["id"] content = data_item["content"] # Simulate different processing times (1-3 seconds) processing_time = random.uniform(1, 3) print(f" Starting to process {data_id}, estimated time {processing_time:.1f}s") # Asynchronously wait for processing completion await asyncio.sleep(processing_time) result = { "id": data_id, "processed_content": f"Processed {content}", "processing_time": round(processing_time, 2), "completed_at": time.time(), } # Immediately put into result queue await self.result_queue.put(result) print(f" {data_id} processing completed! 
(took {processing_time:.1f}s) -> Added to result queue") async def _submit_worker(self): """Stream submission worker coroutine""" active_tasks = set() print("Stream submitter started...") while True: # Get data to process data_item = await self.data_queue.get() if data_item == "DONE": print("Received termination signal, waiting for remaining tasks to complete...") if active_tasks: await asyncio.gather(*active_tasks, return_exceptions=True) break # Check concurrent limit while len(active_tasks) >= self.max_concurrent_tasks: print(f"Reached maximum concurrency {self.max_concurrent_tasks}, waiting for tasks to complete...") done_tasks, active_tasks = await asyncio.wait(active_tasks, return_when=asyncio.FIRST_COMPLETED) # Clean up completed tasks for task in done_tasks: try: await task print(f"Task completed {task}") except Exception as e: print(f"Task execution failed: {e}") # Immediately submit new task task = asyncio.create_task(self._process_data_async(data_item), name=f"active {data_item}") active_tasks.add(task) print(f"Submitted task {data_item['id']}, current concurrency: {len(active_tasks)}") async def _consumer_worker(self): """Result consumer coroutine""" print("Consumer started...") while True: try: # Get processing result from result queue result = await asyncio.wait_for(self.result_queue.get(), timeout=2.0) self.consumer_count += 1 print( f"Consumed #{self.consumer_count}: {result['id']} " f"(processing time {result['processing_time']}s) - {result['processed_content']}" ) except asyncio.TimeoutError: print(" Consumer waiting...") await asyncio.sleep(0.5) async def run_demo(self): """Run demonstration""" print("=" * 60) print(f"Maximum concurrency: {self.max_concurrent_tasks}") print("=" * 60) # Start core coroutines stream_task = asyncio.create_task(self.data_stream()) submit_task = asyncio.create_task(self._submit_worker()) consumer_task = asyncio.create_task(self._consumer_worker()) try: # Wait for data stream to complete await stream_task print("Data stream completed") # Wait for processing to complete await submit_task print("All tasks processed") finally: # Cleanup submit_task.cancel() consumer_task.cancel() await asyncio.gather(submit_task, consumer_task, return_exceptions=True) print(f"\nFinal statistics: Consumed {self.consumer_count} results") async def main(): """Main function""" system = SimpleStreamingSystem(max_concurrent_tasks=3) await system.run_demo() if __name__ == "__main__": asyncio.run(main()) ================================================ FILE: verl_distillation/recipe/fully_async_policy/vllm_rollout/__init__.py ================================================ # Copyright 2025 Meituan Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: verl_distillation/recipe/fully_async_policy/vllm_rollout/vllm_async_server.py ================================================ # Copyright 2025 Meituan Ltd. 
and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import asyncio import logging from typing import Any, Optional, Sequence import ray from ray.actor import ActorHandle from vllm import SamplingParams from vllm.inputs import TokensPrompt from vllm.outputs import RequestOutput from verl.workers.config import HFModelConfig, RewardModelConfig, RolloutConfig from verl.workers.rollout.replica import RolloutMode from verl.workers.rollout.vllm_rollout.vllm_async_server import ( _qwen2_5_vl_dedup_image_tokens, vLLMHttpServerBase, vLLMReplica, ) logger = logging.getLogger(__file__) logger.setLevel(logging.INFO) @ray.remote(num_cpus=1) class vLLMHttpServerForPartial(vLLMHttpServerBase): def __init__( self, config: RolloutConfig | RewardModelConfig, model_config: HFModelConfig, rollout_mode: RolloutMode, workers: list[ActorHandle], replica_rank: int, node_rank: int, gpus_per_node: int, nnodes: int, ): super().__init__(config, model_config, rollout_mode, workers, replica_rank, node_rank, gpus_per_node, nnodes) # for cancel LLMServer self.paused = False self.lock = asyncio.Lock() self.cancel_event: dict[str, asyncio.Event] = {} self.req_output: dict[str, Optional[RequestOutput]] = {} async def _generate_step( self, prompt_ids: list[int], sampling_params: dict[str, Any], request_id: str, image_data: Optional[list[Any]] = None, ): max_tokens = self.config.max_model_len - len(prompt_ids) sampling_params["logprobs"] = 1 sampling_params.setdefault("repetition_penalty", self.config.get("repetition_penalty", 1.0)) sampling_params = SamplingParams(max_tokens=max_tokens, **sampling_params) prompt_ids = _qwen2_5_vl_dedup_image_tokens(prompt_ids, self.model_config.processor) prompt = TokensPrompt( prompt_token_ids=prompt_ids, multi_modal_data={"image": image_data} if image_data else None ) generator = self.engine.generate(prompt=prompt, sampling_params=sampling_params, request_id=request_id) # Get final response async for output in generator: self.req_output[request_id] = output assert self.req_output[request_id] is not None async def generate_for_partial( self, prompt_ids: list[int], sampling_params: dict[str, Any], request_id: str, image_data: Optional[list[Any]] = None, ) -> tuple[list[Any], list[Any], bool] | tuple[Sequence[int], list[float], Any]: async with self.lock: if self.paused: # After cancel, all tasks will return directly and wait for the next submission return [], [], True self.req_output[request_id]: Optional[RequestOutput] = None self.cancel_event[request_id] = asyncio.Event() cancel_handle = asyncio.create_task(self.cancel_event[request_id].wait()) generation_handle = asyncio.create_task( self._generate_step(prompt_ids, sampling_params, request_id, image_data) ) done, pend = await asyncio.wait([generation_handle, cancel_handle], return_when=asyncio.FIRST_COMPLETED) for task in done: await task for task in pend: task.cancel() async with self.lock: if self.req_output[request_id] is None: return [], [], True token_ids = self.req_output[request_id].outputs[0].token_ids log_probs: list[float] = [] 
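# Note: outputs[0].logprobs holds, per generated position, a dict mapping candidate token_id -> Logprob; vLLM includes the sampled token's entry even when it falls outside the requested top candidates, which is why the loop below looks the value up by the emitted token_id instead of assuming a single entry.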
for i, x in enumerate(self.req_output[request_id].outputs[0].logprobs): # In sampling_params, logprobs is set to 1, which should return 1, # but in practice there are multiple. Take the log_prob corresponding to token_id token_id = self.req_output[request_id].outputs[0].token_ids[i] log_probs.append(x[token_id].logprob) is_cancel = generation_handle not in done self.cancel_event.pop(request_id, None) self.req_output.pop(request_id, None) return token_ids, log_probs, is_cancel async def cancel(self): async with self.lock: self.paused = True for request_id in self.cancel_event: self.cancel_event[request_id].set() async def resume(self): async with self.lock: self.paused = False async def reset_prefix_cache(self): async with self.lock: await self.engine.reset_prefix_cache() class FullyAsyncvLLMReplica(vLLMReplica): def __init__( self, replica_rank: int, config: RolloutConfig | RewardModelConfig, model_config: HFModelConfig, gpus_per_node: int = 8, is_reward_model: bool = False, ): super().__init__(replica_rank, config, model_config, gpus_per_node, is_reward_model) self.server_class = vLLMHttpServerForPartial async def cancel(self): """Cancel each rollout server.""" await asyncio.gather(*[server.cancel.remote() for server in self.servers]) async def resume(self): """Resume each rollout server.""" await asyncio.gather(*[server.resume.remote() for server in self.servers]) async def reset_prefix_cache(self): """Reset the KV cache in each rollout server.""" await asyncio.gather(*[server.reset_prefix_cache.remote() for server in self.servers]) ================================================ FILE: verl_distillation/recipe/genrm_remote/README.md ================================================ # Generative Reward Model ## Scripts ### Step 1: Launch a vLLM Server (Optional) Deploy the pretrained GenRM model using vLLM. Skip this step if you want to use an external API service. ```bash vllm serve verl-team/GenRM-CI-Test-1.5B --served-model-name genrm-demo ``` ### Step 2: Perform RL using GenRM ```bash bash recipe/genrm_remote/run_genrm_remote.sh ``` The implementation works by passing a customized reward function (see `reward_function.py`). For convenience, we run both the RL training and the server on the same machine. To use an external server, configure the `BASE_URL` and `API_KEY` in `reward_function.py` first. ## Advanced: Customizing Your GenRM You can use an SGLang server with data parallelism for faster inference: ```bash CUDA_VISIBLE_DEVICES=0,1,2,3 python -m sglang_router.launch_server --model-path verl-team/GenRM-CI-Test-1.5B --dp-size 4 ``` Note that you should modify the `BASE_URL` in `reward_function.py` to match your SGLang server address. You can also create your own customized GenRM by implementing a custom reward function. Here are some tips for customizing your own GenRM based on `reward_function.py`: - Design appropriate prompts for your GenRM - Convert GenRM responses into RL rewards - ... Since these aspects are highly flexible, we only provide a demo implementation; the actual design and implementation of the GenRM are left to the user's discretion. ================================================ FILE: verl_distillation/recipe/genrm_remote/reward_function.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from concurrent.futures import ThreadPoolExecutor from time import sleep import requests from verl.utils.reward_score.math_reward import last_boxed_only_string, remove_boxed BASE_URL = "http://localhost:30000" API_KEY = "EMPTY" MAX_RETRIES = 3 BASE_DELAY = 2 MAX_WORKERS = 32 MODEL_NAME = "genrm-demo" GENRM_PROMPT_TEMPLATE = """ The following is a math problem and an AI solution: [Math Problem] {problem} [AI Solution] {solution} Your task is to review and critique the solution step by step, and output whether the AI solution is correct. Please put your final answer (i.e., 'True' or 'False') in \\boxed{{}}. """.strip() def get_response(problem, solution_str, ground_truth): prompt = GENRM_PROMPT_TEMPLATE.format(problem=problem, solution=solution_str) messages = [{"role": "user", "content": prompt}] for attempt in range(MAX_RETRIES): try: headers = {"Content-Type": "application/json"} chat_url = f"{BASE_URL}/v1/chat/completions" data = {"model": MODEL_NAME, "messages": messages} output = requests.post(chat_url, headers=headers, json=data, timeout=30) response = output.json()["choices"][0]["message"]["content"] return response except Exception as e: if attempt < MAX_RETRIES - 1: print("Exception: ", repr(e)) delay = BASE_DELAY * (2**attempt) print(f"Retrying in {delay} seconds...") sleep(delay) else: print(f"Failed after {MAX_RETRIES} attempts. Error: {e}") raise ConnectionRefusedError(f"Failed to run the model for {prompt}!") def compute_reward(response): reward_score = 0.0 try: boxed_result = last_boxed_only_string(response) if boxed_result is not None: result = remove_boxed(boxed_result) reward_score = float(result == "True") except Exception as e: print(e) return reward_score def compute_score(data_source, solution_str, ground_truth, extra_info): split = extra_info["split"] from verl.utils.reward_score import default_compute_score func_rm_score = default_compute_score(data_source, solution_str, ground_truth, extra_info) if split == "test": return func_rm_score else: problem = extra_info["question"] response = get_response(problem, solution_str, ground_truth) if response is not None: reward_score = compute_reward(response) else: reward_score = 0.0 return reward_score def compute_score_batch(data_sources, solution_strs, ground_truths, extra_infos): with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor: futures = [] for data_source, solution_str, ground_truth, extra_info in zip( data_sources, solution_strs, ground_truths, extra_infos, strict=True ): future = executor.submit(compute_score, data_source, solution_str, ground_truth, extra_info) futures.append(future) results = [future.result() for future in futures] return results ================================================ FILE: verl_distillation/recipe/genrm_remote/run_genrm_remote.sh ================================================ # vllm server # CUDA_VISIBLE_DEVICES=0,1,2,3 vllm serve verl-team/GenRM-CI-Test-1.5B --served_model_name genrm-demo # sglang server # CUDA_VISIBLE_DEVICES=0,1,2,3 python -m sglang_router.launch_server --model-path verl-team/GenRM-CI-Test-1.5B --dp-size 4 set -x CUDA_VISIBLE_DEVICES=4,5,6,7 python3 -m 
verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=${HOME}/data/gsm8k/train.parquet \ data.val_files=${HOME}/data/gsm8k/test.parquet \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=2048 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2.5-3B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.use_dynamic_bsz=True \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.rollout.tensor_model_parallel_size=1 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.8 \ actor_rollout_ref.rollout.n=8 \ algorithm.use_kl_in_reward=False \ reward_model.reward_manager=batch \ custom_reward_function.path=recipe/genrm_remote/reward_function.py \ custom_reward_function.name=compute_score_batch \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_func_rm_example_gsm8k' \ trainer.experiment_name='qwen2_5_3b_gen_rm' \ trainer.n_gpus_per_node=4 \ trainer.val_before_train=True \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=10 \ trainer.resume_mode='disable' ================================================ FILE: verl_distillation/recipe/gspo/test_gspo_3b_math.sh ================================================ #!/usr/bin/env bash #SBATCH --job-name=rl-gspo-3B #SBATCH --partition=main #SBATCH --nodes=1 # Number of nodes #SBATCH --ntasks-per-node=1 # One task per node #SBATCH --cpus-per-task=128 # cpu-cores per task #SBATCH --gres=gpu:8 #SBATCH --mem=0 #SBATCH --exclusive #SBATCH --time=500:00:00 #SBATCH --output=/rl/logs/Qwen2.5-3B/gspo/math/vllm_%x_%j.out #SBATCH --error=/rl/logs/Qwen2.5-3B/gspo/math/vllm_%x_%j.err set -xeuo pipefail # activate the venv echo "Activating distill environment..." eval "$(conda shell.bash hook)" conda deactivate conda activate distill # can make training faster, depends on your infrastructure export NCCL_IBEXT_DISABLE=1 export NCCL_NVLS_ENABLE=1 export NCCL_IB_HCA=mlx5 export UCX_NET_DEVICES=mlx5_0:1,mlx5_1:1,mlx5_2:1,mlx5_3:1,mlx5_4:1,mlx5_5:1,mlx5_6:1,mlx5_7:1 # Set how many GPUs we actually have on this node. export GPUS_PER_NODE=8 NNODES=${SLURM_JOB_NUM_NODES} export NNODES export VLLM_ATTENTION_BACKEND=FLASH_ATTN export RAY_LOGGING_LEVEL=DEBUG export HYDRA_FULL_ERROR=1 export WANDB_API_KEY=... # your wandb API key echo "Using $NNODES nodes for training..." # ------------------------------------- Setup xp params --------------------------------------- project_name='RL-GSPO' adv_estimator=grpo loss_mode=gspo loss_agg_mode="seq-mean-token-mean" MODEL_PATH=Qwen/Qwen2.5-3B-Instruct offload=false # it's a small model, offloading will just slow down training rollout_engine=vllm rollout_mode=sync # can be async to speed up large-scale experiments gpu_memory_utilization=0.8 reward_manager=dapo shuffle_dataset=true first_time_dataset_prep=true # prepare dataset test_freq=10 save_freq=10 total_epochs=10 total_training_steps=500 val_before_train=false use_kl_in_reward=false kl_coef=0.0 use_kl_loss=false kl_loss_coef=0.0 clip_ratio_low=0.0003 # as recommended by the paper, see Sec. 5.1 clip_ratio_high=0.0004 # as recommended by the paper, see Sec. 5.1 train_batch_size=512 ppo_mini_batch_size=128 # maintain 4 mini-batches as recommended by the paper, see Sec. 5.1 ppo_micro_batch_size_per_gpu=8 # set depending on your GPU memory n_resp_per_prompt=16 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 8)) # dapo reward manager params enable_overlong_buffer=false # true overlong_buffer_len=$((1024 * 4)) overlong_penalty_factor=1.0 # Paths and naming SFT_MODEL=$(basename $MODEL_PATH) exp_name="${loss_mode}-epslow-${clip_ratio_low}-epshigh-${clip_ratio_high}-${SFT_MODEL}-RL" CKPTS_DIR=/rl/checkpoints/experimental/4b/${loss_mode}/${exp_name} # Sampling params at rollouts temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_top_p=0.7 # Performance Related Parameter sp_size=1 use_dynamic_bsz=true actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 2)) infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 3)) offload=true # note: this overrides the offload=false set above gen_tp=1 entropy_checkpointing=true # enables recomputation (checkpointing) of the entropy calculation, lowering memory usage during training # ------------------------------------- train/val data preparation --------------------------------------- if [ "$first_time_dataset_prep" = true ]; then echo "Preprocessing GSM8K dataset..." # runs once; set first_time_dataset_prep=false on later launches to skip this step python examples/data_preprocess/gsm8k.py --local_save_dir /data/gsm8k/ fi gsm8k_train_path=/data/gsm8k/train.parquet gsm8k_test_path=/data/gsm8k/test.parquet # set the paths train_files="['$gsm8k_train_path']" test_files="['$gsm8k_test_path']" python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=${adv_estimator} \ actor_rollout_ref.actor.policy_loss.loss_mode=${loss_mode} \ data.train_files="${train_files}" \ data.val_files="${test_files}" \ data.shuffle=$shuffle_dataset \ data.prompt_key=prompt \ data.truncation='error' \ data.filter_overlong_prompts=true \ data.train_batch_size=${train_batch_size} \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.model.use_remove_padding=true \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.name=${rollout_engine} \ actor_rollout_ref.rollout.mode=${rollout_mode} \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.model.enable_gradient_checkpointing=true \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps_ratio=0.05 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=${ppo_mini_batch_size} \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=${ppo_micro_batch_size_per_gpu} \ actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \
actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.rollout.gpu_memory_utilization=${gpu_memory_utilization} \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.enable_chunked_prefill=true \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=true \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.actor.entropy_checkpointing=${entropy_checkpointing} \ reward_model.reward_manager=${reward_manager} \ +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \ +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \ +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \ +reward_model.reward_kwargs.overlong_buffer_cfg.log=false \ +reward_model.reward_kwargs.max_resp_len=${max_response_length} \ trainer.logger='["console","wandb"]' \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node="${GPUS_PER_NODE}" \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=${val_before_train} \ trainer.test_freq=${test_freq} \ trainer.save_freq=${save_freq} \ trainer.total_epochs=${total_epochs} \ trainer.total_training_steps=${total_training_steps} \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=auto \ trainer.log_val_generations=2 \ $@ ================================================ FILE: verl_distillation/recipe/gspo/test_gspo_3b_math_slurm.sh ================================================ #!/usr/bin/env bash #SBATCH --job-name=rl-gspo-3B #SBATCH --partition=main #SBATCH --nodes=1 # Number of nodes #SBATCH --ntasks-per-node=1 # One task per node #SBATCH --cpus-per-task=128 # cpu-cores per task #SBATCH --gres=gpu:8 #SBATCH --mem=0 #SBATCH --exclusive #SBATCH --time=500:00:00 #SBATCH --output=/rl/logs/Qwen2.5-3B/gspo/math/vllm_%x_%j.out #SBATCH --error=/rl/logs/Qwen2.5-3B/gspo/math/vllm_%x_%j.err set -xeuo pipefail # activate the venv echo "Activating distill environment..." eval "$(conda shell.bash hook)" conda deactivate conda activate distill # can make training faster, depends on your infrastructure export NCCL_IBEXT_DISABLE=1 export NCCL_NVLS_ENABLE=1 export NCCL_IB_HCA=mlx5 export UCX_NET_DEVICES=mlx5_0:1,mlx5_1:1,mlx5_2:1,mlx5_3:1,mlx5_4:1,mlx5_5:1,mlx5_6:1,mlx5_7:1 # Set how many GPUs we actually have on this node. export GPUS_PER_NODE=8 NNODES=${SLURM_JOB_NUM_NODES} export NNODES export VLLM_ATTENTION_BACKEND=FLASH_ATTN export RAY_memory_monitor_refresh_ms=0 export RAY_LOGGING_LEVEL=DEBUG export HYDRA_FULL_ERROR=1 export WANDB_API_KEY=... # your wandb API key # Let Ray know how many nodes to expect export RAY_NUM_NODES=$NNODES echo "Using $NNODES nodes for training..." 
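Both GSPO recipes use clip_ratio_low=0.0003 / clip_ratio_high=0.0004, roughly three orders of magnitude tighter than the usual ~0.2 token-level PPO clipping; that only makes sense because GSPO clips a length-normalized, sequence-level importance ratio, which stays extremely close to 1. A toy sketch of that quantity (assuming PyTorch; illustrative, not verl's `policy_loss.loss_mode=gspo` implementation):
```python
# Toy illustration of the sequence-level importance ratio that GSPO clips.
import torch

def seq_level_ratio(logp_new: torch.Tensor, logp_old: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
    # exp of the mean per-token log-ratio over response tokens,
    # i.e. (pi_new(y|x) / pi_old(y|x)) ** (1 / |y|)
    delta = (logp_new - logp_old) * mask
    return torch.exp(delta.sum(-1) / mask.sum(-1))

logp_old = -torch.rand(4, 16)                    # stand-in log-probs: 4 responses x 16 tokens
logp_new = logp_old + 1e-3 * torch.randn(4, 16)  # small drift between policy versions
mask = torch.ones(4, 16)                         # response-token mask

s = seq_level_ratio(logp_new, logp_old, mask)
print(s.clamp(1 - 0.0003, 1 + 0.0004))           # clip_ratio_low / clip_ratio_high from these scripts
```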
# ------------------------------------- Setup xp params --------------------------------------- project_name='RL-GSPO' adv_estimator=grpo loss_mode=gspo loss_agg_mode="seq-mean-token-mean" MODEL_PATH=Qwen/Qwen2.5-3B-Instruct rollout_engine=vllm rollout_mode=sync # can be async to speed up large-scale experiments gpu_memory_utilization=0.8 reward_manager=dapo shuffle_dataset=true first_time_dataset_prep=true # prepare dataset test_freq=10 save_freq=10 total_epochs=10 total_training_steps=500 val_before_train=false use_kl_in_reward=false kl_coef=0.0 use_kl_loss=false kl_loss_coef=0.0 clip_ratio_low=0.0003 # as recommended by the paper, see Sec. 5.1 clip_ratio_high=0.0004 # as recommended by the paper, see Sec. 5.1 train_batch_size=512 ppo_mini_batch_size=128 # maintain 4 mini-batches as recommended by the paper, see Sec. 5.1 ppo_micro_batch_size_per_gpu=8 # set depending on your GPU memory n_resp_per_prompt=16 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 8)) # dapo reward manager params enable_overlong_buffer=false # true overlong_buffer_len=$((1024 * 4)) overlong_penalty_factor=1.0 # Paths and naming SFT_MODEL=$(basename $MODEL_PATH) exp_name="${loss_mode}-epslow-${clip_ratio_low}-epshigh-${clip_ratio_high}-${SFT_MODEL}-RL" CKPTS_DIR=/rl/checkpoints/experimental/4b/${loss_mode}/${exp_name} # Sampling params at rollouts temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_top_p=0.7 # Performance-related parameters sp_size=1 use_dynamic_bsz=true actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 2)) infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 3)) offload=true # set false for a small model, where offloading will just slow down training gen_tp=1 entropy_checkpointing=true # Recompute activations for the entropy calculation instead of caching them, lowering memory usage during training. # ------------------------------------- train/val data preparation --------------------------------------- if [ "$first_time_dataset_prep" = true ]; then echo "Preprocessing GSM8K dataset..."
python examples/data_preprocess/gsm8k.py --local_save_dir /data/gsm8k/ fi gsm8k_train_path=/data/gsm8k/train.parquet gsm8k_test_path=/data/gsm8k/test.parquet # set the paths train_files="['$gsm8k_train_path']" test_files="['$gsm8k_test_path']" python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=${adv_estimator} \ actor_rollout_ref.actor.policy_loss.loss_mode=${loss_mode} \ data.train_files="${train_files}" \ data.val_files="${test_files}" \ data.shuffle=$shuffle_dataset \ data.prompt_key=prompt \ data.truncation='error' \ data.filter_overlong_prompts=true \ data.train_batch_size=${train_batch_size} \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.model.use_remove_padding=true \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.name=${rollout_engine} \ actor_rollout_ref.rollout.mode=${rollout_mode} \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.model.enable_gradient_checkpointing=true \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps_ratio=0.05 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=${ppo_mini_batch_size} \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=${ppo_micro_batch_size_per_gpu} \ actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.rollout.gpu_memory_utilization=${gpu_memory_utilization} \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.enable_chunked_prefill=true \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=true \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.actor.entropy_checkpointing=${entropy_checkpointing} \ reward_model.reward_manager=${reward_manager} \ +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \
+reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \ +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \ +reward_model.reward_kwargs.overlong_buffer_cfg.log=false \ +reward_model.reward_kwargs.max_resp_len=${max_response_length} \ trainer.logger='["console","wandb"]' \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node="${GPUS_PER_NODE}" \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=${val_before_train} \ trainer.test_freq=${test_freq} \ trainer.save_freq=${save_freq} \ trainer.total_epochs=${total_epochs} \ trainer.total_training_steps=${total_training_steps} \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=auto \ trainer.log_val_generations=2 \ $@ ================================================ FILE: verl_distillation/recipe/gspo/test_gspo_qwen30b_a3b_ep.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail export NCCL_DEBUG=WARN # export VERL_LOGGING_LEVEL=DEBUG project_name='DAPO' exp_name='GSPO-Qwen3-30B-A3B-Base-MATH' adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=3e-4 clip_ratio_high=4e-4 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 8)) enable_overlong_buffer=True overlong_buffer_len=$((1024 * 4)) overlong_penalty_factor=1.0 loss_agg_mode="token-mean" loss_mode=gspo train_prompt_bsz=256 n_resp_per_prompt=16 train_prompt_mini_bsz=32 # Ray # RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} # WORKING_DIR=${WORKING_DIR:-"${PWD}"} # RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} NNODES=${NNODES:-2} NGPUS_PER_NODE=${NGPUS_PER_NODE:-8} # Paths # RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} # MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen3-30B-A3B-Base"} # CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} # TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"} # TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"} MODEL_PATH=$HDFS_ROOT/model/Qwen3-30B-A3B-Base CKPTS_DIR=$DATA_ROOT/checkpoint/${project_name}/${exp_name} TRAIN_FILE=$DATA_ROOT/dataset/BytedTsinghua-SIA/DAPO-Math-17k/data/dapo-math-17k.parquet aime24_test_path=$DATA_ROOT/dataset/aime-2024.parquet TEST_FILE="['$aime24_test_path']" # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_top_p=0.7 # Performance-related parameters use_dynamic_bsz=True actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 1)) infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 3)) offload=True # gen rollout_name=vllm # vllm or sglang gen_tp=1 gen_dp=4 gen_ep=4 # train train_tp=4 train_pp=1 EP=4 ETP=1 python3 -m verl.trainer.main_ppo \ --config-path=config \ --config-name='ppo_megatron_trainer.yaml' \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.return_raw_chat=True \ data.truncation='left' \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.train_batch_size=${train_prompt_bsz} \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ actor_rollout_ref.actor.policy_loss.loss_mode=${loss_mode} \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \
actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.optim.clip_grad=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.megatron.param_offload=${offload} \ actor_rollout_ref.actor.megatron.optimizer_offload=${offload} \ actor_rollout_ref.actor.megatron.grad_offload=${offload} \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=${train_pp} \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=${train_tp} \ actor_rollout_ref.actor.megatron.expert_model_parallel_size=$EP \ actor_rollout_ref.actor.megatron.expert_tensor_parallel_size=$ETP \ actor_rollout_ref.rollout.gpu_memory_utilization=0.80 \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=True \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.rollout.name=${rollout_name} \ actor_rollout_ref.rollout.mode=async \ actor_rollout_ref.rollout.calculate_log_probs=True \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.data_parallel_size=${gen_dp} \ actor_rollout_ref.rollout.expert_parallel_size=${gen_ep} \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=${train_pp} \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=${train_tp} \ actor_rollout_ref.ref.megatron.expert_model_parallel_size=$EP \ actor_rollout_ref.ref.megatron.expert_tensor_parallel_size=$ETP \ actor_rollout_ref.ref.megatron.param_offload=${offload} \ actor_rollout_ref.actor.megatron.use_mbridge=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.apply_rope_fusion=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.moe_router_dtype=fp32 \ +actor_rollout_ref.actor.megatron.override_transformer_config.recompute_method=uniform \ +actor_rollout_ref.actor.megatron.override_transformer_config.recompute_granularity=full \ +actor_rollout_ref.actor.megatron.override_transformer_config.recompute_num_layers=1 \ +actor_rollout_ref.actor.megatron.override_transformer_config.gradient_accumulation_fusion=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.moe_permute_fusion=True \ reward_model.reward_manager=dapo \ 
+reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \ +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \ +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \ +reward_model.reward_kwargs.overlong_buffer_cfg.log=False \ +reward_model.reward_kwargs.max_resp_len=${max_response_length} \ trainer.logger='["console","wandb"]' \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}-tp${gen_tp}-ep${gen_ep}" \ trainer.n_gpus_per_node="${NGPUS_PER_NODE}" \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=False \ trainer.test_freq=10 \ trainer.save_freq=30 \ trainer.total_epochs=10 \ trainer.total_training_steps=300 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=auto \ trainer.log_val_generations=10 ================================================ FILE: verl_distillation/recipe/infigui-g1/README.md ================================================ # Recipe for InfiGUI-G1 This directory contains the official implementation for the paper [InfiGUI-G1: Advancing GUI Grounding with Adaptive Exploration Policy Optimization](https://arxiv.org/abs/2508.05731). This work introduces Adaptive Exploration Policy Optimization (AEPO), a policy optimization framework designed to enhance GUI grounding in Multimodal Large Language Models (MLLMs). AEPO improves exploration efficiency by employing a multi-answer generation strategy and a theoretically grounded Adaptive Exploration Reward (AER) function. This approach effectively addresses the challenge of semantic alignment in complex GUI grounding tasks. We provide training scripts for both 3B and 7B models, configured for a single machine with 8 GPUs by default. ## Environment Setup Please follow the main environment setup guide for `verl`. The provided scripts use the following Docker image: `verlai/verl:app-verl0.5-transformers4.55.4-sglang0.4.10.post2-mcore0.13.0-te2.2` ## Data Preparation Before starting training, you need to download the example dataset. This dataset is a filtered version of [omniact](https://huggingface.co/datasets/Writer/omniact), containing only grounding tasks and excluding easy samples. The data is hosted on the Hugging Face Hub. You can download it using the `huggingface-cli`: ```bash huggingface-cli download --repo-type dataset --resume-download InfiX-ai/omniact_grounding_filtered --local-dir data/omniact_grounding_filtered ``` This command will download the training and validation parquet files into the `data/omniact_grounding_filtered` directory, which is the default path used by the scripts. ## Training We provide scripts to train the 3B and 7B models. Please run them from the root directory of `verl`. - **Train the 3B model:** ```bash bash recipe/infigui-g1/run_3b.sh ``` - **Train the 7B model:** ```bash bash recipe/infigui-g1/run_7b.sh ``` ## Using Custom Data If you wish to train on your own dataset, please format your data to match the structure of the example files located in `data/omniact_grounding_filtered`. Once your data is ready, you need to update the data path arguments in the training script. In `run_3b.sh` or `run_7b.sh`, modify the following lines: ```bash data.train_files=./path/to/your/train_data.parquet \ data.val_files=./path/to/your/val_data.parquet \ ``` Replace the paths with the location of your custom data files.
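## Reward Sketch

For intuition, the AER accuracy term implemented in `reward_fn.py` can be reduced to a few lines: with `N` predicted points and the first correct point at rank `k`, the reward is `1/sqrt(N*k)`, and `-1/N` when no point hits the ground-truth box. Below is a minimal, self-contained sketch of just this term (toy values; the full implementation additionally checks the think/answer format and point collinearity):

```python
import math


def aer_accuracy(num_pred: int, first_correct_rank: int | None) -> float:
    """Zero-centered symmetric AER accuracy term (see reward_fn.py)."""
    if first_correct_rank is not None:
        # A correct point at rank k: reward shrinks with hedging (N) and rank (k)
        return 1.0 / math.sqrt(num_pred * first_correct_rank)
    # No correct point: mild penalty that flattens as more points are attempted
    return -1.0 / num_pred


print(aer_accuracy(1, 1))     # 1.0   -> single confident correct point
print(aer_accuracy(4, 2))     # ~0.35 -> correct but hedged, ranked second
print(aer_accuracy(4, None))  # -0.25 -> four attempts, none correct
```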
================================================ FILE: verl_distillation/recipe/infigui-g1/reward_fn.py ================================================ # Copyright 2025 Individual Contributor: InfiX.ai # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import logging import math import re from itertools import combinations FMT_RATIO = 1.0 ACC_RATIO = 1.0 # ============================================================================ # Utility Functions # ============================================================================ def extract_think_format(predict_str: str) -> None | dict[str, str]: """ Check if the predicted string meets format requirements and extract thinking and answer parts. Args: predict_str: The predicted string Returns: If format requirements are met, returns a dictionary containing thinking and answer parts; otherwise returns None """ if not predict_str or not isinstance(predict_str, str): return None # Check if <think> is at the beginning if not predict_str.startswith("<think>"): return None # Check if there is <think>...</think> format pattern = r"<think>(.*?)</think>" think_match = re.search(pattern, predict_str, re.DOTALL) if not think_match: return None if predict_str.count("<think>") != 1 or predict_str.count("</think>") != 1: return None # Extract thinking content think_content = think_match.group(1).strip() if not think_content: return None # Get content after </think> think_end_pos = predict_str.find("</think>") + len("</think>") post_think_content = predict_str[think_end_pos:].strip() # Check if there is non-empty content after </think> if not post_think_content: return None return {"think": think_content, "answer": post_think_content} def extract_and_parse_json(input_string, wrapper): """ Try to extract and parse JSON from a string. Args: input_string: The input string wrapper: JSON wrapper symbols, can be '{}' or '[]' Returns: Parsed JSON object, returns None if parsing fails """ if len(wrapper) != 2: raise ValueError("Wrapper must be exactly two characters long") start_char, end_char = wrapper start_index = input_string.find(start_char) if start_index == -1: return None # Find the matching end character by balancing brackets/braces balance = 1 end_index = -1 for i in range(start_index + 1, len(input_string)): if input_string[i] == start_char: balance += 1 elif input_string[i] == end_char: balance -= 1 if balance == 0: end_index = i break if end_index == -1: return None json_string = input_string[start_index : end_index + 1] try: return json.loads(json_string) except json.JSONDecodeError: return None # ============================================================================ # AER Reward Functions # ============================================================================ def _extract_verifiable_answer(answer): """ Extract and verify the format of the point list from the answer string. A valid format is a JSON list of dictionaries, where each dictionary has a "point_2d" key with a list of two numbers as the value.
Args: answer: The answer string to extract points from Returns: List of valid points or None if format is invalid """ points = extract_and_parse_json(answer, "[]") if points is None or not isinstance(points, list): return None # Verify each point in the list for point in points: if isinstance(point, dict) and "point_2d" in point: point_2d = point["point_2d"] if isinstance(point_2d, list) and len(point_2d) == 2: continue # If any point is malformed, the whole answer is invalid return None return points def _format_reward(answer): """ Calculate the format reward for 'point' type data. This function is now primarily used as a check to see if the format is valid. Args: answer: The answer string to validate Returns: Tuple of (reward, is_collinear) where reward is 1.0 for valid format, 0.0 otherwise """ points = _extract_verifiable_answer(answer) if points is None: return 0.0, 0 points_2d = [item["point_2d"] for item in points] if _check_collinear(points_2d): return 0.0, 1 return 1.0, 0 def _check_collinear(points_2d): """ Check if 3 or more points in the list are collinear on any straight line. This uses the cross-product method to avoid division and handle all line types. Args: points_2d: A list of [x, y] coordinates Returns: True if 3 or more points are collinear, False otherwise """ if len(points_2d) < 3: return False # Iterate through all unique combinations of 3 points for p1, p2, p3 in combinations(points_2d, 3): x1, y1 = p1 x2, y2 = p2 x3, y3 = p3 # Check for collinearity using the cross-product method. # If (y2 - y1) * (x3 - x1) == (y3 - y1) * (x2 - x1), the points are collinear. # This is equivalent to checking if the area of the triangle formed by the points is 0. if math.isclose((y2 - y1) * (x3 - x1), (y3 - y1) * (x2 - x1)): return True return False def _accuracy_reward(answer, ground_truth): """ Calculate the accuracy reward based on the symmetric zero-centered formula. The reward is in the range [-1, 1]. Args: answer: The answer string containing predicted points ground_truth: Ground truth bounding box dictionary Returns: Tuple containing: - accuracy (float): The calculated reward - extracted_answer (str): The JSON string of the predicted points - num_pred (int): The number of predicted points - first_correct (int): 1 if the first predicted point is correct, 0 otherwise """ pred_points = _extract_verifiable_answer(answer) # If no valid points are extracted, this is considered a format error, return -1 reward if pred_points is None: return -1.0, "", 0, 0 num_pred = len(pred_points) extracted_answer = json.dumps(pred_points) if num_pred == 0: return -1.0, extracted_answer, 0, 0 # Find the rank 'k' of the first correct point first_correct_rank = -1 for i, item in enumerate(pred_points): point_2d = item["point_2d"] if ( ground_truth["x1"] <= point_2d[0] <= ground_truth["x2"] and ground_truth["y1"] <= point_2d[1] <= ground_truth["y2"] ): first_correct_rank = i + 1 # 1-based index break # Calculate reward based on the zero-centered symmetric formula accuracy = 0.0 if first_correct_rank != -1: # Case a: Correct point found (Positive reward space) k = first_correct_rank accuracy = 1.0 / math.sqrt(num_pred * k) else: # Case b: No correct point found (Negative reward space) accuracy = -1.0 / num_pred first_correct = 1 if first_correct_rank == 1 else 0 return accuracy, extracted_answer, num_pred, first_correct def calculate_point_reward(solution_str, ground_truth, extra_info=None, fmt_ratio=1.0, acc_ratio=1.0, **kwargs): """ Calculate the final reward for 'point' type data. 
Implements the full logic including format checks, collinearity checks, and the zero-centered symmetric reward calculation. Args: solution_str: The solution string from the model ground_truth: Ground truth data extra_info: Extra information dictionary (may be None) fmt_ratio: Format reward ratio acc_ratio: Accuracy reward ratio **kwargs: Additional keyword arguments Returns: Dictionary containing detailed reward information """ if extra_info is not None and extra_info.get("no_think", False): answer = solution_str else: solution_dict = extract_think_format(solution_str) # If the overall 'think'/'answer' format is wrong, return score of -1 if solution_dict is None: return { "score": -1.0, "format": 0.0, "accuracy": -1.0, "pred": "", "num_pred": 0, "has_correct": 0, "first_correct": 0, "only_correct": 0, "is_collinear": 0, } answer = solution_dict["answer"] # Reuse _format_reward to check the format of the 'answer' part # If it's invalid, return score of -1 format_reward, is_collinear = _format_reward(answer) if format_reward == 0.0: return { "score": -1.0, "format": 0.0, "accuracy": -1.0, "pred": "", "num_pred": 0, "has_correct": 0, "first_correct": 0, "only_correct": 0, "is_collinear": is_collinear, } # If format is OK, calculate the accuracy reward accuracy_reward, extracted_answer, num_pred, first_correct = _accuracy_reward(answer, ground_truth) return { "score": fmt_ratio * format_reward + acc_ratio * accuracy_reward, "format": format_reward, "accuracy": accuracy_reward, "pred": extracted_answer, "num_pred": num_pred, "has_correct": 1 if accuracy_reward > 0 else 0, "first_correct": first_correct, "only_correct": 1 if num_pred == 1 and accuracy_reward > 0 else 0, "is_collinear": 0, } # ============================================================================ # AER Reward Handler Registry # ============================================================================ # Dictionary to map data_source to the respective reward calculation function AER_REWARD_HANDLERS = { "point": calculate_point_reward, } def aer_gui_reward_function(data_source, solution_str, ground_truth, extra_info=None, **kwargs): """ Main reward function dispatcher for the Adaptive Exploration Reward (AER) system. Delegates reward calculation to specific functions based on the data_source using a dictionary lookup.
Args: data_source: The source or type of the data (e.g., "point", "bbox") solution_str: The solution string generated by the model ground_truth: The ground truth data extra_info: Any extra information passed along (optional) **kwargs: Additional keyword arguments that might be passed from the PPO trainer config Returns: Dictionary containing detailed reward information with keys: - score: The final calculated reward score - format: Format validation score - accuracy: Accuracy score - pred: Extracted prediction string - num_pred: Number of predictions - has_correct: Whether any correct prediction exists - first_correct: Whether first prediction is correct - only_correct: Whether only one correct prediction exists - is_collinear: Whether points are collinear (for point type) """ handler = AER_REWARD_HANDLERS.get(data_source, None) if handler: try: return handler( solution_str, ground_truth, extra_info=extra_info, fmt_ratio=FMT_RATIO, acc_ratio=ACC_RATIO, **kwargs ) except Exception as e: logging.exception( f"Error executing reward handler for data_source '{data_source}': {e}", ) return { "score": -1.0, "format": 0.0, "accuracy": -1.0, "pred": "", "num_pred": 0, "has_correct": 0, "first_correct": 0, "only_correct": 0, "is_collinear": 0, } # Return a default penalty score on error else: raise ValueError(f"Unknown data_source: '{data_source}'. No specific reward handler defined.") ================================================ FILE: verl_distillation/recipe/infigui-g1/run_3b.sh ================================================ #!/bin/bash set -x ulimit -n 65535 python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=rloo \ data.train_files=./data/omniact_grounding_filtered/omniact_filtered_train.parquet \ data.val_files=./data/omniact_grounding_filtered/omniact_filtered_val.parquet \ data.train_batch_size=128 \ data.max_prompt_length=7168 \ data.max_response_length=1024 \ data.filter_overlong_prompts=False \ data.truncation='error' \ data.image_key=images \ custom_reward_function.path=./recipe/infigui-g1/reward_fn.py \ custom_reward_function.name=aer_gui_reward_function \ actor_rollout_ref.model.path=Qwen/Qwen2.5-VL-3B-Instruct \ actor_rollout_ref.model.enable_activation_offload=True \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.use_dynamic_bsz=False \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=1 \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=0 \ actor_rollout_ref.actor.ppo_mini_batch_size=128 \ actor_rollout_ref.actor.clip_ratio_high=0.4 \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.rollout.tensor_model_parallel_size=1 \ actor_rollout_ref.rollout.name=sglang \ actor_rollout_ref.rollout.gpu_memory_utilization=0.7 \ actor_rollout_ref.rollout.max_num_batched_tokens=8192 \ actor_rollout_ref.rollout.enable_chunked_prefill=False \ actor_rollout_ref.rollout.enforce_eager=False \ actor_rollout_ref.rollout.free_cache_engine=True \ actor_rollout_ref.rollout.n=8 \ actor_rollout_ref.rollout.temperature=1.0 \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=False \ 
actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.logger=['console','wandb'] \ trainer.project_name='infigui-g1' \ trainer.experiment_name='3b' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=16 \ trainer.test_freq=16 \ trainer.total_epochs=6 ================================================ FILE: verl_distillation/recipe/infigui-g1/run_7b.sh ================================================ #!/bin/bash set -x ulimit -n 65535 python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=rloo \ data.train_files=./data/omniact_grounding_filtered/omniact_filtered_train.parquet \ data.val_files=./data/omniact_grounding_filtered/omniact_filtered_val.parquet \ data.train_batch_size=128 \ data.max_prompt_length=7168 \ data.max_response_length=1024 \ data.filter_overlong_prompts=False \ data.truncation='error' \ data.image_key=images \ custom_reward_function.path=./recipe/infigui-g1/reward_fn.py \ custom_reward_function.name=aer_gui_reward_function \ actor_rollout_ref.model.path=Qwen/Qwen2.5-VL-7B-Instruct \ actor_rollout_ref.model.enable_activation_offload=True \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.use_dynamic_bsz=False \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=1 \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=0 \ actor_rollout_ref.actor.ppo_mini_batch_size=128 \ actor_rollout_ref.actor.clip_ratio_high=0.4 \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.rollout.tensor_model_parallel_size=1 \ actor_rollout_ref.rollout.name=sglang \ actor_rollout_ref.rollout.gpu_memory_utilization=0.7 \ actor_rollout_ref.rollout.max_num_batched_tokens=8192 \ actor_rollout_ref.rollout.enable_chunked_prefill=False \ actor_rollout_ref.rollout.enforce_eager=False \ actor_rollout_ref.rollout.free_cache_engine=True \ actor_rollout_ref.rollout.n=8 \ actor_rollout_ref.rollout.temperature=1.0 \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=False \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.logger=['console','wandb'] \ trainer.project_name='infigui-g1' \ trainer.experiment_name='7b' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=16 \ trainer.test_freq=16 \ trainer.total_epochs=6 ================================================ FILE: verl_distillation/recipe/langgraph_agent/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: verl_distillation/recipe/langgraph_agent/chat_model.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Ref: https://python.langchain.com/docs/how_to/custom_chat_model/ """ import asyncio import json import logging import os import uuid from typing import Any, Optional from langchain_core.language_models import BaseChatModel from langchain_core.language_models.base import LanguageModelInput from langchain_core.messages import ( AIMessage, BaseMessage, convert_to_openai_messages, ) from langchain_core.messages.tool import InvalidToolCall, ToolCall from langchain_core.outputs import ChatGeneration, ChatResult from langchain_core.runnables import Runnable, RunnableConfig from langchain_core.tools import StructuredTool from langchain_core.utils.function_calling import convert_to_openai_tool from pydantic import Field from verl.experimental.agent_loop.agent_loop import AgentLoopOutput, AsyncLLMServerManager from verl.experimental.agent_loop.tool_parser import ToolParser from verl.experimental.agent_loop.utils import add_generation_prompt_for_gpt_oss, format_gpt_oss_tool_response_manually logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) class MaxTokenExceededError(Exception): """Indicate that history chat messages + tool message exceeds LLM max_tokens.""" pass class ChatModel(BaseChatModel): model_name: str = Field(alias="model") """The name of the model""" client: AsyncLLMServerManager """AsyncLLM server manager""" tokenizer: Any """Tokenizer for the model""" max_tokens: int """Max tokens to generate""" tool_parser: str = "hermes" """Tool parser for the model""" max_parallel_calls: int = 1 """Max parallel tool calls""" temperature: float = 1.0 """Temperature for sampling""" top_p: float = 1.0 """Top p for sampling""" repetition_penalty: float = 1.0 """Repetition penalty for sampling""" def bind_tools(self, tools, **kwargs) -> Runnable[LanguageModelInput, BaseMessage]: """Bind tools to the model. Args: tools: Sequence of tools to bind to the model. Returns: A Runnable that returns a message. 
""" formatted_tools: list = [convert_to_openai_tool(tool) for tool in tools] # used to remove system prompt prefix when encoding tool response system_prompt = self.tokenizer.apply_chat_template([{}], add_generation_prompt=False, tokenize=True) kwargs["system_prompt"] = system_prompt return self.bind(tools=formatted_tools, **kwargs) def with_structured_output( self, schema: dict | type, *, include_raw: bool = False, **kwargs: Any, ) -> Runnable[LanguageModelInput, dict | BaseChatModel]: """Ref: https://langchain-ai.github.io/langgraph/how-tos/react-agent-structured-output/""" raise NotImplementedError def _generate( self, messages: list[BaseMessage], stop: Optional[list[str]] = None, **kwargs: Any, ) -> ChatResult: raise NotImplementedError async def _agenerate( self, messages: list[BaseMessage], stop: Optional[list[str]] = None, **kwargs: Any, ) -> ChatResult: """Asynchronously generate chat completion message. Args: messages (list[BaseMessage]): List of list of messages. stop (Optional[list[str]], optional): Stop words to use when generating. Model output is cut off at the first occurrence of any of these substrings. Defaults to None. Returns: ChatResult: Chat result. """ request_id, prompt_ids, response_mask = await self._preprocess(messages, **kwargs) sampling_params = { "temperature": self.temperature, "top_p": self.top_p, "repetition_penalty": self.repetition_penalty, } if "sampling_params" in kwargs: sampling_params.update(kwargs["sampling_params"]) output = await self.client.generate( request_id=request_id, prompt_ids=prompt_ids, sampling_params=sampling_params ) message = await self._postprocess(request_id, prompt_ids, response_mask, output.token_ids, **kwargs) generation = ChatGeneration(message=message) return ChatResult(generations=[generation]) @property def _llm_type(self) -> str: """Get the type of language model used by this chat model.""" return self.model_name async def _preprocess(self, messages: list[BaseMessage], **kwargs: Any) -> tuple[str, list[int], list[int]]: """Preprocess messages for chat completion. To ensure strong consistency with policy model, AsyncLLM server generate response with token in token out instead of messages list. But all agent frameworks use messages list to represent chat history. To mitigate the gap, we store trajectory (prompt_ids, response_mask) in lastest AIMessage.response_metadata. 1. Encode ToolMessage to token ids. 2. Retrieve trajectory (prompt_ids, response_mask) from lastest AIMessage.response_metadata. 3. Append ToolMessage token ids to prompt_ids, and append 0 to response_mask. Ref: https://python.langchain.com/docs/concepts/chat_history/ Args: messages (list[BaseMessage]): List of messages. Returns: tuple[str, list[int], list[int]]: Request id, prompt ids, response mask. """ # messages: [system], human, ai, human|tool, ai, human|tool, ... assert messages[-1].type in ["human", "tool"], ( f"Last message must be human or tool, but got {messages[-1].type}" ) loop = asyncio.get_running_loop() # Case 1: initial chat completion: [system], human if messages[-1].type == "human" and (len(messages) == 1 or messages[-2].type != "ai"): prompt_ids = await loop.run_in_executor( None, lambda: self.tokenizer.apply_chat_template( convert_to_openai_messages(messages), tools=kwargs.get("tools"), add_generation_prompt=True, tokenize=True, ), ) return str(uuid.uuid4()), prompt_ids, [] # Case 2: follow up chat completion with tool/human response: [system], human, ai, human|tool, ... 
for i in range(len(messages) - 1, -1, -1): if messages[i].type == "ai": break assert "prompt_ids" in messages[i].response_metadata, "Last message must have prompt_ids in response_metadata" assert "response_mask" in messages[i].response_metadata, ( "Last message must have response_mask in response_metadata" ) # encode tool response tool_responses = convert_to_openai_messages(messages[i + 1 :]) if self.tool_parser == "hermes": tool_response_ids = await loop.run_in_executor( None, lambda messages=tool_responses: self.tokenizer.apply_chat_template( messages, add_generation_prompt=True, tokenize=True ), ) tool_response_ids = tool_response_ids[len(kwargs["system_prompt"]) :] elif self.tool_parser == "gpt-oss": # Format tool responses manually # since gpt-oss chat template requires tool call messages to parse tool response messages # we need to format the tool response messages manually tool_response_texts = [] for tool_msg in tool_responses: if tool_msg["role"] == "tool": # Use tool message's name if available (for multiple tool calls) actual_tool_name = tool_msg.get("name", "unknown") if actual_tool_name == "unknown": logger.error(f"actual_tool_name: {actual_tool_name}") formatted = format_gpt_oss_tool_response_manually(tool_msg["content"], actual_tool_name) tool_response_texts.append(formatted) # Tokenize the manually formatted tool responses tool_response_text = "".join(tool_response_texts) # need to add generation tokens for gpt-oss manually since add_generation_prompt is True tool_response_text = add_generation_prompt_for_gpt_oss(tool_response_text) logger.debug(f"tool_response_text: {tool_response_text}") tool_response_ids = await loop.run_in_executor( None, lambda: self.tokenizer.encode(tool_response_text, add_special_tokens=False) ) else: raise ValueError(f"Unsupported tool parser: {self.tool_parser}") # stop generation if response length exceeds max response length if len(messages[i].response_metadata["response_mask"]) + len(tool_response_ids) >= self.max_tokens: raise MaxTokenExceededError(f"Max response length {self.max_tokens} exceeded") # append tool response to prompt request_id = messages[i].response_metadata.pop("request_id") prompt_ids = messages[i].response_metadata.pop("prompt_ids") response_mask = messages[i].response_metadata.pop("response_mask") prompt_ids += tool_response_ids response_mask += [0] * len(tool_response_ids) return request_id, prompt_ids, response_mask async def _postprocess( self, request_id: str, prompt_ids: list[int], response_mask: list[int], response_ids: list[int], **kwargs: Any ) -> AIMessage: """Postprocess response_ids when chat completion is done. 1. Decode response_ids, parse tool calls to AIMessage. 2. Append response_ids to prompt_ids, and append 1 to response_mask. 3. Store trajectory (prompt_ids, response_mask) in AIMessage.response_metadata. Args: request_id (str): Unique request id. prompt_ids (list[int]): Input prompt token ids in this chat completion. response_mask (list[int]): Response mask before this chat completion. response_ids (list[int]): LLM generated token ids in this chat completion. Returns: AIMessage: Postprocessed message. 
""" prompt_ids += response_ids response_mask += [1] * len(response_ids) tool_parser = ToolParser.get_tool_parser(self.tool_parser, self.tokenizer) content, function_calls = await tool_parser.extract_tool_calls(response_ids) tool_calls, invalid_tool_calls = [], [] for function_call in function_calls: error = None try: args = json.loads(function_call.arguments) if not isinstance(args, dict): error = f"Tool arguments must be a JSON object, got {type(args).__name__}" except json.JSONDecodeError as e: error = f"Invalid JSON tool arguments: {e}" if error: logger.warning(error) invalid_tool_calls.append( InvalidToolCall( name=function_call.name, args=function_call.arguments, id=str(uuid.uuid4()), error=error, ) ) else: tool_calls.append( ToolCall( name=function_call.name, args=args, id=str(uuid.uuid4()), ) ) message = AIMessage( content=content, tool_calls=tool_calls[: self.max_parallel_calls], invalid_tool_calls=invalid_tool_calls[: self.max_parallel_calls], response_metadata={ "request_id": request_id, "prompt_ids": prompt_ids, "response_mask": response_mask, }, ) return message class TruncateStructuredTool(StructuredTool): """Structured tool with response truncation.""" tool_response_truncate_side: str """truncate side of tool response: left, middle, right""" max_tool_response_length: int """max length of tool response""" async def _arun( self, *args: Any, config: RunnableConfig, **kwargs: Any, ) -> Any: tool_response = await super()._arun(*args, config=config, **kwargs) tool_response = str(tool_response) if len(tool_response) > self.max_tool_response_length: if self.tool_response_truncate_side == "left": tool_response = tool_response[: self.max_tool_response_length] + "...(truncated)" elif self.tool_response_truncate_side == "right": tool_response = "(truncated)..." + tool_response[-self.max_tool_response_length :] else: length = self.max_tool_response_length // 2 tool_response = tool_response[:length] + "...(truncated)..." + tool_response[-length:] return tool_response def convert_to_agent_output(messages: list[BaseMessage], response_length: int) -> AgentLoopOutput: """Convert messages to AgentLoopOutput. Args: messages (List[BaseMessage]): List of messages, last message must be assistant with response_metadata containing `prompt_ids` and `response_mask`. response_length (int): Max length of response. Returns: AgentLoopOutput: agent loop output trajectory used for training. 
""" # skip last tool calls for i in range(len(messages) - 1, -1, -1): if messages[i].type != "tool": break last_message = messages[i] assert last_message.type == "ai", f"Last message must be assistant, but got {last_message.type}" assert "prompt_ids" in last_message.response_metadata, "Last message must have prompt_ids in response_metadata" assert "response_mask" in last_message.response_metadata, ( "Last message must have response_mask in response_metadata" ) num_turns = 0 for i in range(len(messages)): if messages[i].type == "system": continue # parallel tool calls are in single turn if i == 0 or messages[i].type != messages[i - 1].type: num_turns += 1 prompt_ids = last_message.response_metadata["prompt_ids"] response_mask = last_message.response_metadata["response_mask"] response_ids = prompt_ids[-len(response_mask) :] prompt_ids = prompt_ids[: len(prompt_ids) - len(response_mask)] output = AgentLoopOutput( prompt_ids=prompt_ids, response_ids=response_ids[:response_length], response_mask=response_mask[:response_length], num_turns=num_turns, metrics={}, ) return output ================================================ FILE: verl_distillation/recipe/langgraph_agent/example/README.md ================================================ # MathExpression: LangGraph Agent Example MathExpression is a tiny example to demonstrate multi-turn rollout with [LangGraph ReactAgent](https://langchain-ai.github.io/langgraph/agents/overview/). ### Define react agent with tool Firstly, to force ReactAgent to evaluate math expression by tool, we define a special operand `@`: ```python @tool(parse_docstring=True) def calculate(a: int, b: int, operand: str) -> int: """ Compute the results using operand with two integers Args: a: the first operand b: the second operand operand: '+' or '-' or '*' or '@' """ assert operand in ["+", "-", "*", "@"], f"unknown operand {operand}" if operand == "@": return 3 * a - 2 * b return eval(f"{a} {operand} {b}") ``` Without calling `calculate`, ReactAgent is impossible to evaluate math expression correctly. Then, we can equip ReactAgent with `calculate` tool: ```python class MathExpressionReactAgentLoop(ReactAgentLoop): @classmethod def init_class(cls, config, tokenizer): cls.tools = [calculate] super().init_class(config, tokenizer) ``` We can define agent loop config in yaml file, which will be used by AgentLoopWorker to dynamic load custom AgentLoop class. ```yaml - name: math_expression _target_: recipe.langgraph_agent.example.math_expression.MathExpressionReactAgentLoop ``` ### Prepare dataset Now, let's prepare two small datasets for training and evaluation: ```bash python recipe/langgraph_agent/example/create_dataset.py ``` - Parameters: `--train_size` (default: 5000), `--test_size` (default: 500), `--output_dir` (default: `data/math_expression_tool`). - Example with custom sizes/output: ```bash python recipe/langgraph_agent/example/create_dataset.py \ --train_size 10000 \ --test_size 1000 \ --output_dir data/math_expression_tool ``` Note that dataset should contain a column `agent_name` with `math_expression`, which is used by `AgentLoopWorker` to select the agent loop class. 
| prompt | reward_model | agent_name | |--------------------------------------|------------------------------|-----------------| | [{'role': 'user', 'content': '...'}] | {'ground_truth': '-10', ...} | math_expression | | [{'role': 'user', 'content': '...'}] | {'ground_truth': '-10', ...} | math_expression | The generated math expressions look like the ones below, requiring the model to call `calculate` multiple times to solve the sub-expressions. ``` (2 @ (8 @ 8 @ 5 @ 5 @ 3) @ 6 @ (1 @ 4 @ 4 @ 4) @ 2) @ 6 (4.6 @ (9.05 @ 4.0) @ 8.3 @ 1.21) @ 8.6 9 @ 4 ((2 @ 2) @ (3 @ 3)) @ 4 ``` ### Training Hook all these up and start training: ```bash bash recipe/langgraph_agent/example/run_qwen2.5_3b.sh 2>&1 | tee train.log ``` To submit on a SLURM cluster (the script contains SBATCH headers): ```bash sbatch recipe/langgraph_agent/example/run_qwen2.5_3b.sh ``` **Note on `GPUS_PER_NODE` and `NNODES`:** - `GPUS_PER_NODE`: GPUs per node. Detection order: `SLURM_GPUS_ON_NODE` (if set) → `GPUS_PER_NODE` → `2`. - `NNODES`: number of nodes. Detection order: `SLURM_JOB_NUM_NODES` (if set) → `NNODES` → `1`. - Total GPUs = `GPUS_PER_NODE × NNODES` (must be ≥ 2). Local override (no `SLURM_*` set): ```bash GPUS_PER_NODE=4 NNODES=2 bash recipe/langgraph_agent/example/run_qwen2.5_3b.sh ``` After 39 total steps, the model should achieve 100% accuracy on the test dataset: - val-aux/lighteval/MATH/reward: 1.0 - val-aux/num_turns/mean: 9.0, the average number of messages, including assistant and tool turns. We can see that the model solves the math expression by calling the `calculate` tool: ``` system You are Qwen, created by Alibaba Cloud. You are a helpful assistant. user We define a new math operator @, where you can only call an external tool to compute. Please put your final answer inside \boxed{} only in the last turn. Now answer the following questions: Compute 5 @ (3 @ 6 @ 8) assistant Sure, let's break this down step by step. We need to compute the expression \(5 @ (3 @ 6 @ 8)\). First, we need to compute the inner expression \(3 @ 6 @ 8\). Let's compute \(3 @ 6\) first using the external tool. {"name": "calculate", "arguments": {"a": 3, "b": 6, "operand": "@"}} user -3 assistant We have computed that \(3 @ 6 = -3\). Now we need to compute \(-3 @ 8\). Let's use the external tool to compute this. {"name": "calculate", "arguments": {"a": -3, "b": 8, "operand": "@"}} user -25 assistant We have computed that \(-3 @ 8 = -25\). Now we can put this result into the original expression to get the final result of \(5 @ (3 @ 6 @ 8)\), which is \(5 @ (-25)\). Let's compute \(5 @ (-25)\) using the external tool. {"name": "calculate", "arguments": {"a": 5, "b": -25, "operand": "@"}} user 65 assistant The final result of \(5 @ (3 @ 6 @ 8)\) is \(\boxed{65}\). ``` ================================================ FILE: verl_distillation/recipe/langgraph_agent/example/agent.yaml ================================================ - name: math_expression _target_: recipe.langgraph_agent.example.math_expression.MathExpressionReactAgentLoop ================================================ FILE: verl_distillation/recipe/langgraph_agent/example/create_dataset.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Create dataset for calculator """ import argparse import os import random import pandas as pd def generate_math_expression(min_terms=2, max_terms=5, min_number=1, max_number=10, allow_decimals=False, max_depth=2): """ Generate a random mathematical expression with operators +, -, *, /, @, and parentheses. Args: min_terms (int): Minimum number of terms in the expression. max_terms (int): Maximum number of terms in the expression. min_number (int): Minimum value for numbers in the expression. max_number (int): Maximum value for numbers in the expression. allow_decimals (bool): Whether to allow decimal numbers. max_depth (int): Maximum nesting depth for parentheses. Returns: str: A valid mathematical expression as a string. """ def generate_number(): """Generate a random number (integer or float).""" assert min_number < max_number num = random.uniform(min_number, max_number) if not allow_decimals: num = int(num) else: num = round(num, random.randint(0, 2)) # Round to 0-2 decimal places return str(num) def generate_term(depth=0): """Generate a term (number or parenthesized expression).""" if depth < max_depth and random.random() < 0.5: # 50% chance to add parentheses expr = generate_expression(depth + 1) return f"({expr})" else: return generate_number() def generate_expression(depth=0): """Generate a full expression with multiple terms and operators.""" num_terms = random.randint(min_terms, max_terms) terms = [generate_term(depth) for _ in range(num_terms)] # Randomly select operators operators = ["+", "-", "*", "/", "@"] expr = terms[0] for i in range(1, num_terms): # With the weights below, only the custom '@' operator is ever chosen op = random.choices( operators, weights=[0, 0, 0, 0, 1], # zero weight for +, -, *, /; adjust the weights to re-enable them )[0] expr += f" {op} " + terms[i] return expr return generate_expression() def test(): # Example 1: Basic integer expression print(generate_math_expression()) # e.g. (3 @ 7) @ 2 @ 5 # Example 2: Expression with decimals print(generate_math_expression(allow_decimals=True)) # e.g. 4.5 @ (2.1 @ 3.7) @ 1.2 # Example 3: More complex expression with higher depth print(generate_math_expression(max_terms=6, max_depth=3)) # e.g. ((5 @ 2) @ (3 @ 1)) @ (7 @ 2) @ 4 # Example 4: Simplified expression print(generate_math_expression(min_terms=2, max_terms=3, max_number=5)) # e.g. 4 @ 2 @ 3 def calculate(expression: str) -> float: """ Evaluate a mathematical expression with +, -, *, /, @, and parentheses. The @ operator is defined as: a @ b = 3a - 2b. Args: expression (str): Input mathematical expression (e.g., "3@2+4"). Returns: float: Result of the evaluated expression. Raises: ValueError: For invalid expressions (e.g., mismatched parentheses, division by zero).
""" def tokenize(s: str) -> list: """Convert the input string into tokens (numbers, operators, parentheses).""" tokens = [] i = 0 while i < len(s): if s[i].isdigit() or s[i] == ".": # Parse number (integer or float) j = i while j < len(s) and (s[j].isdigit() or s[j] == "."): j += 1 tokens.append(s[i:j]) i = j elif s[i] in "+-*/@()": # Operator or parenthesis tokens.append(s[i]) i += 1 elif s[i].isspace(): # Skip whitespace i += 1 else: raise ValueError(f"Invalid character: {s[i]}") return tokens def infix_to_postfix(tokens: list) -> list: """Convert infix notation to postfix notation (Reverse Polish Notation).""" output = [] stack = [] # Higher precedence for @ (between * and +) precedence = {"@": 3, "*": 2, "/": 2, "+": 1, "-": 1} for token in tokens: if token.isdigit() or "." in token: output.append(token) elif token == "(": stack.append(token) elif token == ")": while stack and stack[-1] != "(": output.append(stack.pop()) if not stack or stack[-1] != "(": raise ValueError("Mismatched parentheses") stack.pop() # Discard '(' else: # Operator while stack and stack[-1] != "(" and precedence.get(stack[-1], 0) >= precedence.get(token, 0): output.append(stack.pop()) stack.append(token) # Pop remaining operators while stack: if stack[-1] in "()": raise ValueError("Mismatched parentheses") output.append(stack.pop()) return output def evaluate_postfix(postfix: list) -> float: """Evaluate postfix expression using a stack.""" stack = [] for token in postfix: if token.isdigit() or "." in token: stack.append(float(token)) else: if len(stack) < 2: raise ValueError("Invalid expression") b = stack.pop() a = stack.pop() if token == "+": res = a + b elif token == "-": res = a - b elif token == "*": res = a * b elif token == "/": if b == 0: raise ValueError("Division by zero") res = a / b elif token == "@": res = 3 * a - 2 * b # Custom @ operator implementation else: raise ValueError(f"Invalid operator: {token}") stack.append(res) if len(stack) != 1: raise ValueError("Invalid expression") return stack[0] # Remove spaces and validate parentheses expression = expression.replace(" ", "") if expression.count("(") != expression.count(")"): raise ValueError("Mismatched parentheses") tokens = tokenize(expression) postfix = infix_to_postfix(tokens) result = evaluate_postfix(postfix) # Convert integers to integer representation if result.is_integer(): return int(result) return result def generate_data(total_num_dataset, split): rl_dataset = { "prompt": [], "data_source": [], "ability": [], "reward_model": [], "extra_info": [], "agent_name": [], } for idx in range(total_num_dataset): while True: try: expression: str = generate_math_expression( min_terms=2, max_terms=3, min_number=1, max_number=10, allow_decimals=False, max_depth=1 ) num_plus = expression.count("+") num_minus = expression.count("-") num_mul = expression.count("*") num_star = expression.count("@") answer = str(calculate(expression)) # answer = str(eval(expression)) break except Exception as e: print(e) continue num_tool_calls = num_plus + num_minus + num_mul + num_star prompt = ( f"We define a new math operator @, where you can only call an external tool to compute. " f"Please put your final answer inside \\boxed{{}} only in the last turn. 
Now answer the " f"following questions:\nCompute {expression}" ) prompt_with_template = [ { "role": "user", "content": prompt, } ] rl_dataset["prompt"].append(prompt_with_template) rl_dataset["data_source"].append("lighteval/MATH") rl_dataset["ability"].append("math") rl_dataset["reward_model"].append({"style": "lighteval/MATH", "ground_truth": answer}) rl_dataset["extra_info"].append( {"index": idx, "expression": expression, "split": split, "expected_tool_calls": num_tool_calls} ) rl_dataset["agent_name"].append("math_expression") rl_dataset = pd.DataFrame(data=rl_dataset) return rl_dataset if __name__ == "__main__": parser = argparse.ArgumentParser(description="Math Expression Dataset Generator") parser.add_argument("--train_size", type=int, default=5000, help="Number of training samples") parser.add_argument("--test_size", type=int, default=500, help="Number of testing samples") parser.add_argument("--output_dir", default="data/math_expression_tool", help="Directory to save the dataset") args = parser.parse_args() # print(calculate("3@2")) # Output: 5 (3*3 - 2*2) # print(calculate("3@2+4")) # Output: 9 (5 + 4) # print(calculate("3*(4@2)")) # Output: 24 (3 * 8) # print(calculate("(5@3)*2")) # Output: 18 (9 * 2) train_dataset = generate_data(total_num_dataset=args.train_size, split="train") test_dataset = generate_data(total_num_dataset=args.test_size, split="test") # Make sure the dataset directory exists os.makedirs(args.output_dir, exist_ok=True) # Save the datasets to parquet files train_dataset.to_parquet(os.path.join(args.output_dir, "train.parquet")) test_dataset.to_parquet(os.path.join(args.output_dir, "test.parquet")) ================================================ FILE: verl_distillation/recipe/langgraph_agent/example/math_expression.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from langchain_core.tools import tool

from recipe.langgraph_agent.react_agent_loop import ReactAgentLoop


@tool(parse_docstring=True)
def calculate(a: int, b: int, operand: str) -> int:
    """Compute the result of applying operand to two integers.

    Args:
        a: the first operand
        b: the second operand
        operand: '+' or '-' or '*' or '@'
    """
    assert operand in ["+", "-", "*", "@"], f"unknown operand {operand}"
    if operand == "@":
        return 3 * a - 2 * b
    return eval(f"{a} {operand} {b}")


class MathExpressionReactAgentLoop(ReactAgentLoop):
    @classmethod
    def init_class(cls, config, tokenizer, **kwargs):
        cls.tools = [calculate]
        super().init_class(config, tokenizer)


================================================
FILE: verl_distillation/recipe/langgraph_agent/example/run_gpt_oss_20b_bf16.sh
================================================
#!/usr/bin/env bash
#SBATCH --job-name=rl-langgraph-20B
#SBATCH --partition=main
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=64
#SBATCH --gres=gpu:4
#SBATCH --mem=0
#SBATCH --time=10:00:00
#SBATCH --output=%x_%j.out
#SBATCH --error=%x_%j.err
set -xeuo pipefail

# ================= cluster topology =================
export GPUS_PER_NODE=${SLURM_GPUS_ON_NODE:-${GPUS_PER_NODE:-2}}  # GPUs on this node
NNODES=${SLURM_JOB_NUM_NODES:-${NNODES:-1}}
export NNODES
export RAY_NUM_NODES=$NNODES

# Require at least 2 GPUs
TOTAL_GPUS=$((GPUS_PER_NODE * NNODES))
if [ "$TOTAL_GPUS" -lt 2 ]; then
    echo "Error: at least 2 GPUs are required, detected $TOTAL_GPUS." >&2
    exit 1
fi

echo "Using $NNODES nodes and $GPUS_PER_NODE GPUs per node..."

# ================= data/model/tool =================
HDFS_ROOT=${HDFS_ROOT:-$PWD}
DATA_ROOT=${DATA_ROOT:-$PWD}

# HF hub path; point model_path at a local copy if you have one
model_path="lmsys/gpt-oss-20b-bf16"

# Use the default output directory produced by create_dataset.py
train_files=$DATA_ROOT/data/math_expression_tool/train.parquet
test_files=$DATA_ROOT/data/math_expression_tool/test.parquet

# Agent config
agent_loop_config_path=recipe/langgraph_agent/example/agent.yaml

# =================== wandb ===================
project_name=math_expression_tool
experiment_name=gpt-oss-20b-bf16
default_local_dir=$DATA_ROOT/checkpoint/$experiment_name

# ================= algorithm =================
adv_estimator=grpo
use_kl_in_reward=false
kl_coef=0.0
use_kl_loss=false
kl_loss_coef=0.0
clip_ratio_low=0.2
clip_ratio_high=0.28
max_turns=8
max_prompt_length=1024
max_response_length=8192
actor_lr=1e-6

train_batch_size=128
ppo_mini_batch_size=16
n_resp_per_prompt=8
n_resp_per_prompt_val=1

# =================== logging ===================
export RAY_LOGGING_LEVEL=DEBUG
export HYDRA_FULL_ERROR=1

# ================= performance =================
export NCCL_IBEXT_DISABLE=1
export NCCL_NVLS_ENABLE=1
export NCCL_IB_HCA=mlx5
export UCX_NET_DEVICES=mlx5_0:1,mlx5_1:1,mlx5_2:1,mlx5_3:1,mlx5_4:1,mlx5_5:1,mlx5_6:1,mlx5_7:1
export VLLM_USE_V1=1
export VLLM_ATTENTION_BACKEND=FLASH_ATTN

infer_tp=2   # inference tensor parallel size
train_sp=4   # Ulysses sequence parallel size for actor
offload=true

actor_max_token_len_per_gpu=$(( (max_prompt_length + max_response_length) * 4 ))
log_prob_max_token_len_per_gpu=$(( actor_max_token_len_per_gpu * 2 ))

train_files="['$train_files']"
test_files="['$test_files']"
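# Illustrative arithmetic (comment added for clarity, not in the original script):
# with the settings above, actor_max_token_len_per_gpu = (1024 + 8192) * 4 = 36864
# tokens packed per GPU per dynamic-batching micro-batch, and
# log_prob_max_token_len_per_gpu doubles that to 73728.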
python3 -m verl.trainer.main_ppo \
    algorithm.adv_estimator=$adv_estimator \
    algorithm.use_kl_in_reward=$use_kl_in_reward \
    algorithm.kl_ctrl.kl_coef=$kl_coef \
    data.train_files="$train_files" \
    data.val_files="$test_files" \
    data.return_raw_chat=true \
    data.train_batch_size=$train_batch_size \
    data.max_prompt_length=$max_prompt_length \
    data.max_response_length=$max_response_length \
    data.filter_overlong_prompts=true \
    data.truncation='error' \
    actor_rollout_ref.model.path="$model_path" \
    actor_rollout_ref.model.use_remove_padding=true \
    actor_rollout_ref.model.enable_gradient_checkpointing=true \
    actor_rollout_ref.actor.use_kl_loss=$use_kl_loss \
    actor_rollout_ref.actor.kl_loss_coef=$kl_loss_coef \
    actor_rollout_ref.actor.clip_ratio_low=$clip_ratio_low \
    actor_rollout_ref.actor.clip_ratio_high=$clip_ratio_high \
    actor_rollout_ref.actor.clip_ratio_c=10.0 \
    actor_rollout_ref.actor.optim.lr=$actor_lr \
    actor_rollout_ref.actor.use_dynamic_bsz=true \
    actor_rollout_ref.actor.ppo_mini_batch_size=$ppo_mini_batch_size \
    actor_rollout_ref.actor.ppo_max_token_len_per_gpu=$actor_max_token_len_per_gpu \
    actor_rollout_ref.actor.ulysses_sequence_parallel_size=$train_sp \
    actor_rollout_ref.actor.fsdp_config.param_offload=$offload \
    actor_rollout_ref.actor.fsdp_config.optimizer_offload=$offload \
    actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=$log_prob_max_token_len_per_gpu \
    actor_rollout_ref.rollout.name=sglang \
    actor_rollout_ref.rollout.mode=async \
    actor_rollout_ref.rollout.tensor_model_parallel_size=$infer_tp \
    actor_rollout_ref.rollout.multi_turn.max_user_turns=$max_turns \
    actor_rollout_ref.rollout.multi_turn.max_assistant_turns=$max_turns \
    actor_rollout_ref.rollout.multi_turn.format=gpt-oss \
    +actor_rollout_ref.rollout.engine_kwargs.sglang.attention_backend=triton \
    actor_rollout_ref.rollout.agent.agent_loop_config_path=$agent_loop_config_path \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.7 \
    actor_rollout_ref.rollout.n=$n_resp_per_prompt \
    actor_rollout_ref.rollout.val_kwargs.top_p=1.0 \
    actor_rollout_ref.rollout.val_kwargs.temperature=1.0 \
    actor_rollout_ref.rollout.val_kwargs.n=$n_resp_per_prompt_val \
    trainer.logger='["console","wandb"]' \
    trainer.project_name=$project_name \
    trainer.experiment_name=$experiment_name \
    trainer.n_gpus_per_node="$GPUS_PER_NODE" \
    trainer.val_before_train=true \
    trainer.log_val_generations=50 \
    trainer.nnodes="$NNODES" \
    trainer.save_freq=-1 \
    trainer.default_local_dir="$default_local_dir" \
    trainer.test_freq=5 \
    trainer.total_epochs=1 "$@"


================================================
FILE: verl_distillation/recipe/langgraph_agent/example/run_qwen2.5_3b.sh
================================================
#!/usr/bin/env bash
#SBATCH --job-name=rl-langgraph-3B
#SBATCH --partition=main
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=64
#SBATCH --gres=gpu:4
#SBATCH --mem=0
#SBATCH --time=10:00:00
#SBATCH --output=%x_%j.out
#SBATCH --error=%x_%j.err
set -xeuo pipefail

# ================= cluster topology =================
export GPUS_PER_NODE=${SLURM_GPUS_ON_NODE:-${GPUS_PER_NODE:-2}}  # GPUs on this node
NNODES=${SLURM_JOB_NUM_NODES:-${NNODES:-1}}
export NNODES
export RAY_NUM_NODES=$NNODES

# Require at least 2 GPUs
TOTAL_GPUS=$((GPUS_PER_NODE * NNODES))
if [ "$TOTAL_GPUS" -lt 2 ]; then
    echo "Error: at least 2 GPUs are required, detected $TOTAL_GPUS." >&2
    exit 1
fi

echo "Using $NNODES nodes and $GPUS_PER_NODE GPUs per node..."

# ================= data/model/tool =================
HDFS_ROOT=${HDFS_ROOT:-$PWD}
DATA_ROOT=${DATA_ROOT:-$PWD}

# Prefer local model if present, otherwise fall back to HF hub path
model_path=${model_path:-$DATA_ROOT/model/Qwen2.5-3B-Instruct}
if [ !
-d "$model_path" ]; then model_path=Qwen/Qwen2.5-3B-Instruct fi # Use the default output directory produced by create_dataset.py train_files=$DATA_ROOT/data/math_expression_tool/train.parquet test_files=$DATA_ROOT/data/math_expression_tool/test.parquet # Agent config agent_loop_config_path=recipe/langgraph_agent/example/agent.yaml # =================== wandb =================== project_name=math_expression_tool experiment_name=qwen2.5-3b default_local_dir=$DATA_ROOT/checkpoint/$experiment_name # ================= algorithm ================= adv_estimator=grpo use_kl_in_reward=false kl_coef=0.0 use_kl_loss=false kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 max_turns=8 max_prompt_length=1024 max_response_length=2048 actor_lr=1e-6 train_batch_size=128 ppo_mini_batch_size=16 n_resp_per_prompt=8 n_resp_per_prompt_val=1 # =================== logging =================== export RAY_LOGGING_LEVEL=DEBUG export HYDRA_FULL_ERROR=1 # ================= performance ================= export NCCL_IBEXT_DISABLE=1 export NCCL_NVLS_ENABLE=1 export NCCL_IB_HCA=mlx5 export UCX_NET_DEVICES=mlx5_0:1,mlx5_1:1,mlx5_2:1,mlx5_3:1,mlx5_4:1,mlx5_5:1,mlx5_6:1,mlx5_7:1 export VLLM_USE_V1=1 export VLLM_ATTENTION_BACKEND=FLASH_ATTN infer_tp=2 # vLLM tensor parallel size train_sp=4 # Ulysses sequence parallel size for actor offload=true actor_max_token_len_per_gpu=$(( (max_prompt_length + max_response_length) * 4 )) log_prob_max_token_len_per_gpu=$(( actor_max_token_len_per_gpu * 2 )) train_files="['$train_files']" test_files="['$test_files']" python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=$adv_estimator \ algorithm.use_kl_in_reward=$use_kl_in_reward \ algorithm.kl_ctrl.kl_coef=$kl_coef \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.return_raw_chat=true \ data.train_batch_size=$train_batch_size \ data.max_prompt_length=$max_prompt_length \ data.max_response_length=$max_response_length \ data.filter_overlong_prompts=true \ data.truncation='error' \ actor_rollout_ref.model.path="$model_path" \ actor_rollout_ref.model.use_remove_padding=true \ actor_rollout_ref.model.enable_gradient_checkpointing=true \ actor_rollout_ref.actor.use_kl_loss=$use_kl_loss \ actor_rollout_ref.actor.kl_loss_coef=$kl_loss_coef \ actor_rollout_ref.actor.clip_ratio_low=$clip_ratio_low \ actor_rollout_ref.actor.clip_ratio_high=$clip_ratio_high \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.actor.optim.lr=$actor_lr \ actor_rollout_ref.actor.use_dynamic_bsz=true \ actor_rollout_ref.actor.ppo_mini_batch_size=$ppo_mini_batch_size \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=$actor_max_token_len_per_gpu \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=$train_sp \ actor_rollout_ref.actor.fsdp_config.param_offload=$offload \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=$offload \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=$log_prob_max_token_len_per_gpu \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.mode=async \ actor_rollout_ref.rollout.tensor_model_parallel_size=$infer_tp \ actor_rollout_ref.rollout.multi_turn.max_user_turns=$max_turns \ actor_rollout_ref.rollout.multi_turn.max_assistant_turns=$max_turns \ actor_rollout_ref.rollout.multi_turn.format=hermes \ actor_rollout_ref.rollout.agent.agent_loop_config_path=$agent_loop_config_path \ actor_rollout_ref.rollout.gpu_memory_utilization=0.9 \ actor_rollout_ref.rollout.n=$n_resp_per_prompt \ actor_rollout_ref.rollout.val_kwargs.top_p=0.6 \ actor_rollout_ref.rollout.val_kwargs.temperature=1.0 \ 
actor_rollout_ref.rollout.val_kwargs.n=$n_resp_per_prompt_val \
    trainer.logger='["console","wandb"]' \
    trainer.project_name=$project_name \
    trainer.experiment_name=$experiment_name \
    trainer.n_gpus_per_node="$GPUS_PER_NODE" \
    trainer.val_before_train=true \
    trainer.log_val_generations=50 \
    trainer.nnodes="$NNODES" \
    trainer.save_freq=-1 \
    trainer.default_local_dir="$default_local_dir" \
    trainer.test_freq=5 \
    trainer.total_epochs=1 "$@"


================================================
FILE: verl_distillation/recipe/langgraph_agent/react_agent_loop.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
LangGraph React Agent Loop.

This implementation is exactly the same as `ToolAgentLoop`.
Ref: https://langchain-ai.github.io/langgraph/tutorials/workflows/
"""

from typing import Any, Literal

from langchain_core.runnables import RunnableConfig
from langgraph.graph import END, MessagesState, StateGraph
from langgraph.prebuilt import ToolNode

from recipe.langgraph_agent.chat_model import (
    ChatModel,
    MaxTokenExceededError,
    convert_to_agent_output,
)
from verl.experimental.agent_loop.agent_loop import AgentLoopBase, AgentLoopOutput


async def call_model(state: MessagesState, config: RunnableConfig):
    model = config["configurable"]["model"]
    sampling_params = config["configurable"]["sampling_params"]
    try:
        message = await model.ainvoke(state["messages"], sampling_params=sampling_params)
        return {"messages": [message]}
    except MaxTokenExceededError:
        # last message is ToolMessage
        return {"messages": []}


def should_continue(state: MessagesState, config: RunnableConfig) -> Literal["tools", END]:
    max_assistant_turns = config["configurable"]["max_assistant_turns"]
    num_assistant_turns = 0
    for message in state["messages"]:
        if message.type == "ai":
            num_assistant_turns += 1

    last_message = state["messages"][-1]

    # LLM call failed, e.g., max response length exceeded
    if last_message.type == "tool":
        return END

    # max assistant turns exceeded
    if max_assistant_turns and num_assistant_turns >= max_assistant_turns:
        return END

    # no tool calls
    if not last_message.tool_calls:
        return END

    return "tools"


class ReactAgentLoop(AgentLoopBase):
    @classmethod
    def init_class(cls, config, tokenizer, **kwargs):
        if cls._class_initialized:
            return
        cls._class_initialized = True
        print("Performing class-level ReactAgentLoop initialization")

        # build graph
        cls.graph = cls.build_graph()

    @classmethod
    def build_graph(cls) -> StateGraph:
        workflow = StateGraph(MessagesState)
        workflow.add_node("agent", call_model)
        workflow.add_node("tools", ToolNode(cls.tools))
        workflow.set_entry_point("agent")
        workflow.add_conditional_edges(
            "agent",
            should_continue,
            {
                "tools": "tools",
                END: END,
            },
        )
        workflow.add_edge("tools", "agent")
        graph = workflow.compile()
        return graph
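    # Compiled graph topology (illustrative comment, not in the original file):
    #
    #   entry -> agent --(pending tool calls)--> tools -> agent -> ...
    #                 \--(no tool calls, max-token failure, or max turns)--> END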
"/".join(model_path.split("/")[-2:]) rollout = self.config.actor_rollout_ref.rollout model = ChatModel( model=model_name, client=self.server_manager, tokenizer=self.tokenizer, max_tokens=rollout.response_length, max_parallel_calls=rollout.multi_turn.max_parallel_calls, tool_parser=rollout.multi_turn.format, ) model = model.bind_tools(self.tools, tool_choice="any") config = { "configurable": { "model": model, "sampling_params": sampling_params, "max_user_turns": rollout.multi_turn.max_user_turns, "max_assistant_turns": rollout.multi_turn.max_assistant_turns, } } # TODO: how to handle multiple trajectories in an graph invocation? # Each graph node may has its own LLM calls and state, e.g: # https://github.com/google-gemini/gemini-fullstack-langgraph-quickstart state = await self.graph.ainvoke(input={"messages": messages}, config=config) output = convert_to_agent_output(state["messages"], rollout.response_length) return output ================================================ FILE: verl_distillation/recipe/langgraph_agent/test_react_agent_loop.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import numpy as np import pytest import ray from langchain_core.tools import tool from omegaconf import DictConfig from recipe.langgraph_agent.react_agent_loop import ReactAgentLoop from tests.experimental.agent_loop.agent_utils import init_agent_loop_manager from verl.protocol import DataProto from verl.utils import hf_tokenizer @pytest.fixture def init_config() -> DictConfig: from hydra import compose, initialize_config_dir with initialize_config_dir(config_dir=os.path.abspath("verl/trainer/config")): config = compose(config_name="ppo_trainer") model_path = "Qwen/Qwen2.5-1.5B-Instruct" config.actor_rollout_ref.model.path = model_path config.actor_rollout_ref.rollout.name = os.getenv("ROLLOUT_NAME", "vllm") config.actor_rollout_ref.rollout.mode = "async" config.actor_rollout_ref.rollout.prompt_length = 4096 config.actor_rollout_ref.rollout.response_length = 4096 config.actor_rollout_ref.rollout.n = 4 config.actor_rollout_ref.rollout.agent.num_workers = 2 config.actor_rollout_ref.actor.use_dynamic_bsz = True # test sleep/wake_up with fsdp offload config.actor_rollout_ref.actor.fsdp_config.param_offload = True config.actor_rollout_ref.actor.fsdp_config.optimizer_offload = True return config @tool(parse_docstring=True) def get_current_temperature(location: str, unit: str = "celsius"): """Get current temperature at a location. Args: location: The location to get the temperature for, in the format "City, State, Country". unit: The unit to return the temperature in. Defaults to "celsius". 
(choices: ["celsius", "fahrenheit"]) Returns: the temperature, the location, and the unit in a dict """ print(f"[DEBUG] get_current_temperature: {location}, {unit}") return { "temperature": 26.1, "location": location, "unit": unit, } @tool(parse_docstring=True) def get_temperature_date(location: str, date: str, unit: str = "celsius"): """Get temperature at a location and date. Args: location: The location to get the temperature for, in the format "City, State, Country". date: The date to get the temperature for, in the format "Year-Month-Day". unit: The unit to return the temperature in. Defaults to "celsius". (choices: ["celsius", "fahrenheit"]) Returns: the temperature, the location, the date and the unit in a dict """ print(f"[DEBUG] get_temperature_date: {location}, {date}, {unit}") return { "temperature": 25.9, "location": location, "date": date, "unit": unit, } class TestReactAgentLoop(ReactAgentLoop): @classmethod def init_class(cls, config, tokenizer, **kwargs): # TODO: find better way to configure tools cls.tools = [get_current_temperature, get_temperature_date] super().init_class(config, tokenizer, **kwargs) def test_react_agent(init_config): ray.init( runtime_env={ "env_vars": { "TOKENIZERS_PARALLELISM": "true", "NCCL_DEBUG": "WARN", "VLLM_LOGGING_LEVEL": "INFO", "VLLM_USE_V1": "1", } } ) # =========================== 1. Init rollout manager =========================== agent_loop_config = [ { "_target_": "recipe.langgraph_agent.test_react_agent_loop.TestReactAgentLoop", "name": "react_agent", }, ] agent_loop_config_path = "/tmp/agent_loop_config.json" with open(agent_loop_config_path, "w") as f: json.dump(agent_loop_config, f) n = 2 init_config.actor_rollout_ref.rollout.n = n # init_config.actor_rollout_ref.rollout.multi_turn.tool_config_path = tool_config_path init_config.actor_rollout_ref.rollout.multi_turn.max_parallel_calls = 2 init_config.actor_rollout_ref.rollout.agent.agent_loop_config_path = agent_loop_config_path agent_loop_manager = init_agent_loop_manager(init_config) # =========================== 2. Generate sequences =========================== raw_prompts = [ [ {"role": "user", "content": "How are you?"}, ], [ {"role": "user", "content": "What's the temperature in Los Angeles now?"}, ], [ {"role": "user", "content": "What's the temperature in New York now?"}, ], [ { "role": "system", "content": "You are Qwen, created by Alibaba Cloud. You are a helpful assistant.\n\n" "Current Date: 2024-09-30", }, {"role": "user", "content": "What's the temperature in San Francisco now? 
How about tomorrow?"}, ], ] batch = DataProto( non_tensor_batch={ "raw_prompt": np.array([np.array(prompt) for prompt in raw_prompts], dtype=object), "agent_name": np.array(["react_agent"] * len(raw_prompts)), "data_source": np.array(["openai/gsm8k"] * len(raw_prompts)), "reward_model": np.array([{"style": "rule", "ground_truth": "1.0"}] * len(raw_prompts)), }, ) batch = batch.repeat(n) result = agent_loop_manager.generate_sequences(prompts=batch) assert len(result) == len(raw_prompts) * n # Check turns num_turns = result.non_tensor_batch["__num_turns__"] print(f"num_turns: {num_turns}") for i in range(len(num_turns)): if i // n == 0: # [user, assistant] assert num_turns[i] == 2 else: # [user, assistant, tool, assistant] assert num_turns[i] == 4 # Check response_mask tokenizer = hf_tokenizer(init_config.actor_rollout_ref.model.path) responses = result.batch["responses"] response_mask = result.batch["response_mask"] attention_mask = result.batch["attention_mask"] assert responses.size() == response_mask.size(), f"{responses.size()} != {response_mask.size()}" response_length = response_mask.size(1) for i in range(len(responses)): # response with tool response valid_tokens = responses[i][attention_mask[i][-response_length:].bool()] response_with_obs = tokenizer.decode(valid_tokens) # response without tool response valid_tokens = responses[i][response_mask[i].bool()] response_without_obs = tokenizer.decode(valid_tokens) assert "" not in response_without_obs, ( f"found in response: {response_without_obs}" ) assert "" not in response_without_obs, ( f"found in response: {response_without_obs}" ) print("=========================") print(response_with_obs) print("---") print(response_without_obs) print("Test passed!") ray.shutdown() ================================================ FILE: verl_distillation/recipe/minicpmo/rl_dataset.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import copy
import logging
import math
import os
import re
from typing import Optional

import datasets
import torch
from omegaconf import DictConfig, ListConfig
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
from transformers import PreTrainedTokenizer, ProcessorMixin

import verl.utils.torch_functional as verl_F
from verl.utils.dataset.vision_utils import process_image
from verl.utils.model import compute_position_id_with_mask

logger = logging.getLogger(__name__)


def build_transform():
    IMAGENET_INCEPTION_MEAN = (0.5, 0.5, 0.5)  # timm.data.IMAGENET_INCEPTION_MEAN
    IMAGENET_INCEPTION_STD = (0.5, 0.5, 0.5)  # timm.data.IMAGENET_INCEPTION_STD
    return transforms.Compose(
        [
            transforms.ToTensor(),
            transforms.Normalize(mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
        ]
    )


def build_image_bound(input_ids, tokenizer, new_schema=True, logger=None):
    if new_schema:
        start_cond = (input_ids == tokenizer.im_start_id) | (input_ids == tokenizer.slice_start_id)
        end_cond = (input_ids == tokenizer.im_end_id) | (input_ids == tokenizer.slice_end_id)
    else:
        start_cond = input_ids == tokenizer.im_start_id
        end_cond = input_ids == tokenizer.im_end_id
    image_start_tokens = torch.where(start_cond)[0]
    image_start_tokens += 1
    image_end_tokens = torch.where(end_cond)[0]
    if len(image_start_tokens) != len(image_end_tokens):
        logger.error("image start token != image end tokens")
        raise Exception("image start token != image end tokens")
    if len(image_start_tokens) > 0:
        image_bound = torch.hstack([image_start_tokens.unsqueeze(-1), image_end_tokens.unsqueeze(-1)])
    else:
        image_bound = []
    return image_bound


def preprocess(
    images_dict,
    conversations,
    tokenizer,
    transform,
    query_nums=64,
    slice_config=None,
    llm_type=None,
    patch_size=14,
    batch_vision=False,
    max_length=2048,
    truncation="error",
    apply_chat_template_kwargs=None,
    logger=None,
):
    """
    Preprocess one or more images; the image(s) will be placed at the top of the conversation.
    """
    conversations = copy.deepcopy(conversations)
    assert conversations[0]["role"] == "user", "the first role must be user"

    if slice_config is not None:
        assert isinstance(slice_config, dict)
        assert "patch_size" in slice_config
        assert "max_slice_nums" in slice_config
        assert "scale_resolution" in slice_config
    default_image_placeholder = tokenizer.im_start + tokenizer.unk_token * query_nums + tokenizer.im_end
    new_schema = False
    use_image_id = False
    if llm_type == "qwen":
        new_schema = True
        use_image_id = True
    image_placeholder_dict = {}
    images = []
    image_id_cnt = 0
    for img_name, image in images_dict.items():
        if slice_config:
            source_image, patches, best_grid = slice_image(
                image,
                slice_config["max_slice_nums"],
                slice_config["scale_resolution"],
                slice_config["patch_size"],
            )
            images.append(source_image)
            image_placeholder = default_image_placeholder
            if len(patches) > 0:
                for i in range(len(patches)):
                    for j in range(len(patches[0])):
                        images.append(patches[i][j])
                if use_image_id:
                    image_placeholder = (
                        f"{tokenizer.im_id_start}{image_id_cnt}{tokenizer.im_id_end}" + image_placeholder
                    )
                    image_id_cnt += 1
                image_placeholder += get_grid_placeholder(tokenizer, best_grid, query_nums, new_schema=new_schema)
            image_placeholder_dict[img_name] = image_placeholder
        else:
            images.append(image)
            if use_image_id:
                image_placeholder = f"{tokenizer.im_id_start}{image_id_cnt}{tokenizer.im_id_end}" + image_placeholder
                image_id_cnt += 1
            else:
                image_placeholder = default_image_placeholder
            image_placeholder_dict[img_name] = image_placeholder

    images = [transform(i) for i in images]
    if len(images_dict) == 1 and "<image>" in images_dict:
        if "<image>" in conversations[0]["content"]:
            conversations[0]["content"] = conversations[0]["content"].replace("<image>", image_placeholder)
        else:
            conversations[0]["content"] = image_placeholder + "\n" + conversations[0]["content"]
    else:
        pattern = r"<image_\d+>"
        new_conversations = []
        for conversation in conversations:
            content = conversation["content"]
            parts = re.split(f"({pattern})", content)
            for i, part in enumerate(parts):
                if not part.strip():
                    continue
                if re.match(pattern, part):
                    if part in image_placeholder_dict:
                        parts[i] = image_placeholder_dict[part]
                    else:
                        raise Exception(f"not found {part} in image dict")
            conversation["content"] = "\n".join(parts)
            new_conversations.append(conversation)
        conversations = new_conversations

    # TODO change role in conversation for different llm
    prompt_with_chat_template = tokenizer.apply_chat_template(
        conversations, add_generation_prompt=True, tokenize=False, **(apply_chat_template_kwargs or {})
    )

    input_ids, attention_mask = verl_F.tokenize_and_postprocess_data(
        prompt=prompt_with_chat_template,
        tokenizer=tokenizer,
        max_length=max_length,
        pad_token_id=tokenizer.pad_token_id,
        left_pad=True,
        truncation=truncation,
    )
    position_ids = compute_position_id_with_mask(attention_mask)
    image_bound = build_image_bound(input_ids[0], tokenizer, new_schema, logger)

    input_dict = {
        "input_ids": input_ids[0],
        "attention_mask": attention_mask[0],
        "position_ids": position_ids[0],
        "image_bound": image_bound,
    }

    if batch_vision:
        tgt_sizes = []
        reshape_images = []
        for image in images:
            H, W = image.shape[1:]
            reshape_image = reshape_by_patch(image, patch_size)
            reshape_images.append(reshape_image)
            tgt_sizes.append([H // patch_size, W // patch_size])
        if tgt_sizes:
            tgt_sizes = torch.Tensor(tgt_sizes).type(torch.int32)
        input_dict["pixel_values"] = reshape_images
        input_dict["tgt_sizes"] = tgt_sizes
    else:
        input_dict["pixel_values"] = images
        input_dict["tgt_sizes"] = []
    return input_dict


def slice_image(image, max_slice_nums=9, scale_resolution=448, patch_size=14, never_split=False):
    original_size = image.size
    original_width, original_height = original_size
    log_ratio = math.log(original_width / original_height)
    ratio = original_width * original_height / (scale_resolution * scale_resolution)
    multiple = min(math.ceil(ratio), max_slice_nums)

    source_image = None
    best_grid = None
    patches = []

    if multiple <= 1 or never_split:
        # don't need to slice, upsample
        best_size = find_best_resize(original_size, scale_resolution, patch_size, allow_upscale=True)
        source_image = image.resize(best_size, Image.Resampling.BICUBIC)
    else:
        candidate_split_grids_nums = []
        for i in [multiple - 1, multiple, multiple + 1]:
            if i == 1 or i > max_slice_nums:
                continue
            candidate_split_grids_nums.append(i)

        # source image, down-sampling and ensure divided by patch_size
        best_resize = find_best_resize(original_size, scale_resolution, patch_size)
        source_image = image.copy().resize(best_resize, Image.Resampling.BICUBIC)
        candidate_grids = []

        # find best grid
        for split_grids_nums in candidate_split_grids_nums:
            m = 1
            while m <= split_grids_nums:
                if split_grids_nums % m == 0:
                    candidate_grids.append([m, split_grids_nums // m])
                m += 1

        best_grid = [1, 1]
        min_error = float("inf")
        for grid in candidate_grids:
            error = abs(log_ratio - math.log(grid[0] / grid[1]))
            if error < min_error:
                best_grid = grid
                min_error = error

        refine_size = get_refine_size(original_size, best_grid, scale_resolution, patch_size, allow_upscale=True)
        refine_image = image.resize(refine_size, Image.Resampling.BICUBIC)
        patches = split_to_patches(refine_image, best_grid)

    return source_image, patches, best_grid
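# Worked example for slice_image (illustrative comment, not in the original file):
# for a 1024x768 input with scale_resolution=448, ratio = 1024*768 / 448^2 ≈ 3.92,
# so multiple = 4 and the candidate grid counts are [3, 4, 5]. Among the grids
# [1,3], [3,1], [1,4], [2,2], [4,1], [1,5], [5,1], the aspect-ratio error
# |log(1024/768) - log(cols/rows)| is minimized by [2, 2].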
def ensure_divide(length, patch_size):
    return max(round(length / patch_size) * patch_size, patch_size)


def find_best_resize(original_size, scale_resolution, patch_size, allow_upscale=False):
    width, height = original_size
    if (width * height > scale_resolution * scale_resolution) or allow_upscale:
        r = width / height
        height = int(scale_resolution / math.sqrt(r))
        width = int(height * r)
    best_width = ensure_divide(width, patch_size)
    best_height = ensure_divide(height, patch_size)
    return (best_width, best_height)


def get_refine_size(original_size, grid, scale_resolution, patch_size, allow_upscale=False):
    width, height = original_size
    grid_x, grid_y = grid

    refine_width = ensure_divide(width, grid_x)
    refine_height = ensure_divide(height, grid_y)

    grid_width = refine_width / grid_x
    grid_height = refine_height / grid_y

    best_grid_size = find_best_resize(
        (grid_width, grid_height),
        scale_resolution,
        patch_size,
        allow_upscale=allow_upscale,
    )
    refine_size = (best_grid_size[0] * grid_x, best_grid_size[1] * grid_y)
    return refine_size


def split_to_patches(image, grid):
    patches = []
    width, height = image.size
    grid_x = int(width / grid[0])
    grid_y = int(height / grid[1])
    for i in range(0, height, grid_y):
        images = []
        for j in range(0, width, grid_x):
            box = (j, i, j + grid_x, i + grid_y)
            patch = image.crop(box)
            images.append(patch)
        patches.append(images)
    return patches


def get_grid_placeholder(tokenizer, grid, query_num, new_schema=False):
    if new_schema:
        image_placeholder = tokenizer.slice_start + tokenizer.unk_token * query_num + tokenizer.slice_end
    else:
        image_placeholder = tokenizer.im_start + tokenizer.unk_token * query_num + tokenizer.im_end
    cols = grid[0]
    rows = grid[1]
    slices = []
    for i in range(rows):
        lines = []
        for j in range(cols):
            lines.append(image_placeholder)
        slices.append("".join(lines))
    if new_schema:
        slice_placeholder = "\n".join(slices)
    else:
        slice_placeholder = tokenizer.slice_start + "\n".join(slices) + tokenizer.slice_end
    return slice_placeholder


def reshape_by_patch(image_tensor, patch_size):
    """
    :param image_tensor: shape [3, H, W]
    :param patch_size:
    :return: [3, patch_size, HW/patch_size]
    """
    patches = torch.nn.functional.unfold(image_tensor, (patch_size, patch_size), stride=(patch_size, patch_size))
    patches = patches.reshape(image_tensor.size(0), patch_size, patch_size, -1)
    patches = patches.permute(0, 1, 3, 2).reshape(image_tensor.size(0), patch_size, -1)
    return patches
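def _demo_reshape_by_patch():
    # Illustrative shape check (added for clarity, not part of the original file):
    # a [3, 28, 28] image with patch_size=14 unfolds into a 2x2 grid of patches,
    # which reshape_by_patch concatenates along the last dim to give [3, 14, 56].
    x = torch.randn(3, 28, 28)
    out = reshape_by_patch(x, patch_size=14)
    assert out.shape == (3, 14, 56)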
def init_minicpmo_config(processor, config):
    """Initialize MiniCPM-o specific configuration"""
    minicpmo_config = {
        "transform": build_transform(),
        "patch_size": config.get("patch_size", 14),
        "query_nums": config.get("query_nums", 64),
        "slice_config": config.get(
            "slice_config", {"max_slice_nums": 9, "patch_size": config.get("patch_size", 14), "scale_resolution": 448}
        ),
        "llm_type": config.get("llm_type", "qwen"),
        "batch_vision": config.get("batch_vision", True),
    }
    return minicpmo_config


def process_minicpmo_data(
    row_dict,
    messages,
    tokenizer,
    minicpmo_config,
    image_key,
    max_prompt_length,
    truncation,
    apply_chat_template_kwargs,
    logger,
):
    """Process data for MiniCPM-o model"""
    if len(row_dict[image_key]) == 1:
        multi_modal_data = {}
        image = process_image(row_dict.pop(image_key)[0])
        multi_modal_data["image"] = [image]
        images_dict = {"<image>": image}
    else:
        raise NotImplementedError

    model_inputs = preprocess(
        images_dict,
        messages,
        tokenizer,
        minicpmo_config["transform"],
        query_nums=minicpmo_config["query_nums"],
        slice_config=minicpmo_config["slice_config"],
        llm_type=minicpmo_config["llm_type"],
        patch_size=minicpmo_config["patch_size"],
        batch_vision=minicpmo_config["batch_vision"],
        max_length=max_prompt_length,
        truncation=truncation,
        apply_chat_template_kwargs=apply_chat_template_kwargs,
        logger=logger,
    )

    raw_prompt = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, tokenize=False, **(apply_chat_template_kwargs or {})
    )
    raw_prompt = raw_prompt.replace("<image>", "(<image>./</image>)")
    return model_inputs, multi_modal_data, raw_prompt


class RLHFDataset(Dataset):
    """
    Load and preprocess RLHF data from Parquet files.

    - Caches files locally.
    - Reads into a HuggingFace Dataset and tokenizes prompts.
    - Optionally handles images/videos via a ProcessorMixin.
    - Filters prompts over a max length.
    - Supports resuming from checkpoints.

    Args:
        data_files (str or list): Path(s) to Parquet file(s).
        tokenizer (PreTrainedTokenizer): For the tokenization of text to token IDs.
        config (DictConfig): Options like cache_dir, prompt_key, max_prompt_length, truncation, etc.
        processor (ProcessorMixin, optional): Multimodal preprocessor for images/videos.
    """

    def __init__(
        self,
        data_files: str | list[str],
        tokenizer: PreTrainedTokenizer,
        config: DictConfig,
        processor: Optional[ProcessorMixin] = None,
    ):
        if not isinstance(data_files, list | ListConfig):
            data_files = [data_files]

        self.data_files = copy.deepcopy(data_files)
        self.original_data_files = copy.deepcopy(data_files)  # use for resume
        self.tokenizer = tokenizer
        self.processor = processor
        self.config = config

        self.cache_dir = os.path.expanduser(config.get("cache_dir", "~/.cache/verl/rlhf"))
        self.prompt_key = config.get("prompt_key", "prompt")
        self.image_key = config.get("image_key", "images")
        self.video_key = config.get("video_key", "videos")
        self.max_prompt_length = config.get("max_prompt_length", 1024)
        self.return_raw_chat = config.get("return_raw_chat", False)
        self.return_full_prompt = config.get("return_full_prompt", False)
        self.truncation = config.get("truncation", "error")
        self.filter_overlong_prompts = config.get("filter_overlong_prompts", True)
        self.apply_chat_template_kwargs = config.get("apply_chat_template_kwargs", {})

        self.num_workers = config.get("filter_overlong_prompts_workers", max(1, os.cpu_count() // 4))
        self.num_workers = min(self.num_workers, os.cpu_count())
        self.use_shm = config.get("use_shm", False)
        self.chat_template_func = config.get("chat_template_func", None)
        self.need_tools_kwargs = config.get("need_tools_kwargs", False)
        self.filter_prompts = config.get("filter_prompts", True)
        self.serialize_dataset = False
        self.minicpmo_config = init_minicpmo_config(self.processor, config)
        self._download()
        self._read_files_and_tokenize()

    def _download(self, use_origin_parquet=False):
        from verl.utils.fs import copy_to_local

        data_files = self.data_files if not use_origin_parquet else self.original_data_files
        for i, parquet_file in enumerate(data_files):
            self.data_files[i] = copy_to_local(src=parquet_file, cache_dir=self.cache_dir, use_shm=self.use_shm)

    def _read_files_and_tokenize(self):
        dataframes = []
        for parquet_file in self.data_files:
            # read parquet files and cache
            dataframe = datasets.load_dataset("parquet", data_files=parquet_file)["train"]
            dataframes.append(dataframe)
        self.dataframe: datasets.Dataset = datasets.concatenate_datasets(dataframes)

        print(f"dataset len: {len(self.dataframe)}")
    def resume_dataset_state(self):
        self.serialize_dataset = not hasattr(self, "original_data_files")
        # re-download the dataframe if it was not serialized into data.pt
        if not self.serialize_dataset:
            self._download(use_origin_parquet=True)  # download and resume from original parquet files
            self._read_files_and_tokenize()
        else:
            print("an old dataloader checkpoint file is being used; train from scratch for better checkpoint performance")

    def __len__(self):
        return len(self.dataframe)

    def _build_messages(self, example: dict):
        return example.pop(self.prompt_key)

    def __getitem__(self, item):
        """
        Note that we also return the raw_prompt_ids so that they can be combined with other chat templates.
        """
        row_dict: dict = self.dataframe[item]
        messages = self._build_messages(row_dict)
        model_inputs = {}

        if self.processor is not None:
            model_inputs, multi_modal_data, raw_prompt = process_minicpmo_data(
                row_dict,
                messages,
                self.tokenizer,
                self.minicpmo_config,
                self.image_key,
                self.max_prompt_length,
                self.truncation,
                self.apply_chat_template_kwargs,
                logger,
            )
            input_ids = model_inputs.pop("input_ids")
            attention_mask = model_inputs.pop("attention_mask")
            position_ids = model_inputs.pop("position_ids")

            # There's a trap here, multi_modal_inputs has to be a dict, not BatchFeature
            row_dict["multi_modal_data"] = multi_modal_data
            row_dict["multi_modal_inputs"] = dict(model_inputs)
        else:
            raw_prompt = self.tokenizer.apply_chat_template(
                messages, add_generation_prompt=True, tokenize=False, **self.apply_chat_template_kwargs
            )
            model_inputs = self.tokenizer(raw_prompt, return_tensors="pt", add_special_tokens=False)
            input_ids = model_inputs.pop("input_ids")
            attention_mask = model_inputs.pop("attention_mask")
            position_ids = compute_position_id_with_mask(attention_mask)

        row_dict["input_ids"] = input_ids
        row_dict["attention_mask"] = attention_mask
        row_dict["position_ids"] = position_ids

        raw_prompt_ids = self.tokenizer.encode(raw_prompt, add_special_tokens=False)
        if len(raw_prompt_ids) > self.max_prompt_length:
            if self.truncation == "left":
                raw_prompt_ids = raw_prompt_ids[-self.max_prompt_length :]
            elif self.truncation == "right":
                raw_prompt_ids = raw_prompt_ids[: self.max_prompt_length]
            elif self.truncation == "middle":
                left_half = self.max_prompt_length // 2
                right_half = self.max_prompt_length - left_half
                raw_prompt_ids = raw_prompt_ids[:left_half] + raw_prompt_ids[-right_half:]
            elif self.truncation == "error":
                raise RuntimeError(f"Prompt length {len(raw_prompt_ids)} is longer than {self.max_prompt_length}.")

        row_dict["raw_prompt_ids"] = raw_prompt_ids

        # encode prompts without chat template
        if self.return_raw_chat:
            row_dict["raw_prompt"] = messages

        # get prompts with chat template
        if self.return_full_prompt:
            row_dict["full_prompts"] = raw_prompt  # array of strings

        # add index for each prompt
        index = row_dict.get("extra_info", {}).get("index", 0)
        tools_kwargs = row_dict.get("extra_info", {}).get("tools_kwargs", {})
        interaction_kwargs = row_dict.get("extra_info", {}).get("interaction_kwargs", {})
        need_tools_kwargs = row_dict.get("extra_info", {}).get("need_tools_kwargs", self.need_tools_kwargs)
        if need_tools_kwargs and not tools_kwargs:
            logger.warning("tools_kwargs is empty for index %s, data source: %s", index, row_dict["data_source"])
        row_dict["index"] = index
        row_dict["tools_kwargs"] = tools_kwargs
        row_dict["interaction_kwargs"] = interaction_kwargs
        return row_dict

    def __getstate__(self):
        if not self.serialize_dataset:
            state = self.__dict__.copy()
            if "dataframe" in state:
                del state["dataframe"]
            return state
        return self.__dict__.copy()
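def _demo_truncation_modes():
    # Sketch of the truncation branches in RLHFDataset.__getitem__, using plain
    # lists instead of token ids (illustrative; assumes max_prompt_length=8).
    ids, max_len = list(range(10)), 8
    left = ids[-max_len:]   # "left": keep the tail
    right = ids[:max_len]   # "right": keep the head
    middle = ids[: max_len // 2] + ids[-(max_len - max_len // 2) :]  # head + tail
    assert len(left) == len(right) == len(middle) == max_len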
================================================
FILE: verl_distillation/recipe/one_step_off_policy/README.md
================================================
# Recipe: One Step Off Policy Async Trainer

**Author:** `https://github.com/meituan-search`

Last updated: 07/17/2025.

## Introduction

### Background

The current reinforcement learning training process implemented by verl is synchronous, adhering to the algorithmic workflows of established methods like PPO, GRPO, and DAPO. In each step, training samples are generated by the latest model, and the model is updated after training completes. While this approach keeps training on-policy and stabilizes RL training, it suffers from severe efficiency issues: model updates must wait for the longest output in the generation phase to complete. During the generation of long-tail samples, GPUs remain idle, resulting in significant underutilization. The more severe the long-tail problem in sample generation, the lower the overall training efficiency. For example, in DAPO 32B training, the rollout phase accounts for approximately 70% of the total time, and increasing resources does not reduce the rollout duration.

![DAPO 32B Math Performance](https://raw.githubusercontent.com/eric-haibin-lin/verl-community/refs/heads/main/docs/dapo_32b_math.png)

> source data: https://wandb.ai/verl-org/DAPO%20Reproduction%20on%20verl/workspace?nw=nwusertongyuxuan361

### Solution

We have implemented the **One Step Off Async Trainer** to help alleviate this issue. This approach parallelizes the generation and training processes, using samples generated in the previous step for the current training step. It also partitions resources explicitly, allocating dedicated resources for generation while automatically assigning the remainder to training. By reducing the resources allocated to the generation phase, we mitigate GPU idle time during long-tail sample generation. Throughout this process, generation remains exactly one step off-policy relative to training.

![One Step Off Policy Diagram](https://raw.githubusercontent.com/eric-haibin-lin/verl-community/refs/heads/main/docs/one_step_off_policy.png)

> reference: [AReaL: A Large-Scale Asynchronous Reinforcement Learning System for Language Reasoning](https://arxiv.org/abs/2505.24298)
> original work: [Asynchronous RLHF: Faster and More Efficient Off-Policy RL for Language Models](https://arxiv.org/abs/2410.18252)

Our core contributions include:

1. **Parallel Generation and Training**: Samples for the next batch are generated asynchronously while the current batch is being trained.
2. **Resource Isolation**: Unlike `hybrid_engine`, this method requires explicit resource allocation for rollout, with the remaining resources automatically assigned to training.
3. **NCCL Parameter Synchronization**: Employs NCCL communication primitives for seamless parameter transfer between the generation and training modules.
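The pipeline in the Implementation section below hands rollout results around as a `GenerationBatchFuture`. As a rough mental model, here is a minimal sketch of such a wrapper, assuming the async rollout call returns a Ray `ObjectRef` and that batches merge with a `DataProto.union`-style call (the recipe's actual class may differ):

```python
import ray


class GenerationBatchFuture:
    """Minimal sketch: wraps an in-flight rollout so training can overlap with it."""

    def __init__(self, epoch, batch, gen_batch_output):
        self.epoch = epoch                        # epoch the batch was drawn from
        self.batch = batch                        # prompts and non-tensor metadata
        self.gen_batch_output = gen_batch_output  # assumed: Ray ObjectRef of the async rollout

    def get(self):
        # Block until the previous step's rollout finishes; this wait shows up
        # as `wait_prev_gen` in the timing table below.
        gen_result = ray.get(self.gen_batch_output)
        return self.batch.union(gen_result)       # assumed: DataProto-style merge
```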
### Experimental Results

- **Machine Configuration**: 2 nodes, 16 H20 GPUs in total
  - Generation: 4 GPUs
  - Training: 12 GPUs
- **Model**: Qwen2.5-Math-7B
- **Rollout Configuration**:
  - **Max Response Length**: FSDP2: 20,480 tokens; Megatron: 8,192 tokens
- **Algorithm**: DAPO
- **Rollout Engine**: vLLM

| training mode          | engine        | step | gen | wait_prev_gen | generate_sequences | old_log_prob | update_actor | total time    | acc/best@32/mean | acc/maj@32/mean |
|------------------------|---------------|------|-----|---------------|--------------------|--------------|--------------|---------------|------------------|-----------------|
| colocate sync          | vLLM+FSDP2    | 749  | 321 | -             | 247                | 88           | 286          | 19h18m        | 0.5948           | 0.417           |
| one-step-overlap async | vLLM+FSDP2    | 520  | -   | 45            | 458                | 108          | 337          | 15h34m (+23%) | 0.6165           | 0.494           |
| colocate sync          | vLLM+Megatron | 699  | 207 | -             | 162                | 119          | 344          | 18h21m        | 0.605            | 0.4217          |
| one-step-overlap async | vLLM+Megatron | 566  | -   | 59            | 501                | 120          | 347          | 13h06m (+40%) | 0.6569           | 0.4038          |

* colocate sync: step ≈ gen + old_log_prob + update_actor
* one-step-overlap async: step ≈ wait_prev_gen + old_log_prob + update_actor

![One Step Off Megatron Performance](https://raw.githubusercontent.com/eric-haibin-lin/verl-community/refs/heads/main/docs/one_step_off_megatron.png)

> source data: https://wandb.ai/hou-zg-meituan/one-step-off-policy?nw=nwuserhouzg

## Implementation

### One Step Off Policy Async Pipeline

Our **One Step Off Policy Async Pipeline** integrates seamlessly into the existing training logic at minimal cost, eliminating the need for additional sample-storage management. The core mechanism uses `_async_gen_next_batch` for asynchronous rollout generation while maintaining continuous operation across epoch transitions via `_create_continuous_iterator`.

```python
# iterator generator, simplifying one-step integration into the training process
def _create_continuous_iterator(self):
    for epoch in range(self.config.trainer.total_epochs):
        iterator = iter(self.train_dataloader)
        for batch_dict in iterator:
            yield epoch, batch_dict


# read the next batch of samples, sync parameters, and launch async gen_seq
def _async_gen_next_batch(self, continuous_iterator):
    # read train_data
    try:
        epoch, batch_dict = next(continuous_iterator)
    except StopIteration:
        return None
    batch = DataProto.from_single_dict(batch_dict)
    gen_batch = batch_process(batch)
    # sync weights from actor to rollout
    self.sync_rollout_weights()
    # async generation
    gen_batch_output = self.rollout_wg.async_generate_sequences(gen_batch)
    # wrap the in-flight generation in a future
    return GenerationBatchFuture(epoch, batch, gen_batch_output)


continuous_iterator = self._create_continuous_iterator()
# run rollout first to achieve one-step-off
batch_data_future = self._async_gen_next_batch(continuous_iterator)

while batch_data_future is not None:
    # wait for the gen_seq result from the previous step
    batch = batch_data_future.get()
    # launch the next async call to generate sequences
    batch_data_future = self._async_gen_next_batch(continuous_iterator)

    # compute advantages
    batch = critic.compute_values(batch)
    batch = reference.compute_log_prob(batch)
    batch = reward.compute_reward(batch)
    batch = compute_advantages(batch)

    # model update
    critic_metrics = critic.update_critic(batch)
    actor_metrics = actor.update_actor(batch)
```
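Plugging the vLLM+FSDP2 rows of the results table into the two step-time decompositions above shows where the speedup comes from (rough arithmetic; the residual is other per-step overhead):

```
colocate sync:          749 ≈ 321 (gen) + 88 (old_log_prob) + 286 (update_actor) + ~54 other
one-step-overlap async: 520 ≈  45 (wait_prev_gen) + 108 (old_log_prob) + 337 (update_actor) + ~30 other
```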
### Parameter Synchronization

A highlight is the performance of the NCCL-based weight update for the rollout model: most of the time the latency is under 300 ms, which is negligible for RLHF.

> **sync_rollout_weights**: The time for synchronizing parameters from actor to rollout is extremely fast and can
> almost be ignored because it is implemented with NCCL.

```python
class ActorRolloutRefWorker:
    # actor acquires the meta-info of model parameters for parameter sync
    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def get_actor_weights_info(self):
        params = self._get_actor_params()
        ret = []
        for key, tensor in params.items():
            ret.append((key, tensor.size(), tensor.dtype))
        self._weights_info = ret
        return ret

    # rollout sets the meta-info of model parameters for parameter sync
    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def set_actor_weights_info(self, weights_info):
        self._weights_info = weights_info


class AsyncRayPPOTrainer(RayPPOTrainer):
    def init_workers(self):
        ...
        # rollout obtains the meta-info of model parameters from the actor for parameter sync
        weights_info = self.actor_wg.get_actor_weights_info()[0]
        self.rollout_wg.set_actor_weights_info(weights_info)

        # Create an actor-rollout communication group for parameter sync
        actor_rollout_workers = self.actor_wg.workers + self.rollout_wg.workers
        collective.create_collective_group(
            actor_rollout_workers,
            len(actor_rollout_workers),
            list(range(0, len(actor_rollout_workers))),
            backend="nccl",
            group_name="actor_rollout",
        )
```

```python
# the driver process calls the actor and rollout respectively to sync parameters via NCCL
def sync_rollout_weights(self):
    self.actor_wg.sync_rollout_weights()
    ray.get(self.rollout_wg.sync_rollout_weights())


# fsdp model parameter sync
@register(dispatch_mode=Dispatch.ONE_TO_ALL, blocking=False)
def sync_rollout_weights(self):
    params = self._get_actor_params() if self._is_actor else None
    if self._is_rollout:
        inference_model = (
            self.rollout.inference_engine.llm_engine.model_executor.driver_worker.worker.model_runner.model
        )
        from verl.utils.vllm.patch import patch_vllm_moe_model_weight_loader

        patch_vllm_moe_model_weight_loader(inference_model)
    # Model parameters are broadcast tensor-by-tensor from actor to rollout
    for key, shape, dtype in self._weights_info:
        tensor = torch.empty(shape, dtype=dtype, device=get_torch_device().current_device())
        if self._is_actor:
            assert key in params
            origin_data = params[key]
            if hasattr(origin_data, "full_tensor"):
                origin_data = origin_data.full_tensor()
            if torch.distributed.get_rank() == 0:
                tensor.copy_(origin_data)
        from ray.util.collective import collective

        collective.broadcast(tensor, src_rank=0, group_name="actor_rollout")
        if self._is_rollout:
            inference_model.load_weights([(key, tensor)])
```

## Usage

### FSDP2 Configuration Example

```shell
# actor and rollout are placed separately (hybrid_engine=False);
# trainer.* and rollout.* split the GPU resources between the two roles
python3 -m recipe.one_step_off_policy.async_main_ppo \
    --config-path=config \
    --config-name='one_step_off_ppo_trainer.yaml' \
    actor_rollout_ref.actor.strategy=fsdp2 \
    actor_rollout_ref.hybrid_engine=False \
    trainer.nnodes=1 \
    trainer.n_gpus_per_node=6 \
    rollout.nnodes=1 \
    rollout.n_gpus_per_node=2
```

### Megatron Configuration Example

```shell
# actor and rollout are placed separately (hybrid_engine=False);
# trainer.* and rollout.* split the GPU resources between the two roles
python3 -m recipe.one_step_off_policy.async_main_ppo \
    --config-path=config \
    --config-name='one_step_off_ppo_megatron_trainer.yaml' \
    actor_rollout_ref.actor.strategy=megatron \
    actor_rollout_ref.hybrid_engine=False \
    trainer.nnodes=1 \
    trainer.n_gpus_per_node=6 \
    rollout.nnodes=1 \
    rollout.n_gpus_per_node=2
```
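To make the split concrete, here is a worked example matching the 2-node, 16-GPU H20 setup from the Experimental Results above (illustrative arithmetic, not extra configuration; see the node-count note in the Configuration Guidelines below):

```shell
# 2 physical nodes x 8 GPUs = 16 GPUs total
# trainer.nnodes=2  trainer.n_gpus_per_node=6  -> 12 training GPUs
# rollout.nnodes=2  rollout.n_gpus_per_node=2  ->  4 rollout GPUs
# 6 + 2 = 8 fits on one physical node, so training and rollout share
# the same two nodes: required nodes = max(2, 2) = 2
```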
### Configuration Guidelines

1. **GPU Count Relationships**

   Maintain either of these relationships for optimal batch distribution:
   - `actor_rollout_ref.rollout.n` should be an integer divisor of `trainer.n_gpus_per_node * trainer.nnodes`
   - `actor_rollout_ref.rollout.n * data.train_batch_size` should be evenly divisible by `trainer.n_gpus_per_node * trainer.nnodes`

   > Rationale: Ensures training samples can be evenly distributed across training GPUs when using partial resources for generation.

2. **Dynamic Resource Tuning**

   Adjust `trainer.nnodes`, `trainer.n_gpus_per_node`, `rollout.nnodes`, and `rollout.n_gpus_per_node` based on phase durations:
   - **Ideal state**: Rollout and training phases have comparable durations
   - **Diagnostic metrics**:
     - Monitor `wait_prev_gen` duration
     - Analyze `sequence_length` distribution
   - **Adjustment strategy**:
     - High `wait_prev_gen` + uniform sequence lengths → Increase rollout resources
     - High `wait_prev_gen` + long-tail sequences → Optimize stopping criteria (a resource increase won't help)

   > **wait_prev_gen**: The time spent waiting for the previous rollout to finish (the part that is not fully overlapped).

**Resource Configuration Strategies:**

- **Resource-constrained scenario**: Optimize utilization by adjusting GPU allocation ratios, keeping the node counts equal so that training and rollout share nodes.
  - Configure `trainer.nnodes = rollout.nnodes` with `trainer.n_gpus_per_node + rollout.n_gpus_per_node = physical_gpus_per_node`. Control the rollout resource allocation by adjusting `n_gpus_per_node`.
- **Resource-abundant scenario**: Optimize performance by adjusting node counts, keeping the number of GPUs per node equal so that training and rollout parallelism can scale independently.
  - Configure `trainer.n_gpus_per_node = rollout.n_gpus_per_node` and control the rollout resource allocation by adjusting `trainer.nnodes` and `rollout.nnodes` to achieve optimal performance.

> **Note**: The total number of nodes required by the system is not simply `trainer.nnodes + rollout.nnodes`. The
> actual calculation depends on GPU capacity:
> - When `trainer.n_gpus_per_node + rollout.n_gpus_per_node <= physical_gpus_per_node`,
>   the required node count is `max(trainer.nnodes, rollout.nnodes)`
> - When `trainer.n_gpus_per_node + rollout.n_gpus_per_node > physical_gpus_per_node`,
>   the required node count is `trainer.nnodes + rollout.nnodes`

## Functional Support

| Category           | Support Situation                                                                              |
|--------------------|------------------------------------------------------------------------------------------------|
| train engine       | FSDP2<br>Megatron                                                                               |
| rollout engine     | vLLM<br>SGLang                                                                                  |
| AdvantageEstimator | GRPO<br>GRPO_PASSK<br>REINFORCE_PLUS_PLUS<br>RLOO<br>OPO<br>REINFORCE_PLUS_PLUS_BASELINE<br>GPG |
| Reward             | all                                                                                             |


================================================
FILE: verl_distillation/recipe/one_step_off_policy/config/one_step_off_ppo_megatron_trainer.yaml
================================================
hydra:
  searchpath:
    - file://verl/trainer/config

defaults:
  - ppo_megatron_trainer
  - _self_

# config for the rollout (only for resource isolation)
rollout:
  # Number of nodes used in the rollout
  nnodes: 1
  # Number of GPUs per node
  n_gpus_per_node: 8


================================================
FILE: verl_distillation/recipe/one_step_off_policy/config/one_step_off_ppo_trainer.yaml
================================================
hydra:
  searchpath:
    - file://verl/trainer/config

defaults:
  - ppo_trainer
  - _self_

# config for the rollout (only for resource isolation)
rollout:
  # Number of nodes used in the rollout
  nnodes: 1
  # Number of GPUs per node
  n_gpus_per_node: 8


================================================
FILE: verl_distillation/recipe/one_step_off_policy/dapo_7b_math_fsdp2_4_12.sh
================================================
#!/usr/bin/env bash
set -xeuo pipefail

project_name='DAPO'
exp_name='DAPO-Qwen2.5-7b-MATH-0527a1-fsdp2-one-step-off-4-12'

adv_estimator=grpo

use_kl_in_reward=False
kl_coef=0.0
use_kl_loss=False
kl_loss_coef=0.0

clip_ratio_low=0.2
clip_ratio_high=0.28

max_prompt_length=$((1024 * 2))
max_response_length=$((1024 * 8))
enable_overlong_buffer=True
overlong_buffer_len=$((1024 * 4))
overlong_penalty_factor=1.0

loss_agg_mode="token-mean"

train_prompt_bsz=512
n_resp_per_prompt=12
train_prompt_mini_bsz=32

# Ray
# RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"}
# WORKING_DIR=${WORKING_DIR:-"${PWD}"}
# RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"}
NNODES=${NNODES:-2}
NGPUS_PER_NODE=${NGPUS_PER_NODE:-8}
n_gpus_rollout=2
n_gpus_training=$((NGPUS_PER_NODE - n_gpus_rollout))

# Paths
RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"}
# very important!
please modify the max_position_embeddings in config.json to 32768 after downloading from huggingface MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen2.5-Math-7B"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"} TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"} # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_top_p=0.7 # Performance Related Parameter use_dynamic_bsz=True actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 2)) infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 3)) ref_offload=True actor_offload=False gen_tp=2 sp_size=4 fsdp_size=2 python3 -m recipe.one_step_off_policy.main_ppo \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.train_batch_size=${train_prompt_bsz} \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.actor.strategy=fsdp2 \ critic.strategy=fsdp2 \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.hybrid_engine=False \ +actor_rollout_ref.model.override_config.max_position_embeddings=32768 \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.fsdp_config.param_offload=${actor_offload} \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=${actor_offload} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.rollout.gpu_memory_utilization=0.80 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=True \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.rollout.name=vllm \ 
actor_rollout_ref.ref.fsdp_config.param_offload=${ref_offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.actor.fsdp_config.fsdp_size=${fsdp_size} \ reward_model.reward_manager=dapo \ +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \ +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \ +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \ +reward_model.reward_kwargs.overlong_buffer_cfg.log=False \ +reward_model.reward_kwargs.max_resp_len=${max_response_length} \ trainer.logger=['console','tensorboard'] \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.val_before_train=True \ trainer.test_freq=10 \ trainer.save_freq=-1 \ trainer.total_epochs=10 \ trainer.total_training_steps=100 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=auto \ trainer.log_val_generations=10 \ trainer.nnodes="${NNODES}" \ trainer.n_gpus_per_node="${n_gpus_training}" \ rollout.nnodes="${NNODES}" \ rollout.n_gpus_per_node="${n_gpus_rollout}" ================================================ FILE: verl_distillation/recipe/one_step_off_policy/dapo_7b_math_fsdp2_colocate.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail project_name='DAPO' exp_name='DAPO-Qwen2.5-7b-MATH-0527a1-fsdp2-colocate' adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 8)) enable_overlong_buffer=True overlong_buffer_len=$((1024 * 4)) overlong_penalty_factor=1.0 loss_agg_mode="token-mean" train_prompt_bsz=512 n_resp_per_prompt=12 train_prompt_mini_bsz=32 # Ray # RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} # WORKING_DIR=${WORKING_DIR:-"${PWD}"} # RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} NNODES=${NNODES:-2} NGPUS_PER_NODE=${NGPUS_PER_NODE:-8} # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} # very important! 
please modify the max_position_embeddings in config.json to 32768 after downloading from huggingface MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen2.5-Math-7B"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"} TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"} # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_top_p=0.7 # Performance Related Parameter use_dynamic_bsz=True actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 2)) infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 3)) offload=True gen_tp=2 sp_size=4 fsdp_size=2 # reference run wandb: https://wandb.ai/verl-org/DAPO%20Reproduction%20on%20verl/runs/ow47vvon?nw=nwusertongyuxuan361 python3 -m verl.trainer.main_ppo \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.train_batch_size=${train_prompt_bsz} \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.actor.strategy=fsdp2 \ critic.strategy=fsdp2 \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.model.use_remove_padding=True \ +actor_rollout_ref.model.override_config.max_position_embeddings=32768 \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.rollout.gpu_memory_utilization=0.80 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=True \ 
actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.actor.fsdp_config.fsdp_size=${fsdp_size} \ reward_model.reward_manager=dapo \ +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \ +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \ +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \ +reward_model.reward_kwargs.overlong_buffer_cfg.log=False \ +reward_model.reward_kwargs.max_resp_len=${max_response_length} \ trainer.logger=['console','tensorboard'] \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node="${NGPUS_PER_NODE}" \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=True \ trainer.test_freq=10 \ trainer.save_freq=-1 \ trainer.total_epochs=10 \ trainer.total_training_steps=100 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=auto \ trainer.log_val_generations=10 ================================================ FILE: verl_distillation/recipe/one_step_off_policy/dapo_7b_math_fsdp2_sglang_4_12.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail project_name='DAPO' exp_name='DAPO-Qwen2.5-7b-MATH-0527a1-fsdp2-sglang-one-step-off-4-12' adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 8)) enable_overlong_buffer=True overlong_buffer_len=$((1024 * 4)) overlong_penalty_factor=1.0 loss_agg_mode="token-mean" train_prompt_bsz=512 n_resp_per_prompt=12 train_prompt_mini_bsz=32 # Ray # RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} # WORKING_DIR=${WORKING_DIR:-"${PWD}"} # RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} NNODES=${NNODES:-2} NGPUS_PER_NODE=${NGPUS_PER_NODE:-8} n_gpus_rollout=2 n_gpus_training=$((NGPUS_PER_NODE - n_gpus_rollout)) # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} # very important! 
please modify the max_position_embeddings in config.json to 32768 after downloading from huggingface MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen2.5-Math-7B"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"} TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"} # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_top_p=0.7 # Performance Related Parameter use_dynamic_bsz=True actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 2)) infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 3)) ref_offload=True actor_offload=False gen_tp=2 sp_size=4 fsdp_size=2 python3 -m recipe.one_step_off_policy.main_ppo \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.train_batch_size=${train_prompt_bsz} \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.actor.strategy=fsdp2 \ critic.strategy=fsdp2 \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.hybrid_engine=False \ +actor_rollout_ref.model.override_config.max_position_embeddings=32768 \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.fsdp_config.param_offload=${actor_offload} \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=${actor_offload} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.rollout.gpu_memory_utilization=0.80 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.layered_summon=True \ actor_rollout_ref.rollout.load_format=safetensors \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=True \ actor_rollout_ref.rollout.val_kwargs.n=1 \ 
actor_rollout_ref.rollout.name=sglang \ actor_rollout_ref.ref.fsdp_config.param_offload=${ref_offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.actor.fsdp_config.fsdp_size=${fsdp_size} \ reward_model.reward_manager=dapo \ +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \ +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \ +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \ +reward_model.reward_kwargs.overlong_buffer_cfg.log=False \ +reward_model.reward_kwargs.max_resp_len=${max_response_length} \ trainer.logger=['console','tensorboard'] \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.val_before_train=True \ trainer.test_freq=10 \ trainer.save_freq=-1 \ trainer.total_epochs=10 \ trainer.total_training_steps=100 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=auto \ trainer.log_val_generations=10 \ trainer.nnodes="${NNODES}" \ trainer.n_gpus_per_node="${n_gpus_training}" \ rollout.nnodes="${NNODES}" \ rollout.n_gpus_per_node="${n_gpus_rollout}" ================================================ FILE: verl_distillation/recipe/one_step_off_policy/dapo_7b_math_fsdp2_sglang_colocate.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail project_name='DAPO' exp_name='DAPO-Qwen2.5-7b-MATH-0527a1-fsdp2-sglang-colocate' adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 8)) enable_overlong_buffer=True overlong_buffer_len=$((1024 * 4)) overlong_penalty_factor=1.0 loss_agg_mode="token-mean" train_prompt_bsz=512 n_resp_per_prompt=12 train_prompt_mini_bsz=32 # Ray # RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} # WORKING_DIR=${WORKING_DIR:-"${PWD}"} # RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} NNODES=${NNODES:-2} NGPUS_PER_NODE=${NGPUS_PER_NODE:-8} # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} # very important! 
please modify the max_position_embeddings in config.json to 32768 after downloading from huggingface MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen2.5-Math-7B"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"} TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"} # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_top_p=0.7 # Performance Related Parameter use_dynamic_bsz=True actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 2)) infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 3)) offload=True gen_tp=2 sp_size=4 fsdp_size=2 # reference run wandb: https://wandb.ai/verl-org/DAPO%20Reproduction%20on%20verl/runs/ow47vvon?nw=nwusertongyuxuan361 python3 -m verl.trainer.main_ppo \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.train_batch_size=${train_prompt_bsz} \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.actor.strategy=fsdp2 \ critic.strategy=fsdp2 \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.model.use_remove_padding=True \ +actor_rollout_ref.model.override_config.max_position_embeddings=32768 \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.rollout.gpu_memory_utilization=0.80 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.layered_summon=True \ actor_rollout_ref.rollout.load_format=safetensors \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ 
actor_rollout_ref.rollout.val_kwargs.do_sample=True \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.rollout.name=sglang \ actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.actor.fsdp_config.fsdp_size=${fsdp_size} \ reward_model.reward_manager=dapo \ +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \ +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \ +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \ +reward_model.reward_kwargs.overlong_buffer_cfg.log=False \ +reward_model.reward_kwargs.max_resp_len=${max_response_length} \ trainer.logger=['console','tensorboard'] \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node="${NGPUS_PER_NODE}" \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=True \ trainer.test_freq=10 \ trainer.save_freq=-1 \ trainer.total_epochs=10 \ trainer.total_training_steps=100 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=auto \ trainer.log_val_generations=10 ================================================ FILE: verl_distillation/recipe/one_step_off_policy/dapo_7b_math_megatron_4_12.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail project_name='DAPO' exp_name='DAPO-Qwen2.5-7b-MATH-0527a1-megatron-one-step-off-4-12' adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 8)) enable_overlong_buffer=True overlong_buffer_len=$((1024 * 4)) overlong_penalty_factor=1.0 loss_agg_mode="token-mean" train_prompt_bsz=512 n_resp_per_prompt=12 train_prompt_mini_bsz=32 # Ray # RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} # WORKING_DIR=${WORKING_DIR:-"${PWD}"} # RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} NNODES=${NNODES:-2} NGPUS_PER_NODE=${NGPUS_PER_NODE:-8} n_gpus_rollout=2 n_gpus_training=$((NGPUS_PER_NODE - n_gpus_rollout)) # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} # very important! 
please modify the max_position_embeddings in config.json to 32768 after downloading from huggingface MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen2.5-Math-7B"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"} TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"} # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_top_p=0.7 # Performance Related Parameter use_dynamic_bsz=True actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 2)) infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 3)) ref_offload=True actor_offload=False gen_tp=2 train_tp=2 train_pp=2 # TODO: support dynamic_bsz for megatron # actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ # actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ # actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ # actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ # actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ # actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ python3 -m recipe.one_step_off_policy.main_ppo \ --config-path=config \ --config-name='one_step_off_ppo_megatron_trainer.yaml' \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.train_batch_size=${train_prompt_bsz} \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.actor.strategy=megatron \ critic.strategy=megatron \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.hybrid_engine=False \ +actor_rollout_ref.model.override_config.max_position_embeddings=32768 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=2 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.megatron.param_offload=${actor_offload} \ actor_rollout_ref.actor.megatron.optimizer_offload=${actor_offload} \ actor_rollout_ref.actor.megatron.grad_offload=${actor_offload} \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=${train_pp} \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=${train_tp} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.optim.clip_grad=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.rollout.gpu_memory_utilization=0.80 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ 
actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=True \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=${train_pp} \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=${train_tp} \ actor_rollout_ref.ref.megatron.param_offload=${ref_offload} \ reward_model.reward_manager=dapo \ +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \ +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \ +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \ +reward_model.reward_kwargs.overlong_buffer_cfg.log=False \ +reward_model.reward_kwargs.max_resp_len=${max_response_length} \ trainer.logger=['console','tensorboard'] \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.val_before_train=True \ trainer.test_freq=10 \ trainer.save_freq=-1 \ trainer.total_epochs=10 \ trainer.total_training_steps=100 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=auto \ trainer.log_val_generations=10 \ trainer.nnodes="${NNODES}" \ trainer.n_gpus_per_node="${n_gpus_training}" \ rollout.nnodes="${NNODES}" \ rollout.n_gpus_per_node="${n_gpus_rollout}" ================================================ FILE: verl_distillation/recipe/one_step_off_policy/dapo_7b_math_megatron_colocate.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail project_name='DAPO' exp_name='DAPO-Qwen2.5-7b-MATH-0519a1-megatron-colocate' adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 8)) enable_overlong_buffer=True overlong_buffer_len=$((1024 * 4)) overlong_penalty_factor=1.0 loss_agg_mode="token-mean" train_prompt_bsz=512 n_resp_per_prompt=16 train_prompt_mini_bsz=32 # Ray # RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} # WORKING_DIR=${WORKING_DIR:-"${PWD}"} # RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} NNODES=${NNODES:-2} NGPUS_PER_NODE=${NGPUS_PER_NODE:-8} # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} # very important! 
please modify the max_position_embeddings in config.json to 32768 after downloading from huggingface MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen2.5-Math-7B"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"} TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"} # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_top_p=0.7 # Performance Related Parameter use_dynamic_bsz=True actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 2)) infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 3)) offload=True gen_tp=2 train_tp=2 train_pp=2 # TODO: support dynamic_bsz for megatron # actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ # actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ # actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ # actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ # actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ # actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ python3 -m verl.trainer.main_ppo \ --config-path=config \ --config-name='ppo_megatron_trainer.yaml' \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.train_batch_size=${train_prompt_bsz} \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.actor.strategy=megatron \ critic.strategy=megatron \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ +actor_rollout_ref.model.override_config.max_position_embeddings=32768 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=2 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.megatron.param_offload=${offload} \ actor_rollout_ref.actor.megatron.optimizer_offload=${offload} \ actor_rollout_ref.actor.megatron.grad_offload=${offload} \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=${train_pp} \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=${train_tp} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.optim.clip_grad=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.rollout.gpu_memory_utilization=0.80 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k=${top_k} \ 
actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=True \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=${train_pp} \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=${train_tp} \ actor_rollout_ref.ref.megatron.param_offload=${offload} \ reward_model.reward_manager=dapo \ +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \ +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \ +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \ +reward_model.reward_kwargs.overlong_buffer_cfg.log=False \ +reward_model.reward_kwargs.max_resp_len=${max_response_length} \ trainer.logger=['console','tensorboard'] \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node=8 \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=True \ trainer.test_freq=10 \ trainer.save_freq=-1 \ trainer.total_epochs=10 \ trainer.total_training_steps=100 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=auto \ trainer.log_val_generations=10 ================================================ FILE: verl_distillation/recipe/one_step_off_policy/distributed_util.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from verl.utils.device import is_npu_available def stateless_init_process_group(master_address, master_port, rank, world_size, device): """ vLLM provides `StatelessProcessGroup` to create a process group without considering the global process group in torch.distributed. It is recommended to create `StatelessProcessGroup`, and then initialize the data-plane communication (NCCL) between external (train processes) and vLLM workers. """ # NOTE: If it is necessary to support weight synchronization with the sglang backend in the future, # the following can be used: # from sglang.srt.distributed.device_communicators.pynccl import PyNcclCommunicator # from sglang.srt.distributed.utils import statelessprocessgroup if is_npu_available: from vllm_ascend.distributed.device_communicators.pyhccl import ( PyHcclCommunicator as PyNcclCommunicator, ) else: from vllm.distributed.device_communicators.pynccl import PyNcclCommunicator from vllm.distributed.utils import StatelessProcessGroup pg = StatelessProcessGroup.create(host=master_address, port=master_port, rank=rank, world_size=world_size) pynccl = PyNcclCommunicator(pg, device=device) return pynccl ================================================ FILE: verl_distillation/recipe/one_step_off_policy/fsdp_workers.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright 2025 Meituan Ltd. 
and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os import torch import torch.distributed from omegaconf import DictConfig, OmegaConf from torch.distributed.device_mesh import init_device_mesh from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from transformers import AutoConfig from verl.single_controller.base import Worker from verl.single_controller.base.decorator import Dispatch, make_nd_compute_dataproto_dispatch_fn, register from verl.utils import hf_processor, hf_tokenizer, omega_conf_to_dataclass from verl.utils.device import ( get_device_id, get_device_name, get_nccl_backend, get_torch_device, ) from verl.utils.fs import copy_to_local from verl.utils.fsdp_utils import ( fsdp_version, ) from verl.utils.import_utils import import_external_libs from verl.utils.model import get_generation_config, update_model_config from verl.utils.profiler import DistProfiler, DistProfilerExtension, ProfilerConfig, log_gpu_memory_usage, simple_timer from verl.utils.profiler.performance import reduce_timing, topk_reduce_ratio_min_max from verl.utils.ray_utils import get_event_loop from verl.workers.config import HFModelConfig, RolloutConfig from verl.workers.fsdp_workers import ActorRolloutRefWorker as ARRWorker from verl.workers.fsdp_workers import CriticWorker from verl.workers.rollout import get_rollout_class from .distributed_util import stateless_init_process_group logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) device_name = get_device_name() __all__ = ["ActorRolloutRefWorker", "AsyncActorRolloutRefWorker", "CriticWorker", "RolloutWorker"] class ActorRolloutRefWorker(ARRWorker): @register(dispatch_mode=Dispatch.ONE_TO_ALL, blocking=False) def create_weight_sync_group(self, master_address, master_port, rank_offset, world_size): rank = torch.distributed.get_rank() + rank_offset self._weight_sync_group = stateless_init_process_group( master_address, master_port, rank, world_size, get_torch_device().current_device(), ) def _get_actor_params(self): assert self._is_actor params = self.actor_module_fsdp.state_dict() from verl.utils.model import convert_weight_keys params = convert_weight_keys( params, getattr(self.actor_module_fsdp, "_fsdp_wrapped_module", self.actor_module_fsdp) ) return params @register(dispatch_mode=Dispatch.ONE_TO_ALL, blocking=False) def sync_rollout_weights(self): assert (self._is_actor or self._is_rollout) and not self.config.hybrid_engine assert hasattr(self, "_weights_info") and self._weights_info is not None params = self._get_actor_params() if self._is_actor else None rollout_name = self.config.rollout.name if self._is_rollout: if rollout_name == "vllm": inference_model = ( self.rollout.inference_engine.llm_engine.model_executor.driver_worker.worker.model_runner.model ) from verl.utils.vllm.patch import patch_vllm_moe_model_weight_loader patch_vllm_moe_model_weight_loader(inference_model) elif rollout_name == "sglang": inference_model = self.rollout._engine else: raise 
NotImplementedError(f"Unknown rollout name: {rollout_name}") loop = get_event_loop() for key, shape, dtype in self._weights_info: tensor = torch.empty(shape, dtype=dtype, device=get_torch_device().current_device()) if self._is_actor: assert key in params origin_data = params[key] if hasattr(origin_data, "full_tensor"): origin_data = origin_data.full_tensor() if torch.distributed.get_rank() == 0: tensor.copy_(origin_data) self._weight_sync_group.broadcast(tensor, src=0, stream=get_torch_device().current_stream()) if self._is_rollout: if rollout_name == "vllm": inference_model.load_weights([(key, tensor)]) elif rollout_name == "sglang": loop.run_until_complete(self.update_weights(inference_model, [(key, tensor)])) async def update_weights(self, inference_engine, params): from sglang.srt.weight_sync.utils import update_weights as sgl_update_weights await sgl_update_weights( engine=inference_engine, params_batch=params, device_mesh_key="infer_tp", device_mesh=self.rollout_device_mesh, ) if self.rollout_device_mesh["infer_tp"].get_local_rank() == 0: await inference_engine.flush_cache() @register(dispatch_mode=Dispatch.ONE_TO_ALL) def get_actor_weights_info(self): assert self._is_actor if hasattr(self, "_weights_info"): return self._weights_info if fsdp_version(self.actor_module_fsdp) == 1: from torch.distributed.fsdp.api import ShardedStateDictConfig, StateDictType FSDP.set_state_dict_type( self.actor_module_fsdp, state_dict_type=StateDictType.SHARDED_STATE_DICT, state_dict_config=ShardedStateDictConfig(), ) params = self._get_actor_params() ret = [] for key, tensor in params.items(): ret.append((key, tensor.size(), tensor.dtype)) self._weights_info = ret return ret class RolloutWorker(ActorRolloutRefWorker): def __init__(self, config: DictConfig, role: str): Worker.__init__(self) assert role == "rollout" self.config = config import torch.distributed if not torch.distributed.is_initialized(): rank = int(os.environ.get("RANK", 0)) world_size = int(os.environ.get("WORLD_SIZE", 1)) torch.distributed.init_process_group( backend=f"cpu:gloo,{get_device_name()}:{get_nccl_backend()}", rank=rank, world_size=world_size, init_method=os.environ.get("DIST_INIT_METHOD", None), ) # TODO(haibin.lin): # As of now the type of config is DictConfig, if we assign config.profiler with ProfilerConfig, # it will actually convert the ProfilerConfig dataclass back to a DictConfig. 
# We can still use ProfilerConfig for testing purpose (tests/utils/test_nvtx_profile.py) # as they provides DictConfig-like interface # The benefit of creating the dataclass config is to perform validation during __post_init__ omega_profiler_config = config.get("profiler", {}) profiler_config = omega_conf_to_dataclass(omega_profiler_config, dataclass_type=ProfilerConfig) if omega_profiler_config.get("tool", None) in ["npu", "nsys", "torch", "torch_memory"]: tool_config = omega_conf_to_dataclass( omega_profiler_config.get("tool_config", {}).get(omega_profiler_config.get("tool")) ) else: tool_config = None DistProfilerExtension.__init__( self, DistProfiler(rank=self.rank, config=profiler_config, tool_config=tool_config) ) self._is_rollout = True self._is_actor = False @register(dispatch_mode=Dispatch.ONE_TO_ALL) def init_model(self): # This is used to import external_lib into the huggingface systems import_external_libs(self.config.model.get("external_lib", None)) override_model_config = OmegaConf.to_container(OmegaConf.create(self.config.model.get("override_config", {}))) use_shm = self.config.model.get("use_shm", False) local_path = copy_to_local(self.config.model.path, use_shm=use_shm) trust_remote_code = self.config.model.get("trust_remote_code", False) self.tokenizer = hf_tokenizer(local_path, trust_remote_code=trust_remote_code) self.processor = hf_processor(local_path, trust_remote_code=trust_remote_code) if self.config.model.get("custom_chat_template", None) is not None: if self.processor is not None: self.processor.chat_template = self.config.model.custom_chat_template else: self.tokenizer.chat_template = self.config.model.custom_chat_template # override model kwargs actor_model_config = AutoConfig.from_pretrained( local_path, trust_remote_code=trust_remote_code, attn_implementation="flash_attention_2" ) # patch for kimi-vl if getattr(actor_model_config, "model_type", None) == "kimi_vl": actor_model_config.text_config.topk_method = "greedy" self.generation_config = get_generation_config(local_path, trust_remote_code=trust_remote_code) override_config_kwargs = { "bos_token_id": self.tokenizer.bos_token_id, "eos_token_id": self.tokenizer.eos_token_id, "pad_token_id": self.tokenizer.pad_token_id, } override_config_kwargs.update(override_model_config) update_model_config(actor_model_config, override_config_kwargs=override_config_kwargs) if self.rank == 0: print(f"Model config after override: {actor_model_config}") infer_tp = self.config.rollout.tensor_model_parallel_size dp = self.world_size // infer_tp assert self.world_size % infer_tp == 0, ( f"rollout world_size: {self.world_size} is not divisible by infer_tp: {infer_tp}" ) rollout_device_mesh = init_device_mesh( device_name, mesh_shape=(dp, infer_tp), mesh_dim_names=["dp", "infer_tp"] ) self.rollout_device_mesh = rollout_device_mesh is_collect = rollout_device_mesh["infer_tp"].get_local_rank() == 0 self._register_dispatch_collect_info( "rollout", dp_rank=rollout_device_mesh["dp"].get_local_rank(), is_collect=is_collect ) rollout_name = self.config.rollout.name if rollout_name not in ("vllm", "sglang"): raise NotImplementedError(f"rollout_name: {rollout_name} is not supported") rollout_config: RolloutConfig = omega_conf_to_dataclass(self.config.rollout) model_config: HFModelConfig = omega_conf_to_dataclass(self.config.model, dataclass_type=HFModelConfig) self.model_config = model_config log_gpu_memory_usage(f"Before building {rollout_name} rollout", logger=logger) rollout = get_rollout_class(rollout_config.name, rollout_config.mode)( 
config=rollout_config, model_config=model_config, device_mesh=rollout_device_mesh ) log_gpu_memory_usage(f"After building {rollout_name} rollout", logger=logger) if rollout_name == "vllm": from .vllm_sharding_manager import VLLMShardingManager rollout_sharding_manager = VLLMShardingManager( inference_engine=rollout.inference_engine, device_mesh=rollout_device_mesh ) log_gpu_memory_usage("After building sharding manager", logger=logger) elif rollout_name == "sglang": from .sglang_sharding_manager import SGLangShardingManager rollout_sharding_manager = SGLangShardingManager(device_mesh=rollout_device_mesh) log_gpu_memory_usage("After building sharding manager", logger=logger) self.model_config = model_config self.rollout = rollout self.rollout_sharding_manager = rollout_sharding_manager @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="rollout"), blocking=False) def async_generate_sequences(self, prompts): # Support all hardwares prompts = prompts.to(get_device_id()) assert self._is_rollout meta_info = { "eos_token_id": self.generation_config.eos_token_id if self.generation_config is not None else self.tokenizer.eos_token_id, "pad_token_id": self.generation_config.pad_token_id if self.generation_config is not None else self.tokenizer.pad_token_id, } prompts.meta_info.update(meta_info) timing_generate = {} with self.rollout_sharding_manager: log_gpu_memory_usage("After entering rollout sharding manager", logger=logger) with simple_timer("generate_sequences", timing_generate): output = self.rollout.generate_sequences(prompts=prompts) log_gpu_memory_usage("After rollout generation", logger=logger) timing_generate.update(self.rollout_sharding_manager.timing) # We calculate the average timing across all ranks # to make sure meta_info["timing"] is the same timing_generate_topk_ratio, timing_generate_min, timing_generate_max = topk_reduce_ratio_min_max( timing_generate["generate_sequences"] ) timing_generate = reduce_timing(timing_generate) timing_generate.update( { "generation_timing/max": timing_generate_max, "generation_timing/min": timing_generate_min, "generation_timing/topk_ratio": timing_generate_topk_ratio, } ) output.meta_info["timing"] = timing_generate output = output.to("cpu") # clear kv cache get_torch_device().empty_cache() return output @register(dispatch_mode=Dispatch.ONE_TO_ALL) def set_actor_weights_info(self, weights_info): assert self._is_rollout self._weights_info = weights_info class AsyncActorRolloutRefWorker(ActorRolloutRefWorker): def __init__(self, *args, **kwargs): raise NotImplementedError ================================================ FILE: verl_distillation/recipe/one_step_off_policy/grpo_0.6b_gsm8k_fsdp2_2_6.sh ================================================ set -x project_name='GRPO' exp_name='GRPO-Qwen3-0.6b-gsm8k-fsdp2-one-step-off-2-6' # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen3-0.6B"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/gsm8k/train.parquet"} TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/gsm8k/test.parquet"} NNODES=${NNODES:-1} NGPUS_PER_NODE=${NGPUS_PER_NODE:-8} n_gpus_rollout=2 n_gpus_training=$((NGPUS_PER_NODE - n_gpus_rollout)) python3 -m recipe.one_step_off_policy.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.train_batch_size=1152 \ data.max_prompt_length=512 \ data.max_response_length=1024 \ 
data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.actor.strategy=fsdp2 \ critic.strategy=fsdp2 \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.hybrid_engine=False \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=192 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=32 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.rollout.load_format=safetensors \ actor_rollout_ref.rollout.layered_summon=True \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.val_before_train=True \ trainer.logger=['console','tensorboard'] \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.save_freq=-1 \ trainer.test_freq=5 \ trainer.total_epochs=2 \ trainer.nnodes="${NNODES}" \ trainer.n_gpus_per_node="${n_gpus_training}" \ rollout.nnodes="${NNODES}" \ rollout.n_gpus_per_node="${n_gpus_rollout}" $@ ================================================ FILE: verl_distillation/recipe/one_step_off_policy/grpo_0.6b_gsm8k_fsdp2_sglang_2_6.sh ================================================ set -x project_name='GRPO' exp_name='GRPO-Qwen3-0.6b-gsm8k-fsdp2-sglang-one-step-off-2-6' # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen3-0.6B"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/gsm8k/train.parquet"} TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/gsm8k/test.parquet"} NNODES=${NNODES:-1} NGPUS_PER_NODE=${NGPUS_PER_NODE:-8} n_gpus_rollout=2 n_gpus_training=$((NGPUS_PER_NODE - n_gpus_rollout)) python3 -m recipe.one_step_off_policy.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.train_batch_size=1152 \ data.max_prompt_length=512 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.actor.strategy=fsdp2 \ critic.strategy=fsdp2 \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.hybrid_engine=False \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=192 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=32 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ 
actor_rollout_ref.rollout.name=sglang \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.rollout.load_format=safetensors \ actor_rollout_ref.rollout.layered_summon=True \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.val_before_train=True \ trainer.logger=['console','tensorboard'] \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.save_freq=-1 \ trainer.test_freq=5 \ trainer.total_epochs=2 \ trainer.nnodes="${NNODES}" \ trainer.n_gpus_per_node="${n_gpus_training}" \ rollout.nnodes="${NNODES}" \ rollout.n_gpus_per_node="${n_gpus_rollout}" $@ ================================================ FILE: verl_distillation/recipe/one_step_off_policy/grpo_3b_gsm8k_fsdp2_2_6.sh ================================================ set -x project_name='GRPO' exp_name='GRPO-Qwen3-0.6b-gsm8k-fsdp2-one-step-off-2-6' # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen/Qwen2.5-3B-Instruct"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/gsm8k/train.parquet"} TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/gsm8k/test.parquet"} NNODES=${NNODES:-1} NGPUS_PER_NODE=${NGPUS_PER_NODE:-8} n_gpus_rollout=2 n_gpus_training=$((NGPUS_PER_NODE - n_gpus_rollout)) python3 -m recipe.one_step_off_policy.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.train_batch_size=1152 \ data.max_prompt_length=512 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.actor.strategy=fsdp2 \ critic.strategy=fsdp2 \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.hybrid_engine=False \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=192 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=32 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.rollout.load_format=safetensors \ actor_rollout_ref.rollout.layered_summon=True \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.val_before_train=True \ trainer.logger=['console','tensorboard'] \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.save_freq=-1 \ trainer.test_freq=5 \ trainer.total_epochs=2 \ trainer.nnodes="${NNODES}" \ trainer.n_gpus_per_node="${n_gpus_training}" \ rollout.nnodes="${NNODES}" \ rollout.n_gpus_per_node="${n_gpus_rollout}" $@ ================================================ FILE: verl_distillation/recipe/one_step_off_policy/main_ppo.py 
================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2025 Meituan Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Note that we don't combine the main with ray_trainer as ray_trainer is used by other main. """ import os import socket import hydra import ray from omegaconf import OmegaConf from recipe.one_step_off_policy.utils import need_critic from verl.trainer.constants_ppo import get_ppo_ray_runtime_env from verl.trainer.main_ppo import create_rl_dataset, create_rl_sampler from verl.trainer.ppo.reward import load_reward_manager from verl.trainer.ppo.utils import need_reference_policy from verl.utils.config import validate_config from .ray_trainer import OneStepOffRayTrainer @hydra.main(config_path="config", config_name="one_step_off_ppo_trainer", version_base=None) def main(config): run_ppo(config) # Define a function to run the PPO-like training process def run_ppo(config) -> None: # Check if Ray is not initialized if not ray.is_initialized(): # Initialize Ray with a local cluster configuration # Set environment variables in the runtime environment to control tokenizer parallelism, # NCCL debug level, VLLM logging level, and allow runtime LoRA updating # `num_cpus` specifies the number of CPU cores Ray can use, obtained from the configuration default_runtime_env = get_ppo_ray_runtime_env() ray_init_kwargs = config.ray_kwargs.get("ray_init", {}) runtime_env_kwargs = ray_init_kwargs.get("runtime_env", {}) runtime_env = OmegaConf.merge(default_runtime_env, runtime_env_kwargs) ray_init_kwargs = OmegaConf.create({**ray_init_kwargs, "runtime_env": runtime_env}) print(f"ray init kwargs: {ray_init_kwargs}") ray.init(**OmegaConf.to_container(ray_init_kwargs)) # Create a remote instance of the TaskRunner class, and # Execute the `run` method of the TaskRunner instance remotely and wait for it to complete if ( config.global_profiler.tool == "nsys" and OmegaConf.select(config.global_profiler, "steps") is not None and len(OmegaConf.select(config.global_profiler, "steps")) > 0 ): nsight_options = OmegaConf.to_container(config.global_profiler.tool_config.nsys.controller_nsight_options) runner = TaskRunner.options(runtime_env={"nsight": nsight_options}).remote() else: runner = TaskRunner.remote() ray.get(runner.run.remote(config)) # [Optional] get the path of the timeline trace file from the configuration, default to None # This file is used for performance analysis timeline_json_file = config.ray_kwargs.get("timeline_json_file", None) if timeline_json_file: ray.timeline(filename=timeline_json_file) @ray.remote(num_cpus=1) # please make sure main_task is not scheduled on head class TaskRunner: def run(self, config): # Print the initial configuration. `resolve=True` will evaluate symbolic values. 
from pprint import pprint from omegaconf import OmegaConf from verl.utils.fs import copy_to_local print(f"TaskRunner hostname: {socket.gethostname()}, PID: {os.getpid()}") pprint(OmegaConf.to_container(config, resolve=True)) OmegaConf.resolve(config) # Define worker classes based on the actor strategy. if config.actor_rollout_ref.actor.strategy == "fsdp2": assert config.actor_rollout_ref.actor.strategy == config.critic.strategy from verl.single_controller.ray import RayWorkerGroup from .fsdp_workers import ( ActorRolloutRefWorker, AsyncActorRolloutRefWorker, CriticWorker, RolloutWorker, ) actor_rollout_cls = ( AsyncActorRolloutRefWorker if config.actor_rollout_ref.rollout.mode == "async" else ActorRolloutRefWorker ) ray_worker_group_cls = RayWorkerGroup elif config.actor_rollout_ref.actor.strategy == "megatron": assert config.actor_rollout_ref.actor.strategy == config.critic.strategy from verl.single_controller.ray import RayWorkerGroup from .megatron_workers import ( ActorRolloutRefWorker, AsyncActorRolloutRefWorker, CriticWorker, RolloutWorker, ) actor_rollout_cls = ( AsyncActorRolloutRefWorker if config.actor_rollout_ref.rollout.mode == "async" else ActorRolloutRefWorker ) ray_worker_group_cls = RayWorkerGroup else: raise NotImplementedError from .ray_trainer import ResourcePoolManager, Role role_worker_mapping = { Role.Actor: ray.remote(actor_rollout_cls), Role.Rollout: ray.remote(RolloutWorker), Role.Critic: ray.remote(CriticWorker), } global_pool_id = "actor_pool" assert config.trainer.n_gpus_per_node > 0, "config.trainer.n_gpus_per_node must be greater than 0" assert config.trainer.nnodes > 0, "config.trainer.nnodes must be greater than 0" assert config.rollout.n_gpus_per_node > 0, "config.rollout.n_gpus_per_node must be greater than 0" assert config.rollout.nnodes > 0, "config.rollout.nnodes must be greater than 0" actor_pool = [config.trainer.n_gpus_per_node] * config.trainer.nnodes rollout_pool = [config.rollout.n_gpus_per_node] * config.rollout.nnodes resource_pool_spec = { "actor_pool": actor_pool, "rollout_pool": rollout_pool, } mapping = { Role.Actor: "actor_pool", Role.Rollout: "rollout_pool", Role.Critic: "actor_pool", } print(f"resource_pool_spec: {resource_pool_spec}") # We should adopt a multi-source reward function here: # - for rule-based rm, we directly call a reward score # - for model-based rm, we call a model # - for code related prompt, we send to a sandbox if there are test cases # finally, we combine all the rewards together # The reward type depends on the tag of the data if config.reward_model.enable: if config.reward_model.strategy in ["fsdp2"]: from verl.workers.fsdp_workers import RewardModelWorker elif config.reward_model.strategy == "megatron": from verl.workers.megatron_workers import RewardModelWorker else: raise NotImplementedError role_worker_mapping[Role.RewardModel] = ray.remote(RewardModelWorker) mapping[Role.RewardModel] = global_pool_id # Add a reference policy worker if KL loss or KL reward is used. if config.algorithm.use_kl_in_reward or config.actor_rollout_ref.actor.use_kl_loss: role_worker_mapping[Role.RefPolicy] = ray.remote(ActorRolloutRefWorker) mapping[Role.RefPolicy] = global_pool_id # validate config validate_config( config=config, use_reference_policy=need_reference_policy(role_worker_mapping), use_critic=need_critic(config), ) # Download the checkpoint from HDFS to the local machine. 
# `use_shm` determines whether to use shared memory, which could lead to faster model loading if turned on local_path = copy_to_local( config.actor_rollout_ref.model.path, use_shm=config.actor_rollout_ref.model.get("use_shm", False) ) # Instantiate the tokenizer and processor. from verl.utils import hf_processor, hf_tokenizer trust_remote_code = config.data.get("trust_remote_code", False) tokenizer = hf_tokenizer(local_path, trust_remote_code=trust_remote_code) # Used for multimodal LLM, could be None processor = hf_processor(local_path, trust_remote_code=trust_remote_code, use_fast=True) # Load the reward manager for training and validation. reward_fn = load_reward_manager( config, tokenizer, num_examine=0, **config.reward_model.get("reward_kwargs", {}) ) val_reward_fn = load_reward_manager( config, tokenizer, num_examine=1, **config.reward_model.get("reward_kwargs", {}) ) resource_pool_manager = ResourcePoolManager(resource_pool_spec=resource_pool_spec, mapping=mapping) from verl.utils.dataset.rl_dataset import collate_fn # Create training and validation datasets. train_dataset = create_rl_dataset( config.data.train_files, config.data, tokenizer, processor, max_samples=config.data.get("train_max_samples", -1), ) val_dataset = create_rl_dataset( config.data.val_files, config.data, tokenizer, processor, max_samples=config.data.get("val_max_samples", -1) ) train_sampler = create_rl_sampler(config.data, train_dataset) # Initialize the PPO trainer. trainer = OneStepOffRayTrainer( config=config, tokenizer=tokenizer, processor=processor, role_worker_mapping=role_worker_mapping, resource_pool_manager=resource_pool_manager, ray_worker_group_cls=ray_worker_group_cls, reward_fn=reward_fn, val_reward_fn=val_reward_fn, train_dataset=train_dataset, val_dataset=val_dataset, collate_fn=collate_fn, train_sampler=train_sampler, device_name=config.trainer.device, ) # Initialize the workers of the trainer. trainer.init_workers() # Start the training process. trainer.fit() if __name__ == "__main__": main() ================================================ FILE: verl_distillation/recipe/one_step_off_policy/megatron_workers.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright 2025 Meituan Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
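# The workers below implement the one-step-off weight path: after each actor
# update, parameters are streamed tensor-by-tensor from the actor to every
# rollout worker over a dedicated process group (see create_weight_sync_group
# and sync_rollout_weights). A minimal illustrative sketch of that broadcast
# loop, using a plain torch.distributed group and hypothetical names (the
# recipe itself uses the stateless group from distributed_util):
def _weight_broadcast_sketch(weights_info, params_iter, group, device):
    import torch
    import torch.distributed as dist
    # weights_info: (name, shape, dtype) tuples exchanged once up front, so
    # receivers can allocate buffers without ever holding the actor weights
    for name, shape, dtype in weights_info:
        buf = torch.empty(shape, dtype=dtype, device=device)
        if dist.get_rank(group=group) == 0:  # assume the actor is rank 0
            key, weight = next(params_iter)
            assert key == name
            buf.copy_(weight)
        dist.broadcast(buf, src=0, group=group)  # rollout ranks receive
        yield name, buf  # each tensor is then loaded into the inference engine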
import logging import os import torch import torch.distributed from omegaconf import DictConfig, OmegaConf from verl.single_controller.base.decorator import Dispatch, make_nd_compute_dataproto_dispatch_fn, register from verl.utils.config import omega_conf_to_dataclass from verl.utils.debug import ( log_gpu_memory_usage, ) from verl.utils.device import get_device_name, get_torch_device from verl.workers.config import HFModelConfig, RolloutConfig from verl.workers.megatron_workers import ActorRolloutRefWorker as ARRWorker from verl.workers.megatron_workers import CriticWorker, RewardModelWorker from verl.workers.rollout import get_rollout_class from .distributed_util import stateless_init_process_group logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) __all__ = ["ActorRolloutRefWorker", "AsyncActorRolloutRefWorker", "CriticWorker", "RewardModelWorker", "RolloutWorker"] class ActorRolloutRefWorker(ARRWorker): def __init__(self, config: DictConfig, role: str): assert role in ["actor", "ref"] tmp_role = "ref" if role == "ref" else "actor_rollout" super().__init__(config, tmp_role) if role == "actor": self._is_rollout = False self.role = role @register(dispatch_mode=Dispatch.ONE_TO_ALL, blocking=False) def create_weight_sync_group(self, master_address, master_port, rank_offset, world_size): rank = torch.distributed.get_rank() + rank_offset self._weight_sync_group = stateless_init_process_group( master_address, master_port, rank, world_size, get_torch_device().current_device(), ) def _get_actor_params_generator(self): assert self._is_actor from verl.models.mcore import get_mcore_weight_converter from verl.utils.megatron_utils import per_tensor_generator layer_name_mapping = { "qkv_layer_name": "self_attention.linear_qkv.", "gate_proj_layer_name": "linear_fc1.", } weight_converter = get_mcore_weight_converter(self.actor_model_config, self.dtype) generator = per_tensor_generator( self.actor.actor_module, self.actor_model_config, weight_converter, self.tf_config, layer_name_mapping, ) return generator @register(dispatch_mode=Dispatch.ONE_TO_ALL, blocking=False) def sync_rollout_weights(self): assert (self._is_actor or self._is_rollout) and not self.config.hybrid_engine assert hasattr(self, "_weights_info") and self._weights_info is not None params_generator = self._get_actor_params_generator() if self._is_actor else None if self._is_rollout: inference_model = ( self.rollout.inference_engine.llm_engine.model_executor.driver_worker.worker.model_runner.model ) from verl.utils.vllm.patch import patch_vllm_moe_model_weight_loader patch_vllm_moe_model_weight_loader(inference_model) for key, shape, dtype in self._weights_info: if self._is_actor: weight_key, weight = next(params_generator) assert key == weight_key assert shape == weight.size() assert dtype == weight.dtype tensor = torch.empty(shape, dtype=dtype, device=get_torch_device().current_device()) if self._is_actor and torch.distributed.get_rank() == 0: tensor.copy_(weight) self._weight_sync_group.broadcast(tensor, src=0, stream=get_torch_device().current_stream()) if self._is_rollout: inference_model.load_weights([(key, tensor)]) @register(dispatch_mode=Dispatch.ONE_TO_ALL) def get_actor_weights_info(self): assert self._is_actor if hasattr(self, "_weights_info"): return self._weights_info params_generator = self._get_actor_params_generator() ret = [] for key, tensor in params_generator: ret.append((key, tensor.size(), tensor.dtype)) self._weights_info = ret return ret class 
RolloutWorker(ActorRolloutRefWorker): def __init__(self, config: DictConfig, role: str): assert role == "rollout" ARRWorker.__init__(self, config, role) @register(dispatch_mode=Dispatch.ONE_TO_ALL) def init_model(self): if self.config.model.get("external_lib", None) is not None: # This is used to import external_lib into the huggingface systems import importlib importlib.import_module(self.config.model.external_lib) from verl.utils.torch_dtypes import PrecisionType override_model_config = OmegaConf.to_container(OmegaConf.create(self.config.model.get("override_config", {}))) override_transformer_config = {} self.param_dtype = torch.bfloat16 self.dtype = PrecisionType.to_dtype(self.param_dtype) trust_remote_code = self.config.model.get("trust_remote_code", False) from verl.utils.model import get_generation_config self._init_hf_config_and_tf_config( self.config.model.path, self.config.model.path, self.dtype, override_model_config, override_transformer_config, trust_remote_code, ) self.generation_config = get_generation_config(self.local_path) from torch.distributed.device_mesh import init_device_mesh assert self.config.rollout.name == "vllm" assert self.config.rollout.mode == "sync" from .vllm_sharding_manager import VLLMShardingManager # NOTE(sgm): If the QKV and gate_up projection layers are concatenated together in the actor, # we will reorganize their weight format when resharding from actor to rollout. infer_tp = self.config.rollout.tensor_model_parallel_size dp = self.world_size // infer_tp assert self.world_size % infer_tp == 0, ( f"rollout world_size: {self.world_size} is not divisible by infer_tp: {infer_tp}" ) rollout_device_mesh = init_device_mesh( get_device_name(), mesh_shape=(dp, infer_tp), mesh_dim_names=["dp", "infer_tp"] ) is_collect = rollout_device_mesh["infer_tp"].get_local_rank() == 0 self._register_dispatch_collect_info( "rollout", dp_rank=rollout_device_mesh["dp"].get_local_rank(), is_collect=is_collect ) log_gpu_memory_usage("Before building vllm rollout", logger=None) rollout_config: RolloutConfig = omega_conf_to_dataclass(self.config.rollout) model_config: HFModelConfig = omega_conf_to_dataclass(self.config.model, dataclass_type=HFModelConfig) rollout = get_rollout_class(rollout_config.name, rollout_config.mode)( config=rollout_config, model_config=model_config, device_mesh=rollout_device_mesh ) log_gpu_memory_usage("After building vllm rollout", logger=logger) sharding_manager = VLLMShardingManager( inference_engine=rollout.inference_engine, device_mesh=rollout_device_mesh, ) log_gpu_memory_usage("After building sharding manager", logger=logger) self.rollout, self.sharding_manager = rollout, sharding_manager self.rollout.sharding_manager = sharding_manager @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="rollout"), blocking=False) def async_generate_sequences(self, *args, **kwargs): return super().generate_sequences(*args, **kwargs) @register(dispatch_mode=Dispatch.ONE_TO_ALL) def set_actor_weights_info(self, weights_info): assert self._is_rollout self._weights_info = weights_info class AsyncActorRolloutRefWorker(ActorRolloutRefWorker): def __init__(self, *args, **kwargs): raise NotImplementedError ================================================ FILE: verl_distillation/recipe/one_step_off_policy/ray_trainer.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # Copyright 2025 Meituan Ltd.
and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This trainer supports model-agnostic model initialization with HuggingFace """ import uuid from pprint import pprint import numpy as np import ray import torch from omegaconf import OmegaConf from torch.utils.data import Dataset, Sampler from tqdm import tqdm from recipe.one_step_off_policy.utils import need_critic from verl import DataProto from verl.single_controller.ray import RayClassWithInitArgs, RayWorkerGroup from verl.single_controller.ray.base import create_colocated_worker_cls from verl.trainer.ppo import core_algos from verl.trainer.ppo.core_algos import agg_loss from verl.trainer.ppo.metric_utils import ( compute_data_metrics, compute_throughout_metrics, compute_timing_metrics, ) from verl.trainer.ppo.ray_trainer import ( RayPPOTrainer, ResourcePoolManager, apply_kl_penalty, compute_advantage, compute_response_mask, ) from verl.trainer.ppo.reward import compute_reward, compute_reward_async from verl.trainer.ppo.utils import Role, WorkerType, need_reference_policy, need_reward_model from verl.utils.debug import marked_timer from verl.utils.metric import ( reduce_metrics, ) from verl.utils.tracking import ValidationGenerationsLogger class GenerationBatchFuture: """ Wrapper class for encapsulating batch generation results """ def __init__(self, epoch, batch, gen_batch_output, future_reward=None): """ :param epoch: current epoch :param batch: Input batch data :param gen_batch_output: Generated sequences from the main model (DataProtoFuture) :param future_reward: Future for reward computation (optional) """ self.epoch = epoch self.batch = batch self.gen_batch_output = gen_batch_output self.future_reward = future_reward def get(self): """ Get the actual results by calling the get() method on gen_batch_output Returns: tuple: (epoch, batch, gen_batch_result, future_reward) - epoch: Current epoch - batch: Original input batch data - gen_batch_result: Result from gen_batch_output.get() or gen_batch_output itself - future_reward: Future for reward computation if available, else None """ # Call get() method on gen_batch_output if available if hasattr(self.gen_batch_output, "get"): gen_batch_result = self.gen_batch_output.get() else: gen_batch_result = self.gen_batch_output return self.epoch, self.batch, gen_batch_result, self.future_reward class OneStepOffRayTrainer(RayPPOTrainer): # TODO: support an individual ray_worker_group_cls for each role, # i.e., support a different backend for each role def __init__( self, config, tokenizer, role_worker_mapping: dict[Role, WorkerType], resource_pool_manager: ResourcePoolManager, ray_worker_group_cls: RayWorkerGroup = RayWorkerGroup, processor=None, reward_fn=None, val_reward_fn=None, train_dataset: Dataset | None = None, val_dataset: Dataset | None = None, collate_fn=None, train_sampler: Sampler | None = None, device_name="cuda", ): """ Initialize distributed PPO trainer with Ray backend. Note that this trainer runs on the driver process on a single CPU/GPU node.
Args: config: Configuration object containing training parameters. tokenizer: Tokenizer used for encoding and decoding text. role_worker_mapping (dict[Role, WorkerType]): Mapping from roles to worker classes. resource_pool_manager (ResourcePoolManager): Manager for Ray resource pools. ray_worker_group_cls (RayWorkerGroup, optional): Class for Ray worker groups. Defaults to RayWorkerGroup. processor: Optional data processor, used for multimodal data. reward_fn: Function for computing rewards during training. val_reward_fn: Function for computing rewards during validation. train_dataset (Optional[Dataset], optional): Training dataset. Defaults to None. val_dataset (Optional[Dataset], optional): Validation dataset. Defaults to None. collate_fn: Function to collate data samples into batches. train_sampler (Optional[Sampler], optional): Sampler for the training dataset. Defaults to None. device_name (str, optional): Device name for training (e.g., "cuda", "cpu"). Defaults to "cuda". """ # Store the tokenizer for text processing self.tokenizer = tokenizer self.processor = processor self.config = config self.reward_fn = reward_fn self.val_reward_fn = val_reward_fn self.hybrid_engine = config.actor_rollout_ref.hybrid_engine assert not self.hybrid_engine self.role_worker_mapping = role_worker_mapping self.resource_pool_manager = resource_pool_manager self.use_reference_policy = need_reference_policy(self.role_worker_mapping) self.use_rm = need_reward_model(self.role_worker_mapping) self.use_critic = need_critic(config) self.ray_worker_group_cls = ray_worker_group_cls self.device_name = device_name self.validation_generations_logger = ValidationGenerationsLogger() # if ref_in_actor is True, the reference policy will be the actor without lora applied self.ref_in_actor = config.actor_rollout_ref.model.get("lora_rank", 0) > 0 # define in-reward KL control # KL loss control is currently not supported if config.algorithm.use_kl_in_reward: self.kl_ctrl_in_reward = core_algos.get_kl_controller(config.algorithm.kl_ctrl) self._create_dataloader(train_dataset, val_dataset, collate_fn, train_sampler) def _validate(self): self.actor_rollout_wg = self.rollout_wg ret = super()._validate() self.actor_rollout_wg = self.actor_wg return ret def init_workers(self): """Initialize distributed training workers using Ray backend. Creates: 1. Ray resource pools from configuration 2. Worker groups for each role (actor, critic, etc.)
""" self.resource_pool_manager.create_resource_pool() self.resource_pool_to_cls = {pool: {} for pool in self.resource_pool_manager.resource_pool_dict.values()} # create actor and rollout for role, role_name in [(Role.Actor, "actor"), (Role.Rollout, "rollout")]: resource_pool = self.resource_pool_manager.get_resource_pool(role) role_cls = RayClassWithInitArgs( cls=self.role_worker_mapping[role], config=self.config.actor_rollout_ref, role=role_name, ) self.resource_pool_to_cls[resource_pool][role_name] = role_cls # create critic if self.use_critic: resource_pool = self.resource_pool_manager.get_resource_pool(Role.Critic) critic_cls = RayClassWithInitArgs(cls=self.role_worker_mapping[Role.Critic], config=self.config.critic) self.resource_pool_to_cls[resource_pool]["critic"] = critic_cls # create reference policy if needed if self.use_reference_policy: resource_pool = self.resource_pool_manager.get_resource_pool(Role.RefPolicy) ref_policy_cls = RayClassWithInitArgs( self.role_worker_mapping[Role.RefPolicy], config=self.config.actor_rollout_ref, role="ref", ) self.resource_pool_to_cls[resource_pool]["ref"] = ref_policy_cls # create a reward model if reward_fn is None if self.use_rm: # we create a RM here resource_pool = self.resource_pool_manager.get_resource_pool(Role.RewardModel) rm_cls = RayClassWithInitArgs(self.role_worker_mapping[Role.RewardModel], config=self.config.reward_model) self.resource_pool_to_cls[resource_pool]["rm"] = rm_cls # initialize WorkerGroup # NOTE: if you want to use a different resource pool for each role, which can support different parallel size, # you should not use `create_colocated_worker_cls`. # Instead, directly pass different resource pool to different worker groups. # See https://github.com/volcengine/verl/blob/master/examples/ray/tutorial.ipynb for more information. 
all_wg = {} wg_kwargs = {} # Setting up kwargs for RayWorkerGroup if OmegaConf.select(self.config.trainer, "ray_wait_register_center_timeout") is not None: wg_kwargs["ray_wait_register_center_timeout"] = self.config.trainer.ray_wait_register_center_timeout if OmegaConf.select(self.config.global_profiler, "steps") is not None: # read the steps from global_profiler, matching the guard above wg_kwargs["profile_steps"] = OmegaConf.select(self.config.global_profiler, "steps") assert ( OmegaConf.select(self.config.global_profiler.global_tool_config.nsys, "worker_nsight_options") is not None ), "worker_nsight_options must be set when profile_steps is set" wg_kwargs["worker_nsight_options"] = OmegaConf.to_container( OmegaConf.select(self.config.global_profiler.global_tool_config.nsys, "worker_nsight_options") ) for resource_pool, class_dict in self.resource_pool_to_cls.items(): worker_dict_cls = create_colocated_worker_cls(class_dict=class_dict) wg_dict = self.ray_worker_group_cls( resource_pool=resource_pool, ray_cls_with_init=worker_dict_cls, device_name=self.device_name, **wg_kwargs, ) spawn_wg = wg_dict.spawn(prefix_set=class_dict.keys()) all_wg.update(spawn_wg) if self.use_critic: self.critic_wg = all_wg["critic"] self.critic_wg.init_model() if self.use_reference_policy and not self.ref_in_actor: self.ref_policy_wg = all_wg["ref"] self.ref_policy_wg.init_model() if self.use_rm: self.rm_wg = all_wg["rm"] self.rm_wg.init_model() self.actor_wg = all_wg["actor"] self.rollout_wg = all_wg["rollout"] self.actor_wg.init_model() self.rollout_wg.init_model() self.actor_rollout_wg = self.actor_wg # to stay compatible with the functions that are not modified weights_info = self.actor_wg.get_actor_weights_info()[0] self.rollout_wg.set_actor_weights_info(weights_info) self.create_weight_sync_group() self.sync_rollout_weights() # create async rollout manager and request scheduler self.async_rollout_mode = False if self.config.actor_rollout_ref.rollout.mode == "async" and self._is_rollout: from verl.workers.rollout.async_server import AsyncLLMServerManager self.async_rollout_mode = True self.async_rollout_manager = AsyncLLMServerManager( config=self.config, worker_group=self.rollout_wg, ) def create_weight_sync_group(self): master_address = ray.get(self.actor_wg.workers[0]._get_node_ip.remote()) master_port = ray.get(self.actor_wg.workers[0]._get_free_port.remote()) world_size = len(self.actor_wg.workers + self.rollout_wg.workers) self.actor_wg.create_weight_sync_group( master_address, master_port, 0, world_size, ) ray.get( self.rollout_wg.create_weight_sync_group( master_address, master_port, len(self.actor_wg.workers), world_size, ) ) def sync_rollout_weights(self): if not self.hybrid_engine: self.actor_wg.sync_rollout_weights() ray.get(self.rollout_wg.sync_rollout_weights()) def _create_continuous_iterator(self): """ Create a continuous data iterator across epochs """ for epoch in range(self.config.trainer.total_epochs): iterator = iter(self.train_dataloader) for batch_dict in iterator: yield epoch, batch_dict def _async_gen_next_batch(self, continuous_iterator): """ Synchronize parameters to the rollout workers and launch asynchronous sequence generation.
""" try: epoch, batch_dict = next(continuous_iterator) except StopIteration: return None except Exception as e: print(f"Error in async_gen_next_batch: {e}") return None # Create the initial batch from the data loader batch = DataProto.from_single_dict(batch_dict) # pop those keys for generation batch_keys_to_pop = ["input_ids", "attention_mask", "position_ids"] non_tensor_batch_keys_to_pop = ["raw_prompt_ids"] if "multi_modal_data" in batch.non_tensor_batch: non_tensor_batch_keys_to_pop.append("multi_modal_data") if "raw_prompt" in batch.non_tensor_batch: non_tensor_batch_keys_to_pop.append("raw_prompt") if "tools_kwargs" in batch.non_tensor_batch: non_tensor_batch_keys_to_pop.append("tools_kwargs") if "interaction_kwargs" in batch.non_tensor_batch: non_tensor_batch_keys_to_pop.append("interaction_kwargs") gen_batch = batch.pop( batch_keys=batch_keys_to_pop, non_tensor_batch_keys=non_tensor_batch_keys_to_pop, ) gen_batch = gen_batch.repeat(repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True) # sync weights from actor to rollout self.sync_rollout_weights() # async generation gen_batch_output = self.rollout_wg.async_generate_sequences(gen_batch) # Launch individual reward computations as each generation completes future_reward = None if self.config.reward_model.launch_reward_fn_async: # Store the object reference and set up callback future_reward = self._launch_individual_rewards.remote( gen_batch_output, self.config, self.tokenizer, batch.non_tensor_batch ) # Return the original, now-modified `batch` and the `future_reward` return GenerationBatchFuture(epoch, batch, gen_batch_output, future_reward) @staticmethod @ray.remote def _launch_individual_rewards(gen_batch_output, config, tokenizer, original_non_tensor_batch): # Get generation results gen_batch_result = gen_batch_output.get() # Repeat non_tensor_batch to match the number of responses n = config.actor_rollout_ref.rollout.n repeated_non_tensor_batch = {} for key, value in original_non_tensor_batch.items(): repeated_non_tensor_batch[key] = np.repeat(value, n, axis=0) # Split into individual responses with preserved non_tensor_batch responses_split = [] for i in range(len(gen_batch_result)): response_data = gen_batch_result[i : i + 1] # Get single response # Add repeated non_tensor_batch values for key in repeated_non_tensor_batch: response_data.non_tensor_batch[key] = repeated_non_tensor_batch[key][i : i + 1] responses_split.append(response_data) # Launch async reward computation reward_futures = [ compute_reward_async.remote(response_data, config, tokenizer) for response_data in responses_split ] # Wait for results and combine results = ray.get(reward_futures) rewards_list = [r[0] for r in results] extras_list = [r[1] for r in results] combined_reward_tensor = torch.cat(rewards_list, dim=0) combined_extras_dict = {} if extras_list and extras_list[0]: for key in extras_list[0].keys(): combined_extras_dict[key] = [d[key] for d in extras_list if key in d] return combined_reward_tensor, combined_extras_dict def fit(self): """ The training loop of PPO. The driver process only need to call the compute functions of the worker group through RPC to construct the PPO dataflow. The light-weight advantage computation is done on the driver process. 
""" from omegaconf import OmegaConf from verl.utils.tracking import Tracking logger = Tracking( project_name=self.config.trainer.project_name, experiment_name=self.config.trainer.experiment_name, default_backend=self.config.trainer.logger, config=OmegaConf.to_container(self.config, resolve=True), ) self.global_steps = 0 # load checkpoint before doing anything self._load_checkpoint() # perform validation before training # currently, we only support validation using the reward_function. if self.val_reward_fn is not None and self.config.trainer.get("val_before_train", True): val_metrics = self._validate() assert val_metrics, f"{val_metrics=}" pprint(f"Initial validation metrics: {val_metrics}") logger.log(data=val_metrics, step=self.global_steps) if self.config.trainer.get("val_only", False): return # add tqdm progress_bar = tqdm(total=self.total_training_steps, initial=self.global_steps, desc="Training Progress") # we start from step 1 self.global_steps += 1 last_val_metrics = None # across epoch iterator continuous_iterator = self._create_continuous_iterator() # Start the first asynchronous generation task. batch_data_future = self._async_gen_next_batch(continuous_iterator) while batch_data_future is not None: do_profile = ( self.global_steps in self.config.global_profiler.steps if self.config.global_profiler.steps is not None else False ) if do_profile: self.actor_wg.start_profile() if not self.hybrid_engine: self.rollout_wg.start_profile() if self.use_reference_policy: self.ref_policy_wg.start_profile() if self.use_critic: self.critic_wg.start_profile() if self.use_rm: self.rm_wg.start_profile() metrics = {} timing_raw = {} is_last_step = self.global_steps >= self.total_training_steps with marked_timer("step", timing_raw): # wait for the previous batch with marked_timer("wait_prev_gen", timing_raw, color="red"): epoch, batch, gen_batch_output, future_reward = batch_data_future.get() timing_raw.update(gen_batch_output.meta_info["timing"]) gen_batch_output.meta_info.pop("timing", None) # asys next generation (with syns weights from actor to rollout) with marked_timer("sync_rollout_weights", timing_raw, color="purple"): if not is_last_step: batch_data_future = self._async_gen_next_batch(continuous_iterator) batch.non_tensor_batch["uid"] = np.array( [str(uuid.uuid4()) for _ in range(len(batch.batch))], dtype=object ) # repeat to align with repeated responses in rollout batch = batch.repeat(repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True) batch = batch.union(gen_batch_output) batch.batch["response_mask"] = compute_response_mask(batch) # Balance the number of valid tokens across DP ranks. # NOTE: This usually changes the order of data in the `batch`, # which won't affect the advantage calculation (since it's based on uid), # but might affect the loss calculation (due to the change of mini-batching). # TODO: Decouple the DP balancing and mini-batching. 
if self.config.trainer.balance_batch: self._balance_batch(batch, metrics=metrics) # compute global_valid tokens batch.meta_info["global_token_num"] = torch.sum(batch.batch["attention_mask"], dim=-1).tolist() with marked_timer("reward", timing_raw, color="yellow"): # compute reward model score if self.use_rm: reward_tensor = self.rm_wg.compute_rm_score(batch) batch = batch.union(reward_tensor) # Use the pre-launched future reward if available if self.config.reward_model.launch_reward_fn_async: # future_reward was already started in _async_gen_next_batch reward_tensor, reward_extra_infos_dict = ray.get(future_reward) else: reward_tensor, reward_extra_infos_dict = compute_reward(batch, self.reward_fn) # recompute old_log_probs with marked_timer("old_log_prob", timing_raw, color="blue"): old_log_prob = self.actor_wg.compute_log_prob(batch) entropys = old_log_prob.batch["entropys"] response_masks = batch.batch["response_mask"] loss_agg_mode = self.config.actor_rollout_ref.actor.loss_agg_mode entropy_agg = agg_loss(loss_mat=entropys, loss_mask=response_masks, loss_agg_mode=loss_agg_mode) old_log_prob_metrics = {"actor/entropy": entropy_agg.detach().item()} metrics.update(old_log_prob_metrics) old_log_prob.batch.pop("entropys") batch = batch.union(old_log_prob) if "rollout_log_probs" in batch.batch.keys(): # TODO: we may want to add diff of probs too. rollout_old_log_probs = batch.batch["rollout_log_probs"] actor_old_log_probs = batch.batch["old_log_probs"] attention_mask = batch.batch["attention_mask"] responses = batch.batch["responses"] response_length = responses.size(1) response_mask = attention_mask[:, -response_length:] rollout_probs = torch.exp(rollout_old_log_probs) actor_probs = torch.exp(actor_old_log_probs) rollout_probs_diff = torch.abs(rollout_probs - actor_probs) rollout_probs_diff = torch.masked_select(rollout_probs_diff, response_mask.bool()) rollout_probs_diff_max = torch.max(rollout_probs_diff) rollout_probs_diff_mean = torch.mean(rollout_probs_diff) rollout_probs_diff_std = torch.std(rollout_probs_diff) metrics.update( { "training/rollout_probs_diff_max": rollout_probs_diff_max.detach().item(), "training/rollout_probs_diff_mean": rollout_probs_diff_mean.detach().item(), "training/rollout_probs_diff_std": rollout_probs_diff_std.detach().item(), } ) if self.use_reference_policy: # compute reference log_prob with marked_timer("ref", timing_raw, color="olive"): if not self.ref_in_actor: ref_log_prob = self.ref_policy_wg.compute_ref_log_prob(batch) else: ref_log_prob = self.actor_wg.compute_ref_log_prob(batch) batch = batch.union(ref_log_prob) # compute values if self.use_critic: with marked_timer("values", timing_raw, color="cyan"): values = self.critic_wg.compute_values(batch) batch = batch.union(values) with marked_timer("adv", timing_raw, color="brown"): # we combine with rule-based rm reward_extra_infos_dict: dict[str, list] batch.batch["token_level_scores"] = reward_tensor if reward_extra_infos_dict: batch.non_tensor_batch.update({k: np.array(v) for k, v in reward_extra_infos_dict.items()}) # compute rewards. 
apply_kl_penalty if available if self.config.algorithm.use_kl_in_reward: batch, kl_metrics = apply_kl_penalty( batch, kl_ctrl=self.kl_ctrl_in_reward, kl_penalty=self.config.algorithm.kl_penalty ) metrics.update(kl_metrics) else: batch.batch["token_level_rewards"] = batch.batch["token_level_scores"] # Compute rollout IS weights and mismatch metrics (inherited from RayPPOTrainer) batch, is_metrics = self.compute_rollout_importance_weights_and_add_to_batch(batch) # IS and mismatch metrics already have mismatch/ prefix metrics.update(is_metrics) # compute advantages, executed on the driver process norm_adv_by_std_in_grpo = self.config.algorithm.get( "norm_adv_by_std_in_grpo", True ) # GRPO adv normalization factor batch = compute_advantage( batch, adv_estimator=self.config.algorithm.adv_estimator, gamma=self.config.algorithm.gamma, lam=self.config.algorithm.lam, num_repeat=self.config.actor_rollout_ref.rollout.n, norm_adv_by_std_in_grpo=norm_adv_by_std_in_grpo, config=self.config.algorithm, ) # update critic if self.use_critic: with marked_timer("update_critic", timing_raw, color="pink"): critic_output = self.critic_wg.update_critic(batch) critic_output_metrics = reduce_metrics(critic_output.meta_info["metrics"]) metrics.update(critic_output_metrics) # implement critic warmup if self.config.trainer.critic_warmup <= self.global_steps: # update actor with marked_timer("update_actor", timing_raw, color="red"): batch.meta_info["multi_turn"] = self.config.actor_rollout_ref.rollout.multi_turn.enable actor_output = self.actor_wg.update_actor(batch) actor_output_metrics = reduce_metrics(actor_output.meta_info["metrics"]) metrics.update(actor_output_metrics) # Log rollout generations if enabled rollout_data_dir = self.config.trainer.get("rollout_data_dir", None) if rollout_data_dir: with marked_timer("dump_rollout_generations", timing_raw, color="green"): inputs = self.tokenizer.batch_decode(batch.batch["prompts"], skip_special_tokens=True) outputs = self.tokenizer.batch_decode(batch.batch["responses"], skip_special_tokens=True) scores = batch.batch["token_level_scores"].sum(-1).cpu().tolist() self._dump_generations( inputs=inputs, outputs=outputs, scores=scores, reward_extra_infos_dict=reward_extra_infos_dict, dump_path=rollout_data_dir, ) # validate if ( self.val_reward_fn is not None and self.config.trainer.test_freq > 0 and (is_last_step or self.global_steps % self.config.trainer.test_freq == 0) ): with marked_timer("testing", timing_raw, color="green"): val_metrics: dict = self._validate() if is_last_step: last_val_metrics = val_metrics metrics.update(val_metrics) if self.config.trainer.save_freq > 0 and ( is_last_step or self.global_steps % self.config.trainer.save_freq == 0 ): with marked_timer("save_checkpoint", timing_raw, color="green"): self._save_checkpoint() # training metrics metrics.update( { "training/global_step": self.global_steps, "training/epoch": epoch, } ) # collect metrics metrics.update(compute_data_metrics(batch=batch, use_critic=self.use_critic)) metrics.update(compute_timing_metrics(batch=batch, timing_raw=timing_raw)) # TODO: implement actual tflops and theoretical tflops n_gpus = self.resource_pool_manager.get_n_gpus() metrics.update(compute_throughout_metrics(batch=batch, timing_raw=timing_raw, n_gpus=n_gpus)) # TODO: make a canonical logger that supports various backends logger.log(data=metrics, step=self.global_steps) progress_bar.update(1) self.global_steps += 1 if do_profile: self.actor_wg.stop_profile() if not self.hybrid_engine: self.rollout_wg.stop_profile() if
self.use_reference_policy: self.ref_policy_wg.stop_profile() if self.use_critic: self.critic_wg.stop_profile() if self.use_rm: self.rm_wg.stop_profile() if is_last_step: pprint(f"Final validation metrics: {last_val_metrics}") progress_bar.close() return ================================================ FILE: verl_distillation/recipe/one_step_off_policy/sglang_sharding_manager.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright 2025 Meituan Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os from torch.distributed.device_mesh import DeviceMesh from verl import DataProto from verl.protocol import all_gather_data_proto from verl.utils.debug import GPUMemoryLogger from verl.utils.device import get_torch_device from verl.utils.torch_functional import check_device_is_available from verl.workers.sharding_manager.base import BaseShardingManager logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) class SGLangShardingManager(BaseShardingManager): @check_device_is_available() def __init__(self, device_mesh: DeviceMesh): self.device_mesh = device_mesh self.tp_size = self.device_mesh["infer_tp"].size() self.tp_rank = self.device_mesh["infer_tp"].get_local_rank() self.timing = {} gen_dp_rank = self.device_mesh["dp"].get_local_rank() get_torch_device().manual_seed(gen_dp_rank + 1000) self.gen_random_states = get_torch_device().get_rng_state() @GPUMemoryLogger(role="sglang sharding_manager", logger=logger) def __enter__(self): get_torch_device().set_rng_state(self.gen_random_states) @GPUMemoryLogger(role="sglang sharding_manager", logger=logger) def __exit__(self, exc_type, exc_value, traceback): self.gen_random_states = get_torch_device().get_rng_state() get_torch_device().empty_cache() @GPUMemoryLogger(role="sglang sharding_manager", logger=logger) def preprocess_data(self, data: DataProto) -> DataProto: """All-gather across the TP group so that each rank has identical input.""" if self.tp_size == 1: return data # TODO: Current impl doesn't consider FSDP with torch micro-dp group = self.device_mesh["infer_tp"].get_group() all_gather_data_proto(data=data, process_group=group) return data @GPUMemoryLogger(role="sglang sharding_manager", logger=logger) def postprocess_data(self, data: DataProto) -> DataProto: """Get this TP rank's chunk of the data, since we all-gather in preprocess.""" if self.tp_size == 1: return data return data.chunk(chunks=self.tp_size)[self.tp_rank] ================================================ FILE: verl_distillation/recipe/one_step_off_policy/utils.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2025 Meituan Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from omegaconf import DictConfig from verl.trainer.ppo.core_algos import AdvantageEstimator def need_critic(config: DictConfig) -> bool: """Given a config, do we need critic""" if config.algorithm.adv_estimator == AdvantageEstimator.GAE: return True elif config.algorithm.adv_estimator in [ AdvantageEstimator.GRPO, AdvantageEstimator.GRPO_PASSK, AdvantageEstimator.REINFORCE_PLUS_PLUS, # AdvantageEstimator.REMAX, # TODO:REMAX advantage estimator is not yet supported in one_step_off_policy AdvantageEstimator.RLOO, AdvantageEstimator.OPO, AdvantageEstimator.REINFORCE_PLUS_PLUS_BASELINE, AdvantageEstimator.GPG, ]: return False else: raise NotImplementedError ================================================ FILE: verl_distillation/recipe/one_step_off_policy/vllm_sharding_manager.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright 2025 Meituan Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
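# The sharding manager below enforces a simple contract for TP inference:
# every tensor-parallel rank must see the identical input batch, so inputs
# are all-gathered across the TP group before generation and each rank keeps
# only its own chunk of the outputs afterwards. An illustrative standalone
# sketch of that round trip with plain lists (hypothetical helper, not part
# of verl's API):
def _tp_gather_chunk_sketch(per_rank_batches, tp_rank):
    # preprocess: all-gather, so every rank ends up with the full batch
    full = [item for rank_batch in per_rank_batches for item in rank_batch]
    # postprocess: chunk, each rank keeps its contiguous share, mirroring
    # DataProto.chunk(chunks=tp_size)[tp_rank]
    tp_size = len(per_rank_batches)
    share = len(full) // tp_size
    return full[tp_rank * share : (tp_rank + 1) * share]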
import logging import os from torch.distributed.device_mesh import DeviceMesh from verl import DataProto from verl.protocol import all_gather_data_proto from verl.third_party.vllm import parallel_state as vllm_ps from verl.utils.debug import GPUMemoryLogger from verl.utils.device import get_torch_device from verl.utils.torch_functional import check_device_is_available from verl.workers.sharding_manager.base import BaseShardingManager logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) class VLLMShardingManager(BaseShardingManager): @check_device_is_available() def __init__(self, inference_engine, device_mesh: DeviceMesh): self.device_mesh = device_mesh self.inference_engine = inference_engine inference_engine.wake_up() assert device_mesh is not None assert inference_engine is not None self.tp_size = self.device_mesh["infer_tp"].size() self.tp_rank = self.device_mesh["infer_tp"].get_local_rank() self.timing = {} gen_dp_rank = self.device_mesh["dp"].get_local_rank() get_torch_device().manual_seed(gen_dp_rank + 1000) self.gen_random_states = get_torch_device().get_rng_state() @GPUMemoryLogger(role="vllm sharding_manager", logger=logger) def __enter__(self): get_torch_device().set_rng_state(self.gen_random_states) @GPUMemoryLogger(role="vllm sharding_manager", logger=logger) def __exit__(self, exc_type, exc_value, traceback): self.gen_random_states = get_torch_device().get_rng_state() self.inference_engine.reset_prefix_cache() @GPUMemoryLogger(role="vllm sharding_manager", logger=logger) def preprocess_data(self, data: DataProto) -> DataProto: """All-gather across the TP group so that each rank has identical input.""" if self.tp_size == 1: return data group = vllm_ps.get_tensor_model_parallel_group().device_group all_gather_data_proto(data=data, process_group=group) return data @GPUMemoryLogger(role="vllm sharding_manager", logger=logger) def postprocess_data(self, data: DataProto) -> DataProto: """Get this TP rank's chunk of the data, since we all-gather in preprocess.""" if self.tp_size == 1: return data return data.chunk(chunks=self.tp_size)[self.tp_rank] ================================================ FILE: verl_distillation/recipe/onpolicy_distill/__init__.py ================================================ # On-policy distillation recipe package. ================================================ FILE: verl_distillation/recipe/onpolicy_distill/config/onpolicy_distill_trainer.yaml ================================================ hydra: searchpath: - file://verl/trainer/config defaults: - ppo_trainer - _self_ # This recipe reuses `ppo_trainer` config and only provides a new training entry. # You are expected to override most fields (models, data, rollout, etc.) via CLI. data: gen_batch_size: ${data.train_batch_size} trainer: project_name: verl-on-policy-distill ================================================ FILE: verl_distillation/recipe/onpolicy_distill/main_onpolicy_distill.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # limitations under the License. """ Note that we don't combine the main with ray_trainer, as ray_trainer is used by other mains. """ import hydra import ray from omegaconf import OmegaConf from verl.trainer.main_ppo import TaskRunner, run_ppo from verl.utils.import_utils import load_extern_type from .onpolicy_distill_trainer import RayOnPolicyDistillTrainer def create_rl_dataset(data_paths, data_config, tokenizer, processor, is_train=True, max_samples: int = -1): """Create a dataset. Arguments: data_paths: List of paths to data files. data_config: The data config. tokenizer (Tokenizer): The tokenizer. processor (Processor): The processor. Returns: dataset (Dataset): The dataset. """ from torch.utils.data import Dataset from verl.utils.dataset.onerec_dataset import OneRecDataset # Check if a custom dataset class is specified in the data configuration # and if the path to the custom class is provided if "custom_cls" in data_config and data_config.custom_cls.get("path", None) is not None: # Dynamically load the custom dataset class dataset_cls = load_extern_type(data_config.custom_cls.path, data_config.custom_cls.name) # Verify that the custom dataset class inherits from torch.utils.data.Dataset if not issubclass(dataset_cls, Dataset): raise TypeError( f"The custom dataset class '{data_config.custom_cls.name}' from " f"'{data_config.custom_cls.path}' must inherit from torch.utils.data.Dataset" ) elif "datagen" in data_config and data_config.datagen.get("path", None) is not None and is_train: # If a data generation strategy is specified, use the DynamicGenDataset class from verl.utils.dataset.dynamicgen_dataset import DynamicGenDataset dataset_cls = DynamicGenDataset print("Using DynamicGenDataset for data generation.") else: # Use the default OneRecDataset class if no custom class is specified dataset_cls = OneRecDataset print(f"Using dataset class: {dataset_cls.__name__}") # Instantiate the dataset using the determined dataset class dataset = dataset_cls( data_files=data_paths, tokenizer=tokenizer, processor=processor, config=data_config, max_samples=max_samples, ) return dataset @ray.remote(num_cpus=1) class OnPolicyDistillTaskRunner(TaskRunner): def run(self, config): import os import socket from pprint import pprint from omegaconf import OmegaConf from verl.trainer.ppo.reward import load_reward_manager from verl.trainer.ppo.utils import need_critic, need_reference_policy from verl.utils import hf_processor, hf_tokenizer from verl.utils.config import validate_config from verl.utils.dataset.rl_dataset import collate_fn from verl.utils.fs import copy_to_local from verl.utils.import_utils import load_extern_type print(f"TaskRunner hostname: {socket.gethostname()}, PID: {os.getpid()}") pprint(OmegaConf.to_container(config, resolve=True)) OmegaConf.resolve(config) # Initialize role worker mapping self.role_worker_mapping = {} self.mapping = {} # Add actor rollout worker based on the actor strategy actor_rollout_cls, ray_worker_group_cls = self.add_actor_rollout_worker(config) # Add critic worker to role mapping self.add_critic_worker(config) # Add reward model worker if enabled self.add_reward_model_worker(config) # Add a reference policy worker if KL loss or KL reward is used self.add_ref_policy_worker(config, actor_rollout_cls) # validate config validate_config( config=config, use_reference_policy=need_reference_policy(self.role_worker_mapping), use_critic=need_critic(config), ) # Download the checkpoint from HDFS to the
local machine local_path = copy_to_local( config.actor_rollout_ref.model.path, use_shm=config.actor_rollout_ref.model.get("use_shm", False) ) # Instantiate the tokenizer and processor trust_remote_code = config.data.get("trust_remote_code", False) tokenizer = hf_tokenizer(local_path, trust_remote_code=trust_remote_code) # Used for multimodal LLM, could be None processor = hf_processor(local_path, trust_remote_code=trust_remote_code, use_fast=True) if config.actor_rollout_ref.model.get("custom_chat_template", None) is not None: print(f'{config.actor_rollout_ref.model.custom_chat_template=}') if processor is not None: processor.chat_template = config.actor_rollout_ref.model.custom_chat_template if tokenizer is not None: tokenizer.chat_template = config.actor_rollout_ref.model.custom_chat_template # Load the reward manager for training and validation reward_fn = load_reward_manager( config, tokenizer, num_examine=0, **config.reward_model.get("reward_kwargs", {}) ) val_reward_fn = load_reward_manager( config, tokenizer, num_examine=1, **config.reward_model.get("reward_kwargs", {}) ) # Initialize resource pool manager resource_pool_manager = self.init_resource_pool_mgr(config) # Create training and validation datasets from verl.trainer.main_ppo import create_rl_sampler train_dataset = create_rl_dataset( config.data.train_files, config.data, tokenizer, processor, is_train=True, max_samples=config.data.get("train_max_samples", -1), ) val_dataset = create_rl_dataset( config.data.val_files, config.data, tokenizer, processor, is_train=False, max_samples=config.data.get("val_max_samples", -1), ) train_sampler = create_rl_sampler(config.data, train_dataset) # Initialize the on-policy distillation trainer with RayOnPolicyDistillTrainer instead of RayPPOTrainer trainer = RayOnPolicyDistillTrainer( config=config, tokenizer=tokenizer, processor=processor, role_worker_mapping=self.role_worker_mapping, resource_pool_manager=resource_pool_manager, ray_worker_group_cls=ray_worker_group_cls, reward_fn=reward_fn, val_reward_fn=val_reward_fn, train_dataset=train_dataset, val_dataset=val_dataset, collate_fn=collate_fn, train_sampler=train_sampler, ) # Initialize the workers of the trainer trainer.init_workers() # Start the training process trainer.fit() @hydra.main(config_path="config", config_name="onpolicy_distill_trainer", version_base=None) def main(config): """Main entry point for on-policy distillation training with Hydra configuration management. Args: config: Hydra configuration dictionary containing training parameters. """ run_ppo(config, task_runner_class=OnPolicyDistillTaskRunner) if __name__ == "__main__": main() ================================================ FILE: verl_distillation/recipe/onpolicy_distill/onpolicy_distill_trainer.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ FSDP on-policy distillation trainer with Ray-based single controller.
This trainer supports model-agnostic model initialization with HuggingFace """ import gc import os import uuid from collections import defaultdict from copy import deepcopy from pprint import pprint import numpy as np import torch from tqdm import tqdm from verl import DataProto from verl.trainer.ppo.core_algos import agg_loss from verl.trainer.ppo.metric_utils import ( compute_on_policy_distill_data_metrics, compute_throughout_metrics, compute_timing_metrics) from verl.trainer.ppo.ray_trainer import (AdvantageEstimator, RayPPOTrainer, apply_kl_penalty, compute_advantage, compute_response_mask) from verl.trainer.ppo.reward import compute_reward from verl.utils.metric import reduce_metrics from verl.utils.profiler import marked_timer from verl.utils.rollout_skip import RolloutSkip class RayOnPolicyDistillTrainer(RayPPOTrainer): """ Note that this trainer runs on the driver process on a single CPU/GPU node. """ def compute_kl_related_metrics(self, batch: DataProto, metrics: dict, timing_raw: dict): batch.batch["response_mask"] = compute_response_mask(batch) # recompute old_log_probs with marked_timer("old_log_prob", timing_raw, "blue"): old_log_prob = self.actor_rollout_wg.compute_log_prob(batch) entropys = old_log_prob.batch["entropys"] response_masks = batch.batch["response_mask"] loss_agg_mode = self.config.actor_rollout_ref.actor.loss_agg_mode entropy_agg = agg_loss(loss_mat=entropys, loss_mask=response_masks, loss_agg_mode=loss_agg_mode) old_log_prob_metrics = {"actor/entropy": entropy_agg.detach().item()} metrics.update(old_log_prob_metrics) old_log_prob.batch.pop("entropys") batch = batch.union(old_log_prob) if self.use_reference_policy: # compute reference log_prob with marked_timer("ref", timing_raw, "olive"): if not self.ref_in_actor: ref_log_prob = self.ref_policy_wg.compute_ref_log_prob(batch) else: ref_log_prob = self.actor_rollout_wg.compute_ref_log_prob(batch) batch = batch.union(ref_log_prob) return batch def fit(self): """ The training loop of PPO. The driver process only needs to call the compute functions of the worker group through RPC to construct the PPO dataflow. The lightweight advantage computation is done on the driver process. """ from omegaconf import OmegaConf from verl.utils.tracking import Tracking logger = Tracking( project_name=self.config.trainer.project_name, experiment_name=self.config.trainer.experiment_name, default_backend=self.config.trainer.logger, config=OmegaConf.to_container(self.config, resolve=True), ) self.global_steps = 0 self.gen_steps = 0 # load checkpoint before doing anything self._load_checkpoint() # perform validation before training # currently, we only support validation using the reward_function.
if self.val_reward_fn is not None and self.config.trainer.get("val_before_train", True): val_metrics = self._validate() assert val_metrics, f"{val_metrics=}" pprint(f"Initial validation metrics: {val_metrics}") logger.log(data=val_metrics, step=self.global_steps) if self.config.trainer.get("val_only", False): return if self.config.actor_rollout_ref.rollout.get("skip_rollout", False): rollout_skip = RolloutSkip(self.config, self.actor_rollout_wg) rollout_skip.wrap_generate_sequences() # add tqdm progress_bar = tqdm(total=self.total_training_steps, initial=self.global_steps, desc="Training Progress") # we start from step 1 self.global_steps += 1 self.gen_steps += 1 last_val_metrics = None prev_step_profile = False curr_step_profile = ( self.global_steps in self.config.global_profiler.steps if self.config.global_profiler.steps is not None else False ) next_step_profile = False timing_raw = defaultdict(float) batch = None num_prompt_in_batch = 0 num_gen_batches = 0 for epoch in range(self.config.trainer.total_epochs): for batch_dict in self.train_dataloader: metrics = {} with marked_timer("start_profile", timing_raw): self._start_profiling( not prev_step_profile and curr_step_profile if self.config.global_profiler.profile_continuous_steps else curr_step_profile ) new_batch: DataProto = DataProto.from_single_dict(batch_dict) num_gen_batches += 1 gen_batch = self._get_gen_batch(new_batch) gen_batch_output = gen_batch.repeat( repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True ) is_last_step = self.global_steps >= self.total_training_steps with marked_timer("step", timing_raw): # generate a batch with marked_timer("gen", timing_raw, "red"): if not self.async_rollout_mode: gen_batch_output = self.actor_rollout_wg.generate_sequences(gen_batch_output) else: gen_batch_output = self.async_rollout_manager.generate_sequences(gen_batch_output) timing_raw.update(gen_batch_output.meta_info["timing"]) gen_batch_output.meta_info.pop("timing", None) new_batch.non_tensor_batch["uid"] = np.array( [str(uuid.uuid4()) for _ in range(len(new_batch.batch))], dtype=object ) # repeat to align with repeated responses in rollout new_batch = new_batch.repeat(repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True) new_batch = new_batch.union(gen_batch_output) batch = new_batch # === Updating === # Balance the number of valid tokens across DP ranks. # NOTE: This usually changes the order of data in the `batch`, # which won't affect the advantage calculation (since it's based on uid), # but might affect the loss calculation (due to the change of mini-batching). # TODO: Decouple the DP balancing and mini-batching. 
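# compute_advantage below additionally receives distill_adv_min_clip and
# distill_adv_max_clip, i.e. the distillation advantage is clamped before the
# actor update. The exact estimator lives in verl.trainer.ppo.core_algos and
# is selected by algorithm.adv_estimator; what follows is only a hedged
# sketch of one common on-policy distillation signal, the per-token
# teacher/student log-ratio, with hypothetical names:
def _distill_adv_sketch(ref_log_prob, old_log_prob, response_mask, min_clip=None, max_clip=None):
    import torch
    # positive where the reference (teacher) puts more mass on the sampled
    # token than the current student policy does
    adv = (ref_log_prob - old_log_prob) * response_mask
    if min_clip is not None:
        adv = torch.clamp(adv, min=min_clip)
    if max_clip is not None:
        adv = torch.clamp(adv, max=max_clip)
    return adv
# As in the standard trainer, DP balancing runs before the update: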
                    if self.config.trainer.balance_batch:
                        self._balance_batch(batch, metrics=metrics)

                    # compute global_valid tokens
                    batch.meta_info["global_token_num"] = torch.sum(batch.batch["attention_mask"], dim=-1).tolist()

                    batch = self.compute_kl_related_metrics(batch, metrics, timing_raw)

                    # Compute rollout IS weights and mismatch metrics (inherited from RayPPOTrainer)
                    batch, is_metrics = self.compute_rollout_importance_weights_and_add_to_batch(batch)
                    # IS and mismatch metrics already have mismatch/ prefix
                    metrics.update(is_metrics)

                    with marked_timer("adv", timing_raw, "brown"):
                        # compute advantages, executed on the driver process
                        norm_adv_by_std_in_grpo = self.config.algorithm.get("norm_adv_by_std_in_grpo", True)
                        batch = compute_advantage(
                            batch,
                            adv_estimator=self.config.algorithm.adv_estimator,
                            gamma=self.config.algorithm.gamma,
                            lam=self.config.algorithm.lam,
                            num_repeat=self.config.actor_rollout_ref.rollout.n,
                            distill_adv_max_clip=self.config.algorithm.distill_adv_max_clip,
                            distill_adv_min_clip=self.config.algorithm.distill_adv_min_clip,
                            norm_adv_by_std_in_grpo=norm_adv_by_std_in_grpo,
                        )

                    # update actor
                    with marked_timer("update_actor", timing_raw, "red"):
                        actor_output = self.actor_rollout_wg.update_actor(batch)
                        actor_output_metrics = reduce_metrics(actor_output.meta_info["metrics"])
                        metrics.update(actor_output_metrics)

                    # pop multi_modal_inputs before save model
                    non_tensor_batch_keys_to_pop = []
                    if "multi_modal_data" in batch.non_tensor_batch:
                        non_tensor_batch_keys_to_pop.append("multi_modal_data")
                    if "multi_modal_inputs" in batch.non_tensor_batch:
                        non_tensor_batch_keys_to_pop.append("multi_modal_inputs")
                    if "processor_kwargs" in batch.non_tensor_batch:
                        non_tensor_batch_keys_to_pop.append("processor_kwargs")
                    batch.pop(non_tensor_batch_keys=non_tensor_batch_keys_to_pop)
                    gc.collect()

                    # Log rollout generations if enabled
                    rollout_data_dir = self.config.trainer.get("rollout_data_dir", None)
                    if rollout_data_dir:
                        with marked_timer("dump_rollout_generations", timing_raw, color="green"):
                            inputs = self.tokenizer.batch_decode(batch.batch["prompts"], skip_special_tokens=True)
                            outputs = self.tokenizer.batch_decode(batch.batch["responses"], skip_special_tokens=True)
                            INVALID_FIELDS = [
                                "score",
                                "index",
                                "uid",
                                "__num_turns__",
                                "multi_modal_inputs",
                                "sample_reward",
                                "raw_prompt",
                            ]
                            extra_infos = {}
                            for key in batch.non_tensor_batch.keys():
                                if key not in INVALID_FIELDS:
                                    extra_infos[key] = batch.non_tensor_batch[key].tolist()
                            self._dump_generations(
                                inputs=inputs,
                                outputs=outputs,
                                scores=[0 for _ in range(len(outputs))],
                                reward_extra_infos_dict=extra_infos,
                                dump_path=rollout_data_dir,
                                logger=logger,
                            )

                    # validate
                    if (
                        self.val_reward_fn is not None
                        and self.config.trainer.test_freq > 0
                        and (is_last_step or self.global_steps % self.config.trainer.test_freq == 0)
                    ):
                        with marked_timer("testing", timing_raw, "green"):
                            val_metrics: dict = self._validate()
                            if is_last_step:
                                last_val_metrics = val_metrics
                        metrics.update(val_metrics)

                    if self.config.trainer.save_freq > 0 and (
                        is_last_step or self.global_steps % self.config.trainer.save_freq == 0
                    ):
                        with marked_timer("save_checkpoint", timing_raw, "green"):
                            self._save_checkpoint()

                with marked_timer("stop_profile", timing_raw):
                    next_step_profile = (
                        self.global_steps + 1 in self.config.global_profiler.steps
                        if self.config.global_profiler.steps is not None
                        else False
                    )
                    self._stop_profiling(
                        curr_step_profile and not next_step_profile
                        if self.config.global_profiler.profile_continuous_steps
                        else curr_step_profile
                    )
                prev_step_profile = curr_step_profile
                curr_step_profile = next_step_profile
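                # Profiling bookkeeping: when profile_continuous_steps is set, the start/stop
                # calls above only fire at the edges of a contiguous window of profiled steps,
                # so a single continuous trace covers the whole window.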
                # collect metrics
                metrics.update(compute_on_policy_distill_data_metrics(batch=batch, use_critic=self.use_critic))
                metrics.update(compute_timing_metrics(batch=batch, timing_raw=timing_raw))
                # TODO: implement actual tflops and theoretical tflops
                n_gpus = self.resource_pool_manager.get_n_gpus()
                metrics.update(compute_throughout_metrics(batch=batch, timing_raw=timing_raw, n_gpus=n_gpus))
                timing_raw = defaultdict(float)  # clear timing

                metrics["train/num_gen_batches"] = num_gen_batches
                batch = None
                num_prompt_in_batch = 0
                num_gen_batches = 0

                # TODO: make a canonical logger that supports various backends
                logger.log(data=metrics, step=self.global_steps)

                if is_last_step:
                    pprint(f"Final validation metrics: {last_val_metrics}")
                    progress_bar.close()
                    return

                progress_bar.update(1)
                self.global_steps += 1
                self.gen_steps += 1

        # check if the last-step checkpoint exists
        checkpoint_dir = os.path.join(self.config.trainer.default_local_dir, f"global_step_{self.global_steps}")
        if not os.path.exists(checkpoint_dir):
            # save last step checkpoint
            timing_raw = defaultdict(float)
            with marked_timer("save_checkpoint", timing_raw, "green"):
                self._save_checkpoint()
            metrics = {f"timing/{k}": v for k, v in timing_raw.items()}
            logger.log(data=metrics, step=self.global_steps)


================================================
FILE: verl_distillation/recipe/onpolicy_distill/run_qwen3_distill.sh
================================================
#!/bin/bash
# On-policy Distillation: distill from a teacher model (e.g., Qwen3-1.7B) to a student model
# with extended vocabulary (e.g., recommendation pretrained model with item tokens).
#
# Usage:
#   export BASE_MODEL=/path/to/student_model
#   export TEACHER_MODEL=/path/to/teacher_model
#   export DATASET_PARQUET=/path/to/train.parquet
#   bash run_qwen3_distill.sh [hostfile]

set -x
HOME=$(pwd)
timestamp=$(date +"%Y-%m-%d-%H:%M:%S")

# tmp_hostfile_dir must be kept; the framework depends on it
HOSTFILE="${1:-/etc/mpi/hostfile}"
NODES=$(wc -l < $HOSTFILE)
if [ ! -d "$HOME/tmp_hostfile_dir" ]; then
    mkdir -p "$HOME/tmp_hostfile_dir"
fi
if [ ! -d "$HOME/timeline_dir" ]; then
    mkdir -p "$HOME/timeline_dir"
fi
cat $HOSTFILE > "$HOME/tmp_hostfile_dir/hostfile_$timestamp"

N_GPUS_PER_NODE=2
project_name="verl_on_policy_distill"
experiment_name="verl_1.7b_distill_${NODES}_${timestamp}"
CKPT_HOME=${CKPT_HOME:-"$HOME/outputs"}
CKPT_DIR=${CKPT_DIR:-"${CKPT_HOME}/ckpts/${project_name}/${experiment_name}/"}

rollout_mode="async"
rollout_name="sglang"  # sglang or vllm
if [ "$rollout_mode" = "async" ]; then
    export VLLM_USE_V1=1
    return_raw_chat="True"
fi
export HYDRA_FULL_ERROR=1

# rollout buffer setting
NUM_WORKER=16
CUDA_GRAPH_MAX_BS=64

# ===== Open-source friendly defaults =====
# You MUST set these paths for your own environment.
export BASE_MODEL=${BASE_MODEL:-""}
export TEACHER_MODEL=${TEACHER_MODEL:-""}
export DATASET_PARQUET=${DATASET_PARQUET:-"$(realpath ../output/onpolicy_distillation.parquet)"}

# Logging: default is console only.
# To enable W&B, export WANDB_API_KEY and override trainer.logger:
#   export WANDB_API_KEY="your-key"
#   ... trainer.logger='[console,wandb]' ...
export WANDB_API_KEY=${WANDB_API_KEY:-""}

if [ -z "$BASE_MODEL" ] || [ -z "$TEACHER_MODEL" ] || [ -z "$DATASET_PARQUET" ]; then
    echo "[ERROR] Please set BASE_MODEL / TEACHER_MODEL / DATASET_PARQUET before running."
echo " BASE_MODEL=$BASE_MODEL" echo " TEACHER_MODEL=$TEACHER_MODEL" echo " DATASET_PARQUET=$DATASET_PARQUET" exit 1 fi export USE_DYNAMIC_BSZ=True # 是否开启动态batch size, 则无视上述batch_size设置,按token数来分配显卡,避免某张显卡处理的token数过多导致OOM显存溢出 export MAX_TOKENS_PER_GPU=24000 # n*(prompt_len+response_len) export TRAIN_BATCH_SIZE=32 export LEARNING_RATE=5e-6 export ROLLOUT_N=1 # 每个prompt的CoT采样数量 export BEAM_SIZE_PER_ROLLOUT=1 # 每个CoT的beam search数量 export TEMPERATURE=1.1 export ENABLE_THINK=True # 是否在user prompt末尾添加/think export THINK_MODE="auto" export MAX_RESPONSE_LEN=2048 export DISTILL_ADV_MAX=5.0 export DISTILL_ADV_MIN=-30.0 # ===== Extended vocabulary distillation settings ===== # Token ID threshold: tokens with id >= this value are considered "extended vocab tokens" # For Qwen3 with OneRec item tokens, 151669 is the start of extended vocabulary. # Set to empty string or "null" to disable extended vocab handling. export EXTEND_VOCAB_START_TOKEN=151669 # Whether to mask the entire response if it contains any extended token export MASK_RESPONSE_IF_HAVE_EXTEND_TOKEN=False export TRAIN_FILES=$DATASET_PARQUET export VAL_FILES=$DATASET_PARQUET echo "Training files: $TRAIN_FILES" echo "Validation files: $VAL_FILES" PYTHONUNBUFFERED=1 python3 -m recipe.onpolicy_distill.main_onpolicy_distill --config-name='onpolicy_distill_trainer'\ +ray_kwargs.ray_init.runtime_env.env_vars.TRACE_GPU_MEM=False \ +ray_kwargs.ray_init.runtime_env.env_vars.WORK_DIR=$HOME \ +ray_kwargs.ray_init.runtime_env.env_vars.WANDB_API_KEY="$WANDB_API_KEY" \ +ray_kwargs.ray_init.runtime_env.env_vars.nosp="1" \ +ray_kwargs.ray_init.runtime_env.env_vars.NCCL_IB_ECE_ENABLE="0" \ +ray_kwargs.ray_init.runtime_env.env_vars.CUDA_DEVICE_MAX_CONNECTIONS="32" \ +ray_kwargs.ray_init.runtime_env.env_vars.NVTE_ALLOW_NONDETERMINISTIC_ALGO="1" \ +ray_kwargs.ray_init.runtime_env.env_vars.NCCL_NVLS_ENABLE="0" \ +ray_kwargs.ray_init.runtime_env.env_vars.PYTHONWARNINGS="ignore" \ +ray_kwargs.ray_init.runtime_env.env_vars.NCCL_DEBUG="VERSION" \ +ray_kwargs.ray_init.runtime_env.env_vars.NCCL_IB_DISABLE="0" \ +ray_kwargs.ray_init.runtime_env.env_vars.NCCL_IB_GID_INDEX="3" \ +ray_kwargs.ray_init.runtime_env.env_vars.NCCL_ASYNC_ERROR_HANDLING="1" \ +ray_kwargs.ray_init.runtime_env.env_vars.NCCL_SOCKET_IFNAME="bond0" \ +ray_kwargs.ray_init.runtime_env.env_vars.NCCL_IB_HCA="mlx5" \ +ray_kwargs.ray_init.runtime_env.env_vars.NCCL_PXN_DISABLE="0" \ +ray_kwargs.ray_init.runtime_env.env_vars.NCCL_IB_QPS_PER_CONNECTION="4" \ +ray_kwargs.ray_init.runtime_env.env_vars.SGLANG_VLM_CACHE_SIZE_MB="512" \ +ray_kwargs.ray_init.runtime_env.env_vars.TIMESTAMP=$timestamp \ algorithm.adv_estimator=on_policy_distill \ data.train_files=$TRAIN_FILES \ data.val_files=$VAL_FILES \ data.max_prompt_length=10240 \ ++data.enable_think=$ENABLE_THINK \ ++data.think_mode=$THINK_MODE \ data.prompt_key=prompt \ data.image_key=dummy \ data.video_key=dummy \ ++data.data_source_key='source' \ data.reward_fn_key='source' \ data.max_response_length=$MAX_RESPONSE_LEN \ data.train_batch_size=$TRAIN_BATCH_SIZE \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.return_raw_chat=$return_raw_chat \ actor_rollout_ref.actor.use_dynamic_bsz=$USE_DYNAMIC_BSZ \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=$MAX_TOKENS_PER_GPU \ actor_rollout_ref.actor.ppo_mini_batch_size=$TRAIN_BATCH_SIZE \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=$MAX_TOKENS_PER_GPU \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=$MAX_TOKENS_PER_GPU \ actor_rollout_ref.rollout.calculate_log_probs=False \ 
    actor_rollout_ref.actor.optim.lr=${LEARNING_RATE} \
    actor_rollout_ref.actor.clip_ratio_high=0.28 \
    actor_rollout_ref.model.enable_activation_offload=True \
    actor_rollout_ref.model.path=$BASE_MODEL \
    +actor_rollout_ref.ref.model.path=$TEACHER_MODEL \
    actor_rollout_ref.model.use_remove_padding=True \
    actor_rollout_ref.model.enable_gradient_checkpointing=True \
    actor_rollout_ref.actor.use_kl_loss=True \
    actor_rollout_ref.actor.ref_log_prob_replace_val=-100 \
    actor_rollout_ref.ref.ref_log_prob_replace_val=-100 \
    actor_rollout_ref.rollout.name=$rollout_name \
    actor_rollout_ref.rollout.mode=$rollout_mode \
    actor_rollout_ref.rollout.multi_turn.max_assistant_turns=1 \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.7 \
    actor_rollout_ref.rollout.extend_vocab_start_token=$EXTEND_VOCAB_START_TOKEN \
    actor_rollout_ref.rollout.mask_response_if_have_extend_token=$MASK_RESPONSE_IF_HAVE_EXTEND_TOKEN \
    +actor_rollout_ref.rollout.engine_kwargs.sglang.chunked_prefill_size=16384 \
    +actor_rollout_ref.rollout.engine_kwargs.sglang.cuda_graph_max_bs=$CUDA_GRAPH_MAX_BS \
    +actor_rollout_ref.rollout.engine_kwargs.sglang.max_running_requests=$CUDA_GRAPH_MAX_BS \
    +actor_rollout_ref.rollout.engine_kwargs.sglang.disable_radix_cache=False \
    +actor_rollout_ref.rollout.engine_kwargs.sglang.log_level=info \
    +actor_rollout_ref.rollout.engine_kwargs.sglang.log_requests=False \
    +actor_rollout_ref.rollout.engine_kwargs.sglang.log_requests_level=2 \
    actor_rollout_ref.rollout.n=$ROLLOUT_N \
    actor_rollout_ref.rollout.temperature=${TEMPERATURE} \
    actor_rollout_ref.rollout.top_p=0.95 \
    actor_rollout_ref.rollout.top_k=200 \
    actor_rollout_ref.rollout.tensor_model_parallel_size=1 \
    actor_rollout_ref.rollout.free_cache_engine=True \
    actor_rollout_ref.rollout.agent.num_workers=$NUM_WORKER \
    actor_rollout_ref.rollout.agent.default_agent_loop=tool_agent \
    algorithm.use_kl_in_reward=False \
    ++algorithm.distill_adv_max_clip=$DISTILL_ADV_MAX \
    ++algorithm.distill_adv_min_clip=$DISTILL_ADV_MIN \
    actor_rollout_ref.actor.loss_agg_mode="token-mean" \
    actor_rollout_ref.actor.kl_loss_coef=0.0 \
    trainer.logger='[console]' \
    trainer.project_name=$project_name \
    trainer.experiment_name=$experiment_name \
    trainer.n_gpus_per_node=$N_GPUS_PER_NODE \
    trainer.nnodes=$NODES \
    trainer.save_freq=5 \
    trainer.max_actor_ckpt_to_keep=100 \
    trainer.test_freq=-1 \
    trainer.default_hdfs_dir=null \
    trainer.default_local_dir=$CKPT_DIR \
    trainer.val_before_train=False \
    trainer.val_only=False \
    trainer.rollout_data_dir=$HOME \
    +trainer.validation_data_dir=$HOME \
    +trainer.ray_timeline_dir=$HOME/tmp_hostfile_dir \
    trainer.total_epochs=1 2>&1 | tee $project_name-$experiment_name-$timestamp.log


================================================
FILE: verl_distillation/recipe/open_math_reasoning/README.md
================================================
# Open math reasoning

## Introduction

In this recipe, we perform SFT on the [open math reasoning](https://huggingface.co/datasets/nvidia/OpenMathReasoning) dataset using the new SFT trainer with a backend-agnostic model engine. Note that our goal is not to replicate the [AIMO-2 Winning Solution](https://arxiv.org/abs/2504.16891) work, but to demonstrate an end-to-end SFT example. Note that you may need to adjust the paths in the following scripts for your environment.
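As a quick sanity check before launching training, you can inspect the preprocessed parquet produced by the steps below. A minimal sketch, assuming `pandas` is installed and the default save directory from the preprocessing step:

```bash
python3 - <<'EOF'
import os
import pandas as pd

path = os.path.expanduser("~/data/open_math_reasoning/cot_dataset.parquet")
df = pd.read_parquet(path)
# The preprocessed rows should include 'messages' and 'extra_info' columns.
print(df.columns.tolist())
print(len(df), "rows")
EOF
```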
## Dataset Preprocessing ### Download Dataset ```bash hf download nvidia/OpenMathReasoning --repo-type dataset --include data/cot* --local-dir /path/to/dataset/nvidia/OpenMathReasoning hf download math-ai/aime24 --repo-type dataset --local-dir /path/to/dataset/math-ai/aime24 hf download math-ai/aime25 --repo-type dataset --local-dir /path/to/dataset/math-ai/aime25 ``` ### Preprocess the dataset ```bash python3 recipe/open_math_reasoning/prepare_nvidia-OpenMathReasoning_sft.py --local_dataset_path /path/to/nvidia/OpenMathReasoning --local_save_dir /path/to/open_math_reasoning ``` ### Prepare the eval dataset ```bash python3 recipe/open_math_reasoning/prepare_eval_dataset.py --local_dataset_path /path/to/dataset --local_save_dir /path/to/eval_dataset ``` ## Train the model using SFT ```bash export CKPT_HOME=/path/to/ckpt export MODEL_ID=Qwen/Qwen3-8B-Base export TRAIN_FILES=/path/to/open_math_reasoning/cot_dataset.parquet ``` ### FSDP backend ```bash export BACKEND=fsdp2 bash recipe/open_math_reasoning/run_sft_qwen3_8b.sh ``` ### Megatron backend ```bash export BACKEND=megatron bash recipe/open_math_reasoning/run_sft_qwen3_8b.sh ``` ## Eval the model ### Merge checkpoint into huggingface format FSDP backend ```bash python -m verl.model_merger merge --backend fsdp --local_dir /path/to/ckpt/global_step_19751 --target_dir /path/to/ckpt/global_step_19751/huggingface ``` Megatron backend ```bash python -m verl.model_merger merge --backend megatron --local_dir /path/to/ckpt/global_step_19751 --target_dir /path/to/ckpt/global_step_19751/huggingface --use_cpu_initialization ``` ### Generate the responses ```bash export MODEL_PATH=/path/to/ckpt/global_step_19751/huggingface bash recipe/open_math_reasoning/run_generation.sh ``` ### Evaluate the responses ```bash bash recipe/open_math_reasoning/run_eval.sh ``` You should see the results like: ```python {'test_score/aime24': 0.584375, 'test_score/aime25': 0.43333333333333335} ``` ================================================ FILE: verl_distillation/recipe/open_math_reasoning/compute_score.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. def compute_score_data_source(data_source, response, ground_truth): from verl.utils.reward_score.math_reward import compute_score if data_source in ["aime24", "aime25"]: return compute_score(response, ground_truth) else: raise ValueError(f"Unknown data source: {data_source}") ================================================ FILE: verl_distillation/recipe/open_math_reasoning/prepare_eval_dataset.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # prepare eval dataset including AIME'24, AIME'25 # hf download math-ai/aime24 --repo-type dataset --local-dir /opt/tiger/datasets/math-ai/aime24 # hf download math-ai/aime25 --repo-type dataset --local-dir /opt/tiger/datasets/math-ai/aime25 import os import datasets from verl.utils.reward_score.math_reward import remove_boxed instruction_following = "Please reason step by step, and put your final answer within \\boxed{}." def make_map_fn(data_source): def process_fn(example, idx): question_raw = example.pop("problem") question = question_raw + " " + instruction_following if "solution" not in example: example["solution"] = example["answer"] answer_raw = example.pop("solution") example.clear() try: solution = remove_boxed(answer_raw) except Exception: solution = answer_raw data = { "data_source": data_source, "prompt": [ { "role": "user", "content": question, } ], "ability": "math", "reward_model": {"style": "rule", "ground_truth": solution}, "extra_info": { "index": idx, "answer": answer_raw, "question": question_raw, }, } return data return process_fn if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument("--local_dataset_path", default=None, help="The local path to the raw dataset, if it exists.") parser.add_argument( "--local_save_dir", default="~/data/math-ai", help="The save directory for the preprocessed dataset." ) args = parser.parse_args() if args.local_dataset_path is not None: aime24_dataset_path = os.path.join(args.local_dataset_path, "math-ai/aime24") aime25_dataset_path = os.path.join(args.local_dataset_path, "math-ai/aime25") else: aime24_dataset_path = "math-ai/aime24" aime25_dataset_path = "math-ai/aime25" aime24_dataset = datasets.load_dataset(aime24_dataset_path, split="test") aime25_dataset = datasets.load_dataset(aime25_dataset_path, split="test") aime24_dataset = aime24_dataset.map(function=make_map_fn("aime24"), with_indices=True) aime25_dataset = aime25_dataset.map(function=make_map_fn("aime25"), with_indices=True) local_save_dir = os.path.expanduser(args.local_save_dir) os.makedirs(local_save_dir, exist_ok=True) aime24_dataset.to_parquet(os.path.join(local_save_dir, "aime24_test.parquet")) aime25_dataset.to_parquet(os.path.join(local_save_dir, "aime25_test.parquet")) ================================================ FILE: verl_distillation/recipe/open_math_reasoning/prepare_nvidia-OpenMathReasoning_sft.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" huggingface-cli download nvidia/OpenMathReasoning --repo-type dataset --include data/cot* \ --local-dir /path/to/nvidia/OpenMathReasoning huggingface-cli download nvidia/OpenMathReasoning --repo-type dataset --include data/cot* \ --local-dir /opt/tiger/nvidia/OpenMathReasoning """ import argparse import os import datasets if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--local_dataset_path", default=None, help="The local path to the raw dataset, if it exists.") parser.add_argument( "--local_save_dir", default="~/data/open_math_reasoning", help="The save directory for the preprocessed dataset.", ) args = parser.parse_args() local_dataset_path = args.local_dataset_path data_source = "nvidia/OpenMathReasoning" if local_dataset_path is not None: dataset = datasets.load_dataset(local_dataset_path, split="cot") else: dataset = datasets.load_dataset(data_source, split="cot") def make_map_fn(split): def process_fn(example, idx): question = example.pop("problem") solution = example.pop("generated_solution") extra_info = {} for key, value in example.items(): extra_info[key] = value example.clear() data = { "messages": [ {"role": "user", "content": question, "loss_mask": 0}, {"role": "assistant", "content": solution, "loss_mask": 1}, ], "extra_info": extra_info, } return data return process_fn # filter out data where the problem_type is not has_answer_extracted dataset = dataset.filter(lambda example: example["problem_type"] == "has_answer_extracted") dataset = dataset.map(function=make_map_fn("cot"), with_indices=True) local_save_dir = os.path.expanduser(args.local_save_dir) os.makedirs(local_save_dir, exist_ok=True) dataset.to_parquet(os.path.join(local_save_dir, "cot_dataset.parquet")) ================================================ FILE: verl_distillation/recipe/open_math_reasoning/run_eval.sh ================================================ #!/usr/bin/env bash # Evaluation python3 -m verl.trainer.main_eval \ data.path=$HOME/data/gen/qwen_8b_gen_test.parquet \ custom_reward_function.path=recipe/open_math_reasoning/compute_score.py \ custom_reward_function.name=compute_score_data_source ================================================ FILE: verl_distillation/recipe/open_math_reasoning/run_generation.sh ================================================ #!/usr/bin/env bash MODEL_PATH=${MODEL_PATH:-/path/to/ckpt/global_step_19751/huggingface} NGPUS_PER_NODE=${NGPUS_PER_NODE:-8} NNODES=${NNODES:-1} OUTPUT_PATH=${OUTPUT_PATH:-$HOME/data/gen/qwen_8b_gen_test.parquet} GEN_TP=${GEN_TP:-1} # Default tensor parallel size to 2 aime24_test_path=${HOME}/data/math-ai/aime24_test.parquet aime25_test_path=${HOME}/data/math-ai/aime25_test.parquet train_files="['$aime24_test_path', '$aime25_test_path']" python3 -m verl.trainer.main_generation_server \ trainer.nnodes="${NNODES}" \ trainer.n_gpus_per_node="${NGPUS_PER_NODE}" \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.model.trust_remote_code=True \ actor_rollout_ref.rollout.temperature=1.0 \ actor_rollout_ref.rollout.top_p=0.7 \ actor_rollout_ref.rollout.prompt_length=2048 \ actor_rollout_ref.rollout.response_length=20480 \ actor_rollout_ref.rollout.tensor_model_parallel_size="${GEN_TP}" \ actor_rollout_ref.rollout.gpu_memory_utilization=0.9 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.n=32 \ data.train_files="$train_files" \ data.prompt_key=prompt \ +data.output_path="${OUTPUT_PATH}" \ ================================================ FILE: 
verl_distillation/recipe/open_math_reasoning/run_sft_qwen3_8b.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail ENTRYPOINT=${ENTRYPOINT:-"-m verl.trainer.sft_trainer"} TRAIN_FILES=${TRAIN_FILES:-/path/to/cot_dataset.parquet} backend=${BACKEND:-fsdp} project_name=verl_sft_test RESUME_MODE=auto MODEL_ID=${MODEL_ID:-Qwen/Qwen3-8B-Base} SP_SIZE=${SP_SIZE:-8} FSDP_SIZE=${FSDP_SIZE:-16} FSDP_STRATEGY=${FSDP_STRATEGY:-"fsdp2"} TP_SIZE=${TP_SIZE:-8} PP_SIZE=${PP_SIZE:-2} VPP_SIZE=${VPP_SIZE:-null} CP_SIZE=${CP_SIZE:-1} PAD_MODE=${PAD_MODE:-no_padding} USE_REMOVE_PADDING=${USE_REMOVE_PADDING:-True} FSDP_ENGINE_CONFIG="\ engine=${backend} \ optim=${backend} \ optim.lr=2e-5 \ optim.lr_warmup_steps_ratio=0.01 \ optim.weight_decay=0.1 \ optim.betas="[0.9,0.95]" \ optim.clip_grad=1.0 \ optim.min_lr_ratio=0.1 \ optim.warmup_style=cosine \ engine.ulysses_sequence_parallel_size=${SP_SIZE} \ engine.strategy=${FSDP_STRATEGY} \ engine.fsdp_size=${FSDP_SIZE}" MEGATRON_ENGINE_CONFIG="\ engine=${backend} \ optim=${backend} \ optim.lr=2e-5 \ optim.lr_warmup_steps_ratio=0.01 \ optim.weight_decay=0.1 \ optim.betas="[0.9,0.95]" \ optim.clip_grad=1.0 \ optim.lr_warmup_init=0 \ optim.lr_decay_style=cosine \ optim.min_lr=2e-6 \ engine.tensor_model_parallel_size=${TP_SIZE} \ engine.pipeline_model_parallel_size=${PP_SIZE} \ engine.virtual_pipeline_model_parallel_size=${VPP_SIZE} \ engine.context_parallel_size=${CP_SIZE} \ engine.use_mbridge=False" if [ "$backend" = "fsdp" ]; then ENGINE_CONFIG="$FSDP_ENGINE_CONFIG" echo "Using fsdp engine" exp_name=nvidia-openmathreasoning-qwen3-8b-${backend}-${FSDP_STRATEGY}-sp${SP_SIZE}-fsdp-1008a1 else ENGINE_CONFIG="$MEGATRON_ENGINE_CONFIG" echo "Using megatron engine" exp_name=nvidia-openmathreasoning-${backend}-tp${TP_SIZE}-pp${PP_SIZE}-vpp${VPP_SIZE}-cp${CP_SIZE}-megatron-1018a1 fi CKPT_HOME=${CKPT_HOME:-$HOME/open_verl/sft/${project_name}/${exp_name}} mkdir -p "${CKPT_HOME}" torchrun --standalone --nnodes=1 --nproc-per-node=${NUM_TRAINERS:-8} \ ${ENTRYPOINT} \ data.train_files="${TRAIN_FILES}" \ data.train_batch_size=96 \ data.max_length=32768 \ data.pad_mode=${PAD_MODE} \ data.truncation=error \ data.use_dynamic_bsz=True \ data.max_token_len_per_gpu=65536 \ data.messages_key=messages \ model.path=$MODEL_ID \ model.use_remove_padding=${USE_REMOVE_PADDING} \ ${ENGINE_CONFIG} \ trainer.test_freq=-1 \ trainer.save_freq=4000 \ trainer.logger=['console','wandb'] \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.total_epochs=1 \ trainer.default_local_dir="${CKPT_HOME}" \ trainer.resume_mode=${RESUME_MODE} \ trainer.max_ckpt_to_keep=5 \ checkpoint.save_contents=[model,optimizer,extra] ================================================ FILE: verl_distillation/recipe/prime/__init__.py ================================================ # Copyright 2024 PRIME team and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
================================================ FILE: verl_distillation/recipe/prime/config/prime_trainer.yaml ================================================ # the prime config will override default ppo_trainer.yaml hydra: searchpath: - file://verl/trainer/config defaults: - ppo_trainer - _self_ data: filter_accuracy: True accuracy_lower_bound: 0.2 accuracy_upper_bound: 0.8 oversample_factor: 4.0 # Sample more responses than the batch size. prompts satisfying the filter will be prioritized. filter_truncate: True truncation: right actor_rollout_ref: hybrid_engine: True model: use_remove_padding: True rollout: # number of responses (i.e. num sample times) n: 4 actor: entropy_coeff: 0.001 reward_model: enable: True strategy: fsdp model: ref_path: ${reward_model.model.path} use_remove_padding: True use_fused_kernels: ${actor_rollout_ref.model.use_fused_kernels} fused_kernel_options: impl_backend: torch # triton, torch tokenizer_path: ${actor_rollout_ref.model.path} enable_gradient_checkpointing: ${actor_rollout_ref.model.enable_gradient_checkpointing} ref_type: freeze fsdp_config: min_num_params: 0 param_offload: ${actor_rollout_ref.actor.fsdp_config.param_offload} optimizer_offload: ${actor_rollout_ref.actor.fsdp_config.optimizer_offload} update: before # ``before`` for double-forward, ``after`` for single-forward optim: lr: 1e-6 lr_warmup_steps: -1 # Prioritized. Negative values mean delegating to lr_warmup_steps_ratio. lr_warmup_steps_ratio: 0. # the total steps will be injected during runtime min_lr_ratio: null warmup_style: null # deprecated lr_scheduler_type: constant total_training_steps: -1 # must be overridden by program weight_decay: 0. grad_clip: 10.0 beta_train: 0.05 loss_type: ce # currently only supports ce loss prime_granularity: token prime_norm: batch_norm # batch_norm or none. if set to none, the normalizer is beta_train mini_batch_size: ${actor_rollout_ref.actor.ppo_mini_batch_size} reward_manager: prime algorithm: adv_estimator: rloo # now supports rloo. it treats different source of reward separately. kl_ctrl: type: fixed kl_coef: 0.000 reward_gt_coef: 5 reward_dpo_coef: 5 trainer: project_name: prime experiment_name: examples val_before_train: False balance_batch: False ================================================ FILE: verl_distillation/recipe/prime/main_prime.py ================================================ # Copyright 2024 PRIME team and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. """ Note that we don't combine the main with ray_trainer as ray_trainer is used by other main. """ import hydra import ray from omegaconf import OmegaConf from verl.trainer.ppo.utils import need_reference_policy from verl.utils.config import validate_config from .prime_ray_trainer import RayPRIMETrainer @hydra.main(config_path="config", config_name="prime_trainer", version_base=None) def main(config): run_prime(config) def run_prime(config, compute_score=None): if not ray.is_initialized(): default_runtime_env = {"env_vars": {"TOKENIZERS_PARALLELISM": "true", "NCCL_DEBUG": "WARN"}} ray_init_kwargs = config.ray_kwargs.get("ray_init", {}) runtime_env_kwargs = ray_init_kwargs.get("runtime_env", {}) runtime_env = OmegaConf.merge(default_runtime_env, runtime_env_kwargs) ray_init_kwargs = OmegaConf.create({**ray_init_kwargs, "runtime_env": runtime_env}) print(f"ray init kwargs: {ray_init_kwargs}") # this is for local ray cluster ray.init(**OmegaConf.to_container(ray_init_kwargs)) ray.get(main_task.remote(config, compute_score)) @ray.remote(num_cpus=1) # please make sure main_task is not scheduled on head def main_task(config, compute_score=None): # print initial config from pprint import pprint from omegaconf import OmegaConf from verl.utils.fs import copy_local_path_from_hdfs pprint(OmegaConf.to_container(config, resolve=True)) # resolve=True will eval symbol values OmegaConf.resolve(config) # define worker classes if config.actor_rollout_ref.actor.strategy in {"fsdp", "fsdp2"}: assert config.critic.strategy in {"fsdp", "fsdp2"} from verl.single_controller.ray import RayWorkerGroup from verl.workers.fsdp_workers import ActorRolloutRefWorker ray_worker_group_cls = RayWorkerGroup elif config.actor_rollout_ref.actor.strategy == "megatron": assert config.actor_rollout_ref.actor.strategy == config.critic.strategy from verl.single_controller.ray import RayWorkerGroup from verl.workers.megatron_workers import ActorRolloutRefWorker ray_worker_group_cls = RayWorkerGroup else: raise NotImplementedError from verl.trainer.ppo.ray_trainer import ResourcePoolManager, Role role_worker_mapping = { Role.ActorRollout: ray.remote(ActorRolloutRefWorker), } global_pool_id = "global_pool" resource_pool_spec = { global_pool_id: [config.trainer.n_gpus_per_node] * config.trainer.nnodes, } mapping = { Role.ActorRollout: global_pool_id, } # use reference model if config.algorithm.use_kl_in_reward or config.actor_rollout_ref.actor.use_kl_loss: role_worker_mapping[Role.RefPolicy] = ray.remote(ActorRolloutRefWorker) mapping[Role.RefPolicy] = global_pool_id if config.reward_model.enable: from .prime_fsdp_workers import PRIMERewardModelWorker role_worker_mapping[Role.RewardModel] = ray.remote(PRIMERewardModelWorker) mapping[Role.RewardModel] = global_pool_id # validate config # TODO: Additional config checks can be added with proper function under prime recipe validate_config( config=config, use_reference_policy=need_reference_policy(role_worker_mapping), use_critic=False, ) # download the checkpoint from hdfs local_path = copy_local_path_from_hdfs(config.actor_rollout_ref.model.path) # instantiate tokenizer from verl.utils import hf_tokenizer tokenizer = hf_tokenizer(local_path) reward_manager_name = config.reward_model.get("reward_manager", "naive") if reward_manager_name == "naive": from verl.workers.reward_manager import NaiveRewardManager reward_manager_cls = NaiveRewardManager elif reward_manager_name == "prime": from 
verl.workers.reward_manager import PrimeRewardManager reward_manager_cls = PrimeRewardManager else: raise NotImplementedError reward_fn = reward_manager_cls(tokenizer=tokenizer, num_examine=0, compute_score=compute_score) # Note that we always use function-based RM for validation val_reward_fn = reward_manager_cls(tokenizer=tokenizer, num_examine=1, compute_score=compute_score) resource_pool_manager = ResourcePoolManager(resource_pool_spec=resource_pool_spec, mapping=mapping) trainer = RayPRIMETrainer( config=config, tokenizer=tokenizer, role_worker_mapping=role_worker_mapping, resource_pool_manager=resource_pool_manager, ray_worker_group_cls=ray_worker_group_cls, reward_fn=reward_fn, val_reward_fn=val_reward_fn, ) trainer.init_workers() trainer.fit() if __name__ == "__main__": main() ================================================ FILE: verl_distillation/recipe/prime/prime_core_algos.py ================================================ # Copyright 2024 PRIME team and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch import verl import verl.utils.torch_functional as verl_F def compute_rloo_advantage_return(data: verl.DataProto, response_mask: torch.Tensor, n_samples, config): # calculate rloo reward on different reward sources, and sum again def masked_rloo(reward_tensor_original, mask_tensor): reward_tensor = reward_tensor_original.clone() reward_tensor[~mask_tensor] = 0 for start_pos in range(0, reward_tensor.shape[0], n_samples): cur_rewards_mean = torch.cat( [ reward_tensor[pos : pos + 1][mask_tensor[pos : pos + 1]].mean(dim=0, keepdim=True) for pos in range(start_pos, start_pos + n_samples) ], dim=0, ) cur_rewards_sum = cur_rewards_mean.sum() cur_reward_baseline = cur_rewards_sum / (n_samples - 1) reward_tensor[start_pos : start_pos + n_samples][mask_tensor[start_pos : start_pos + n_samples]] = ( reward_tensor[start_pos : start_pos + n_samples][mask_tensor[start_pos : start_pos + n_samples]] * (n_samples / (n_samples - 1)) - cur_reward_baseline ) return reward_tensor reward_tensors = [] with torch.no_grad(): if "rm_scores" in data.batch.keys() and config.algorithm.reward_dpo_coef != 0.0: reward_tensor = data.batch["rm_scores"] reward_mask = response_mask.bool() reward_tensors.append(masked_rloo(reward_tensor, reward_mask) * config.algorithm.reward_dpo_coef) if "acc" in data.batch.keys() and config.algorithm.reward_gt_coef != 0.0: reward_tensor = torch.zeros_like(response_mask, dtype=torch.float32) reward_mask = torch.zeros_like(response_mask, dtype=torch.bool) prompt_ids = data.batch["prompts"] prompt_length = prompt_ids.shape[-1] valid_response_length = data.batch["attention_mask"][:, prompt_length:].sum(-1) reward_mask[ torch.arange(0, valid_response_length.shape[0], dtype=torch.long, device=valid_response_length.device), valid_response_length - 1, ] = True reward_tensor[ torch.arange(0, valid_response_length.shape[0], dtype=torch.long, device=valid_response_length.device), valid_response_length - 1, ] = data.batch["acc"] reward_tensors.append(masked_rloo(reward_tensor, 
reward_mask) * config.algorithm.reward_gt_coef) final_reward_tensor = sum(reward_tensors) returns = (final_reward_tensor * response_mask).flip(dims=[-1]).cumsum(dim=-1).flip(dims=[-1]) advantages = returns.clone() advantages = verl_F.masked_whiten(advantages, response_mask) return advantages, returns def compute_ce_dpo_loss_rm(token_level_scores, acc, response_mask, beta): cur_scores = ((token_level_scores * response_mask).sum(dim=1) * beta).sigmoid() cur_dpo_loss = torch.nn.functional.binary_cross_entropy(cur_scores, acc) return cur_dpo_loss def compute_detach_dpo_loss_rm(token_level_scores, acc, Q_bc, acc_bc, response_mask, beta, bon_mode="none"): # we always assume that the BoN size equals n_samples # mode1: use acc as rm # mode2: use Q as rm cur_Q = (token_level_scores * response_mask).sum(dim=1) * beta other_Q = torch.zeros_like(cur_Q) for i in range(token_level_scores.shape[0]): Q_chosen = Q_bc[i][acc_bc[i] < acc[i]] if acc[i] > 0 else Q_bc[i][acc_bc[i] > acc[i]] if len(Q_chosen) > 0: other_Q[i] = Q_chosen.mean() * beta else: other_Q[i] = 0 dpo_loss = -torch.log(torch.sigmoid((cur_Q - other_Q) * ((acc > 0).float() * 2 - 1))) if bon_mode == "none": dpo_loss = dpo_loss.mean() else: weight = torch.zeros_like(dpo_loss) n_samples = acc_bc.shape[1] if bon_mode == "bon_rm": for i in range(token_level_scores.shape[0]): weight[i] = n_samples * torch.pow((Q_bc[i] * beta <= cur_Q[i]).float().mean(), n_samples - 1) elif bon_mode == "bon_acc": for i in range(token_level_scores.shape[0]): weight[i] = n_samples * torch.pow((acc_bc[i] <= acc[i]).float().mean(), n_samples - 1) else: raise NotImplementedError dpo_loss = (dpo_loss * weight).sum() return dpo_loss def compute_dpo_accuracy(token_level_scores, acc, response_mask, n_samples): dpo_acc = [] for start_id in range(0, token_level_scores.shape[0], n_samples): cur_scores = ( token_level_scores[start_id : start_id + n_samples] * response_mask[start_id : start_id + n_samples] ).sum(dim=1) def get_upper_triangle(tensor_x): diff_matrix = tensor_x.unsqueeze(1) - tensor_x.unsqueeze(0) upper_tri_indices = torch.triu(torch.ones_like(diff_matrix).bool(), diagonal=1) return diff_matrix[upper_tri_indices] cur_acc_diff = get_upper_triangle(acc[start_id : start_id + n_samples]) # in range [-1,1] cur_score_diff = get_upper_triangle(cur_scores) # in R cur_score_prediction = (cur_score_diff > 0).float() # in [0,1] if cur_acc_diff.abs().sum() == 0: cur_acc = torch.zeros_like(cur_score_prediction[0]) + 0.5 else: cur_acc = ( ((cur_score_diff > 0) == (cur_acc_diff > 0)).float() * cur_acc_diff.abs() ).sum() / cur_acc_diff.abs().sum() dpo_acc.append(cur_acc.unsqueeze(0)) return torch.cat(dpo_acc, dim=0).mean() def compute_dpo_abs_accuracy(token_level_scores, acc, response_mask, n_samples): return (torch.sign((token_level_scores * response_mask).sum(dim=-1)) == torch.sign(acc * 2 - 1)).float().mean() ================================================ FILE: verl_distillation/recipe/prime/prime_dp_rm.py ================================================ # Copyright 2024 PRIME team and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Implement a multiprocess PRIME reward model
"""

import itertools

import torch
import torch.distributed
from flash_attn.bert_padding import index_first_axis, pad_input, rearrange, unpad_input
from torch import nn, optim
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP

import verl.utils.torch_functional as verl_F
from verl import DataProto
from verl.utils.device import get_device_name
from verl.utils.py_functional import append_to_dict
from verl.utils.seqlen_balancing import get_reverse_idx, rearrange_micro_batches
from verl.utils.ulysses import gather_outputs_and_unpad, ulysses_pad_and_slice_inputs

from .prime_core_algos import compute_ce_dpo_loss_rm, compute_detach_dpo_loss_rm

__all__ = ["DataParallelPRIMERewardModel"]


class DataParallelPRIMERewardModel:
    def __init__(self, config, reward_module: nn.Module, ref_module: nn.Module, reward_optimizer: optim.Optimizer):
        self.config = config
        self.reward_module = reward_module
        self.ref_module = ref_module
        self.reward_optimizer = reward_optimizer
        self.use_remove_padding = self.config.model.get("use_remove_padding", False)
        print(f"Reward model use_remove_padding={self.use_remove_padding}")
        self.use_fused_kernels = self.config.model.get("use_fused_kernels", False)
        print(f"Reward model use_fused_kernels={self.use_fused_kernels}")

        self.ulysses_sequence_parallel_size = self.config.get("ulysses_sequence_parallel_size", 1)

    def _forward_micro_batch(self, micro_batch, prompt_length):
        input_ids = micro_batch["input_ids"]
        batch_size, seqlen = input_ids.shape
        attention_mask = micro_batch["attention_mask"]
        position_ids = micro_batch["position_ids"]

        num_actions = micro_batch["input_ids"].shape[-1] - prompt_length
        max_positions = micro_batch["attention_mask"][:, prompt_length:].sum(-1)

        if self.use_remove_padding:
            input_ids_rmpad, indices, *_ = unpad_input(
                input_ids.unsqueeze(-1), attention_mask
            )  # input_ids_rmpad (total_nnz, ...)
            input_ids_rmpad = input_ids_rmpad.transpose(0, 1)  # (1, total_nnz)

            # unpad the position_ids to align the rotary
            position_ids_rmpad = index_first_axis(
                rearrange(position_ids.unsqueeze(-1), "b s ...
-> (b s) ..."), indices ).transpose(0, 1) # for compute the log_prob input_ids_rmpad_rolled = torch.roll(input_ids_rmpad, shifts=-1, dims=1) # (1, total_nnz) # pad and slice the inputs if sp > 1 if self.ulysses_sequence_parallel_size > 1: input_ids_rmpad, position_ids_rmpad, pad_size = ulysses_pad_and_slice_inputs( input_ids_rmpad, position_ids_rmpad, sp_size=self.ulysses_sequence_parallel_size ) input_ids_rmpad_rolled, _, _ = ulysses_pad_and_slice_inputs( input_ids_rmpad_rolled, None, self.ulysses_sequence_parallel_size ) input_ids_rmpad_rolled = input_ids_rmpad_rolled.squeeze(0) output = self.reward_module( input_ids=input_ids_rmpad, attention_mask=None, position_ids=position_ids_rmpad, use_cache=False, return_dict=self.use_fused_kernels, ) if self.use_fused_kernels: rm_log_labels = output.log_probs.squeeze(0) # (total_nnz,) rm_log_labels = rm_log_labels.to(torch.float32) else: rm_output_logits = output.logits.squeeze(0) rm_log_labels = verl_F.logprobs_from_logits( logits=rm_output_logits, labels=input_ids_rmpad_rolled, ) if self.ulysses_sequence_parallel_size > 1: rm_log_labels = gather_outputs_and_unpad( rm_log_labels, gather_dim=0, unpad_dim=0, padding_size=pad_size ) rm_log_labels = pad_input( hidden_states=rm_log_labels.unsqueeze(-1), indices=indices, batch=batch_size, seqlen=seqlen ).squeeze(-1)[:, -num_actions - 1 : -1] else: output = self.reward_module( input_ids=micro_batch["input_ids"], attention_mask=micro_batch["attention_mask"], position_ids=micro_batch["position_ids"], use_cache=False, return_dict=self.use_fused_kernels, ) if self.use_fused_kernels: rm_log_labels = output.log_probs[:, :-1] # (bsz, seq_length) rm_log_labels = rm_log_labels.to(torch.float32) else: rm_output_logits = output.logits rm_log_prob = torch.nn.functional.log_softmax( rm_output_logits[:, :-1, :], dim=-1 ) # (batch_size, seq_length, vocab_size) rm_log_labels = rm_log_prob.gather(dim=-1, index=micro_batch["input_ids"][:, 1:].unsqueeze(-1)).squeeze( -1 ) # (batch, seq_length) if self.ref_module is not None: # do not have to pad again with torch.no_grad(), torch.autocast(device_type=get_device_name(), dtype=torch.bfloat16): if self.ulysses_sequence_parallel_size > 1 and self.use_remove_padding: ref_output = self.ref_module( input_ids=input_ids_rmpad, attention_mask=None, position_ids=position_ids_rmpad, use_cache=False, ) if self.use_fused_kernels: ref_log_labels = ref_output.log_probs.squeeze(0) # (total_nnz,) ref_log_labels = ref_log_labels.to(torch.float32) else: ref_output_logits = ref_output.logits.squeeze(0) ref_log_labels = verl_F.logprobs_from_logits( logits=ref_output_logits, labels=input_ids_rmpad_rolled ) ref_log_labels = gather_outputs_and_unpad( ref_log_labels, gather_dim=0, unpad_dim=0, padding_size=pad_size ) ref_log_labels = pad_input( hidden_states=ref_log_labels.unsqueeze(-1), indices=indices, batch=batch_size, seqlen=seqlen ).squeeze(-1)[:, -num_actions - 1 : -1] else: ref_output = self.ref_module( input_ids=micro_batch["input_ids"], attention_mask=micro_batch["attention_mask"], position_ids=micro_batch["position_ids"], use_cache=False, ) if self.use_fused_kernels: ref_log_labels = ref_output.log_probs[:, :-1] # (batch_size, seq_length) ref_log_labels = ref_log_labels.to(torch.float32) else: ref_output_logits = ref_output.logits ref_log_prob = torch.nn.functional.log_softmax( ref_output_logits[:, :-1, :], dim=-1 ) # (batch_size, seq_length, vocab_size) ref_log_labels = ref_log_prob.gather( dim=-1, index=micro_batch["input_ids"][:, 1:].unsqueeze(-1) ).squeeze(-1) # (batch, seq_length) 
else: ref_log_labels = micro_batch["old_log_probs"] ref_log_labels.to(rm_log_labels.dtype) q = rm_log_labels[:, -num_actions:] - ref_log_labels[:, -num_actions:] # this is actually diff of q # trim unnecessary logprobs here for i in range(micro_batch["input_ids"].shape[0]): q[i, max_positions[i] :] = 0 # reward computation does not need gradient. only q needs with torch.no_grad(): # generalized estimation of r should go before the reward filling. r means process reward for policy # model, or the advantage of reward model. lam = self.config.get("lambda", 0.0) beta = self.config.model.get("beta_train", 0.05) if lam == 0.0: r = q * beta else: # reward coefficient takes no effect here acc = micro_batch["acc"] q_ = q * beta r = torch.zeros_like(q) lastgaelam = 0 # change the last token and mask out all paddings to make this process easier if we rely on # outcome reward to calculate V for i in range(q.shape[0]): if self.config.prime_use_gt: q_[i, max_positions[i] - 1] = acc[i] - q_[i, : max_positions[i] - 1].sum() q_[i, max_positions[i] :] = 0 for t in reversed(range(num_actions)): delta = q_[:, t] lastgaelam = delta + lam * lastgaelam r[:, t] = lastgaelam token_level_score = torch.zeros_like(q) if self.config.prime_granularity == "token": for i in range(micro_batch["input_ids"].shape[0]): token_level_score[i, : max_positions[i] - 1] = r[i, : max_positions[i] - 1] elif self.config.prime_granularity == "whole": for i in range(micro_batch["input_ids"].shape[0]): token_level_score[i, max_positions[i] - 1] = r[i, : max_positions[i]] else: raise NotImplementedError return token_level_score, q def _optimizer_step(self): assert self.config.model.optim.grad_clip is not None if isinstance(self.reward_module, FSDP): grad_norm = self.reward_module.clip_grad_norm_(self.config.model.optim.grad_clip) else: grad_norm = torch.nn.utils.clip_grad_norm_( self.reward_module.parameters(), max_norm=self.config.model.optim.grad_clip ) self.reward_optimizer.step() return grad_norm def prime_norm(self, token_level_scores): if self.config.prime_norm == "batch_norm": reverse_cumsum = torch.cumsum(token_level_scores.flip(dims=[1]), dim=-1).flip(dims=[1]) token_level_scores = token_level_scores / (reverse_cumsum.abs().max() + 1e-6) return token_level_scores def compute_rm_score(self, data: DataProto): self.reward_module.eval() self.ref_module.eval() micro_batch_size = data.meta_info["micro_batch_size"] select_keys = ["responses", "input_ids", "attention_mask", "position_ids", "acc"] batch = data.select(batch_keys=select_keys).batch use_dynamic_bsz = data.meta_info["use_dynamic_bsz"] prompt_length = data.batch["input_ids"].shape[-1] - data.batch["responses"].shape[-1] if use_dynamic_bsz: # split using dynamic bsz max_token_len = data.meta_info["max_token_len"] * self.ulysses_sequence_parallel_size micro_batches, indices = rearrange_micro_batches(batch=batch, max_token_len=max_token_len) else: micro_batches = batch.split(micro_batch_size) rm_scores_lst = [] q_lst = [] for micro_batch in micro_batches: with torch.no_grad(): rm_score, q = self._forward_micro_batch(micro_batch, prompt_length) rm_scores_lst.append(rm_score) q_lst.append(q) rm_scores = torch.concat(rm_scores_lst, dim=0) q = torch.concat(q_lst, dim=0) rm_scores = self.prime_norm(rm_scores) if use_dynamic_bsz: indices = list(itertools.chain.from_iterable(indices)) assert len(indices) == rm_scores.size(0), f"{len(indices)} vs. 
{rm_scores.size()}" revert_indices = torch.tensor(get_reverse_idx(indices), dtype=torch.long) rm_scores = rm_scores[revert_indices] return ( rm_scores, q.detach(), { "reward_model/reward": rm_scores.sum(dim=-1).mean().item(), "reward_model/raw_reward": q.sum(dim=-1).mean().item(), }, ) def update_rm(self, data: DataProto): # make sure we are in training mode self.reward_module.train() metrics = {} beta = self.config.model.get("beta_train", 0.05) select_keys = ["input_ids", "responses", "attention_mask", "position_ids", "acc", "prompts"] for key in ["Q_bc", "acc_bc"]: if key in data.batch.keys(): select_keys.append(key) batch = data.select(batch_keys=select_keys).batch # Split to make minibatch iterator for updating the actor # See PPO paper for details. https://arxiv.org/abs/1707.06347 dataloader = batch.split(self.config.mini_batch_size) rm_scores_lst = [] q_lst = [] for batch_idx, data in enumerate(dataloader): # split batch into micro_batches mini_batch = data if self.config.use_dynamic_bsz: max_token_len = self.config.ppo_max_token_len_per_gpu * self.ulysses_sequence_parallel_size micro_batches, _ = rearrange_micro_batches(batch=mini_batch, max_token_len=max_token_len) else: micro_batches = mini_batch.split(self.config.micro_batch_size_per_gpu) self.gradient_accumulation = self.config.mini_batch_size // self.config.micro_batch_size_per_gpu self.reward_optimizer.zero_grad() for data in micro_batches: data = data.to(get_device_name()) attention_mask = data["attention_mask"] acc = data["acc"] prompt_ids = data["prompts"] prompt_length = prompt_ids.shape[-1] response_mask = attention_mask[:, prompt_length:] rm_score, q = self._forward_micro_batch(data, prompt_length) rm_scores_lst.append(rm_score) q_lst.append(q.detach()) if self.config.model.loss_type == "ce": dpo_loss = compute_ce_dpo_loss_rm(q, acc, response_mask=response_mask, beta=beta) elif self.config.model.loss_type == "dpo": # the implementation of dpo is actually detached, which means we have to know the average # value of w/l reward before the update. 
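# Q_bc / acc_bc hold the pre-update per-sample Q values and accuracies for each rollout group, so the detached DPO loss can contrast every sample against group companions measured before this update.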
dpo_loss = compute_detach_dpo_loss_rm( q, acc, Q_bc=data["Q_bc"], acc_bc=data["acc_bc"], response_mask=response_mask, beta=beta ) elif self.config.model.loss_type == "bon_acc": # change the original distribution of each sample to BoN distribution, then update reward model dpo_loss = compute_detach_dpo_loss_rm( q, acc, Q_bc=data["Q_bc"], acc_bc=data["acc_bc"], response_mask=response_mask, beta=beta, bon_mode="bon_acc", ) elif self.config.model.loss_type == "bon_rm": dpo_loss = compute_detach_dpo_loss_rm( q, acc, Q_bc=data["Q_bc"], acc_bc=data["acc_bc"], response_mask=response_mask, beta=beta, bon_mode="bon_rm", ) else: raise NotImplementedError data = {"reward_model/dpo_loss": dpo_loss.detach().item()} if self.config.use_dynamic_bsz: # relative to the dynamic bsz loss = dpo_loss * (len(data) / self.config.ppo_mini_batch_size) else: loss = dpo_loss / self.gradient_accumulation loss.backward() append_to_dict(metrics, data) grad_norm = self._optimizer_step() data = {"reward_model/grad_norm": grad_norm.detach().item()} append_to_dict(metrics, data) self.reward_optimizer.zero_grad() rm_scores = torch.cat(rm_scores_lst, dim=0) q = torch.concat(q_lst, dim=0) rm_scores = self.prime_norm(rm_scores) metrics.update( { "reward_model/reward": rm_scores.sum(dim=-1).mean().item(), "reward_model/raw_reward": q.sum(dim=-1).mean().item(), } ) return rm_scores, metrics ================================================ FILE: verl_distillation/recipe/prime/prime_fsdp_workers.py ================================================ # Copyright 2024 PRIME team and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import logging import os import warnings import torch import torch.distributed from omegaconf import OmegaConf from torch.distributed.device_mesh import init_device_mesh from verl import DataProto from verl.models.transformers.monkey_patch import apply_monkey_patch from verl.single_controller.base import Worker from verl.single_controller.base.decorator import Dispatch, register from verl.utils import hf_tokenizer from verl.utils.checkpoint.fsdp_checkpoint_manager import FSDPCheckpointManager from verl.utils.device import get_device_id, get_device_name, get_nccl_backend from verl.utils.flops_counter import FlopsCounter from verl.utils.fs import copy_local_path_from_hdfs from verl.utils.fsdp_utils import ( get_fsdp_wrap_policy, get_init_weight_context_manager, init_fn, load_fsdp_model_to_gpu, load_fsdp_optimizer, offload_fsdp_model_to_cpu, offload_fsdp_optimizer, ) from verl.utils.import_utils import import_external_libs from verl.utils.profiler import log_gpu_memory_usage from verl.workers.config.optimizer import build_optimizer from verl.workers.fsdp_workers import create_device_mesh, get_sharding_strategy from verl.workers.sharding_manager.fsdp_ulysses import FSDPUlyssesShardingManager from .prime_core_algos import compute_dpo_abs_accuracy, compute_dpo_accuracy logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) class PRIMERewardModelWorker(Worker): def __init__(self, config): super().__init__() import torch.distributed if not torch.distributed.is_initialized(): torch.distributed.init_process_group(backend=get_nccl_backend()) self.config = config # build device mesh for Ulysses Sequence Parallel world_size = torch.distributed.get_world_size() fsdp_size = self.config.model.fsdp_config.fsdp_size self.device_mesh = create_device_mesh(world_size=world_size, fsdp_size=fsdp_size) self.ulysses_device_mesh = None self.ulysses_sequence_parallel_size = self.config.get("ulysses_sequence_parallel_size", 1) dp = world_size // self.ulysses_sequence_parallel_size if self.ulysses_sequence_parallel_size > 1: self.ulysses_device_mesh = init_device_mesh( get_device_name(), mesh_shape=(dp, self.ulysses_sequence_parallel_size), mesh_dim_names=["dp", "sp"] ) self.ulysses_sharding_manager = FSDPUlyssesShardingManager(self.ulysses_device_mesh) # set FSDP offload params self._is_offload_param = self.config.model.fsdp_config.param_offload self._is_offload_optimizer = self.config.model.fsdp_config.optimizer_offload # normalize config self.config.mini_batch_size //= torch.distributed.get_world_size() // self.ulysses_sequence_parallel_size if self.config.micro_batch_size is not None: self.config.micro_batch_size //= torch.distributed.get_world_size() // self.ulysses_sequence_parallel_size self.config.micro_batch_size_per_gpu = self.config.micro_batch_size assert self.config.mini_batch_size % self.config.micro_batch_size_per_gpu == 0 def _build_reward_ref_model_optimizer(self, config): # the following line is necessary from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp import MixedPrecision from verl.utils.model import print_model_size from verl.utils.torch_dtypes import PrecisionType local_path = copy_local_path_from_hdfs(config.model.path) tokenizer_path = copy_local_path_from_hdfs(config.model.tokenizer_path) self.tokenizer = hf_tokenizer(tokenizer_path, trust_remote_code=config.model.get("trust_remote_code", False)) override_config = OmegaConf.to_container(OmegaConf.create(self.config.model.get("override_config", {}))) 
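# Seed the override kwargs with the tokenizer's special-token ids so the reward model config stays aligned with the tokenizer; user-provided override_config entries take precedence via the update() call that follows.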
override_config_kwargs = { "bos_token_id": self.tokenizer.bos_token_id, "eos_token_id": self.tokenizer.eos_token_id, "pad_token_id": self.tokenizer.pad_token_id, } override_config_kwargs.update(override_config) if self.rank == 0: print(f"Reward model overriding config {override_config_kwargs}") torch_dtype = self.config.model.fsdp_config.get("model_dtype", "fp32") torch_dtype = PrecisionType.to_dtype(torch_dtype) from transformers import AutoConfig, AutoModelForCausalLM trust_remote_code = False reward_model_config = AutoConfig.from_pretrained(local_path, trust_remote_code=trust_remote_code) reward_model_config.num_labels = 1 init_context = get_init_weight_context_manager(use_meta_tensor=not reward_model_config.tie_word_embeddings) with init_context(), warnings.catch_warnings(): warnings.simplefilter("ignore") reward_model_config.classifier_dropout = 0.0 reward_model_config.hidden_dropout = "0" reward_module = AutoModelForCausalLM.from_pretrained( pretrained_model_name_or_path=local_path, torch_dtype=torch_dtype, config=reward_model_config, attn_implementation="flash_attention_2", trust_remote_code=trust_remote_code, ) fused_kernel_options = config.model.get("fused_kernel_options", None) fused_kernels_backend = ( fused_kernel_options.get("impl_backend", None) if fused_kernel_options is not None else None ) apply_monkey_patch( model=reward_module, ulysses_sp_size=self.ulysses_sequence_parallel_size, use_remove_padding=config.model.get("use_remove_padding", False), use_fused_kernels=config.model.get("use_fused_kernels", False), fused_kernels_backend=fused_kernels_backend, ) # some parameters may not in torch_dtype reward_module.to(torch_dtype) if config.model.get("enable_gradient_checkpointing", False): reward_module.gradient_checkpointing_enable(gradient_checkpointing_kwargs={"use_reentrant": False}) if self.rank == 0: print_model_size(reward_module) self.reward_model_config = reward_model_config fsdp_config = self.config.model.fsdp_config mixed_precision_config = fsdp_config.get("mixed_precision", None) if mixed_precision_config is not None: param_dtype = PrecisionType.to_dtype(mixed_precision_config.get("param_dtype", "bf16")) reduce_dtype = PrecisionType.to_dtype(mixed_precision_config.get("reduce_dtype", "fp32")) buffer_dtype = PrecisionType.to_dtype(mixed_precision_config.get("buffer_dtype", "fp32")) else: param_dtype = torch.bfloat16 reduce_dtype = torch.float32 buffer_dtype = torch.float32 mixed_precision = MixedPrecision(param_dtype=param_dtype, reduce_dtype=reduce_dtype, buffer_dtype=buffer_dtype) auto_wrap_policy = get_fsdp_wrap_policy(module=reward_module, config=self.config.model.fsdp_config.wrap_policy) log_gpu_memory_usage("Before reward model FSDP", logger=None) fsdp_mesh = self.device_mesh sharding_strategy = get_sharding_strategy(fsdp_mesh) with init_context(), warnings.catch_warnings(): warnings.simplefilter("ignore") reward_model_config.classifier_dropout = 0.0 reward_model_config.hidden_dropout = "0" ref_module = AutoModelForCausalLM.from_pretrained( pretrained_model_name_or_path=copy_local_path_from_hdfs(config.model.ref_path), torch_dtype=torch_dtype, config=reward_model_config, attn_implementation="flash_attention_2", trust_remote_code=trust_remote_code, ) # some parameters may not in torch_dtype ref_module.to(torch_dtype) reward_module = FSDP( reward_module, param_init_fn=init_fn, use_orig_params=False, auto_wrap_policy=auto_wrap_policy, device_id=get_device_id(), sharding_strategy=sharding_strategy, mixed_precision=mixed_precision, sync_module_states=True, 
forward_prefetch=False, device_mesh=self.device_mesh, cpu_offload=None, ) log_gpu_memory_usage("After reward FSDP", logger=None) ref_module = FSDP( ref_module, param_init_fn=init_fn, use_orig_params=False, auto_wrap_policy=auto_wrap_policy, device_id=get_device_id(), sharding_strategy=sharding_strategy, mixed_precision=mixed_precision, sync_module_states=True, forward_prefetch=False, device_mesh=self.device_mesh, cpu_offload=None, ) reward_optimizer = build_optimizer(reward_module.parameters(), config.model.optim) total_steps = config.model.optim.get("total_training_steps", 0) num_warmup_steps = int(config.model.optim.get("lr_warmup_steps", -1)) if num_warmup_steps < 0: num_warmup_steps_ratio = config.model.optim.get("lr_warmup_steps_ratio", 0.0) num_warmup_steps = int(num_warmup_steps_ratio * total_steps) print(f"Total steps: {total_steps}, num_warmup_steps: {num_warmup_steps}") from verl.utils.torch_functional import get_constant_schedule_with_warmup reward_lr_scheduler = get_constant_schedule_with_warmup( optimizer=reward_optimizer, num_warmup_steps=num_warmup_steps ) return reward_module, ref_module, reward_optimizer, reward_lr_scheduler @register(dispatch_mode=Dispatch.ONE_TO_ALL) def init_model(self): # This is used to import external_lib into the huggingface systems import_external_libs(self.config.model.get("external_lib", None)) from .prime_dp_rm import DataParallelPRIMERewardModel self.reward_module, self.ref_module, self.reward_optimizer, self.reward_lr_scheduler = ( self._build_reward_ref_model_optimizer(config=self.config) ) if self._is_offload_param: offload_fsdp_model_to_cpu(self.reward_module) offload_fsdp_model_to_cpu(self.ref_module) if self._is_offload_optimizer: offload_fsdp_optimizer(optimizer=self.reward_optimizer) self.rm = DataParallelPRIMERewardModel( config=self.config, reward_module=self.reward_module, ref_module=self.ref_module, reward_optimizer=self.reward_optimizer, ) self.flops_counter = FlopsCounter(self.reward_model_config) self.checkpoint_manager = FSDPCheckpointManager( model=self.reward_module, optimizer=self.reward_optimizer, lr_scheduler=self.reward_lr_scheduler, tokenizer=self.tokenizer, ) @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO) def compute_rm_score(self, data: DataProto): data = data.to(get_device_name()) if self._is_offload_param: load_fsdp_model_to_gpu(self.reward_module) load_fsdp_model_to_gpu(self.ref_module) micro_batch_size = self.config.micro_batch_size_per_gpu data.meta_info["micro_batch_size"] = micro_batch_size data.meta_info["max_token_len"] = self.config.forward_max_token_len_per_gpu data.meta_info["use_dynamic_bsz"] = self.config.use_dynamic_bsz # perform forward computation with self.ulysses_sharding_manager: data = self.ulysses_sharding_manager.preprocess_data(data=data) rm_scores, q, metrics = self.rm.compute_rm_score(data=data) prompt_length = data.batch["prompts"].shape[-1] response_mask = data.batch["attention_mask"][:, prompt_length:] acc = data.batch["acc"] dpo_acc = compute_dpo_accuracy(rm_scores, acc, response_mask=response_mask, n_samples=data.meta_info["n"]) dpo_acc_abs = compute_dpo_abs_accuracy(rm_scores, acc, response_mask, n_samples=data.meta_info["n"]) metrics["reward_model/dpo_acc"] = dpo_acc.detach().item() metrics["reward_model/dpo_acc_abs"] = dpo_acc_abs.detach().item() output = DataProto.from_dict(tensors={"rm_scores": rm_scores, "q": q}, meta_info={"metrics": metrics}) output = self.ulysses_sharding_manager.postprocess_data(data=output) output = output.to("cpu") if self._is_offload_param: 
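            # mirrors the load_fsdp_model_to_gpu calls at the top of this method: both modules
            # stay on CPU between calls and occupy GPU memory only while a forward pass runs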
offload_fsdp_model_to_cpu(self.reward_module) offload_fsdp_model_to_cpu(self.ref_module) return output @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO) def update_rm(self, data: DataProto): data = data.to(get_device_name()) if self._is_offload_param: load_fsdp_model_to_gpu(self.ref_module) load_fsdp_model_to_gpu(self.reward_module) if self._is_offload_optimizer: load_fsdp_optimizer(optimizer=self.reward_optimizer, device_id=get_device_id()) # perform forward computation with self.ulysses_sharding_manager: data = self.ulysses_sharding_manager.preprocess_data(data=data) rm_scores, metrics = self.rm.update_rm(data=data) self.reward_lr_scheduler.step() lr = self.reward_lr_scheduler.get_last_lr()[0] metrics["rm/lr"] = lr prompt_length = data.batch["prompts"].shape[-1] response_mask = data.batch["attention_mask"][:, prompt_length:] acc = data.batch["acc"] dpo_acc_before = compute_dpo_accuracy( rm_scores, acc, response_mask=response_mask, n_samples=data.meta_info["n"] ) dpo_acc_abs = compute_dpo_abs_accuracy(rm_scores, acc, response_mask, n_samples=data.meta_info["n"]) metrics["reward_model/dpo_acc_before"] = dpo_acc_before.detach().item() metrics["reward_model/dpo_acc_abs_before"] = dpo_acc_abs.detach().item() output = DataProto.from_dict(tensors={"rm_scores": rm_scores}, meta_info={"metrics": metrics}) output = self.ulysses_sharding_manager.postprocess_data(data=output) if self._is_offload_param: offload_fsdp_model_to_cpu(self.reward_module) offload_fsdp_model_to_cpu(self.ref_module) if self._is_offload_optimizer: offload_fsdp_optimizer(optimizer=self.reward_optimizer) output = output.to("cpu") return output @register(dispatch_mode=Dispatch.ONE_TO_ALL) def save_checkpoint(self, local_path, hdfs_path=None, global_step=0, max_ckpt_to_keep=None): import torch if self._is_offload_param: load_fsdp_model_to_gpu(self.reward_module) self.checkpoint_manager.save_checkpoint( local_path=local_path, hdfs_path=hdfs_path, global_step=global_step, max_ckpt_to_keep=max_ckpt_to_keep ) torch.distributed.barrier() if self._is_offload_param: offload_fsdp_model_to_cpu(self.reward_module) @register(dispatch_mode=Dispatch.ONE_TO_ALL) def load_checkpoint(self, local_path, del_local_after_load=True): import torch if self._is_offload_param: load_fsdp_model_to_gpu(self.reward_module) self.checkpoint_manager.load_checkpoint(local_path=local_path, del_local_after_load=del_local_after_load) torch.distributed.barrier() if self._is_offload_param: offload_fsdp_model_to_cpu(self.reward_module) ================================================ FILE: verl_distillation/recipe/prime/prime_ray_trainer.py ================================================ # Copyright 2024 PRIME team and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ FSDP PPO Trainer with Ray-based single controller. 
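It extends RayPPOTrainer with PRIME's online reward-model update (see compute_reward below).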
This trainer supports model-agnostic model initialization with HuggingFace.
"""

import os
import statistics
import uuid
from copy import deepcopy
from pprint import pprint

import numpy as np
import torch
from omegaconf import OmegaConf, open_dict

from verl import DataProto
from verl.single_controller.ray import RayWorkerGroup
from verl.trainer.ppo.core_algos import agg_loss
from verl.trainer.ppo.metric_utils import _compute_response_info
from verl.trainer.ppo.ray_trainer import RayPPOTrainer, ResourcePoolManager
from verl.trainer.ppo.utils import Role, WorkerType
from verl.utils.checkpoint.checkpoint_manager import find_latest_ckpt_path
from verl.utils.dataset.rl_dataset import RLHFDataset, collate_fn
from verl.utils.metric import reduce_metrics
from verl.utils.profiler.performance import simple_timer

from . import prime_core_algos


def compute_advantage(data: DataProto, adv_estimator, config):
    if adv_estimator == "rloo":
        responses = data.batch["responses"]
        response_length = responses.size(-1)
        attention_mask = data.batch["attention_mask"]
        response_mask = attention_mask[:, -response_length:]
        advantages, returns = prime_core_algos.compute_rloo_advantage_return(
            data, response_mask, config.actor_rollout_ref.rollout.n, config
        )
        data.batch["advantages"] = advantages
        data.batch["returns"] = returns
    else:
        raise NotImplementedError
    return data


def compute_data_metrics(batch, use_critic=True):
    advantages = batch.batch["advantages"]
    returns = batch.batch["returns"]

    max_response_length = batch.batch["responses"].shape[-1]

    prompt_mask = batch.batch["attention_mask"][:, :-max_response_length].bool()
    response_mask = batch.batch["attention_mask"][:, -max_response_length:].bool()

    max_prompt_length = prompt_mask.size(-1)

    response_info = _compute_response_info(batch)
    prompt_length = response_info["prompt_length"]
    response_length = response_info["response_length"]

    valid_adv = torch.masked_select(advantages, response_mask)
    valid_returns = torch.masked_select(returns, response_mask)

    if use_critic:
        values = batch.batch["values"]
        valid_values = torch.masked_select(values, response_mask)
        return_diff_var = torch.var(valid_returns - valid_values)
        return_var = torch.var(valid_returns)

    metrics = {
        # adv
        "critic/advantages/mean": torch.mean(valid_adv).detach().item(),
        "critic/advantages/max": torch.max(valid_adv).detach().item(),
        "critic/advantages/min": torch.min(valid_adv).detach().item(),
        # returns
        "critic/returns/mean": torch.mean(valid_returns).detach().item(),
        "critic/returns/max": torch.max(valid_returns).detach().item(),
        "critic/returns/min": torch.min(valid_returns).detach().item(),
        **(
            {
                # values
                "critic/values/mean": torch.mean(valid_values).detach().item(),
                "critic/values/max": torch.max(valid_values).detach().item(),
                "critic/values/min": torch.min(valid_values).detach().item(),
                # vf explained var
                "critic/vf_explained_var": (1.0 - return_diff_var / (return_var + 1e-5)).detach().item(),
            }
            if use_critic
            else {}
        ),
        # response length
        "response_length/mean": torch.mean(response_length).detach().item(),
        "response_length/max": torch.max(response_length).detach().item(),
        "response_length/min": torch.min(response_length).detach().item(),
        "response_length/clip_ratio": torch.mean(torch.eq(response_length, max_response_length).float())
        .detach()
        .item(),
        # prompt length
        "prompt_length/mean": torch.mean(prompt_length).detach().item(),
        "prompt_length/max": torch.max(prompt_length).detach().item(),
        "prompt_length/min": torch.min(prompt_length).detach().item(),
        "prompt_length/clip_ratio": torch.mean(torch.eq(prompt_length,
max_prompt_length).float()).detach().item(), } return metrics def compute_response_mask(data: DataProto): responses = data.batch["responses"] response_length = responses.size(1) attention_mask = data.batch["attention_mask"] return attention_mask[:, -response_length:] def compute_timing_metrics(batch, timing_raw): response_info = _compute_response_info(batch) num_prompt_tokens = torch.sum(response_info["prompt_length"]).item() num_response_tokens = torch.sum(response_info["response_length"]).item() num_overall_tokens = num_prompt_tokens + num_response_tokens num_tokens_of_section = { "gen": num_response_tokens, **{name: num_overall_tokens for name in ["ref", "values", "adv", "update_critic", "update_actor"]}, } return { **{f"timing_s/{name}": value for name, value in timing_raw.items()}, **{ f"timing_per_token_ms/{name}": timing_raw[name] * 1000 / num_tokens_of_section[name] for name in set(num_tokens_of_section.keys()) & set(timing_raw.keys()) }, } class RayPRIMETrainer(RayPPOTrainer): """ Note that this trainer runs on the driver process on a single CPU/GPU node. """ # TODO: support each role have individual ray_worker_group_cls, # i.e., support different backend of different role def __init__( self, config, tokenizer, role_worker_mapping: dict[Role, WorkerType], resource_pool_manager: ResourcePoolManager, ray_worker_group_cls: RayWorkerGroup = RayWorkerGroup, reward_fn=None, val_reward_fn=None, device_name="cuda", ): # assert get_torch_device().is_available(), 'cuda must be available on driver' super().__init__( config, tokenizer, role_worker_mapping, resource_pool_manager, ray_worker_group_cls, reward_fn=reward_fn, val_reward_fn=val_reward_fn, device_name=device_name, ) self.use_critic = False def _create_dataloader(self, *args, **kwargs): from torch.utils.data import DataLoader, RandomSampler, SequentialSampler # TODO: we have to make sure the batch size is divisible by the dp size self.train_dataset = RLHFDataset( data_files=self.config.data.train_files, tokenizer=self.tokenizer, config=self.config.data ) # use sampler for better ckpt resume if self.config.data.shuffle: train_dataloader_generator = torch.Generator() seed = self.config.data.get("seed") if seed is not None: train_dataloader_generator.manual_seed(seed) sampler = RandomSampler(data_source=self.train_dataset, generator=train_dataloader_generator) else: sampler = SequentialSampler(data_source=self.train_dataset) self.train_dataloader = DataLoader( dataset=self.train_dataset, batch_size=int(self.config.data.train_batch_size * self.config.data.oversample_factor), drop_last=True, collate_fn=collate_fn, sampler=sampler, ) self.val_dataset = RLHFDataset( data_files=self.config.data.val_files, tokenizer=self.tokenizer, config=self.config.data ) self.val_dataloader = DataLoader( dataset=self.val_dataset, batch_size=len(self.val_dataset), shuffle=True, drop_last=True, collate_fn=collate_fn, ) assert len(self.train_dataloader) >= 1 assert len(self.val_dataloader) >= 1 print(f"Size of train dataloader: {len(self.train_dataloader)}") print(f"Size of val dataloader: {len(self.val_dataloader)}") # inject total_training_steps to actor/critic optim_config. This is hacky. 
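        # Worked example (illustrative numbers): a train dataloader of length 100 with
        # trainer.total_epochs=15 yields 1500 total steps, unless trainer.total_training_steps
        # is set explicitly, in which case that value takes precedence below.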
        total_training_steps = len(self.train_dataloader) * self.config.trainer.total_epochs

        if self.config.trainer.total_training_steps is not None:
            total_training_steps = self.config.trainer.total_training_steps

        self.total_training_steps = total_training_steps
        print(f"Total training steps: {self.total_training_steps}")

        OmegaConf.set_struct(self.config, True)
        with open_dict(self.config):
            self.config.actor_rollout_ref.actor.optim.total_training_steps = total_training_steps
            self.config.critic.optim.total_training_steps = total_training_steps

    def _save_checkpoint(self):
        # path: given_path + `/global_step_{global_steps}` + `/actor`
        local_global_step_folder = os.path.join(
            self.config.trainer.default_local_dir, f"global_step_{self.global_steps}"
        )
        print(f"local_global_step_folder: {local_global_step_folder}")
        actor_local_path = os.path.join(local_global_step_folder, "actor")

        actor_remote_path = (
            None
            if self.config.trainer.default_hdfs_dir is None
            else os.path.join(self.config.trainer.default_hdfs_dir, f"global_step_{self.global_steps}", "actor")
        )
        self.actor_rollout_wg.save_checkpoint(
            actor_local_path,
            actor_remote_path,
            self.global_steps,
        )

        if self.use_rm:
            reward_local_path = os.path.join(local_global_step_folder, "reward")
            reward_remote_path = (
                None
                if self.config.trainer.default_hdfs_dir is None
                else os.path.join(self.config.trainer.default_hdfs_dir, f"global_step_{self.global_steps}", "reward")
            )
            self.rm_wg.save_checkpoint(
                reward_local_path,
                reward_remote_path,
                self.global_steps,
            )

        # save dataloader
        dataloader_local_path = os.path.join(local_global_step_folder, "data.pt")
        import dill

        torch.save(self.train_dataloader, dataloader_local_path, pickle_module=dill)

        # latest checkpointed iteration tracker (for atomic usage)
        local_latest_checkpointed_iteration = os.path.join(
            self.config.trainer.default_local_dir, "latest_checkpointed_iteration.txt"
        )
        with open(local_latest_checkpointed_iteration, "w") as f:
            f.write(str(self.global_steps))

    def _load_checkpoint(self):
        if self.config.trainer.resume_mode == "disable":
            return 0

        # load from hdfs
        if self.config.trainer.default_hdfs_dir is not None:
            raise NotImplementedError("load from hdfs is not implemented yet")
        else:
            checkpoint_folder = self.config.trainer.default_local_dir  # TODO: check path
            if not os.path.isabs(checkpoint_folder):
                working_dir = os.getcwd()
                checkpoint_folder = os.path.join(working_dir, checkpoint_folder)
            global_step_folder = find_latest_ckpt_path(checkpoint_folder)  # None if no latest

        # find global_step_folder
        if self.config.trainer.resume_mode == "auto":
            if global_step_folder is None:
                print("Training from scratch")
                return 0
        else:
            if self.config.trainer.resume_mode == "resume_path":
                assert isinstance(self.config.trainer.resume_from_path, str), "resume ckpt must be str type"
                assert "global_step_" in self.config.trainer.resume_from_path, (
                    "resume ckpt must specify the global_steps"
                )
                global_step_folder = self.config.trainer.resume_from_path
                if not os.path.isabs(global_step_folder):
                    working_dir = os.getcwd()
                    global_step_folder = os.path.join(working_dir, global_step_folder)

        print(f"Load from checkpoint folder: {global_step_folder}")
        # set global step
        self.global_steps = int(global_step_folder.split("global_step_")[-1])

        print(f"Setting global step to {self.global_steps}")
        print(f"Resuming from {global_step_folder}")

        actor_path = os.path.join(global_step_folder, "actor")
        reward_path = os.path.join(global_step_folder, "reward")

        # load actor
        self.actor_rollout_wg.load_checkpoint(
            actor_path,
            del_local_after_load=self.config.trainer.del_local_ckpt_after_load,
        )

        # load rm
        if self.use_rm:
            self.rm_wg.load_checkpoint(reward_path, del_local_after_load=self.config.trainer.del_local_ckpt_after_load)

        # load dataloader,
        # TODO: from remote not implemented yet
        dataloader_local_path = os.path.join(global_step_folder, "data.pt")
        self.train_dataloader = torch.load(dataloader_local_path)
        if isinstance(self.train_dataloader.dataset, RLHFDataset):
            self.train_dataloader.dataset.resume_dataset_state()

    def compute_reward(self, batch: DataProto, n_samples: int):
        update_style = self.config.reward_model.model.get("update", "none")
        reward_output_metrics = {}
        if update_style == "none":  # only run forward
            reward_output = self.rm_wg.compute_rm_score(batch)
        elif update_style == "after":  # update and directly return the reward
            reward_output = self.rm_wg.update_rm(batch)
        elif update_style == "before":  # update the reward model, then run forward
            reward_output = self.rm_wg.update_rm(batch)
            if "metrics" in reward_output.meta_info.keys():
                reward_output_metrics = reduce_metrics(reward_output.meta_info["metrics"])
            reward_output = self.rm_wg.compute_rm_score(batch)
        elif update_style == "reverse":  # run forward to calculate statistics, then update the reward model
            reward_output = self.rm_wg.compute_rm_score(batch)
            # broadcast q and acc tensors to each result
            bc_td = DataProto.from_dict(
                tensors={
                    "Q_bc": reward_output.batch["q"]
                    .sum(dim=-1)
                    .view(-1, n_samples)
                    .unsqueeze(1)
                    .expand(-1, n_samples, -1)
                    .reshape(-1, n_samples),
                    "acc_bc": batch.batch["acc"]
                    .view(-1, n_samples)
                    .unsqueeze(1)
                    .expand(-1, n_samples, -1)
                    .reshape(-1, n_samples),
                }
            )
            batch = batch.union(bc_td)
            reward_output = self.rm_wg.update_rm(batch)
        else:
            raise NotImplementedError
        return reward_output, reward_output_metrics

    def fit(self):
        """
        The training loop of PPO.
        The driver process only needs to call the compute functions of the worker group through RPC
        to construct the PPO dataflow.
        The lightweight advantage computation is done on the driver process.
        """
        from omegaconf import OmegaConf

        from verl.utils.tracking import Tracking

        logger = Tracking(
            project_name=self.config.trainer.project_name,
            experiment_name=self.config.trainer.experiment_name,
            default_backend=self.config.trainer.logger,
            config=OmegaConf.to_container(self.config, resolve=True),
        )

        self.global_steps = 0

        # load checkpoint before doing anything
        self._load_checkpoint()

        # perform validation before training
        # currently, we only support validation using the reward_function.
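        # The two trainer flags consulted below behave as follows:
        #   trainer.val_before_train (default True): run one validation pass before step 1
        #   trainer.val_only: log the initial validation metrics, then return without training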
if self.val_reward_fn is not None and self.config.trainer.get("val_before_train", True): val_metrics = self._validate() assert val_metrics, f"{val_metrics=}" pprint(f"Initial validation metrics: {val_metrics}") logger.log(data=val_metrics, step=self.global_steps) if self.config.trainer.get("val_only", False): return # we start from step 1 self.global_steps += 1 for epoch in range(self.config.trainer.total_epochs): for batch_dict in self.train_dataloader: metrics = {} timing_raw = {} batch: DataProto = DataProto.from_single_dict(batch_dict) # pop those keys for generation gen_batch = batch.pop(batch_keys=["input_ids", "attention_mask", "position_ids"]) gen_batch_output = gen_batch.repeat( repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True ) with simple_timer("step", timing_raw): # generate a batch with simple_timer("gen", timing_raw): gen_batch_output = self.actor_rollout_wg.generate_sequences(gen_batch_output) timing_raw.update(gen_batch_output.meta_info["timing"]) gen_batch_output.meta_info.pop("timing", None) if self.config.algorithm.adv_estimator == "remax": with simple_timer("gen_max", timing_raw): gen_baseline_batch = deepcopy(gen_batch) gen_baseline_batch.meta_info["do_sample"] = False gen_baseline_output = self.actor_rollout_wg.generate_sequences(gen_baseline_batch) batch = batch.union(gen_baseline_output) rm_scores, _ = self.compute_reward(batch, 1) reward_baseline_tensor = rm_scores.batch.get( "rm_scores", rm_scores.batch.get("acc_bc", None) ) if reward_baseline_tensor is None: raise ValueError( "Neither 'rm_scores' nor 'acc_bc' found in reward model output for baseline." ) reward_baseline_tensor = reward_baseline_tensor.sum(dim=-1) keys_to_pop = set(gen_baseline_output.batch.keys()) keys_to_pop.update(rm_scores.batch.keys()) batch.pop(batch_keys=list(keys_to_pop)) batch.batch["reward_baselines"] = reward_baseline_tensor del gen_baseline_batch, gen_baseline_output batch.non_tensor_batch["uid"] = np.array( [str(uuid.uuid4()) for _ in range(len(batch.batch))], dtype=object ) # repeat to align with repeated responses in rollout batch = batch.repeat(repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True) batch = batch.union(gen_batch_output) # Balance the number of valid tokens across DP ranks. # NOTE: This usually changes the order of data in the `batch`, # which won't affect the advantage calculation (since it's based on uid), # but might affect the loss calculation (due to the change of mini-batching). # TODO: Decouple the DP balancing and mini-batching. if self.config.trainer.balance_batch: self._balance_batch(batch, metrics=metrics) # compute global_valid tokens batch.meta_info["global_token_num"] = torch.sum(batch.batch["attention_mask"], dim=-1).tolist() # verify with simple_timer("verify", timing_raw): scores = self.reward_fn.verify(batch) metrics["acc"] = statistics.mean(scores) # filter the batch. 1/oversample_factor samples will be kept. # If there is a filter, prompts passing it will be prioritized. 
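                    # Worked example (illustrative numbers): with data.train_batch_size=64 and
                    # data.oversample_factor=4, each step draws 256 prompts; filter_and_downsample
                    # keeps the first 256 / 4 = 64 prompt groups (each with all n rollouts),
                    # sorting groups that pass the filters to the front.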
batch = self.filter_and_downsample(scores, batch) batch.meta_info["n"] = self.config.actor_rollout_ref.rollout.n n_samples = self.config.actor_rollout_ref.rollout.n # recompute old_log_probs with simple_timer("old_log_prob", timing_raw): old_log_prob = self.actor_rollout_wg.compute_log_prob(batch) entropys = old_log_prob.batch["entropys"] response_masks = compute_response_mask(batch) loss_agg_mode = self.config.actor_rollout_ref.actor.loss_agg_mode entropy_agg = agg_loss(loss_mat=entropys, loss_mask=response_masks, loss_agg_mode=loss_agg_mode) old_log_prob_metrics = {"actor/entropy": entropy_agg.detach().item()} metrics.update(old_log_prob_metrics) old_log_prob.batch.pop("entropys") batch = batch.union(old_log_prob) if self.use_reference_policy: # compute reference log_prob with simple_timer("ref", timing_raw): ref_log_prob = self.ref_policy_wg.compute_ref_log_prob(batch) batch = batch.union(ref_log_prob) with simple_timer("adv", timing_raw): if self.use_rm: reward_output, reward_output_metrics = self.compute_reward(batch, n_samples) batch = batch.union(reward_output) if "metrics" in reward_output.meta_info.keys(): reward_output_metrics.update(reduce_metrics(reward_output.meta_info["metrics"])) metrics.update(reward_output_metrics) # compute advantages, executed on the driver process batch = compute_advantage( batch, adv_estimator=self.config.algorithm.adv_estimator, config=self.config ) # update actor with simple_timer("update_actor", timing_raw): actor_output = self.actor_rollout_wg.update_actor(batch) actor_output_metrics = reduce_metrics(actor_output.meta_info["metrics"]) metrics.update(actor_output_metrics) # validate if ( self.val_reward_fn is not None and self.config.trainer.test_freq > 0 and self.global_steps % self.config.trainer.test_freq == 0 ): with simple_timer("testing", timing_raw): val_metrics: dict = self._validate() metrics.update(val_metrics) if self.config.trainer.save_freq > 0 and self.global_steps % self.config.trainer.save_freq == 0: with simple_timer("save_checkpoint", timing_raw): self._save_checkpoint() # collect metrics metrics.update(compute_data_metrics(batch=batch, use_critic=self.use_critic)) metrics.update(compute_timing_metrics(batch=batch, timing_raw=timing_raw)) # TODO: make a canonical logger that supports various backend logger.log(data=metrics, step=self.global_steps) self.global_steps += 1 if self.global_steps >= self.total_training_steps: # perform validation after training if self.val_reward_fn is not None: val_metrics = self._validate() pprint(f"Final validation metrics: {val_metrics}") logger.log(data=val_metrics, step=self.global_steps) if ( self.config.trainer.save_freq > 0 and (self.global_steps - 1) % self.config.trainer.save_freq != 0 ): with simple_timer("save_checkpoint", timing_raw): self._save_checkpoint() return def filter_and_downsample(self, scores, batch: DataProto): """ downsample the batch according to oversample_factor samples passing the filters will be prioritized """ n_samples = int(self.config.actor_rollout_ref.rollout.n) reward_matrix = torch.tensor(scores).reshape(-1, n_samples) filter_mask = torch.ones((reward_matrix.shape[0]), dtype=torch.bool) if self.config.data.filter_accuracy: acc_tensor = torch.mean(reward_matrix, dim=-1) filter_mask[ (acc_tensor > self.config.data.accuracy_upper_bound) | (acc_tensor < self.config.data.accuracy_lower_bound) ] = False if self.config.data.filter_truncate: length_matrix = ( batch.batch["attention_mask"][:, -batch.batch["responses"].shape[-1] :] .sum(dim=-1) .reshape(-1, n_samples) ) 
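            # Shape note: length_matrix is (num_prompts, n_samples); the max over dim -1 below
            # marks a prompt as truncated if ANY of its rollouts reached max_response_length - 1.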
            length_tensor = torch.max(length_matrix, dim=-1)[0]
            filter_mask[length_tensor >= self.config.data.max_response_length - 1] = False

        reorder_index = torch.argsort(filter_mask, descending=True)
        reorder_index = (reorder_index.unsqueeze(-1) * n_samples + torch.arange(0, n_samples).unsqueeze(0)).view(-1)
        batch.reorder(
            reorder_index[: int(len(batch) // self.config.data.oversample_factor)]
        )  # this operation is inplace

        return batch


================================================
FILE: verl_distillation/recipe/prime/run_prime_qwen.sh
================================================
set -x

gsm8k_train_path=$HOME/data/gsm8k/train.parquet
gsm8k_test_path=$HOME/data/gsm8k/test.parquet
# download from https://huggingface.co/datasets/PRIME-RL/Eurus-2-RL-Data
math_train_path=$HOME/data/math/train.parquet
math_test_path=$HOME/data/math/test.parquet

train_files="['$gsm8k_train_path', '$math_train_path']"
test_files="['$gsm8k_test_path', '$math_test_path']"

model_path=PRIME-RL/Eurus-2-7B-SFT
# model_path=Qwen/Qwen2.5-0.5B-Instruct

python3 -m recipe.prime.main_prime \
    data.train_files="$train_files" \
    data.val_files="$test_files" \
    data.train_batch_size=64 \
    data.val_batch_size=6312 \
    data.max_prompt_length=1024 \
    data.max_response_length=3072 \
    data.filter_overlong_prompts=True \
    data.filter_accuracy=True \
    data.accuracy_lower_bound=0.2 \
    data.accuracy_upper_bound=0.8 \
    data.oversample_factor=4 \
    actor_rollout_ref.model.path=$model_path \
    actor_rollout_ref.actor.optim.lr=5e-7 \
    actor_rollout_ref.model.use_remove_padding=True \
    actor_rollout_ref.actor.ppo_mini_batch_size=64 \
    actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=1 \
    actor_rollout_ref.model.enable_gradient_checkpointing=True \
    actor_rollout_ref.actor.fsdp_config.param_offload=True \
    actor_rollout_ref.actor.fsdp_config.optimizer_offload=True \
    actor_rollout_ref.actor.use_kl_loss=False \
    actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=32 \
    actor_rollout_ref.rollout.tensor_model_parallel_size=1 \
    actor_rollout_ref.rollout.name=vllm \
    actor_rollout_ref.rollout.n=4 \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \
    actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=32 \
    algorithm.adv_estimator=rloo \
    algorithm.use_kl_in_reward=True \
    algorithm.kl_penalty=kl \
    algorithm.kl_ctrl.kl_coef=0.001 \
    reward_model.model.path=$model_path \
    reward_model.micro_batch_size_per_gpu=1 \
    reward_model.model.update=before \
    reward_model.model.beta_train=0.05 \
    reward_model.model.optim.lr=1e-6 \
    reward_model.model.optim.grad_clip=10.0 \
    reward_model.model.input_tokenizer=null \
    reward_model.mini_batch_size=64 \
    trainer.val_before_train=False \
    trainer.logger='["console","wandb"]' \
    trainer.project_name='prime_example' \
    trainer.experiment_name='Eurus-2-7B-SFT-gsm8k' \
    trainer.n_gpus_per_node=8 \
    trainer.nnodes=1 \
    trainer.save_freq=64 \
    trainer.test_freq=64 \
    trainer.total_epochs=15 $@


================================================
FILE: verl_distillation/recipe/prime/run_prime_qwen_code.sh
================================================
set -x

# download from https://huggingface.co/datasets/PRIME-RL/Eurus-2-RL-Data
code_train_path=$HOME/data/code/train.parquet
code_test_path=$HOME/data/code/test.parquet

train_files="['$code_train_path']"
test_files="['$code_test_path']"

model_path=PRIME-RL/Eurus-2-7B-SFT
# model_path=Qwen/Qwen2.5-0.5B-Instruct

python3 -m recipe.prime.main_prime \
    data.train_files="$train_files" \
    data.val_files="$test_files" \
    data.train_batch_size=64 \
    data.val_batch_size=6312 \
    data.max_prompt_length=1024 \
    data.max_response_length=3072 \
    data.filter_overlong_prompts=True \
    data.filter_accuracy=True \
    data.accuracy_lower_bound=0.2 \
    data.accuracy_upper_bound=0.8 \
    data.oversample_factor=4 \
    actor_rollout_ref.model.path=$model_path \
    actor_rollout_ref.actor.optim.lr=5e-7 \
    actor_rollout_ref.model.use_remove_padding=True \
    actor_rollout_ref.actor.ppo_mini_batch_size=64 \
    actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=1 \
    actor_rollout_ref.model.enable_gradient_checkpointing=True \
    actor_rollout_ref.actor.fsdp_config.param_offload=True \
    actor_rollout_ref.actor.fsdp_config.optimizer_offload=True \
    actor_rollout_ref.actor.use_kl_loss=False \
    actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=32 \
    actor_rollout_ref.rollout.tensor_model_parallel_size=1 \
    actor_rollout_ref.rollout.name=vllm \
    actor_rollout_ref.rollout.n=4 \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \
    actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=32 \
    algorithm.adv_estimator=rloo \
    algorithm.use_kl_in_reward=True \
    algorithm.kl_penalty=kl \
    algorithm.kl_ctrl.kl_coef=0.001 \
    reward_model.model.path=$model_path \
    reward_model.micro_batch_size_per_gpu=1 \
    reward_model.model.update=before \
    reward_model.model.beta_train=0.05 \
    reward_model.model.optim.lr=1e-6 \
    reward_model.model.optim.grad_clip=10.0 \
    reward_model.model.input_tokenizer=null \
    reward_model.mini_batch_size=64 \
    trainer.val_before_train=False \
    trainer.logger='["console","wandb"]' \
    trainer.project_name='prime_example' \
    trainer.experiment_name='Eurus-2-7B-SFT-code' \
    trainer.n_gpus_per_node=8 \
    trainer.nnodes=1 \
    trainer.save_freq=64 \
    trainer.test_freq=64 \
    trainer.total_epochs=15 $@


================================================
FILE: verl_distillation/recipe/r1/README.md
================================================
# DeepSeek R1 Reproduction

This recipe is under development. If you are interested, check out the TODO list and join the project: https://github.com/volcengine/verl/issues/708

## Reproducing Evaluation

Eval results of DS-R1-Distill-Qwen2.5-1.5B (k=8):

Dataset | Test Results | Reported
-- | -- | --
GPQA Diamond | 35.3 | 33.8
LiveCodeBench | 16.9 | 16.9
AIME 2024 | 30.4 | 28.9
CNMO 2024 (en) | 45.1 | -
CNMO 2024 (zh) | 41.0 | -

---

Eval results (DS-R1):

Dataset | Test Results (k=1) | Test Results (k=4) | Reported
-- | -- | -- | --
GPQA Diamond | 67.7 | 69.6 | 71.5
LiveCodeBench | 64.7 | 63.1 | 65.9
AIME 2024 | 86.7 | 79.2 | 79.8
CNMO 2024 | 75.0 | 78.5 | 78.8


================================================
FILE: verl_distillation/recipe/r1/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
================================================ FILE: verl_distillation/recipe/r1/config/evaluation.yaml ================================================ data: path: /tmp/math_Qwen2-7B-Instruct.parquet prompt_key: prompt response_key: responses data_source_key: data_source reward_model_key: reward_model custom_reward_function: path: null name: compute_score ray_kwargs: ray_init: num_cpus: null # `None` means using all CPUs, which might cause hang if limited in systems like SLURM. Please set to a number allowed then. ================================================ FILE: verl_distillation/recipe/r1/data_process.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Preprocess the dataset to parquet format """ import argparse import os from functools import partial from datasets import concatenate_datasets, load_dataset from verl.utils.hdfs_io import copy, makedirs def example_map_fn(example, idx, process_fn, data_source, ability, split): question, solution = process_fn(example) data = { "data_source": data_source, "prompt": [{"role": "user", "content": question}], "ability": ability, "reward_model": {"style": "rule", "ground_truth": solution}, "extra_info": {"split": split, "index": idx}, } return data def build_aime2024_dataset(): def process_aime2024(example): return example["Problem"], str(example["Answer"]) data_source = "Maxwell-Jia/AIME_2024" print(f"Loading the {data_source} dataset from huggingface...", flush=True) dataset = load_dataset(data_source, split="train") map_fn = partial( example_map_fn, process_fn=process_aime2024, data_source=data_source, ability="English", split="test" ) dataset = dataset.map(map_fn, with_indices=True, remove_columns=dataset.column_names) return dataset def build_gpqa_dimond_dataset(): import random GPQA_QUERY_TEMPLATE = ( "Answer the following multiple choice question. The last line of your response should be of the following " "format: 'Answer: $LETTER' (without quotes) where LETTER is one of ABCD. 
Think step by step before " "answering.\n\n{Question}\n\nA) {A}\nB) {B}\nC) {C}\nD) {D}" ) def process_gpqa_diamond(example): choices = [example["Incorrect Answer 1"], example["Incorrect Answer 2"], example["Incorrect Answer 3"]] random.shuffle(choices) gold_index = random.randint(0, 3) choices.insert(gold_index, example["Correct Answer"]) query_prompt = GPQA_QUERY_TEMPLATE.format( A=choices[0], B=choices[1], C=choices[2], D=choices[3], Question=example["Question"] ) gold_choice = "ABCD"[gold_index] return query_prompt, gold_choice data_source = "Idavidrein/gpqa" print(f"Loading the {data_source} dataset from huggingface...", flush=True) dataset = load_dataset(data_source, "gpqa_diamond", split="train") map_fn = partial( example_map_fn, process_fn=process_gpqa_diamond, data_source=data_source, ability="Math", split="test" ) dataset = dataset.map(map_fn, with_indices=True, remove_columns=dataset.column_names) return dataset def build_cnmo2024_dataset(): def process_cnmo2024(example): return example["question"], example["answer"] data_source = "opencompass/LiveMathBench" print(f"Loading the {data_source} dataset from huggingface...", flush=True) dataset_en = load_dataset(data_source, "v202412_CNMO_en", split="test") map_fn_en = partial( example_map_fn, process_fn=process_cnmo2024, data_source="opencompass/cnmo2024_en", ability="Math", split="test" ) dataset_en = dataset_en.map(map_fn_en, with_indices=True, remove_columns=dataset_en.column_names) dataset_zh = load_dataset(data_source, "v202412_CNMO_cn", split="test") map_fn_zh = partial( example_map_fn, process_fn=process_cnmo2024, data_source="opencompass/cnmo2024_zh", ability="Math", split="test" ) dataset_zh = dataset_zh.map(map_fn_zh, with_indices=True, remove_columns=dataset_zh.column_names) dataset = concatenate_datasets([dataset_en, dataset_zh]) return dataset def build_livecodebench_dataset(): import base64 import json import pickle import zlib def process_livecodebench(example): # Construct Query Prompt # From https://github.com/LiveCodeBench/LiveCodeBench/blob/998c52d394b836f15fff3b9a29866191108ff81b/lcb_runner/prompts/code_generation.py#L140 query_prompt = ( f"You will be given a question (problem specification) and will generate a correct Python program " f"that matches the specification and passes all tests.\n\nQuestion: {example['question_content']}\n\n" ) if example["starter_code"]: query_prompt += ( f"You will use the following starter code to write the solution to the problem and enclose your " f"code within delimiters.\n```python\n{example['starter_code']}\n```" ) else: query_prompt += ( "Read the inputs from stdin solve the problem and write the answer to stdout (do not directly test " "on the sample inputs). Enclose your code within delimiters as follows. Ensure that when the python " "program runs, it reads the inputs, runs the algorithm and writes output to STDOUT." 
"```python\n# YOUR CODE HERE\n```" ) # Construct test cases public_test_cases = json.loads(example["public_test_cases"]) try: private_test_cases = json.loads(example["private_test_cases"]) except Exception as e: print(f"Error loading private test cases: {e}") private_test_cases = json.loads( pickle.loads(zlib.decompress(base64.b64decode(example["private_test_cases"].encode("utf-8")))) ) full_test_cases = public_test_cases + private_test_cases metadata = json.loads(example["metadata"]) test_cases = { "inputs": [t["input"] for t in full_test_cases], "outputs": [t["output"] for t in full_test_cases], "fn_name": metadata.get("func_name", None), } text_cases_compressed = base64.b64encode(zlib.compress(pickle.dumps(json.dumps(test_cases)))).decode("utf-8") return query_prompt, text_cases_compressed data_source = "livecodebench/code_generation_lite" print(f"Loading the {data_source} dataset from huggingface...", flush=True) dataset = load_dataset(data_source, split="test") # R1 Evaluation use LiveCodeBench 24.08-25.01 dataset = dataset.filter(lambda line: "2024-08-00T00:00:00" <= line["contest_date"] < "2025-01-00T00:00:00") map_fn = partial( example_map_fn, process_fn=process_livecodebench, data_source=data_source, ability="Code", split="test" ) dataset = dataset.map(map_fn, with_indices=True, remove_columns=dataset.column_names, num_proc=8) return dataset TASK2DATA = { "aime2024": build_aime2024_dataset, "gpqa_diamond": build_gpqa_dimond_dataset, "cnmo2024": build_cnmo2024_dataset, "livecodebench": build_livecodebench_dataset, } SUPPORTED_TASKS = TASK2DATA.keys() if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--local_dir", default="~/data/r1") parser.add_argument("--hdfs_dir", default=None) parser.add_argument("--tasks", default="all") args = parser.parse_args() if args.tasks.lower() == "all": args.tasks = SUPPORTED_TASKS else: args.tasks = [task.strip() for task in args.tasks.split(",") if task.strip()] for task in args.tasks: if task not in SUPPORTED_TASKS: raise NotImplementedError(f"{task} has not been supported.") datasets = [] for task in args.tasks: datasets.append(TASK2DATA[task]()) test_dataset = concatenate_datasets(datasets) local_dir = args.local_dir hdfs_dir = args.hdfs_dir test_dataset.to_parquet(os.path.join(local_dir, "test.parquet")) if hdfs_dir is not None: makedirs(hdfs_dir) copy(src=local_dir, dst=hdfs_dir) ================================================ FILE: verl_distillation/recipe/r1/main_eval.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Offline evaluate the performance of a generated file using reward model and ground truth verifier. The input is a parquet file that contains N generated sequences and (optional) the ground truth. 
""" from collections import defaultdict import hydra import numpy as np import pandas as pd import ray from omegaconf import OmegaConf from tqdm import tqdm from verl.trainer.ppo.reward import get_custom_reward_fn from verl.utils.fs import copy_to_local @ray.remote def process_item(config, data_source, response_lst, reward_data): reward_fn = get_custom_reward_fn(config) ground_truth = reward_data["ground_truth"] score_lst = [reward_fn(data_source, r, ground_truth) for r in response_lst] return data_source, np.mean(score_lst) @hydra.main(config_path="config", config_name="evaluation", version_base=None) def main(config): local_path = copy_to_local(config.data.path) dataset = pd.read_parquet(local_path) responses = dataset[config.data.response_key] data_sources = dataset[config.data.data_source_key] reward_model_data = dataset[config.data.reward_model_key] total = len(dataset) # Initialize Ray if not ray.is_initialized(): ray.init(**OmegaConf.to_container(config.ray_kwargs.get("ray_init", {}))) # evaluate test_score based on data source data_source_reward = defaultdict(list) # Create remote tasks remote_tasks = [ process_item.remote(config, data_sources[i], responses[i], reward_model_data[i]) for i in range(total) ] # Process results as they come in with tqdm(total=total) as pbar: while len(remote_tasks) > 0: # Use ray.wait to get completed tasks done_ids, remote_tasks = ray.wait(remote_tasks) for result_id in done_ids: data_source, score = ray.get(result_id) data_source_reward[data_source].append(score) pbar.update(1) metric_dict = {} for data_source, rewards in data_source_reward.items(): metric_dict[f"test_score/{data_source}"] = np.mean(rewards) print(metric_dict) if __name__ == "__main__": main() ================================================ FILE: verl_distillation/recipe/r1/reward_score.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
def reward_func(data_source, solution_str, ground_truth, extra_info=None): if data_source in ["Maxwell-Jia/AIME_2024", "opencompass/cnmo2024_en", "opencompass/cnmo2024_zh"]: from recipe.r1.tasks import math_reward return math_reward.compute_score(solution_str, ground_truth) elif data_source == "Idavidrein/gpqa": from recipe.r1.tasks import gpqa return gpqa.compute_score(solution_str, ground_truth) elif data_source in ["livecodebench/code_generation_lite", "livecodebench/code_generation"]: from recipe.r1.tasks import livecodebench return livecodebench.compute_score(solution_str, ground_truth) else: raise NotImplementedError ================================================ FILE: verl_distillation/recipe/r1/run_r1_distill_qwen.sh ================================================ MODEL_PATH=Qwen/DeepSeek-R1-Distill-Qwen-1.5B DATA_PATH=/workspace/datasets/r1_bench # Eval Data Process python3 -m recipe.r1.data_process \ --local_dir $DATA_PATH \ --tasks all # Generation python3 -m verl.trainer.main_generation \ trainer.nnodes=1 \ trainer.n_gpus_per_node=8 \ data.path=$DATA_PATH/test.parquet \ data.prompt_key=prompt \ data.batch_size=1024 \ data.n_samples=8 \ data.output_path=$DATA_PATH/test-output-8.parquet \ model.path=$MODEL_PATH \ rollout.temperature=0.6 \ rollout.top_p=0.95 \ rollout.prompt_length=1024 \ rollout.response_length=32768 \ rollout.tensor_model_parallel_size=1 \ rollout.gpu_memory_utilization=0.9 \ rollout.max_num_batched_tokens=65536 # Evaluation python3 -m recipe.r1.main_eval \ data.path=$DATA_PATH/test-output-8.parquet \ data.prompt_key=prompt \ data.response_key=responses \ custom_reward_function.path=recipe/r1/reward_score.py \ custom_reward_function.name=reward_func ================================================ FILE: verl_distillation/recipe/r1/tasks/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: verl_distillation/recipe/r1/tasks/gpqa.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re # Extraction Template from https://github.com/openai/simple-evals/blob/90e3e821cabba2aeb6be651dcb662b253df04225/common.py#L25 ANSWER_PATTERN_MULTICHOICE = r"(?i)Answer[ \t]*:[ \t]*\$?([A-D])\$?" 
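# Illustrative matches for the pattern above:
#   re.search(ANSWER_PATTERN_MULTICHOICE, "... Answer: C").group(1)  -> "C"
#   re.search(ANSWER_PATTERN_MULTICHOICE, "answer: $B$").group(1)    -> "B"
# i.e. matching is case-insensitive and tolerates LaTeX-style dollar signs around the letter.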
def compute_score(solution_str, ground_truth) -> float: match = re.search(ANSWER_PATTERN_MULTICHOICE, solution_str) extracted_answer = match.group(1) if match else None score = 1.0 if extracted_answer == ground_truth else 0.0 return score ================================================ FILE: verl_distillation/recipe/r1/tasks/livecodebench.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import base64 import json import multiprocessing import pickle import zlib # Reuse `run_test` for convenience from verl.utils.reward_score.prime_code.testing_util import run_test def _temp_run(in_outs, generation, debug, result, metadata_list, timeout): res, metadata = run_test(in_outs, test=generation, debug=debug, timeout=timeout) result.append(res) metadata_list.append(metadata) def check_correctness(in_outs, generation, timeout, debug=True): """Check correctness of code generation with a global timeout. The global timeout is to catch some extreme/rare cases not handled by the timeouts inside `run_test`""" manager = multiprocessing.Manager() result = manager.list() metadata_list = manager.list() p = multiprocessing.Process( target=_temp_run, args=(in_outs, generation, debug, result, metadata_list, timeout), ) p.start() p.join(timeout=(timeout + 1) * len(in_outs["inputs"]) + 5) if p.is_alive(): p.kill() if not result: # consider that all tests failed result = [[-1 for i in range(len(in_outs["inputs"]))]] if debug: print("global timeout") return result[0], metadata_list[0] def compute_score(completion, test_cases): solution = completion.split("```python")[-1].split("```")[0] # extract test cases try: in_outs = json.loads(test_cases) except Exception as e: print(f"Error loading test cases: {e}") in_outs = json.loads(pickle.loads(zlib.decompress(base64.b64decode(test_cases.encode("utf-8"))))) success = False try: res, metadata = check_correctness(in_outs=in_outs, generation=solution, timeout=6, debug=False) success = all(map(lambda x: x is True, res)) except Exception: pass return success ================================================ FILE: verl_distillation/recipe/r1/tasks/math_reward.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
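# Usage sketch (illustrative values): compute_score("... so the answer is \\boxed{42}", "42")
# returns 1.0 when math-verify parses and matches both sides, and 0.0 otherwise; any parsing
# exception is suppressed and scored as 0.0.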
import contextlib

try:
    from math_verify.metric import math_metric
    from math_verify.parser import ExprExtractionConfig, LatexExtractionConfig
except ImportError:
    print("To use Math-Verify, please install it first by running `pip install math-verify`.")


def compute_score(model_output: str, ground_truth: str) -> bool:
    verify_func = math_metric(
        gold_extraction_target=(LatexExtractionConfig(),),
        pred_extraction_target=(ExprExtractionConfig(), LatexExtractionConfig()),
    )
    ret_score = 0.0

    # Wrap the ground truth in \boxed{} format for verification
    ground_truth_boxed = "\\boxed{" + ground_truth + "}"
    with contextlib.suppress(Exception):
        ret_score, _ = verify_func([ground_truth_boxed], [model_output])

    return ret_score


================================================
FILE: verl_distillation/recipe/retool/README.md
================================================
# ReTool

[ReTool: Reinforcement Learning for Strategic Tool Use in LLMs](https://arxiv.org/abs/2504.11536)

## Overview

- Base model: [Qwen/Qwen2.5-32B-Instruct](https://huggingface.co/Qwen/Qwen2.5-32B-Instruct)
- SFT dataset: [JoeYing/ReTool-SFT](https://huggingface.co/datasets/JoeYing/ReTool-SFT)
- RL dataset: [BytedTsinghua-SIA/DAPO-Math-17k](https://huggingface.co/datasets/BytedTsinghua-SIA/DAPO-Math-17k)
- Val dataset: [yentinglin/aime_2025](https://huggingface.co/datasets/yentinglin/aime_2025)

## How it works

ReTool's workflow is divided into two key phases:

1. Cold start and supervised fine-tuning (SFT). The data generation pipeline builds a high-quality dataset of code-augmented reasoning trajectories, and supervised fine-tuning teaches the model basic tool calling (e.g., code execution) and how to analyze the execution results.

2. Dynamic interaction and policy optimization (RL). Using the verl reinforcement learning framework, the model dynamically inserts code blocks during inference and interacts with a sandbox environment in real time, producing hybrid trajectories of natural-language reasoning and code snippets. When a code-termination marker is detected, the code is sent to the sandbox for asynchronous execution, and the execution results (success outputs or errors) are fed back to the model to guide subsequent reasoning. This "think-execute-feedback" cycle, combined with a reward based on final-answer accuracy, lets the model optimize its tool-call strategy on its own and improves both reasoning efficiency and computational accuracy.

## SFT

1. Data preparation

```bash
python3 recipe/retool/retool_sft_preprocess.py
```

2. Training

```bash
bash recipe/retool/run_qwen2-32b_sft.sh
```

After 6 epochs, validation metrics:

- val-core/aime_2025/acc/mean@30: 0.24
- val-aux/num_turns/mean: 7.2

## RL

### GRPO

```bash
bash recipe/retool/run_qwen2-32b_dapo.sh
```

After 150 steps, validation metrics:

- val-core/aime_2025/acc/mean@30: 0.6
- val-aux/num_turns/mean: 10

### PPO

```bash
bash recipe/retool/run_qwen2-32b_ppo.sh
```

After 250 steps, validation metrics:

- val-core/aime_2025/acc/mean@30: 0.55
- val-aux/num_turns/mean: 8.3


================================================
FILE: verl_distillation/recipe/retool/retool.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import re from typing import Any import datasets from verl.tools.base_tool import OpenAIFunctionToolSchema from verl.tools.sandbox_fusion_tools import SandboxFusionTool from verl.utils.dataset import RLHFDataset from verl.utils.reward_score import math_dapo from verl.utils.rollout_trace import rollout_trace_op logger = logging.getLogger(__name__) class CustomSandboxFusionTool(SandboxFusionTool): def __init__(self, config: dict, tool_schema: OpenAIFunctionToolSchema): super().__init__(config, tool_schema) self.code_pattern = re.compile(r"```python(.*?)```", re.DOTALL) @rollout_trace_op async def execute(self, instance_id: str, parameters: dict[str, Any], **kwargs) -> tuple[str, float, dict]: code = parameters["code"] matches = self.code_pattern.findall(code) if matches: code = matches[0].strip() # NOTE: some script may not explicitly print result, we need to add a print statement to the end of the script lines = code.split("\n") for i, line in reversed(list(enumerate(lines))): if line == "": continue if not lines[i].startswith("print"): lines[i] = f"print({line})" break code = "\n".join(lines) timeout = parameters.get("timeout", self.default_timeout) language = parameters.get("language", self.default_language) if not isinstance(code, str): code = str(code) result = await self.execution_pool.execute.remote(self.execute_code, instance_id, code, timeout, language) # sandbox has no score or metrics, use Nones return result, None, None answer_format = """\nThe answer format must be: \\boxed{'The final answer goes here.'}""" class CustomRLHFDataset(RLHFDataset): """Custom dataset class to process Maxwell-Jia/AIME_2024, yentinglin/aime_2025 datasets.""" def _read_files_and_tokenize(self): dataframes = [] for parquet_file in self.data_files: # read parquet files and cache dataframe = datasets.load_dataset(parquet_file)["train"] data_source = "/".join(parquet_file.split("/")[-2:]) if data_source in ["Maxwell-Jia/AIME_2024", "yentinglin/aime_2025"]: dataframe = dataframe.map( self.map_fn, fn_kwargs={"data_source": data_source}, remove_columns=dataframe.column_names ) else: dataframe = dataframe.map(self.map_fn2, num_proc=16) dataframes.append(dataframe) self.dataframe: datasets.Dataset = datasets.concatenate_datasets(dataframes) print(f"dataset len: {len(self.dataframe)}") def map_fn(self, row: dict, *, data_source: str = None): if data_source == "Maxwell-Jia/AIME_2024": problem, answer = row["Problem"], row["Answer"] elif data_source == "yentinglin/aime_2025": problem, answer = row["problem"], row["answer"] prompt = problem + answer_format data = { "data_source": data_source.split("/")[1].lower(), # aime_2024, aime_2025 "prompt": [{"role": "user", "content": prompt}], "ability": "MATH", "reward_model": {"ground_truth": str(answer)}, "agent_name": "tool_agent", } return data def map_fn2(self, row: dict): content = row["prompt"][0]["content"] row["prompt"][0]["content"] = content + answer_format row["agent_name"] = "tool_agent" return row def compute_score(data_source, solution_str, ground_truth, extra_info): # use \\boxed{...} answer result = math_dapo.compute_score(solution_str, 
ground_truth, strict_box_verify=True)

    # encourage model to call tools
    num_turns = extra_info["num_turns"]
    if result["score"] < 0:
        tool_call_reward = (num_turns - 2) / 2 * 0.1
        result["score"] = min(-0.6, result["score"] + tool_call_reward)

    if result["pred"] is None:
        result["pred"] = ""

    return result

================================================
FILE: verl_distillation/recipe/retool/retool_sft_preprocess.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Convert JoeYing/ReTool-SFT to standard multi-turn tool calling messages.
"""

import json
import os
import re
from typing import Any

import datasets
from omegaconf import OmegaConf

code_pattern = re.compile(r"```python(.*?)```", re.DOTALL)


def extract_code_message(content: str) -> tuple[dict[str, Any], str]:
    # NOTE: the tag literals were lost in extraction; "<code>"/"</code>" is
    # reconstructed from the function's purpose.
    start, stop = "<code>", "</code>"
    i = content.find(start)
    if i == -1:
        return None, content
    j = content.find(stop)
    assert j > i
    code = content[i + len(start) : j]
    matches = code_pattern.findall(code)
    if matches:
        code = matches[0].strip()
    message = {
        "role": "assistant",
        "content": content[:i].strip(),
        "tool_calls": [
            {
                "type": "function",
                "function": {
                    "name": "code_interpreter",
                    "arguments": {"code": code},
                },
            },
        ],
    }
    return message, content[j + len(stop) :]


def extract_answer_message(content: str) -> tuple[dict[str, Any], str]:
    # NOTE: tag literals reconstructed as "<answer>"/"</answer>" (lost in extraction).
    start, stop = "<answer>", "</answer>"
    i = content.find(start)
    if i == -1:
        return None, content
    j = content.find(stop)
    assert j > i
    answer = content[:i] + content[i + len(start) : j]
    message = {
        "role": "assistant",
        "content": answer.strip(),
    }
    return message, content[j + len(stop) :]


def extract_interpreter_message(content: str) -> tuple[dict[str, Any], str]:
    # NOTE: tag literals reconstructed as "<interpreter>"/"</interpreter>" (lost in extraction).
    start, stop = "<interpreter>", "</interpreter>"
    i = content.find(start)
    if i == -1:
        return None, content
    j = content.find(stop)
    assert j > i
    interpreter = content[i + len(start) : j]
    message = {
        "role": "tool",
        "content": interpreter.strip(),
    }
    return message, content[j + len(stop) :]


def process(row: dict, *, tools: str):
    messages = []

    # extract problem
    content = row["messages"][0]["content"]
    start = "*user question:*"
    i = content.find(start)
    assert i != -1
    # NOTE: the tag arguments to the two replace() calls below were lost in
    # extraction and are left empty as found.
    prompt = content[i + len(start) :].replace("", "").replace("", "").strip()
    messages.append(
        {
            "role": "user",
            "content": prompt,
        }
    )

    # extract multi turns
    content = row["messages"][1]["content"]
    role = "assistant"
    while len(content) > 0:
        if role == "assistant":
            message, content = extract_code_message(content)
            if message is None:
                message, content = extract_answer_message(content)
                assert message is not None
            messages.append(message)
            role = "tool"
        else:
            message, content = extract_interpreter_message(content)
            assert message is not None
            messages.append(message)
            role = "assistant"

    tools = json.loads(tools)
    return {"messages": messages, "tools": tools}


if __name__ == "__main__":
    tools_config_file = "recipe/retool/sandbox_fusion_tool_config.yaml"
    tools_config = OmegaConf.load(tools_config_file)
    tool_schema = OmegaConf.to_container(tools_config["tools"][0]["tool_schema"])
    tools
= json.dumps([tool_schema]) data = datasets.load_dataset("JoeYing/ReTool-SFT")["train"] data = data.map(process, fn_kwargs={"tools": tools}) save_path = os.path.expanduser("~/ReTool-SFT/data/train-00000-of-00001.parquet") data.to_parquet(save_path) ================================================ FILE: verl_distillation/recipe/retool/run_gpt_oss_ppo.sh ================================================ set -x # ================= data/model/tool ================= HDFS_ROOT=${HDFS_ROOT:-$PWD} DATA_ROOT=${DATA_ROOT:-$PWD} dapo_math_17k=$DATA_ROOT/dataset/BytedTsinghua-SIA/DAPO-Math-17k aime_2024=$DATA_ROOT/dataset/Maxwell-Jia/AIME_2024 aime_2025=$DATA_ROOT/dataset/yentinglin/aime_2025 actor_model_path=lmsys/gpt-oss-20b-bf16 critic_model_path=$actor_model_path train_files="['$dapo_math_17k']" test_files="['$aime_2025']" # tool tool_config_path=recipe/retool/sandbox_fusion_tool_config.yaml # wandb project_name=wuxibin_retool experiment_name=gpt-oss-20b-bf16_ppo default_local_dir=$DATA_ROOT/checkpoint/$experiment_name # ================= algorithm ================= adv_estimator=gae use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 max_turns=8 max_prompt_length=2048 max_response_length=16384 actor_lr=1e-6 critic_lr=2e-6 gae_gamma=1.0 gae_lam=1.0 critic_warmup=20 train_batch_size=512 ppo_mini_batch_size=512 n_resp_per_prompt_val=30 # ================= perfomance ================= infer_tp=4 # vllm train_sp=4 # train offload=True actor_max_token_len_per_gpu=$(( (max_prompt_length + max_response_length) * 2 )) critic_max_token_len_per_gpu=$(( (max_prompt_length + max_response_length) * 4 )) python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=$adv_estimator \ algorithm.use_kl_in_reward=$use_kl_in_reward \ algorithm.kl_ctrl.kl_coef=$kl_coef \ algorithm.gamma=$gae_gamma \ algorithm.lam=$gae_lam \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.return_raw_chat=True \ data.train_batch_size=$train_batch_size \ data.max_prompt_length=$max_prompt_length \ data.max_response_length=$max_response_length \ data.filter_overlong_prompts=True \ +data.apply_chat_template_kwargs.reasoning_effort=medium \ data.truncation='error' \ data.custom_cls.path=recipe/retool/retool.py \ data.custom_cls.name=CustomRLHFDataset \ custom_reward_function.path=recipe/retool/retool.py \ custom_reward_function.name=compute_score \ actor_rollout_ref.model.path=$actor_model_path \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.use_kl_loss=$use_kl_loss \ actor_rollout_ref.actor.kl_loss_coef=$kl_loss_coef \ actor_rollout_ref.actor.clip_ratio_low=$clip_ratio_low \ actor_rollout_ref.actor.clip_ratio_high=$clip_ratio_high \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.actor.optim.lr=$actor_lr \ actor_rollout_ref.actor.use_dynamic_bsz=True \ actor_rollout_ref.actor.ppo_mini_batch_size=$ppo_mini_batch_size \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=$actor_max_token_len_per_gpu \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=$train_sp \ actor_rollout_ref.actor.fsdp_config.param_offload=$offload \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=$offload \ actor_rollout_ref.rollout.name=sglang \ actor_rollout_ref.rollout.mode=async \ actor_rollout_ref.rollout.tensor_model_parallel_size=$infer_tp \ actor_rollout_ref.rollout.multi_turn.enable=True \ actor_rollout_ref.rollout.multi_turn.max_user_turns=$max_turns \ 
actor_rollout_ref.rollout.multi_turn.max_assistant_turns=$max_turns \ actor_rollout_ref.rollout.multi_turn.tool_config_path=$tool_config_path \ actor_rollout_ref.rollout.multi_turn.format=gpt-oss \ +actor_rollout_ref.rollout.engine_kwargs.sglang.attention_backend=triton \ actor_rollout_ref.rollout.gpu_memory_utilization=0.8 \ actor_rollout_ref.rollout.val_kwargs.top_p=1.0 \ actor_rollout_ref.rollout.val_kwargs.temperature=1.0 \ actor_rollout_ref.rollout.val_kwargs.n=$n_resp_per_prompt_val \ critic.optim.lr=$critic_lr \ critic.model.use_remove_padding=True \ critic.model.path=$critic_model_path \ critic.model.enable_gradient_checkpointing=True \ critic.ppo_max_token_len_per_gpu=$critic_max_token_len_per_gpu \ critic.ulysses_sequence_parallel_size=$train_sp \ critic.model.fsdp_config.param_offload=$offload \ critic.model.fsdp_config.optimizer_offload=$offload \ trainer.critic_warmup=$critic_warmup \ trainer.logger=['console','wandb'] \ trainer.project_name=$project_name \ trainer.experiment_name=$experiment_name \ trainer.n_gpus_per_node=8 \ trainer.val_before_train=True \ trainer.log_val_generations=100 \ trainer.nnodes=2 \ trainer.save_freq=30 \ trainer.default_local_dir=$default_local_dir \ trainer.test_freq=5 \ trainer.total_epochs=1 $@ ================================================ FILE: verl_distillation/recipe/retool/run_qwen2-32b_dapo.sh ================================================ set -x # ================= data/model/tool ================= HDFS_ROOT=${HDFS_ROOT:-$PWD} DATA_ROOT=${DATA_ROOT:-$PWD} dapo_math_17k=$DATA_ROOT/dataset/BytedTsinghua-SIA/DAPO-Math-17k aime_2024=$DATA_ROOT/dataset/Maxwell-Jia/AIME_2024 aime_2025=$DATA_ROOT/dataset/yentinglin/aime_2025 model_path=$HDFS_ROOT/checkpoint/multiturn-sft-qwen-2.5-32b-instruct/global_step_372 train_files="['$dapo_math_17k']" test_files="['$aime_2025']" # tool tool_config_path=recipe/retool/sandbox_fusion_tool_config.yaml # wandb project_name=wuxibin_retool experiment_name=qwen2.5-32b_dapo default_local_dir=$DATA_ROOT/checkpoint/$experiment_name # ================= algorithm ================= adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 max_turns=8 max_prompt_length=2048 max_response_length=16384 actor_lr=1e-6 train_batch_size=512 ppo_mini_batch_size=64 n_resp_per_prompt=16 n_resp_per_prompt_val=30 # ================= perfomance ================= infer_tp=4 # vllm train_sp=8 # train offload=True actor_max_token_len_per_gpu=$(( (max_prompt_length + max_response_length) * 1 )) log_prob_max_token_len_per_gpu=$(( actor_max_token_len_per_gpu * 4 )) python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=$adv_estimator \ algorithm.use_kl_in_reward=$use_kl_in_reward \ algorithm.kl_ctrl.kl_coef=$kl_coef \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.return_raw_chat=True \ data.train_batch_size=$train_batch_size \ data.max_prompt_length=$max_prompt_length \ data.max_response_length=$max_response_length \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.custom_cls.path=recipe/retool/retool.py \ data.custom_cls.name=CustomRLHFDataset \ custom_reward_function.path=recipe/retool/retool.py \ custom_reward_function.name=compute_score \ actor_rollout_ref.model.path=$model_path \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.use_kl_loss=$use_kl_loss \ actor_rollout_ref.actor.kl_loss_coef=$kl_loss_coef \ 
actor_rollout_ref.actor.clip_ratio_low=$clip_ratio_low \ actor_rollout_ref.actor.clip_ratio_high=$clip_ratio_high \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.actor.optim.lr=$actor_lr \ actor_rollout_ref.actor.use_dynamic_bsz=True \ actor_rollout_ref.actor.ppo_mini_batch_size=$ppo_mini_batch_size \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=$actor_max_token_len_per_gpu \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=$train_sp \ actor_rollout_ref.actor.fsdp_config.param_offload=$offload \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=$offload \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=$log_prob_max_token_len_per_gpu \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.mode=async \ actor_rollout_ref.rollout.tensor_model_parallel_size=$infer_tp \ actor_rollout_ref.rollout.multi_turn.enable=True \ actor_rollout_ref.rollout.multi_turn.max_user_turns=$max_turns \ actor_rollout_ref.rollout.multi_turn.max_assistant_turns=$max_turns \ actor_rollout_ref.rollout.multi_turn.tool_config_path=$tool_config_path \ actor_rollout_ref.rollout.multi_turn.format=hermes \ actor_rollout_ref.rollout.gpu_memory_utilization=0.9 \ actor_rollout_ref.rollout.n=$n_resp_per_prompt \ actor_rollout_ref.rollout.val_kwargs.top_p=0.6 \ actor_rollout_ref.rollout.val_kwargs.temperature=1.0 \ actor_rollout_ref.rollout.val_kwargs.n=$n_resp_per_prompt_val \ trainer.logger=['console','wandb'] \ trainer.project_name=$project_name \ trainer.experiment_name=$experiment_name \ trainer.n_gpus_per_node=8 \ trainer.val_before_train=True \ trainer.log_val_generations=100 \ trainer.nnodes=2 \ trainer.save_freq=30 \ trainer.default_local_dir=$default_local_dir \ trainer.test_freq=5 \ trainer.total_epochs=1 $@ ================================================ FILE: verl_distillation/recipe/retool/run_qwen2-32b_ppo.sh ================================================ set -x # ================= data/model/tool ================= HDFS_ROOT=${HDFS_ROOT:-$PWD} DATA_ROOT=${DATA_ROOT:-$PWD} dapo_math_17k=$DATA_ROOT/dataset/BytedTsinghua-SIA/DAPO-Math-17k aime_2024=$DATA_ROOT/dataset/Maxwell-Jia/AIME_2024 aime_2025=$DATA_ROOT/dataset/yentinglin/aime_2025 actor_model_path=$HDFS_ROOT/checkpoint/multiturn-sft-qwen-2.5-32b-instruct/global_step_372 critic_model_path=$actor_model_path train_files="['$dapo_math_17k']" test_files="['$aime_2025']" # tool tool_config_path=recipe/retool/sandbox_fusion_tool_config.yaml # wandb project_name=wuxibin_retool experiment_name=qwen2.5-32b_ppo default_local_dir=$DATA_ROOT/checkpoint/$experiment_name # ================= algorithm ================= adv_estimator=gae use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 max_turns=8 max_prompt_length=2048 max_response_length=16384 actor_lr=1e-6 critic_lr=2e-6 gae_gamma=1.0 gae_lam=1.0 critic_warmup=20 train_batch_size=1024 ppo_mini_batch_size=256 n_resp_per_prompt_val=30 # ================= perfomance ================= infer_tp=4 # vllm train_sp=4 # train offload=True actor_max_token_len_per_gpu=$(( (max_prompt_length + max_response_length) * 2 )) critic_max_token_len_per_gpu=$(( (max_prompt_length + max_response_length) * 4 )) python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=$adv_estimator \ algorithm.use_kl_in_reward=$use_kl_in_reward \ algorithm.kl_ctrl.kl_coef=$kl_coef \ algorithm.gamma=$gae_gamma \ algorithm.lam=$gae_lam \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.return_raw_chat=True \ 
data.train_batch_size=$train_batch_size \ data.max_prompt_length=$max_prompt_length \ data.max_response_length=$max_response_length \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.custom_cls.path=recipe/retool/retool.py \ data.custom_cls.name=CustomRLHFDataset \ custom_reward_function.path=recipe/retool/retool.py \ custom_reward_function.name=compute_score \ actor_rollout_ref.model.path=$actor_model_path \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.use_kl_loss=$use_kl_loss \ actor_rollout_ref.actor.kl_loss_coef=$kl_loss_coef \ actor_rollout_ref.actor.clip_ratio_low=$clip_ratio_low \ actor_rollout_ref.actor.clip_ratio_high=$clip_ratio_high \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.actor.optim.lr=$actor_lr \ actor_rollout_ref.actor.use_dynamic_bsz=True \ actor_rollout_ref.actor.ppo_mini_batch_size=$ppo_mini_batch_size \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=$actor_max_token_len_per_gpu \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=$train_sp \ actor_rollout_ref.actor.fsdp_config.param_offload=$offload \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=$offload \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.mode=async \ actor_rollout_ref.rollout.tensor_model_parallel_size=$infer_tp \ actor_rollout_ref.rollout.multi_turn.enable=True \ actor_rollout_ref.rollout.multi_turn.max_user_turns=$max_turns \ actor_rollout_ref.rollout.multi_turn.max_assistant_turns=$max_turns \ actor_rollout_ref.rollout.multi_turn.tool_config_path=$tool_config_path \ actor_rollout_ref.rollout.multi_turn.format=hermes \ actor_rollout_ref.rollout.gpu_memory_utilization=0.9 \ actor_rollout_ref.rollout.val_kwargs.top_p=0.6 \ actor_rollout_ref.rollout.val_kwargs.temperature=1.0 \ actor_rollout_ref.rollout.val_kwargs.n=$n_resp_per_prompt_val \ critic.optim.lr=$critic_lr \ critic.model.use_remove_padding=True \ critic.model.path=$critic_model_path \ critic.model.enable_gradient_checkpointing=True \ critic.ppo_max_token_len_per_gpu=$critic_max_token_len_per_gpu \ critic.ulysses_sequence_parallel_size=$train_sp \ critic.model.fsdp_config.param_offload=$offload \ critic.model.fsdp_config.optimizer_offload=$offload \ trainer.critic_warmup=$critic_warmup \ trainer.logger=['console','wandb'] \ trainer.project_name=$project_name \ trainer.experiment_name=$experiment_name \ trainer.n_gpus_per_node=8 \ trainer.val_before_train=True \ trainer.log_val_generations=100 \ trainer.nnodes=2 \ trainer.save_freq=30 \ trainer.default_local_dir=$default_local_dir \ trainer.test_freq=5 \ trainer.total_epochs=1 $@ ================================================ FILE: verl_distillation/recipe/retool/run_qwen2-32b_sft.sh ================================================ #!/bin/bash set -x nnodes=2 nproc_per_node=8 master_addr= master_port= experiment_name=multiturn-sft-qwen-2.5-32b-instruct HDFS_ROOT=${HDFS_ROOT:-$PWD} DATA_ROOT=${DATA_ROOT:-$PWD} TRAIN_DATA=$DATA_ROOT/dataset/wuxibin/ReTool-SFT/data/train-00000-of-00001.parquet EVAL_DATA=$DATA_ROOT/dataset/wuxibin/ReTool-SFT/data/train-00000-of-00001.parquet MODEL_PATH=$HDFS_ROOT/model/Qwen2.5-32B-Instruct SAVE_PATH=$DATA_ROOT/checkpoint/$experiment_name torchrun --nnodes=$nnodes \ --nproc_per_node=$nproc_per_node \ --master-addr=$master_addr \ --master-port=$master_port \ --node-rank=$node_rank \ -m verl.trainer.fsdp_sft_trainer \ data.train_files=$TRAIN_DATA \ data.val_files=$EVAL_DATA \ data.max_length=16384 \ 
data.train_batch_size=32 \ data.multiturn.enable=true \ data.multiturn.messages_key=messages \ data.multiturn.tools_key=tools \ data.micro_batch_size_per_gpu=4 \ model.partial_pretrain=$MODEL_PATH \ model.strategy=fsdp \ trainer.default_local_dir=$SAVE_PATH \ trainer.project_name=wuxibin-multiturn-sft \ trainer.experiment_name=$experiment_name \ trainer.logger='["console","wandb"]' \ trainer.total_epochs=6 \ ulysses_sequence_parallel_size=4 \ use_remove_padding=true ================================================ FILE: verl_distillation/recipe/retool/run_qwen2_7b_dapo.sh ================================================ set -x export VLLM_USE_V1=1 # ================= data/model/tool ================= HDFS_ROOT=${HDFS_ROOT:-$PWD} DATA_ROOT=${DATA_ROOT:-$PWD} dapo_math_17k=$DATA_ROOT/dataset/BytedTsinghua-SIA/DAPO-Math-17k aime_2024=$DATA_ROOT/dataset/Maxwell-Jia/AIME_2024 aime_2025=$DATA_ROOT/dataset/yentinglin/aime_2025 model_path=$HDFS_ROOT/checkpoint/multiturn-sft-qwen-2.5-7b-instruct/global_step_372 train_files="['$dapo_math_17k']" test_files="['$aime_2025', '$aime_2024']" # tool tool_config_path=recipe/retool/sandbox_fusion_tool_config.yaml # wandb project_name=retool experiment_name=qwen2.5-7b_dapo default_local_dir=$DATA_ROOT/checkpoint/$experiment_name # ================= algorithm ================= adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 max_turns=16 max_prompt_length=2048 max_response_length=16384 actor_lr=1e-6 train_batch_size=64 ppo_mini_batch_size=16 n_resp_per_prompt=16 n_resp_per_prompt_val=30 # ================= perfomance ================= infer_tp=4 # vllm train_sp=4 # train offload=True actor_max_token_len_per_gpu=$(( (max_prompt_length + max_response_length) * 1 )) log_prob_max_token_len_per_gpu=$(( actor_max_token_len_per_gpu * 4 )) python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=$adv_estimator \ algorithm.use_kl_in_reward=$use_kl_in_reward \ algorithm.kl_ctrl.kl_coef=$kl_coef \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.return_raw_chat=True \ data.train_batch_size=$train_batch_size \ data.max_prompt_length=$max_prompt_length \ data.max_response_length=$max_response_length \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.custom_cls.path=recipe/retool/retool.py \ data.custom_cls.name=CustomRLHFDataset \ custom_reward_function.path=recipe/retool/retool.py \ custom_reward_function.name=compute_score \ actor_rollout_ref.model.path=$model_path \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.use_kl_loss=$use_kl_loss \ actor_rollout_ref.actor.kl_loss_coef=$kl_loss_coef \ actor_rollout_ref.actor.clip_ratio_low=$clip_ratio_low \ actor_rollout_ref.actor.clip_ratio_high=$clip_ratio_high \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.actor.optim.lr=$actor_lr \ actor_rollout_ref.actor.use_dynamic_bsz=True \ actor_rollout_ref.actor.ppo_mini_batch_size=$ppo_mini_batch_size \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=$actor_max_token_len_per_gpu \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=$train_sp \ actor_rollout_ref.actor.fsdp_config.param_offload=$offload \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=$offload \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=$log_prob_max_token_len_per_gpu \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.mode=async \ 
actor_rollout_ref.rollout.tensor_model_parallel_size=$infer_tp \ actor_rollout_ref.rollout.multi_turn.enable=True \ actor_rollout_ref.rollout.multi_turn.max_user_turns=$max_turns \ actor_rollout_ref.rollout.multi_turn.max_assistant_turns=$max_turns \ actor_rollout_ref.rollout.multi_turn.tool_config_path=$tool_config_path \ actor_rollout_ref.rollout.multi_turn.format=hermes \ actor_rollout_ref.rollout.gpu_memory_utilization=0.9 \ actor_rollout_ref.rollout.n=$n_resp_per_prompt \ actor_rollout_ref.rollout.val_kwargs.top_p=0.6 \ actor_rollout_ref.rollout.val_kwargs.temperature=1.0 \ actor_rollout_ref.rollout.val_kwargs.n=$n_resp_per_prompt_val \ trainer.logger=['console','wandb'] \ trainer.project_name=$project_name \ trainer.experiment_name=$experiment_name \ trainer.n_gpus_per_node=8 \ trainer.val_before_train=True \ trainer.log_val_generations=20 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.default_local_dir=$default_local_dir \ trainer.test_freq=10 \ trainer.total_epochs=1 $@ ================================================ FILE: verl_distillation/recipe/retool/run_qwen2_7b_sft.sh ================================================ #!/bin/bash set -x nnodes=1 nproc_per_node=8 master_addr= master_port= node_rank=${ARNOLD_ID:-0} project_name=retool experiment_name=multiturn-sft-qwen-2.5-7b-instruct HDFS_ROOT=${HDFS_ROOT:-$PWD} DATA_ROOT=${DATA_ROOT:-$PWD} TRAIN_DATA=$DATA_ROOT/dataset/wuxibin/ReTool-SFT/data/train-00000-of-00001.parquet EVAL_DATA=$DATA_ROOT/dataset/wuxibin/ReTool-SFT/data/train-00000-of-00001.parquet MODEL_PATH=$HDFS_ROOT/model/Qwen2.5-7B-Instruct SAVE_PATH=$DATA_ROOT/checkpoint/$experiment_name torchrun --nnodes=$nnodes \ --nproc_per_node=$nproc_per_node \ --master-addr=$master_addr \ --master-port=$master_port \ --node-rank=$node_rank \ -m verl.trainer.fsdp_sft_trainer \ data.train_files=$TRAIN_DATA \ data.val_files=$EVAL_DATA \ data.max_length=16384 \ data.train_batch_size=32 \ data.multiturn.enable=true \ data.multiturn.messages_key=messages \ data.multiturn.tools_key=tools \ data.micro_batch_size_per_gpu=4 \ model.partial_pretrain=$MODEL_PATH \ model.strategy=fsdp \ trainer.default_local_dir=$SAVE_PATH \ trainer.project_name=wuxibin-multiturn-sft \ trainer.experiment_name=$experiment_name \ trainer.logger='["console","wandb"]' \ trainer.total_epochs=6 \ trainer.save_freq=62 \ ulysses_sequence_parallel_size=4 \ use_remove_padding=true ================================================ FILE: verl_distillation/recipe/retool/run_qwen2_7b_sft_npu.sh ================================================ #!/bin/bash set -x nnodes=1 nproc_per_node=8 project_name=retool_sft experiment_name=multiturn-sft-qwen-2.5-7b-instruct TRAIN_DATA=PATH/TO/ReTool-SFT/data/train-00000-of-00001.parquet EVAL_DATA=PATH/TO/ReTool-SFT/data/train-00000-of-00001.parquet MODEL_PATH=PATH/TO/Qwen2.5-7B-Instruct SAVE_PATH=PATH/TO/checkpoint/$experiment_name torchrun --nnodes=$nnodes \ --nproc_per_node=$nproc_per_node \ -m verl.trainer.fsdp_sft_trainer \ data.train_files=$TRAIN_DATA \ data.val_files=$EVAL_DATA \ data.max_length=16384 \ data.train_batch_size=64 \ data.multiturn.enable=true \ data.multiturn.messages_key=messages \ data.multiturn.tools_key=tools \ data.micro_batch_size_per_gpu=8 \ model.partial_pretrain=$MODEL_PATH \ model.strategy=fsdp \ trainer.default_local_dir=$SAVE_PATH \ trainer.project_name=$project_name \ trainer.experiment_name=$experiment_name \ trainer.logger='["console"]' \ trainer.total_epochs=6 \ trainer.save_freq=10 \ trainer.device=npu \ ulysses_sequence_parallel_size=4 \ 
use_remove_padding=true

================================================
FILE: verl_distillation/recipe/retool/sandbox_fusion_tool_config.yaml
================================================
tools:
  - class_name: "recipe.retool.retool.CustomSandboxFusionTool"
    config:
      sandbox_fusion_url: "http://localhost:8080/run_code"
      num_workers: 128
      enable_global_rate_limit: true
      rate_limit: 128
      default_timeout: 30
      default_language: "python"
      memory_limit_mb: 1024
      type: native
    tool_schema:
      type: "function"
      function:
        name: "code_interpreter"
        description: "A tool for executing code."
        parameters:
          type: "object"
          properties:
            code:
              type: "string"
              description: "The code to execute."
          required: ["code"]

================================================
FILE: verl_distillation/recipe/spin/README.md
================================================
# SPIN: Self-Play Fine-Tuning Converts Weak Language Models to Strong Language Models

This repository hosts a `verl` recipe inspired by the paper **"Self-Play Fine-Tuning Converts Weak Language Models to Strong Language Models"** (SPIN). SPIN is a language-model fine-tuning algorithm that enables iterative self-improvement through a self-play mechanism inspired by game theory.

**Core Idea:** Models learn by playing against themselves, reducing reliance on external preference datasets or stronger teacher models:

1. **Synthetic Data Generation:** The current model generates responses, creating its own training data from previous iterations.
2. **Two-Player Game Setup:** A game between two players, both played by a single LLM.
3. **Iterative Training:** The model progressively improves by refining its policy, with each iteration's model becoming the opponent for the next iteration.

Paper Authors: [Zixiang Chen](https://github.com/uclaml/SPIN)\*, [Yihe Deng](https://github.com/uclaml/SPIN)\*, [Huizhuo Yuan](https://scholar.google.com/citations?user=8foZzX4AAAAJ)\*, [Kaixuan Ji](https://scholar.google.com/citations?user=FOoKDukAAAAJ), [Quanquan Gu](https://web.cs.ucla.edu/~qgu/)

[[Webpage](https://uclaml.github.io/SPIN/)] [[Huggingface](https://huggingface.co/papers/2401.01335)] [[Paper](https://arxiv.org/abs/2401.01335)] [[Original Implementation](https://github.com/uclaml/SPIN)]

verl Implementation Authors: [Chendong Wang](https://cdwang96.github.io/), [Chenyang Zhao](https://github.com/zhaochenyang20)

---

## Key Function (compute_online_dpo_loss) and Related Works

SPIN (Chen et al., 2024) proposes an iterative self-play mechanism to fine-tune language models. In each iteration, SPIN's training objective, when using a logistic loss function, is equivalent to the Direct Preference Optimization (DPO) loss (Rafailov et al., 2023). This `verl` recipe realizes SPIN's core concept by applying the DPO loss iteratively (Xu et al., 2023; Xiong et al., 2023; Snorkel AI, 2024): in each iteration, we fine-tune the LLM with the DPO loss for preference optimization. Notably, Xu et al. (2023) explored iterative preference optimization with the pairwise cringe loss, while Xiong et al. (2023) discussed how to bridge theory and practice for RLHF under KL constraints using iterative training. The concept of iterative preference learning was also explored in online DPO (Guo et al., 2024), which focuses on direct alignment from online AI feedback; there, preference data is dynamically updated during training, allowing the model to learn from its own generated data. Specifically, we developed the **`compute_online_dpo_loss`** function and built this SPIN recipe on top of it.
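For reference, here is a minimal, self-contained sketch of the sigmoid-form DPO objective that `compute_online_dpo_loss` optimizes; the real implementation lives in `core_algos.py` below, and the helper name and numbers here are purely illustrative:

```python
import torch
import torch.nn.functional as F


def dpo_sigmoid_loss(policy_chosen_logps, policy_rejected_logps,
                     ref_chosen_logps, ref_rejected_logps, beta=0.1):
    # Implicit reward margin: how much more the policy prefers the chosen
    # response over the rejected one, relative to the reference model.
    logits = (policy_chosen_logps - policy_rejected_logps) - (
        ref_chosen_logps - ref_rejected_logps
    )
    return -F.logsigmoid(beta * logits).mean()


# Toy check: the policy widens the chosen/rejected gap vs. the reference,
# so the loss drops below log(2) ~= 0.693 (its value at a zero margin).
loss = dpo_sigmoid_loss(
    torch.tensor([-5.0]), torch.tensor([-9.0]),  # policy logps (chosen, rejected)
    torch.tensor([-6.0]), torch.tensor([-8.0]),  # reference logps (chosen, rejected)
)
print(f"{loss.item():.3f}")  # ~0.598
```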
By incorporating online preference generation, this approach continuously refines language models without relying on fixed external preference datasets.

**Reference Papers:**

* [Self-Play Fine-Tuning Converts Weak Language Models to Strong Language Models](https://arxiv.org/abs/2401.01335) (Chen et al., 2024)
* [Direct Preference Optimization: Your Language Model is Secretly a Reward Model](https://arxiv.org/abs/2305.18290) (Rafailov et al., 2023)
* [Some things are more cringe than others: Preference optimization with the pairwise cringe loss](https://arxiv.org/abs/2312.16682) (Xu et al., 2023)
* [Iterative preference learning from human feedback: Bridging theory and practice for RLHF under KL-constraint](https://arxiv.org/abs/2312.11456) (Xiong et al., 2023)
* [Snorkel-Mistral-PairRM-DPO](https://huggingface.co/snorkelai/Snorkel-Mistral-PairRM-DPO) (Snorkel AI, 2024)
* [Direct language model alignment from online AI feedback](https://arxiv.org/abs/2402.04792) (Guo et al., 2024)

## Our Online DPO Implementation

Our `compute_online_dpo_loss` function adapts `verl`'s existing PPO infrastructure (based on `verl` v0.3.0.post1) for this iterative online DPO. Key aspects of our implementation include:

* **No Critic:** Unlike PPO, we omit the value-function critic.
* **Dynamic Reference Model:** An explicit reference policy (`ref_policy_wg`) is used for the DPO loss. This reference model's weights can be periodically updated from the actor (`ref_update_freq`), providing a dynamic baseline.
* **Online Preference Generation:** The `compute_onlinedpo_pref` function (in `core_algos.py`) dynamically creates chosen/rejected pairs based on a reward source (e.g., rule-based ranking for math problems).
* **DPO Loss Integration:** We replace PPO's policy loss with our `compute_online_dpo_loss` (in `core_algos.py`) within the actor update (`dp_actor.py`), directly optimizing the policy on the generated preferences.
* **Iterative Training Orchestration:** The `SpinTrainer` (in `spin_trainer.py`) manages the entire self-play loop: generation, preference labeling, optional reference-model updates, and policy updates, enabling continuous self-improvement aligned with SPIN's principles.

---

## Algorithm

This recipe implements an online algorithm adapted to the `verl` reinforcement learning framework, providing an alternative to PPO for fine-tuning language models.

**Online Loop:** Instead of maximizing a scalar reward signal as in PPO, this approach directly optimizes the policy model to align with preference data generated *online* during training:

1. **Generation:** The current model generates multiple responses for each prompt in a batch.
2. **Preference Labeling:** A function evaluates the generated responses to determine which one is preferred (chosen) and which is dispreferred (rejected). This can be done with a reward function or implicit ranking based on specific rules (in this recipe, rule-based ranking on math problems).
3. **Update:** The preference tuple (`prompt`, `chosen_response`, `rejected_response`) is used to update the actor model with `compute_online_dpo_loss`, comparing against a reference model.

**Connection with SPIN:** Instead of using only a fixed target data distribution, the online generation loop in step 2 dynamically changes the target data distribution through the chosen preference-labeling method (here, rule-based ranking that selects the better response on math problems).
This explores the direction mentioned in Section 7 of the SPIN paper about "dynamically changing target data distributions," potentially lifting LLM performance beyond the ceiling of fixed human-annotated data.

---

## Reproduce the Experiment (Example Setup)

The following steps outline how to set up the environment and run the SPIN recipe, based on the provided test log using GSM8K and Qwen2.5-3B-Instruct.

1. **Setup Environment (Example using Docker):**

```bash
# Start a container with GPU access and shared memory
docker run -it --name spin_test --gpus all \
    --shm-size=32g \
    --ipc=host \
    -v /path/to/host/.cache:/root/.cache \
    -e HF_TOKEN= \
    lmsysorg/sglang:latest \
    /bin/bash

# Inside the container or on your host machine:
# Ensure /tmp is writable
mkdir -p /tmp
chmod 1777 /tmp

# Install Python 3.10 (if not present) and venv
sudo apt update
sudo apt install -y python3.10 python3.10-venv tmux
python3 -m ensurepip --upgrade

# Create and activate a virtual environment
python3 -m venv ~/.python/spin_env
source ~/.python/spin_env/bin/activate

# Install uv (fast package installer)
python3 -m pip install uv
```

2. **Install verl and Dependencies:**

```bash
# Clone the verl repository and checkout the spin branch
cd ~
git clone git@github.com:volcengine/verl.git && cd verl

# Install flash-attn (handle potential build issues)
python3 -m uv pip install wheel packaging
python3 -m uv pip install flash-attn --no-build-isolation --no-deps

# Install verl with sglang extras
python3 -m uv pip install -e ".[sglang]"
```

*Note: If `flash-attn` installation fails, try the manual steps again or consult its documentation.*

3. **Login & Download Data/Model:**

```bash
# Login to Weights & Biases (optional, for logging)
export WANDB_API_KEY=
# wandb login

# Download the GSM8K dataset
python3 examples/data_preprocess/gsm8k.py --local_save_dir ~/data/gsm8k  # Adjusted path

# Download the base model (Example: Qwen2.5-3B-Instruct)
huggingface-cli download Qwen/Qwen2.5-3B-Instruct --local-dir $HOME/models/Qwen2.5-3B-Instruct
```

4. **Configure:**
   * Modify the configuration file (e.g., `config/spin_trainer.yaml` or the one specified in the run script) with correct paths to your downloaded model and data, the desired hyperparameters (`dpo_beta`, learning rate, etc.), and distributed training settings (nodes, GPUs per node).
   * Pay attention to `actor_rollout_ref.model_path`, the `data` paths, the `reward_model` config (if using one), and `trainer.ref_update_freq`.

5. **Run Training:**

```bash
# Set CUDA visible devices (adjust based on your hardware and config)
export CUDA_VISIBLE_DEVICES=0,1,2,3

# Launch the training script (e.g., test.sh or a custom script)
# Ensure test.sh points to the correct config and main script
bash recipe/spin/run_spin.sh
```

---

## Configuration

* The primary configuration is typically managed through a YAML file specified in the launch script (e.g., `config/spin_trainer.yaml`).
* Key configuration sections:
  * `data`: Paths to training/validation prompt files, batch sizes, sequence lengths.
  * `actor_rollout_ref`: Paths to the base model (used for the actor and initial reference), FSDP settings, optimization parameters (learning rate, scheduler).
  * `reward_model`: Configuration for the reward model used for online preference labeling (path, batch size, etc.). Can be omitted if using a simpler reward function.
  * `algorithm`: DPO-specific hyperparameters like `dpo_beta`, `dpo_loss_type`.
  * `trainer`: Distributed training settings (nodes, GPUs per node), logging (WandB), checkpointing frequency, and `ref_update_freq` (set > 0 to enable periodic reference-model updates from the actor).

---

## Key Files

* `main_spin.py`: Main entry point using Hydra to load the config and launch the `SpinTrainer`.
* `spin_trainer.py`: Defines the `SpinTrainer` class, orchestrating the online DPO training loop.
* `fsdp_workers.py`: Implements the Ray workers (actor, reference), potentially using FSDP.
* `dp_actor.py`: Contains the actor class, including the DPO policy-update logic.
* `core_algos.py`: Includes the helper functions `compute_online_dpo_loss` and `compute_onlinedpo_pref`.
* `config/spin_trainer.yaml` (or similar): Main Hydra configuration file for the recipe.
* `run_spin.sh` (or similar): Example bash script for launching a training run.
* `README.md`: This file.

---

## Acknowledgement

We sincerely thank the `verl` community and our advisors for their contribution and guidance, including (list adapted from SPPO):

* [Zixiang Chen](https://sites.google.com/view/zxchen)
* [Yuhao Yang](https://github.com/yhyang201)
* [Yifan Zhang](https://github.com/yifanzhang-pro)
* [Yongan Xiang](https://github.com/BearBiscuit05)
* [Junrong Lin](https://github.com/ocss884)
* [Yuxuan Tong](https://github.com/tongyx361)
* [Guangming Shen](https://github.com/PeterSH6)
* [Biao He](https://www.linkedin.com/in/biao-he/)
* [Qingquan Song](https://qingquansong.github.io/)
* [Chenyang Zhao](https://zhaochenyang20.github.io/Chayenne/)
* [Quanquan Gu](https://web.cs.ucla.edu/~qgu/)

---

================================================
FILE: verl_distillation/recipe/spin/config/spin_trainer.yaml
================================================
# the spin config overrides the default ppo_trainer.yaml
hydra:
  searchpath:
    - file://verl/trainer/config

defaults:
  - ppo_trainer
  - _self_

actor_rollout_ref:
  actor:
    dpo_beta: 0.1
    optim:
      lr_warmup_steps: 15
  rollout:
    name: sglang
    tensor_model_parallel_size: 2
    gpu_memory_utilization: 0.5
    val_kwargs:
      n: 2 # 2 will trigger validation, 1 will bypass

algorithm:
  adv_estimator: null

trainer:
  log_val_generations: 0
  ref_update_freq: 1

================================================
FILE: verl_distillation/recipe/spin/core_algos.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023-2024 SGLang Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
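# Helper algorithms for the SPIN recipe: KL controllers, online-DPO preference
# construction over interleaved response pairs, the online DPO loss, and
# sequence log-prob computation.
#
# Illustrative example (hypothetical values) for compute_onlinedpo_pref below:
# two prompts with two interleaved responses each, whose summed sequence
# rewards are [1.0, 0.5, 0.2, 0.9], yield the preference mask
# [True, False, False, True]: response 1 is chosen for prompt 0 and
# response 2 for prompt 1.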
import numpy as np import torch class AdaptiveKLController: """ Adaptive KL controller described in the paper: https://arxiv.org/pdf/1909.08593.pdf """ def __init__(self, init_kl_coef, target_kl, horizon): self.value = init_kl_coef self.target = target_kl self.horizon = horizon def update(self, current_kl, n_steps): target = self.target proportional_error = np.clip(current_kl / target - 1, -0.2, 0.2) mult = 1 + proportional_error * n_steps / self.horizon self.value *= mult class FixedKLController: """Fixed KL controller.""" def __init__(self, kl_coef): self.value = kl_coef def update(self, current_kl, n_steps): pass def get_kl_controller(kl_ctrl): if kl_ctrl.type == "fixed": return FixedKLController(kl_coef=kl_ctrl.kl_coef) elif kl_ctrl.type == "adaptive": assert kl_ctrl.horizon > 0, f"horizon must be larger than 0. Got {kl_ctrl.horizon}" return AdaptiveKLController(init_kl_coef=kl_ctrl.kl_coef, target_kl=kl_ctrl.target_kl, horizon=kl_ctrl.horizon) else: raise NotImplementedError def compute_onlinedpo_pref( token_level_rewards: torch.Tensor, response_mask: torch.Tensor, ) -> torch.Tensor: """ Computes preferences between pairs of sequences based on summed rewards and returns a mask aligned with the interleaved batch. Assumes inputs are interleaved: [Resp1_Prompt0, Resp2_Prompt0, Resp1_Prompt1, Resp2_Prompt1, ...] Args: token_level_rewards: Tensor of shape [batch_size * 2, seq_len] response_mask: Tensor of shape [batch_size * 2, seq_len] Returns: torch.Tensor: A boolean mask of shape [batch_size * 2], where True indicates the corresponding entry is the chosen response for its pair. Example: [True, False, False, True, ...] means for prompt 0, response 1 was chosen; for prompt 1, response 2 was chosen. """ # print(f"---- [DEBUG] Inside compute_onlinedpo_pref ----") if token_level_rewards.shape[0] % 2 != 0 or response_mask.shape[0] % 2 != 0: raise ValueError( f"Input tensor batch dimension must be even for pair comparison, got shapes: " f"{token_level_rewards.shape}, {response_mask.shape}" ) if token_level_rewards.shape != response_mask.shape: raise ValueError(f"Shape mismatch between rewards {token_level_rewards.shape} and mask {response_mask.shape}") # 1. Calculate Sequence Scores scores = (token_level_rewards * response_mask).sum(dim=-1) # print(f" Calculated sequence scores shape: {scores.shape}") # [batch_size * 2] # 2. Reshape scores to group pairs: [batch_size, 2] try: score_pairs = scores.view(-1, 2) except RuntimeError as e: print(f"ERROR reshaping scores (shape {scores.shape}) into pairs: {e}") raise e print(f" Reshaped score pairs shape: {score_pairs.shape}") # [batch_size, 2] # 3. Compare scores to find which index (0 or 1) is the winner within each pair # winner_indices[i] = 0 if score_pairs[i, 0] >= score_pairs[i, 1] else 1 winner_indices = torch.argmax(score_pairs, dim=1) # 0 if first is max, 1 if second is max # Handle ties explicitly if argmax behavior isn't guaranteed (usually picks first max) # Alternatively: winner_mask_original = score_pairs[:, 0] >= score_pairs[:, 1] # print(f" Winner indices shape: {winner_indices.shape}") # [batch_size] # print(f" Number where Response 2 (index 1) is preferred: {winner_indices.sum().item()}") # Counts number of 1s # 4. 
Create the final [batch_size * 2] mask num_pairs = score_pairs.shape[0] full_batch_size = num_pairs * 2 # Create indices for the full batch [0, 1, 2, 3, ..., N*2-1] # full_indices = torch.arange(full_batch_size, device=scores.device) # Create indices corresponding to the winner within each pair's original index # E.g., if winner_indices is [0, 1, 0], pair_indices is [0, 1, 2] # winner_global_indices = (pair_indices * 2) + winner_indices -> [ (0*2)+0, (1*2)+1, (2*2)+0 ] -> [0, 3, 4] pair_indices = torch.arange(num_pairs, device=scores.device) winner_global_indices = (pair_indices * 2) + winner_indices # Create boolean mask - True at the winner's position output_preference_mask = torch.zeros(full_batch_size, dtype=torch.bool, device=scores.device) output_preference_mask[winner_global_indices] = True # print(f" Output preference mask shape: {output_preference_mask.shape}") # Should be [batch_size * 2] # print(f" Output mask True count (Chosen): {output_preference_mask.sum().item()}") # Should be batch_size # print(f" Output mask False count (Rejected): {(~output_preference_mask).sum().item()}") # Should be batch_size # print(f"---- [DEBUG] Exiting compute_onlinedpo_pref ----") return output_preference_mask def compute_online_dpo_loss( policy_chosen_logps: torch.Tensor, policy_rejected_logps: torch.Tensor, reference_chosen_logps: torch.Tensor, reference_rejected_logps: torch.Tensor, beta: float, label_smoothing: float = 0.0, loss_type: str = "sigmoid", reference_free: bool = False, ) -> torch.Tensor: import torch.nn.functional as F pi_logratios = policy_chosen_logps - policy_rejected_logps ref_logratios = reference_chosen_logps - reference_rejected_logps if reference_free: ref_logratios = torch.zeros_like(pi_logratios) logits = pi_logratios - ref_logratios if loss_type == "sigmoid": losses = -F.logsigmoid(beta * logits) * (1 - label_smoothing) - F.logsigmoid(-beta * logits) * label_smoothing elif loss_type == "ipo": losses = (logits - 1 / (2 * beta)) ** 2 else: raise ValueError(f"Unsupported loss_type: {loss_type}. Choose 'sigmoid', 'ipo', or 'hinge'.") return losses.mean() def get_batch_logps( logits: torch.FloatTensor, labels: torch.LongTensor, average_log_prob: bool = False ) -> torch.FloatTensor: """ Compute the log probabilities of the given labels under the given logits. Args: logits: Logits of the model (e.g., huggingface CausalLMOutputs `logits`). Shape: (batch_size, sequence_length, vocab_size) labels: Labels for computing the sequence log probabilities. Shape: (batch_size, sequence_length) average_log_prob: If True, return the average log probability per sequence. Otherwise, return the sum. Returns: A tensor of shape (batch_size,) containing the average/sum log probabilities of the given sequences. 
""" if logits.shape[:-1] != labels.shape: raise ValueError("Logits and labels must have the same shape[:-1]") # Ensure labels are contiguous and on the same device as logits labels = labels.contiguous().to(logits.device) # Shift so that tokens < n predict n shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Calculate per token log probability loss_fct = torch.nn.CrossEntropyLoss(ignore_index=-100, reduction="none") per_token_logps = -loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) per_token_logps = per_token_logps.view( shift_logits.size(0), shift_logits.size(1) ) # Reshape back to (batch_size, seq_len-1) # Create a mask for the labels that are not -100 loss_mask = shift_labels != -100 # Apply the mask to the per token log probabilities masked_logps = per_token_logps * loss_mask # Calculate the sum or average log probability per sequence sequence_logps = masked_logps.sum(dim=-1) if average_log_prob: # Avoid division by zero for sequences with no valid tokens num_valid_tokens = loss_mask.sum(dim=-1) return sequence_logps / torch.clamp(num_valid_tokens, min=1) else: return sequence_logps ================================================ FILE: verl_distillation/recipe/spin/dp_actor.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2023-2024 SGLang Team # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools import math from collections import defaultdict import numpy as np import torch from recipe.spin.core_algos import compute_online_dpo_loss, get_batch_logps from verl import DataProto from verl.utils.device import get_device_name from verl.utils.seqlen_balancing import get_reverse_idx, rearrange_micro_batches from verl.workers.actor import DataParallelPPOActor __all__ = ["DataParallelPPOActor"] class SPINDataParallelPPOActor(DataParallelPPOActor): def compute_log_prob(self, data: DataProto) -> torch.Tensor: """Compute the log probability of the responses given input_ids, attention_mask and position_ids Args: data (DataProto): a DataProto containing keys ``input_ids``: tensor of shape [batch_size, sequence_length]. torch.int64. Note that input_ids is the concatenation of prompt and response. Note that ``sequence_length = prompt_length + response_length``. ``attention_mask``: tensor of shape [batch_size, sequence_length]. torch.int64. ``position_ids``: tensor of shape [batch_size, sequence_length]. torch.int64. ``responses``: tensor of shape [batch_size, response_length]. torch.int64. 
Returns: torch.Tensor: the log_prob tensor """ # set to eval self.actor_module.eval() micro_batch_size = data.meta_info["micro_batch_size"] temperature = data.meta_info["temperature"] # temperature must be in the data.meta_info to avoid silent error use_dynamic_bsz = data.meta_info["use_dynamic_bsz"] select_keys = ["responses", "input_ids", "attention_mask", "position_ids"] batch = data.select(batch_keys=select_keys).batch has_multi_modal_inputs = "multi_modal_inputs" in data.non_tensor_batch.keys() if has_multi_modal_inputs: num_micro_batches = data.batch.batch_size[0] // micro_batch_size non_tensor_select_keys = ["multi_modal_inputs"] micro_batches = data.select(select_keys, non_tensor_select_keys).chunk(num_micro_batches) elif use_dynamic_bsz: # split using dynamic bsz max_token_len = data.meta_info["max_token_len"] * self.ulysses_sequence_parallel_size micro_batches, indices = rearrange_micro_batches(batch=batch, max_token_len=max_token_len) else: micro_batches = batch.split(micro_batch_size) log_probs_lst = [] for micro_batch in micro_batches: if isinstance(micro_batch, DataProto): micro_batch = {**micro_batch.batch, **micro_batch.non_tensor_batch} with torch.no_grad(): _, log_probs = self._forward_micro_batch(micro_batch, temperature=temperature) log_probs_lst.append(log_probs) log_probs = torch.concat(log_probs_lst, dim=0) if use_dynamic_bsz: indices = list(itertools.chain.from_iterable(indices)) assert len(indices) == log_probs.size(0), f"{len(indices)} vs. {log_probs.size()}" revert_indices = torch.tensor(get_reverse_idx(indices), dtype=torch.long) log_probs = log_probs[revert_indices] return log_probs def update_policy_dpo_with_ref(self, data: DataProto): """ Performs the DPO update step using pre-calculated reference log probs from an external, periodically updated reference model. """ self.actor_module.train() # Ensure training mode # --- Retrieve necessary data --- try: # Expects batch prepared by fit_dpo loop, including reference log probs batch_td = data.batch chosen_labels = batch_td["chosen_labels"] rejected_labels = batch_td["rejected_labels"] # ... other needed tensors like chosen/rejected input_ids, attention_mask, position_ids ... 
# === Get PRE-CALCULATED reference log probs from input data === reference_chosen_logps = batch_td["reference_chosen_logps"] # Should be sequence-level logps reference_rejected_logps = batch_td["reference_rejected_logps"] # Should be sequence-level logps # ============================================================ # Get DPO params from meta_info # beta = data.meta_info.get('dpo_beta', 0.1) # Default beta beta = self.config.get("dpo_beta", 0.1) # Default beta loss_type = data.meta_info.get("dpo_loss_type", "sigmoid") label_smoothing = data.meta_info.get("dpo_label_smoothing", 0.0) # reference_free should now be False as we provide ref logps reference_free = data.meta_info.get("reference_free", False) # Default False except KeyError as e: print(f"ERROR: Missing required key for DPO update (in update_policy_dpo): {e}") print(f"Available keys in data.batch: {list(batch_td.keys())}") # Debug print return {} # Return empty metrics on error except Exception as e_data: print(f"ERROR accessing data for DPO update (in update_policy_dpo): {e_data}") return {} # --- Micro-batching Setup --- micro_batch_size = self.config.get("ppo_micro_batch_size_per_gpu") if micro_batch_size is None: # Fallback or default if not set, or raise error micro_batch_size = 1 # Example fallback, adjust as needed print(f"Warning: 'ppo_micro_batch_size_per_gpu' not set, defaulting to {micro_batch_size}") # raise ValueError("Config 'ppo_micro_batch_size_per_gpu' must be set.") # Ensure chosen_input_ids exists before getting shape if "chosen_input_ids" not in batch_td: print("ERROR: 'chosen_input_ids' not found in batch_td for DPO update.") return {} bsz = batch_td["chosen_input_ids"].shape[0] if bsz == 0: print("Warning: DPO batch size is 0 in update_policy_dpo. Skipping update.") return {"actor/dpo_loss": 0.0, "actor/grad_norm": 0.0} # Return zero metrics if batch is empty num_micro_batches = math.ceil(bsz / micro_batch_size) gradient_accumulation_steps = num_micro_batches # --- Metrics Accumulation --- total_loss = 0.0 accumulated_metrics = defaultdict(list) metrics = {} # Final metrics dict # --- Zero Gradients --- self.actor_optimizer.zero_grad(set_to_none=True) # --- Micro-batch Loop --- for i in range(num_micro_batches): start_idx = i * micro_batch_size end_idx = min(start_idx + micro_batch_size, bsz) if start_idx >= end_idx: continue # Slice the full DPO batch into micro-batches # Important: Slice ALL required tensors, including labels and inputs micro_batch_chosen_labels = chosen_labels[start_idx:end_idx] micro_batch_rejected_labels = rejected_labels[start_idx:end_idx] micro_batch_chosen_inputs = { "input_ids": batch_td["chosen_input_ids"][start_idx:end_idx], "attention_mask": batch_td["chosen_attention_mask"][start_idx:end_idx], } if "chosen_position_ids" in batch_td: micro_batch_chosen_inputs["position_ids"] = batch_td["chosen_position_ids"][start_idx:end_idx] micro_batch_rejected_inputs = { "input_ids": batch_td["rejected_input_ids"][start_idx:end_idx], "attention_mask": batch_td["rejected_attention_mask"][start_idx:end_idx], } if "rejected_position_ids" in batch_td: micro_batch_rejected_inputs["position_ids"] = batch_td["rejected_position_ids"][start_idx:end_idx] # Determine autocast dtype autocast_dtype = torch.bfloat16 # Or get dynamically from config/FSDP settings # --- Autocast Forward Pass --- with torch.autocast(device_type=get_device_name(), dtype=autocast_dtype): # --- Step 1: Forward pass for CURRENT policy log probs (with grad) --- policy_chosen_outputs = self.actor_module(**micro_batch_chosen_inputs, 
use_cache=False) policy_rejected_outputs = self.actor_module(**micro_batch_rejected_inputs, use_cache=False) # --- Step 2: Calculate CURRENT policy log probs using get_batch_logps --- policy_chosen_logps = get_batch_logps( policy_chosen_outputs.logits, micro_batch_chosen_labels, average_log_prob=False ) policy_rejected_logps = get_batch_logps( policy_rejected_outputs.logits, micro_batch_rejected_labels, average_log_prob=False ) # --- Step 3: Retrieve PRE-CALCULATED reference log probs (NO grad needed) --- # Slice the full batch reference logps for the current micro-batch micro_ref_chosen_logps = reference_chosen_logps[start_idx:end_idx] micro_ref_rejected_logps = reference_rejected_logps[start_idx:end_idx] # --- The ActorAsRef calculation block is REMOVED --- # --- Step 4: Calculate DPO Logits and Loss --- pi_logratios = policy_chosen_logps - policy_rejected_logps ref_logratios = micro_ref_chosen_logps - micro_ref_rejected_logps # Uses pre-calculated values logits = pi_logratios - ref_logratios # DPO logits loss = compute_online_dpo_loss( policy_chosen_logps=policy_chosen_logps, # Has grad policy_rejected_logps=policy_rejected_logps, # Has grad reference_chosen_logps=micro_ref_chosen_logps, # No grad (from input) reference_rejected_logps=micro_ref_rejected_logps, # No grad (from input) beta=beta, label_smoothing=label_smoothing, loss_type=loss_type, reference_free=reference_free, # Should be False now ) # --- Scale loss for gradient accumulation --- scaled_loss = loss / gradient_accumulation_steps # --- Accumulate Metrics --- total_loss += loss.item() # Unscaled loss accumulated_metrics["actor/dpo_loss_batch"].append(loss.item()) accumulated_metrics["actor/dpo_logits_batch"].append(logits.mean().item()) # Accumulate policy and reference log probs/ratios if needed for debugging accumulated_metrics["actor/policy_chosen_logps_batch"].append(policy_chosen_logps.mean().item()) accumulated_metrics["actor/policy_rejected_logps_batch"].append(policy_rejected_logps.mean().item()) accumulated_metrics["actor/reference_chosen_logps_batch"].append(micro_ref_chosen_logps.mean().item()) accumulated_metrics["actor/reference_rejected_logps_batch"].append( micro_ref_rejected_logps.mean().item() ) # --- Backward Pass (outside autocast) --- # Check if loss requires grad before backward if scaled_loss.requires_grad: scaled_loss.backward() else: print(f"Warning: Scaled loss at micro-batch {i} does not require grad. 
Skipping backward.") # --- End Micro-batch Loop --- # --- Optimizer Step (after accumulating gradients for all micro-batches) --- grad_norm = self._optimizer_step() # --- Populate Final Metrics --- if num_micro_batches > 0 and bsz > 0: # Check if any processing happened metrics["actor/dpo_loss"] = total_loss / num_micro_batches metrics["actor/grad_norm"] = ( grad_norm.item() if torch.is_tensor(grad_norm) and torch.isfinite(grad_norm) else float("inf") ) # Average other accumulated metrics for key, val_list in accumulated_metrics.items(): if val_list: metrics[key.replace("_batch", "")] = np.mean(val_list) # Calculate accuracy / rewards / margins based on averaged logprobs if desired if ( "actor/policy_chosen_logps" in metrics and "actor/policy_rejected_logps" in metrics and "actor/reference_chosen_logps" in metrics and "actor/reference_rejected_logps" in metrics ): policy_ratio_mean = metrics["actor/policy_chosen_logps"] - metrics["actor/policy_rejected_logps"] ref_ratio_mean = metrics["actor/reference_chosen_logps"] - metrics["actor/reference_rejected_logps"] logits_mean = policy_ratio_mean - ref_ratio_mean metrics["actor/rewards_chosen"] = beta * ( metrics["actor/policy_chosen_logps"] - metrics["actor/reference_chosen_logps"] ) metrics["actor/rewards_rejected"] = beta * ( metrics["actor/policy_rejected_logps"] - metrics["actor/reference_rejected_logps"] ) metrics["actor/rewards_accuracies"] = float(logits_mean > 0) # Mean accuracy proxy metrics["actor/rewards_margins"] = metrics["actor/rewards_chosen"] - metrics["actor/rewards_rejected"] else: # Handle case where no micro-batches were run (e.g., bsz=0) metrics["actor/dpo_loss"] = 0.0 metrics["actor/grad_norm"] = 0.0 # Initialize other metrics to 0 or NaN as appropriate for key in accumulated_metrics.keys(): metrics[key.replace("_batch", "")] = 0.0 metrics["actor/rewards_chosen"] = 0.0 metrics["actor/rewards_rejected"] = 0.0 metrics["actor/rewards_accuracies"] = 0.0 metrics["actor/rewards_margins"] = 0.0 return metrics # Return aggregated metrics ================================================ FILE: verl_distillation/recipe/spin/fsdp_workers.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2023-2024 SGLang Team # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import logging import os import warnings import numpy as np import psutil import torch import torch.distributed from codetiming import Timer from omegaconf import OmegaConf, open_dict from torch.distributed.device_mesh import init_device_mesh import verl.utils.torch_functional as verl_F from verl import DataProto from verl.single_controller.base import Worker from verl.single_controller.base.decorator import Dispatch, make_nd_compute_dataproto_dispatch_fn, register from verl.utils import hf_tokenizer from verl.utils.checkpoint.fsdp_checkpoint_manager import FSDPCheckpointManager from verl.utils.device import get_device_id, get_device_name, get_nccl_backend, get_torch_device from verl.utils.flops_counter import FlopsCounter from verl.utils.fs import copy_to_local from verl.utils.fsdp_utils import ( get_fsdp_wrap_policy, get_init_weight_context_manager, init_fn, load_fsdp_model_to_gpu, load_fsdp_optimizer, offload_fsdp_model_to_cpu, offload_fsdp_optimizer, ) from verl.utils.import_utils import import_external_libs from verl.utils.model import compute_position_id_with_mask from verl.utils.profiler import log_gpu_memory_usage from verl.workers.fsdp_workers import ActorRolloutRefWorker from verl.workers.sharding_manager.fsdp_ulysses import FSDPUlyssesShardingManager logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_PPO_LOGGING_LEVEL", "WARN")) def create_device_mesh(world_size, fsdp_size): if fsdp_size < 0 or fsdp_size >= world_size: device_mesh = init_device_mesh(get_device_name(), mesh_shape=(world_size,), mesh_dim_names=["fsdp"]) else: device_mesh = init_device_mesh( get_device_name(), mesh_shape=(world_size // fsdp_size, fsdp_size), mesh_dim_names=["ddp", "fsdp"] ) return device_mesh def get_sharding_strategy(device_mesh): from torch.distributed.fsdp import ShardingStrategy if device_mesh.ndim == 1: sharding_strategy = ShardingStrategy.FULL_SHARD elif device_mesh.ndim == 2: sharding_strategy = ShardingStrategy.HYBRID_SHARD else: raise NotImplementedError(f"Get device mesh ndim={device_mesh.ndim}, but only support 1 or 2") return sharding_strategy class SPINRolloutRefWorker(ActorRolloutRefWorker): @register(dispatch_mode=Dispatch.ONE_TO_ALL) def init_model(self): from recipe.spin.dp_actor import SPINDataParallelPPOActor as DataParallelPPOActor # This is used to import external_lib into the huggingface systems import_external_libs(self.config.model.get("external_lib", None)) override_model_config = OmegaConf.to_container(OmegaConf.create(self.config.model.get("override_config", {}))) use_remove_padding = self.config.model.get("use_remove_padding", False) use_fused_kernels = self.config.model.get("use_fused_kernels", False) if self._is_actor or self._is_rollout or self._is_ref: # we need the model for actor and rollout if self._is_actor or self._is_ref: optim_config = self.config.actor.optim fsdp_config = self.config.actor.fsdp_config else: optim_config = None fsdp_config = OmegaConf.create() self.actor_module_fsdp, self.actor_optimizer, self.actor_lr_scheduler, self.actor_model_config = ( self._build_model_optimizer( model_path=self.config.model.path, fsdp_config=fsdp_config, optim_config=optim_config, override_model_config=override_model_config, use_remove_padding=use_remove_padding, use_fused_kernels=use_fused_kernels, enable_gradient_checkpointing=self.config.model.get("enable_gradient_checkpointing", False), trust_remote_code=self.config.model.get("trust_remote_code", False), use_liger=self.config.model.get("use_liger", False), role="actor", ) ) # get the original 
unwrapped module self.actor_module = self.actor_module_fsdp._fsdp_wrapped_module if self._is_offload_optimizer: offload_fsdp_optimizer(optimizer=self.actor_optimizer) log_gpu_memory_usage("After offload actor optimizer during init", logger=logger) # load from checkpoint if self._is_actor or self._is_ref: OmegaConf.set_struct(self.config.actor, True) with open_dict(self.config.actor): self.config.actor.use_remove_padding = use_remove_padding self.config.actor.use_fused_kernels = use_fused_kernels self.actor = DataParallelPPOActor( config=self.config.actor, actor_module=self.actor_module_fsdp, actor_optimizer=self.actor_optimizer ) if self._is_rollout: self._build_rollout(trust_remote_code=self.config.model.get("trust_remote_code", False)) if self._is_ref: self.ref_module_fsdp = self._build_model_optimizer( model_path=self.config.model.path, fsdp_config=self.config.ref.fsdp_config, optim_config=None, override_model_config=override_model_config, use_remove_padding=use_remove_padding, use_fused_kernels=use_fused_kernels, trust_remote_code=self.config.model.get("trust_remote_code", False), use_liger=self.config.model.get("use_liger", False), role="ref", )[0] OmegaConf.set_struct(self.config.ref, True) with open_dict(self.config.ref): self.config.ref.use_remove_padding = use_remove_padding self.config.ref.use_fused_kernels = use_fused_kernels self.ref_policy = DataParallelPPOActor(config=self.config.ref, actor_module=self.ref_module_fsdp) self.checkpoint_manager = FSDPCheckpointManager( model=self.actor_module_fsdp, optimizer=self.actor.actor_optimizer, lr_scheduler=self.actor_lr_scheduler, processing_class=self.processor if self.processor is not None else self.tokenizer, checkpoint_config=self.config.actor.checkpoint, ) if self._is_actor: self.flops_counter = FlopsCounter(self.actor_model_config) self.checkpoint_manager = FSDPCheckpointManager( model=self.actor_module_fsdp, optimizer=self.actor.actor_optimizer, lr_scheduler=self.actor_lr_scheduler, processing_class=self.processor if self.processor is not None else self.tokenizer, checkpoint_config=self.config.actor.checkpoint, ) @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="actor")) def compute_ref_log_prob(self, data: DataProto): assert self._is_ref # Support all hardwares data = data.to(get_device_id()) micro_batch_size = self.config.ref.log_prob_micro_batch_size_per_gpu data.meta_info["micro_batch_size"] = micro_batch_size data.meta_info["temperature"] = self.config.rollout.temperature data.meta_info["max_token_len"] = self.config.ref.log_prob_max_token_len_per_gpu data.meta_info["use_dynamic_bsz"] = self.config.ref.log_prob_use_dynamic_bsz with self.ulysses_sharding_manager: output = self.ref_policy.compute_log_prob(data=data) output = DataProto.from_dict(tensors={"ref_log_prob": output}) output = output.to("cpu") # https://pytorch.org/docs/stable/notes/fsdp.html#fsdp-notes # unshard the root FSDP module if self.world_size > 1: self.ref_policy.actor_module._handle.reshard(True) return output @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="actor")) def compute_log_prob(self, data: DataProto): assert self._is_actor if self._is_offload_param: load_fsdp_model_to_gpu(self.actor_module_fsdp) # Support all hardwares data = data.to(get_device_id()) # we should always recompute old_log_probs when it is HybridEngine data.meta_info["micro_batch_size"] = self.config.rollout.log_prob_micro_batch_size_per_gpu data.meta_info["max_token_len"] = self.config.rollout.log_prob_max_token_len_per_gpu 
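# NOTE (added annotation): these meta_info keys form the contract consumed by the
# actor's compute_log_prob -- when `use_dynamic_bsz` is False the batch is split into
# fixed `micro_batch_size` chunks per GPU; when True, micro-batches are repacked so
# each stays under `max_token_len` tokens (compute_rm_score below uses the same pattern).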
data.meta_info["use_dynamic_bsz"] = self.config.rollout.log_prob_use_dynamic_bsz data.meta_info["temperature"] = self.config.rollout.temperature # perform recompute log_prob with self.ulysses_sharding_manager: output = self.actor.compute_log_prob(data=data) output = DataProto.from_dict( tensors={"old_log_probs": output}, meta_info={"temperature": self.config.rollout.temperature} ) output = output.to("cpu") # https://pytorch.org/docs/stable/notes/fsdp.html#fsdp-notes # unshard the root FSDP module if self.world_size > 1: self.actor.actor_module._handle.reshard(True) if self._is_offload_param: offload_fsdp_model_to_cpu(self.actor_module_fsdp) log_gpu_memory_usage("After compute_log_prob", logger=logger) return output @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="actor")) def update_actor_dpo(self, data: DataProto): """ Wrapper for actor update step. Handles FSDP state management. Calls self.actor.update_policy which now contains DPO logic based on pre-calculated log probabilities. """ # Support all hardwares data = data.to(get_device_id()) assert self._is_actor # Make sure this worker has the actor role if self.actor is None: raise RuntimeError("Actor instance (self.actor) not initialized in worker.") # --- FSDP State Management --- if self._is_offload_param: load_fsdp_model_to_gpu(self.actor_module_fsdp) if self._is_offload_optimizer: load_fsdp_optimizer(optimizer=self.actor_optimizer, device_id=get_device_id()) log_gpu_memory_usage("Before update policy (DPO via PPO path)", logger=logger) # --- Ulysses Sharding (if used) --- with self.ulysses_sharding_manager: # --- Call the core update method (now containing DPO logic) --- with Timer(name="update_policy_dpo_via_ppo", logger=None) as timer: # Use a distinct timer name # Calls the modified update_policy method metrics = self.actor.update_policy_dpo_with_ref(data=data) # <-- THIS CALLS THE MODIFIED FUNCTION delta_time = timer.last # --- Add Performance Metrics --- # MFU calculation might be less accurate/meaningful here for DPO metrics["perf/approx_tokens_processed"] = torch.sum( data.batch.get("attention_mask", torch.tensor(0)) ).item() # Approx tokens metrics["perf/max_memory_allocated_gb"] = get_torch_device().max_memory_allocated() / (1024**3) metrics["perf/max_memory_reserved_gb"] = get_torch_device().max_memory_reserved() / (1024**3) metrics["perf/cpu_memory_used_gb"] = psutil.virtual_memory().used / (1024**3) global_num_tokens = data.meta_info["global_token_num"] estimated_flops, promised_flops = self.flops_counter.estimate_flops(global_num_tokens, delta_time) metrics["perf/mfu/actor"] = estimated_flops * self.config.ppo_epochs / promised_flops / self.world_size # --- LR Scheduler Step --- lr = self.actor_lr_scheduler.get_last_lr()[0] metrics["actor/lr"] = lr self.actor_lr_scheduler.step() log_gpu_memory_usage("After update policy (DPO via PPO path)", logger=logger) # --- Prepare Output --- output = DataProto(meta_info={"metrics": metrics}) output = output.to("cpu") # --- FSDP State Management (Offload) --- if self._is_offload_param: offload_fsdp_model_to_cpu(self.actor_module_fsdp) if self._is_offload_optimizer: offload_fsdp_optimizer(optimizer=self.actor_optimizer) return output # TODO(sgm): we may need to extract it to dp_reward_model.py class RewardModelWorker(Worker): """ Note that we only implement the reward model that is subclass of AutoModelForTokenClassification. 
""" def __init__(self, config): super().__init__() import torch.distributed if not torch.distributed.is_initialized(): torch.distributed.init_process_group(backend=get_nccl_backend()) self.config = config # build device mesh for Ulysses Sequence Parallel world_size = torch.distributed.get_world_size() from torch.distributed.device_mesh import init_device_mesh fsdp_size = self.config.model.fsdp_config.fsdp_size self.device_mesh = create_device_mesh(world_size=world_size, fsdp_size=fsdp_size) self.ulysses_device_mesh = None self.ulysses_sequence_parallel_size = self.config.get("ulysses_sequence_parallel_size", 1) dp = world_size // self.ulysses_sequence_parallel_size if self.ulysses_sequence_parallel_size > 1: self.ulysses_device_mesh = init_device_mesh( get_device_name(), mesh_shape=(dp, self.ulysses_sequence_parallel_size), mesh_dim_names=["dp", "sp"] ) if self.ulysses_device_mesh is not None: is_collect = self.ulysses_device_mesh["sp"].get_local_rank() == 0 self._register_dispatch_collect_info( "reward", dp_rank=self.ulysses_device_mesh["dp"].get_local_rank(), is_collect=is_collect ) else: self._register_dispatch_collect_info("reward", dp_rank=self.rank, is_collect=True) self.ulysses_sharding_manager = FSDPUlyssesShardingManager(self.ulysses_device_mesh) self.use_remove_padding = self.config.model.get("use_remove_padding", False) # normalize config if self.config.micro_batch_size is not None: self.config.micro_batch_size //= torch.distributed.get_world_size() self.config.micro_batch_size_per_gpu = self.config.micro_batch_size def _build_model(self, config): # the following line is necessary from torch.distributed.fsdp import CPUOffload from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from transformers import AutoConfig, AutoModelForTokenClassification # download the checkpoint from hdfs local_path = copy_to_local(config.model.path) if self.config.model.input_tokenizer is None: self._do_switch_chat_template = False else: self._do_switch_chat_template = True input_tokenizer_local_path = copy_to_local(config.model.input_tokenizer) self.input_tokenizer = hf_tokenizer( input_tokenizer_local_path, trust_remote_code=config.model.get("trust_remote_code", False) ) self.tokenizer = hf_tokenizer(local_path, trust_remote_code=config.model.get("trust_remote_code", False)) trust_remote_code = config.model.get("trust_remote_code", False) model_config = AutoConfig.from_pretrained(local_path, trust_remote_code=trust_remote_code) model_config.num_labels = 1 # note that we have to create model in fp32. 
Otherwise, the optimizer is in bf16, which is incorrect init_context = get_init_weight_context_manager( use_meta_tensor=not model_config.tie_word_embeddings, mesh=self.device_mesh ) with init_context(), warnings.catch_warnings(): warnings.simplefilter("ignore") model_config.classifier_dropout = 0.0 reward_module = AutoModelForTokenClassification.from_pretrained( pretrained_model_name_or_path=local_path, config=model_config, torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2", trust_remote_code=trust_remote_code, ) if config.model.get("use_remove_padding", False) or self.ulysses_sequence_parallel_size > 1: from verl.models.transformers.monkey_patch import apply_monkey_patch apply_monkey_patch(model=reward_module, ulysses_sp_size=self.ulysses_sequence_parallel_size) reward_module.to(torch.bfloat16) auto_wrap_policy = get_fsdp_wrap_policy(module=reward_module, config=self.config.model.fsdp_config) fsdp_mesh = self.device_mesh sharding_strategy = get_sharding_strategy(fsdp_mesh) reward_module = FSDP( reward_module, param_init_fn=init_fn, use_orig_params=False, auto_wrap_policy=auto_wrap_policy, device_id=get_device_id(), sharding_strategy=sharding_strategy, # zero3 sync_module_states=True, cpu_offload=CPUOffload(offload_params=True), forward_prefetch=False, device_mesh=self.device_mesh, ) return reward_module @register(dispatch_mode=Dispatch.ONE_TO_ALL) def init_model(self): # This is used to import external_lib into the huggingface systems import_external_libs(self.config.model.get("external_lib", None)) self.reward_module = self._build_model(config=self.config) def _forward_micro_batch(self, micro_batch): from flash_attn.bert_padding import index_first_axis, pad_input, rearrange, unpad_input from verl.utils.ulysses import gather_outputs_and_unpad, ulysses_pad_and_slice_inputs with torch.no_grad(), torch.autocast(device_type=get_device_name(), dtype=torch.bfloat16): input_ids = micro_batch["input_ids"] batch_size, seqlen = input_ids.shape attention_mask = micro_batch["attention_mask"] position_ids = micro_batch["position_ids"] if self.use_remove_padding: input_ids_rmpad, indices, *_ = unpad_input( input_ids.unsqueeze(-1), attention_mask ) # input_ids_rmpad (total_nnz, ...) input_ids_rmpad = input_ids_rmpad.transpose(0, 1) # (1, total_nnz) # unpad the position_ids to align the rotary position_ids_rmpad = index_first_axis( rearrange(position_ids.unsqueeze(-1), "b s ... 
-> (b s) ..."), indices ).transpose(0, 1) # pad and slice the inputs if sp > 1 if self.ulysses_sequence_parallel_size > 1: input_ids_rmpad, position_ids_rmpad, pad_size = ulysses_pad_and_slice_inputs( input_ids_rmpad, position_ids_rmpad, sp_size=self.ulysses_sequence_parallel_size ) # only pass input_ids and position_ids to enable flash_attn_varlen output = self.reward_module( input_ids=input_ids_rmpad, attention_mask=None, position_ids=position_ids_rmpad, use_cache=False ) # prevent model thinks we are generating reward_rmpad = output.logits reward_rmpad = reward_rmpad.squeeze(0) # (total_nnz) # gather output if sp > 1 if self.ulysses_sequence_parallel_size > 1: reward_rmpad = gather_outputs_and_unpad( reward_rmpad, gather_dim=0, unpad_dim=0, padding_size=pad_size ) # pad it back rm_score = pad_input(reward_rmpad, indices=indices, batch=batch_size, seqlen=seqlen).squeeze(-1) else: output = self.reward_module( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, use_cache=False ) rm_score = output.logits # (batch_size, seq_len, 1) rm_score = rm_score.squeeze(-1) # extract the result of the last valid token eos_mask_idx = torch.argmax(position_ids * attention_mask, dim=-1) # (bsz,) rm_score = rm_score[torch.arange(batch_size), eos_mask_idx] return rm_score def _expand_to_token_level(self, data: DataProto, scores: torch.Tensor): batch_size = data.batch.batch_size[0] # expand as token_level_reward attention_mask = data.batch["attention_mask"] position_ids = data.batch["position_ids"] response_length = data.batch["responses"].shape[-1] eos_mask_idx = torch.argmax(position_ids * attention_mask, dim=-1) # (bsz,) token_level_scores = torch.zeros_like(attention_mask, dtype=scores.dtype) # (bsz, seqlen) token_level_scores[torch.arange(batch_size), eos_mask_idx] = scores # select the response part token_level_scores = token_level_scores[:, -response_length:] return token_level_scores def _switch_chat_template(self, data: DataProto): src_max_length = data.batch["attention_mask"].shape[-1] src_tokenizer = self.input_tokenizer target_tokenizer = self.tokenizer rm_input_ids = [] rm_attention_mask = [] for i in range(data.batch.batch_size[0]): if not isinstance(data.non_tensor_batch["raw_prompt"][i], list | np.ndarray): raise TypeError( f"raw_prompt must be a list or numpy array, got {type(data.non_tensor_batch['raw_prompt'][i])}" ) # extract raw prompt chat: list = list(data.non_tensor_batch["raw_prompt"][i]) # extract response response_ids = data.batch["responses"][i] response_length = response_ids.shape[-1] valid_response_length = data.batch["attention_mask"][i][-response_length:].sum() valid_response_ids = response_ids[:valid_response_length] # decode response = src_tokenizer.decode(valid_response_ids) # remove bos and eos response = response.replace(src_tokenizer.eos_token, "") chat.append({"role": "assistant", "content": response}) prompt_with_chat_template = target_tokenizer.apply_chat_template( chat, add_generation_prompt=False, tokenize=False ) if self.rank == 0 and i == 0: # for debugging purpose print(f"Switch template. 
chat: {prompt_with_chat_template}") # the maximum length is actually determined by the reward model itself max_length = self.config.get("max_length", src_max_length) if max_length is None: max_length = src_max_length model_inputs = target_tokenizer(prompt_with_chat_template, return_tensors="pt", add_special_tokens=False) input_ids, attention_mask = verl_F.postprocess_data( input_ids=model_inputs["input_ids"], attention_mask=model_inputs["attention_mask"], max_length=max_length, pad_token_id=target_tokenizer.pad_token_id, left_pad=False, # right padding truncation=self.config.get("truncation", "right"), ) # truncate from the right rm_input_ids.append(input_ids) rm_attention_mask.append(attention_mask) rm_input_ids = torch.cat(rm_input_ids, dim=0) rm_attention_mask = torch.cat(rm_attention_mask, dim=0) rm_position_ids = compute_position_id_with_mask(rm_attention_mask) rm_inputs = {"input_ids": rm_input_ids, "attention_mask": rm_attention_mask, "position_ids": rm_position_ids} return DataProto.from_dict(rm_inputs) @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="reward")) def compute_rm_score(self, data: DataProto): import itertools from verl.utils.seqlen_balancing import get_reverse_idx, rearrange_micro_batches # Support all hardwares data = data.to(get_device_id()) if self._do_switch_chat_template: rm_data = self._switch_chat_template(data) else: rm_input_ids = data.batch["input_ids"] rm_attention_mask = data.batch["attention_mask"] rm_position_ids = data.batch["position_ids"] rm_inputs = { "input_ids": rm_input_ids, "attention_mask": rm_attention_mask, "position_ids": rm_position_ids, } rm_data = DataProto.from_dict(rm_inputs) # Support all hardwares rm_data.batch = rm_data.batch.to(get_device_id()) # perform forward computation with self.ulysses_sharding_manager: rm_data = self.ulysses_sharding_manager.preprocess_data(data=rm_data) data = self.ulysses_sharding_manager.preprocess_data(data=data) use_dynamic_bsz = self.config.use_dynamic_bsz if use_dynamic_bsz: max_token_len = self.config.forward_max_token_len_per_gpu * self.ulysses_sequence_parallel_size micro_batches, indices = rearrange_micro_batches(batch=rm_data.batch, max_token_len=max_token_len) else: micro_batches = rm_data.batch.split(self.config.micro_batch_size_per_gpu) output = [] for micro_batch in micro_batches: rm_score = self._forward_micro_batch(micro_batch) output.append(rm_score) scores = torch.cat(output, dim=0) # (batch_size) if use_dynamic_bsz: indices = list(itertools.chain.from_iterable(indices)) assert len(indices) == scores.size(0), f"{len(indices)} vs. {scores.size()}" revert_indices = torch.tensor(get_reverse_idx(indices), dtype=torch.long) scores = scores[revert_indices] token_level_scores = self._expand_to_token_level(data, scores) # Note that this is only the scores, may not be the final rewards used to train RL output = DataProto.from_dict(tensors={"rm_scores": token_level_scores}) output = self.ulysses_sharding_manager.postprocess_data(data=output) # https://pytorch.org/docs/stable/notes/fsdp.html#fsdp-notes # unshard the root FSDP module self.reward_module._handle.reshard(True) output = output.to("cpu") return output ================================================ FILE: verl_distillation/recipe/spin/main_spin.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2023-2024 SGLang Team # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import hydra import ray from recipe.spin.spin_trainer import RaySPINTrainer from recipe.spin.utils import validate_config from verl.trainer.ppo.reward import get_custom_reward_fn from verl.trainer.ppo.utils import need_reference_policy @hydra.main(config_path="config", config_name="spin_trainer", version_base=None) def main(config): run_ppo(config) def run_ppo(config) -> None: # TODO(linjunrong.ocss884): this ENV is left for resolving SGLang conflict with ray devices # isolation, will solve in the future os.environ["ENSURE_CUDA_VISIBLE_DEVICES"] = os.environ.get("CUDA_VISIBLE_DEVICES", "") if not ray.is_initialized(): # this is for local ray cluster ray.init( runtime_env={ "env_vars": {"TOKENIZERS_PARALLELISM": "true", "NCCL_DEBUG": "WARN", "VLLM_LOGGING_LEVEL": "WARN"} } ) runner = TaskRunner.remote() ray.get(runner.run.remote(config)) @ray.remote(num_cpus=1) # please make sure main_task is not scheduled on head class TaskRunner: def run(self, config): # print initial config from pprint import pprint from omegaconf import OmegaConf from verl.utils.fs import copy_to_local pprint(OmegaConf.to_container(config, resolve=True)) # resolve=True will eval symbol values OmegaConf.resolve(config) # define worker classes if config.actor_rollout_ref.actor.strategy in {"fsdp", "fsdp2"}: assert config.critic.strategy in {"fsdp", "fsdp2"} # from recipe.spin.fsdp_workers import ActorRolloutRefWorker from recipe.spin.fsdp_workers import SPINRolloutRefWorker from verl.single_controller.ray import RayWorkerGroup ray_worker_group_cls = RayWorkerGroup elif config.actor_rollout_ref.actor.strategy == "megatron": assert config.actor_rollout_ref.actor.strategy == config.critic.strategy from verl.single_controller.ray import RayWorkerGroup ray_worker_group_cls = RayWorkerGroup else: raise NotImplementedError from recipe.spin.spin_trainer import ResourcePoolManager, Role role_worker_mapping = { # Role.ActorRollout: ray.remote(ActorRolloutRefWorker), Role.ActorRollout: ray.remote(SPINRolloutRefWorker), # Role.Critic: ray.remote(CriticWorker), } global_pool_id = "global_pool" resource_pool_spec = { global_pool_id: [config.trainer.n_gpus_per_node] * config.trainer.nnodes, } mapping = { Role.ActorRollout: global_pool_id, # Role.Critic: global_pool_id, } if config.reward_model.enable: if config.reward_model.strategy in {"fsdp", "fsdp2"}: from recipe.spin.fsdp_workers import RewardModelWorker elif config.reward_model.strategy == "megatron": from verl.workers.megatron_workers import RewardModelWorker else: raise NotImplementedError role_worker_mapping[Role.RewardModel] = ray.remote(RewardModelWorker) mapping[Role.RewardModel] = global_pool_id # use reference model # if config.algorithm.use_kl_in_reward or config.actor_rollout_ref.actor.use_kl_loss: # role_worker_mapping[Role.RefPolicy] = ray.remote(ActorRolloutRefWorker) role_worker_mapping[Role.RefPolicy] = ray.remote(SPINRolloutRefWorker) mapping[Role.RefPolicy] = global_pool_id # validate config validate_config( config=config, use_reference_policy=need_reference_policy(role_worker_mapping), use_critic=False, ) # download the checkpoint from hdfs local_path = 
copy_to_local(config.actor_rollout_ref.model.path) # instantiate tokenizer from verl.utils import hf_processor, hf_tokenizer trust_remote_code = config.data.get("trust_remote_code", False) tokenizer = hf_tokenizer(local_path, trust_remote_code=trust_remote_code) processor = hf_processor(local_path, use_fast=True) # used for multimodal LLM, could be none from verl.workers.reward_manager import get_reward_manager_cls # Note(haibin.lin): please make sure custom reward managers are imported and # registered via `verl.workers.reward_manager.register` reward_manager_name = config.reward_model.get("reward_manager", "naive") reward_manager_cls = get_reward_manager_cls(reward_manager_name) compute_score = get_custom_reward_fn(config) reward_kwargs = dict(config.reward_model.get("reward_kwargs", {})) reward_fn = reward_manager_cls( tokenizer=tokenizer, num_examine=0, compute_score=compute_score, reward_fn_key=config.data.reward_fn_key, **reward_kwargs, ) # Note that we always use function-based RM for validation val_reward_fn = reward_manager_cls( tokenizer=tokenizer, num_examine=1, compute_score=compute_score, reward_fn_key=config.data.reward_fn_key ) resource_pool_manager = ResourcePoolManager(resource_pool_spec=resource_pool_spec, mapping=mapping) trainer = RaySPINTrainer( config=config, tokenizer=tokenizer, processor=processor, role_worker_mapping=role_worker_mapping, resource_pool_manager=resource_pool_manager, ray_worker_group_cls=ray_worker_group_cls, reward_fn=reward_fn, val_reward_fn=val_reward_fn, ) trainer.init_workers() trainer.fit_dpo() if __name__ == "__main__": main() ================================================ FILE: verl_distillation/recipe/spin/run_spin.sh ================================================ set -e set -x VISIBLE_DEVICES="4,5,6,7" export HYDRA_FULL_ERROR=1 CUDA_VISIBLE_DEVICES=${VISIBLE_DEVICES} python3 -m recipe.spin.main_spin \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ actor_rollout_ref.model.path=Qwen/Qwen2.5-0.5B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=64 \ actor_rollout_ref.actor.ppo_micro_batch_size=8 \ actor_rollout_ref.rollout.log_prob_micro_batch_size=64 \ actor_rollout_ref.rollout.tensor_model_parallel_size=1 \ actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \ actor_rollout_ref.ref.log_prob_micro_batch_size=64 \ algorithm.kl_ctrl.kl_coef=0.001 \ trainer.logger=console \ trainer.val_before_train=True \ trainer.n_gpus_per_node=4 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=1 \ +trainer.log_freq=1 \ trainer.ref_update_freq=1 \ trainer.total_epochs=1000 2>&1 | tee verl_demo.log ================================================ FILE: verl_distillation/recipe/spin/spin_trainer.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2023-2024 SGLang Team # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. import os import traceback import uuid from collections import defaultdict from contextlib import contextmanager from dataclasses import dataclass, field from pprint import pprint from typing import Any, Optional import numpy as np import ray import torch from codetiming import Timer from omegaconf import OmegaConf, open_dict from torch.utils.data import Dataset, Sampler from torchdata.stateful_dataloader import StatefulDataLoader from tqdm import tqdm from recipe.spin import core_algos from verl import DataProto from verl.protocol import pad_dataproto_to_divisor, unpad_dataproto from verl.single_controller.ray import RayClassWithInitArgs, RayResourcePool, RayWorkerGroup from verl.single_controller.ray.base import create_colocated_worker_cls from verl.trainer.ppo.metric_utils import compute_throughout_metrics, compute_timing_metrics, process_validation_metrics from verl.trainer.ppo.utils import Role, WorkerType, need_reference_policy, need_reward_model from verl.utils.checkpoint.checkpoint_manager import find_latest_ckpt_path from verl.utils.metric import reduce_metrics from verl.utils.seqlen_balancing import get_seqlen_balanced_partitions, log_seqlen_unbalance from verl.utils.torch_functional import masked_mean from verl.utils.tracking import ValidationGenerationsLogger @dataclass class ResourcePoolManager: """ Defines a resource pool specification and the mapping from roles to resource pools. Resource pools will be initialized first. """ resource_pool_spec: dict[str, list[int]] mapping: dict[Role, str] resource_pool_dict: dict[str, RayResourcePool] = field(default_factory=dict) def create_resource_pool(self): for resource_pool_name, process_on_nodes in self.resource_pool_spec.items(): # max_colocate_count means the number of WorkerGroups (i.e. processes) in each RayResourcePool # For FSDP backend, we recommend using max_colocate_count=1 that merges all WorkerGroups into one. 
# For Megatron backend, we recommend using max_colocate_count>1 that can utilize different # WorkerGroup for different models resource_pool = RayResourcePool( process_on_nodes=process_on_nodes, use_gpu=True, max_colocate_count=1, name_prefix=resource_pool_name ) self.resource_pool_dict[resource_pool_name] = resource_pool self._check_resource_available() def get_resource_pool(self, role: Role) -> RayResourcePool: """Get the resource pool of the worker_cls""" return self.resource_pool_dict[self.mapping[role]] def get_n_gpus(self) -> int: """Get the number of gpus in this cluster.""" return sum([n_gpus for process_on_nodes in self.resource_pool_spec.values() for n_gpus in process_on_nodes]) def _check_resource_available(self): """Check if the resource pool can be satisfied in this ray cluster.""" node_available_resources = ray._private.state.available_resources_per_node() node_available_gpus = {node: node_info.get("GPU", 0) for node, node_info in node_available_resources.items()} # check total required gpus can be satisfied total_available_gpus = sum(node_available_gpus.values()) total_required_gpus = sum( [n_gpus for process_on_nodes in self.resource_pool_spec.values() for n_gpus in process_on_nodes] ) if total_available_gpus < total_required_gpus: raise ValueError( f"Total available GPUs {total_available_gpus} is less than total desired GPUs {total_required_gpus}" ) # check each resource pool can be satisfied, O(#resource_pools * #nodes) for resource_pool_name, process_on_nodes in self.resource_pool_spec.items(): num_gpus, num_nodes = process_on_nodes[0], len(process_on_nodes) for node, available_gpus in node_available_gpus.items(): if available_gpus >= num_gpus: node_available_gpus[node] -= num_gpus num_nodes -= 1 if num_nodes == 0: break if num_nodes > 0: raise ValueError( f"Resource pool {resource_pool_name}: {num_gpus}*{num_nodes} cannot be satisfied in this " f"ray cluster" ) def _compute_response_info(batch: DataProto) -> dict[str, Any]: """Placeholder: Computes prompt and response lengths.""" try: # Assuming 'prompts' and 'responses' keys exist after generation/union prompt_len = batch.batch["prompts"].shape[1] resp_len = batch.batch["responses"].shape[1] # This is simplified - real implementation might use attention masks # to get actual lengths per sample. batch_size = batch.batch.batch_size[0] prompt_lengths_tensor = torch.full((batch_size,), prompt_len, dtype=torch.float32, device=batch.batch.device) response_lengths_tensor = torch.full((batch_size,), resp_len, dtype=torch.float32, device=batch.batch.device) # Try getting actual lengths from attention mask if possible (more accurate) if "response_mask" in batch.batch: response_lengths_tensor = batch.batch["response_mask"].sum(dim=1).float() # if "attention_mask" in batch.batch and "response_mask" in batch.batch: # full_mask = batch.batch["attention_mask"] # resp_mask = batch.batch["response_mask"] # Infer prompt mask length based on where response mask starts or total length # This logic depends heavily on how your masks are constructed. 
# Example: prompt_lengths_tensor = full_mask.sum(dim=1).float() - response_lengths_tensor # Fallback to using prompt shape if mask logic is complex: prompt_lengths_tensor = torch.tensor( [batch.batch["prompts"].shape[1]] * batch_size, dtype=torch.float32, device=batch.batch.device ) return { "prompt_length": prompt_lengths_tensor, "response_length": response_lengths_tensor, "max_response_length": resp_len, "max_prompt_length": prompt_len, # Or from config if fixed padding } except KeyError as e: print(f"Warning: Missing key in _compute_response_info: {e}. Returning defaults.") # Return default/dummy values if keys are missing b_size = batch.batch.batch_size[0] if batch.batch.batch_size else 1 max_resp = batch.batch.get("responses").shape[1] if batch.batch.get("responses") is not None else 0 max_prompt = batch.batch.get("prompts").shape[1] if batch.batch.get("prompts") is not None else 0 return { "prompt_length": torch.zeros(b_size), "response_length": torch.zeros(b_size), "max_response_length": max_resp, "max_prompt_length": max_prompt, } # --- Modified Metric Function --- def compute_dpo_data_metrics(batch: DataProto) -> dict[str, Any]: """ Computes and returns metrics relevant for the DPO-like process. Assumes 'batch' contains results after generation and preference marking, potentially including 'dpo_logits', 'preferences', 'chosen_logps', etc. Removes PPO-specific advantage/return/critic metrics. """ print("---- [DEBUG] Computing DPO Data Metrics ----") metrics = {} try: # --- Scores and Rewards (from reward_fn) --- if "token_level_scores" in batch.batch and batch.batch["token_level_scores"] is not None: sequence_score = batch.batch["token_level_scores"].sum(-1) metrics.update( { "reward/score/mean": torch.mean(sequence_score).item(), "reward/score/max": torch.max(sequence_score).item(), "reward/score/min": torch.min(sequence_score).item(), } ) else: print("DEBUG compute_dpo_data_metrics: 'token_level_scores' not found.") if "token_level_rewards" in batch.batch and batch.batch["token_level_rewards"] is not None: sequence_reward = batch.batch["token_level_rewards"].sum(-1) metrics.update( { "reward/rewards/mean": torch.mean(sequence_reward).item(), "reward/rewards/max": torch.max(sequence_reward).item(), "reward/rewards/min": torch.min(sequence_reward).item(), } ) else: print("DEBUG compute_dpo_data_metrics: 'token_level_rewards' not found.") # --- DPO Specific Metrics (if stored previously) --- if "dpo_logits" in batch.batch and batch.batch["dpo_logits"] is not None: metrics["actor/dpo_logits"] = batch.batch["dpo_logits"].mean().item() else: print("DEBUG compute_dpo_data_metrics: 'dpo_logits' not found.") if "chosen_logps" in batch.batch and batch.batch["chosen_logps"] is not None: metrics["actor/chosen_logps"] = batch.batch["chosen_logps"].mean().item() else: print("DEBUG compute_dpo_data_metrics: 'chosen_logps' not found.") if "rejected_logps" in batch.batch and batch.batch["rejected_logps"] is not None: metrics["actor/rejected_logps"] = batch.batch["rejected_logps"].mean().item() else: print("DEBUG compute_dpo_data_metrics: 'rejected_logps' not found.") # Add metrics based on the 'preferences' mask if available # if "preferences" in batch.batch and batch.batch["preferences"] is not None: # prefs_mask = batch.batch["preferences"] # Shape [batch_size * n] # Calculate accuracy based on RM scores (assuming higher score -> True in mask) # Requires chosen/rejected scores to be available or recalculated # This is complex here, better calculated in the main loop or update function # --- 
Length Metrics --- response_info = _compute_response_info(batch) prompt_length = response_info["prompt_length"] response_length = response_info["response_length"] max_response_length = response_info["max_response_length"] max_prompt_length = response_info["max_prompt_length"] # Use calculated or from config metrics.update( { "response_length/mean": torch.mean(response_length).item(), "response_length/max": torch.max(response_length).item(), "response_length/min": torch.min(response_length).item(), "response_length/clip_ratio": torch.mean(torch.eq(response_length, max_response_length).float()).item(), "prompt_length/mean": torch.mean(prompt_length).item(), "prompt_length/max": torch.max(prompt_length).item(), "prompt_length/min": torch.min(prompt_length).item(), # Prompt clip ratio might need adjustment based on how max_prompt_length is defined "prompt_length/clip_ratio": torch.mean(torch.eq(prompt_length, max_prompt_length).float()).item(), } ) except KeyError as e: print(f"ERROR in compute_dpo_data_metrics: Missing key {e}") except Exception as e: print(f"ERROR in compute_dpo_data_metrics: {e}") traceback.print_exc() print(f"---- [DEBUG] Calculated DPO Data Metrics: {list(metrics.keys())} ----") return metrics def apply_kl_penalty(data: DataProto, kl_ctrl: core_algos.AdaptiveKLController, kl_penalty="kl"): responses = data.batch["responses"] response_length = responses.size(1) token_level_scores = data.batch["token_level_scores"] batch_size = data.batch.batch_size[0] attention_mask = data.batch["attention_mask"] response_mask = attention_mask[:, -response_length:] # compute kl between ref_policy and current policy # When apply_kl_penalty, algorithm.use_kl_in_reward=True, so the reference model has been enabled. kld = core_algos.kl_penalty( data.batch["old_log_probs"], data.batch["ref_log_prob"], kl_penalty=kl_penalty ) # (batch_size, response_length) kld = kld * response_mask beta = kl_ctrl.value token_level_rewards = token_level_scores - beta * kld current_kl = masked_mean(kld, mask=response_mask, axis=-1) # average over sequence current_kl = torch.mean(current_kl, dim=0).item() # according to https://github.com/huggingface/trl/blob/951ca1841f29114b969b57b26c7d3e80a39f75a0/trl/trainer/ppo_trainer.py#L837 kl_ctrl.update(current_kl=current_kl, n_steps=batch_size) data.batch["token_level_rewards"] = token_level_rewards metrics = {"actor/reward_kl_penalty": current_kl, "actor/reward_kl_penalty_coeff": beta} return data, metrics def compute_response_mask(data: DataProto): responses = data.batch["responses"] response_length = responses.size(1) attention_mask = data.batch["attention_mask"] return attention_mask[:, -response_length:] def compute_onlineDPO_pref(data: DataProto): """ Wrapper to compute DPO preference and add it to the DataProto batch. Includes debugging prints. """ # print(f"\n---- [DEBUG] Entering compute_onlineDPO_pref ----") # print(f" Input batch keys: {list(data.batch.keys())}") # Check inputs rewards_tensor = data.batch.get("token_level_rewards") mask_tensor = data.batch.get("response_mask") if rewards_tensor is None or mask_tensor is None: print(" ERROR: Missing 'token_level_rewards' or 'response_mask' in input data!") # Handle error case - maybe return original data or raise? 
# Returning original data for now to potentially allow skipping return data try: preferences = core_algos.compute_onlinedpo_pref(token_level_rewards=rewards_tensor, response_mask=mask_tensor) # Store the result data.batch["preferences"] = preferences except AttributeError: print("ERROR: Function 'compute_onlinedpo_pref' not found in core_algos.py!") # Assign dummy value or raise error data.batch["preferences"] = None # Indicate failure except Exception as e_pref: print(f"ERROR during core_algos.compute_onlinedpo_pref: {e_pref}") import traceback traceback.print_exc() data.batch["preferences"] = None # Indicate failure # print(f"---- [DEBUG] Exiting compute_onlineDPO_pref ----") return data @contextmanager def _timer(name: str, timing_raw: dict[str, float]): with Timer(name=name, logger=None) as timer: yield timing_raw[name] = timer.last class RaySPINTrainer: """ Note that this trainer runs on the driver process on a single CPU/GPU node. """ # TODO: support each role having an individual ray_worker_group_cls, # i.e., support a different backend for each role def __init__( self, config, tokenizer, role_worker_mapping: dict[Role, WorkerType], resource_pool_manager: ResourcePoolManager, ray_worker_group_cls: RayWorkerGroup = RayWorkerGroup, processor=None, reward_fn=None, val_reward_fn=None, train_dataset: Optional[Dataset] = None, val_dataset: Optional[Dataset] = None, collate_fn=None, train_sampler: Optional[Sampler] = None, device_name=None, ): # assert get_torch_device().is_available(), 'cuda must be available on driver' self.tokenizer = tokenizer self.processor = processor self.config = config self.reward_fn = reward_fn self.val_reward_fn = val_reward_fn self.hybrid_engine = config.actor_rollout_ref.hybrid_engine assert self.hybrid_engine, "Currently, only support hybrid engine" if self.hybrid_engine: assert Role.ActorRollout in role_worker_mapping, f"{role_worker_mapping.keys()=}" self.role_worker_mapping = role_worker_mapping self.resource_pool_manager = resource_pool_manager self.use_reference_policy = need_reference_policy(role_worker_mapping) self.use_rm = need_reward_model(role_worker_mapping) self.use_critic = False self.ray_worker_group_cls = ray_worker_group_cls self.validation_generations_logger = ValidationGenerationsLogger() self.async_rollout_mode = False self.device_name = device_name if device_name else self.config.trainer.device # define in-reward KL control # kl loss control currently not supported if config.algorithm.use_kl_in_reward: self.kl_ctrl_in_reward = core_algos.get_kl_controller(config.algorithm.kl_ctrl) self._create_dataloader(train_dataset, val_dataset, collate_fn, train_sampler) def _create_dataloader(self, train_dataset, val_dataset, collate_fn, train_sampler): """ Creates the train and validation dataloaders. 
""" # TODO: we have to make sure the batch size is divisible by the dp size from verl.trainer.main_ppo import create_rl_dataset, create_rl_sampler if train_dataset is None: train_dataset = create_rl_dataset( self.config.data.train_files, self.config.data, self.tokenizer, self.processor, max_samples=self.config.data.get("train_max_samples", -1), ) if val_dataset is None: val_dataset = create_rl_dataset( self.config.data.val_files, self.config.data, self.tokenizer, self.processor, max_samples=self.config.data.get("val_max_samples", -1), ) self.train_dataset, self.val_dataset = train_dataset, val_dataset if train_sampler is None: train_sampler = create_rl_sampler(self.config.data, self.train_dataset) if collate_fn is None: from verl.utils.dataset.rl_dataset import collate_fn as default_collate_fn collate_fn = default_collate_fn self.train_dataloader = StatefulDataLoader( dataset=self.train_dataset, batch_size=self.config.data.get("gen_batch_size", self.config.data.train_batch_size), num_workers=self.config.data.get("dataloader_num_workers", 8), drop_last=True, collate_fn=collate_fn, sampler=train_sampler, ) val_batch_size = self.config.data.val_batch_size # Prefer config value if set if val_batch_size is None: val_batch_size = len(self.val_dataset) self.val_dataloader = StatefulDataLoader( dataset=self.val_dataset, batch_size=val_batch_size, num_workers=self.config.data.get("dataloader_num_workers", 8), shuffle=False, drop_last=False, collate_fn=collate_fn, ) assert len(self.train_dataloader) >= 1, "Train dataloader is empty!" assert len(self.val_dataloader) >= 1, "Validation dataloader is empty!" print( f"Size of train dataloader: {len(self.train_dataloader)}, " f"Size of val dataloader: {len(self.val_dataloader)}" ) total_training_steps = len(self.train_dataloader) * self.config.trainer.total_epochs if self.config.trainer.total_training_steps is not None: total_training_steps = self.config.trainer.total_training_steps self.total_training_steps = total_training_steps print(f"Total training steps: {self.total_training_steps}") try: OmegaConf.set_struct(self.config, True) with open_dict(self.config): if OmegaConf.select(self.config, "actor_rollout_ref.actor.optim"): self.config.actor_rollout_ref.actor.optim.total_training_steps = total_training_steps if OmegaConf.select(self.config, "critic.optim"): self.config.critic.optim.total_training_steps = total_training_steps except Exception as e: print(f"Warning: Could not set total_training_steps in config. Structure missing? 
Error: {e}") def _maybe_log_val_generations(self, inputs, outputs, scores): """Log a table of validation samples to the configured logger (wandb or swanlab)""" generations_to_log = self.config.trainer.log_val_generations if generations_to_log == 0: return import numpy as np # Create tuples of (input, output, score) and sort by input text samples = list(zip(inputs, outputs, scores, strict=True)) samples.sort(key=lambda x: x[0]) # Sort by input text # Use fixed random seed for deterministic shuffling rng = np.random.RandomState(42) rng.shuffle(samples) # Take first N samples after shuffling samples = samples[:generations_to_log] # Log to each configured logger self.validation_generations_logger.log(self.config.trainer.logger, samples, self.global_steps) def _validate(self): data_source_lst = [] reward_extra_infos_dict: dict[str, list] = defaultdict(list) # Lists to collect samples for the table sample_inputs = [] sample_outputs = [] sample_scores = [] for test_data in self.val_dataloader: test_batch = DataProto.from_single_dict(test_data) # repeat test batch test_batch = test_batch.repeat( repeat_times=self.config.actor_rollout_ref.rollout.val_kwargs.n, interleave=True ) # we only do validation on rule-based rm if self.config.reward_model.enable and test_batch[0].non_tensor_batch["reward_model"]["style"] == "model": return {} # Store original inputs input_ids = test_batch.batch["input_ids"] # TODO: Can we keep special tokens except for padding tokens? input_texts = [self.tokenizer.decode(ids, skip_special_tokens=True) for ids in input_ids] sample_inputs.extend(input_texts) batch_keys_to_pop = ["input_ids", "attention_mask", "position_ids"] non_tensor_batch_keys_to_pop = ["raw_prompt_ids"] if "multi_modal_inputs" in test_batch.non_tensor_batch: non_tensor_batch_keys_to_pop.extend(["multi_modal_data", "multi_modal_inputs"]) if "raw_prompt" in test_batch.non_tensor_batch: non_tensor_batch_keys_to_pop.append("raw_prompt") if "tools_kwargs" in test_batch.non_tensor_batch: non_tensor_batch_keys_to_pop.append("tools_kwargs") test_gen_batch = test_batch.pop( batch_keys=batch_keys_to_pop, non_tensor_batch_keys=non_tensor_batch_keys_to_pop, ) test_gen_batch.meta_info = { "eos_token_id": self.tokenizer.eos_token_id, "pad_token_id": self.tokenizer.pad_token_id, "recompute_log_prob": False, "do_sample": self.config.actor_rollout_ref.rollout.val_kwargs.do_sample, "validate": True, } print(f"test_gen_batch meta info: {test_gen_batch.meta_info}") # pad to be divisible by dp_size test_gen_batch_padded, pad_size = pad_dataproto_to_divisor(test_gen_batch, self.actor_rollout_wg.world_size) if not self.async_rollout_mode: test_output_gen_batch_padded = self.actor_rollout_wg.generate_sequences(test_gen_batch_padded) else: test_output_gen_batch_padded = self.async_rollout_manager.generate_sequences(test_gen_batch_padded) # unpad test_output_gen_batch = unpad_dataproto(test_output_gen_batch_padded, pad_size=pad_size) print("validation generation end") # Store generated outputs output_ids = test_output_gen_batch.batch["responses"] output_texts = [self.tokenizer.decode(ids, skip_special_tokens=True) for ids in output_ids] sample_outputs.extend(output_texts) test_batch = test_batch.union(test_output_gen_batch) # evaluate using reward_function result = self.val_reward_fn(test_batch, return_dict=True) reward_tensor = result["reward_tensor"] scores = reward_tensor.sum(-1).cpu().tolist() sample_scores.extend(scores) reward_extra_infos_dict["reward"].extend(scores) if "reward_extra_info" in result: for key, lst in 
result["reward_extra_info"].items(): reward_extra_infos_dict[key].extend(lst) data_source_lst.append(test_batch.non_tensor_batch.get("data_source", ["unknown"] * reward_tensor.shape[0])) self._maybe_log_val_generations(inputs=sample_inputs, outputs=sample_outputs, scores=sample_scores) # dump generations val_data_dir = self.config.trainer.get("validation_data_dir", None) if val_data_dir: sample_gts = [ item.non_tensor_batch.get("reward_model", {}).get("ground_truth", None) for item in test_batch ] self._dump_generations( inputs=sample_inputs, outputs=sample_outputs, gts=sample_gts, scores=sample_scores, reward_extra_infos_dict=reward_extra_infos_dict, dump_path=val_data_dir, ) for key_info, lst in reward_extra_infos_dict.items(): assert len(lst) == 0 or len(lst) == len(sample_scores), f"{key_info}: {len(lst)=}, {len(sample_scores)=}" data_sources = np.concatenate(data_source_lst, axis=0) print(f"DEBUG: Data sources shape: {data_sources.shape}") # Added Print print(f"DEBUG: reward_extra_infos_dict keys before processing: {reward_extra_infos_dict.keys()}") # Added Print data_src2var2metric2val = process_validation_metrics(data_sources, sample_inputs, reward_extra_infos_dict) print( f"DEBUG: Output of process_validation_metrics (data_src2var2metric2val): {data_src2var2metric2val}" ) # Added Print metric_dict = {} for data_source, var2metric2val in data_src2var2metric2val.items(): core_var = "acc" if "acc" in var2metric2val else "reward" for var_name, metric2val in var2metric2val.items(): n_max = max([int(name.split("@")[-1].split("/")[0]) for name in metric2val.keys()]) for metric_name, metric_val in metric2val.items(): if ( (var_name == core_var) and any(metric_name.startswith(pfx) for pfx in ["mean", "maj", "best"]) and (f"@{n_max}" in metric_name) ): metric_sec = "val-core" else: metric_sec = "val-aux" pfx = f"{metric_sec}/{data_source}/{var_name}/{metric_name}" metric_dict[pfx] = metric_val return metric_dict def init_workers(self): """Init resource pool and worker group""" self.resource_pool_manager.create_resource_pool() self.resource_pool_to_cls = {pool: {} for pool in self.resource_pool_manager.resource_pool_dict.values()} # create actor and rollout if self.hybrid_engine: resource_pool = self.resource_pool_manager.get_resource_pool(Role.ActorRollout) actor_rollout_cls = RayClassWithInitArgs( cls=self.role_worker_mapping[Role.ActorRollout], config=self.config.actor_rollout_ref, role="actor_rollout", ) self.resource_pool_to_cls[resource_pool]["actor_rollout"] = actor_rollout_cls else: raise NotImplementedError # create critic if self.use_critic: resource_pool = self.resource_pool_manager.get_resource_pool(Role.Critic) critic_cls = RayClassWithInitArgs(cls=self.role_worker_mapping[Role.Critic], config=self.config.critic) self.resource_pool_to_cls[resource_pool]["critic"] = critic_cls # create reference policy if needed if self.use_reference_policy: resource_pool = self.resource_pool_manager.get_resource_pool(Role.RefPolicy) ref_policy_cls = RayClassWithInitArgs( self.role_worker_mapping[Role.RefPolicy], config=self.config.actor_rollout_ref, role="ref" ) self.resource_pool_to_cls[resource_pool]["ref"] = ref_policy_cls # create a reward model if reward_fn is None if self.use_rm: # we create a RM here resource_pool = self.resource_pool_manager.get_resource_pool(Role.RewardModel) rm_cls = RayClassWithInitArgs(self.role_worker_mapping[Role.RewardModel], config=self.config.reward_model) self.resource_pool_to_cls[resource_pool]["rm"] = rm_cls # initialize WorkerGroup # NOTE: if you want to use 
a different resource pool for each role, which can support different # parallel sizes, # you should not use `create_colocated_worker_cls`. Instead, directly pass different resource pool to # different worker groups. # See https://github.com/volcengine/verl/blob/master/examples/ray/tutorial.ipynb for more information. all_wg = {} self.wg_dicts = [] wg_kwargs = {} # Setting up kwargs for RayWorkerGroup if OmegaConf.select(self.config.trainer, "ray_wait_register_center_timeout") is not None: wg_kwargs["ray_wait_register_center_timeout"] = self.config.trainer.ray_wait_register_center_timeout wg_kwargs["device_name"] = self.device_name for resource_pool, class_dict in self.resource_pool_to_cls.items(): worker_dict_cls = create_colocated_worker_cls(class_dict=class_dict) wg_dict = self.ray_worker_group_cls( resource_pool=resource_pool, ray_cls_with_init=worker_dict_cls, **wg_kwargs, ) spawn_wg = wg_dict.spawn(prefix_set=class_dict.keys()) all_wg.update(spawn_wg) # keep the reference of WorkerDict to support ray >= 2.31. Ref: https://github.com/ray-project/ray/pull/45699 self.wg_dicts.append(wg_dict) if self.use_critic: self.critic_wg = all_wg["critic"] self.critic_wg.init_model() if self.use_reference_policy: self.ref_policy_wg = all_wg["ref"] self.ref_policy_wg.init_model() if self.use_rm: self.rm_wg = all_wg["rm"] self.rm_wg.init_model() # we should create rollout at the end so that vllm can have a better estimation of kv cache memory self.actor_rollout_wg = all_wg["actor_rollout"] self.actor_rollout_wg.init_model() def _save_checkpoint(self): # path: given_path + `/global_step_{global_steps}` + `/actor` local_global_step_folder = os.path.join( self.config.trainer.default_local_dir, f"global_step_{self.global_steps}" ) print(f"local_global_step_folder: {local_global_step_folder}") actor_local_path = os.path.join(local_global_step_folder, "actor") actor_remote_path = ( None if self.config.trainer.default_hdfs_dir is None else os.path.join(self.config.trainer.default_hdfs_dir, f"global_step_{self.global_steps}", "actor") ) remove_previous_ckpt_in_save = self.config.trainer.get("remove_previous_ckpt_in_save", False) if remove_previous_ckpt_in_save: print( "Warning: remove_previous_ckpt_in_save is deprecated, set max_actor_ckpt_to_keep=1 and " "max_critic_ckpt_to_keep=1 instead" ) max_actor_ckpt_to_keep = ( self.config.trainer.get("max_actor_ckpt_to_keep", None) if not remove_previous_ckpt_in_save else 1 ) max_critic_ckpt_to_keep = ( self.config.trainer.get("max_critic_ckpt_to_keep", None) if not remove_previous_ckpt_in_save else 1 ) self.actor_rollout_wg.save_checkpoint( actor_local_path, actor_remote_path, self.global_steps, max_ckpt_to_keep=max_actor_ckpt_to_keep ) if self.use_critic: critic_local_path = os.path.join(local_global_step_folder, "critic") critic_remote_path = ( None if self.config.trainer.default_hdfs_dir is None else os.path.join(self.config.trainer.default_hdfs_dir, f"global_step_{self.global_steps}", "critic") ) self.critic_wg.save_checkpoint( critic_local_path, critic_remote_path, self.global_steps, max_ckpt_to_keep=max_critic_ckpt_to_keep ) # save dataloader dataloader_local_path = os.path.join(local_global_step_folder, "data.pt") dataloader_state_dict = self.train_dataloader.state_dict() torch.save(dataloader_state_dict, dataloader_local_path) # latest checkpointed iteration tracker (for atomic usage) local_latest_checkpointed_iteration = os.path.join( self.config.trainer.default_local_dir, "latest_checkpointed_iteration.txt" ) with open(local_latest_checkpointed_iteration, 
"w") as f: f.write(str(self.global_steps)) def _load_checkpoint(self): if self.config.trainer.resume_mode == "disable": return 0 # load from hdfs if self.config.trainer.default_hdfs_dir is not None: raise NotImplementedError("load from hdfs is not implemented yet") else: checkpoint_folder = self.config.trainer.default_local_dir # TODO: check path if not os.path.isabs(checkpoint_folder): working_dir = os.getcwd() checkpoint_folder = os.path.join(working_dir, checkpoint_folder) global_step_folder = find_latest_ckpt_path(checkpoint_folder) # None if no latest # find global_step_folder if self.config.trainer.resume_mode == "auto": if global_step_folder is None: print("Training from scratch") return 0 else: if self.config.trainer.resume_mode == "resume_path": assert isinstance(self.config.trainer.resume_from_path, str), "resume ckpt must be str type" assert "global_step_" in self.config.trainer.resume_from_path, ( "resume ckpt must specify the global_steps" ) global_step_folder = self.config.trainer.resume_from_path if not os.path.isabs(global_step_folder): working_dir = os.getcwd() global_step_folder = os.path.join(working_dir, global_step_folder) print(f"Load from checkpoint folder: {global_step_folder}") # set global step self.global_steps = int(global_step_folder.split("global_step_")[-1]) print(f"Setting global step to {self.global_steps}") print(f"Resuming from {global_step_folder}") actor_path = os.path.join(global_step_folder, "actor") critic_path = os.path.join(global_step_folder, "critic") # load actor self.actor_rollout_wg.load_checkpoint( actor_path, del_local_after_load=self.config.trainer.del_local_ckpt_after_load ) # load critic if self.use_critic: self.critic_wg.load_checkpoint( critic_path, del_local_after_load=self.config.trainer.del_local_ckpt_after_load ) # load dataloader, # TODO: from remote not implemented yet dataloader_local_path = os.path.join(global_step_folder, "data.pt") if os.path.exists(dataloader_local_path): dataloader_state_dict = torch.load(dataloader_local_path, weights_only=False) self.train_dataloader.load_state_dict(dataloader_state_dict) else: print(f"Warning: No dataloader state found at {dataloader_local_path}, will start from scratch") def _balance_batch(self, batch: DataProto, metrics, logging_prefix="global_seqlen"): """Reorder the data on single controller such that each dp rank gets similar total tokens""" attention_mask = batch.batch["attention_mask"] batch_size = attention_mask.shape[0] global_seqlen_lst = batch.batch["attention_mask"].view(batch_size, -1).sum(-1).tolist() # (train_batch_size,) world_size = self.actor_rollout_wg.world_size global_partition_lst = get_seqlen_balanced_partitions( global_seqlen_lst, k_partitions=world_size, equal_size=True ) # reorder based on index. The data will be automatically equally partitioned by dispatch function global_idx = torch.tensor([j for partition in global_partition_lst for j in partition]) batch.reorder(global_idx) global_balance_stats = log_seqlen_unbalance( seqlen_list=global_seqlen_lst, partitions=global_partition_lst, prefix=logging_prefix ) metrics.update(global_balance_stats) def fit_dpo(self): # Renamed for clarity as standard PPO loop """ The training loop of Online DPO using a periodically updated reference model. The driver process calls worker groups for computation. Advantage computation is replaced by DPO logic. 
""" import traceback # Ensure traceback is imported from omegaconf import OmegaConf from verl.utils.tracking import Tracking # Initialize logger logger = None try: logger = Tracking( project_name=self.config.trainer.project_name, experiment_name=self.config.trainer.experiment_name, default_backend=self.config.trainer.logger, config=OmegaConf.to_container(self.config, resolve=True, throw_on_missing=False), ) except Exception as e: print(f"Warning: Failed to initialize logger: {e}") self.global_steps = 0 # Load checkpoint before doing anything loaded_step = self._load_checkpoint() self.global_steps = loaded_step + 1 if loaded_step is not None and loaded_step > 0 else 1 print( f"Starting Online DPO training from global step {self.global_steps}. " f"Total steps: {self.total_training_steps}" ) print(f"Reference model update frequency: {self.config.trainer.get('ref_update_freq', 'Not Set')}") # Check if reference policy is configured correctly for this mode if not self.use_reference_policy: print( "WARNING: 'use_reference_policy' is False. Periodic reference model update requires a " "reference policy worker. DPO updates might fail or use incorrect logic." ) # Consider raising an error if strict adherence is required: # raise ValueError("Periodic reference model update requires 'use_reference_policy' to be True " # "and a configured reference worker.") # Perform validation before training if self.val_reward_fn is not None and self.config.trainer.get("val_before_train", True): print("Running validation before Online DPO training...") val_metrics = self._validate() pprint(f"Initial validation metrics: {val_metrics}") if logger and val_metrics: logger.log(data=val_metrics, step=max(0, self.global_steps - 1)) if self.config.trainer.get("val_only", False): print("Validation only mode enabled. Exiting training.") if logger and hasattr(logger, "finish"): logger.finish() return # Add tqdm progress bar progress_bar = tqdm( total=self.total_training_steps, initial=self.global_steps, desc="Online DPO Training Progress", position=0, leave=True, ) last_val_metrics = None should_stop = False for epoch in range(self.config.trainer.total_epochs): if should_stop: break print(f"--- Starting Online DPO Epoch {epoch} ---") try: train_iterator = iter(self.train_dataloader) except TypeError: print("Warning: Dataloader is not iterable.") train_iterator = self.train_dataloader # Fallback attempt for batch_idx, batch_dict in enumerate(train_iterator): if self.global_steps > self.total_training_steps: should_stop = True break metrics = {} timing_raw = {} step_timer = Timer(logger=None) ref_log_prob_computed = False # Flag to track if ref log probs were computed try: # Outer try-except for the whole step step_timer.start() with _timer("step", timing_raw): batch: DataProto = DataProto.from_single_dict(batch_dict) current_batch_size = batch.batch.batch_size[0] print( f"\n[Step {self.global_steps}, Batch {batch_idx}] Processing batch size: " f"{current_batch_size}" ) # --- Reference Model Update --- ref_update_freq = self.config.trainer.get("ref_update_freq", -1) if ( self.use_reference_policy and ref_update_freq > 0 and self.global_steps % ref_update_freq == 0 ): print(f"\n[Step {self.global_steps}] Updating Reference Model Weights from Actor...") try: # --- This requires careful implementation with FSDP --- # 1. Save actor state dict (potentially to CPU memory or disk) # This needs to be done collectively across actor worker ranks. # The checkpoint_manager might be adaptable, or use FSDP APIs directly. 
# Example placeholder using a conceptual save/load mechanism: actor_state_path = "/tmp/actor_state_mid" # Temporary path self.actor_rollout_wg.save_checkpoint(actor_state_path) # Adapt save logic # 2. Load the state dict onto the reference model worker group # This also needs collective loading on the ref worker ranks. self.ref_policy_wg.load_checkpoint(actor_state_path, None, True) # Adapt load logic print(f"[Step {self.global_steps}] Reference Model Weights Updated.") # Optionally remove the temporary state file # os.remove(actor_state_path) # Needs rank-aware removal or shared storage except Exception as sync_e: print(f"ERROR during reference model sync at step {self.global_steps}: {sync_e}") traceback.print_exc() # Pop keys for generation pop_batch_keys = ["input_ids", "attention_mask"] if "position_ids" in batch.batch: pop_batch_keys.append("position_ids") pop_non_tensor_keys = ["raw_prompt_ids"] if "raw_prompt_ids" in batch.non_tensor_batch else [] if "multi_modal_inputs" in batch.non_tensor_batch.keys(): pop_non_tensor_keys.extend(["multi_modal_data", "multi_modal_inputs"]) original_non_tensor_data = batch.non_tensor_batch gen_batch = batch.pop( batch_keys=pop_batch_keys, non_tensor_batch_keys=pop_non_tensor_keys, ) gen_batch = gen_batch.repeat( repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True ) # (Add Debug prints for gen_batch if needed) # Generate sequences (chosen/rejected pairs) with _timer("gen", timing_raw): try: gen_batch_output = self.actor_rollout_wg.generate_sequences(gen_batch) # (Add Debug prints for gen_batch_output if needed) except Exception as gen_e: print(f"\n!!!!!!!! ERROR DURING GENERATION (Step {self.global_steps}) !!!!!!!!") print(gen_e) traceback.print_exc() print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!") step_timer.stop() continue # Combine original prompts with generated sequences batch.non_tensor_batch = original_non_tensor_data # Restore non-tensor data batch.non_tensor_batch["uid"] = np.array( [str(uuid.uuid4()) for _ in range(current_batch_size)], dtype=object ) batch = batch.repeat(repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True) batch = batch.union(gen_batch_output) # (Add Debug prints after union if needed) # Compute response mask (needed for ref logprob calc and DPO prep) batch.batch["response_mask"] = compute_response_mask(batch) if self.config.trainer.balance_batch: self._balance_batch(batch, metrics=metrics) batch.meta_info["global_token_num"] = torch.sum(batch.batch["attention_mask"], dim=-1).tolist() # --- Compute Log Probs for the CURRENT policy (used for KL if enabled, or ActorAsRef # fallback) --- # Note: For pure DPO with external ref, this 'old_log_probs' might not be strictly needed # unless used for other metrics or a fallback. Keep it for now. 
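# For orientation: given these policy log probs and the reference log probs
# computed next, the standard sigmoid DPO loss (which update_actor_dpo is
# expected to implement, per dpo_loss_type="sigmoid" below) is
#
#     L(theta) = -log sigmoid( beta * [ (logp_chosen - ref_logp_chosen)
#                                     - (logp_rejected - ref_logp_rejected) ] )
#
# with sequence-level log probs; when reference_free=True the ref_logp_*
# terms drop out.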
with _timer("policy_log_prob", timing_raw): policy_log_prob_output = self.actor_rollout_wg.compute_log_prob(batch) batch = batch.union(policy_log_prob_output) # Adds 'old_log_probs' # (Debug prints for old_log_probs) # --- Compute Log Probs using the EXTERNAL Reference Model --- if self.use_reference_policy: with _timer("ref_log_prob_dpo", timing_raw): # print(f"---- [Step {self.global_steps}] DEBUG DPO: Calling compute_ref_log_prob ----") try: # 'batch' contains interleaved chosen/rejected sequences ref_log_prob_output = self.ref_policy_wg.compute_ref_log_prob( batch ) # Returns DataProto with 'ref_log_prob' batch = batch.union( ref_log_prob_output ) # Adds 'ref_log_prob' key [batch_size * n, seq_len] ref_log_prob_computed = True # Mark success # print(f"---- [Step {self.global_steps}] DEBUG DPO: ref_log_prob tensor shape: " # f"{batch.batch['ref_log_prob'].shape} ----") except Exception as ref_e: print(f"ERROR computing reference log probs at step {self.global_steps}: {ref_e}") traceback.print_exc() batch.batch["ref_log_prob"] = None # Mark as failed ref_log_prob_computed = False else: print( "Warning: Skipping external reference log prob calculation as use_reference_policy " "is False." ) # DPO update will likely fail unless ActorAsRef logic is re-enabled in dp_actor # --- Compute Rewards/Scores (used to determine preference) --- with _timer("reward_calc", timing_raw): # (Reward calculation logic using RM or reward_fn as before) # ... Ensure this calculates 'token_level_rewards' or similar ... if self.use_rm: reward_tensor_rm = self.rm_wg.compute_rm_score(batch) batch = batch.union(reward_tensor_rm) # Adds 'rm_scores' reward_extra_infos_dict = {} try: if self.reward_fn is None: # print(f"---- [DEBUG Step {self.global_steps}] ERROR: self.reward_fn is None! " # f"Using dummy rewards. ----") # Use rm_scores if available, otherwise zeros reward_tensor = batch.batch.get( "rm_scores", torch.zeros_like(batch.batch["response_mask"], dtype=torch.float32) ) else: reward_result = self.reward_fn(batch, return_dict=True) reward_tensor = reward_result["reward_tensor"] # Final combined reward reward_extra_infos_dict = reward_result.get("reward_extra_info", {}) except Exception: # print(f'---- [DEBUG Step {self.global_steps}] Error in reward_fn call: {e}. ' # f'Using dummy rewards. 
----') traceback.print_exc() reward_tensor = torch.zeros_like(batch.batch["response_mask"], dtype=torch.float32) reward_extra_infos_dict = {} # Use 'token_level_rewards' as the key for preference calculation batch.batch["token_level_rewards"] = reward_tensor if reward_extra_infos_dict: batch.non_tensor_batch.update( {k: np.array(v) for k, v in reward_extra_infos_dict.items()} ) # --- Determine Preferences --- # Uses 'token_level_rewards' to determine chosen/rejected based on score batch = compute_onlineDPO_pref(batch) # Adds 'preferences' key # --- Prepare DPO Batch --- dpo_update_batch_proto = None # Initialize with _timer("prepare_dpo_batch", timing_raw): try: if "preferences" not in batch.batch or batch.batch["preferences"] is None: raise ValueError("'preferences' key missing or None after compute_onlineDPO_pref.") # Check if reference log probs were computed successfully (if needed) if self.use_reference_policy and not ref_log_prob_computed: raise ValueError("Reference log probs required but failed to compute.") # Check required base keys required_keys = ["input_ids", "attention_mask", "response_mask"] for rk in required_keys: if rk not in batch.batch or batch.batch[rk] is None: raise KeyError(f"Required key '{rk}' missing from batch for DPO prep.") preferences_mask = batch.batch["preferences"] # Shape [batch_size * n] not_preferences_mask = ~preferences_mask # Gather Chosen/Rejected Base Tensors chosen_input_ids = batch.batch["input_ids"][preferences_mask] chosen_attention_mask = batch.batch["attention_mask"][preferences_mask] rejected_input_ids = batch.batch["input_ids"][not_preferences_mask] rejected_attention_mask = batch.batch["attention_mask"][not_preferences_mask] chosen_position_ids = ( batch.batch.get("position_ids")[preferences_mask] if "position_ids" in batch.batch else None ) rejected_position_ids = ( batch.batch.get("position_ids")[not_preferences_mask] if "position_ids" in batch.batch else None ) # Create Labels print("WARNING: Creating DPO labels using configured max_prompt_length...") prompt_len = self.config.data.max_prompt_length chosen_labels = chosen_input_ids.clone() chosen_labels[:, :prompt_len] = -100 rejected_labels = rejected_input_ids.clone() rejected_labels[:, :prompt_len] = -100 # Calculate and Gather Reference Log Probs (Sequence Level) if self.use_reference_policy: ref_log_prob_tensor = batch.batch["ref_log_prob"] # Token level [bsz * n, seq_len] response_mask_full = batch.batch[ "response_mask" ] # Response mask [bsz * n, seq_len] ref_sequence_logps = (ref_log_prob_tensor * response_mask_full).sum( dim=-1 ) # Sequence level [bsz * n] reference_chosen_logps = ref_sequence_logps[preferences_mask] reference_rejected_logps = ref_sequence_logps[not_preferences_mask] else: # If not using external ref, DPO needs ActorAsRef logic in dp_actor # We won't add the keys here, dp_actor will handle it (or fail if not modified) print( "Info: Not adding explicit reference logps to DPO batch " "(use_reference_policy=False)." 
) reference_chosen_logps = None # Explicitly None reference_rejected_logps = None # Package Tensors dpo_tensors = { "chosen_input_ids": chosen_input_ids, "chosen_attention_mask": chosen_attention_mask, "chosen_labels": chosen_labels, "rejected_input_ids": rejected_input_ids, "rejected_attention_mask": rejected_attention_mask, "rejected_labels": rejected_labels, } # Conditionally add reference logps if computed if reference_chosen_logps is not None: dpo_tensors["reference_chosen_logps"] = reference_chosen_logps if reference_rejected_logps is not None: dpo_tensors["reference_rejected_logps"] = reference_rejected_logps # Add position ids if they exist if chosen_position_ids is not None: dpo_tensors["chosen_position_ids"] = chosen_position_ids if rejected_position_ids is not None: dpo_tensors["rejected_position_ids"] = rejected_position_ids # Prepare Meta Info dpo_meta = { "dpo_beta": OmegaConf.select(self.config.algorithm, "dpo_beta", default=0.1), "dpo_loss_type": OmegaConf.select( self.config.algorithm, "dpo_loss_type", default="sigmoid" ), "dpo_label_smoothing": OmegaConf.select( self.config.algorithm, "dpo_label_smoothing", default=0.0 ), "use_reference_policy": self.use_reference_policy, "reference_free": not self.use_reference_policy, # False if using external ref "global_step": self.global_steps, } dpo_update_batch_proto = DataProto.from_dict(tensors=dpo_tensors, meta_info=dpo_meta) # print(f"---- [Step {self.global_steps}] DEBUG DPO: Prepared DPO Update Batch ----") # print(f" Keys: {list(dpo_update_batch_proto.batch.keys())}") # print(f" Meta Info: {dpo_meta}") except Exception as e_prep: print(f"ERROR preparing DPO batch at step {self.global_steps}: {e_prep}") traceback.print_exc() dpo_update_batch_proto = None # Skip update on error # --- Actor Update Step --- actor_output = None if self.config.trainer.critic_warmup <= self.global_steps and dpo_update_batch_proto: with _timer("update_actor", timing_raw): # Pass the batch containing reference log probs (if computed) # The modified update_actor_dpo expects them if reference_free=False actor_output = self.actor_rollout_wg.update_actor_dpo(dpo_update_batch_proto) if actor_output and "metrics" in actor_output.meta_info: metrics.update(reduce_metrics(actor_output.meta_info["metrics"])) elif dpo_update_batch_proto is None: print( f"Skipping actor update at step {self.global_steps} due to DPO batch preparation error." 
) # --- Validation and Saving --- test_freq = OmegaConf.select(self.config.trainer, "test_freq", default=-1) is_last_step = self.global_steps >= self.total_training_steps if ( self.val_reward_fn is not None and test_freq > 0 and (is_last_step or self.global_steps % test_freq == 0) ): print(f"\nRunning DPO validation at step {self.global_steps}...") val_timing_raw = {} with _timer("testing", val_timing_raw): val_metrics: dict = self._validate() if is_last_step: last_val_metrics = val_metrics if val_metrics: metrics["time/validation_run"] = val_timing_raw.get("testing", 0) metrics.update(val_metrics) else: print("Validation skipped or returned no metrics.") save_freq = OmegaConf.select(self.config.trainer, "save_freq", default=-1) if save_freq > 0 and (is_last_step or self.global_steps % save_freq == 0): print(f"\nSaving DPO checkpoint at step {self.global_steps}...") with _timer("save_checkpoint", timing_raw): self._save_checkpoint() # Saves actor (and potentially critic if used elsewhere) metrics["time/save_checkpoint"] = timing_raw.get("save_checkpoint", 0) # --- End main step timer context --- # --- Metrics calculation AFTER the 'step' timer block --- metrics.update(compute_dpo_data_metrics(batch=batch)) # Use DPO-specific metrics metrics.update(compute_timing_metrics(batch=batch, timing_raw=timing_raw)) n_gpus = self.resource_pool_manager.get_n_gpus() if "step" in timing_raw: metrics.update(compute_throughout_metrics(batch=batch, timing_raw=timing_raw, n_gpus=n_gpus)) else: print( f"Warning: 'step' key missing from timing_raw at step {self.global_steps}. " f"Skipping throughput." ) step_timer.stop() metrics["time/step"] = step_timer.last # Log metrics log_freq = OmegaConf.select(self.config.trainer, "log_freq", default=1) if logger and self.global_steps % log_freq == 0: log_payload = metrics.copy() # Add learning rate to log payload if actor_output and "actor/lr" in metrics: log_payload["actor/lr"] = metrics["actor/lr"] print(f"[Step {self.global_steps} DPO] Logging Step Payload Keys: {list(log_payload.keys())}") try: logger.log(data=log_payload, step=self.global_steps) except Exception as e: print(f"Logging failed at step {self.global_steps}: {e}") # Update progress bar postfix_metrics = { k: f"{v:.3f}" if isinstance(v, float) else v for k, v in metrics.items() if isinstance(v, int | float) } progress_bar.set_postfix(postfix_metrics) except Exception as step_e: print(f"\n!!!!!!!! 
ERROR DURING DPO Step {self.global_steps} !!!!!!!!") print(f"Caught Exception: {step_e}") traceback.print_exc() print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!") step_timer.stop() should_stop = True break if is_last_step or should_stop: print(f"Stopping DPO training at step {self.global_steps}.") break self.global_steps += 1 progress_bar.update(1) # End of epoch handling if hasattr(self.train_dataloader, "reset"): try: self.train_dataloader.reset() except Exception as e: print(f"Warning: Failed to reset train dataloader state: {e}") if should_stop: break # --- Final cleanup and logging --- progress_bar.close() final_step = max(0, self.global_steps - 1) print(f"Online DPO Training finished at step {final_step}.") # Save final checkpoint save_freq = OmegaConf.select(self.config.trainer, "save_freq", default=-1) if not self.config.trainer.get("val_only", False) and (save_freq <= 0 or final_step % save_freq != 0): print(f"Saving final DPO checkpoint at step {final_step}...") self._save_checkpoint() # Final validation run if self.val_reward_fn and last_val_metrics is None and not self.config.trainer.get("val_only", False): print("Running final validation...") last_val_metrics = self._validate() if last_val_metrics and logger: last_val_metrics["final_validation"] = True try: logger.log(data=last_val_metrics, step=final_step) except Exception as e: print(f"[Final Val Metrics Log Error]: {e}") pprint(f"Final validation metrics: {last_val_metrics}") if logger and hasattr(logger, "finish"): logger.finish() print("Online DPO Training Run Complete.") ================================================ FILE: verl_distillation/recipe/spin/utils.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2023-2024 SGLang Team # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from omegaconf import DictConfig def validate_config( config: DictConfig, use_reference_policy: bool, use_critic: bool, ) -> None: """ Validate an OmegaConf DictConfig Args: config: The OmegaConf DictConfig to validate. use_reference_policy (bool): is ref policy needed use_critic (bool): is critic needed """ # number of GPUs total n_gpus = config.trainer.n_gpus_per_node * config.trainer.nnodes # 1. Check total batch size for data correctness real_train_batch_size = config.data.train_batch_size * config.actor_rollout_ref.rollout.n assert real_train_batch_size % n_gpus == 0, ( f"real_train_batch_size ({real_train_batch_size}) must be divisible by total n_gpus ({n_gpus})." ) # A helper function to check "micro_batch_size" vs "micro_batch_size_per_gpu" # We throw an error if the user sets both. The new convention is "..._micro_batch_size_per_gpu". 
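# For example (hypothetical values): setting both
#     actor_rollout_ref.actor.ppo_micro_batch_size=8
# and
#     actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=2
# triggers the ValueError below; only the *_per_gpu variant should be kept.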
def check_mutually_exclusive(mbs, mbs_per_gpu, name: str): settings = { "actor_rollout_ref.actor": "micro_batch_size", "critic": "micro_batch_size", "reward_model": "micro_batch_size", "actor_rollout_ref.ref": "log_prob_micro_batch_size", "actor_rollout_ref.rollout": "log_prob_micro_batch_size", } if name in settings: param = settings[name] param_per_gpu = f"{param}_per_gpu" if mbs is None and mbs_per_gpu is None: raise ValueError(f"[{name}] Please set at least one of '{name}.{param}' or '{name}.{param_per_gpu}'.") if mbs is not None and mbs_per_gpu is not None: raise ValueError( f"[{name}] You have set both '{name}.{param}' AND '{name}.{param_per_gpu}'. " f"Please remove '{name}.{param}' because only '*_{param_per_gpu}' is supported " f"(the former is deprecated)." ) if not config.actor_rollout_ref.actor.use_dynamic_bsz: # actor: ppo_micro_batch_size vs. ppo_micro_batch_size_per_gpu check_mutually_exclusive( config.actor_rollout_ref.actor.ppo_micro_batch_size, config.actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu, "actor_rollout_ref.actor", ) if use_reference_policy: # reference: log_prob_micro_batch_size vs. log_prob_micro_batch_size_per_gpu check_mutually_exclusive( config.actor_rollout_ref.ref.log_prob_micro_batch_size, config.actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu, "actor_rollout_ref.ref", ) # The rollout section also has log_prob_micro_batch_size vs. log_prob_micro_batch_size_per_gpu check_mutually_exclusive( config.actor_rollout_ref.rollout.log_prob_micro_batch_size, config.actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu, "actor_rollout_ref.rollout", ) if use_critic and not config.critic.use_dynamic_bsz: # Check for critic micro-batch size conflicts check_mutually_exclusive( config.critic.ppo_micro_batch_size, config.critic.ppo_micro_batch_size_per_gpu, "critic" ) # Check for reward model micro-batch size conflicts if config.reward_model.enable and not config.reward_model.use_dynamic_bsz: check_mutually_exclusive( config.reward_model.micro_batch_size, config.reward_model.micro_batch_size_per_gpu, "reward_model" ) # Actor # check if train_batch_size is larger than ppo_mini_batch_size # if NOT dynamic_bsz, we must ensure: # ppo_mini_batch_size is divisible by ppo_micro_batch_size # ppo_micro_batch_size * sequence_parallel_size >= n_gpus if not config.actor_rollout_ref.actor.use_dynamic_bsz: assert config.data.train_batch_size >= config.actor_rollout_ref.actor.ppo_mini_batch_size sp_size = config.actor_rollout_ref.actor.get("ulysses_sequence_parallel_size", 1) if config.actor_rollout_ref.actor.ppo_micro_batch_size is not None: assert ( config.actor_rollout_ref.actor.ppo_mini_batch_size % config.actor_rollout_ref.actor.ppo_micro_batch_size == 0 ) assert config.actor_rollout_ref.actor.ppo_micro_batch_size * sp_size >= n_gpus assert config.actor_rollout_ref.actor.loss_agg_mode in [ "token-mean", "seq-mean-token-sum", "seq-mean-token-mean", ], f"Invalid loss_agg_mode: {config.actor_rollout_ref.actor.loss_agg_mode}" if config.algorithm.use_kl_in_reward and config.actor_rollout_ref.actor.use_kl_loss: print("NOTICE: You have both enabled in-reward kl and kl loss.") # critic if use_critic and not config.critic.use_dynamic_bsz: assert config.data.train_batch_size >= config.critic.ppo_mini_batch_size sp_size = config.critic.get("ulysses_sequence_parallel_size", 1) if config.critic.ppo_micro_batch_size is not None: assert config.critic.ppo_mini_batch_size % config.critic.ppo_micro_batch_size == 0 assert config.critic.ppo_micro_batch_size * sp_size >= n_gpus # 
Check if use_remove_padding is enabled when using sequence parallelism for fsdp if config.actor_rollout_ref.actor.strategy in {"fsdp", "fsdp2"}: if ( config.actor_rollout_ref.actor.get("ulysses_sequence_parallel_size", 1) > 1 or config.actor_rollout_ref.ref.get("ulysses_sequence_parallel_size", 1) > 1 ): assert config.actor_rollout_ref.model.use_remove_padding, ( "When using sequence parallelism for actor/ref policy, you must enable `use_remove_padding`." ) if use_critic and config.critic.strategy in {"fsdp", "fsdp2"}: if config.critic.get("ulysses_sequence_parallel_size", 1) > 1: assert config.critic.model.use_remove_padding, ( "When using sequence parallelism for critic, you must enable `use_remove_padding`." ) if config.data.get("val_batch_size", None) is not None: print( "WARNING: val_batch_size is deprecated. Validation datasets are sent to inference engines " "as a whole batch, which will schedule the memory themselves." ) # check eval config if config.actor_rollout_ref.rollout.val_kwargs.do_sample: assert config.actor_rollout_ref.rollout.temperature > 0, ( "validation gen temperature should be greater than 0 when enabling do_sample" ) print("[validate_config] All configuration checks passed successfully!") ================================================ FILE: verl_distillation/recipe/sppo/README.md ================================================ # SPPO: Self-Play Preference Optimization for Language Model Alignment This repository hosts the community implementation for the paper [Self-Play Preference Optimization for Language Model Alignment](https://arxiv.org/abs/2405.00675). SPPO can significantly enhance the performance of an LLM without strong external signals such as responses or preferences from GPT-4. It can outperform the model trained with iterative direct preference optimization (DPO), among other methods. SPPO is theoretically grounded, ensuring that the LLM can converge to the von Neumann winner (i.e., Nash equilibrium) under general, potentially intransitive preference, and empirically validated through extensive evaluations on multiple datasets. Paper Authors: [Yue Wu](https://yuewu.us/)\*, [Zhiqing Sun](https://www.cs.cmu.edu/~zhiqings/)\*, [Huizhuo Yuan](https://scholar.google.com/citations?user=8foZzX4AAAAJ)\*, [Kaixuan Ji](https://scholar.google.com/citations?user=FOoKDukAAAAJ), [Yiming Yang](https://www.cs.cmu.edu/~yiming/), [Quanquan Gu](https://web.cs.ucla.edu/~qgu/) verl Implementation Authors: [Yuhao Yang](https://github.com/yhyang201), [Chenyang Zhao](https://github.com/zhaochenyang20) [[Webpage](https://uclaml.github.io/SPPO/)] [[Huggingface](https://huggingface.co/papers/2405.00675)] [[Paper](https://arxiv.org/abs/2405.00675)][[Original Implementation](https://github.com/uclaml/SPPO)] ## Reproduce the Experiment We evaluate the performance of SPPO on the MATH dataset. Starting from an initial score of 46.6 with Qwen2.5-7B-Instruct, we achieve a score of 65.6 after 20 epochs of training, placing our model approximately in the top 20 on the [MATH leaderboard](https://paperswithcode.com/sota/math-word-problem-solving-on-math). It's important to note that verl's internal evaluation metrics may not perfectly align with the official evaluation methodology for Qwen2.5-7B-Instruct. Therefore, for consistency and fair comparison, we report only the results based on verl's evaluation framework. 
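For orientation, the per-sample objective this recipe optimizes (a restatement of `compute_sppo_loss` in `recipe/sppo/dp_actor.py`, where the sequence reward $r$ is centred by the batch soft-mean in `sppo_ray_trainer.py`, $m_t$ is the response mask, and $\eta$ = `sppo_eta`) is

$$
\mathcal{L}_{\text{SPPO}}(\theta) = \Big(\sum_t m_t \log \pi_\theta(y_t \mid x, y_{<t}) - \sum_t m_t \log \pi_{\text{old}}(y_t \mid x, y_{<t}) - \eta\, r\Big)^2 .
$$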
```
git clone git@github.com:volcengine/verl.git
cd verl
python3 -m uv pip install -e ".[sglang]"
export WANDB_API_KEY=
python3 examples/data_preprocess/math_dataset.py --local_dir ~/data/math
huggingface-cli download Qwen/Qwen2.5-7B-Instruct --local-dir $HOME/models/Qwen2.5-7B-Instruct
export CUDA_VISIBLE_DEVICES=0,1,2,3
bash recipe/sppo/run_qwen2.5-7b_rm.sh
```

Note that the installation may occasionally fail to install flash-attn. If this happens, you can install it manually by running:

```bash
python3 -m uv pip install wheel
python3 -m uv pip install packaging
python3 -m uv pip install flash-attn --no-build-isolation --no-deps
```

## Acknowledgement

We sincerely thank the following people for their contributions and guidance:

- [Yue Wu](https://yuewu.us/)
- [Chendong Wang](https://cdwang96.github.io/)
- [Yifan Zhang](https://github.com/yifanzhang-pro)
- [Yongan Xiang](https://github.com/BearBiscuit05)
- [Junrong Lin](https://github.com/ocss884)
- [Yuxuan Tong](https://github.com/tongyx361)
- [Guangming Shen](https://github.com/PeterSH6)
- [Biao He](https://www.linkedin.com/in/biao-he/)
- [Qingquan Song](https://qingquansong.github.io/)
- [Quanquan Gu](https://web.cs.ucla.edu/~qgu/)

================================================
FILE: verl_distillation/recipe/sppo/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023-2024 SGLang Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

================================================
FILE: verl_distillation/recipe/sppo/config/sppo_trainer.yaml
================================================
# the sppo config will override default ppo_trainer.yaml
hydra:
  searchpath:
    - file://verl/trainer/config

defaults:
  - ppo_trainer
  - _self_

actor_rollout_ref:
  actor:
    _target_: recipe.sppo.config.SPPOActorConfig
    # sppo_eta is an additional hyperparameter for SPPO, not available in
    # verl core. Specifying _target_ with SPPOActorConfig is needed to
    # extend verl ActorConfig with custom fields.
    # Additionally, it is also possible to use the `extra` field natively
    # supported by all verl core dataclasses, without having to define
    # SPPOActorConfig:
    # extra:
    #   sppo_eta: 1.0
    sppo_eta: 1.0
    optim:
      lr_warmup_steps: 15
  rollout:
    name: sglang
    tensor_model_parallel_size: 2
    gpu_memory_utilization: 0.5
    val_kwargs:
      n: 2 # 2 will trigger validation, 1 will bypass

algorithm:
  adv_estimator: null
  sppo_eta: 1.0

trainer:
  log_val_generations: 0

================================================
FILE: verl_distillation/recipe/sppo/config.py
================================================
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from dataclasses import dataclass

from verl.workers.config import FSDPActorConfig


@dataclass
class SPPOActorConfig(FSDPActorConfig):
    sppo_eta: float = 1.0


================================================
FILE: verl_distillation/recipe/sppo/dp_actor.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023-2024 SGLang Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import os

import torch

import verl.utils.torch_functional as verl_F
from verl import DataProto
from verl.trainer.ppo.core_algos import agg_loss, kl_penalty
from verl.utils.device import get_device_id
from verl.utils.profiler import GPUMemoryLogger
from verl.utils.py_functional import append_to_dict
from verl.utils.seqlen_balancing import rearrange_micro_batches
from verl.workers.actor.dp_actor import DataParallelPPOActor

logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))


def compute_sppo_loss(
    old_log_prob: torch.Tensor,  # (bs, seq_len)
    log_prob: torch.Tensor,  # (bs, seq_len)
    rewards: torch.Tensor,  # (bs,)
    response_mask: torch.Tensor,  # (bs, seq_len)
    eta: float = 1.0,
    loss_agg_mode: str = "token-mean",
):
    """
    SPPO Loss computation.
    """
    # Compute log-ratios over masked tokens
    log_prob_sum = (log_prob * response_mask).sum(dim=1)  # (bs,)
    old_log_prob_sum = (old_log_prob * response_mask).sum(dim=1)  # (bs,)
    log_ratios = log_prob_sum - old_log_prob_sum  # (bs,)

    scaled_rewards = eta * rewards
    loss_vec = (log_ratios - scaled_rewards) ** 2  # (bs,)

    if loss_agg_mode == "token-mean":
        sample_mask = response_mask.any(dim=1).float()  # (bs,)
        loss = verl_F.masked_mean(loss_vec, sample_mask)
    else:
        # Guard against returning an unbound `loss`: only token-mean is
        # defined for this sequence-level squared loss.
        raise NotImplementedError(f"loss_agg_mode={loss_agg_mode} is not supported by compute_sppo_loss")
    return loss, log_ratios, scaled_rewards


class DataParallelSPPOActor(DataParallelPPOActor):
    @GPUMemoryLogger(role="dp actor", logger=logger)
    def update_policy(self, data: DataProto):
        # make sure we are in training mode
        self.actor_module.train()

        temperature = data.meta_info["temperature"]  # temperature must be in the data.meta_info to avoid silent errors
        multi_turn = data.meta_info.get("multi_turn", False)

        select_keys = ["responses", "input_ids", "attention_mask", "position_ids", "old_log_probs", "seq_level_rewards"]
        if multi_turn:
            select_keys.append("loss_mask")
        if self.config.use_kl_loss:
            select_keys.append("ref_log_prob")
        batch = data.select(batch_keys=select_keys).batch
        has_multi_modal_inputs = "multi_modal_inputs" in data.non_tensor_batch.keys()

        # Split to make minibatch iterator for updating the actor
        # See PPO paper for details. https://arxiv.org/abs/1707.06347
        if has_multi_modal_inputs:
            num_mini_batches = data.batch.batch_size[0] // self.config.ppo_mini_batch_size
            non_tensor_select_keys = ["multi_modal_inputs"]
            dataloader = data.select(select_keys, non_tensor_select_keys).chunk(num_mini_batches)
        else:
            dataloader = batch.split(self.config.ppo_mini_batch_size)

        metrics = {}
        for epoch in range(self.config.ppo_epochs):
            for batch_idx, data in enumerate(dataloader):
                # split batch into micro_batches
                mini_batch = data
                if has_multi_modal_inputs:
                    self.gradient_accumulation = (
                        self.config.ppo_mini_batch_size // self.config.ppo_micro_batch_size_per_gpu
                    )
                    num_micro_batches = mini_batch.batch.batch_size[0] // self.config.ppo_micro_batch_size_per_gpu
                    micro_batches = data.select(select_keys, non_tensor_select_keys).chunk(num_micro_batches)
                elif self.config.use_dynamic_bsz:
                    max_token_len = self.config.ppo_max_token_len_per_gpu * self.ulysses_sequence_parallel_size
                    micro_batches, _ = rearrange_micro_batches(batch=mini_batch, max_token_len=max_token_len)
                else:
                    self.gradient_accumulation = (
                        self.config.ppo_mini_batch_size // self.config.ppo_micro_batch_size_per_gpu
                    )
                    # split batch into micro_batches
                    micro_batches = mini_batch.split(self.config.ppo_micro_batch_size_per_gpu)

                self.actor_optimizer.zero_grad()

                for data in micro_batches:
                    # Support all hardware
                    if isinstance(data, DataProto):
                        data = {**data.batch.to(get_device_id()), **data.non_tensor_batch}
                    else:
                        data = data.to(get_device_id())  # actor device is cpu when using offload
                    responses = data["responses"]
                    response_length = responses.size(1)
                    attention_mask = data["attention_mask"]
                    if multi_turn:
                        response_mask = data["loss_mask"][:, -response_length:]
                    else:
                        response_mask = attention_mask[:, -response_length:]

                    old_log_prob = data["old_log_probs"]
                    rewards = data["seq_level_rewards"]

                    entropy_coeff = self.config.entropy_coeff
                    loss_agg_mode = self.config.loss_agg_mode
                    eta = self.config.get("sppo_eta", 1.0)

                    # all return: (bsz, response_length)
                    calculate_entropy = False
                    if entropy_coeff != 0:
                        calculate_entropy = True
                    entropy, log_prob = self._forward_micro_batch(
                        micro_batch=data, temperature=temperature, calculate_entropy=calculate_entropy
                    )

                    pg_loss, log_ratios, preference = compute_sppo_loss(
                        old_log_prob=old_log_prob,
                        log_prob=log_prob,
                        rewards=rewards,
                        response_mask=response_mask,
                        eta=eta,
                        loss_agg_mode=loss_agg_mode,
                    )

                    if entropy_coeff != 0:
                        entropy_loss = agg_loss(loss_mat=entropy, loss_mask=response_mask, loss_agg_mode=loss_agg_mode)
                        # compute policy loss
                        policy_loss = pg_loss - entropy_loss * entropy_coeff
                    else:
                        policy_loss = pg_loss

                    if self.config.use_kl_loss:
                        ref_log_prob = data["ref_log_prob"]
                        # compute kl loss
                        kld = kl_penalty(
                            logprob=log_prob, ref_logprob=ref_log_prob, kl_penalty=self.config.kl_loss_type
                        )
                        kl_loss = agg_loss(
                            loss_mat=kld, loss_mask=response_mask, loss_agg_mode=self.config.loss_agg_mode
                        )
                        policy_loss = policy_loss + kl_loss * self.config.kl_loss_coef
                        metrics["actor/kl_loss"] = kl_loss.detach().item()
                        metrics["actor/kl_coef"] = self.config.kl_loss_coef

                    if self.config.use_dynamic_bsz:
                        # relative to the dynamic bsz
                        loss = policy_loss * (len(data) / self.config.ppo_mini_batch_size)
                    else:
                        loss = policy_loss / self.gradient_accumulation
                    loss.backward()

                    data = {
                        "actor/loss": loss.detach().item(),
                        "actor/log_ratio_mean": log_ratios.mean().detach().item(),
                        "actor/preference_mean": preference.mean().detach().item(),
                    }
                    append_to_dict(metrics, data)

                grad_norm = self._optimizer_step()
                data = {"actor/grad_norm": grad_norm.detach().item()}
                append_to_dict(metrics, data)
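# NOTE: one optimizer update per mini-batch. Each micro-batch above
# contributed a backward pass scaled by 1/gradient_accumulation (or, under
# dynamic batching, by len(micro_batch)/ppo_mini_batch_size), and
# _optimizer_step() applied the accumulated gradient (returning its norm);
# the final zero_grad() below clears any leftover gradient state before
# metrics are returned.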
self.actor_optimizer.zero_grad() return metrics ================================================ FILE: verl_distillation/recipe/sppo/main_sppo.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2023-2024 SGLang Team # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Note that we don't combine the main with ray_trainer as ray_trainer is used by other main. """ import os import hydra import ray from omegaconf import OmegaConf from verl.trainer.ppo.reward import load_reward_manager from verl.trainer.ppo.utils import need_reference_policy from verl.utils.config import validate_config from .sppo_ray_trainer import RaySPPOTrainer @hydra.main(config_path="config", config_name="sppo_trainer", version_base=None) def main(config): run_ppo(config) def run_ppo(config) -> None: # TODO(linjunrong.ocss884): this ENV is left for resolving SGLang conflict with ray devices # isolation, will solve in the future os.environ["ENSURE_CUDA_VISIBLE_DEVICES"] = os.environ.get("CUDA_VISIBLE_DEVICES", "") if not ray.is_initialized(): # this is for local ray cluster default_runtime_env = { "env_vars": {"TOKENIZERS_PARALLELISM": "true", "NCCL_DEBUG": "WARN", "VLLM_LOGGING_LEVEL": "WARN"} } ray_init_kwargs = config.ray_kwargs.get("ray_init", {}) runtime_env_kwargs = ray_init_kwargs.get("runtime_env", {}) runtime_env = OmegaConf.merge(default_runtime_env, runtime_env_kwargs) ray_init_kwargs = OmegaConf.create({**ray_init_kwargs, "runtime_env": runtime_env}) print(f"ray init kwargs: {ray_init_kwargs}") ray.init(**OmegaConf.to_container(ray_init_kwargs)) runner = TaskRunner.remote() ray.get(runner.run.remote(config)) @ray.remote(num_cpus=1) # please make sure main_task is not scheduled on head class TaskRunner: def run(self, config): # print initial config from pprint import pprint from omegaconf import OmegaConf from verl.utils.fs import copy_to_local pprint(OmegaConf.to_container(config, resolve=True)) # resolve=True will eval symbol values OmegaConf.resolve(config) # define worker classes if config.actor_rollout_ref.actor.strategy in {"fsdp", "fsdp2"}: assert config.critic.strategy in {"fsdp", "fsdp2"} from verl.single_controller.ray import RayWorkerGroup from .sppo_worker import SPPOActorRolloutRefWorker # , CriticWorker actor_rollout_cls = SPPOActorRolloutRefWorker ray_worker_group_cls = RayWorkerGroup elif config.actor_rollout_ref.actor.strategy == "megatron": assert config.actor_rollout_ref.actor.strategy == config.critic.strategy from verl.single_controller.ray import RayWorkerGroup from verl.workers.megatron_workers import ActorRolloutRefWorker actor_rollout_cls = ActorRolloutRefWorker ray_worker_group_cls = RayWorkerGroup else: raise NotImplementedError from verl.trainer.ppo.ray_trainer import ResourcePoolManager, Role # sppo does not use critic role_worker_mapping = { Role.ActorRollout: ray.remote(actor_rollout_cls), } global_pool_id = "global_pool" resource_pool_spec = { global_pool_id: [config.trainer.n_gpus_per_node] * config.trainer.nnodes, } mapping = { 
            Role.ActorRollout: global_pool_id,
        }

        # we should adopt a multi-source reward function here
        # - for rule-based rm, we directly call a reward score
        # - for model-based rm, we call a model
        # - for code related prompt, we send to a sandbox if there are test cases
        # - finally, we combine all the rewards together
        # - The reward type depends on the tag of the data
        if config.reward_model.enable:
            if config.reward_model.strategy in {"fsdp", "fsdp2"}:
                from verl.workers.fsdp_workers import RewardModelWorker
            elif config.reward_model.strategy == "megatron":
                from verl.workers.megatron_workers import RewardModelWorker
            else:
                raise NotImplementedError
            role_worker_mapping[Role.RewardModel] = ray.remote(RewardModelWorker)
            mapping[Role.RewardModel] = global_pool_id

        # use reference model
        if config.algorithm.use_kl_in_reward or config.actor_rollout_ref.actor.use_kl_loss:
            role_worker_mapping[Role.RefPolicy] = ray.remote(SPPOActorRolloutRefWorker)
            mapping[Role.RefPolicy] = global_pool_id

        # validate config
        validate_config(
            config=config,
            use_reference_policy=need_reference_policy(role_worker_mapping),
            use_critic=False,
        )

        # download the checkpoint from hdfs
        local_path = copy_to_local(config.actor_rollout_ref.model.path)

        # instantiate tokenizer
        from verl.utils import hf_processor, hf_tokenizer

        trust_remote_code = config.data.get("trust_remote_code", False)
        tokenizer = hf_tokenizer(local_path, trust_remote_code=trust_remote_code)
        processor = hf_processor(local_path, use_fast=True)  # used for multimodal LLM, could be none

        reward_fn = load_reward_manager(
            config, tokenizer, num_examine=0, **config.reward_model.get("reward_kwargs", {})
        )
        val_reward_fn = load_reward_manager(config, tokenizer, num_examine=1)

        resource_pool_manager = ResourcePoolManager(resource_pool_spec=resource_pool_spec, mapping=mapping)

        trainer = RaySPPOTrainer(
            config=config,
            tokenizer=tokenizer,
            processor=processor,
            role_worker_mapping=role_worker_mapping,
            resource_pool_manager=resource_pool_manager,
            ray_worker_group_cls=ray_worker_group_cls,
            reward_fn=reward_fn,
            val_reward_fn=val_reward_fn,
        )
        trainer.init_workers()
        trainer.fit()


if __name__ == "__main__":
    main()


================================================
FILE: verl_distillation/recipe/sppo/run_qwen2.5-7b_rm.sh
================================================
# Disclaimer: the model used in this script is for academic purposes only.
set -x

# Data preparation scripts are available in ``examples/data_preprocess``.
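# (Heads-up: the gsm8k_* variable names below are historical; they point at
# the MATH parquet files produced by examples/data_preprocess/math_dataset.py.)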
# Example usage: # # python3 examples/data_preprocess/math_dataset.py --local_dir ~/data/math # python3 examples/data_preprocess/gsm8k.py --local_save_dir ~/data/gsm8k gsm8k_train_path=$HOME/data/math/train.parquet gsm8k_test_path=$HOME/data/math/test.parquet train_files="['$gsm8k_train_path']" test_files="['$gsm8k_test_path']" # prepare model ckpt huggingface-cli download Qwen/Qwen2.5-7B-Instruct --local-dir $HOME/models/Qwen2.5-7B-Instruct & # huggingface-cli download sfairXC/FsfairX-LLaMA3-RM-v0.1 --local-dir $HOME/models/FsfairX-LLaMA3-RM-v0.1 & wait python3 -m recipe.sppo.main_sppo \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=512 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.return_raw_chat=True \ actor_rollout_ref.model.path="$HOME/models/Qwen2.5-7B-Instruct" \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.optim.lr_warmup_steps_ratio=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.rollout.tensor_model_parallel_size=1 \ actor_rollout_ref.rollout.name=sglang \ actor_rollout_ref.rollout.gpu_memory_utilization=0.3 \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='sppo-sglang' \ trainer.val_before_train=True \ trainer.experiment_name='Qwen2-7B-Instruct_hybrid_rm' \ trainer.n_gpus_per_node=4 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=1 \ trainer.total_epochs=1000 $@ # Note that we set lr_warmup_steps = 15 in config/sppo_trainer.yaml # The experiment will converge to 0.656 on MATH dataset after 20 epochs ================================================ FILE: verl_distillation/recipe/sppo/sppo_ray_trainer.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2023-2024 SGLang Team # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ FSDP PPO Trainer with Ray-based single controller. 
This trainer supports model-agonistic model initialization with huggingface """ import uuid from copy import deepcopy from pprint import pprint from typing import Optional import numpy as np import ray import torch from torch.utils.data import Dataset, Sampler from tqdm import tqdm from verl import DataProto from verl.single_controller.ray import RayWorkerGroup from verl.trainer.ppo import core_algos from verl.trainer.ppo.core_algos import agg_loss from verl.trainer.ppo.ray_trainer import ( AdvantageEstimator, RayPPOTrainer, ResourcePoolManager, apply_kl_penalty, compute_response_mask, ) from verl.trainer.ppo.reward import compute_reward, compute_reward_async from verl.trainer.ppo.utils import Role, WorkerType, need_reference_policy, need_reward_model from verl.utils.metric import reduce_metrics from verl.utils.profiler.performance import simple_timer from verl.utils.tracking import ValidationGenerationsLogger def softmean(x: torch.Tensor, beta: float, dim: int = -1, keepdim: bool = False) -> torch.Tensor: """ Compute SoftMean_β(x) = (1/β) * log( (1/n) * Σ exp(β * x_i) ) Falls back to arithmetic mean when β=0. """ if beta == 0.0: return x.mean(dim=dim, keepdim=keepdim) # cast beta to tensor on same device/dtype beta_t = x.new_tensor(beta) # numerically-stable logsumexp(β x) lse = torch.logsumexp(x * beta_t, dim=dim, keepdim=keepdim) n = x.size(dim) log_n = x.new_tensor(n).log() return (lse - log_n) / beta_t def compute_advantage(data: DataProto, beta=1.0): rewards = data.batch["token_level_rewards"].sum(axis=-1) # (bs, ) s_mean = softmean(rewards, beta, keepdim=True) # (bs, ) rewards = rewards - s_mean # (bs, ) data.batch["seq_level_rewards"] = rewards # (bs, ) return data class RaySPPOTrainer(RayPPOTrainer): """ Note that this trainer runs on the driver process on a single CPU/GPU node. """ # TODO: support each role have individual ray_worker_group_cls, # i.e., support different backend of different role def __init__( self, config, tokenizer, role_worker_mapping: dict[Role, WorkerType], resource_pool_manager: ResourcePoolManager, ray_worker_group_cls: RayWorkerGroup = RayWorkerGroup, processor=None, reward_fn=None, val_reward_fn=None, train_dataset: Optional[Dataset] = None, val_dataset: Optional[Dataset] = None, collate_fn=None, train_sampler: Optional[Sampler] = None, device_name=None, ): self.tokenizer = tokenizer self.processor = processor self.config = config self.reward_fn = reward_fn self.val_reward_fn = val_reward_fn self.hybrid_engine = config.actor_rollout_ref.hybrid_engine assert self.hybrid_engine, "Currently, only support hybrid engine" if self.hybrid_engine: assert Role.ActorRollout in role_worker_mapping, f"{role_worker_mapping.keys()=}" self.role_worker_mapping = role_worker_mapping self.resource_pool_manager = resource_pool_manager self.use_reference_policy = need_reference_policy(role_worker_mapping) self.use_rm = need_reward_model(role_worker_mapping) self.use_critic = False self.ray_worker_group_cls = ray_worker_group_cls self.validation_generations_logger = ValidationGenerationsLogger() self.device_name = device_name if device_name else self.config.trainer.device # define in-reward KL control # kl loss control currently not supported if config.algorithm.use_kl_in_reward: self.kl_ctrl_in_reward = core_algos.get_kl_controller(config.algorithm.kl_ctrl) self._create_dataloader(train_dataset, val_dataset, collate_fn, train_sampler) def fit(self): """ The training loop of PPO. 
The driver process only need to call the compute functions of the worker group through RPC to construct the PPO dataflow. The light-weight advantage computation is done on the driver process. """ from omegaconf import OmegaConf from verl.utils.tracking import Tracking logger = Tracking( project_name=self.config.trainer.project_name, experiment_name=self.config.trainer.experiment_name, default_backend=self.config.trainer.logger, config=OmegaConf.to_container(self.config, resolve=True), ) self.global_steps = 0 # load checkpoint before doing anything self._load_checkpoint() # perform validation before training # currently, we only support validation using the reward_function. if self.val_reward_fn is not None and self.config.trainer.get("val_before_train", True): val_metrics = self._validate() pprint(f"Initial validation metrics: {val_metrics}") logger.log(data=val_metrics, step=self.global_steps) if self.config.trainer.get("val_only", False): return # add tqdm progress_bar = tqdm(total=self.total_training_steps, initial=self.global_steps, desc="Training Progress") # we start from step 1 self.global_steps += 1 last_val_metrics = None for epoch in range(self.config.trainer.total_epochs): for batch_dict in self.train_dataloader: metrics = {} timing_raw = {} batch: DataProto = DataProto.from_single_dict(batch_dict) # pop those keys for generation batch_keys_to_pop = ["input_ids", "attention_mask", "position_ids"] non_tensor_batch_keys_to_pop = ["raw_prompt_ids"] if "multi_modal_data" in batch.non_tensor_batch: non_tensor_batch_keys_to_pop.append("multi_modal_data") if "raw_prompt" in batch.non_tensor_batch: non_tensor_batch_keys_to_pop.append("raw_prompt") if "tools_kwargs" in batch.non_tensor_batch: non_tensor_batch_keys_to_pop.append("tools_kwargs") gen_batch = batch.pop( batch_keys=batch_keys_to_pop, non_tensor_batch_keys=non_tensor_batch_keys_to_pop, ) gen_batch_output = gen_batch.repeat( repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True ) is_last_step = self.global_steps >= self.total_training_steps with simple_timer("step", timing_raw): # generate a batch with simple_timer("gen", timing_raw): if not self.async_rollout_mode: gen_batch_output = self.actor_rollout_wg.generate_sequences(gen_batch_output) else: gen_batch_output = self.async_rollout_manager.generate_sequences(gen_batch_output) timing_raw.update(gen_batch_output.meta_info["timing"]) gen_batch_output.meta_info.pop("timing", None) if self.config.algorithm.adv_estimator == AdvantageEstimator.REMAX: with simple_timer("gen_max", timing_raw): gen_baseline_batch = deepcopy(gen_batch) gen_baseline_batch.meta_info["do_sample"] = False gen_baseline_output = self.actor_rollout_wg.generate_sequences(gen_baseline_batch) batch = batch.union(gen_baseline_output) # compute reward model score on batch rm_scores = None if self.use_rm and "rm_scores" not in batch.batch.keys(): rm_scores = self.rm_wg.compute_rm_score(batch) batch = batch.union(rm_scores) reward_baseline_tensor, _ = compute_reward(batch, self.reward_fn) reward_baseline_tensor = reward_baseline_tensor.sum(dim=-1) keys_to_pop = set(gen_baseline_output.batch.keys()) if rm_scores is not None: keys_to_pop.update(rm_scores.batch.keys()) batch.pop(batch_keys=list(keys_to_pop)) batch.batch["reward_baselines"] = reward_baseline_tensor del rm_scores, gen_baseline_batch, gen_baseline_output batch.non_tensor_batch["uid"] = np.array( [str(uuid.uuid4()) for _ in range(len(batch.batch))], dtype=object ) # repeat to align with repeated responses in rollout batch = 
batch.repeat(repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True) batch = batch.union(gen_batch_output) batch.batch["response_mask"] = compute_response_mask(batch) # Balance the number of valid tokens across DP ranks. # NOTE: This usually changes the order of data in the `batch`, # which won't affect the advantage calculation (since it's based on uid), # but might affect the loss calculation (due to the change of mini-batching). # TODO: Decouple the DP balancing and mini-batching. if self.config.trainer.balance_batch: self._balance_batch(batch, metrics=metrics) # compute global_valid tokens batch.meta_info["global_token_num"] = torch.sum(batch.batch["attention_mask"], dim=-1).tolist() with simple_timer("reward", timing_raw): # compute reward model score if self.use_rm and "rm_scores" not in batch.batch.keys(): reward_tensor = self.rm_wg.compute_rm_score(batch) batch = batch.union(reward_tensor) if self.config.reward_model.launch_reward_fn_async: future_reward = compute_reward_async.remote(batch, self.config, self.tokenizer) else: reward_tensor, reward_extra_infos_dict = compute_reward(batch, self.reward_fn) # recompute old_log_probs with simple_timer("old_log_prob", timing_raw): old_log_prob = self.actor_rollout_wg.compute_log_prob(batch) entropys = old_log_prob.batch["entropys"] response_masks = batch.batch["response_mask"] loss_agg_mode = self.config.actor_rollout_ref.actor.loss_agg_mode entropy_agg = agg_loss(loss_mat=entropys, loss_mask=response_masks, loss_agg_mode=loss_agg_mode) old_log_prob_metrics = {"actor/entropy": entropy_agg.detach().item()} metrics.update(old_log_prob_metrics) old_log_prob.batch.pop("entropys") batch = batch.union(old_log_prob) if self.use_reference_policy: # compute reference log_prob with simple_timer("ref", timing_raw): ref_log_prob = self.ref_policy_wg.compute_ref_log_prob(batch) batch = batch.union(ref_log_prob) # compute values if self.use_critic: with simple_timer("values", timing_raw): values = self.critic_wg.compute_values(batch) batch = batch.union(values) with simple_timer("adv", timing_raw): # we combine with rule-based rm reward_extra_infos_dict: dict[str, list] if self.config.reward_model.launch_reward_fn_async: reward_tensor, reward_extra_infos_dict = ray.get(future_reward) batch.batch["token_level_scores"] = reward_tensor if reward_extra_infos_dict: batch.non_tensor_batch.update({k: np.array(v) for k, v in reward_extra_infos_dict.items()}) # compute rewards. 
apply_kl_penalty if available if self.config.algorithm.use_kl_in_reward: batch, kl_metrics = apply_kl_penalty( batch, kl_ctrl=self.kl_ctrl_in_reward, kl_penalty=self.config.algorithm.kl_penalty ) metrics.update(kl_metrics) else: batch.batch["token_level_rewards"] = batch.batch["token_level_scores"] batch.batch["seq_level_rewards"] = batch.batch["token_level_scores"] beta = self.config.algorithm.sppo_eta batch = compute_advantage(batch, beta=beta) # update critic if self.use_critic: with simple_timer("update_critic", timing_raw): critic_output = self.critic_wg.update_critic(batch) critic_output_metrics = reduce_metrics(critic_output.meta_info["metrics"]) metrics.update(critic_output_metrics) # implement critic warmup if self.config.trainer.critic_warmup <= self.global_steps: # update actor with simple_timer("update_actor", timing_raw): batch.meta_info["multi_turn"] = self.config.actor_rollout_ref.rollout.multi_turn.enable actor_output = self.actor_rollout_wg.update_actor(batch) actor_output_metrics = reduce_metrics(actor_output.meta_info["metrics"]) metrics.update(actor_output_metrics) # Log rollout generations if enabled rollout_data_dir = self.config.trainer.get("rollout_data_dir", None) if rollout_data_dir: self._log_rollout_data(batch, reward_extra_infos_dict, timing_raw, rollout_data_dir) # validate if ( self.val_reward_fn is not None and self.config.trainer.test_freq > 0 and (is_last_step or self.global_steps % self.config.trainer.test_freq == 0) ): with simple_timer("testing", timing_raw): val_metrics: dict = self._validate() if is_last_step: last_val_metrics = val_metrics metrics.update(val_metrics) if self.config.trainer.save_freq > 0 and ( is_last_step or self.global_steps % self.config.trainer.save_freq == 0 ): with simple_timer("save_checkpoint", timing_raw): self._save_checkpoint() # training metrics metrics.update( { "training/global_step": self.global_steps, "training/epoch": epoch, } ) # TODO: make a canonical logger that supports various backend logger.log(data=metrics, step=self.global_steps) if is_last_step: pprint(f"Final validation metrics: {last_val_metrics}") progress_bar.close() return progress_bar.update(1) self.global_steps += 1 ================================================ FILE: verl_distillation/recipe/sppo/sppo_worker.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2023-2024 SGLang Team # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
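# ---------------------------------------------------------------------------
# [Editor's illustrative sketch, not part of the original file.]
# The fit() loop in the ray_trainer.py above either shapes token-level scores
# with a KL penalty (when algorithm.use_kl_in_reward is set) or forwards them
# unchanged before the SPPO advantage step. A minimal standalone version of
# that shaping step, assuming the simple "kl" (log-ratio) estimator and a
# fixed beta (the real code uses an adaptive controller), could look like:
def _kl_shaped_rewards_sketch(token_level_scores, old_log_probs, ref_log_probs, response_mask, beta=0.05):
    """Subtract a per-token KL penalty from reward scores, masked to response tokens."""
    # k1 estimator of KL(pi || pi_ref): log pi(a|s) - log pi_ref(a|s)
    kld = (old_log_probs - ref_log_probs) * response_mask
    return token_level_scores - beta * kld
# ---------------------------------------------------------------------------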
import logging import os from omegaconf import OmegaConf, open_dict from verl.single_controller.base.decorator import Dispatch, register from verl.utils.checkpoint.fsdp_checkpoint_manager import FSDPCheckpointManager from verl.utils.flops_counter import FlopsCounter from verl.utils.fsdp_utils import offload_fsdp_model_to_cpu, offload_fsdp_optimizer from verl.utils.import_utils import import_external_libs from verl.utils.profiler import log_gpu_memory_usage from verl.workers.fsdp_workers import ActorRolloutRefWorker logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_PPO_LOGGING_LEVEL", "WARN")) class SPPOActorRolloutRefWorker(ActorRolloutRefWorker): """ This worker can be instantiated as a standalone actor or a standalone rollout or a standalone reference policy or a hybrid engine based on the config.rollout """ @register(dispatch_mode=Dispatch.ONE_TO_ALL) def init_model(self): from .dp_actor import DataParallelSPPOActor # This is used to import external_lib into the huggingface systems import_external_libs(self.config.model.get("external_lib", None)) override_model_config = OmegaConf.to_container(OmegaConf.create(self.config.model.get("override_config", {}))) use_remove_padding = self.config.model.get("use_remove_padding", False) use_fused_kernels = self.config.model.get("use_fused_kernels", False) if self._is_actor or self._is_rollout: # we need the model for actor and rollout if self._is_actor: optim_config = self.config.actor.optim fsdp_config = self.config.actor.fsdp_config else: optim_config = None fsdp_config = OmegaConf.create() self.actor_module_fsdp, self.actor_optimizer, self.actor_lr_scheduler, self.actor_model_config = ( self._build_model_optimizer( model_path=self.config.model.path, fsdp_config=fsdp_config, optim_config=optim_config, override_model_config=override_model_config, use_remove_padding=use_remove_padding, use_fused_kernels=use_fused_kernels, enable_gradient_checkpointing=self.config.model.get("enable_gradient_checkpointing", False), trust_remote_code=self.config.model.get("trust_remote_code", False), use_liger=self.config.model.get("use_liger", False), role="actor", ) ) # get the original unwrapped module self.actor_module = self.actor_module_fsdp._fsdp_wrapped_module if self._is_offload_param: offload_fsdp_model_to_cpu(self.actor_module_fsdp) log_gpu_memory_usage("After offload actor model during init", logger=logger) if self._is_offload_optimizer: offload_fsdp_optimizer(optimizer=self.actor_optimizer) log_gpu_memory_usage("After offload actor optimizer during init", logger=logger) # load from checkpoint if self._is_actor: OmegaConf.set_struct(self.config.actor, True) with open_dict(self.config.actor): self.config.actor.use_remove_padding = use_remove_padding self.config.actor.use_fused_kernels = use_fused_kernels self.actor = DataParallelSPPOActor( config=self.config.actor, actor_module=self.actor_module_fsdp, actor_optimizer=self.actor_optimizer ) if self._is_rollout: self._build_rollout(trust_remote_code=self.config.model.get("trust_remote_code", False)) if self._is_ref: self.ref_module_fsdp = self._build_model_optimizer( model_path=self.config.model.path, fsdp_config=self.config.ref.fsdp_config, optim_config=None, override_model_config=override_model_config, use_remove_padding=use_remove_padding, use_fused_kernels=use_fused_kernels, trust_remote_code=self.config.model.get("trust_remote_code", False), use_liger=self.config.model.get("use_liger", False), role="ref", )[0] OmegaConf.set_struct(self.config.ref, True) with 
open_dict(self.config.ref): self.config.ref.use_remove_padding = use_remove_padding self.config.ref.use_fused_kernels = use_fused_kernels self.ref_policy = DataParallelSPPOActor(config=self.config.ref, actor_module=self.ref_module_fsdp) if self._is_actor: self.flops_counter = FlopsCounter(self.actor_model_config) self.checkpoint_manager = FSDPCheckpointManager( model=self.actor_module_fsdp, optimizer=self.actor.actor_optimizer, lr_scheduler=self.actor_lr_scheduler, processing_class=self.processor if self.processor is not None else self.tokenizer, checkpoint_config=self.config.actor.checkpoint, ) ================================================ FILE: verl_distillation/recipe/transfer_queue/agent_loop.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import ray from transfer_queue import BatchMeta import verl.experimental.agent_loop.agent_loop as agent_loop from verl import DataProto class AgentLoopManager(agent_loop.AgentLoopManager): def generate_sequences(self, prompts: BatchMeta) -> BatchMeta: """Split input batch and dispatch to agent loop workers. Args: prompts (BatchMeta): Input batch. Returns: BatchMeta: Output batch metadata. """ if self.rm_micro_batch_size and len(prompts) % self.rm_micro_batch_size != 0: raise ValueError( f"The number of prompts {len(prompts)} is not divisible by the reward model micro batch size {self.rm_micro_batch_size}" ) if self.config.actor_rollout_ref.rollout.free_cache_engine: self.wake_up() chunks = prompts.chunk(len(self.agent_loop_workers)) outputs = ray.get( [ worker.generate_sequences.remote(chunk) for worker, chunk in zip(self.agent_loop_workers, chunks, strict=True) ] ) output = BatchMeta.concat(outputs) if self.config.actor_rollout_ref.rollout.free_cache_engine: self.sleep() # calculate performance metrics metrics = [output.extra_info.pop("metrics") for output in outputs] # List[List[Dict[str, str]]] timing = self._performance_metrics(metrics, output) output.set_extra_info("timing", timing) return output def _performance_metrics(self, metrics: list[list[dict[str, str]]], output: DataProto) -> dict[str, float]: timing = {} t_generate_sequences = np.array([metric["generate_sequences"] for chunk in metrics for metric in chunk]) t_tool_calls = np.array([metric["tool_calls"] for chunk in metrics for metric in chunk]) timing["agent_loop/generate_sequences/min"] = t_generate_sequences.min() timing["agent_loop/generate_sequences/max"] = t_generate_sequences.max() timing["agent_loop/generate_sequences/mean"] = t_generate_sequences.mean() timing["agent_loop/tool_calls/min"] = t_tool_calls.min() timing["agent_loop/tool_calls/max"] = t_tool_calls.max() timing["agent_loop/tool_calls/mean"] = t_tool_calls.mean() return timing def create_transferqueue_client(self, controller_infos, storage_infos, role): ray.get( [ worker.create_transferqueue_client.remote(controller_infos, storage_infos, role) for worker in self.agent_loop_workers ] )
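# ---------------------------------------------------------------------------
# [Editor's illustrative sketch, not part of the original file.]
# _performance_metrics() above flattens the per-worker lists of per-sample
# timings and reduces each key to min/max/mean. The reduction is equivalent to
# this standalone helper (name and alias are hypothetical):
import numpy as _np_sketch

def _reduce_timing_sketch(metrics, key):
    values = _np_sketch.array([m[key] for chunk in metrics for m in chunk])
    return {
        f"agent_loop/{key}/min": values.min(),
        f"agent_loop/{key}/max": values.max(),
        f"agent_loop/{key}/mean": values.mean(),
    }

# Example: _reduce_timing_sketch([[{"tool_calls": 0.2}], [{"tool_calls": 0.4}]],
#          "tool_calls")["agent_loop/tool_calls/mean"] -> ~0.3
# ---------------------------------------------------------------------------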
================================================ FILE: verl_distillation/recipe/transfer_queue/config/transfer_queue_ppo_trainer.yaml ================================================ hydra: searchpath: - file://verl/trainer/config defaults: - ppo_trainer - _self_ # config for TransferQueue transfer_queue: enable: True ================================================ FILE: verl_distillation/recipe/transfer_queue/main_ppo.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Note that we don't combine the main with ray_trainer, as ray_trainer is used by other mains. """ import os import socket import hydra import ray from omegaconf import OmegaConf from verl.trainer.constants_ppo import get_ppo_ray_runtime_env from verl.trainer.main_ppo import ( TaskRunner as MainTaskRunner, ) from verl.trainer.main_ppo import ( create_rl_dataset, create_rl_sampler, ) from verl.trainer.ppo.reward import load_reward_manager from verl.trainer.ppo.utils import need_critic, need_reference_policy from verl.utils.config import validate_config from verl.utils.device import is_cuda_available from .ray_trainer import RayPPOTrainer @hydra.main(config_path="config", config_name="ppo_trainer", version_base=None) def main(config): """Main entry point for PPO training with Hydra configuration management. Args: config: Hydra configuration dictionary containing training parameters. """ run_ppo(config) # Define a function to run the PPO-like training process def run_ppo(config, task_runner_class=None) -> None: """Initialize Ray cluster and run distributed PPO training process. Args: config: Training configuration object containing all necessary parameters for distributed PPO training including Ray initialization settings, model paths, and training hyperparameters. task_runner_class: Optional TaskRunner subclass that allows a recipe to override the default TaskRunner.
""" # Check if Ray is not initialized if not ray.is_initialized(): # Initialize Ray with a local cluster configuration # Set environment variables in the runtime environment to control tokenizer parallelism, # NCCL debug level, VLLM logging level, and allow runtime LoRA updating # `num_cpus` specifies the number of CPU cores Ray can use, obtained from the configuration default_runtime_env = get_ppo_ray_runtime_env() ray_init_kwargs = config.ray_kwargs.get("ray_init", {}) runtime_env_kwargs = ray_init_kwargs.get("runtime_env", {}) if config.transfer_queue.enable: # Add runtime environment variables for transfer queue runtime_env_vars = runtime_env_kwargs.get("env_vars", {}) runtime_env_vars["TRANSFER_QUEUE_ENABLE"] = "1" runtime_env_kwargs["env_vars"] = runtime_env_vars runtime_env = OmegaConf.merge(default_runtime_env, runtime_env_kwargs) ray_init_kwargs = OmegaConf.create({**ray_init_kwargs, "runtime_env": runtime_env}) print(f"ray init kwargs: {ray_init_kwargs}") ray.init(**OmegaConf.to_container(ray_init_kwargs)) if task_runner_class is None: task_runner_class = ray.remote(num_cpus=1)(TaskRunner) # please make sure main_task is not scheduled on head # Create a remote instance of the TaskRunner class, and # Execute the `run` method of the TaskRunner instance remotely and wait for it to complete if ( is_cuda_available and config.global_profiler.tool == "nsys" and config.global_profiler.get("steps") is not None and len(config.global_profiler.get("steps", [])) > 0 ): from verl.utils.import_utils import is_nvtx_available assert is_nvtx_available(), "nvtx is not available in CUDA platform. Please 'pip3 install nvtx'" nsight_options = OmegaConf.to_container( config.global_profiler.global_tool_config.nsys.controller_nsight_options ) runner = task_runner_class.options(runtime_env={"nsight": nsight_options}).remote() else: runner = task_runner_class.remote() ray.get(runner.run.remote(config)) # [Optional] get the path of the timeline trace file from the configuration, default to None # This file is used for performance analysis timeline_json_file = config.ray_kwargs.get("timeline_json_file", None) if timeline_json_file: ray.timeline(filename=timeline_json_file) class TaskRunner(MainTaskRunner): def run(self, config): """Execute the main PPO training workflow. This method sets up the distributed training environment, initializes workers, datasets, and reward functions, then starts the training process. Args: config: Training configuration object containing all parameters needed for setting up and running the PPO training process. """ # Print the initial configuration. `resolve=True` will evaluate symbolic values. from pprint import pprint from verl.utils.fs import copy_to_local print(f"TaskRunner hostname: {socket.gethostname()}, PID: {os.getpid()}") pprint(OmegaConf.to_container(config, resolve=True)) OmegaConf.resolve(config) actor_rollout_cls, ray_worker_group_cls = self.add_actor_rollout_worker(config) self.add_critic_worker(config) # We should adopt a multi-source reward function here: # - for rule-based rm, we directly call a reward score # - for model-based rm, we call a model # - for code related prompt, we send to a sandbox if there are test cases # finally, we combine all the rewards together # The reward type depends on the tag of the data self.add_reward_model_worker(config) # Add a reference policy worker if KL loss or KL reward is used. 
self.add_ref_policy_worker(config, actor_rollout_cls) # validate config validate_config( config=config, use_reference_policy=need_reference_policy(self.role_worker_mapping), use_critic=need_critic(config), ) # Download the checkpoint from HDFS to the local machine. # `use_shm` determines whether to use shared memory, which could lead to faster model loading if turned on local_path = copy_to_local( config.actor_rollout_ref.model.path, use_shm=config.actor_rollout_ref.model.get("use_shm", False) ) # Instantiate the tokenizer and processor. from verl.utils import hf_processor, hf_tokenizer trust_remote_code = config.data.get("trust_remote_code", False) tokenizer = hf_tokenizer(local_path, trust_remote_code=trust_remote_code) # Used for multimodal LLM, could be None processor = hf_processor(local_path, trust_remote_code=trust_remote_code, use_fast=True) # Load the reward manager for training and validation. reward_fn = load_reward_manager( config, tokenizer, num_examine=0, **config.reward_model.get("reward_kwargs", {}) ) val_reward_fn = load_reward_manager( config, tokenizer, num_examine=1, **config.reward_model.get("reward_kwargs", {}) ) resource_pool_manager = self.init_resource_pool_mgr(config) from verl.utils.dataset.rl_dataset import collate_fn # Create training and validation datasets. train_dataset = create_rl_dataset(config.data.train_files, config.data, tokenizer, processor, is_train=True) val_dataset = create_rl_dataset(config.data.val_files, config.data, tokenizer, processor, is_train=False) train_sampler = create_rl_sampler(config.data, train_dataset) # Initialize the PPO trainer. trainer = RayPPOTrainer( config=config, tokenizer=tokenizer, processor=processor, role_worker_mapping=self.role_worker_mapping, resource_pool_manager=resource_pool_manager, ray_worker_group_cls=ray_worker_group_cls, reward_fn=reward_fn, val_reward_fn=val_reward_fn, train_dataset=train_dataset, val_dataset=val_dataset, collate_fn=collate_fn, train_sampler=train_sampler, ) # Initialize the workers of the trainer. trainer.init_workers() # Start the training process. trainer.fit() if __name__ == "__main__": main() ================================================ FILE: verl_distillation/recipe/transfer_queue/ray_trainer.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PPO Trainer with Ray-based single controller. 
This trainer supports model-agnostic model initialization with HuggingFace """ import asyncio import json import logging import math import os import uuid from collections import defaultdict from dataclasses import dataclass, field from pprint import pprint from typing import Any, Optional import numpy as np import ray import tensordict import torch from omegaconf import OmegaConf, open_dict from packaging.version import parse as parse_version from tensordict import TensorDict from torch.utils.data import Dataset, Sampler from torchdata.stateful_dataloader import StatefulDataLoader from tqdm import tqdm from transfer_queue import ( BatchMeta, TransferQueueController, TransferQueueStorageSimpleUnit, get_placement_group, process_zmq_server_info, ) from verl import DataProto from verl.experimental.dataset.sampler import AbstractCurriculumSampler from verl.single_controller.ray import ( RayClassWithInitArgs, RayResourcePool, RayWorkerGroup, ) from verl.single_controller.ray.base import create_colocated_worker_cls from verl.trainer.config import AlgoConfig from verl.trainer.ppo import core_algos from verl.trainer.ppo.core_algos import AdvantageEstimator, agg_loss from verl.trainer.ppo.metric_utils import ( compute_data_metrics, compute_throughout_metrics, compute_timing_metrics, process_validation_metrics, ) from verl.trainer.ppo.reward import compute_reward, compute_reward_async from verl.trainer.ppo.utils import ( Role, WorkerType, need_critic, need_reference_policy, need_reward_model, ) from verl.utils.checkpoint.checkpoint_manager import ( find_latest_ckpt_path, should_save_ckpt_esi, ) from verl.utils.config import omega_conf_to_dataclass from verl.utils.debug import marked_timer from verl.utils.metric import reduce_metrics from verl.utils.rollout_skip import RolloutSkip from verl.utils.seqlen_balancing import ( get_seqlen_balanced_partitions, log_seqlen_unbalance, ) from verl.utils.torch_functional import masked_mean from verl.utils.tracking import ValidationGenerationsLogger from verl.utils.transferqueue_utils import ( create_transferqueue_client, get_transferqueue_client, get_val_transferqueue_client, tqbridge, ) @dataclass class ResourcePoolManager: """ Define a resource pool specification. Resource pool will be initialized first. """ resource_pool_spec: dict[str, list[int]] mapping: dict[Role, str] resource_pool_dict: dict[str, RayResourcePool] = field(default_factory=dict) def create_resource_pool(self): """Create Ray resource pools for distributed training. Initializes resource pools based on the resource pool specification, with each pool managing GPU resources across multiple nodes. For FSDP backend, uses max_colocate_count=1 to merge WorkerGroups. For Megatron backend, uses max_colocate_count>1 for different models. """ for resource_pool_name, process_on_nodes in self.resource_pool_spec.items(): # max_colocate_count means the number of WorkerGroups (i.e. processes) in each RayResourcePool # For FSDP backend, we recommend using max_colocate_count=1, which merges all WorkerGroups into one.
# For Megatron backend, we recommend using max_colocate_count>1 # that can utilize different WorkerGroups for different models resource_pool = RayResourcePool( process_on_nodes=process_on_nodes, use_gpu=True, max_colocate_count=1, name_prefix=resource_pool_name ) self.resource_pool_dict[resource_pool_name] = resource_pool self._check_resource_available() def get_resource_pool(self, role: Role) -> RayResourcePool: """Get the resource pool of the worker_cls""" return self.resource_pool_dict[self.mapping[role]] def get_n_gpus(self) -> int: """Get the number of gpus in this cluster.""" return sum([n_gpus for process_on_nodes in self.resource_pool_spec.values() for n_gpus in process_on_nodes]) def _check_resource_available(self): """Check if the resource pool can be satisfied in this ray cluster.""" node_available_resources = ray._private.state.available_resources_per_node() node_available_gpus = { node: node_info.get("GPU", 0) if "GPU" in node_info else node_info.get("NPU", 0) for node, node_info in node_available_resources.items() } # check total required gpus can be satisfied total_available_gpus = sum(node_available_gpus.values()) total_required_gpus = sum( [n_gpus for process_on_nodes in self.resource_pool_spec.values() for n_gpus in process_on_nodes] ) if total_available_gpus < total_required_gpus: raise ValueError( f"Total available GPUs {total_available_gpus} is less than total desired GPUs {total_required_gpus}" ) @tqbridge(put_data=False) def compute_reward_decorated(data, reward_fn): return compute_reward(data, reward_fn) @tqbridge(put_data=False) def compute_reward_async_decorated(data, reward_fn): return compute_reward_async.remote(data, reward_fn) @tqbridge(put_data=False) def apply_kl_penalty(data: DataProto, kl_ctrl: core_algos.AdaptiveKLController, kl_penalty="kl"): """Apply KL penalty to the token-level rewards. This function computes the KL divergence between the reference policy and current policy, then applies a penalty to the token-level rewards based on this divergence. Args: data (DataProto): The data containing batched model outputs and inputs. kl_ctrl (core_algos.AdaptiveKLController): Controller for adaptive KL penalty. kl_penalty (str, optional): Type of KL penalty to apply. Defaults to "kl". Returns: tuple: A tuple containing: - The updated data with token-level rewards adjusted by KL penalty - A dictionary of metrics related to the KL penalty """ response_mask = data.batch["response_mask"] token_level_scores = data.batch["token_level_scores"] batch_size = data.batch.batch_size[0] # compute kl between ref_policy and current policy # When apply_kl_penalty is called, algorithm.use_kl_in_reward=True, so the reference model has been enabled. kld = core_algos.kl_penalty( data.batch["old_log_probs"], data.batch["ref_log_prob"], kl_penalty=kl_penalty ) # (batch_size, response_length) kld = kld * response_mask beta = kl_ctrl.value token_level_rewards = token_level_scores - beta * kld current_kl = masked_mean(kld, mask=response_mask, axis=-1) # average over sequence current_kl = torch.mean(current_kl, dim=0).item() # according to https://github.com/huggingface/trl/blob/951ca1841f29114b969b57b26c7d3e80a39f75a0/trl/trainer/ppo_trainer.py#L837 kl_ctrl.update(current_kl=current_kl, n_steps=batch_size) metrics = {"actor/reward_kl_penalty": current_kl, "actor/reward_kl_penalty_coeff": beta} return token_level_rewards, metrics def compute_response_mask(batch_meta: BatchMeta, data_system_client): """Compute the attention mask for the response part of the sequence.
This function extracts the portion of the attention mask that corresponds to the model's response, which is used for masking computations that should only apply to response tokens. Args: batch_meta (BatchMeta): The data containing batched model outputs and inputs. Returns: BatchMeta: The BatchMeta of attention mask for the response tokens. """ data = asyncio.run(data_system_client.async_get_data(batch_meta)) responses = data["responses"] response_length = responses.size(1) attention_mask = data["attention_mask"] response_mask = attention_mask[:, -response_length:] output = TensorDict({"response_mask": response_mask}, batch_size=response_mask.size(0)) asyncio.run(data_system_client.async_put(data=output, metadata=batch_meta)) batch_meta.add_fields(output) return batch_meta @tqbridge(put_data=False) def compute_advantage( data: DataProto, adv_estimator: AdvantageEstimator, gamma: float = 1.0, lam: float = 1.0, num_repeat: int = 1, norm_adv_by_std_in_grpo: bool = True, config: Optional[AlgoConfig] = None, ) -> tuple[Any, Any]: """Compute advantage estimates for policy optimization. This function computes advantage estimates using various estimators like GAE, GRPO, REINFORCE++, etc. The advantage estimates are used to guide policy optimization in RL algorithms. Args: data (DataProto): The data containing batched model outputs and inputs. adv_estimator (AdvantageEstimator): The advantage estimator to use (e.g., GAE, GRPO, REINFORCE++). gamma (float, optional): Discount factor for future rewards. Defaults to 1.0. lam (float, optional): Lambda parameter for GAE. Defaults to 1.0. num_repeat (int, optional): Number of times to repeat the computation. Defaults to 1. norm_adv_by_std_in_grpo (bool, optional): Whether to normalize advantages by standard deviation in GRPO. Defaults to True. config (dict, optional): Configuration dictionary for algorithm settings. Defaults to None. Returns: tuple: A tuple containing: - advantages: The computed advantage estimates. - returns: The computed returns. 
""" # prepare response group if adv_estimator == AdvantageEstimator.GAE: # Compute advantages and returns using Generalized Advantage Estimation (GAE) advantages, returns = core_algos.compute_gae_advantage_return( token_level_rewards=data.batch["token_level_rewards"], values=data.batch["values"], response_mask=data.batch["response_mask"], gamma=gamma, lam=lam, ) # TODO: (TQ) adapt core_algos.compute_pf_ppo_reweight_data function to support transfer queue if config.get("use_pf_ppo", False): data = core_algos.compute_pf_ppo_reweight_data( data, config.pf_ppo.get("reweight_method"), config.pf_ppo.get("weight_pow"), ) elif adv_estimator == AdvantageEstimator.GRPO: # Initialize the mask for GRPO calculation grpo_calculation_mask = data.batch["response_mask"] # Call compute_grpo_outcome_advantage with parameters matching its definition advantages, returns = core_algos.compute_grpo_outcome_advantage( token_level_rewards=data.batch["token_level_rewards"], response_mask=grpo_calculation_mask, index=data.non_tensor_batch["uid"], norm_adv_by_std_in_grpo=norm_adv_by_std_in_grpo, ) else: # handle all other adv estimator type other than GAE and GRPO adv_estimator_fn = core_algos.get_adv_estimator_fn(adv_estimator) adv_kwargs = { "token_level_rewards": data.batch["token_level_rewards"], "response_mask": data.batch["response_mask"], "config": config, } if "uid" in data.non_tensor_batch: # optional adv_kwargs["index"] = data.non_tensor_batch["uid"] if "reward_baselines" in data.batch: # optional adv_kwargs["reward_baselines"] = data.batch["reward_baselines"] # calculate advantage estimator advantages, returns = adv_estimator_fn(**adv_kwargs) return advantages, returns @tqbridge(put_data=False) def compute_data_metrics_decorated(batch, use_critic: bool = True): return compute_data_metrics(batch, use_critic) @tqbridge(put_data=False) def compute_timing_metrics_decorated(batch, timing_raw: dict[str, float]) -> dict[str, Any]: return compute_timing_metrics(batch, timing_raw) @tqbridge(put_data=False) def compute_throughout_metrics_decorated(batch, timing_raw: dict[str, float], n_gpus: int) -> dict[str, Any]: return compute_throughout_metrics(batch, timing_raw, n_gpus) @tqbridge(put_data=False) def calculate_debug_metrics_decorated(data): from verl.utils.debug.metrics import calculate_debug_metrics return calculate_debug_metrics(data) @tqbridge(put_data=False) def compute_val_reward_decorated(reward_fn, data, return_dict): return reward_fn(data, return_dict) class RayPPOTrainer: """Distributed PPO trainer using Ray for scalable reinforcement learning. This trainer orchestrates distributed PPO training across multiple nodes and GPUs, managing actor rollouts, critic training, and reward computation with Ray backend. Supports various model architectures including FSDP, Megatron, vLLM, and SGLang integration. """ # TODO: support each role have individual ray_worker_group_cls, # i.e., support different backend of different role def __init__( self, config, tokenizer, role_worker_mapping: dict[Role, WorkerType], resource_pool_manager: ResourcePoolManager, ray_worker_group_cls: type[RayWorkerGroup] = RayWorkerGroup, processor=None, reward_fn=None, val_reward_fn=None, train_dataset: Optional[Dataset] = None, val_dataset: Optional[Dataset] = None, collate_fn=None, train_sampler: Optional[Sampler] = None, device_name=None, ): """ Initialize distributed PPO trainer with Ray backend. Note that this trainer runs on the driver process on a single CPU/GPU node. Args: config: Configuration object containing training parameters. 
tokenizer: Tokenizer used for encoding and decoding text. role_worker_mapping (dict[Role, WorkerType]): Mapping from roles to worker classes. resource_pool_manager (ResourcePoolManager): Manager for Ray resource pools. ray_worker_group_cls (RayWorkerGroup, optional): Class for Ray worker groups. Defaults to RayWorkerGroup. processor: Optional data processor, used for multimodal data reward_fn: Function for computing rewards during training. val_reward_fn: Function for computing rewards during validation. train_dataset (Optional[Dataset], optional): Training dataset. Defaults to None. val_dataset (Optional[Dataset], optional): Validation dataset. Defaults to None. collate_fn: Function to collate data samples into batches. train_sampler (Optional[Sampler], optional): Sampler for the training dataset. Defaults to None. device_name (str, optional): Device name for training (e.g., "cuda", "cpu"). Defaults to None. """ # Store the tokenizer for text processing self.tokenizer = tokenizer self.processor = processor self.config = config self.reward_fn = reward_fn self.val_reward_fn = val_reward_fn self.hybrid_engine = config.actor_rollout_ref.hybrid_engine assert self.hybrid_engine, "Currently, only the hybrid engine is supported" if self.hybrid_engine: assert Role.ActorRollout in role_worker_mapping, f"{role_worker_mapping.keys()=}" self.role_worker_mapping = role_worker_mapping self.resource_pool_manager = resource_pool_manager self.use_reference_policy = need_reference_policy(self.role_worker_mapping) self.use_rm = need_reward_model(self.role_worker_mapping) self.use_critic = need_critic(self.config) self.ray_worker_group_cls = ray_worker_group_cls self.device_name = device_name if device_name else self.config.trainer.device self.validation_generations_logger = ValidationGenerationsLogger( project_name=self.config.trainer.project_name, experiment_name=self.config.trainer.experiment_name, ) # if ref_in_actor is True, the reference policy will be the actor without LoRA applied self.ref_in_actor = config.actor_rollout_ref.model.get("lora_rank", 0) > 0 # define in-reward KL control # kl loss control currently not supported if self.config.algorithm.use_kl_in_reward: self.kl_ctrl_in_reward = core_algos.get_kl_controller(self.config.algorithm.kl_ctrl) self._create_dataloader(train_dataset, val_dataset, collate_fn, train_sampler) self.data_system_client = self._initialize_train_data_system( self.config.data.train_batch_size, self.config.actor_rollout_ref.rollout.n ) self.val_data_system_client = self._initialize_val_data_system( self.val_batch_size, self.config.actor_rollout_ref.rollout.val_kwargs.n ) def _initialize_train_data_system(self, global_batch_size, num_n_samples, role="train"): # 1. initialize TransferQueueStorage total_storage_size = global_batch_size * self.config.trainer.num_global_batch * num_n_samples self.data_system_storage_units = {} storage_placement_group = get_placement_group(self.config.trainer.num_data_storage_units, num_cpus_per_actor=1) for storage_unit_rank in range(self.config.trainer.num_data_storage_units): storage_node = TransferQueueStorageSimpleUnit.options( placement_group=storage_placement_group, placement_group_bundle_index=storage_unit_rank ).remote(storage_size=math.ceil(total_storage_size / self.config.trainer.num_data_storage_units)) self.data_system_storage_units[storage_unit_rank] = storage_node logging.info(f"TransferQueueStorageSimpleUnit #{storage_unit_rank} has been created.") # 2.
initialize TransferQueueController # we support initializing multiple controller instances for large-scale scenarios. Please allocate exactly # one controller for a single WorkerGroup. self.data_system_controllers = {} controller_placement_group = get_placement_group(self.config.trainer.num_data_controllers, num_cpus_per_actor=1) for controller_rank in range(self.config.trainer.num_data_controllers): self.data_system_controllers[controller_rank] = TransferQueueController.options( placement_group=controller_placement_group, placement_group_bundle_index=controller_rank ).remote( num_storage_units=self.config.trainer.num_data_storage_units, global_batch_size=global_batch_size, num_global_batch=self.config.trainer.num_global_batch, num_n_samples=num_n_samples, ) logging.info(f"TransferQueueController #{controller_rank} has been created.") # 3. register controller & storage self.data_system_controller_infos = process_zmq_server_info(self.data_system_controllers) self.data_system_storage_unit_infos = process_zmq_server_info(self.data_system_storage_units) ray.get( [ storage_unit.register_controller_info.remote(self.data_system_controller_infos) for storage_unit in self.data_system_storage_units.values() ] ) # 4. create client # each client should be allocated to exactly one controller create_transferqueue_client( client_id="Trainer-" + role, controller_infos=self.data_system_controller_infos, storage_infos=self.data_system_storage_unit_infos, ) data_system_client = get_transferqueue_client() return data_system_client def _initialize_val_data_system(self, global_batch_size, num_n_samples, role="val"): # 1. initialize TransferQueueStorage total_storage_size = global_batch_size * self.config.trainer.num_global_batch * num_n_samples self.val_data_system_storage_units = {} storage_placement_group = get_placement_group(self.config.trainer.num_data_storage_units, num_cpus_per_actor=1) for storage_unit_rank in range(self.config.trainer.num_data_storage_units): storage_node = TransferQueueStorageSimpleUnit.options( placement_group=storage_placement_group, placement_group_bundle_index=storage_unit_rank ).remote(storage_size=math.ceil(total_storage_size / self.config.trainer.num_data_storage_units)) self.val_data_system_storage_units[storage_unit_rank] = storage_node logging.info(f"TransferQueueStorageSimpleUnit #{storage_unit_rank} has been created.") # 2. initialize TransferQueueController # we support initializing multiple controller instances for large-scale scenarios. Please allocate exactly # one controller for a single WorkerGroup. self.val_data_system_controllers = {} controller_placement_group = get_placement_group(self.config.trainer.num_data_controllers, num_cpus_per_actor=1) for controller_rank in range(self.config.trainer.num_data_controllers): self.val_data_system_controllers[controller_rank] = TransferQueueController.options( placement_group=controller_placement_group, placement_group_bundle_index=controller_rank ).remote( num_storage_units=self.config.trainer.num_data_storage_units, global_batch_size=global_batch_size, num_global_batch=self.config.trainer.num_global_batch, num_n_samples=num_n_samples, ) logging.info(f"TransferQueueController #{controller_rank} has been created.") # 3.
register controller & storage self.val_data_system_controller_infos = process_zmq_server_info(self.val_data_system_controllers) self.val_data_system_storage_unit_infos = process_zmq_server_info(self.val_data_system_storage_units) ray.get( [ storage_unit.register_controller_info.remote(self.val_data_system_controller_infos) for storage_unit in self.val_data_system_storage_units.values() ] ) # 4. create client # each client should be allocated to exactly one controller create_transferqueue_client( client_id="Trainer-" + role, controller_infos=self.val_data_system_controller_infos, storage_infos=self.val_data_system_storage_unit_infos, ) data_system_client = get_val_transferqueue_client() return data_system_client def _create_dataloader(self, train_dataset, val_dataset, collate_fn, train_sampler: Optional[Sampler]): """ Creates the train and validation dataloaders. """ # TODO: we have to make sure the batch size is divisible by the dp size from verl.trainer.main_ppo import create_rl_dataset, create_rl_sampler if train_dataset is None: train_dataset = create_rl_dataset( self.config.data.train_files, self.config.data, self.tokenizer, self.processor ) if val_dataset is None: val_dataset = create_rl_dataset( self.config.data.val_files, self.config.data, self.tokenizer, self.processor ) self.train_dataset, self.val_dataset = train_dataset, val_dataset if train_sampler is None: train_sampler = create_rl_sampler(self.config.data, self.train_dataset) if collate_fn is None: from verl.utils.dataset.rl_dataset import collate_fn as default_collate_fn collate_fn = default_collate_fn num_workers = self.config.data["dataloader_num_workers"] self.train_dataloader = StatefulDataLoader( dataset=self.train_dataset, batch_size=self.config.data.get("gen_batch_size", self.config.data.train_batch_size), num_workers=num_workers, drop_last=True, collate_fn=collate_fn, sampler=train_sampler, ) val_batch_size = self.config.data.val_batch_size # Prefer config value if set if val_batch_size is None: val_batch_size = len(self.val_dataset) self.val_batch_size = val_batch_size self.val_dataloader = StatefulDataLoader( dataset=self.val_dataset, batch_size=val_batch_size, num_workers=num_workers, shuffle=self.config.data.get("validation_shuffle", True), drop_last=False, collate_fn=collate_fn, ) assert len(self.train_dataloader) >= 1, "Train dataloader is empty!" assert len(self.val_dataloader) >= 1, "Validation dataloader is empty!" print( f"Size of train dataloader: {len(self.train_dataloader)}, Size of val dataloader: " f"{len(self.val_dataloader)}" ) total_training_steps = len(self.train_dataloader) * self.config.trainer.total_epochs if self.config.trainer.total_training_steps is not None: total_training_steps = self.config.trainer.total_training_steps self.total_training_steps = total_training_steps print(f"Total training steps: {self.total_training_steps}") try: OmegaConf.set_struct(self.config, True) with open_dict(self.config): if OmegaConf.select(self.config, "actor_rollout_ref.actor.optim"): self.config.actor_rollout_ref.actor.optim.total_training_steps = total_training_steps if OmegaConf.select(self.config, "critic.optim"): self.config.critic.optim.total_training_steps = total_training_steps except Exception as e: print(f"Warning: Could not set total_training_steps in config. Structure missing? 
Error: {e}") def _dump_generations(self, inputs, outputs, gts, scores, reward_extra_infos_dict, dump_path): """Dump rollout/validation samples as JSONL.""" os.makedirs(dump_path, exist_ok=True) filename = os.path.join(dump_path, f"{self.global_steps}.jsonl") n = len(inputs) base_data = { "input": inputs, "output": outputs, "gts": gts, "score": scores, "step": [self.global_steps] * n, } for k, v in reward_extra_infos_dict.items(): if len(v) == n: base_data[k] = v lines = [] for i in range(n): entry = {k: v[i] for k, v in base_data.items()} lines.append(json.dumps(entry, ensure_ascii=False)) with open(filename, "w") as f: f.write("\n".join(lines) + "\n") print(f"Dumped generations to {filename}") def _log_rollout_data( self, log_rollout_meta: BatchMeta, reward_extra_infos_dict: dict, timing_raw: dict, rollout_data_dir: str ): """ Log rollout data to disk. Args: log_rollout_meta (BatchMeta): The batch_meta of rollout data reward_extra_infos_dict (dict): Additional reward information to log timing_raw (dict): Timing information for profiling rollout_data_dir (str): Directory path to save the rollout data """ with marked_timer("dump_rollout_generations", timing_raw, color="green"): data = asyncio.run(self.data_system_client.async_get_data(log_rollout_meta)) inputs = self.tokenizer.batch_decode(data["prompts"], skip_special_tokens=True) outputs = self.tokenizer.batch_decode(data["responses"], skip_special_tokens=True) scores = data["token_level_scores"].sum(-1).cpu().tolist() sample_gts = [item.get("ground_truth", None) for item in data.get("reward_model", {})] reward_extra_infos_to_dump = reward_extra_infos_dict.copy() if "request_id" in log_rollout_meta.field_names: reward_extra_infos_dict.setdefault( "request_id", data["request_id"].tolist(), ) self._dump_generations( inputs=inputs, outputs=outputs, gts=sample_gts, scores=scores, reward_extra_infos_dict=reward_extra_infos_to_dump, dump_path=rollout_data_dir, ) def _maybe_log_val_generations(self, inputs, outputs, scores): """Log a table of validation samples to the configured logger (wandb or swanlab)""" generations_to_log = self.config.trainer.log_val_generations if generations_to_log == 0: return import numpy as np # Create tuples of (input, output, score) and sort by input text samples = list(zip(inputs, outputs, scores, strict=True)) samples.sort(key=lambda x: x[0]) # Sort by input text # Use fixed random seed for deterministic shuffling rng = np.random.RandomState(42) rng.shuffle(samples) # Take first N samples after shuffling samples = samples[:generations_to_log] # Log to each configured logger self.validation_generations_logger.log(self.config.trainer.logger, samples, self.global_steps) def _get_gen_batch(self, batch: DataProto) -> DataProto: reward_model_keys = set({"data_source", "reward_model", "extra_info", "uid"}) & batch.non_tensor_batch.keys() # pop those keys for generation batch_keys_to_pop = ["input_ids", "attention_mask", "position_ids"] non_tensor_batch_keys_to_pop = set(batch.non_tensor_batch.keys()) - reward_model_keys gen_batch = batch.pop( batch_keys=batch_keys_to_pop, non_tensor_batch_keys=list(non_tensor_batch_keys_to_pop), ) # For agent loop, we need reward model keys to compute score. 
if self.async_rollout_mode: gen_batch.non_tensor_batch.update(batch.non_tensor_batch) return gen_batch def _validate(self): data_source_lst = [] reward_extra_infos_dict: dict[str, list] = defaultdict(list) # Lists to collect samples for the table sample_inputs = [] sample_outputs = [] sample_gts = [] sample_scores = [] sample_turns = [] sample_uids = [] for test_data in self.val_dataloader: if "uid" not in test_data.keys(): test_data["uid"] = np.array( [str(uuid.uuid4()) for _ in range(len(test_data["input_ids"]))], dtype=object ) # repeat test data repeated_test_data = self.repeat_dict( test_data, repeat_times=self.config.actor_rollout_ref.rollout.val_kwargs.n, interleave=True ) test_batch: TensorDict = self.dict_to_tensordict(repeated_test_data) # we only do validation on rule-based rm if self.config.reward_model.enable and test_batch[0]["reward_model"]["style"] == "model": return {} asyncio.run(self.val_data_system_client.async_put(data=test_batch, global_step=self.global_steps - 1)) # Store original inputs batch_meta = asyncio.run( self.val_data_system_client.async_get_meta( data_fields=["input_ids", "uid", "reward_model"], batch_size=self.val_batch_size * self.config.actor_rollout_ref.rollout.val_kwargs.n, global_step=self.global_steps - 1, get_n_samples=False, task_name="get_data", ) ) data = asyncio.run(self.val_data_system_client.async_get_data(batch_meta)) input_ids = data["input_ids"] # TODO: Can we keep special tokens except for padding tokens? input_texts = [self.tokenizer.decode(ids, skip_special_tokens=True) for ids in input_ids] sample_inputs.extend(input_texts) sample_uids.extend(data["uid"]) ground_truths = [item.get("ground_truth", None) for item in data.get("reward_model", {})] sample_gts.extend(ground_truths) test_gen_meta = asyncio.run( self.val_data_system_client.async_get_meta( data_fields=list(test_batch.keys()), # TODO: (TQ) Get metadata by specified fields batch_size=self.val_batch_size * self.config.actor_rollout_ref.rollout.val_kwargs.n, global_step=self.global_steps - 1, # self.global_steps start from 1 get_n_samples=False, task_name="generate_sequences", ) ) test_gen_meta.extra_info = { "eos_token_id": self.tokenizer.eos_token_id, "pad_token_id": self.tokenizer.pad_token_id, "recompute_log_prob": False, "do_sample": self.config.actor_rollout_ref.rollout.val_kwargs.do_sample, "validate": True, "global_steps": self.global_steps, } print(f"test_gen_batch meta info: {test_gen_meta.extra_info}") # TODO: (TQ) Support padding and unpadding to make DataProto divisible by dp_size with TransferQueue if not self.async_rollout_mode: test_output_gen_meta = self.actor_rollout_wg.generate_sequences(test_gen_meta) else: test_output_gen_meta = self.async_rollout_manager.generate_sequences(test_gen_meta) test_batch_meta = test_gen_meta.union(test_output_gen_meta) print("validation generation end") # Store generated outputs test_response_meta = asyncio.run( self.val_data_system_client.async_get_meta( data_fields=["responses"], batch_size=self.val_batch_size * self.config.actor_rollout_ref.rollout.val_kwargs.n, global_step=self.global_steps - 1, # self.global_steps start from 1 get_n_samples=False, task_name="get_response", ) ) data = asyncio.run(self.val_data_system_client.async_get_data(test_response_meta)) output_ids = data["responses"] output_texts = [self.tokenizer.decode(ids, skip_special_tokens=True) for ids in output_ids] sample_outputs.extend(output_texts) test_batch_meta.set_extra_info("validate", True) # evaluate using reward_function if self.val_reward_fn is None: 
raise ValueError("val_reward_fn must be provided for validation.") compute_reward_fields = [ "responses", "prompts", "attention_mask", "reward_model", "data_source", ] if "rm_scores" in batch_meta.field_names: compute_reward_fields = ["rm_scores"] val_reward_meta = asyncio.run( self.val_data_system_client.async_get_meta( data_fields=compute_reward_fields, batch_size=self.val_batch_size * self.config.actor_rollout_ref.rollout.val_kwargs.n, global_step=self.global_steps - 1, get_n_samples=False, task_name="compute_reward", ) ) val_reward_meta.update_extra_info(test_batch_meta.extra_info) result = compute_val_reward_decorated(self.val_reward_fn, val_reward_meta, return_dict=True) reward_tensor = result["reward_tensor"] scores = reward_tensor.sum(-1).cpu().tolist() sample_scores.extend(scores) reward_extra_infos_dict["reward"].extend(scores) print(f"len reward_extra_infos_dict['reward']: {len(reward_extra_infos_dict['reward'])}") if "reward_extra_info" in result: for key, lst in result["reward_extra_info"].items(): reward_extra_infos_dict[key].extend(lst) print(f"len reward_extra_infos_dict['{key}']: {len(reward_extra_infos_dict[key])}") # collect num_turns of each prompt if "__num_turns__" in test_batch_meta.field_names: num_turns_meta = asyncio.run( self.val_data_system_client.async_get_meta( data_fields=["__num_turns__"], batch_size=self.val_batch_size * self.config.actor_rollout_ref.rollout.val_kwargs.n, global_step=self.global_steps - 1, # self.global_steps start from 1 get_n_samples=False, task_name="get_num_turns", ) ) data = asyncio.run(self.val_data_system_client.async_get_data(num_turns_meta)) sample_turns.append(data["__num_turns__"]) data_source = ["unknown"] * reward_tensor.shape[0] if "data_source" in test_batch_meta.field_names: data_source_meta = asyncio.run( self.val_data_system_client.async_get_meta( data_fields=["data_source"], batch_size=self.val_batch_size * self.config.actor_rollout_ref.rollout.val_kwargs.n, global_step=self.global_steps - 1, # self.global_steps start from 1 get_n_samples=False, task_name="get_data_source", ) ) data = asyncio.run(self.val_data_system_client.async_get_data(data_source_meta)) data_source = data["data_source"] data_source_lst.append(data_source) self._maybe_log_val_generations(inputs=sample_inputs, outputs=sample_outputs, scores=sample_scores) # dump generations val_data_dir = self.config.trainer.get("validation_data_dir", None) if val_data_dir: self._dump_generations( inputs=sample_inputs, outputs=sample_outputs, gts=sample_gts, scores=sample_scores, reward_extra_infos_dict=reward_extra_infos_dict, dump_path=val_data_dir, ) for key_info, lst in reward_extra_infos_dict.items(): assert len(lst) == 0 or len(lst) == len(sample_scores), f"{key_info}: {len(lst)=}, {len(sample_scores)=}" data_sources = np.concatenate(data_source_lst, axis=0) data_src2var2metric2val = process_validation_metrics(data_sources, sample_uids, reward_extra_infos_dict) metric_dict = {} for data_source, var2metric2val in data_src2var2metric2val.items(): core_var = "acc" if "acc" in var2metric2val else "reward" for var_name, metric2val in var2metric2val.items(): n_max = max([int(name.split("@")[-1].split("/")[0]) for name in metric2val.keys()]) for metric_name, metric_val in metric2val.items(): if ( (var_name == core_var) and any(metric_name.startswith(pfx) for pfx in ["mean", "maj", "best"]) and (f"@{n_max}" in metric_name) ): metric_sec = "val-core" else: metric_sec = "val-aux" pfx = f"{metric_sec}/{data_source}/{var_name}/{metric_name}" metric_dict[pfx] = metric_val if 
len(sample_turns) > 0: sample_turns = np.concatenate(sample_turns) metric_dict["val-aux/num_turns/min"] = sample_turns.min() metric_dict["val-aux/num_turns/max"] = sample_turns.max() metric_dict["val-aux/num_turns/mean"] = sample_turns.mean() asyncio.run(self.val_data_system_client.async_clear(self.global_steps - 1)) return metric_dict def init_workers(self): """Initialize distributed training workers using Ray backend. Creates: 1. Ray resource pools from configuration 2. Worker groups for each role (actor, critic, etc.) """ self.resource_pool_manager.create_resource_pool() self.resource_pool_to_cls = {pool: {} for pool in self.resource_pool_manager.resource_pool_dict.values()} # create actor and rollout if self.hybrid_engine: resource_pool = self.resource_pool_manager.get_resource_pool(Role.ActorRollout) actor_rollout_cls = RayClassWithInitArgs( cls=self.role_worker_mapping[Role.ActorRollout], config=self.config.actor_rollout_ref, role="actor_rollout", ) self.resource_pool_to_cls[resource_pool]["actor_rollout"] = actor_rollout_cls else: raise NotImplementedError # create critic if self.use_critic: resource_pool = self.resource_pool_manager.get_resource_pool(Role.Critic) critic_cfg = omega_conf_to_dataclass(self.config.critic) critic_cls = RayClassWithInitArgs(cls=self.role_worker_mapping[Role.Critic], config=critic_cfg) self.resource_pool_to_cls[resource_pool]["critic"] = critic_cls # create reference policy if needed if self.use_reference_policy: resource_pool = self.resource_pool_manager.get_resource_pool(Role.RefPolicy) ref_policy_cls = RayClassWithInitArgs( self.role_worker_mapping[Role.RefPolicy], config=self.config.actor_rollout_ref, role="ref", ) self.resource_pool_to_cls[resource_pool]["ref"] = ref_policy_cls # create a reward model if reward_fn is None if self.use_rm: # we create a RM here resource_pool = self.resource_pool_manager.get_resource_pool(Role.RewardModel) rm_cls = RayClassWithInitArgs(self.role_worker_mapping[Role.RewardModel], config=self.config.reward_model) self.resource_pool_to_cls[resource_pool]["rm"] = rm_cls # initialize WorkerGroup # NOTE: if you want to use a different resource pool for each role, which can support different parallel size, # you should not use `create_colocated_worker_cls`. # Instead, directly pass different resource pool to different worker groups. # See https://github.com/volcengine/verl/blob/master/examples/ray/tutorial.ipynb for more information. 
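# ---------------------------------------------------------------------------
# [Editor's illustrative sketch, not part of the original file.]
# In _validate() above, a metric is promoted to the "val-core" section only if
# it belongs to the core variable (acc when available, else reward), uses a
# mean/maj/best aggregation, and carries the largest sample count n. That n is
# recovered from metric names of the form "<agg>@<n>" or "<agg>@<n>/<suffix>":
def _parse_metric_n_sketch(metric_name):
    return int(metric_name.split("@")[-1].split("/")[0])

assert _parse_metric_n_sketch("mean@4") == 4
assert _parse_metric_n_sketch("best@8/std") == 8
# ---------------------------------------------------------------------------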
all_wg = {} wg_kwargs = {} # Setting up kwargs for RayWorkerGroup if OmegaConf.select(self.config.trainer, "ray_wait_register_center_timeout") is not None: wg_kwargs["ray_wait_register_center_timeout"] = self.config.trainer.ray_wait_register_center_timeout if OmegaConf.select(self.config.global_profiler, "steps") is not None: wg_kwargs["profile_steps"] = OmegaConf.select(self.config.global_profiler, "steps") # Only require nsight worker options when tool is nsys if OmegaConf.select(self.config.global_profiler, "tool") == "nsys": assert ( OmegaConf.select(self.config.global_profiler.global_tool_config.nsys, "worker_nsight_options") is not None ), "worker_nsight_options must be set when using nsys with profile_steps" wg_kwargs["worker_nsight_options"] = OmegaConf.to_container( OmegaConf.select(self.config.global_profiler.global_tool_config.nsys, "worker_nsight_options") ) wg_kwargs["device_name"] = self.device_name for resource_pool, class_dict in self.resource_pool_to_cls.items(): worker_dict_cls = create_colocated_worker_cls(class_dict=class_dict) wg_dict = self.ray_worker_group_cls( resource_pool=resource_pool, ray_cls_with_init=worker_dict_cls, **wg_kwargs, ) spawn_wg = wg_dict.spawn(prefix_set=class_dict.keys()) all_wg.update(spawn_wg) if self.use_critic: self.critic_wg = all_wg["critic"] self.critic_wg.init_model() if self.use_reference_policy and not self.ref_in_actor: self.ref_policy_wg = all_wg["ref"] self.ref_policy_wg.init_model() self.rm_wg = None if self.use_rm: self.rm_wg = all_wg["rm"] self.rm_wg.init_model() # we should create rollout at the end so that vllm can have a better estimation of kv cache memory self.actor_rollout_wg = all_wg["actor_rollout"] self.actor_rollout_wg.init_model() # set transferqueue server info for each worker for _, wg in all_wg.items(): wg.create_transferqueue_client( self.data_system_controller_infos, self.data_system_storage_unit_infos, role="train" ) wg.create_transferqueue_client( self.val_data_system_controller_infos, self.val_data_system_storage_unit_infos, role="val" ) # create async rollout manager and request scheduler self.async_rollout_mode = False if self.config.actor_rollout_ref.rollout.mode == "async": from .agent_loop import AgentLoopManager self.async_rollout_mode = True self.async_rollout_manager = AgentLoopManager( config=self.config, worker_group=self.actor_rollout_wg, rm_wg=self.rm_wg ) self.async_rollout_manager.create_transferqueue_client( self.data_system_controller_infos, self.data_system_storage_unit_infos, role="train" ) self.async_rollout_manager.create_transferqueue_client( self.val_data_system_controller_infos, self.val_data_system_storage_unit_infos, role="val" ) def _save_checkpoint(self): from verl.utils.fs import local_mkdir_safe # path: given_path + `/global_step_{global_steps}` + `/actor` local_global_step_folder = os.path.join( self.config.trainer.default_local_dir, f"global_step_{self.global_steps}" ) print(f"local_global_step_folder: {local_global_step_folder}") actor_local_path = os.path.join(local_global_step_folder, "actor") actor_remote_path = ( None if self.config.trainer.default_hdfs_dir is None else os.path.join(self.config.trainer.default_hdfs_dir, f"global_step_{self.global_steps}", "actor") ) remove_previous_ckpt_in_save = self.config.trainer.get("remove_previous_ckpt_in_save", False) if remove_previous_ckpt_in_save: print( "Warning: remove_previous_ckpt_in_save is deprecated," + " set max_actor_ckpt_to_keep=1 and max_critic_ckpt_to_keep=1 instead" ) max_actor_ckpt_to_keep = ( 
self.config.trainer.get("max_actor_ckpt_to_keep", None) if not remove_previous_ckpt_in_save else 1 ) max_critic_ckpt_to_keep = ( self.config.trainer.get("max_critic_ckpt_to_keep", None) if not remove_previous_ckpt_in_save else 1 ) self.actor_rollout_wg.save_checkpoint( actor_local_path, actor_remote_path, self.global_steps, max_ckpt_to_keep=max_actor_ckpt_to_keep ) if self.use_critic: critic_local_path = os.path.join(local_global_step_folder, "critic") critic_remote_path = ( None if self.config.trainer.default_hdfs_dir is None else os.path.join(self.config.trainer.default_hdfs_dir, f"global_step_{self.global_steps}", "critic") ) self.critic_wg.save_checkpoint( critic_local_path, critic_remote_path, self.global_steps, max_ckpt_to_keep=max_critic_ckpt_to_keep ) # save dataloader local_mkdir_safe(local_global_step_folder) dataloader_local_path = os.path.join(local_global_step_folder, "data.pt") dataloader_state_dict = self.train_dataloader.state_dict() torch.save(dataloader_state_dict, dataloader_local_path) # latest checkpointed iteration tracker (for atomic usage) local_latest_checkpointed_iteration = os.path.join( self.config.trainer.default_local_dir, "latest_checkpointed_iteration.txt" ) with open(local_latest_checkpointed_iteration, "w") as f: f.write(str(self.global_steps)) def _load_checkpoint(self): if self.config.trainer.resume_mode == "disable": return 0 # load from hdfs if self.config.trainer.default_hdfs_dir is not None: raise NotImplementedError("load from hdfs is not implemented yet") else: checkpoint_folder = self.config.trainer.default_local_dir # TODO: check path if not os.path.isabs(checkpoint_folder): working_dir = os.getcwd() checkpoint_folder = os.path.join(working_dir, checkpoint_folder) global_step_folder = find_latest_ckpt_path(checkpoint_folder) # None if no latest # find global_step_folder if self.config.trainer.resume_mode == "auto": if global_step_folder is None: print("Training from scratch") return 0 else: if self.config.trainer.resume_mode == "resume_path": assert isinstance(self.config.trainer.resume_from_path, str), "resume ckpt must be str type" assert "global_step_" in self.config.trainer.resume_from_path, ( "resume ckpt must specify the global_steps" ) global_step_folder = self.config.trainer.resume_from_path if not os.path.isabs(global_step_folder): working_dir = os.getcwd() global_step_folder = os.path.join(working_dir, global_step_folder) print(f"Load from checkpoint folder: {global_step_folder}") # set global step self.global_steps = int(global_step_folder.split("global_step_")[-1]) print(f"Setting global step to {self.global_steps}") print(f"Resuming from {global_step_folder}") actor_path = os.path.join(global_step_folder, "actor") critic_path = os.path.join(global_step_folder, "critic") # load actor self.actor_rollout_wg.load_checkpoint( actor_path, del_local_after_load=self.config.trainer.del_local_ckpt_after_load ) # load critic if self.use_critic: self.critic_wg.load_checkpoint( critic_path, del_local_after_load=self.config.trainer.del_local_ckpt_after_load ) # load dataloader, # TODO: from remote not implemented yet dataloader_local_path = os.path.join(global_step_folder, "data.pt") if os.path.exists(dataloader_local_path): dataloader_state_dict = torch.load(dataloader_local_path, weights_only=False) self.train_dataloader.load_state_dict(dataloader_state_dict) else: print(f"Warning: No dataloader state found at {dataloader_local_path}, will start from scratch") def _start_profiling(self, do_profile: bool) -> None: """Start profiling for all 
worker groups if profiling is enabled.""" if do_profile: self.actor_rollout_wg.start_profile(role="e2e", profile_step=self.global_steps) if self.use_reference_policy: self.ref_policy_wg.start_profile(profile_step=self.global_steps) if self.use_critic: self.critic_wg.start_profile(profile_step=self.global_steps) if self.use_rm: self.rm_wg.start_profile(profile_step=self.global_steps) def _stop_profiling(self, do_profile: bool) -> None: """Stop profiling for all worker groups if profiling is enabled.""" if do_profile: self.actor_rollout_wg.stop_profile() if self.use_reference_policy: self.ref_policy_wg.stop_profile() if self.use_critic: self.critic_wg.stop_profile() if self.use_rm: self.rm_wg.stop_profile() def _balance_batch(self, batch: BatchMeta, data_system_client, metrics, logging_prefix="global_seqlen"): """Reorder the batchmeta on single controller such that each dp rank gets similar total tokens""" data = asyncio.run(data_system_client.async_get_data(batch)) attention_mask = data["attention_mask"] batch_size = attention_mask.shape[0] global_seqlen_lst = data["attention_mask"].view(batch_size, -1).sum(-1).tolist() # (train_batch_size,) world_size = self.actor_rollout_wg.world_size global_partition_lst = get_seqlen_balanced_partitions( global_seqlen_lst, k_partitions=world_size, equal_size=True ) # reorder based on index. The data will be automatically equally partitioned by dispatch function global_idx = [j for partition in global_partition_lst for j in partition] global_balance_stats = log_seqlen_unbalance( seqlen_list=global_seqlen_lst, partitions=global_partition_lst, prefix=logging_prefix ) metrics.update(global_balance_stats) return global_idx @classmethod def repeat_dict( cls, batch_dict: dict[str, torch.Tensor | np.ndarray], repeat_times=2, interleave=True ) -> dict[str, torch.Tensor | np.ndarray]: """ Repeat the batch dict a specified number of times. Args: repeat_times (int): Number of times to repeat the data. interleave (bool): Whether to interleave the repeated data. Returns: dict: A new dict with repeated data. """ if repeat_times == 1: return batch_dict repeated_batch_dict = {} if batch_dict: if interleave: # Interleave the data for key, val in batch_dict.items(): if isinstance(val, torch.Tensor): repeated_batch_dict[key] = val.repeat_interleave(repeat_times, dim=0) elif isinstance(val, np.ndarray): repeated_batch_dict[key] = np.repeat(val, repeat_times, axis=0) else: raise ValueError(f"Unsupported type in data {type(val)}") else: # Stack the data for key, val in batch_dict.items(): if isinstance(val, torch.Tensor): repeated_batch_dict[key] = ( val.unsqueeze(0).expand(repeat_times, *val.shape).reshape(-1, *val.shape[1:]) ) elif isinstance(val, np.ndarray): repeated_batch_dict[key] = np.tile(val, (repeat_times,) + (1,) * (val.ndim - 1)) else: raise ValueError(f"Unsupported type in data {type(val)}") return repeated_batch_dict @classmethod def dict_to_tensordict(cls, data: dict[str, torch.Tensor | np.ndarray]) -> TensorDict: """ Create a TensorDict from a dict of tensors and non_tensors. 
Note that this requires tensordict version at least 0.10 """ assert parse_version(tensordict.__version__) >= parse_version("0.10"), ( "Storing non-tensor data in TensorDict requires tensordict version at least 0.10" ) tensors_batch = {} batch_size = None for key, val in data.items(): if isinstance(val, torch.Tensor | np.ndarray): tensors_batch[key] = val else: raise ValueError(f"Unsupported type in data {type(val)}") if batch_size is None: batch_size = len(val) else: assert len(val) == batch_size if batch_size is None: batch_size = [] else: batch_size = [batch_size] return TensorDict(tensors_batch, batch_size=batch_size) def fit(self): """ The training loop of PPO. The driver process only needs to call the compute functions of the worker group through RPC to construct the PPO dataflow. The lightweight advantage computation is done on the driver process. """ from omegaconf import OmegaConf from verl.utils.tracking import Tracking logger = Tracking( project_name=self.config.trainer.project_name, experiment_name=self.config.trainer.experiment_name, default_backend=self.config.trainer.logger, config=OmegaConf.to_container(self.config, resolve=True), ) self.global_steps = 0 # load checkpoint before doing anything self._load_checkpoint() # perform validation before training # currently, we only support validation using the reward_function. if self.val_reward_fn is not None and self.config.trainer.get("val_before_train", True): val_metrics = self._validate() assert val_metrics, f"{val_metrics=}" pprint(f"Initial validation metrics: {val_metrics}") logger.log(data=val_metrics, step=self.global_steps) if self.config.trainer.get("val_only", False): return if self.config.actor_rollout_ref.rollout.get("skip_rollout", False): rollout_skip = RolloutSkip(self.config, self.actor_rollout_wg) rollout_skip.wrap_generate_sequences() # add tqdm progress_bar = tqdm(total=self.total_training_steps, initial=self.global_steps, desc="Training Progress") # we start from step 1 self.global_steps += 1 last_val_metrics = None self.max_steps_duration = 0 prev_step_profile = False curr_step_profile = ( self.global_steps in self.config.global_profiler.steps if self.config.global_profiler.steps is not None else False ) next_step_profile = False for epoch in range(self.config.trainer.total_epochs): for batch_dict in self.train_dataloader: metrics = {} timing_raw = {} base_get_meta_kwargs = dict( batch_size=self.config.data.train_batch_size * self.config.actor_rollout_ref.rollout.n, global_step=self.global_steps - 1, # self.global_steps starts from 1 get_n_samples=False, ) with marked_timer("start_profile", timing_raw): self._start_profiling( not prev_step_profile and curr_step_profile if self.config.global_profiler.profile_continuous_steps else curr_step_profile ) # add uid to batch batch_dict["uid"] = np.array( [str(uuid.uuid4()) for _ in range(len(batch_dict["input_ids"]))], dtype=object ) # When n > 1, repeat input data before putting to data system, simulating DataProto repeat.
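# --- Illustrative aside (not part of the original source): a tiny runnable demo of
# the two repeat modes implemented by repeat_dict above. With interleave=True each
# sample is repeated back-to-back (s0, s0, s1, s1), matching the layout of rollout.n
# samples per prompt; with interleave=False the whole batch is tiled (s0, s1, s0, s1).
import torch

x = torch.tensor([[1], [2]])
interleaved = x.repeat_interleave(2, dim=0)
tiled = x.unsqueeze(0).expand(2, *x.shape).reshape(-1, *x.shape[1:])
assert interleaved.tolist() == [[1], [1], [2], [2]]
assert tiled.tolist() == [[1], [2], [1], [2]]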
repeated_batch_dict = self.repeat_dict( batch_dict, repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True ) batch: TensorDict = self.dict_to_tensordict(repeated_batch_dict) asyncio.run(self.data_system_client.async_put(data=batch, global_step=self.global_steps - 1)) gen_meta = asyncio.run( self.data_system_client.async_get_meta( data_fields=list(batch.keys()), # TODO: (TQ) Get metadata by specified fields task_name="generate_sequences", **base_get_meta_kwargs, ) ) # pass global_steps to trace gen_meta.set_extra_info("global_steps", self.global_steps) is_last_step = self.global_steps >= self.total_training_steps with marked_timer("step", timing_raw): # generate a batch with marked_timer("gen", timing_raw, color="red"): if not self.async_rollout_mode: gen_output_meta = self.actor_rollout_wg.generate_sequences(gen_meta) else: gen_output_meta = self.async_rollout_manager.generate_sequences(gen_meta) timing_raw.update(gen_output_meta.extra_info["timing"]) gen_output_meta.extra_info.pop("timing", None) # TODO: (TQ) support transfer queue # if self.config.algorithm.adv_estimator == AdvantageEstimator.REMAX: # if self.reward_fn is None: # raise ValueError("A reward_fn is required for REMAX advantage estimation.") # # with marked_timer("gen_max", timing_raw, color="purple"): # gen_baseline_meta = deepcopy(gen_meta) # gen_baseline_meta.extra_info["do_sample"] = False # if not self.async_rollout_mode: # gen_baseline_output = self.actor_rollout_wg.generate_sequences(gen_baseline_meta) # else: # gen_baseline_output = self.async_rollout_manager.generate_sequences(gen_baseline_meta) # batch = batch.union(gen_baseline_output) # reward_baseline_tensor = self.reward_fn(batch) # reward_baseline_tensor = reward_baseline_tensor.sum(dim=-1) # # batch.pop(batch_keys=list(gen_baseline_output.batch.keys())) # # batch.batch["reward_baselines"] = reward_baseline_tensor # # del gen_baseline_batch, gen_baseline_output batch_meta: BatchMeta = gen_meta.union(gen_output_meta) if "response_mask" not in batch_meta.field_names: response_mask_meta = asyncio.run( self.data_system_client.async_get_meta( data_fields=["responses", "attention_mask"], task_name="compute_response_mask", **base_get_meta_kwargs, ) ) response_mask_output_meta = compute_response_mask(response_mask_meta, self.data_system_client) batch_meta = batch_meta.union(response_mask_output_meta) # Balance the number of valid tokens across DP ranks. # NOTE: This usually changes the order of data in the `batch`, # which won't affect the advantage calculation (since it's based on uid), # but might affect the loss calculation (due to the change of mini-batching). # TODO: Decouple the DP balancing and mini-batching. 
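# --- Illustrative aside (not part of the original source): the balancing step below
# reorders samples so each DP rank receives a similar total token count. verl's
# get_seqlen_balanced_partitions also enforces equal-sized partitions; this greedy
# longest-first sketch only conveys the core idea.
def greedy_balance(seqlens, k):
    partitions, loads = [[] for _ in range(k)], [0] * k
    for idx in sorted(range(len(seqlens)), key=lambda i: -seqlens[i]):
        dst = min(range(k), key=loads.__getitem__)  # currently lightest rank
        partitions[dst].append(idx)
        loads[dst] += seqlens[idx]
    return partitions

# greedy_balance([9, 7, 5, 3, 2, 2], k=2) -> token loads of 14 vs 14.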
balanced_idx = None if self.config.trainer.balance_batch: attention_mask_meta = asyncio.run( self.data_system_client.async_get_meta( data_fields=["attention_mask"], task_name="balance_batch", **base_get_meta_kwargs, ) ) balanced_idx = self._balance_batch( attention_mask_meta, self.data_system_client, metrics=metrics ) batch_meta.reorder(balanced_idx) # compute global_valid tokens data = asyncio.run(self.data_system_client.async_get_data(attention_mask_meta)) batch_meta.extra_info["global_token_num"] = torch.sum(data["attention_mask"], dim=-1).tolist() with marked_timer("reward", timing_raw, color="yellow"): # compute reward model score if self.use_rm and "rm_scores" not in batch_meta.field_names: reward_meta = self.rm_wg.compute_rm_score(batch_meta) batch_meta = batch_meta.union(reward_meta) compute_reward_fields = [ "responses", "prompts", "attention_mask", "reward_model", "data_source", ] if "rm_scores" in batch_meta.field_names: compute_reward_fields.append("rm_scores") compute_reward_meta = asyncio.run( self.data_system_client.async_get_meta( data_fields=compute_reward_fields, task_name="compute_reward", **base_get_meta_kwargs, ) ) compute_reward_meta.reorder(balanced_idx) if self.config.reward_model.launch_reward_fn_async: future_reward = compute_reward_async_decorated( data=compute_reward_meta, reward_fn=self.reward_fn, ) else: reward_tensor, reward_extra_infos_dict = compute_reward_decorated( compute_reward_meta, self.reward_fn ) batch_meta = batch_meta.union(compute_reward_meta) # recompute old_log_probs with marked_timer("old_log_prob", timing_raw, color="blue"): old_log_prob_meta = asyncio.run( self.data_system_client.async_get_meta( data_fields=[ "input_ids", "attention_mask", "position_ids", "prompts", "responses", "response_mask", "data_source", "reward_model", "extra_info", "uid", "index", "tools_kwargs", "interaction_kwargs", "ability", ], task_name="compute_log_prob", **base_get_meta_kwargs, ) ) old_log_prob_meta.reorder(balanced_idx) old_log_prob_output_meta = self.actor_rollout_wg.compute_log_prob(old_log_prob_meta) data = asyncio.run(self.data_system_client.async_get_data(old_log_prob_output_meta)) entropys = data["entropys"] response_masks = data["response_mask"] loss_agg_mode = self.config.actor_rollout_ref.actor.loss_agg_mode entropy_agg = agg_loss(loss_mat=entropys, loss_mask=response_masks, loss_agg_mode=loss_agg_mode) old_log_prob_metrics = {"actor/entropy": entropy_agg.detach().item()} metrics.update(old_log_prob_metrics) batch_meta = batch_meta.union(old_log_prob_output_meta) if "rollout_log_probs" in batch_meta.field_names: # TODO: we may want to add diff of probs too. 
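# --- Illustrative aside (not part of the original source): one way to summarize the
# rollout-vs-recomputed log-prob gap noted in the TODO above; verl's
# calculate_debug_metrics defines its own set, and the names here are hypothetical.
# A large masked mean |diff| means the inference engine and the training policy
# disagree, weakening the on-policy assumption behind the PPO ratio.
import torch

def logprob_gap_metrics(rollout_log_probs, old_log_probs, response_mask):
    gap = (rollout_log_probs - old_log_probs).abs() * response_mask
    n_tokens = response_mask.sum().clamp(min=1)
    return {
        "debug/logprob_abs_diff_mean": (gap.sum() / n_tokens).item(),
        "debug/logprob_abs_diff_max": gap.max().item(),
    }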
data_fields = ["rollout_log_probs", "old_log_probs", "responses"] if "response_mask" in batch_meta.field_names: data_fields.append("response_mask") if "attention_mask" in batch_meta.field_names: data_fields.append("attention_mask") calculate_debug_metrics_meta = asyncio.run( self.data_system_client.async_get_meta( data_fields=data_fields, task_name="calculate_debug_metrics", **base_get_meta_kwargs, ) ) calculate_debug_metrics_meta.reorder(balanced_idx) metrics.update(calculate_debug_metrics_decorated(calculate_debug_metrics_meta)) if self.use_reference_policy: # compute reference log_prob ref_log_prob_meta = asyncio.run( self.data_system_client.async_get_meta( data_fields=[ "input_ids", "attention_mask", "position_ids", "prompts", "responses", "response_mask", "old_log_probs", "data_source", "reward_model", "extra_info", "uid", "index", "tools_kwargs", "interaction_kwargs", "ability", ], task_name="compute_ref_log_prob", **base_get_meta_kwargs, ) ) ref_log_prob_meta.reorder(balanced_idx) with marked_timer("ref", timing_raw, color="olive"): if not self.ref_in_actor: ref_log_prob_output_meta = self.ref_policy_wg.compute_ref_log_prob(ref_log_prob_meta) else: ref_log_prob_output_meta = self.actor_rollout_wg.compute_ref_log_prob(ref_log_prob_meta) batch_meta = batch_meta.union(ref_log_prob_output_meta) # compute values if self.use_critic: with marked_timer("values", timing_raw, color="cyan"): values_meta = self.critic_wg.compute_values(batch_meta) batch_meta = batch_meta.union(values_meta) with marked_timer("adv", timing_raw, color="brown"): # we combine with rule-based rm reward_extra_infos_dict: dict[str, list] if self.config.reward_model.launch_reward_fn_async: reward_tensor, reward_extra_infos_dict = ray.get(future_reward) reward_td = TensorDict({"token_level_scores": reward_tensor}, batch_size=reward_tensor.size(0)) asyncio.run(self.data_system_client.async_put(data=reward_td, metadata=batch_meta)) batch_meta.add_fields(reward_td) if reward_extra_infos_dict: reward_extra_infos_dict_new = {k: np.array(v) for k, v in reward_extra_infos_dict.items()} reward_extra_infos_td = self.dict_to_tensordict(reward_extra_infos_dict_new) asyncio.run( self.data_system_client.async_put(data=reward_extra_infos_td, metadata=batch_meta) ) batch_meta.add_fields(reward_extra_infos_td) # compute rewards. 
apply_kl_penalty if available if self.config.algorithm.use_kl_in_reward: apply_kl_penalty_fields = [ "response_mask", "token_level_scores", "old_log_probs", "ref_log_prob", ] apply_kl_penalty_meta = asyncio.run( self.data_system_client.async_get_meta( data_fields=apply_kl_penalty_fields, task_name="apply_kl_penalty", **base_get_meta_kwargs, ) ) apply_kl_penalty_meta.reorder(balanced_idx) token_level_rewards, kl_metrics = apply_kl_penalty( apply_kl_penalty_meta, kl_ctrl=self.kl_ctrl_in_reward, kl_penalty=self.config.algorithm.kl_penalty, ) token_level_rewards_td = TensorDict( {"token_level_rewards": token_level_rewards}, batch_size=token_level_rewards.size(0) ) asyncio.run( self.data_system_client.async_put( data=token_level_rewards_td, metadata=apply_kl_penalty_meta ) ) apply_kl_penalty_meta.add_fields(token_level_rewards_td) metrics.update(kl_metrics) batch_meta = batch_meta.union(apply_kl_penalty_meta) else: token_level_scores_meta = asyncio.run( self.data_system_client.async_get_meta( data_fields=["token_level_scores"], task_name="token_level_scores", **base_get_meta_kwargs, ) ) token_level_scores_meta.reorder(balanced_idx) data = asyncio.run(self.data_system_client.async_get_data(token_level_scores_meta)) token_level_rewards_td = TensorDict( {"token_level_rewards": data["token_level_scores"]}, batch_size=data["token_level_scores"].size(0), ) asyncio.run( self.data_system_client.async_put( data=token_level_rewards_td, metadata=token_level_scores_meta ) ) batch_meta.add_fields(token_level_rewards_td) # compute advantages, executed on the driver process norm_adv_by_std_in_grpo = self.config.algorithm.get( "norm_adv_by_std_in_grpo", True ) # GRPO adv normalization factor assert "response_mask" in batch_meta.field_names, ( f"`response_mask` must be in batch_meta {batch_meta.field_names} for advantage computation" ) compute_advantage_fields = [ "response_mask", "token_level_rewards", ] if self.config.algorithm.adv_estimator == AdvantageEstimator.GAE: compute_advantage_fields.append("values") elif self.config.algorithm.adv_estimator == AdvantageEstimator.GRPO: compute_advantage_fields.append("uid") else: if "uid" in batch_meta.field_names: compute_advantage_fields.append("uid") if "reward_baselines" in batch_meta.field_names: compute_advantage_fields.append("reward_baselines") compute_advantage_meta = asyncio.run( self.data_system_client.async_get_meta( data_fields=compute_advantage_fields, task_name="compute_advantage", **base_get_meta_kwargs, ) ) compute_advantage_meta.reorder(balanced_idx) advantages, returns = compute_advantage( compute_advantage_meta, adv_estimator=self.config.algorithm.adv_estimator, gamma=self.config.algorithm.gamma, lam=self.config.algorithm.lam, num_repeat=self.config.actor_rollout_ref.rollout.n, norm_adv_by_std_in_grpo=norm_adv_by_std_in_grpo, config=self.config.algorithm, ) advantages_td = TensorDict( {"advantages": advantages, "returns": returns}, batch_size=advantages.size(0) ) asyncio.run( self.data_system_client.async_put(data=advantages_td, metadata=compute_advantage_meta) ) compute_advantage_meta.add_fields(advantages_td) batch_meta = batch_meta.union(compute_advantage_meta) # update critic if self.use_critic: with marked_timer("update_critic", timing_raw, color="pink"): critic_output_meta = self.critic_wg.update_critic(batch_meta) batch_meta = batch_meta.union(critic_output_meta) critic_output_metrics = reduce_metrics(critic_output_meta.extra_info["metrics"]) metrics.update(critic_output_metrics) # implement critic warmup if self.config.trainer.critic_warmup 
<= self.global_steps: # update actor with marked_timer("update_actor", timing_raw, color="red"): batch_meta.extra_info["multi_turn"] = ( self.config.actor_rollout_ref.rollout.multi_turn.enable ) update_actor_meta = asyncio.run( self.data_system_client.async_get_meta( data_fields=[ "input_ids", "attention_mask", "position_ids", "prompts", "responses", "response_mask", "old_log_probs", "ref_log_prob", "advantages", "returns", "token_level_rewards", "token_level_scores", "data_source", "reward_model", "extra_info", "uid", "index", "tools_kwargs", "interaction_kwargs", "ability", ], batch_size=self.config.data.train_batch_size * self.config.actor_rollout_ref.rollout.n, global_step=self.global_steps - 1, get_n_samples=False, task_name="update_actor", ) ) update_actor_meta.reorder(balanced_idx) update_actor_meta.set_extra_info( "global_token_num", batch_meta.get_extra_info("global_token_num") ) update_actor_meta.set_extra_info("temperature", batch_meta.get_extra_info("temperature")) actor_output_meta = self.actor_rollout_wg.update_actor(update_actor_meta) batch_meta = batch_meta.union(actor_output_meta) actor_output_metrics = reduce_metrics(actor_output_meta.extra_info["metrics"]) metrics.update(actor_output_metrics) # Log rollout generations if enabled rollout_data_dir = self.config.trainer.get("rollout_data_dir", None) if rollout_data_dir: data_fields = ["prompts", "responses", "token_level_scores", "reward_model"] if "request_id" in batch_meta.field_names: data_fields.append("request_id") log_rollout_meta = asyncio.run( self.data_system_client.async_get_meta( data_fields=data_fields, batch_size=self.config.data.train_batch_size * self.config.actor_rollout_ref.rollout.n, global_step=self.global_steps - 1, get_n_samples=False, task_name="log_rollout", ) ) log_rollout_meta.reorder(balanced_idx) self._log_rollout_data(log_rollout_meta, reward_extra_infos_dict, timing_raw, rollout_data_dir) # TODO: clear meta after iteration # TODO: validate if ( self.val_reward_fn is not None and self.config.trainer.test_freq > 0 and (is_last_step or self.global_steps % self.config.trainer.test_freq == 0) ): with marked_timer("testing", timing_raw, color="green"): val_metrics: dict = self._validate() if is_last_step: last_val_metrics = val_metrics metrics.update(val_metrics) # Check if the ESI (Elastic Server Instance)/training plan is close to expiration. esi_close_to_expiration = should_save_ckpt_esi( max_steps_duration=self.max_steps_duration, redundant_time=self.config.trainer.esi_redundant_time, ) # Check if the conditions for saving a checkpoint are met. # The conditions include a mandatory condition (1) and # one of the following optional conditions (2/3/4): # 1. The save frequency is set to a positive value. # 2. It's the last training step. # 3. The current step number is a multiple of the save frequency. # 4. The ESI(Elastic Server Instance)/training plan is close to expiration. 
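# --- Illustrative aside (not part of the original source): the checkpoint condition
# enumerated above, spelled out as a standalone predicate. Argument names are
# hypothetical; the `if` that follows is the real implementation.
def should_save(save_freq, step, is_last_step, esi_expiring):
    # (1) saving enabled, and (2) last step or (3) on schedule or (4) ESI expiring
    return save_freq > 0 and (is_last_step or step % save_freq == 0 or esi_expiring)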
if self.config.trainer.save_freq > 0 and ( is_last_step or self.global_steps % self.config.trainer.save_freq == 0 or esi_close_to_expiration ): if esi_close_to_expiration: print("Force saving checkpoint: ESI instance expiration approaching.") with marked_timer("save_checkpoint", timing_raw, color="green"): self._save_checkpoint() with marked_timer("stop_profile", timing_raw): next_step_profile = ( self.global_steps + 1 in self.config.global_profiler.steps if self.config.global_profiler.steps is not None else False ) self._stop_profiling( curr_step_profile and not next_step_profile if self.config.global_profiler.profile_continuous_steps else curr_step_profile ) prev_step_profile = curr_step_profile curr_step_profile = next_step_profile steps_duration = timing_raw["step"] self.max_steps_duration = max(self.max_steps_duration, steps_duration) # training metrics metrics.update( { "training/global_step": self.global_steps, "training/epoch": epoch, } ) # collect metrics compute_data_metrics_fields = [ "token_level_rewards", "token_level_scores", "advantages", "returns", "responses", "attention_mask", "response_mask", ] if "__num_turns__" in batch_meta.field_names: compute_data_metrics_fields.append("__num_turns__") if "tool_call_counts" in batch_meta.field_names: compute_data_metrics_fields.append("tool_call_counts") compute_data_metrics_meta = asyncio.run( self.data_system_client.async_get_meta( data_fields=compute_data_metrics_fields, task_name="compute_data_metrics", **base_get_meta_kwargs, ) ) compute_data_metrics_meta.reorder(balanced_idx) metrics.update( compute_data_metrics_decorated(batch=compute_data_metrics_meta, use_critic=self.use_critic) ) compute_timing_metrics_fields = ["responses", "attention_mask"] compute_timing_metrics_meta = asyncio.run( self.data_system_client.async_get_meta( data_fields=compute_timing_metrics_fields, task_name="compute_timing_metrics", **base_get_meta_kwargs, ) ) compute_timing_metrics_meta.reorder(balanced_idx) metrics.update( compute_timing_metrics_decorated(batch=compute_timing_metrics_meta, timing_raw=timing_raw) ) compute_throughout_metrics_meta = BatchMeta( samples=[], extra_info={"global_token_num": batch_meta.get_extra_info("global_token_num")}, ) # TODO: implement actual tflpo and theoretical tflpo n_gpus = self.resource_pool_manager.get_n_gpus() metrics.update( compute_throughout_metrics_decorated( batch=compute_throughout_metrics_meta, timing_raw=timing_raw, n_gpus=n_gpus ) ) # this is experimental and may be changed/removed in the future in favor of a general-purpose one if isinstance(self.train_dataloader.sampler, AbstractCurriculumSampler): # TODO: (TQ) support transfer queue self.train_dataloader.sampler.update(batch=batch) asyncio.run(self.data_system_client.async_clear(self.global_steps - 1)) # TODO: make a canonical logger that supports various backend logger.log(data=metrics, step=self.global_steps) progress_bar.update(1) self.global_steps += 1 if ( hasattr(self.config.actor_rollout_ref.actor, "profiler") and self.config.actor_rollout_ref.actor.profiler.tool == "torch_memory" ): self.actor_rollout_wg.dump_memory_snapshot( tag=f"post_update_step{self.global_steps}", sub_dir=f"step{self.global_steps}" ) if is_last_step: pprint(f"Final validation metrics: {last_val_metrics}") progress_bar.close() return # this is experimental and may be changed/removed in the future # in favor of a general-purpose data buffer pool if hasattr(self.train_dataset, "on_batch_end"): # The dataset may be changed after each training batch # TODO: (TQ) support 
transfer queue self.train_dataset.on_batch_end(batch=batch) ================================================ FILE: verl_distillation/recipe/transfer_queue/run_qwen3-8b_transferqueue_npu.sh ================================================ set -x project_name='GRPO-Qwen3' exp_name='GRPO-Qwen3-8B-npu' gen_tp=2 RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen3-8B"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"} TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"} python3 -m recipe.transfer_queue.main_ppo \ --config-name='transfer_queue_ppo_trainer' \ algorithm.adv_estimator=grpo \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.train_batch_size=256 \ data.max_prompt_length=512 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=${MODEL_PATH} \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=64 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=10 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.use_torch_compile=False \ actor_rollout_ref.ref.use_torch_compile=False \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.default_local_dir=${CKPTS_DIR} \ trainer.device=npu \ trainer.resume_mode=auto \ actor_rollout_ref.actor.fsdp_config.forward_prefetch=True \ actor_rollout_ref.ref.fsdp_config.forward_prefetch=True \ ++actor_rollout_ref.actor.entropy_from_logits_with_chunking=True \ ++actor_rollout_ref.ref.entropy_from_logits_with_chunking=True \ trainer.val_before_train=False \ trainer.save_freq=5 \ trainer.test_freq=5 \ trainer.total_epochs=15 \ +trainer.num_global_batch=1 \ +trainer.num_data_storage_units=2 \ +trainer.num_data_controllers=1 ================================================ FILE: verl_distillation/requirements-cuda.txt ================================================ flash-attn ================================================ FILE: verl_distillation/requirements-npu.txt ================================================ # requirements.txt records the full set of dependencies for development accelerate codetiming datasets dill hydra-core numpy<2.0.0 pandas peft>=0.15.2 pyarrow>=15.0.0 pybind11 pylatexenc tensordict>=0.8.0,<=0.10.0,!=0.9.0 transformers==4.52.4 ray==2.46.0 wandb mathruler torchdata einops qwen_vl_utils torchvision==0.20.1 ================================================ FILE: verl_distillation/requirements.txt 
================================================ # requirements.txt records the full set of dependencies for development accelerate codetiming click==8.0.4 datasets dill # flash-attn hydra-core liger-kernel numpy<2.0.0 pandas peft pyarrow>=19.0.0 pybind11 pylatexenc pre-commit ray==2.49.0 tensordict>=0.8.0,<=0.9.1,!=0.9.0 torchdata transformers # vllm==0.8.4 opentelemetry-api>=1.26.0,<1.27.0 opentelemetry-sdk>=1.26.0,<1.27.0 opentelemetry-exporter-otlp-proto-grpc>=1.26.0,<1.27.0 opentelemetry-exporter-otlp-proto-http>=1.26.0,<1.27.0 wandb packaging>=20.0 uvicorn fastapi latex2sympy2_extended math_verify sglang==0.5.2 ================================================ FILE: verl_distillation/requirements_sglang.txt ================================================ # requirements.txt records the full set of dependencies for development accelerate codetiming datasets dill flash-attn hydra-core numpy<2.0.0 pandas peft pyarrow>=19.0.0 pybind11 pylatexenc ray[default]>=2.10 tensordict>=0.8.0,<=0.10.0,!=0.9.0 torchdata torchvision transformers wandb sglang[all]==0.5.2 huggingface_hub ================================================ FILE: verl_distillation/requirements_transferqueue.txt ================================================ # requirements.txt records the full set of dependencies for development git+https://github.com/TransferQueue/TransferQueue.git@68c04e7 ================================================ FILE: verl_distillation/scripts/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: verl_distillation/scripts/converter_hf_to_mcore.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
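# --- Illustrative aside (not part of the original source): in the distributed path
# below, each rank converts a contiguous slice of decoder layers; the slice is derived
# from the per-stage shard sizes via a cumulative sum (see the np.cumsum over
# pipeline_shards in convert_hf_to_mcore). A minimal sketch with a hypothetical helper:
def _sketch_layer_range(shards, rank):
    start = sum(shards[:rank])  # layers owned by lower-ranked pipeline stages
    return start, start + shards[rank]

# _sketch_layer_range([16, 15, 15, 15], rank=1) -> (16, 31)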
import argparse import os import warnings from contextlib import contextmanager from importlib.metadata import version from typing import Any, Callable, ContextManager, Optional import numpy as np import torch import torch.distributed as dist try: # NPU patch import mindspeed.megatron_adaptor # noqa: F401 except ImportError: pass from accelerate import init_empty_weights from megatron.core import dist_checkpointing from megatron.core import parallel_state as mpu from megatron.core.dist_checkpointing.mapping import ShardedTensor from megatron.core.dist_checkpointing.serialization import StrictHandling from megatron.core.models.gpt.gpt_model import ModelType from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed from packaging.version import Version from transformers import AutoConfig from verl.model_merger.megatron_model_merger import get_dynamic_pipeline_shards from verl.models.mcore import hf_to_mcore_config from verl.utils.device import get_device_name, get_torch_device from verl.utils.megatron_utils import get_model def _init_args(): """ Examples: 1. single rank conversion for any model: > python converter_hf_to_mcore.py --hf_model_path ${hf_model} --output_path ${output_path} 2. distributed conversion for DeepseekV3 671B: > torchrun --nproc_per_node 1 --nnodes 4 --node_rank ${RANK} converter_hf_to_mcore.py \ --hf_model_path ${hf_model} --output_path ${output_path} """ parser = argparse.ArgumentParser() parser.add_argument("--hf_model_path", type=str, required=True, help="The path for the huggingface model") parser.add_argument("--output_path", type=str, required=True, help="The path for the output mcore model") parser.add_argument("--use_cpu_initialization", action="store_true", help="Whether to use cpu initialization") parser.add_argument("--test", action="store_true", help="Whether to test the conversion") parser.add_argument("--trust_remote_code", action="store_true", help="Whether to trust remote code") args = parser.parse_args() return args def test_conversion(megatron_model_provider, tfconfig, output_path, model): ########### test ########### # load model model_test = get_model( model_provider_func=megatron_model_provider, model_type=ModelType.encoder_or_decoder, wrap_with_ddp=True, transformer_config=tfconfig, ) ref_state_dict = model_test[0].module.sharded_state_dict() dist_checkpointing.load(ref_state_dict, output_path, strict=StrictHandling.ASSUME_OK_UNEXPECTED) dut_state_dict = model[0].module.state_dict() for name in dut_state_dict.keys(): if dut_state_dict[name] is None: print(f"[Warning] {name} is none in dut_state_dict") continue dut_data = dut_state_dict[name].data if name in ref_state_dict: ref_data = ref_state_dict[name] if isinstance(ref_data, ShardedTensor): ref_data = ref_data.data.view(ref_data.local_shape) else: ref_data = ref_data.data assert dut_data.shape == ref_data.shape, f"{name=} {dut_data.shape=} {ref_data.shape=}" assert (dut_data == ref_data).all(), f"{name} is not equal" print(f"{name} is equal") else: print(f"[Warning] {name} is not in ref_state_dict") for name in ref_state_dict.keys(): if ref_state_dict[name] is None: print(f"[Warning] {name} is none in ref_state_dict") continue ref_data = ref_state_dict[name] if isinstance(ref_data, ShardedTensor): ref_data = ref_data.data.view(ref_data.local_shape) else: ref_data = ref_data.data if name in dut_state_dict: dut_data = dut_state_dict[name].data assert dut_data.shape == ref_data.shape, f"{name=} {dut_data.shape=} {ref_data.shape=}" assert (dut_data == ref_data).all(), f"{name}
is not equal" print(f"{name} is equal") else: print(f"[Warning] {name} is not in dut_state_dict") print("Conversion test passed!") @torch.inference_mode() def convert_checkpoint_from_transformers_to_megatron( hf_model, model, hf_config, layer_start_end: Optional[tuple[int, int]] = None ): if layer_start_end is None: layer_start_end = (0, len(model.decoder.layers)) layer_start, layer_end = layer_start_end pp_rank = mpu.get_pipeline_model_parallel_rank() pp_size = mpu.get_pipeline_model_parallel_world_size() numel = 0 num_attention_heads = hf_config.num_attention_heads num_key_value_heads = hf_config.num_key_value_heads hidden_dim = hf_config.hidden_size head_dim = getattr(hf_config, "head_dim", hidden_dim // num_attention_heads) if num_attention_heads != num_key_value_heads: print("[WARNING] Converting GQA model") has_qkv_bias = getattr(hf_config, "qkv_bias", False) or getattr(hf_config, "attention_bias", False) has_share_expert = getattr(hf_config, "shared_expert_intermediate_size", None) if pp_rank == 0: numel += safe_copy(hf_model.model.embed_tokens.weight, model.embedding.word_embeddings.weight) assert len(model.decoder.layers) == (layer_end - layer_start), ( f"Expected {len(model.decoder.layers)} layers, but got {layer_end - layer_start}" ) for layer_idx, (layer, hf_layer) in enumerate( zip(model.decoder.layers, hf_model.model.layers[layer_start:layer_end], strict=True) ): global_layer_idx = layer_idx + layer_start numel_cur = numel numel += safe_copy(hf_layer.input_layernorm.weight, layer.self_attention.linear_qkv.layer_norm_weight) q = hf_layer.self_attn.q_proj.weight.view( [num_key_value_heads, head_dim * num_attention_heads // num_key_value_heads, -1] ) k = hf_layer.self_attn.k_proj.weight.view([num_key_value_heads, head_dim, -1]) v = hf_layer.self_attn.v_proj.weight.view([num_key_value_heads, head_dim, -1]) qkv = torch.cat([q, k, v], dim=1).view(-1, hidden_dim).contiguous() numel += safe_copy(qkv, layer.self_attention.linear_qkv.weight) if has_qkv_bias: q_bias = hf_layer.self_attn.q_proj.bias.view([num_key_value_heads, -1]) k_bias = hf_layer.self_attn.k_proj.bias.view([num_key_value_heads, -1]) v_bias = hf_layer.self_attn.v_proj.bias.view([num_key_value_heads, -1]) qkv_bias = torch.cat([q_bias, k_bias, v_bias], dim=1).view(-1).contiguous() numel += safe_copy(qkv_bias, layer.self_attention.linear_qkv.bias) if hasattr(hf_layer.self_attn, "q_norm"): numel += safe_copy(hf_layer.self_attn.q_norm.weight.data, layer.self_attention.q_layernorm.weight) numel += safe_copy(hf_layer.self_attn.k_norm.weight.data, layer.self_attention.k_layernorm.weight) numel += safe_copy(hf_layer.self_attn.o_proj.weight, layer.self_attention.linear_proj.weight) numel += safe_copy(hf_layer.post_attention_layernorm.weight, layer.pre_mlp_layernorm.weight) numel += safe_copy(hf_layer.mlp.gate.weight, layer.mlp.router.weight) for idx, hf_expert in enumerate(hf_layer.mlp.experts): fc1_weight = torch.cat([hf_expert.gate_proj.weight, hf_expert.up_proj.weight]) numel += safe_copy(fc1_weight, layer.mlp.experts.linear_fc1._parameters[f"weight{idx}"]) numel += safe_copy(hf_expert.down_proj.weight, layer.mlp.experts.linear_fc2._parameters[f"weight{idx}"]) if has_share_expert: numel += safe_copy(hf_layer.mlp.shared_expert_gate.weight, layer.mlp.shared_experts.gate_weight) shared_fc1_weight = torch.cat( [hf_layer.mlp.shared_expert.gate_proj.weight, hf_layer.mlp.shared_expert.up_proj.weight] ) numel += safe_copy(shared_fc1_weight, layer.mlp.shared_experts.linear_fc1.weight) numel += 
safe_copy(hf_layer.mlp.shared_expert.down_proj.weight, layer.mlp.shared_experts.linear_fc2.weight) print(f"{pp_rank=} {global_layer_idx=} {layer_idx=} {numel=} numel this layer={numel - numel_cur}") if pp_rank == pp_size - 1: numel += safe_copy(hf_model.model.norm.weight, model.decoder.final_layernorm.weight) numel += safe_copy(hf_model.lm_head.weight, model.output_layer.weight) return numel def safe_copy( src_tensor: torch.Tensor, dst_tensor: torch.Tensor, skip_dtype_assert: bool = False, ): if not skip_dtype_assert: if src_tensor.dtype != dst_tensor.dtype: raise ValueError(f"Get source dtype {src_tensor.dtype}, but target dtype {dst_tensor.dtype}") assert src_tensor.shape == dst_tensor.shape dst_tensor.data.copy_(src_tensor.data) return src_tensor.numel() @torch.inference_mode() def convert_checkpoint_from_transformers_to_megatron_qwen2_5_vl(hfmodel, mgmodel, hf_config): mgmodel = mgmodel.bfloat16() hfmodel = hfmodel.bfloat16() num_attention_heads = hf_config.num_attention_heads num_query_groups = hf_config.num_key_value_heads hidden_size = hf_config.hidden_size head_dim = hidden_size // num_attention_heads # 1. vision model if Version(version("transformers")) < Version("4.52.0"): print("Using transformers < 4.52 API to load vision model") hfvision = hfmodel.visual else: hfvision = hfmodel.model.visual mgvision = mgmodel.vision_model vision_hidden_size = mgvision.config.hidden_size vision_num_query_groups = mgvision.config.num_query_groups vision_head_dim = vision_hidden_size // mgvision.config.num_attention_heads copied_numel = 0 safe_copy(hfvision.rotary_pos_emb.inv_freq, mgvision.rotary_pos_emb.inv_freq) copied_numel += safe_copy(hfvision.patch_embed.proj.weight, mgvision.patch_embed.proj.weight) for hfblock, mgblock in zip(hfvision.blocks, mgvision.decoder.layers, strict=True): # norm1 --> linear_qkv.norm copied_numel += safe_copy(hfblock.norm1.weight, mgblock.self_attention.linear_qkv.layer_norm_weight) # norm2 --> mlp.linear_fc1.norm copied_numel += safe_copy(hfblock.norm2.weight, mgblock.mlp.linear_fc1.layer_norm_weight) # qkv --> self_attention.linear_qkv converted_weight = ( hfblock.attn.qkv.weight.view(3, vision_num_query_groups, -1, vision_head_dim, vision_hidden_size) .transpose(0, 1) .flatten(1, 2) .reshape(-1, vision_hidden_size) .contiguous() ) copied_numel += safe_copy(converted_weight, mgblock.self_attention.linear_qkv.weight) converted_bias = ( hfblock.attn.qkv.bias.view(3, vision_num_query_groups, -1) .transpose(0, 1) .flatten(1, 2) .view(-1) .contiguous() ) copied_numel += safe_copy(converted_bias, mgblock.self_attention.linear_qkv.bias) # proj --> self_attention.linear_proj copied_numel += safe_copy(hfblock.attn.proj.weight, mgblock.self_attention.linear_proj.weight) copied_numel += safe_copy(hfblock.attn.proj.bias, mgblock.self_attention.linear_proj.bias) # mlp --> mlp: gate fc1_weight = torch.cat([hfblock.mlp.gate_proj.weight, hfblock.mlp.up_proj.weight]) fc1_bias = torch.cat([hfblock.mlp.gate_proj.bias, hfblock.mlp.up_proj.bias]) copied_numel += safe_copy(fc1_weight, mgblock.mlp.linear_fc1.weight) copied_numel += safe_copy(fc1_bias, mgblock.mlp.linear_fc1.bias) copied_numel += safe_copy(hfblock.mlp.down_proj.weight, mgblock.mlp.linear_fc2.weight) copied_numel += safe_copy(hfblock.mlp.down_proj.bias, mgblock.mlp.linear_fc2.bias) # 2. 
vision projector hfprojector = hfvision.merger mgprojector = mgvision.projection copied_numel += safe_copy(hfprojector.ln_q.weight, mgvision.decoder.final_layernorm.weight) copied_numel += safe_copy(hfprojector.mlp[0].weight, mgprojector.encoder.linear_fc1.weight) copied_numel += safe_copy(hfprojector.mlp[0].bias, mgprojector.encoder.linear_fc1.bias) copied_numel += safe_copy(hfprojector.mlp[2].weight, mgprojector.encoder.linear_fc2.weight) copied_numel += safe_copy(hfprojector.mlp[2].bias, mgprojector.encoder.linear_fc2.bias) n_params = sum([t.numel() for t in hfvision.state_dict().values()]) assert n_params == copied_numel, f"n_params={n_params} != copied_numel={copied_numel}" # 3. llm [just Qwen2] if Version(version("transformers")) < Version("4.52.0"): print("Using transformers < 4.52 API to load llm") hfllm = hfmodel.model else: hfllm = hfmodel.model.language_model mgllm = mgmodel.language_model copied_numel = 0 copied_numel += safe_copy(hfllm.embed_tokens.weight, mgllm.embedding.word_embeddings.weight) layermaps = zip(mgllm.decoder.layers, hfllm.layers, strict=True) for mglayer, hflayer in layermaps: copied_numel += safe_copy(hflayer.input_layernorm.weight, mglayer.self_attention.linear_qkv.layer_norm_weight) q_proj_weight = hflayer.self_attn.q_proj.weight.view(num_query_groups, -1, head_dim, hidden_size) k_proj_weight = hflayer.self_attn.k_proj.weight.view(num_query_groups, -1, head_dim, hidden_size) v_proj_weight = hflayer.self_attn.v_proj.weight.view(num_query_groups, -1, head_dim, hidden_size) qkv_proj = torch.cat([q_proj_weight, k_proj_weight, v_proj_weight], dim=1).view(-1, hidden_size).contiguous() copied_numel += safe_copy(qkv_proj, mglayer.self_attention.linear_qkv.weight) q_proj_bias = hflayer.self_attn.q_proj.bias.view(num_query_groups, -1) k_proj_bias = hflayer.self_attn.k_proj.bias.view(num_query_groups, -1) v_proj_bias = hflayer.self_attn.v_proj.bias.view(num_query_groups, -1) qkv_bias = torch.cat([q_proj_bias, k_proj_bias, v_proj_bias], dim=1).view(-1).contiguous() copied_numel += safe_copy(qkv_bias, mglayer.self_attention.linear_qkv.bias) copied_numel += safe_copy(hflayer.self_attn.o_proj.weight, mglayer.self_attention.linear_proj.weight) fc1_weight = torch.cat([hflayer.mlp.gate_proj.weight, hflayer.mlp.up_proj.weight]) copied_numel += safe_copy(fc1_weight, mglayer.mlp.linear_fc1.weight) copied_numel += safe_copy(hflayer.mlp.down_proj.weight, mglayer.mlp.linear_fc2.weight) copied_numel += safe_copy(hflayer.post_attention_layernorm.weight, mglayer.mlp.linear_fc1.layer_norm_weight) copied_numel += safe_copy(hfllm.norm.weight, mgllm.decoder.final_layernorm.weight) if not hf_config.tie_word_embeddings: safe_copy(hfmodel.lm_head.weight, mgllm.output_layer.weight) n_params = sum([t.numel() for t in hfllm.state_dict().values()]) assert n_params == copied_numel, f"n_params={n_params} != copied_numel={copied_numel}" @torch.inference_mode() def convert_checkpoint_from_transformers_to_megatron_dpskv3( hf_model, model, hf_config, tfconfig, layer_start_end: Optional[tuple[int, int]] = None, ): warnings.warn("MTP model is not supported yet", stacklevel=2) if layer_start_end is None: layer_start_end = (0, len(model.decoder.layers)) layer_start, layer_end = layer_start_end numel: int = 0 pp_rank = mpu.get_pipeline_model_parallel_rank() pp_size = mpu.get_pipeline_model_parallel_world_size() if pp_rank == 0: numel += safe_copy(hf_model.model.embed_tokens.weight, model.embedding.word_embeddings.weight) assert len(model.decoder.layers) == (layer_end - layer_start), ( f"Expected 
{len(model.decoder.layers)} layers, but got {layer_end - layer_start}" ) for layer_idx, (layer, hf_layer) in enumerate( zip(model.decoder.layers, hf_model.model.layers[layer_start:layer_end], strict=True) ): global_layer_idx = layer_idx + layer_start numel_cur: int = numel numel += safe_copy(hf_layer.input_layernorm.weight, layer.input_layernorm.weight) if hf_config.q_lora_rank is None: numel += safe_copy(hf_layer.self_attn.q_proj.weight, layer.self_attention.linear_q_proj.weight) else: numel += safe_copy(hf_layer.self_attn.q_a_proj.weight, layer.self_attention.linear_q_down_proj.weight) numel += safe_copy(hf_layer.self_attn.q_b_proj.weight, layer.self_attention.linear_q_up_proj.weight) numel += safe_copy( hf_layer.self_attn.q_a_layernorm.weight, layer.self_attention.linear_q_up_proj.layer_norm_weight ) numel += safe_copy( hf_layer.self_attn.kv_a_proj_with_mqa.weight, layer.self_attention.linear_kv_down_proj.weight ) numel += safe_copy(hf_layer.self_attn.kv_b_proj.weight, layer.self_attention.linear_kv_up_proj.weight) numel += safe_copy( hf_layer.self_attn.kv_a_layernorm.weight, layer.self_attention.linear_kv_up_proj.layer_norm_weight ) numel += safe_copy(hf_layer.self_attn.o_proj.weight, layer.self_attention.linear_proj.weight) if not hasattr(layer.mlp, "router"): numel += safe_copy(hf_layer.post_attention_layernorm.weight, layer.mlp.linear_fc1.layer_norm_weight) numel += safe_copy( torch.cat([hf_layer.mlp.gate_proj.weight, hf_layer.mlp.up_proj.weight]), layer.mlp.linear_fc1.weight ) numel += safe_copy(hf_layer.mlp.down_proj.weight, layer.mlp.linear_fc2.weight) else: numel += safe_copy(hf_layer.mlp.gate.weight, layer.mlp.router.weight) # NOTE: the e_score_correction_bias in mcore model will be initialized with bfloat16 and \ # recover to fp32 in the first forward. 
There is always a diff in the bias between two models (~0.3%) numel += safe_copy( hf_layer.mlp.gate.e_score_correction_bias, layer.mlp.router.expert_bias, skip_dtype_assert=True ) if tfconfig.moe_grouped_gemm: for i, hf_expert in enumerate(hf_layer.mlp.experts): fc1_weight = torch.cat([hf_expert.gate_proj.weight, hf_expert.up_proj.weight]) linear_fc1_weighti = getattr(layer.mlp.experts.linear_fc1, "weight" + str(i)) numel += safe_copy(fc1_weight, linear_fc1_weighti) linear_fc2_weighti = getattr(layer.mlp.experts.linear_fc2, "weight" + str(i)) numel += safe_copy(hf_expert.down_proj.weight, linear_fc2_weighti) else: for i, hf_expert in enumerate(hf_layer.mlp.experts): expert = layer.mlp.experts.local_experts[i] fc1_weight = torch.cat([hf_expert.gate_proj.weight, hf_expert.up_proj.weight]) numel += safe_copy(fc1_weight, expert.linear_fc1.weight) numel += safe_copy(hf_expert.down_proj.weight, expert.linear_fc2.weight) numel += safe_copy(hf_layer.post_attention_layernorm.weight, layer.pre_mlp_layernorm.weight) shared_fc1_weight = torch.cat( [hf_layer.mlp.shared_experts.gate_proj.weight, hf_layer.mlp.shared_experts.up_proj.weight] ) numel += safe_copy(shared_fc1_weight, layer.mlp.shared_experts.linear_fc1.weight) numel += safe_copy(hf_layer.mlp.shared_experts.down_proj.weight, layer.mlp.shared_experts.linear_fc2.weight) print(f"{pp_rank=} {global_layer_idx=} {layer_idx=} {numel=} numel this layer={numel - numel_cur}") assert numel - numel_cur == sum([i.numel() for i in hf_layer.state_dict().values()]), "numel mismatch" if pp_rank == pp_size - 1: numel += safe_copy(hf_model.model.norm.weight, model.decoder.final_layernorm.weight) if not hf_config.tie_word_embeddings: numel += safe_copy(hf_model.lm_head.weight, model.output_layer.weight) print(f"{pp_rank=} {numel=}") return numel @contextmanager def noop_context() -> Any: yield def support_distributed_convert(hf_config: AutoConfig) -> bool: for arch in ["DeepseekV3ForCausalLM", "Qwen3MoeForCausalLM", "Qwen2MoeForCausalLM"]: if arch in hf_config.architectures: return True return False def convert_hf_to_mcore(hf_model_path, output_path, use_cpu_initialization=False, test=False, trust_remote_code=False): os.makedirs(output_path, exist_ok=True) if len(os.listdir(output_path)) > 0 and not test: print(f"Output path {output_path} is not empty, skipping conversion") return # init torch distributed and mpu if "WORLD_SIZE" not in os.environ: os.environ["RANK"] = "0" os.environ["WORLD_SIZE"] = "1" os.environ["MASTER_ADDR"] = "localhost" os.environ["MASTER_PORT"] = "12355" torch.distributed.init_process_group("nccl") rank = dist.get_rank() local_rank = os.getenv("LOCAL_RANK", 0) world_size = dist.get_world_size() get_torch_device().set_device(f"{get_device_name()}:{local_rank}") mpu.initialize_model_parallel( tensor_model_parallel_size=1, pipeline_model_parallel_size=world_size, virtual_pipeline_model_parallel_size=None, context_parallel_size=1, expert_model_parallel_size=1, ) model_parallel_cuda_manual_seed(0) # init hf config hf_config = AutoConfig.from_pretrained(hf_model_path) print(hf_config, flush=True) if world_size > 1 and not support_distributed_convert(hf_config): raise NotImplementedError(f"distributed conversion is not supported for {hf_config.architectures} yet.") pipeline_shards = get_dynamic_pipeline_shards(hf_config.num_hidden_layers, world_size) print(f"Pipeline shards: {pipeline_shards}", flush=True) tfconfig = hf_to_mcore_config( hf_config, torch.bfloat16, num_layers_in_first_pipeline_stage=pipeline_shards[0] if len(pipeline_shards) > 1 
else None, num_layers_in_last_pipeline_stage=pipeline_shards[-1] if len(pipeline_shards) > 2 else None, ) tfconfig.use_cpu_initialization = use_cpu_initialization tie_word_embeddings = getattr(hf_config, "tie_word_embeddings", False) # init megatron model def megatron_model_provider(pre_process, post_process): from verl.models.mcore import init_mcore_model parallel_model = init_mcore_model( tfconfig, hf_config, pre_process, post_process, share_embeddings_and_output_weights=tie_word_embeddings, value=False, ) return parallel_model context: Callable[..., ContextManager] = init_empty_weights if use_cpu_initialization else noop_context with context(): model = get_model( model_provider_func=megatron_model_provider, model_type=ModelType.encoder_or_decoder, wrap_with_ddp=False, transformer_config=tfconfig, ) if use_cpu_initialization: # convert meta device to empty tensor so it can use `copy_` function model[0].module = model[0].module.to_empty(device="cpu") with warnings.catch_warnings(): warnings.simplefilter("ignore") from transformers import AutoModelForCausalLM, AutoModelForImageTextToText # init hf model if "Qwen2_5_VLForConditionalGeneration" in hf_config.architectures: hf_model = AutoModelForImageTextToText.from_pretrained( hf_model_path, torch_dtype=torch.bfloat16, trust_remote_code=trust_remote_code ) else: hf_model = AutoModelForCausalLM.from_pretrained( hf_model_path, torch_dtype=torch.bfloat16, trust_remote_code=trust_remote_code ) hf_state_dict = hf_model.state_dict() # distributed convert if world_size > 1 and support_distributed_convert(hf_config): pipeline_cumsum = np.cumsum(pipeline_shards) layer_start = 0 if rank == 0 else pipeline_cumsum[rank - 1] layer_end = pipeline_cumsum[rank] if "DeepseekV3ForCausalLM" in hf_config.architectures: numel_partial: int = convert_checkpoint_from_transformers_to_megatron_dpskv3( hf_model, model[0].module, hf_config, tfconfig=tfconfig, layer_start_end=(layer_start, layer_end) ) elif "Qwen3MoeForCausalLM" in hf_config.architectures or "Qwen2MoeForCausalLM" in hf_config.architectures: numel_partial: int = convert_checkpoint_from_transformers_to_megatron( hf_model, model[0].module, hf_config, layer_start_end=(layer_start, layer_end) ) else: raise NotImplementedError(f"Distributed conversion is not supported for {hf_config.architectures} yet.") numel_tensor = torch.tensor([numel_partial]).to(get_device_name()) dist.all_reduce(numel_tensor, op=dist.ReduceOp.SUM) numel = int(numel_tensor.cpu().item()) print(f"total numel={numel} vs {hf_model.num_parameters()=}") if numel != hf_model.num_parameters(): warnings.warn(f"numel mismatch: {numel=} != {hf_model.num_parameters()=}", stacklevel=1) # load hf state dict to megatron model elif "Qwen2MoeForCausalLM" in hf_config.architectures: convert_checkpoint_from_transformers_to_megatron(hf_model, model[0].module, hf_config) elif "Qwen2_5_VLForConditionalGeneration" in hf_config.architectures: convert_checkpoint_from_transformers_to_megatron_qwen2_5_vl(hf_model, model[0].module, hf_config) elif "DeepseekV3ForCausalLM" in hf_config.architectures: convert_checkpoint_from_transformers_to_megatron_dpskv3(hf_model, model[0].module, hf_config, tfconfig=tfconfig) elif "Qwen3MoeForCausalLM" in hf_config.architectures: convert_checkpoint_from_transformers_to_megatron(hf_model, model[0].module, hf_config) else: assert not use_cpu_initialization, "use_cpu_initialization is only supported for MoE model" from verl.models.mcore.loader import load_state_dict_to_megatron_gptmodel load_state_dict_to_megatron_gptmodel( 
state_dict=hf_state_dict, wrapped_models=model, config=hf_config, params_dtype=torch.bfloat16, is_value_model=False, ) megatron_state_dict = model[0].module.sharded_state_dict() del hf_state_dict, hf_model # save megatron model if len(os.listdir(output_path)) == 0: dist_checkpointing.save(megatron_state_dict, output_path, sharded_strategy=None, async_sharded_save=False) if test: test_conversion(megatron_model_provider, tfconfig, output_path, model) if __name__ == "__main__": args = _init_args() convert_hf_to_mcore( args.hf_model_path, args.output_path, args.use_cpu_initialization, args.test, args.trust_remote_code ) ================================================ FILE: verl_distillation/scripts/diagnose.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Diagnose script for checking OS/hardware/python/pip/verl/network. The output of this script can be a very good hint to issue/problem. """ import os import platform import socket import subprocess import sys import time import psutil try: from urllib.parse import urlparse from urllib.request import urlopen except ImportError: from urllib2 import urlopen from urlparse import urlparse import argparse import importlib.metadata import torch URLS = { "PYPI": "https://pypi.python.org/pypi/pip", } REGIONAL_URLS = { "cn": { "PYPI(douban)": "https://pypi.douban.com/", "Conda(tsinghua)": "https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/free/", } } def test_connection(name, url, timeout=10): """Simple connection test""" urlinfo = urlparse(url) start = time.time() try: socket.gethostbyname(urlinfo.netloc) except Exception as e: print("Error resolving DNS for {}: {}, {}".format(name, url, e)) return dns_elapsed = time.time() - start start = time.time() try: _ = urlopen(url, timeout=timeout) except Exception as e: print("Error open {}: {}, {}, DNS finished in {} sec.".format(name, url, e, dns_elapsed)) return load_elapsed = time.time() - start print("Timing for {}: {}, DNS: {:.4f} sec, LOAD: {:.4f} sec.".format(name, url, dns_elapsed, load_elapsed)) def check_python(): print("----------Python Info----------") print("Version :", platform.python_version()) print("Compiler :", platform.python_compiler()) print("Build :", platform.python_build()) print("Arch :", platform.architecture()) def check_pip(): print("------------Pip Info-----------") try: import pip print("Version :", pip.__version__) print("Directory :", os.path.dirname(pip.__file__)) except ImportError: print("No corresponding pip install for current python.") def _get_current_git_commit(): try: result = subprocess.run(["git", "rev-parse", "HEAD"], capture_output=True, text=True, check=True) return result.stdout.strip() except subprocess.CalledProcessError as e: print(f"Error running git command: {e.stderr.strip()}") return None except FileNotFoundError: print("Did not find command: git") return None def check_verl(): print("----------verl Info-----------") try: sys.path.insert(0, os.getcwd()) import verl 
print("Version :", verl.__version__) verl_dir = os.path.dirname(verl.__file__) print("Directory :", verl_dir) try: commit_hash = _get_current_git_commit() print("Commit Hash :", commit_hash) except AttributeError: print("Commit hash not found. ") except ImportError as e: print(f"No verl installed: {e}") except Exception as e: import traceback if not isinstance(e, IOError): print("An error occurred trying to import verl.") print("This is very likely due to missing or incompatible library files.") print(traceback.format_exc()) def check_os(): print("----------Platform Info----------") print("Platform :", platform.platform()) print("system :", platform.system()) print("node :", platform.node()) print("release :", platform.release()) print("version :", platform.version()) def check_hardware(): print("----------Hardware Info----------") print("machine :", platform.machine()) print("processor :", platform.processor()) if sys.platform.startswith("darwin"): pipe = subprocess.Popen(("sysctl", "-a"), stdout=subprocess.PIPE) output = pipe.communicate()[0] for line in output.split(b"\n"): if b"brand_string" in line or b"features" in line: print(line.strip()) elif sys.platform.startswith("linux"): subprocess.call(["lscpu"]) elif sys.platform.startswith("win32"): subprocess.call(["wmic", "cpu", "get", "name"]) def check_network(args): print("----------Network Test----------") if args.timeout > 0: print("Setting timeout: {}".format(args.timeout)) socket.setdefaulttimeout(10) for region in args.region.strip().split(","): r = region.strip().lower() if not r: continue if r in REGIONAL_URLS: URLS.update(REGIONAL_URLS[r]) else: import warnings warnings.warn("Region {} do not need specific test, please refer to global sites.".format(r), stacklevel=2) for name, url in URLS.items(): test_connection(name, url, args.timeout) def check_environment(): print("----------Environment----------") for k, v in os.environ.items(): if k.startswith("VERL_") or k.startswith("OMP_") or k.startswith("KMP_") or k == "CC" or k == "CXX": print('{}="{}"'.format(k, v)) def check_pip_package_versions(): packages = ["vllm", "sglang", "ray", "torch"] for package in packages: try: version = importlib.metadata.version(package) print(f"{package}\t : {version}") except importlib.metadata.PackageNotFoundError: print(f"{package}\t : not found.") def check_cuda_versions(): if torch.cuda.is_available(): try: cuda_runtime_version = torch.version.cuda print(f"CUDA Runtime : {cuda_runtime_version}") import subprocess nvcc_output = subprocess.check_output(["nvcc", "--version"]).decode("utf-8") cuda_compiler_version = next((line for line in nvcc_output.splitlines() if "release" in line), None) if cuda_compiler_version: print(f"CUDA Compiler : {cuda_compiler_version.strip()}") else: print("Could not determine CUDA compiler version.") except FileNotFoundError as e: print(f"CUDA compiler : Not found: {e}") except Exception as e: print(f"An error occurred while checking CUDA versions: {e}") else: print("CUDA is not available.") def _get_cpu_memory(): """ Get the total CPU memory capacity in GB. """ memory = psutil.virtual_memory() return memory.total / (1024**3) def _get_gpu_info(): """ Get GPU type, GPU memory, and GPU count using nvidia-smi command. 
""" try: result = subprocess.run( ["nvidia-smi", "--query-gpu=gpu_name,memory.total", "--format=csv,noheader,nounits"], capture_output=True, text=True, check=True, ) gpu_lines = result.stdout.strip().split("\n") gpu_count = len(gpu_lines) gpu_info = [] for line in gpu_lines: gpu_name, gpu_memory = line.split(", ") gpu_info.append( { "type": gpu_name, "memory": float(gpu_memory) / 1024, # Convert to GB } ) return gpu_count, gpu_info except (subprocess.CalledProcessError, FileNotFoundError): print("Failed to execute nvidia-smi command.") return 0, [] def _get_system_info(): """ Get CPU memory capacity, GPU type, GPU memory, and GPU count. """ cpu_memory = _get_cpu_memory() gpu_count, gpu_info = _get_gpu_info() return {"cpu_memory": cpu_memory, "gpu_count": gpu_count, "gpu_info": gpu_info} def check_system_info(): print("----------System Info----------") system_info = _get_system_info() print(f"CPU Memory\t: {system_info['cpu_memory']:.2f} GB") print(f"GPU Count\t: {system_info['gpu_count']}") for i, gpu in enumerate(system_info["gpu_info"]): print(f"GPU {i + 1}\tType : {gpu['type']}") print(f"GPU {i + 1}\tMemory : {gpu['memory']:.2f} GB") def parse_args(): """Parse arguments.""" parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="Diagnose script for checking the current system.", ) choices = ["python", "pip", "verl", "system", "os", "environment"] for choice in choices: parser.add_argument("--" + choice, default=1, type=int, help="Diagnose {}.".format(choice)) parser.add_argument("--network", default=0, type=int, help="Diagnose network.") parser.add_argument("--hardware", default=0, type=int, help="Diagnose hardware.") parser.add_argument( "--region", default="", type=str, help="Additional sites in which region(s) to test. \ Specify 'cn' for example to test mirror sites in China.", ) parser.add_argument("--timeout", default=10, type=int, help="Connection test timeout threshold, 0 to disable.") args = parser.parse_args() return args if __name__ == "__main__": args = parse_args() if args.python: check_python() if args.pip: check_pip() check_pip_package_versions() if args.verl: check_verl() if args.os: check_os() if args.hardware: check_hardware() if args.network: check_network(args) if args.environment: check_environment() check_cuda_versions() if args.system: check_system_info() ================================================ FILE: verl_distillation/scripts/generate_trainer_config.sh ================================================ #!/usr/bin/env bash set -euox pipefail # Define config specifications: "config_name:output_file:config_arg" CONFIG_SPECS=( "ppo_trainer:_generated_ppo_trainer.yaml:" "ppo_megatron_trainer:_generated_ppo_megatron_trainer.yaml:--config-name=ppo_megatron_trainer.yaml" ) generate_config() { local config_name="$1" local output_file="$2" local config_arg="$3" local target_cfg="verl/trainer/config/${output_file}" local tmp_header=$(mktemp) local tmp_cfg=$(mktemp) echo "# This reference configration yaml is automatically generated via 'scripts/generate_trainer_config.sh'" > "$tmp_header" echo "# in which it invokes 'python3 scripts/print_cfg.py --cfg job ${config_arg}' to flatten the 'verl/trainer/config/${config_name}.yaml' config fields into a single file." >> "$tmp_header" echo "# Do not modify this file directly." >> "$tmp_header" echo "# The file is usually only for reference and never used." 
>> "$tmp_header" echo "" >> "$tmp_header" python3 scripts/print_cfg.py --cfg job ${config_arg} > "$tmp_cfg" cat "$tmp_header" > "$target_cfg" sed -n '/^actor_rollout_ref/,$p' "$tmp_cfg" >> "$target_cfg" rm "$tmp_cfg" "$tmp_header" echo "Generated: $target_cfg" } for spec in "${CONFIG_SPECS[@]}"; do IFS=':' read -r config_name output_file config_arg <<< "$spec" generate_config "$config_name" "$output_file" "$config_arg" done for spec in "${CONFIG_SPECS[@]}"; do IFS=':' read -r config_name output_file config_arg <<< "$spec" target_cfg="verl/trainer/config/${output_file}" if ! git diff --exit-code -- "$target_cfg" >/dev/null; then echo "✖ $target_cfg is out of date. Please regenerate via 'scripts/generate_trainer_config.sh' and commit the changes." exit 1 fi done echo "All good" exit 0 ================================================ FILE: verl_distillation/scripts/init_random_model.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This script override a model with custom config and random weights, mainly for create small models for debugging purposes. Usage: python scripts/init_random_model.py \ --hf_model_path \ --new_config_path \ --output_path """ import argparse import json import os import warnings from typing import Any from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, PretrainedConfig def _init_args(): parser = argparse.ArgumentParser() parser.add_argument("--hf_model_path", type=str, required=True, help="The path for the huggingface model") parser.add_argument("--new_config_path", type=str, required=True, help="The path for the new config file") parser.add_argument("--output_path", type=str, required=True, help="The path for the output random model") args = parser.parse_args() return args def check_output_path(output_path: str): if os.path.exists(output_path): warnings.warn(f"Output path '{output_path}' already exists. Will do nothing.", stacklevel=2) exit() else: os.makedirs(output_path, exist_ok=True) print(f"Output path '{output_path}' created.") def check_configs(original_config: dict[str, Any], new_config: dict[str, Any]) -> bool: """ Check if the original config and new config are compatible. This is a placeholder function; actual implementation may vary based on requirements. 
""" # Example check: ensure 'model_type' is the same if new_config.get("model_type", None) is not None and original_config.get("model_type") != new_config.get( "model_type" ): raise RuntimeError("Model types do not match.") for key in new_config: if key not in original_config: warnings.warn( f"Key '{key}' in new config does not exist in original config, may not take effect.", stacklevel=2 ) def init_random_model(hf_model_path, new_config_path, output_path): config = AutoConfig.from_pretrained(hf_model_path) tokenizer = AutoTokenizer.from_pretrained(hf_model_path) config_dict = PretrainedConfig.get_config_dict(hf_model_path)[0] print(config_dict) with open(new_config_path) as f: new_config_dict = json.load(f) check_configs(config_dict, new_config_dict) config_dict.update(new_config_dict) new_confg = config.from_dict(config_dict) print(f"new_config: {new_confg}") model = AutoModelForCausalLM.from_config(new_confg) model.save_pretrained(output_path) tokenizer.save_pretrained(output_path) new_confg.save_pretrained(output_path) print(f"Random model initialized and saved to {output_path}") if __name__ == "__main__": args = _init_args() check_output_path(args.output_path) init_random_model( hf_model_path=args.hf_model_path, new_config_path=args.new_config_path, output_path=args.output_path ) ================================================ FILE: verl_distillation/scripts/install_vllm_sglang_mcore.sh ================================================ #!/bin/bash USE_MEGATRON=${USE_MEGATRON:-1} USE_SGLANG=${USE_SGLANG:-1} export MAX_JOBS=32 echo "1. install inference frameworks and pytorch they need" if [ $USE_SGLANG -eq 1 ]; then pip install "sglang[all]==0.5.2" --no-cache-dir && pip install torch-memory-saver --no-cache-dir fi pip install --no-cache-dir "vllm==0.11.0" echo "2. install basic packages" pip install "transformers[hf_xet]>=4.51.0" accelerate datasets peft hf-transfer \ "numpy<2.0.0" "pyarrow>=15.0.0" pandas "tensordict>=0.8.0,<=0.10.0,!=0.9.0" torchdata \ ray[default] codetiming hydra-core pylatexenc qwen-vl-utils wandb dill pybind11 liger-kernel mathruler \ pytest py-spy pre-commit ruff tensorboard echo "pyext is lack of maintainace and cannot work with python 3.12." echo "if you need it for prime code rewarding, please install using patched fork:" echo "pip install git+https://github.com/ShaohonChen/PyExt.git@py311support" pip install "nvidia-ml-py>=12.560.30" "fastapi[standard]>=0.115.0" "optree>=0.13.0" "pydantic>=2.9" "grpcio>=1.62.1" echo "3. install FlashAttention and FlashInfer" # Install flash-attn-2.8.1 (cxx11abi=False) wget -nv https://github.com/Dao-AILab/flash-attention/releases/download/v2.8.1/flash_attn-2.8.1+cu12torch2.8cxx11abiFALSE-cp312-cp312-linux_x86_64.whl && \ pip install --no-cache-dir flash_attn-2.8.1+cu12torch2.8cxx11abiFALSE-cp312-cp312-linux_x86_64.whl pip install --no-cache-dir flashinfer-python==0.3.1 if [ $USE_MEGATRON -eq 1 ]; then echo "4. install TransformerEngine and Megatron" echo "Notice that TransformerEngine installation can take very long time, please be patient" pip install "onnxscript==0.3.1" NVTE_FRAMEWORK=pytorch pip3 install --no-deps git+https://github.com/NVIDIA/TransformerEngine.git@v2.6 pip3 install --no-deps git+https://github.com/NVIDIA/Megatron-LM.git@core_v0.13.1 fi echo "5. May need to fix opencv" pip install opencv-python pip install opencv-fixer && \ python -c "from opencv_fixer import AutoFix; AutoFix()" if [ $USE_MEGATRON -eq 1 ]; then echo "6. 
Install cudnn python package (avoid being overridden)" pip install nvidia-cudnn-cu12==9.10.2.21 fi echo "Successfully installed all packages" ================================================ FILE: verl_distillation/scripts/legacy_model_merger.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This script is used to merge huggingface model and test verl checkpoints from FSDP and Megatron backends. To merge FSDP checkpoints: ```sh python scripts/legacy_model_merger.py merge \ --backend fsdp \ --local_dir checkpoints/verl_fsdp_gsm8k_examples/qwen2_5_0b5_fsdp_saveload/global_step_1/actor \ --target_dir /path/to/merged_hf_model ``` To merge Megatron checkpoints: ```sh python scripts/legacy_model_merger.py merge \ --backend megatron \ --tie-word-embedding \ --local_dir checkpoints/verl_megatron_gsm8k_examples/qwen2_5_0b5_megatron_saveload/global_step_1/actor \ --target_dir /path/to/merged_hf_model ``` For more details, please refer to documentation: https://verl.readthedocs.io/en/latest/advance/checkpoint.html#convert-fsdp-and-megatron-checkpoints-to-huggingface-format-model """ import argparse import os import re import warnings from abc import ABC, abstractmethod from concurrent.futures import ThreadPoolExecutor from dataclasses import dataclass, field from pathlib import Path from typing import Optional, Union import numpy as np import torch from accelerate import init_empty_weights from safetensors.torch import load_file from torch.distributed._tensor import Placement, Shard from transformers import ( AutoConfig, AutoModelForCausalLM, AutoModelForTokenClassification, AutoModelForVision2Seq, GenerationConfig, PretrainedConfig, ) try: # for torch 2.5+ from torch.distributed.tensor import DTensor except ImportError: from torch.distributed._tensor import DTensor from tqdm import tqdm from verl.utils import hf_processor, hf_tokenizer @dataclass class ModelMergerConfig: operation: str # 'merge' or 'test' backend: str local_dir: str hf_model_config_path: str target_dir: Optional[str] = "tmp" hf_upload_path: Optional[str] = None private: bool = False test_hf_dir: Optional[str] = None tie_word_embedding: bool = False is_value_model: bool = False hf_model_path: Optional[str] = None hf_upload: bool = field(init=False) def __post_init__(self): self.hf_upload = self.operation == "merge" and bool(self.hf_upload_path) if self.operation == "test": self.target_dir = None self.hf_upload_path = None self.private = False class BaseModelMerger(ABC): def __init__(self, config: ModelMergerConfig): self.config = config self.hf_model_config_path = config.hf_model_config_path if config.hf_model_path: print( "Warning: --hf_model_path is deprecated and will be removed in a future version. Currently verl will save huggingface model configuration files into checkpoint directories. Therefore, there is no need to provide --hf_model_path. 
" ) self.hf_model_config_path = config.hf_model_path # Auto-detect huggingface subdirectory if it exists huggingface_subdir = os.path.join(self.hf_model_config_path, "huggingface") if os.path.isdir(huggingface_subdir): self.hf_model_config_path = huggingface_subdir self.model_config = AutoConfig.from_pretrained(self.hf_model_config_path) def get_transformers_auto_model_class(self): # Handle case where architectures might be None or empty if self.model_config.architectures is None or len(self.model_config.architectures) == 0: # Try to infer from model_type if architectures is missing model_type = getattr(self.model_config, 'model_type', '').lower() if 'vision' in model_type or 'vl' in model_type: return AutoModelForVision2Seq elif 'causal' in model_type or 'gpt' in model_type or 'llama' in model_type or 'qwen' in model_type: return AutoModelForCausalLM else: raise NotImplementedError( f"Cannot determine model class: architectures is None and model_type '{model_type}' is not recognized" ) architecture = self.model_config.architectures[0] if "ForTokenClassification" in architecture: return AutoModelForTokenClassification elif "ForCausalLM" in architecture: return AutoModelForCausalLM elif "ForConditionalGeneration" in architecture: return AutoModelForVision2Seq raise NotImplementedError(f"Unknown architecture {self.model_config.architectures}") def patch_model_generation_config(self, model): """ The generation_config created from model config may be different to the pretrained model, this may lead to error when generating: https://github.com/volcengine/verl/issues/1246 This function patch the generation_config created from model config to the pretrained model. """ if model.can_generate(): try: model.generation_config = GenerationConfig.from_pretrained(self.hf_model_config_path) except OSError: print( f"Warning: Generation config file not found in {self.hf_model_config_path}, using a generation config created from the model config." ) return model def save_lora_adapter(self, state_dict: dict[str, torch.Tensor]): """ Save lora adapter to safetensors. Returns: lora_path: str, the path to the lora adapter. None if no lora adapter found. Note: This function change the 'state_dict' in place. """ lora_params_names = [name for name in state_dict.keys() if "lora_" in name] if len(lora_params_names) == 0: return None import json from typing import OrderedDict import peft from safetensors.torch import save_file lora_params = OrderedDict() target_modules = set() lora_key = None for name in lora_params_names: lora_key = name.replace(".default.weight", ".weight") target_modules.add(lora_key.split(".")[-3]) lora_params[lora_key] = state_dict.pop(name) lora_rank = min(lora_params[lora_key].shape[0], lora_params[lora_key].shape[1]) peft_dict = { "r": lora_rank, "lora_alpha": 0, # lora_alpha is not set. An error should be raised to inform the user to set it manually. 
"target_modules": list(target_modules), } peft_config = peft.LoraConfig(**peft_dict).to_dict() peft_config["task_type"] = peft_config["task_type"].value if peft_config["task_type"] else None peft_config["peft_type"] = peft_config["peft_type"].value if peft_config["peft_type"] else None peft_config["target_modules"] = list(peft_config["target_modules"]) lora_path = os.path.join(self.config.target_dir, "lora_adapter") os.makedirs(lora_path, exist_ok=True) with open(os.path.join(lora_path, "adapter_config.json"), "w", encoding="utf-8") as f: json.dump(peft_config, f, ensure_ascii=False, indent=4) save_file(lora_params, os.path.join(lora_path, "adapter_model.safetensors")) for name in list(state_dict.keys()): key = ( name.replace("base_model.model.", "") .replace(".base_layer.weight", ".weight") .replace(".base_layer.bias", ".bias") ) state_dict[key] = state_dict.pop(name) return lora_path def save_hf_model_and_tokenizer(self, state_dict: dict[str, torch.Tensor]): auto_model_class = self.get_transformers_auto_model_class() with init_empty_weights(): model = auto_model_class.from_config(self.model_config, torch_dtype=torch.bfloat16) model.to_empty(device="cpu") model = self.patch_model_generation_config(model) lora_path = self.save_lora_adapter(state_dict) if lora_path: print(f"Saving lora adapter to {lora_path}") print(f"Saving model to {self.config.target_dir}") model.save_pretrained(self.config.target_dir, state_dict=state_dict) del state_dict del model processor = hf_processor(self.hf_model_config_path) try: tokenizer = hf_tokenizer(self.hf_model_config_path) except Exception as e: warnings.warn(f"Failed to create tokenizer: {e}. This may affect tokenizer saving", stacklevel=1) tokenizer = None if processor is not None: print(f"Saving processor to {self.config.target_dir}") processor.save_pretrained(self.config.target_dir) if tokenizer is not None: print(f"Saving tokenizer to {self.config.target_dir}") tokenizer.save_pretrained(self.config.target_dir) def upload_to_huggingface(self): from huggingface_hub import HfApi api = HfApi() api.create_repo(repo_id=self.config.hf_upload_path, private=self.config.private, exist_ok=True) api.upload_folder(folder_path=self.config.target_dir, repo_id=self.config.hf_upload_path, repo_type="model") @abstractmethod def merge_and_save(self): raise NotImplementedError("Subclasses should implement this method") class FSDPModelMerger(BaseModelMerger): def _get_world_size(self) -> int: """Extracts the FSDP world_size from checkpoint filenames (e.g., 'model_world_size_8_rank_0.pt').""" for filename in os.listdir(self.config.local_dir): match = re.match(r"model_world_size_(\d+)_rank_0\.pt", filename) if match: return int(match.group(1)) raise FileNotFoundError( f"Could not determine world size. No file matching 'model_world_size_(\\d+)_rank_0.pt' found in {self.config.local_dir}" ) def _load_rank_zero_state_dict(self, world_size: int) -> dict: return torch.load( Path(self.config.local_dir) / f"model_world_size_{world_size}_rank_0.pt", map_location="cpu", weights_only=False, ) def _extract_device_mesh_info(self, state_dict: dict, world_size: int) -> tuple[np.ndarray, tuple[str, ...]]: """ Retrieves sharding information (device_mesh, mesh_dim_names) from a DTensor in the state_dict. If no DTensor is found, infers a simple FSDP mesh based on world_size. 
""" pivot_key = sorted(list(state_dict.keys()))[0] weight = state_dict[pivot_key] if isinstance(weight, DTensor): # get sharding info device_mesh = weight.device_mesh mesh = device_mesh.mesh mesh_dim_names = device_mesh.mesh_dim_names else: # for non-DTensor mesh = np.array([world_size], dtype=np.int64) mesh_dim_names = ("fsdp",) return mesh, mesh_dim_names def _calculate_shard_configuration( self, mesh: np.ndarray, mesh_dim_names: tuple[str, ...] ) -> tuple[int, tuple[int, ...]]: """Calculates the total number of shards and the shape of the device mesh.""" assert mesh_dim_names in (("fsdp",), ("ddp", "fsdp")), f"Unsupported mesh_dim_names {mesh_dim_names}" if "tp" in mesh_dim_names: # TODO: "tp" is not supported yet due to the above assert total_shards = mesh.shape[-1] * mesh.shape[-2] mesh_shape = (mesh.shape[-2], mesh.shape[-1]) else: total_shards = mesh.shape[-1] mesh_shape = (mesh.shape[-1],) return total_shards, mesh_shape def _merge_by_placement(self, tensors: list[torch.Tensor], placement: Placement) -> torch.Tensor: """Merges a list of tensors based on their DTensor placement""" if placement.is_replicate(): return tensors[0] elif placement.is_partial(): raise NotImplementedError("Partial placement is not supported yet") elif placement.is_shard(): return torch.cat(tensors, dim=placement.dim).contiguous() raise NotImplementedError(f"Unsupported placement: {placement}") def _load_and_merge_state_dicts( self, world_size: int, total_shards: int, mesh_shape: tuple[int, ...], mesh_dim_names: tuple[str, ...] ) -> dict[str, torch.Tensor]: model_state_dict_lst = [None] * total_shards def process_one_shard(rank: int, model_state_dict_lst: list): model_path = Path(self.config.local_dir) / f"model_world_size_{world_size}_rank_{rank}.pt" state_dict = torch.load(model_path, map_location="cpu", weights_only=False) model_state_dict_lst[rank] = state_dict return state_dict with ThreadPoolExecutor(max_workers=min(32, os.cpu_count())) as executor: futures = [executor.submit(process_one_shard, rank, model_state_dict_lst) for rank in range(total_shards)] for future in tqdm(futures, desc=f"Loading {total_shards} FSDP shards", total=total_shards): future.result() # Merge state dicts from all shards state_dict = {} param_placements: dict[str, list] = {} for key in set(model_state_dict_lst[0].keys()): state_dict[key] = [] for model_state_shard in model_state_dict_lst: # add tensor shard in order of rank to state_dict[key] tensor = model_state_shard.pop(key) if isinstance(tensor, DTensor): state_dict[key].append(tensor._local_tensor.bfloat16()) placements = tuple(tensor.placements) # replicated placement at dp dimension can be discarded if mesh_dim_names[0] in ("dp", "ddp"): placements = placements[1:] if key not in param_placements: param_placements[key] = placements else: assert param_placements[key] == placements else: state_dict[key].append(tensor.bfloat16()) del model_state_dict_lst # Merge tensors for key in sorted(state_dict): if not isinstance(state_dict[key], list): print(f"No need to merge key {key}") continue if key in param_placements: # merge shards placements: tuple[Shard] = param_placements[key] if len(mesh_shape) == 1: # 1-D list, FSDP without TP assert len(placements) == 1 shards = state_dict[key] state_dict[key] = self._merge_by_placement(shards, placements[0]) else: # 2-D list, FSDP + TP raise NotImplementedError("FSDP + TP is not supported yet") else: state_dict[key] = torch.cat(state_dict[key], dim=0) return state_dict def merge_and_save(self): world_size = self._get_world_size() 
rank_zero_state_dict = self._load_rank_zero_state_dict(world_size) mesh, mesh_dim_names = self._extract_device_mesh_info(rank_zero_state_dict, world_size) print(f"Got device mesh {mesh}, mesh_dim_names {mesh_dim_names}") total_shards, mesh_shape = self._calculate_shard_configuration(mesh, mesh_dim_names) print(f"Processing model shards with {total_shards} {mesh_shape} in total") merged_state_dict = self._load_and_merge_state_dicts(world_size, total_shards, mesh_shape, mesh_dim_names) if self.config.operation == "test": if not self.config.test_hf_dir: raise ValueError("test_hf_dir must be provided for test operation") self._test_state_dict(merged_state_dict) elif self.config.operation == "merge": self.save_hf_model_and_tokenizer(merged_state_dict) if self.config.hf_upload: self.upload_to_huggingface() else: raise ValueError(f"Unknown operation: {self.config.operation}") def _test_state_dict(self, state_dict: dict[str, torch.Tensor]): auto_model_class = self.get_transformers_auto_model_class() hf_model = auto_model_class.from_pretrained(self.config.test_hf_dir, torch_dtype=torch.bfloat16) hf_state_dict = hf_model.state_dict() del hf_model hf_model_keys = set(hf_state_dict.keys()) collected_keys = set(state_dict.keys()) missing_keys = hf_model_keys - collected_keys assert len(missing_keys) == 0, f"Missing keys in collected state dict: {list(sorted(missing_keys))}" extra_keys = collected_keys - hf_model_keys assert len(extra_keys) == 0, f"Extra keys in collected state dict: {list(sorted(extra_keys))}" for key in hf_model_keys: hf_shape = hf_state_dict[key].shape collected_shape = state_dict[key].shape assert hf_shape == collected_shape, ( f"Shape mismatch for key '{key}': original {hf_shape} vs collected {collected_shape}" ) hf_dtype = hf_state_dict[key].dtype collected_dtype = state_dict[key].dtype assert hf_dtype == collected_dtype, ( f"Dtype mismatch for key '{key}': original {hf_dtype} vs collected {collected_dtype}" ) torch.testing.assert_close(hf_state_dict[key], state_dict[key], atol=1e-6, rtol=1e-6) print("FSDP checks passed: The merged state_dict matches the hf model saved by FSDPCheckpointManager.") class MegatronModelMerger(BaseModelMerger): def __init__(self, config: ModelMergerConfig): from verl.utils.megatron_utils import get_hf_config_and_tokenizer_checkpoint_path config.hf_model_config_path = get_hf_config_and_tokenizer_checkpoint_path(config.local_dir) super().__init__(config) self.params_mapping = { # megatron core gpt model name, huggingface model name # NOTICE: It's a little bit tricky, when 2 keys have the same prefix, we need to make sure the longer key within the containing relationship is processed first. 
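# For example, "self_attention.linear_qkv.layer_norm_weight" must be listed before "self_attention.linear_qkv": _replace_name applies the first matching entry, so the shorter prefix would otherwise capture the layer norm weight as well.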
"embedding.word_embeddings": "model.embed_tokens", # attn "self_attention.linear_qkv.layer_norm_weight": "input_layernorm.weight", "self_attention.linear_qkv.layer_norm_bias": "input_layernorm.bias", "self_attention.linear_qkv": "self_attn.qkv_proj", "self_attention.q_layernorm": "self_attn.q_norm", "self_attention.k_layernorm": "self_attn.k_norm", "self_attention.linear_proj": "self_attn.o_proj", # mla "self_attention.linear_q_proj": "self_attn.q_proj", "self_attention.linear_q_down_proj": "self_attn.q_a_proj", "self_attention.linear_q_up_proj.layer_norm_weight": "self_attn.q_a_layernorm.weight", "self_attention.linear_q_up_proj": "self_attn.q_b_proj", "self_attention.linear_kv_down_proj": "self_attn.kv_a_proj_with_mqa", "self_attention.linear_kv_up_proj.layer_norm_weight": "self_attn.kv_a_layernorm.weight", "self_attention.linear_kv_up_proj": "self_attn.kv_b_proj", # mlp "pre_mlp_layernorm": "post_attention_layernorm", "mlp.linear_fc1.layer_norm_weight": "post_attention_layernorm.weight", "mlp.linear_fc1.layer_norm_bias": "post_attention_layernorm.bias", "mlp.linear_fc1": "mlp.gate_up_proj", "mlp.linear_fc2": "mlp.down_proj", # moe "mlp.router.expert_bias": "mlp.gate.e_score_correction_bias", "mlp.router": "mlp.gate", "mlp.shared_experts.linear_fc1": "mlp.shared_experts.gate_up_proj", "mlp.shared_experts.linear_fc2": "mlp.shared_experts.down_proj", "linear_fc1": "gate_up_proj", "linear_fc2": "down_proj", # output "final_layernorm": "norm", "output_layer": "lm_head", } def _get_tp_pp_rank_from_sharded_dir(self, sharded_dir: str) -> tuple[int, int]: tp_rank = pp_rank = None rank_list = sharded_dir.split("_")[2:] if re.match(r"mp_rank_(\d\d)_(\d\d\d)", sharded_dir): tp_rank = int(rank_list[0]) pp_rank = int(rank_list[1]) elif re.match(r"mp_rank_(\d\d)", sharded_dir): tp_rank = int(rank_list[0]) pp_rank = 0 assert tp_rank is not None and pp_rank is not None, f"Invalid sharded dir {sharded_dir}" return tp_rank, pp_rank def _check_megatron_checkpoint_path(self, model_path: str) -> tuple[list[str], int, int]: """ Validates the Megatron checkpoint structure (presence of 'model.pt' in sharded directories). Determines TP and PP sizes from directory names. """ tp_size = 0 pp_size = 0 sharded_dirs = sorted(os.listdir(model_path)) for sharded_dir in sharded_dirs: assert "model.pt" in os.listdir(Path(model_path) / sharded_dir), f"model.pt not found in {sharded_dir}" tp_rank, pp_rank = self._get_tp_pp_rank_from_sharded_dir(sharded_dir) tp_size = max(tp_size, tp_rank + 1) pp_size = max(pp_size, pp_rank + 1) return sharded_dirs, tp_size, pp_size def _merge_across_tp( self, key: str, tp_data: list[torch.Tensor], config: PretrainedConfig, tp_size: int, is_value_model: bool = False, ) -> Union[torch.Tensor, list[torch.Tensor]]: if "linear_fc1.weight" in key: # if the tensor is gate and proj gate_lst = [] up_lst = [] for infer_param in tp_data: gate, up = infer_param.chunk(2) gate_lst.append(gate) up_lst.append(up) gate = torch.cat(gate_lst, dim=0) up = torch.cat(up_lst, dim=0) return [gate, up] elif "self_attention.linear_qkv." in key and "layer_norm" not in key: # if the tensor is qkv, for each param on tp, split into q, k, v # concat q, k, v separately. 
q_lst = [] k_lst = [] v_lst = [] assert config.num_attention_heads % config.num_key_value_heads == 0 num_q_per_kv = config.num_attention_heads // config.num_key_value_heads assert tp_data[0].shape[0] % (num_q_per_kv + 2) == 0 kv_size_per_tp = tp_data[0].shape[0] // (num_q_per_kv + 2) split_size = [kv_size_per_tp * num_q_per_kv, kv_size_per_tp, kv_size_per_tp] for infer_param in tp_data: num_query_groups_per_partition = config.num_key_value_heads // tp_size for chunk in infer_param.chunk(num_query_groups_per_partition): split_size = [ kv_size_per_tp * num_q_per_kv // num_query_groups_per_partition, kv_size_per_tp // num_query_groups_per_partition, kv_size_per_tp // num_query_groups_per_partition, ] q, k, v = chunk.split(split_size) q_lst.append(q) k_lst.append(k) v_lst.append(v) q = torch.cat(q_lst, dim=0) k = torch.cat(k_lst, dim=0) v = torch.cat(v_lst, dim=0) return [q, k, v] elif "layer_norm" in key or "layernorm" in key or "router" in key or ("output_layer" in key and is_value_model): return tp_data[0] else: dim = 0 if "linear_fc2.weight" in key or "self_attention.linear_proj" in key: dim = 1 return torch.cat(tp_data, dim=dim) def _load_state_dicts( self, model_ckpt_path: str, sharded_dirs: list[str], tp_size: int, pp_size: int ) -> list[list[dict]]: model_state_dict_lst = [[None for _ in range(tp_size)] for _ in range(pp_size)] def _process_one_megatron_shard(sharded_dir: str): model_file_path = Path(model_ckpt_path) / sharded_dir / "model.pt" state_dict = torch.load(model_file_path, map_location="cpu", weights_only=False) tp_rank, pp_rank = self._get_tp_pp_rank_from_sharded_dir(sharded_dir) model_state_dict_lst[pp_rank][tp_rank] = state_dict with ThreadPoolExecutor(max_workers=min(32, os.cpu_count())) as executor: futures = [executor.submit(_process_one_megatron_shard, sharded_dir) for sharded_dir in sharded_dirs] for future in tqdm(futures, desc=f"Loading {len(sharded_dirs)} Megatron shards", total=len(sharded_dirs)): future.result() return model_state_dict_lst def _check_megatron_state_key(self, key: str) -> bool: """ Checks if the key is a valid Megatron state key. Now the model merger only supports keys that start with "decoder/embedding/output_layer" in TransformerLayer. Shall not use key starts with "model." """ if key.startswith("model."): raise ValueError( f"Invalid key {key} in Megatron state_dict. Expected keys to start with 'decoder/embedding/output_layer' in TransformerLayer." ) skip_checking_keys = ["embedding.word_embeddings", "output_layer"] for skip_key in skip_checking_keys: if skip_key in key: print(f"skip checking key {key}") return # Exclude extra state keys if not key.startswith("decoder"): raise ValueError( f"Invalid key {key} in Megatron state_dict. Expected keys to start with 'decoder' in TransformerLayer." ) def _merge_state_dicts( self, model_state_dict_lst: list[list[dict]], tp_size: int, pp_size: int ) -> dict[str, torch.Tensor]: state_dict = {} vpp_size = len(model_state_dict_lst[0][0]) layers_cum = 0 for vpp_rank in range(vpp_size): for pp_rank in range(pp_size): layers_handled = 0 keys = model_state_dict_lst[pp_rank][0][vpp_rank].keys() for key in keys: if "extra_state" in key: continue if self.config.tie_word_embedding and ("output_layer" in key): print("skip lm_head and reward_head loading because of tie_word_embeddings") continue self._check_megatron_state_key(key) hf_name = self._replace_name(key, self.params_mapping) assert hf_name is not None, f"Failed to convert layer name [{key}] from megatron to huggingface." if "model.layers." 
in hf_name: local_layer_no = int(hf_name.split(".")[2]) layers_handled = max(local_layer_no, layers_handled) global_layer_no = local_layer_no + layers_cum new_key_list = hf_name.split(".") new_key_list[2] = str(global_layer_no) hf_name = ".".join(new_key_list) else: warnings.warn(f"hf_name {hf_name} will not be fixed with layer number", stacklevel=2) tp_data = [model_state_dict_lst[pp_rank][tp_rank][vpp_rank][key] for tp_rank in range(tp_size)] merged = self._merge_across_tp(key, tp_data, self.model_config, tp_size, self.config.is_value_model) if not isinstance(merged, list): state_dict[hf_name] = merged elif len(merged) == 3: # split qkv for n, d in zip(["q", "k", "v"], merged): state_dict[hf_name.replace("qkv", n)] = d elif len(merged) == 2: # split gate up state_dict[hf_name.replace("gate_up", "gate")] = merged[0] state_dict[hf_name.replace("gate_up", "up")] = merged[1] print( f"converted {key} to {hf_name} with shape {merged.shape if isinstance(merged, torch.Tensor) else [t.shape for t in merged]}" ) layers_cum += layers_handled + 1 # zero based return state_dict def merge_and_save(self): from verl.utils.megatron_utils import get_model_checkpoint_path model_ckpt_path = get_model_checkpoint_path(self.config.local_dir) sharded_dirs, tp_size, pp_size = self._check_megatron_checkpoint_path(model_ckpt_path) print(f"sharded_dirs: {sharded_dirs}, tp_size: {tp_size}, pp_size: {pp_size}, mp_size: {len(sharded_dirs)}") model_state_dict_lst = self._load_state_dicts(model_ckpt_path, sharded_dirs, tp_size, pp_size) merged_state_dict = self._merge_state_dicts(model_state_dict_lst, tp_size, pp_size) del model_state_dict_lst if self.config.operation == "test": if not self.config.test_hf_dir: raise ValueError("test_hf_dir must be provided for test operation") self._test_state_dict(merged_state_dict) elif self.config.operation == "merge": self.save_hf_model_and_tokenizer(merged_state_dict) if self.config.hf_upload: self.upload_to_huggingface() else: raise ValueError(f"Unknown operation: {self.config.operation}") def _test_state_dict(self, state_dict: dict[str, torch.Tensor]): """ Compares the merged Megatron state_dict against a reference safetensors model. Applies necessary name mappings from Megatron to Hugging Face conventions using _replace_name. 
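Tied lm_head weights, biases missing from the reference, and rotary inv_freq buffers are skipped; the remaining tensors are compared with torch.testing.assert_close under loose tolerances (atol=1e-2, rtol=5e-2).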
""" ref_state_dict = load_file(Path(self.config.test_hf_dir) / "model.safetensors") for name, loaded_weight in state_dict.items(): # name = self._replace_name(original_name, self.params_mapping) if not name or name.endswith(".bias") and name not in ref_state_dict: continue if "rotary_emb.inv_freq" in name: continue if self.config.tie_word_embedding and "lm_head.weight" in name: continue if name not in ref_state_dict: raise RuntimeError(f"key: {name} not exist in state_dict") param = ref_state_dict[name] assert loaded_weight.dtype == param.dtype torch.testing.assert_close(loaded_weight, param, atol=1e-2, rtol=5e-2) def _replace_name(self, megatron_name: str, name_mapping: dict[str, str]) -> str: for m_name, v_name in name_mapping.items(): if m_name not in megatron_name: continue megatron_name = megatron_name.replace("decoder", "model") param_name = megatron_name.replace(m_name, v_name) return param_name return None # Return None if no mapping found def main(): parser = argparse.ArgumentParser(description="verl model merger") subparsers = parser.add_subparsers(dest="operation", required=True, help="Specify 'merge' or 'test' operation.") base_op_parser = argparse.ArgumentParser(add_help=False) base_op_parser.add_argument( "--backend", type=str, required=True, choices=["fsdp", "megatron"], help="The backend of the model" ) base_op_parser.add_argument("--local_dir", type=str, required=True, help="Path to the saved model checkpoints") base_op_parser.add_argument( "--hf_model_path", type=str, default=None, help="(Deprecated) Path to the original Hugging Face model for config.", ) base_op_parser.add_argument( "--tie-word-embedding", action="store_true", help="Whether to tie word embedding weights (currently only Megatron supported)", ) base_op_parser.add_argument( "--is-value-model", action="store_true", help="Whether the model is a value model (currently only Megatron supported)", ) merge_parser = subparsers.add_parser("merge", parents=[base_op_parser], help="Merge model checkpoints and save.") merge_parser.add_argument( "--target_dir", default="tmp", type=str, help="Directory to save the merged huggingface model" ) merge_parser.add_argument( "--hf_upload_path", default=None, type=str, help="Hugging Face repository ID to upload the model" ) merge_parser.add_argument( "--private", action="store_true", help="Whether to upload the model to a private Hugging Face repository" ) test_parser = subparsers.add_parser( "test", parents=[base_op_parser], help="Test merged model against a reference Hugging Face model" ) test_parser.add_argument( "--test_hf_dir", type=str, required=True, help="Path to the reference Hugging Face model directory for testing" ) args = parser.parse_args() common_config_args = { "operation": args.operation, "backend": args.backend, "tie_word_embedding": args.tie_word_embedding, "is_value_model": args.is_value_model, "local_dir": args.local_dir, "hf_model_path": args.hf_model_path, "hf_model_config_path": args.local_dir, } if args.operation == "merge": config = ModelMergerConfig( **common_config_args, target_dir=args.target_dir, hf_upload_path=args.hf_upload_path, private=args.private, test_hf_dir=None, ) os.makedirs(config.target_dir, exist_ok=True) elif args.operation == "test": config = ModelMergerConfig( **common_config_args, test_hf_dir=args.test_hf_dir, # the following args are not used by test operation target_dir=None, hf_upload_path=None, private=False, ) else: raise NotImplementedError(f"Unknown operation: {args.operation}") if config.backend == "fsdp": merger = 
FSDPModelMerger(config) elif config.backend == "megatron": merger = MegatronModelMerger(config) else: raise NotImplementedError(f"Unknown backend: {config.backend}") merger.merge_and_save() if __name__ == "__main__": main() ================================================ FILE: verl_distillation/scripts/print_cfg.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. try: import hydra except ImportError as e: raise ImportError("Please install hydra-core via 'pip install hydra-core' and retry.") from e @hydra.main(config_path="../verl/trainer/config", config_name="ppo_trainer", version_base=None) def main(config): """Print the resolved trainer configuration managed by Hydra. Args: config: Hydra configuration dictionary containing training parameters. """ print(config) from verl.utils.config import omega_conf_to_dataclass profiler_config = omega_conf_to_dataclass(config.critic.profiler) print(profiler_config) if __name__ == "__main__": main() ================================================ FILE: verl_distillation/scripts/rollout_viewer.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
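# rollout_viewer.py is a Textual TUI for browsing rollout dump directories that contain one <step>.jsonl file per training step (files are sorted by their integer stem). A hypothetical invocation, assuming dumps were written to ./rollout_dumps: python scripts/rollout_viewer.py ./rollout_dumps. The viewer pins textual==0.52.1 (checked below) and additionally requires aiofiles and typer.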
import asyncio import re import traceback from pathlib import Path from typing import Annotated, Optional import aiofiles try: import ujson as json except ImportError: import json import typer from rich.highlighter import ReprHighlighter from rich.markdown import Markdown from rich.table import Table from rich.text import Text from textual import on from textual.app import App, ComposeResult from textual.containers import Horizontal, Vertical, VerticalScroll from textual.widgets import Input, ProgressBar, Select, SelectionList, Static INDEX_KEY = "__IDX" FILE_SUFFIX = ".jsonl" def check_textual_version(): # check if textual version is equal to 0.52.1 import textual from packaging.version import Version if Version(textual.__version__) != Version("0.52.1"): raise ImportError(f"Textual version {textual.__version__} is not supported, please pip install textual==0.52.1") check_textual_version() async def load_path(p: Path, data: dict, mask_strs: str, idx: int, pbar): samples = [] async with aiofiles.open(p, encoding="utf-8") as f: async for line in f: d = json.loads(line) for k in d: if isinstance(d[k], str): if mask_strs: d[k] = re.sub(rf"{mask_strs}", "*", d[k]) else: d[k] = json.dumps(d[k], ensure_ascii=False, indent=4) d[INDEX_KEY] = len(samples) samples.append(d) data[idx] = {"samples": samples} print(f"path {p} loaded") pbar.advance(1) async def load_dir(path: Path, data: dict[int, dict], pbar, mask_strs: str = ""): paths = list(path.glob(f"*{FILE_SUFFIX}")) paths = sorted(paths, key=lambda x: int(x.stem)) tasks = [load_path(p, data, mask_strs, i, pbar) for i, p in enumerate(paths)] await asyncio.gather(*tasks) class Highlighter(ReprHighlighter): highlights = ReprHighlighter.highlights + [ r"(?P[][\<\>{}()\|()【】\[\]=`])", r"\<\|(?P[\w\W]*?)\|\>", ] def center_word_with_equals_exactly(word: str, total_length: int, char: str = "=") -> str: if len(word) > total_length: return word padding = total_length - len(word) left_pad = (padding) // 2 right_pad = (padding + 1) // 2 return char * left_pad + " " + word + " " + char * right_pad def highlight_keyword(content: str, keyword: Optional[str]): if not keyword: return Text(content) text = Text() parts = content.split(keyword) for i, part in enumerate(parts): text.append(part, style=None) if i < len(parts) - 1: # text.append(keyword, style=Style(color="#d154d1", bgcolor="yellow", bold=True)) text.append(keyword, style="on #8f51b5") return text help_doc = """ ⌨️ keybinds: - `f/esc`: find/cancel - `tab/←/→`: change focus - `j/k`: page down/up - `g/G`: scroll home/end - `n/N`: next sample/step - `p/P`: previous sample/step - `s`: switch display mode - plain text - rich table """ class JsonLineViewer(App): BINDINGS = [ ("left", "focus_previous", "Focus Previous"), ("right", "focus_next", "Focus Next"), ("s", "swith_render", "switch render"), # control ("n", "next_sample", "Next Sample"), ("N", "next_step", "Next Step"), ("p", "previous_sample", "Previous Sample"), ("P", "previous_step", "Previous Step"), # search ("f", "toggle_search", "find"), ("enter", "next_search", "find next"), ("escape", "cancel_search", "cancel find"), # scroll ("j", "page_down", "page down"), ("k", "page_up", "page up"), ("g", "page_home", "page home"), ("G", "page_end", "page end"), ] CSS = """ Select:focus > SelectCurrent { border: tall #8f51b5; } Select.-expanded > SelectCurrent { border: tall #8f51b5; } #select-container { width: 15%; height: 100%; align: center top; } #search-container { height: 10%; align: center top; } #search-box { width: 50%; } #reqid-box { width: 
50%; } """ def __init__(self, step_num: int, data: dict[int, dict], pbar): super().__init__() self.step_num = step_num self.data = data self.render_table = False self.selected_step_index = 0 self.selected_sample_index = 0 self.pbar = pbar self.matches = [] self.current_match_index = 0 self.highlighter = Highlighter() first_samples = data[list(data.keys())[0]]["samples"] # Prepare the initial field filter list (all keys from the first sample) self.filter_fields = [(f, f, True) for f in first_samples[0].keys()] # Internal set used for fast membership checks when we add new fields on the fly. # We keep it here so that when new columns appear in later steps (e.g. `request_id`), # they can be added to the UI automatically without restarting the viewer. self._field_set: set[str] = set(first_samples[0].keys()) self.sample_num = len(first_samples) def compose(self) -> ComposeResult: with Horizontal(id="search-container"): yield Input(placeholder="find something...", id="search-box") yield Input(placeholder="request id...", id="reqid-box") with Vertical(id="search-container2"): yield self.pbar yield Static("", id="search-status") with Horizontal(): with Vertical(id="select-container"): yield Static("\n") yield Static( renderable=Markdown( help_doc, ), markup=False, ) yield Static("\n") yield Select( id="step-select", value=0, prompt="select step", options=[("step: 1", 0)], allow_blank=False, ) yield Select( id="sample-select", value=0, prompt="select sample", options=[("sample: 1", 0)], allow_blank=False, ) yield Select( id="sample-sort", value=0, prompt="Sort", options=[ ("sort", 0), ("score asc", 1), ("score desc", 2), ], allow_blank=False, ) yield SelectionList[int](("Select ALL", 1, True), id="fields-select-all") with VerticalScroll(id="scroll-view2"): yield SelectionList[str](*self.filter_fields, id="fields-select") with VerticalScroll(id="scroll-view"): yield Static(id="content", markup=False) async def on_mount(self) -> None: self.step_select = self.query_one("#step-select", Select) self.sample_select = self.query_one("#sample-select", Select) self.sample_sort = self.query_one("#sample-sort", Select) self.content_display = self.query_one("#content", Static) self.search_box = self.query_one("#search-box", Input) self.reqid_box = self.query_one("#reqid-box", Input) self.scroll_view = self.query_one("#scroll-view", VerticalScroll) self.search_status = self.query_one("#search-status", Static) self.fields_select = self.query_one("#fields-select", SelectionList) self.fields_select.border_title = "field filter" if self.data: self.step_select.set_options([(f"step: {i + 1}", i) for i in range(self.step_num)]) self.sample_select.set_options([(f"sample: {i + 1}", i) for i in range(self.sample_num)]) self.step_select.focus() await self.update_content() def update_result_options(self, offset: int = 0, sort_desc: Optional[bool] = None): options = [] if isinstance(self.selected_step_index, int) and self.selected_step_index < len(self.data): if self.sample_num is None or sort_desc is not None: samples = self.data[self.selected_step_index].get("samples", []) if not samples: self.selected_sample_index = offset return if sort_desc is not None: samples = sorted( samples, key=lambda x: x.get("score", x.get("score_1", 0)), reverse=sort_desc, ) options = [(f"sample: {r[INDEX_KEY] + 1}", r[INDEX_KEY]) for r in samples] self.sample_select.set_options(options) self.sample_num = len(samples) if sort_desc is not None and options: self.selected_sample_index = options[0][1] else: self.selected_sample_index = offset async
def update_content(self, search_keyword: Optional[str] = None): content = "" try: samples = self.data[self.selected_step_index].get("samples", []) content_dict_full = samples[self.selected_sample_index] # Dynamically track any NEW keys that appear and add them to the field filter. self._update_fields_select(content_dict_full.keys()) # Apply field selection filter (only show selected fields) content_dict = {k: v for k, v in content_dict_full.items() if k in self.fields_select.selected} if self.render_table: content = Table("key", "value", show_lines=True) for k in content_dict: v = content_dict[k] v = f"{v}" content.add_row( k, self.highlighter(highlight_keyword(v, search_keyword)), ) else: text = Text() for k in content_dict: v = content_dict[k] s = center_word_with_equals_exactly(k, 64) + f"\n{v}\n" text.append(highlight_keyword(s, search_keyword)) content = self.highlighter(text) except KeyError: content = f"Loading data asynchronously, progress: {len(self.data)}/{self.step_num} step" except Exception: content = self.highlighter(traceback.format_exc()) self.content_display.update(content) # --------------------------------------------------------------------- # Request-ID jump logic # --------------------------------------------------------------------- @on(Input.Submitted, "#reqid-box") async def on_reqid_submitted(self, event: Input.Submitted) -> None: """Jump to the sample that has a matching `request_id`.""" req_id_raw = event.value.strip() # Remove hyphens so search is tolerant to different id formats req_id = req_id_raw.replace("-", "") if not req_id: return found = False for step_idx, step_data in self.data.items(): for sample in step_data.get("samples", []): sample_id = str(sample.get("request_id", "")) if sample_id.replace("-", "") == req_id: # Update selected indices self.selected_step_index = step_idx self.step_select.value = step_idx # Ensure sample list is updated and select sample self.update_result_options(offset=sample[INDEX_KEY]) self.selected_sample_index = sample[INDEX_KEY] self.sample_select.value = sample[INDEX_KEY] await self._clear_search() await self.update_content() found = True break if found: break if not found: self.search_status.update(Text(f"request_id '{req_id_raw}' not found", style="bold red")) else: # Keep the typed id in the input box so users see what was searched. pass # --------------------------------------------------------------------- # Helper: add new fields to SelectionList on-the-fly # --------------------------------------------------------------------- def _update_fields_select(self, keys): """Add any unseen *keys* to the field-selection widget so they can be toggled. The viewer is often launched with only the first step loaded. Later steps may introduce new columns (e.g. `request_id`). This helper ensures those fields become visible without requiring a restart. """ # Ensure we have the widget (only after on_mount) if not hasattr(self, "fields_select"): return for k in keys: if k not in self._field_set: self._field_set.add(k) try: # By default, new fields are selected so they appear immediately. self.fields_select.add_option(k, k, selected=True) except Exception: # Fallback for older textual versions where signature is different. 
                    self.fields_select.add_option((k, k, True))

    @on(Select.Changed, "#step-select")
    async def step_changed(self, event):
        self.selected_step_index = event.value
        self.update_result_options()
        await self.update_content()

    @on(Select.Changed, "#sample-select")
    async def sample_changed(self, event):
        self.selected_sample_index = event.value
        await self._clear_search()
        await self.update_content()

    @on(Select.Changed, "#sample-sort")
    async def sort_changed(self, event):
        v = event.value
        self.update_result_options(sort_desc=None if v == 0 else False if v == 1 else True)
        await self.update_content()

    @on(SelectionList.SelectedChanged, "#fields-select")
    async def fields_changed(self, event):
        await self.update_content()

    @on(SelectionList.SelectedChanged, "#fields-select-all")
    async def fields_all_changed(self, event):
        s = self.query_one("#fields-select-all", SelectionList)
        if s.selected:
            self.fields_select.select_all()
        else:
            self.fields_select.deselect_all()

    def action_focus_previous(self):
        self.screen.focus_previous()

    def action_focus_next(self):
        self.screen.focus_next()

    async def action_next_step(self) -> None:
        self.selected_step_index += 1
        if self.selected_step_index >= self.step_num:
            self.selected_step_index = 0
        self.step_select.value = self.selected_step_index
        self.update_result_options()
        await self.update_content()

    async def action_next_sample(self) -> None:
        self.selected_sample_index += 1
        if not self.sample_num or self.selected_sample_index >= self.sample_num:
            self.selected_sample_index = 0
        self.sample_select.value = self.selected_sample_index
        await self._clear_search()
        await self.update_content()

    async def action_previous_step(self) -> None:
        self.selected_step_index -= 1
        if self.selected_step_index < 0:
            self.selected_step_index = self.step_num - 1
        self.step_select.value = self.selected_step_index
        self.update_result_options()
        await self.update_content()

    async def action_previous_sample(self) -> None:
        self.selected_sample_index -= 1
        if self.selected_sample_index < 0:
            self.selected_sample_index = self.sample_num - 1
        self.sample_select.value = self.selected_sample_index
        await self._clear_search()
        await self.update_content()

    async def action_swith_render(self):
        self.render_table = not self.render_table
        await self.update_content()

    def action_toggle_search(self) -> None:
        self.search_box.focus()

    async def action_cancel_search(self) -> None:
        self.search_box.value = ""
        await self._clear_search()
        await self.update_content()

    async def _clear_search(self):
        self.matches = []
        self.search_status.update("")
        self.current_match_index = 0

    @on(Input.Submitted, "#search-box")
    async def on_search_submitted(self, event: Input.Submitted) -> None:
        self.matches = []
        self.current_match_index = 0
        if event.value:
            await self.update_content(event.value)
            renderable = self.content_display.render()
            if isinstance(renderable, Table):
                return
            assert isinstance(renderable, Text)
            console = self.content_display._console
            lines = renderable.wrap(console, self.scroll_view.container_size.width)
            line_idx_recorded = set()
            for line_idx, line in enumerate(lines):
                if line_idx in line_idx_recorded:
                    continue
                if event.value in line:
                    self.matches.append(
                        {
                            "line": line_idx,
                            "word": event.value,
                        }
                    )
                    line_idx_recorded.add(line_idx)
        self.scroll_view.focus()
        await self.action_next_search()

    async def action_next_search(self) -> None:
        if not self.matches or self.current_match_index >= len(self.matches):
            return
        target_line = self.matches[self.current_match_index]["line"]
        self.scroll_view.scroll_to(x=0, y=target_line * 1, animate=False)
        self.current_match_index = (self.current_match_index + 1) % len(self.matches)
        self.search_status.update(
            Text(
                f"Find: {self.current_match_index + 1}/{len(self.matches)}",
                style="bold on #8f51b5",
            )
        )

    def action_page_up(self):
        self.scroll_view.scroll_page_up(animate=False)

    def action_page_down(self):
        self.scroll_view.scroll_page_down(animate=False)

    def action_page_home(self):
        self.scroll_view.scroll_home(animate=False)

    def action_page_end(self):
        self.scroll_view.scroll_end(animate=False)


async def _run(path: Path, mask_str: str):
    assert path.exists(), f"{path} does not exist"
    paths = list(path.glob(f"*{FILE_SUFFIX}"))
    paths = sorted(paths, key=lambda x: int(x.stem))
    if not paths:
        raise ValueError(f"no available reward dump files under {path}")
    print(f"found {len(paths)} jsonl files")

    pbar = ProgressBar(total=len(paths), name="data load progress")
    data = {}
    await load_path(paths[0], data, mask_str, 0, pbar)
    app = JsonLineViewer(step_num=len(paths), data=data, pbar=pbar)
    await asyncio.gather(load_dir(path, data, pbar, mask_str), app.run_async())


app = typer.Typer()


@app.command(help="launch TUI APP")
def run(
    rollout_data_dir: Path,
    mask_str: Annotated[str, typer.Option(help="string that will be masked to *")] = r"<\|image_pad\|>|<\|imgpad\|>",
):
    loop = asyncio.get_event_loop()
    loop.run_until_complete(_run(rollout_data_dir, mask_str))


if __name__ == "__main__":
    app()


================================================
FILE: verl_distillation/setup.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
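A quick aside on the rollout-viewer CLI that ends above: the `--mask-str` option is a regular expression, and (assuming the loader functions `load_path`/`load_dir`, defined earlier in that file, apply it with `re.sub` before display, which is not shown here) it collapses image-pad tokens to `*` so they don't flood the terminal. A minimal sketch of that behavior:

```python
import re

# Hypothetical illustration of the default --mask-str pattern: every
# occurrence of the pad tokens is replaced with "*" before rendering.
mask_str = r"<\|image_pad\|>|<\|imgpad\|>"
sample = "prompt <|image_pad|><|image_pad|> ... answer"
print(re.sub(mask_str, "*", sample))  # -> "prompt ** ... answer"
```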
# setup.py is the fallback installation script when pyproject.toml does not work
import os
from pathlib import Path

from setuptools import find_packages, setup

version_folder = os.path.dirname(os.path.abspath(__file__))

with open(os.path.join(version_folder, "verl/version/version")) as f:
    __version__ = f.read().strip()

install_requires = [
    "accelerate",
    "codetiming",
    "datasets",
    "dill",
    "hydra-core",
    "numpy<2.0.0",
    "pandas",
    "peft",
    "pyarrow>=19.0.0",
    "pybind11",
    "pylatexenc",
    "ray[default]>=2.41.0",
    "torchdata",
    "tensordict>=0.8.0,<=0.10.0,!=0.9.0",
    "transformers",
    "wandb",
    "packaging>=20.0",
    "tensorboard",
]

TEST_REQUIRES = ["pytest", "pre-commit", "py-spy", "pytest-asyncio"]
PRIME_REQUIRES = ["pyext"]
GEO_REQUIRES = ["mathruler", "torchvision", "qwen_vl_utils"]
GPU_REQUIRES = ["liger-kernel", "flash-attn"]
MATH_REQUIRES = ["math-verify"]  # Add math-verify as an optional dependency
VLLM_REQUIRES = ["tensordict>=0.8.0,<=0.10.0,!=0.9.0", "vllm>=0.8.5,<=0.11.0"]
SGLANG_REQUIRES = [
    "tensordict>=0.8.0,<=0.10.0,!=0.9.0",
    "sglang[srt,openai]==0.5.2",
    "torch==2.8.0",
]
TRL_REQUIRES = ["trl<=0.9.6"]
MCORE_REQUIRES = ["mbridge"]
TRANSFERQUEUE_REQUIRES = ["TransferQueue @ git+https://github.com/TransferQueue/TransferQueue.git@68c04e7"]

extras_require = {
    "test": TEST_REQUIRES,
    "prime": PRIME_REQUIRES,
    "geo": GEO_REQUIRES,
    "gpu": GPU_REQUIRES,
    "math": MATH_REQUIRES,
    "vllm": VLLM_REQUIRES,
    "sglang": SGLANG_REQUIRES,
    "trl": TRL_REQUIRES,
    "mcore": MCORE_REQUIRES,
    "transferqueue": TRANSFERQUEUE_REQUIRES,
}

this_directory = Path(__file__).parent
long_description = (this_directory / "README.md").read_text()

setup(
    name="verl",
    version=__version__,
    package_dir={"": "."},
    packages=find_packages(where="."),
    url="https://github.com/volcengine/verl",
    license="Apache 2.0",
    author="Bytedance - Seed - MLSys",
    author_email="zhangchi.usc1992@bytedance.com, gmsheng@connect.hku.hk",
    description="verl: Volcano Engine Reinforcement Learning for LLM",
    install_requires=install_requires,
    extras_require=extras_require,
    package_data={
        "": ["version/*"],
        "verl": ["trainer/config/*.yaml"],
        "recipe.onpolicy_distill": ["config/*.yaml"],
    },
    include_package_data=True,
    long_description=long_description,
    long_description_content_type="text/markdown",
)


================================================
FILE: verl_distillation/tests/README.md
================================================
# Tests layout

Each folder under `tests/` corresponds to a test category for a sub-namespace in verl. For instance:

- `tests/trainer` for testing functionality related to `verl/trainer`
- `tests/models` for testing functionality related to `verl/models`
- ...

There are a few folders with the `special_` prefix, created for special purposes:

- `special_distributed`: unit tests that must run with multiple GPUs
- `special_e2e`: end-to-end tests with training/generation scripts
- `special_npu`: tests for NPUs
- `special_sanity`: a suite of quick sanity tests
- `special_standalone`: a set of tests that are designed to run in dedicated environments

Accelerators for tests:

- By default, tests are run with GPUs available, except for the ones under `special_npu` and any test script whose name ends with `on_cpu.py`.
- Test scripts with the `on_cpu.py` name suffix are run on CPU resources in a Linux environment.

# Workflow layout

All CI tests are configured by yaml files in `.github/workflows/`. Here's an overview of all test configs:

1. A list of always-triggered CPU sanity tests: `check-pr-title.yml`, `secrets_scan.yml`, `pre-commit.yml`, `doc.yml`
2. Some heavy multi-GPU unit tests, such as `model.yml`, `vllm.yml`, `sgl.yml`
3. End-to-end tests: `e2e_*.yml`
4. Unit tests
   - `cpu_unit_tests.yml`: run pytest on all scripts matching the file name pattern `tests/**/test_*_on_cpu.py`
   - `gpu_unit_tests.yml`: run pytest on all test scripts without the `on_cpu.py` suffix.
   - Since the cpu/gpu unit tests by default run all tests under `tests`, please make sure tests are manually excluded from them when
     - a new workflow yaml is added to `.github/workflows`
     - new tests are added to a workflow mentioned in 2.


================================================
FILE: verl_distillation/tests/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


================================================
FILE: verl_distillation/tests/experimental/agent_loop/agent_utils.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import ray
from omegaconf import DictConfig

from verl.experimental.agent_loop import AgentLoopManager
from verl.single_controller.ray import RayClassWithInitArgs, RayWorkerGroup
from verl.single_controller.ray.base import create_colocated_worker_cls
from verl.trainer.ppo.ray_trainer import ResourcePoolManager, Role
from verl.workers.fsdp_workers import ActorRolloutRefWorker, AsyncActorRolloutRefWorker, RewardModelWorker


def init_agent_loop_manager(config: DictConfig) -> AgentLoopManager | RayWorkerGroup:
    # =========================== 1. Create hybrid ActorRollout workers ===========================
    actor_rollout_cls = (
        AsyncActorRolloutRefWorker if config.actor_rollout_ref.rollout.mode == "async" else ActorRolloutRefWorker
    )
    role_worker_mapping = {
        Role.ActorRollout: ray.remote(actor_rollout_cls),
    }
    if config.reward_model.enable:
        role_worker_mapping[Role.RewardModel] = ray.remote(RewardModelWorker)

    global_pool_id = "global_pool"
    resource_pool_spec = {
        global_pool_id: [config.trainer.n_gpus_per_node] * config.trainer.nnodes,
    }
    mapping = {
        Role.ActorRollout: global_pool_id,
    }
    if config.reward_model.enable_resource_pool:
        mapping[Role.RewardModel] = "reward_pool"
        if config.reward_model.n_gpus_per_node <= 0:
            raise ValueError("config.reward_model.n_gpus_per_node must be greater than 0")
        if config.reward_model.nnodes <= 0:
            raise ValueError("config.reward_model.nnodes must be greater than 0")
        reward_pool = [config.reward_model.n_gpus_per_node] * config.reward_model.nnodes
        resource_pool_spec["reward_pool"] = reward_pool

    resource_pool_manager = ResourcePoolManager(resource_pool_spec=resource_pool_spec, mapping=mapping)
    resource_pool_manager.create_resource_pool()
    resource_pool_to_cls = {pool: {} for pool in resource_pool_manager.resource_pool_dict.values()}

    # create actor and rollout
    resource_pool = resource_pool_manager.get_resource_pool(Role.ActorRollout)
    actor_rollout_cls = RayClassWithInitArgs(
        cls=role_worker_mapping[Role.ActorRollout], config=config.actor_rollout_ref, role="actor_rollout"
    )
    resource_pool_to_cls[resource_pool]["actor_rollout"] = actor_rollout_cls

    if config.reward_model.enable:
        # we create a RM here
        resource_pool = resource_pool_manager.get_resource_pool(Role.RewardModel)
        rm_cls = RayClassWithInitArgs(role_worker_mapping[Role.RewardModel], config=config.reward_model)
        resource_pool_to_cls[resource_pool]["rm"] = rm_cls

    all_wg = {}
    for resource_pool, class_dict in resource_pool_to_cls.items():
        worker_dict_cls = create_colocated_worker_cls(class_dict=class_dict)
        wg_dict = RayWorkerGroup(resource_pool=resource_pool, ray_cls_with_init=worker_dict_cls)
        spawn_wg = wg_dict.spawn(prefix_set=class_dict.keys())
        all_wg.update(spawn_wg)
    actor_rollout_wg = all_wg["actor_rollout"]
    actor_rollout_wg.init_model()
    if config.actor_rollout_ref.rollout.mode == "sync":
        return actor_rollout_wg

    if config.reward_model.enable_resource_pool and config.reward_model.enable:
        rm_wg = all_wg["rm"]
        rm_wg.init_model()
    else:
        rm_wg = None

    # =========================== 2. Create AgentLoopManager ===========================
    agent_loop_manager = AgentLoopManager(
        config=config,
        worker_group=actor_rollout_wg,
        rm_wg=rm_wg,
    )
    return agent_loop_manager


================================================
FILE: verl_distillation/tests/experimental/agent_loop/qwen_vl_tool_chat_template.jinja2
================================================
{% set image_count = namespace(value=0) %}
{% set video_count = namespace(value=0) %}
{%- if tools %}
    {{- '<|im_start|>system\n' }}
    {%- if messages[0]['role'] == 'system' %}
        {%- if messages[0]['content'] is string %}
            {{- messages[0]['content'] }}
        {%- else %}
            {{- messages[0]['content'][0]['text'] }}
        {%- endif %}
    {%- else %}
        {{- 'You are a helpful assistant.'
 }}
    {%- endif %}
    {{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
    {%- for tool in tools %}
        {{- "\n" }}
        {{- tool | tojson }}
    {%- endfor %}
    {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
{% for message in messages %}
{% if message['role'] != 'system' or loop.first == false %}
{%- if (message.role == "user") or (message.role == "system" and not loop.first) or (message.role == "assistant" and not message.tool_calls) %}
<|im_start|>{{ message['role'] }}
{% if message['content'] is string %}
{{ message['content'] }}<|im_end|>
{% else %}
{% for content in message['content'] %}
{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}
{% set image_count.value = image_count.value + 1 %}
{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>
{% elif content['type'] == 'video' or 'video' in content %}
{% set video_count.value = video_count.value + 1 %}
{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>
{% elif 'text' in content %}
{{ content['text'] }}
{% endif %}
{% endfor %}<|im_end|>
{% endif %}
{%- elif message.role == "assistant" %}
{{- '<|im_start|>' + message.role }}
{%- if message.content %}
{{- '\n' + message.content }}
{%- endif %}
{%- for tool_call in message.tool_calls %}
{%- if tool_call.function is defined %}
{%- set tool_call = tool_call.function %}
{%- endif %}
{{- '\n<tool_call>\n{"name": "' }}
{{- tool_call.name }}
{{- '", "arguments": ' }}
{{- tool_call.arguments | tojson }}
{{- '}\n</tool_call>' }}
{%- endfor %}
{{- '<|im_end|>\n' }}
{%- elif message.role == "tool" %}
{%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %}
{{- '<|im_start|>user' }}
{%- endif %}
{{- '\n<tool_response>\n' }}
{% if message['content'] is string %}
{{ message.content }}
{% else %}
{% for content in message['content'] %}
{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}
{% set image_count.value = image_count.value + 1 %}
{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>
{% elif content['type'] == 'video' or 'video' in content %}
{% set video_count.value = video_count.value + 1 %}
{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>
{% elif content['type'] == 'text' or 'text' in content %}
{{ content['text'] }}
{% endif %}
{% endfor %}
{% endif %}
{{- '\n</tool_response>' }}
{%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
{{- '<|im_end|>\n' }}
{%- endif %}
{%- endif %}
{% endif %}
{% endfor %}
{%- else %}
{% for message in messages %}
{% if loop.first and message['role'] != 'system' %}
<|im_start|>system
You are a helpful assistant.<|im_end|>
{% endif %}
{%- if (message.role == "user") or (message.role == "system" and not loop.first) or (message.role == "assistant" and not message.tool_calls) %}
<|im_start|>{{ message['role'] }}
{% if message['content'] is string %}
{{ message['content'] }}<|im_end|>
{% else %}
{% for content in message['content'] %}
{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}
{% set image_count.value = image_count.value + 1 %}
{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>
{% elif content['type'] == 'video' or 'video' in content %}
{% set video_count.value = video_count.value + 1 %}
{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>
{% elif 'text' in content %}
{{ content['text'] }}
{% endif %}
{% endfor %}<|im_end|>
{% endif %}
{%- elif message.role == "assistant" %}
{{- '<|im_start|>' + message.role }}
{%- if message.content %}
{{- '\n' + message.content }}
{%- endif %}
{%- for tool_call in message.tool_calls %}
{%- if tool_call.function is defined %}
{%- set tool_call = tool_call.function %}
{%- endif %}
{{- '\n<tool_call>\n{"name": "' }}
{{- tool_call.name }}
{{- '", "arguments": ' }}
{{- tool_call.arguments | tojson }}
{{- '}\n</tool_call>' }}
{%- endfor %}
{{- '<|im_end|>\n' }}
{%- elif message.role == "tool" %}
{%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %}
{{- '<|im_start|>user' }}
{%- endif %}
{{- '\n<tool_response>\n' }}
{% if message['content'] is string %}
{{ message.content }}
{% else %}
{% for content in message['content'] %}
{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}
{% set image_count.value = image_count.value + 1 %}
{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>
{% elif content['type'] == 'video' or 'video' in content %}
{% set video_count.value = video_count.value + 1 %}
{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>
{% elif content['type'] == 'text' or 'text' in content %}
{{ content['text'] }}
{% endif %}
{% endfor %}
{% endif %}
{{- '\n</tool_response>' }}
{%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
{{- '<|im_end|>\n' }}
{%- endif %}
{%- endif %}
{% endfor %}
{%- endif %}
{% if add_generation_prompt %}
<|im_start|>assistant
{% endif %}


================================================
FILE: verl_distillation/tests/experimental/agent_loop/test_agent_loop_reward.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
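As a side note on the chat template above: the multimodal test later in this section loads it and passes it as `custom_chat_template`. A minimal sketch (assumptions: a recent `transformers` with tool support in `apply_chat_template`, a local copy of the Qwen2.5-VL tokenizer, and the repo-relative template path below) of rendering it directly:

```python
from transformers import AutoTokenizer

# Assumed model id and path; swap in whatever tokenizer/template you have locally.
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-VL-3B-Instruct")
with open("tests/experimental/agent_loop/qwen_vl_tool_chat_template.jinja2") as f:
    template = f.read()

tools = [{
    "type": "function",
    "function": {
        "name": "get_current_temperature",
        "parameters": {"type": "object", "properties": {"location": {"type": "string"}}},
    },
}]
messages = [{"role": "user", "content": "What's the temperature in Paris?"}]

# Renders the <tools> block plus an <|im_start|>assistant generation header.
prompt = tokenizer.apply_chat_template(
    messages, tools=tools, chat_template=template, add_generation_prompt=True, tokenize=False
)
print(prompt)
```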
import os

import pytest
import ray
from hydra import compose, initialize_config_dir
from torchdata.stateful_dataloader import StatefulDataLoader
from transformers import AutoTokenizer

from verl.experimental.agent_loop import AgentLoopManager
from verl.protocol import DataProto
from verl.trainer.main_ppo import create_rl_sampler
from verl.utils.dataset.rl_dataset import RLHFDataset, collate_fn


@pytest.mark.skip(reason="compute score is deprecated and replaced by reward manager worker")
def test_agent_loop_compute_score():
    ray.init(
        runtime_env={
            "env_vars": {
                "TOKENIZERS_PARALLELISM": "true",
                "NCCL_DEBUG": "WARN",
                "VLLM_LOGGING_LEVEL": "INFO",
                "VLLM_USE_V1": "1",
            }
        }
    )

    with initialize_config_dir(config_dir=os.path.abspath("verl/trainer/config")):
        config = compose("ppo_trainer")

    model_path = os.path.expanduser("~/models/Qwen/Qwen2.5-1.5B-Instruct")
    config.data.return_raw_chat = True
    config.actor_rollout_ref.model.path = model_path
    config.actor_rollout_ref.actor.use_dynamic_bsz = True
    config.actor_rollout_ref.rollout.name = os.environ["ROLLOUT_NAME"]
    config.actor_rollout_ref.rollout.mode = "async"
    config.actor_rollout_ref.rollout.enforce_eager = True
    config.actor_rollout_ref.rollout.prompt_length = 1024
    config.actor_rollout_ref.rollout.response_length = 4096
    config.actor_rollout_ref.rollout.skip_tokenizer_init = True

    # 1. init agent loop manager
    agent_loop_manager = AgentLoopManager(config)

    # 2. init dataset and dataloader
    local_folder = os.path.expanduser("~/data/gsm8k/")
    data_files = [os.path.join(local_folder, "train.parquet")]
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    dataset = RLHFDataset(
        data_files=data_files,
        tokenizer=tokenizer,
        config=config.data,
        processor=None,
    )
    batch_size = 128
    sampler = create_rl_sampler(config.data, dataset)
    dataloader = StatefulDataLoader(
        dataset=dataset,
        batch_size=batch_size,
        num_workers=config.data.dataloader_num_workers,
        drop_last=True,
        collate_fn=collate_fn,
        sampler=sampler,
    )

    # 3. generate_sequences with agent loop
    batch_dict = next(iter(dataloader))
    batch = DataProto.from_single_dict(batch_dict)
    gen_batch = agent_loop_manager.generate_sequences(prompts=batch)
    rm_scores = gen_batch.batch["rm_scores"]
    sample_scores = rm_scores.sum(dim=1)
    assert sample_scores.min() == 0.0, f"min score: {sample_scores.min()}"
    assert sample_scores.max() == 1.0, f"max score: {sample_scores.max()}"
    print(f"gsm8k acc: {sample_scores.mean()}")

    print("Test passed!")
    ray.shutdown()


================================================
FILE: verl_distillation/tests/experimental/agent_loop/test_agent_loop_reward_model.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

import pytest
import ray
from hydra import compose, initialize_config_dir
from torchdata.stateful_dataloader import StatefulDataLoader
from transformers import AutoTokenizer

from tests.experimental.agent_loop.agent_utils import AgentLoopManager
from verl.protocol import DataProto
from verl.trainer.main_ppo import create_rl_sampler
from verl.utils.dataset.rl_dataset import RLHFDataset, collate_fn


@pytest.mark.skip(reason="reward model is deprecated and replaced by GRM")
def test_agent_loop_compute_score_with_model():
    ray.init(
        runtime_env={
            "env_vars": {
                "TOKENIZERS_PARALLELISM": "true",
                "NCCL_DEBUG": "WARN",
                "VLLM_LOGGING_LEVEL": "INFO",
                "VLLM_USE_V1": "1",
            }
        }
    )

    with initialize_config_dir(config_dir=os.path.abspath("verl/trainer/config")):
        config = compose("ppo_trainer")

    model_path = os.path.expanduser("~/models/Qwen/Qwen2.5-1.5B-Instruct")
    config.data.return_raw_chat = True
    config.actor_rollout_ref.model.path = model_path
    config.actor_rollout_ref.actor.use_dynamic_bsz = True
    config.actor_rollout_ref.rollout.name = os.environ["ROLLOUT_NAME"]
    config.actor_rollout_ref.rollout.mode = "async"
    config.actor_rollout_ref.rollout.enforce_eager = True
    config.actor_rollout_ref.rollout.prompt_length = 1024
    config.actor_rollout_ref.rollout.response_length = 4096
    config.actor_rollout_ref.rollout.skip_tokenizer_init = True

    config.reward_model.enable = True
    config.reward_model.model.path = model_path
    config.reward_model.use_dynamic_bsz = True
    config.reward_model.forward_max_token_len_per_gpu = 6000
    config.reward_model.micro_batch_size_per_gpu = 40
    config.reward_model.enable_resource_pool = True
    config.reward_model.n_gpus_per_node = 1
    config.reward_model.nnodes = 1
    config.reward_model.model.trust_remote_code = True
    config.reward_model.model.input_tokenizer = None
    config.trainer.n_gpus_per_node = 4
    config.trainer.nnodes = 1

    # 1. init agent loop manager
    agent_loop_manager = AgentLoopManager(config)

    # 2. init dataset and dataloader
    local_folder = os.path.expanduser("~/data/gsm8k/")
    data_files = [os.path.join(local_folder, "train.parquet")]
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    dataset = RLHFDataset(
        data_files=data_files,
        tokenizer=tokenizer,
        config=config.data,
        processor=None,
    )
    batch_size = 128
    sampler = create_rl_sampler(config.data, dataset)
    dataloader = StatefulDataLoader(
        dataset=dataset,
        batch_size=batch_size,
        num_workers=config.data.dataloader_num_workers,
        drop_last=True,
        collate_fn=collate_fn,
        sampler=sampler,
    )

    # 3. generate_sequences with agent loop
    batch_dict = next(iter(dataloader))
    batch = DataProto.from_single_dict(batch_dict)
    gen_batch = agent_loop_manager.generate_sequences(prompts=batch)
    rm_scores = gen_batch.batch["rm_scores"]
    sample_scores = rm_scores.sum(dim=1)
    print(sample_scores)

    ray.shutdown()


================================================
FILE: verl_distillation/tests/experimental/agent_loop/test_basic_agent_loop.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
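A quick illustration of the `rm_scores` convention the two agent-loop reward tests above assert over: `rm_scores` is token-level and shaped like `responses`, so the per-sample score is the sum over response tokens. The tensors below are toy values (shapes and placement invented for illustration; rule-based rewards conventionally land on the final response token):

```python
import torch

# Toy shapes: 2 samples, 6 response tokens each.
batch_size, response_length = 2, 6
rm_scores = torch.zeros(batch_size, response_length)
rm_scores[0, -1] = 1.0  # sample 0 judged correct; reward on last token

# Per-sample score = sum over the token axis, as in the tests above.
sample_scores = rm_scores.sum(dim=1)
assert sample_scores.tolist() == [1.0, 0.0]
print(f"acc: {sample_scores.mean().item()}")  # -> 0.5
```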
import json import os from typing import Any import numpy as np import pytest import ray from omegaconf import DictConfig from transformers.utils import get_json_schema from tests.experimental.agent_loop.agent_utils import init_agent_loop_manager from verl.experimental.agent_loop import AgentLoopManager from verl.experimental.agent_loop.agent_loop import get_trajectory_info from verl.protocol import DataProto from verl.tools.base_tool import BaseTool, OpenAIFunctionToolSchema from verl.tools.schemas import ToolResponse from verl.trainer.ppo.reward import compute_reward, load_reward_manager from verl.utils import hf_tokenizer @pytest.fixture def init_config() -> DictConfig: from hydra import compose, initialize_config_dir with initialize_config_dir(config_dir=os.path.abspath("verl/trainer/config")): config = compose( config_name="ppo_trainer", overrides=[ "actor_rollout_ref.actor.use_dynamic_bsz=true", # test sleep/wake_up with fsdp offload "actor_rollout_ref.actor.fsdp_config.param_offload=True", "actor_rollout_ref.actor.fsdp_config.optimizer_offload=True", "reward_model.reward_manager=dapo", "+reward_model.reward_kwargs.overlong_buffer_cfg.enable=False", "+reward_model.reward_kwargs.overlong_buffer_cfg.len=3072", "+reward_model.reward_kwargs.max_resp_len=4096", ], ) model_path = os.path.expanduser("~/models/Qwen/Qwen2.5-1.5B-Instruct") config.actor_rollout_ref.model.path = model_path config.actor_rollout_ref.rollout.name = os.environ["ROLLOUT_NAME"] config.actor_rollout_ref.rollout.mode = "async" config.actor_rollout_ref.rollout.enforce_eager = True config.actor_rollout_ref.rollout.prompt_length = 4096 config.actor_rollout_ref.rollout.response_length = 4096 config.actor_rollout_ref.rollout.n = 4 config.actor_rollout_ref.rollout.agent.num_workers = 2 config.actor_rollout_ref.rollout.skip_tokenizer_init = True return config def test_single_turn(init_config): ray.init( runtime_env={ "env_vars": { "TOKENIZERS_PARALLELISM": "true", "NCCL_DEBUG": "WARN", "VLLM_LOGGING_LEVEL": "INFO", "VLLM_USE_V1": "1", } } ) agent_loop_manager = AgentLoopManager(init_config) tokenizer = hf_tokenizer(init_config.actor_rollout_ref.model.path) reward_fn = load_reward_manager( init_config, tokenizer, num_examine=0, **init_config.reward_model.get("reward_kwargs", {}) ) raw_prompts = [ [ { "role": "user", "content": "Let's play a role playing game. Your name is Alice, your favorite color is blue.", } ], [{"role": "user", "content": "Let's play a role playing game. 
Your name is Bob, your favorite color is red."}], ] batch = DataProto( non_tensor_batch={ "raw_prompt": np.array(raw_prompts), "agent_name": np.array(["single_turn_agent"] * len(raw_prompts)), "data_source": np.array(["openai/gsm8k"] * len(raw_prompts)), "reward_model": np.array([{"style": "rule", "ground_truth": "1.0"}] * len(raw_prompts)), }, ) n = init_config.actor_rollout_ref.rollout.n batch = batch.repeat(n) result = agent_loop_manager.generate_sequences(prompts=batch) assert len(result) == len(raw_prompts) * n # check result seq_len = result.batch["prompts"].size(1) + result.batch["responses"].size(1) assert result.batch["input_ids"].size(1) == seq_len assert result.batch["attention_mask"].size(1) == seq_len assert result.batch["position_ids"].size(1) == seq_len if init_config.actor_rollout_ref.rollout.calculate_log_probs: assert result.batch["rollout_log_probs"].size(1) == result.batch["responses"].size(1) # check compute score assert result.batch["rm_scores"].shape == result.batch["responses"].shape reward_tensor, reward_extra_info = compute_reward(result, reward_fn) assert reward_tensor.shape == result.batch["responses"].shape assert "acc" in reward_extra_info, f"reward_extra_info {reward_extra_info} should contain 'acc'" assert reward_extra_info["acc"].shape == (len(result),), f"invalid acc: {reward_extra_info['acc']}" # check turns num_turns = result.non_tensor_batch["__num_turns__"] assert np.all(num_turns == 2) print("Test passed!") ray.shutdown() class WeatherTool(BaseTool): def get_current_temperature(self, location: str, unit: str = "celsius"): """Get current temperature at a location. Args: location: The location to get the temperature for, in the format "City, State, Country". unit: The unit to return the temperature in. Defaults to "celsius". (choices: ["celsius", "fahrenheit"]) Returns: the temperature, the location, and the unit in a dict """ print(f"[DEBUG] get_current_temperature: {location}, {unit}") return { "temperature": 26.1, "location": location, "unit": unit, } def get_openai_tool_schema(self) -> OpenAIFunctionToolSchema: schema = get_json_schema(self.get_current_temperature) return OpenAIFunctionToolSchema(**schema) async def execute(self, instance_id: str, parameters: dict[str, Any], **kwargs) -> tuple[ToolResponse, float, dict]: try: result = self.get_current_temperature(**parameters) return ToolResponse(text=json.dumps(result)), 0, {} except Exception as e: return ToolResponse(text=str(e)), 0, {} class WeatherToolWithData(BaseTool): def get_openai_tool_schema(self) -> OpenAIFunctionToolSchema: schema = get_json_schema(self.get_temperature_date) return OpenAIFunctionToolSchema(**schema) def get_temperature_date(self, location: str, date: str, unit: str = "celsius"): """Get temperature at a location and date. Args: location: The location to get the temperature for, in the format "City, State, Country". date: The date to get the temperature for, in the format "Year-Month-Day". unit: The unit to return the temperature in. Defaults to "celsius". 
(choices: ["celsius", "fahrenheit"]) Returns: the temperature, the location, the date and the unit in a dict """ print(f"[DEBUG] get_temperature_date: {location}, {date}, {unit}") return { "temperature": 25.9, "location": location, "date": date, "unit": unit, } async def execute(self, instance_id: str, parameters: dict[str, Any], **kwargs) -> tuple[ToolResponse, float, dict]: try: result = self.get_temperature_date(**parameters) return ToolResponse(text=json.dumps(result)), 0, {} except Exception as e: return ToolResponse(text=str(e)), 0, {} def test_tool_agent(init_config): ray.init( runtime_env={ "env_vars": { "TOKENIZERS_PARALLELISM": "true", "NCCL_DEBUG": "WARN", "VLLM_LOGGING_LEVEL": "INFO", "VLLM_USE_V1": "1", } }, ignore_reinit_error=True, ) # =========================== 1. Init rollout manager =========================== tool_config = { "tools": [ { "class_name": "tests.experimental.agent_loop.test_basic_agent_loop.WeatherTool", "config": {"type": "native"}, }, { "class_name": "tests.experimental.agent_loop.test_basic_agent_loop.WeatherToolWithData", "config": {"type": "native"}, }, ] } tool_config_path = "/tmp/tool_config.json" with open(tool_config_path, "w") as f: json.dump(tool_config, f) n = 2 init_config.actor_rollout_ref.rollout.n = n init_config.actor_rollout_ref.rollout.multi_turn.tool_config_path = tool_config_path init_config.actor_rollout_ref.rollout.multi_turn.max_parallel_calls = 2 init_config.actor_rollout_ref.rollout.calculate_log_probs = True agent_loop_manager = AgentLoopManager(init_config) # =========================== 2. Generate sequences =========================== raw_prompts = [ [ {"role": "user", "content": "How are you?"}, ], [ {"role": "user", "content": "What's the temperature in Los Angeles now?"}, ], [ {"role": "user", "content": "What's the temperature in New York now?"}, ], [ { "role": "system", "content": "You are Qwen, created by Alibaba Cloud. You are a helpful assistant.\n\n" "Current Date: 2024-09-30", }, {"role": "user", "content": "What's the temperature in San Francisco now? 
How about tomorrow?"},
        ],
    ]
    batch = DataProto(
        non_tensor_batch={
            "raw_prompt": np.array([np.array(prompt) for prompt in raw_prompts], dtype=object),
            "agent_name": np.array(["tool_agent"] * len(raw_prompts)),
            "data_source": np.array(["openai/gsm8k"] * len(raw_prompts)),
            "reward_model": np.array([{"style": "rule", "ground_truth": "1.0"}] * len(raw_prompts)),
        },
    )
    batch = batch.repeat(n)
    result = agent_loop_manager.generate_sequences(prompts=batch)
    assert len(result) == len(raw_prompts) * n

    # Check turns
    num_turns = result.non_tensor_batch["__num_turns__"]
    print(f"num_turns: {num_turns}")
    for i in range(len(num_turns)):
        if i // n == 0:
            # [user, assistant]
            assert num_turns[i] == 2
        else:
            # [user, assistant, tool, assistant]
            assert num_turns[i] == 4

    # Check response_mask
    tokenizer = hf_tokenizer(init_config.actor_rollout_ref.model.path)
    responses = result.batch["responses"]
    response_mask = result.batch["response_mask"]
    attention_mask = result.batch["attention_mask"]
    assert result.batch["rm_scores"].size(1) == responses.size(1)
    assert responses.size() == response_mask.size(), f"{responses.size()} != {response_mask.size()}"
    assert result.batch["rollout_log_probs"].size(1) == result.batch["responses"].size(1)
    response_length = response_mask.size(1)

    for i in range(len(responses)):
        # response with tool response
        valid_tokens = responses[i][attention_mask[i][-response_length:].bool()]
        response_with_obs = tokenizer.decode(valid_tokens)
        # response without tool response
        valid_tokens = responses[i][response_mask[i].bool()]
        response_without_obs = tokenizer.decode(valid_tokens)
        assert "<tool_response>" not in response_without_obs, (
            f"found <tool_response> in response: {response_without_obs}"
        )
        assert "</tool_response>" not in response_without_obs, (
            f"found </tool_response> in response: {response_without_obs}"
        )
        print("=========================")
        print(response_with_obs)
        print("---")
        print(response_without_obs)
    print("Test passed!")
    ray.shutdown()


def test_tool_agent_with_interaction(init_config):
    ray.init(
        runtime_env={
            "env_vars": {
                "TOKENIZERS_PARALLELISM": "true",
                "NCCL_DEBUG": "WARN",
                "VLLM_LOGGING_LEVEL": "INFO",
                "VLLM_USE_V1": "1",
            }
        }
    )

    # =========================== 1. Init rollout manager ===========================
    tool_config = {
        "tools": [
            {
                "class_name": "tests.experimental.agent_loop.test_basic_agent_loop.WeatherTool",
                "config": {"type": "native"},
            },
            {
                "class_name": "tests.experimental.agent_loop.test_basic_agent_loop.WeatherToolWithData",
                "config": {"type": "native"},
            },
        ]
    }
    tool_config_path = "/tmp/tool_config.json"
    with open(tool_config_path, "w") as f:
        json.dump(tool_config, f)

    interaction_config = {
        "interaction": [
            {"name": "weather", "class_name": "verl.interactions.weather_interaction.WeatherInteraction", "config": {}}
        ]
    }
    interaction_config_path = "/tmp/interaction_config.json"
    with open(interaction_config_path, "w") as f:
        json.dump(interaction_config, f)

    n = 2
    init_config.actor_rollout_ref.rollout.n = n
    init_config.actor_rollout_ref.rollout.multi_turn.tool_config_path = tool_config_path
    init_config.actor_rollout_ref.rollout.multi_turn.interaction_config_path = interaction_config_path
    init_config.actor_rollout_ref.rollout.multi_turn.max_parallel_calls = 2
    agent_loop_manager = init_agent_loop_manager(init_config)

    # =========================== 2.
Generate sequences =========================== raw_prompts = [ [ {"role": "user", "content": "How are you?"}, ], [ {"role": "user", "content": "What's the temperature in Los Angeles now?"}, ], [ {"role": "user", "content": "What's the temperature in New York now?"}, ], [ { "role": "system", "content": "You are Qwen, created by Alibaba Cloud. You are a helpful assistant.\n\n" "Current Date: 2024-09-30", }, {"role": "user", "content": "What's the temperature in San Francisco now? How about tomorrow?"}, ], ] batch = DataProto( non_tensor_batch={ "raw_prompt": np.array([np.array(prompt) for prompt in raw_prompts], dtype=object), "agent_name": np.array(["tool_agent"] * len(raw_prompts)), "data_source": np.array(["openai/gsm8k"] * len(raw_prompts)), "reward_model": np.array([{"style": "rule", "ground_truth": "1.0"}] * len(raw_prompts)), "extra_info": np.array( [ {"interaction_kwargs": {"name": "weather"}}, {"interaction_kwargs": {"name": "weather"}}, {"interaction_kwargs": {"name": "weather"}}, {"interaction_kwargs": {"name": "weather"}}, ] ), }, ) batch = batch.repeat(n) result = agent_loop_manager.generate_sequences(prompts=batch) assert len(result) == len(raw_prompts) * n # Check turns num_turns = result.non_tensor_batch["__num_turns__"] print(f"num_turns: {num_turns}") for i in range(len(num_turns)): if i // n == 0: # [user, assistant, user] assert num_turns[i] == 3 else: # [user, assistant, tool, assistant, user] assert num_turns[i] == 5 # Check response_mask tokenizer = hf_tokenizer(init_config.actor_rollout_ref.model.path) responses = result.batch["responses"] response_mask = result.batch["response_mask"] attention_mask = result.batch["attention_mask"] assert responses.size() == response_mask.size(), f"{responses.size()} != {response_mask.size()}" response_length = response_mask.size(1) for i in range(len(responses)): # response with tool response valid_tokens = responses[i][attention_mask[i][-response_length:].bool()] response_with_obs = tokenizer.decode(valid_tokens) # response without tool response valid_tokens = responses[i][response_mask[i].bool()] response_without_obs = tokenizer.decode(valid_tokens) assert "\udb82\udc89" not in response_without_obs, f"found \udb82\udc89 in response: {response_without_obs}" assert "\udb82\udc8a" not in response_without_obs, f"found \udb82\udc8a in response: {response_without_obs}" print("=========================") print(response_with_obs) print("---") print(response_without_obs) print("Test passed!") ray.shutdown() @pytest.mark.asyncio async def test_get_trajectory_info(): """Tests the get_trajectory_info method.""" # Initialize the class to set up class-level attributes step = 10 index = [1, 1, 3, 3] expected_info = [ {"step": step, "sample_index": 1, "rollout_n": 0, "validate": False}, {"step": step, "sample_index": 1, "rollout_n": 1, "validate": False}, {"step": step, "sample_index": 3, "rollout_n": 0, "validate": False}, {"step": step, "sample_index": 3, "rollout_n": 1, "validate": False}, ] trajectory_info = await get_trajectory_info(step, index, validate=False) assert trajectory_info == expected_info ================================================ FILE: verl_distillation/tests/experimental/agent_loop/test_gpt_oss_tool_parser.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytest from transformers import AutoTokenizer from verl.experimental.agent_loop.tool_parser import GptOssToolParser @pytest.mark.asyncio @pytest.mark.skip(reason="local test only") async def test_gpt_oss_tool_parser(): example_text = """ <|start|>assistant<|channel|>commentary to=functions.get_current_weather \ <|constrain|>json<|message|>{"location": "Tokyo"}<|call|> <|start|>functions.get_current_weather to=assistant<|channel|>commentary<|message|>\ { "temperature": 20, "sunny": true }<|end|>""" tokenizer = AutoTokenizer.from_pretrained("openai/gpt-oss-20b") response_ids = tokenizer.encode(example_text) tool_parser = GptOssToolParser(tokenizer) _, function_calls = await tool_parser.extract_tool_calls(response_ids) assert len(function_calls) == 1 assert function_calls[0].name == "get_current_weather" assert function_calls[0].arguments == '{"location": "Tokyo"}' ================================================ FILE: verl_distillation/tests/experimental/agent_loop/test_multi_modal.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
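For context on the GPT-OSS tool-parser test above: verl's `GptOssToolParser` operates on token ids, but the harmony-format text itself can be dissected with a simple pattern. The sketch below is a hypothetical, simplified regex (not the library's implementation) over the same example string:

```python
import re

# Same example turn as in the test above: the function name follows
# "to=functions." and the JSON arguments sit between <|message|> and <|call|>.
text = (
    "<|start|>assistant<|channel|>commentary to=functions.get_current_weather "
    '<|constrain|>json<|message|>{"location": "Tokyo"}<|call|>'
)
m = re.search(r"to=functions\.(\w+).*?<\|message\|>(\{.*?\})<\|call\|>", text, re.S)
assert m is not None
name, arguments = m.group(1), m.group(2)
assert name == "get_current_weather"
assert arguments == '{"location": "Tokyo"}'
```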
import json import os from typing import Any import numpy as np import pytest import ray from omegaconf import DictConfig from PIL import Image from transformers.utils import get_json_schema from verl.experimental.agent_loop import AgentLoopManager from verl.protocol import DataProto from verl.tools.base_tool import BaseTool, OpenAIFunctionToolSchema from verl.tools.schemas import ToolResponse from verl.utils import hf_tokenizer @pytest.fixture def init_config() -> DictConfig: from hydra import compose, initialize_config_dir with initialize_config_dir(config_dir=os.path.abspath("verl/trainer/config")): config = compose( config_name="ppo_trainer", overrides=[ "actor_rollout_ref.actor.use_dynamic_bsz=true", # test sleep/wake_up with fsdp offload "actor_rollout_ref.actor.fsdp_config.param_offload=True", "actor_rollout_ref.actor.fsdp_config.optimizer_offload=True", ], ) model_path = os.path.expanduser("~/models/Qwen/Qwen2.5-VL-3B-Instruct") config.actor_rollout_ref.model.path = model_path config.actor_rollout_ref.rollout.name = os.environ["ROLLOUT_NAME"] config.actor_rollout_ref.rollout.mode = "async" config.actor_rollout_ref.rollout.enforce_eager = True config.actor_rollout_ref.rollout.prompt_length = 4096 config.actor_rollout_ref.rollout.response_length = 4096 config.actor_rollout_ref.rollout.n = 4 config.actor_rollout_ref.rollout.agent.num_workers = 2 config.actor_rollout_ref.rollout.skip_tokenizer_init = True return config class ImageGeneratorTool(BaseTool): def generate_image(self, description: str, size: str = "256x256"): """Generate a simple image based on description. Args: description: The description of the image to generate. size: The size of the image. Defaults to "256x256". (choices: ["256x256", "512x512"]) Returns: A generated image """ print(f"[DEBUG] generate_image: {description}, {size}") # Create a simple colored image for testing width, height = map(int, size.split("x")) # Create different colors based on description if "red" in description.lower(): color = (255, 0, 0) elif "blue" in description.lower(): color = (0, 0, 255) elif "green" in description.lower(): color = (0, 255, 0) else: color = (128, 128, 128) # gray # Create image image = Image.new("RGB", (width, height), color) # Add some pattern to make it more interesting for i in range(0, width, 50): for j in range(0, height, 50): # Add white squares in a grid pattern for x in range(i, min(i + 20, width)): for y in range(j, min(j + 20, height)): image.putpixel((x, y), (255, 255, 255)) return image def get_openai_tool_schema(self) -> OpenAIFunctionToolSchema: schema = get_json_schema(self.generate_image) return OpenAIFunctionToolSchema(**schema) async def execute(self, instance_id: str, parameters: dict[str, Any], **kwargs) -> tuple[ToolResponse, float, dict]: try: image = self.generate_image(**parameters) # Return the PIL Image directly - the framework should handle the conversion return ToolResponse(image=[image]), 0, {} except Exception as e: return ToolResponse(text=str(e)), 0, {} def test_multimodal_tool_agent(init_config): """Test agent loop with multimodal tool that returns images using Qwen VL model.""" ray.init( runtime_env={ "env_vars": { "TOKENIZERS_PARALLELISM": "true", "NCCL_DEBUG": "WARN", "VLLM_LOGGING_LEVEL": "INFO", "VLLM_USE_V1": "1", } }, ignore_reinit_error=True, ) # Add custom chat template to enable tool calling support (same as recipe/deepeyes) template_path = os.path.join(os.path.dirname(__file__), "qwen_vl_tool_chat_template.jinja2") with open(template_path, encoding="utf-8") as f: 
custom_chat_template = f.read() init_config.actor_rollout_ref.model.custom_chat_template = custom_chat_template # =========================== 1. Init rollout manager with image tool =========================== tool_config = { "tools": [ { "class_name": "tests.experimental.agent_loop.test_multi_modal.ImageGeneratorTool", "config": {"type": "native"}, }, ] } tool_config_path = "/tmp/multimodal_tool_config.json" with open(tool_config_path, "w") as f: json.dump(tool_config, f) n = 2 init_config.actor_rollout_ref.rollout.n = n init_config.actor_rollout_ref.rollout.multi_turn.tool_config_path = tool_config_path init_config.actor_rollout_ref.rollout.multi_turn.max_parallel_calls = 1 init_config.actor_rollout_ref.rollout.multi_turn.max_user_turns = 1 agent_loop_manager = AgentLoopManager(init_config) # =========================== 2. Generate sequences with multimodal prompts =========================== raw_prompts = [ [ {"role": "user", "content": "How are you?"}, ], [ {"role": "user", "content": "Please generate a red image for me."}, ], [ {"role": "user", "content": "Can you create a blue picture with size 512x512?"}, ], [ { "role": "system", "content": ( "You are Qwen VL, created by Alibaba Cloud. You are a helpful " "assistant that can generate and analyze images." ), }, {"role": "user", "content": "Generate a green landscape image and describe what you see in it."}, ], ] batch = DataProto( non_tensor_batch={ "raw_prompt": np.array([np.array(prompt) for prompt in raw_prompts], dtype=object), "agent_name": np.array(["tool_agent"] * len(raw_prompts)), "data_source": np.array(["openai/gsm8k"] * len(raw_prompts)), "reward_model": np.array([{"style": "rule", "ground_truth": "1.0"}] * len(raw_prompts)), }, ) batch = batch.repeat(n) result = agent_loop_manager.generate_sequences(prompts=batch) assert len(result) == len(raw_prompts) * n # Check turns num_turns = result.non_tensor_batch["__num_turns__"] print(f"num_turns: {num_turns}") for i in range(len(num_turns)): if i // n == 0: # First prompt: "How are you?" 
            # - should have 2 turns [user, assistant]
            assert num_turns[i] == 2, f"Expected 2 turns but got {num_turns[i]} for sample {i}"
        else:
            # Tool-calling prompts should have 4 turns [user, assistant, tool, assistant]
            assert num_turns[i] == 4, f"Expected 4 turns but got {num_turns[i]} for sample {i}"

    # Check that images were properly returned in the tool responses
    tokenizer = hf_tokenizer(init_config.actor_rollout_ref.model.path)
    responses = result.batch["responses"]
    response_mask = result.batch["response_mask"]
    attention_mask = result.batch["attention_mask"]
    assert responses.size() == response_mask.size(), f"{responses.size()} != {response_mask.size()}"
    response_length = response_mask.size(1)

    image_found_count = 0
    for i in range(len(responses)):
        # response with tool response (including images)
        valid_tokens = responses[i][attention_mask[i][-response_length:].bool()]
        response_with_obs = tokenizer.decode(valid_tokens)
        # response without tool response
        valid_tokens = responses[i][response_mask[i].bool()]
        response_without_obs = tokenizer.decode(valid_tokens)

        # Check that tool responses were properly masked out from training
        assert "<tool_response>" not in response_without_obs, (
            f"found <tool_response> in response: {response_without_obs}"
        )
        assert "</tool_response>" not in response_without_obs, (
            f"found </tool_response> in response: {response_without_obs}"
        )

        # Check that images were included in the full response
        if "<tool_response>" in response_with_obs or "image" in response_with_obs.lower():
            image_found_count += 1

        print("=========================")
        print("Response with tool observations:")
        print(response_with_obs)
        print("---")
        print("Response without tool observations:")
        print(response_without_obs)

    # Verify that tool-calling responses contained image-related content
    print(f"Found {image_found_count} responses with image content out of {len(responses)}")
    # We should have at least some image content from the tool-calling prompts
    # Note: First prompt might not use tools, so we don't expect 100% image content
    expected_tool_calls = sum(1 for i in range(len(num_turns)) if num_turns[i] == 4)
    assert image_found_count >= 0, (
        f"No image-related content found, but expected at least some from {expected_tool_calls} tool calls"
    )

    print("Multimodal tool test passed!")
    ray.shutdown()


def test_multimodal_single_turn_agent(init_config):
    """Test single turn agent loop with multimodal inputs using Qwen VL model."""
    ray.init(
        runtime_env={
            "env_vars": {
                "TOKENIZERS_PARALLELISM": "true",
                "NCCL_DEBUG": "WARN",
                "VLLM_LOGGING_LEVEL": "INFO",
                "VLLM_USE_V1": "1",
            }
        },
        ignore_reinit_error=True,
    )

    # =========================== 1. Init rollout manager ===========================
    n = 2
    init_config.actor_rollout_ref.rollout.n = n
    init_config.actor_rollout_ref.rollout.multi_turn.max_parallel_calls = 1
    init_config.actor_rollout_ref.rollout.multi_turn.max_user_turns = 1
    agent_loop_manager = AgentLoopManager(init_config)

    # =========================== 2. Generate sequences with multimodal prompts ===========================
    # Create a simple test image
    test_image = Image.new("RGB", (256, 256), (100, 150, 200))
    test_image2 = Image.new("RGB", (512, 512), (100, 150, 200))

    raw_prompts = [
        [
            {"role": "user", "content": "Hello, how are you?"},
        ],
        [
            {
                "role": "user",
                "content": [
                    {"type": "image"},
                    {"type": "text", "text": "What color is this image?"},
                ],
            },
        ],
        [
            {
                "role": "system",
                "content": "You are Qwen VL, created by Alibaba Cloud.
You are a helpful assistant.", }, { "role": "user", "content": [ {"type": "image"}, {"type": "text", "text": "Describe this image in detail."}, ], }, ], ] # Prepare multi_modal_data for each prompt multi_modal_data_list = [ None, # First prompt: text only {"image": test_image}, # Second prompt: with image {"image": test_image2}, # Third prompt: with image ] batch = DataProto( non_tensor_batch={ "raw_prompt": np.array([np.array(prompt) for prompt in raw_prompts], dtype=object), "agent_name": np.array(["single_turn_agent"] * len(raw_prompts)), "data_source": np.array(["openai/gsm8k"] * len(raw_prompts)), "reward_model": np.array([{"style": "rule", "ground_truth": "1.0"}] * len(raw_prompts)), }, ) # Add multi_modal_data to batch multi_modal_data_array = np.array([data if data else {} for data in multi_modal_data_list], dtype=object) batch.non_tensor_batch["multi_modal_data"] = multi_modal_data_array batch = batch.repeat(n) result = agent_loop_manager.generate_sequences(prompts=batch) assert len(result) == len(raw_prompts) * n # Check turns - all should be single turn (2: user + assistant) num_turns = result.non_tensor_batch["__num_turns__"] print(f"num_turns: {num_turns}") for i in range(len(num_turns)): assert num_turns[i] == 2, f"Expected 2 turns but got {num_turns[i]} for sample {i}" # Verify responses tokenizer = hf_tokenizer(init_config.actor_rollout_ref.model.path) prompts = result.batch["prompts"] responses = result.batch["responses"] response_mask = result.batch["response_mask"] assert responses.size() == response_mask.size(), f"{responses.size()} != {response_mask.size()}" # Check for image pads in prompts image_pad_count = 0 for i in range(len(prompts)): prompt_ids = prompts[i][prompts[i] != tokenizer.pad_token_id].tolist() prompt_text = tokenizer.decode(prompt_ids) # Check if this sample should have image pads (samples with index 1 and 2 in each repeat have images) sample_idx = i // n has_image_pad = "<|image_pad|>" in prompt_text or "<|vision_start|>" in prompt_text print("=========================") print(f"Sample {i} (original prompt index: {sample_idx}):") print(f"Prompt length: {len(prompt_ids)} tokens") print(f"Has image_pad: {has_image_pad}") if sample_idx != 0: # Samples 1 and 2 should have images if has_image_pad: image_pad_count += 1 # Count the number of image_pad tokens num_image_pads = prompt_text.count("<|image_pad|>") print(f"Number of <|image_pad|> tokens: {num_image_pads}") else: print("WARNING: Expected image_pad but not found!") # Show first 200 chars of prompt print(f"Prompt text (first 200 chars): {prompt_text[:200]}...") for i in range(len(responses)): valid_tokens = responses[i][response_mask[i].bool()] response_text = tokenizer.decode(valid_tokens) print(f"Sample {i} response: {response_text[:100]}...") # Verify that we found image pads in multimodal samples expected_multimodal_samples = 2 * n # 2 prompts with images, repeated n times print(f"\nFound {image_pad_count} samples with image_pad out of {expected_multimodal_samples} expected") assert image_pad_count > 0, "No image_pad tokens found in multimodal samples!" print("Single turn multimodal test passed!") ray.shutdown() def test_multimodal_partial_single_turn_agent(init_config): """Test partial single turn agent loop with multimodal inputs using Qwen VL model.""" # TODO(baiyan): # see verl/recipe/fully_async_policy/agent_loop/partial_single_turn_agent_loop.py for more details. 
# if use_correct_processor=True, the test will pass but the async training will hang, so I disable this test # for now return ray.init( runtime_env={ "env_vars": { "TOKENIZERS_PARALLELISM": "true", "NCCL_DEBUG": "WARN", "VLLM_LOGGING_LEVEL": "INFO", "VLLM_USE_V1": "1", } }, ignore_reinit_error=True, ) from recipe.fully_async_policy.agent_loop import FullyAsyncAgentLoopManager # =========================== 1. Init rollout manager =========================== n = 2 init_config.actor_rollout_ref.rollout.n = n init_config.actor_rollout_ref.rollout.multi_turn.max_parallel_calls = 1 init_config.actor_rollout_ref.rollout.multi_turn.max_user_turns = 1 import asyncio loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) agent_loop_manager = loop.run_until_complete(FullyAsyncAgentLoopManager.create(init_config)) # =========================== 2. Generate sequences with multimodal prompts =========================== # Create a simple test image test_image = Image.new("RGB", (256, 256), (200, 100, 50)) test_image2 = Image.new("RGB", (512, 512), (100, 150, 200)) raw_prompts = [ [ {"role": "user", "content": "What is the capital of France?"}, ], [ { "role": "user", "content": [ {"type": "image"}, {"type": "text", "text": "What do you see in this image?"}, ], }, ], [ { "role": "system", "content": "You are Qwen VL, a helpful multimodal assistant.", }, { "role": "user", "content": [ {"type": "image"}, {"type": "text", "text": "Analyze the colors in this image."}, ], }, ], ] # Prepare multi_modal_data for each prompt multi_modal_data_list = [ None, # First prompt: text only {"image": test_image}, # Second prompt: with image {"image": test_image2}, # Third prompt: with image ] batch = DataProto( non_tensor_batch={ "raw_prompt": np.array([np.array(prompt) for prompt in raw_prompts], dtype=object), "agent_name": np.array(["partial_single_turn_agent"] * len(raw_prompts)), "data_source": np.array(["openai/gsm8k"] * len(raw_prompts)), "reward_model": np.array([{"style": "rule", "ground_truth": "1.0"}] * len(raw_prompts)), }, ) # Add multi_modal_data to batch multi_modal_data_array = np.array([data if data else {} for data in multi_modal_data_list], dtype=object) batch.non_tensor_batch["multi_modal_data"] = multi_modal_data_array batch = batch.repeat(n) result = agent_loop_manager.generate_sequences(prompts=batch) assert len(result) == len(raw_prompts) * n # Check turns - all should be single turn (2: user + assistant) num_turns = result.non_tensor_batch["__num_turns__"] print(f"num_turns: {num_turns}") for i in range(len(num_turns)): assert num_turns[i] == 2, f"Expected 2 turns but got {num_turns[i]} for sample {i}" # Verify responses tokenizer = hf_tokenizer(init_config.actor_rollout_ref.model.path) prompts = result.batch["prompts"] responses = result.batch["responses"] response_mask = result.batch["response_mask"] assert responses.size() == response_mask.size(), f"{responses.size()} != {response_mask.size()}" # Check for image pads in prompts image_pad_count = 0 for i in range(len(prompts)): prompt_ids = prompts[i][prompts[i] != tokenizer.pad_token_id].tolist() prompt_text = tokenizer.decode(prompt_ids) # Check if this sample should have image pads (samples with index 1 and 2 in each repeat have images) sample_idx = i // n has_image_pad = "<|image_pad|>" in prompt_text or "<|vision_start|>" in prompt_text print("=========================") print(f"Sample {i} (original prompt index: {sample_idx}):") print(f"Prompt length: {len(prompt_ids)} tokens") print(f"Has image_pad: {has_image_pad}") if sample_idx != 
0: # Samples 1 and 2 should have images if has_image_pad: image_pad_count += 1 # Count the number of image_pad tokens num_image_pads = prompt_text.count("<|image_pad|>") print(f"Number of <|image_pad|> tokens: {num_image_pads}") else: print("WARNING: Expected image_pad but not found!") # Show first 200 chars of prompt print(f"Prompt text (first 200 chars): {prompt_text[:200]}...") for i in range(len(responses)): valid_tokens = responses[i][response_mask[i].bool()] response_text = tokenizer.decode(valid_tokens) print(f"Sample {i} response: {response_text[:100]}...") # Verify that we found image pads in multimodal samples expected_multimodal_samples = 2 * n # 2 prompts with images, repeated n times print(f"\nFound {image_pad_count} samples with image_pad out of {expected_multimodal_samples} expected") assert image_pad_count > 0, "No image_pad tokens found in multimodal samples!" print("Partial single turn multimodal test passed!") ray.shutdown() ================================================ FILE: verl_distillation/tests/experimental/agent_loop/test_standalone_rollout.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import asyncio import os import pytest import ray from omegaconf import DictConfig from openai import AsyncOpenAI, OpenAI from tests.experimental.agent_loop.agent_utils import init_agent_loop_manager from verl.workers.rollout.replica import get_rollout_replica_class @pytest.fixture def init_config() -> DictConfig: from hydra import compose, initialize_config_dir with initialize_config_dir(config_dir=os.path.abspath("verl/trainer/config")): config = compose(config_name="ppo_trainer") config.trainer.n_gpus_per_node = 4 config.trainer.nnodes = 2 config.actor_rollout_ref.actor.use_dynamic_bsz = True config.actor_rollout_ref.model.path = os.path.expanduser("~/models/Qwen/Qwen2.5-1.5B-Instruct") config.actor_rollout_ref.rollout.name = os.environ["ROLLOUT_NAME"] config.actor_rollout_ref.rollout.mode = "async" config.actor_rollout_ref.rollout.skip_tokenizer_init = False return config @pytest.mark.asyncio @pytest.mark.parametrize("tp_size", [2, 4]) async def test_standalone_rollout(init_config, tp_size): """Test standalone rollout single node and multi nodes.""" ray.init( runtime_env={ "env_vars": { "TOKENIZERS_PARALLELISM": "true", "NCCL_DEBUG": "WARN", "VLLM_LOGGING_LEVEL": "INFO", "VLLM_USE_V1": "1", } } ) init_config.actor_rollout_ref.rollout.tensor_model_parallel_size = tp_size num_replicas = (init_config.trainer.n_gpus_per_node * init_config.trainer.nnodes) // tp_size rollout_config = init_config.actor_rollout_ref.rollout model_config = init_config.actor_rollout_ref.model # create standalone rollout server rollout_server_class = get_rollout_replica_class(init_config.actor_rollout_ref.rollout.name) rollout_servers = [ rollout_server_class( replica_rank=replica_rank, config=rollout_config, model_config=model_config, gpus_per_node=2 ) for replica_rank in range(num_replicas) ] await 
asyncio.gather(*[server.init_standalone() for server in rollout_servers]) server_handles = [server._server_handle for server in rollout_servers] server_addresses = [server._server_address for server in rollout_servers] assert len(server_handles) == num_replicas assert len(server_addresses) == num_replicas os.environ.pop("HTTPS_PROXY", None) os.environ.pop("HTTP_PROXY", None) os.environ.pop("NO_PROXY", None) client = AsyncOpenAI( api_key="123-abc", base_url=f"http://{server_addresses[0]}/v1", ) completion = await client.chat.completions.create( model=init_config.actor_rollout_ref.model.path, messages=[{"role": "user", "content": "What can you do?"}], ) print(completion.choices[0].message.content) ray.shutdown() @pytest.mark.skip(reason="local test only") def test_hybrid_rollout_with_ep(init_config): """Test hybrid rollout with expert parallelism, TP=2, DP=4, EP=8.""" ray.init( runtime_env={ "env_vars": { "TOKENIZERS_PARALLELISM": "true", "NCCL_DEBUG": "WARN", "VLLM_LOGGING_LEVEL": "INFO", "VLLM_USE_V1": "1", } } ) model_path = os.path.expanduser("~/models/Qwen/Qwen3-30B-A3B-Instruct-2507") init_config.actor_rollout_ref.model.path = model_path # parallelism config init_config.actor_rollout_ref.rollout.tensor_model_parallel_size = 2 init_config.actor_rollout_ref.rollout.data_parallel_size = 4 init_config.actor_rollout_ref.rollout.expert_parallel_size = 8 # 1. init hybrid worker: FSDP+rollout # - build FSDP model and optimizer # - offload FSDP model and optimizer, build rollout # - sleep rollout and load FSDP model and optimizer agent_loop_manager = init_agent_loop_manager(init_config) # 2. wake up rollout # - wake_up weights # - load_weights from FSDP # - wake_up kv_cache agent_loop_manager.wake_up() # 3. test async openai call server_address = agent_loop_manager.server_addresses[0] client = OpenAI( api_key="123-abc", base_url=f"http://{server_address}/v1", ) sampling_params = { "temperature": 1.0, "top_p": 1.0, "max_tokens": 512, } response = client.chat.completions.create( model=model_path, messages=[{"role": "user", "content": "What can you do?"}], **sampling_params, ) completion = response.choices[0].message.content print(f"response: {completion}") print("Test passed!") ray.shutdown() ================================================ FILE: verl_distillation/tests/experimental/reward/reward_fn.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import aiohttp from openai.types.chat import ChatCompletion from transformers import PreTrainedTokenizer GRM_PROMPT_TEMPLATE = """ You are given a problem and a proposed solution. Problem: {problem} Solution: {solution} Please evaluate how well the solution addresses the problem. Give a score from 1 to 10, where: - 1 means the solution is completely irrelevant or incorrect. - 5 means the solution is partially correct but incomplete or not well reasoned. - 10 means the solution is fully correct, well-reasoned, and directly solves the problem.
Only output the score as a single number (integer). """.strip() async def chat_complete(router_address: str, chat_complete_request: dict): url = f"http://{router_address}/v1/chat/completions" timeout = aiohttp.ClientTimeout(total=None) # use `async with` so the session is always closed, even if creating it or posting fails async with aiohttp.ClientSession(timeout=timeout) as session: async with session.post(url, json=chat_complete_request) as resp: output = await resp.text() output = json.loads(output) return ChatCompletion(**output) async def compute_score_gsm8k( data_source: str, solution_str: str, ground_truth: str, extra_info: dict, reward_router_address: str, reward_model_tokenizer: PreTrainedTokenizer, ): """Compute the reward score.""" grm_prompt = GRM_PROMPT_TEMPLATE.format(problem=extra_info["question"], solution=solution_str) messages = [{"role": "user", "content": grm_prompt}] sampling_params = {"temperature": 0.7, "top_p": 0.8, "max_tokens": 4096} model_name = os.path.expanduser("~/models/Qwen/Qwen2.5-1.5B-Instruct") chat_complete_request = { "messages": messages, "model": model_name, **sampling_params, } result = await chat_complete( router_address=reward_router_address, chat_complete_request=chat_complete_request, ) grm_response = result.choices[0].message.content try: score = int(grm_response.split("\n\n")[-1].strip()) except Exception: score = 0 return {"score": score, "acc": score == 10} ================================================ FILE: verl_distillation/tests/experimental/reward/test_agent_loop_reward_manager.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
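# --- Illustrative sketch (not part of the original file): calling the custom reward
# function from reward_fn.py above directly. The router address is a placeholder
# assumption; a live OpenAI-compatible reward router must be serving there for the
# call to succeed.
import asyncio

from tests.experimental.reward.reward_fn import compute_score_gsm8k


async def _demo_reward_fn():
    result = await compute_score_gsm8k(
        data_source="openai/gsm8k",
        solution_str="#### 42",
        ground_truth="42",
        extra_info={"question": "What is 40 + 2?"},
        reward_router_address="127.0.0.1:8000",  # placeholder address, an assumption
        reward_model_tokenizer=None,  # not referenced inside compute_score_gsm8k above
    )
    print(result)  # e.g. {"score": 10, "acc": True}


# asyncio.run(_demo_reward_fn())  # requires a live reward router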
import os import ray from hydra import compose, initialize_config_dir from torchdata.stateful_dataloader import StatefulDataLoader from transformers import AutoTokenizer from verl.experimental.agent_loop import AgentLoopManager from verl.protocol import DataProto from verl.trainer.main_ppo import create_rl_sampler from verl.utils.dataset.rl_dataset import RLHFDataset, collate_fn def test_agent_loop_reward_manager(): ray.init( runtime_env={ "env_vars": { "TOKENIZERS_PARALLELISM": "true", "NCCL_DEBUG": "WARN", "VLLM_LOGGING_LEVEL": "INFO", "VLLM_USE_V1": "1", } } ) with initialize_config_dir(config_dir=os.path.abspath("recipe/fapo/config")): config = compose("rm_config") rollout_model_path = os.path.expanduser("~/models/Qwen/Qwen2.5-0.5B-Instruct") reward_model_path = os.path.expanduser("~/models/Qwen/Qwen2.5-1.5B-Instruct") # actor_rollout_ref config config.data.return_raw_chat = True config.data.max_prompt_length = 1024 config.data.max_response_length = 4096 config.actor_rollout_ref.model.path = rollout_model_path config.actor_rollout_ref.actor.use_dynamic_bsz = True config.actor_rollout_ref.rollout.name = os.getenv("ROLLOUT_NAME", "vllm") config.actor_rollout_ref.rollout.mode = "async" config.actor_rollout_ref.rollout.tensor_model_parallel_size = 2 config.actor_rollout_ref.rollout.gpu_memory_utilization = 0.9 config.actor_rollout_ref.rollout.enforce_eager = True config.actor_rollout_ref.rollout.prompt_length = 1024 config.actor_rollout_ref.rollout.response_length = 4096 config.actor_rollout_ref.rollout.skip_tokenizer_init = True config.trainer.n_gpus_per_node = 4 config.trainer.nnodes = 1 config.reward_model.reward_manager = "dapo" config.reward_model.enable = True config.reward_model.enable_resource_pool = True config.reward_model.n_gpus_per_node = 4 config.reward_model.nnodes = 1 config.reward_model.model.path = reward_model_path config.reward_model.rollout.name = os.getenv("ROLLOUT_NAME", "vllm") config.reward_model.rollout.gpu_memory_utilization = 0.9 config.reward_model.rollout.tensor_model_parallel_size = 2 config.reward_model.rollout.skip_tokenizer_init = False config.reward_model.rollout.prompt_length = 5120 config.reward_model.rollout.response_length = 4096 config.custom_reward_function.path = "tests/experimental/reward/reward_fn.py" config.custom_reward_function.name = "compute_score_gsm8k" # 1. init reward model manager agent_loop_manager = AgentLoopManager(config) # 2. init test data local_folder = os.path.expanduser("~/data/gsm8k/") data_files = [os.path.join(local_folder, "train.parquet")] tokenizer = AutoTokenizer.from_pretrained(rollout_model_path) dataset = RLHFDataset( data_files=data_files, tokenizer=tokenizer, config=config.data, processor=None, ) batch_size = 64 sampler = create_rl_sampler(config.data, dataset) dataloader = StatefulDataLoader( dataset=dataset, batch_size=batch_size, num_workers=config.data.dataloader_num_workers, drop_last=True, collate_fn=collate_fn, sampler=sampler, ) # 3. generate responses batch_dict = next(iter(dataloader)) batch = DataProto.from_single_dict(batch_dict) gen_batch = agent_loop_manager.generate_sequences(prompts=batch) rm_scores = gen_batch.batch["rm_scores"] sample_scores = rm_scores.sum(dim=1) print(sample_scores) ray.shutdown() ================================================ FILE: verl_distillation/tests/experimental/reward/test_reward_model.py ================================================ # Copyright 2024 Bytedance Ltd. 
and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import ray from hydra import compose, initialize_config_dir from verl.experimental.reward import RewardModelManager from verl.protocol import DataProto GRM_PROMPT_TEMPLATE = """ You are given a problem and a proposed solution. Problem: {problem} Solution: {solution} Please evaluate how well the solution addresses the problem. Give a score from 1 to 10, where: - 1 means the solution is completely irrelevant or incorrect. - 5 means the solution is partially correct but incomplete or not well reasoned. - 10 means the solution is fully correct, well-reasoned, and directly solves the problem. Only output the score as a single number (integer). """.strip() def create_data_samples() -> tuple[list[dict], DataProto]: convs = [ { "problem": "What is the range of the numeric output of a sigmoid node in a neural network?", "solution": "Between -1 and 1.", }, { "problem": "What is the range of the numeric output of a sigmoid node in a neural network?", "solution": "Between 0 and 1.", }, { "problem": "What is the capital of Australia?", "solution": "Canberra is the capital city of Australia.", }, { "problem": "What is the capital of Australia?", "solution": "Sydney is the capital city of Australia.", }, ] messages = [[{"role": "user", "content": GRM_PROMPT_TEMPLATE.format(**conv)}] for conv in convs] prompts = DataProto.from_dict( non_tensors={ "raw_prompt": messages, } ) return convs, prompts def test_reward_model_manager(): ray.init( runtime_env={ "env_vars": { "TOKENIZERS_PARALLELISM": "true", "NCCL_DEBUG": "WARN", "VLLM_LOGGING_LEVEL": "INFO", "VLLM_USE_V1": "1", } } ) with initialize_config_dir(config_dir=os.path.abspath("recipe/fapo/config")): config = compose("rm_config") model_path = os.path.expanduser("~/models/Qwen/Qwen2.5-0.5B-Instruct") config.reward_model.reward_manager = "dapo" config.reward_model.enable = True config.reward_model.enable_resource_pool = True config.reward_model.n_gpus_per_node = 8 config.reward_model.nnodes = 1 config.reward_model.model.path = model_path config.reward_model.rollout.name = os.getenv("ROLLOUT_NAME", "vllm") config.reward_model.rollout.gpu_memory_utilization = 0.9 config.reward_model.rollout.tensor_model_parallel_size = 2 config.reward_model.rollout.skip_tokenizer_init = False config.reward_model.rollout.prompt_length = 2048 config.reward_model.rollout.response_length = 4096 # 1. init reward model manager reward_model_manager = RewardModelManager(config.reward_model) # 2. init test data convs, prompts = create_data_samples() # 3. 
generate responses sampling_params = { "max_tokens": 4096, "temperature": 0.7, "top_p": 0.8, "top_k": 20, } results = reward_model_manager.generate_sequences(prompts, sampling_params) responses = [result.choices[0].message.content for result in results] for idx, (conv, response) in enumerate(zip(convs, responses, strict=False)): print(f"Problem {idx}:\n{conv['problem']}\n") print(f"AI Solution {idx}:\n{conv['solution']}\n") print(f"GRM Response {idx}:\n{response}\n") print("=" * 50 + "\n") ray.shutdown() ================================================ FILE: verl_distillation/tests/interactions/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: verl_distillation/tests/interactions/test_gsm8k_interaction.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
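# --- Illustrative sketch (not part of the original tests): the interaction
# lifecycle that the test cases below exercise, i.e. start_interaction ->
# generate_response -> finalize_interaction. Uses the real gsm8k scorer, so a
# correct "#### 42" answer terminates with reward 1.0; requires verl importable.
import asyncio

from verl.interactions.gsm8k_interaction import Gsm8kInteraction


async def _demo_gsm8k_interaction():
    interaction = Gsm8kInteraction({"name": "gsm8k"})
    instance_id = await interaction.start_interaction(ground_truth="42")
    messages = [{"role": "assistant", "content": "#### 42"}]
    should_terminate, response, reward, metadata = await interaction.generate_response(instance_id, messages)
    print(should_terminate, reward)  # True, 1.0 for a correct answer
    await interaction.finalize_interaction(instance_id)


# asyncio.run(_demo_gsm8k_interaction())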
from unittest.mock import patch import pytest from verl.interactions.gsm8k_interaction import Gsm8kInteraction class TestGsm8kInteraction: """Test cases for Gsm8kInteraction class.""" def setup_method(self): """Set up test environment before each test method.""" self.config = {"name": "gsm8k"} self.interaction = Gsm8kInteraction(self.config) def test_init(self): """Test Gsm8kInteraction initialization.""" assert self.interaction._instance_dict == {} assert self.interaction.config == self.config assert self.interaction.name == "gsm8k" @pytest.mark.asyncio async def test_start_interaction_with_instance_id(self): """Test start_interaction with provided instance_id.""" instance_id = "test_instance" ground_truth = "42" result_id = await self.interaction.start_interaction(instance_id=instance_id, ground_truth=ground_truth) assert result_id == instance_id assert instance_id in self.interaction._instance_dict assert self.interaction._instance_dict[instance_id]["response"] == "" assert self.interaction._instance_dict[instance_id]["ground_truth"] == ground_truth assert self.interaction._instance_dict[instance_id]["reward"] == 0.0 @pytest.mark.asyncio async def test_start_interaction_without_instance_id(self): """Test start_interaction without provided instance_id (auto-generated).""" ground_truth = "42" result_id = await self.interaction.start_interaction(ground_truth=ground_truth) assert result_id is not None assert len(result_id) == 36 # UUID4 length assert result_id in self.interaction._instance_dict assert self.interaction._instance_dict[result_id]["ground_truth"] == ground_truth @pytest.mark.asyncio async def test_start_interaction_without_ground_truth(self): """Test start_interaction without ground_truth parameter.""" instance_id = "test_instance" result_id = await self.interaction.start_interaction(instance_id=instance_id) assert result_id == instance_id assert self.interaction._instance_dict[instance_id]["ground_truth"] is None @pytest.mark.asyncio async def test_generate_response_correct_answer_with_prefix(self): """Test generate_response with correct answer already having #### prefix.""" instance_id = "test_instance" ground_truth = "42" # Setup instance await self.interaction.start_interaction(instance_id=instance_id, ground_truth=ground_truth) messages = [{"role": "assistant", "content": "#### 42"}] with patch("verl.utils.reward_score.gsm8k.compute_score", return_value=1.0): should_terminate, response, reward, metadata = await self.interaction.generate_response( instance_id, messages ) assert should_terminate is True assert response == "Your response is correct!" assert reward == 1.0 assert metadata == {} assert self.interaction._instance_dict[instance_id]["response"] == "#### 42" @pytest.mark.asyncio async def test_generate_response_correct_answer_without_prefix(self): """Test generate_response with correct answer missing #### prefix.""" instance_id = "test_instance" ground_truth = "42" # Setup instance await self.interaction.start_interaction(instance_id=instance_id, ground_truth=ground_truth) messages = [{"role": "assistant", "content": "42"}] with patch("verl.utils.reward_score.gsm8k.compute_score", return_value=1.0): should_terminate, response, reward, metadata = await self.interaction.generate_response( instance_id, messages ) assert should_terminate is True assert response == "Your response is correct!" 
assert reward == 1.0 assert self.interaction._instance_dict[instance_id]["response"] == "42" @pytest.mark.asyncio async def test_generate_response_incorrect_answer(self): """Test generate_response with incorrect answer.""" instance_id = "test_instance" ground_truth = "42" # Setup instance await self.interaction.start_interaction(instance_id=instance_id, ground_truth=ground_truth) messages = [{"role": "assistant", "content": "24"}] with patch("verl.utils.reward_score.gsm8k.compute_score", return_value=0.0): should_terminate, response, reward, metadata = await self.interaction.generate_response( instance_id, messages ) assert should_terminate is False assert response == "Your response is incorrect! You need to reflect on your answer and try again." assert reward == 0.0 assert self.interaction._instance_dict[instance_id]["response"] == "24" @pytest.mark.asyncio async def test_generate_response_multiple_messages(self): """Test generate_response with multiple messages (should use last assistant message).""" instance_id = "test_instance" ground_truth = "42" # Setup instance await self.interaction.start_interaction(instance_id=instance_id, ground_truth=ground_truth) messages = [ {"role": "user", "content": "What is 2+2?"}, {"role": "assistant", "content": "### 4"}, {"role": "user", "content": "What is 40+2?"}, {"role": "assistant", "content": "#### 42"}, ] with patch("verl.utils.reward_score.gsm8k.compute_score", return_value=1.0): should_terminate, response, reward, metadata = await self.interaction.generate_response( instance_id, messages ) assert should_terminate is True assert response == "Your response is correct!" assert self.interaction._instance_dict[instance_id]["response"] == "#### 42" @pytest.mark.asyncio async def test_generate_response_no_assistant_message(self): """Test generate_response with no assistant messages.""" instance_id = "test_instance" ground_truth = "42" # Setup instance await self.interaction.start_interaction(instance_id=instance_id, ground_truth=ground_truth) messages = [{"role": "user", "content": "Hello!"}] with patch("verl.utils.reward_score.gsm8k.compute_score", return_value=0.0): should_terminate, response, reward, metadata = await self.interaction.generate_response( instance_id, messages ) assert should_terminate is False assert self.interaction._instance_dict[instance_id]["response"] == "" @pytest.mark.asyncio async def test_calculate_score_direct_call(self): """Test calculate_score method directly.""" instance_id = "test_instance" ground_truth = "42" # Setup instance await self.interaction.start_interaction(instance_id=instance_id, ground_truth=ground_truth) # Set a response self.interaction._instance_dict[instance_id]["response"] = "#### 42" with patch("verl.utils.reward_score.gsm8k.compute_score", return_value=1.0) as mock_compute: score = await self.interaction.calculate_score(instance_id) assert score == 1.0 mock_compute.assert_called_once_with("#### 42", "42", method="strict", format_score=0.0, score=1.0) @pytest.mark.asyncio async def test_calculate_score_with_kwargs(self): """Test calculate_score method with additional kwargs.""" instance_id = "test_instance" ground_truth = "42" # Setup instance await self.interaction.start_interaction(instance_id=instance_id, ground_truth=ground_truth) # Set a response self.interaction._instance_dict[instance_id]["response"] = "#### 24" with patch("verl.utils.reward_score.gsm8k.compute_score", return_value=0.0) as mock_compute: score = await self.interaction.calculate_score(instance_id, extra_param="test") assert 
score == 0.0 mock_compute.assert_called_once_with("#### 24", "42", method="strict", format_score=0.0, score=1.0) @pytest.mark.asyncio async def test_finalize_interaction(self): """Test finalize_interaction method.""" instance_id = "test_instance" ground_truth = "42" # Setup instance await self.interaction.start_interaction(instance_id=instance_id, ground_truth=ground_truth) assert instance_id in self.interaction._instance_dict await self.interaction.finalize_interaction(instance_id) assert instance_id not in self.interaction._instance_dict @pytest.mark.asyncio async def test_finalize_interaction_with_kwargs(self): """Test finalize_interaction method with additional kwargs.""" instance_id = "test_instance" ground_truth = "42" # Setup instance await self.interaction.start_interaction(instance_id=instance_id, ground_truth=ground_truth) assert instance_id in self.interaction._instance_dict await self.interaction.finalize_interaction(instance_id, extra_param="test") assert instance_id not in self.interaction._instance_dict @pytest.mark.asyncio async def test_finalize_nonexistent_interaction(self): """Test finalize_interaction with non-existent instance_id.""" instance_id = "nonexistent_instance" # This should raise KeyError with pytest.raises(KeyError): await self.interaction.finalize_interaction(instance_id) @pytest.mark.asyncio async def test_full_interaction_workflow_correct(self): """Test complete interaction workflow with correct answer.""" ground_truth = "42" # Start interaction instance_id = await self.interaction.start_interaction(ground_truth=ground_truth) # Generate response with correct answer messages = [{"role": "assistant", "content": "42"}] with patch("verl.utils.reward_score.gsm8k.compute_score", return_value=1.0): should_terminate, response, reward, metadata = await self.interaction.generate_response( instance_id, messages ) assert should_terminate is True assert reward == 1.0 # Finalize interaction await self.interaction.finalize_interaction(instance_id) assert instance_id not in self.interaction._instance_dict @pytest.mark.asyncio async def test_full_interaction_workflow_incorrect(self): """Test complete interaction workflow with incorrect answer.""" ground_truth = "42" # Start interaction instance_id = await self.interaction.start_interaction(ground_truth=ground_truth) # Generate response with incorrect answer messages = [{"role": "assistant", "content": "24"}] with patch("verl.utils.reward_score.gsm8k.compute_score", return_value=0.0): should_terminate, response, reward, metadata = await self.interaction.generate_response( instance_id, messages ) assert should_terminate is False assert reward == 0.0 # Continue with another attempt messages.append({"role": "user", "content": response}) messages.append({"role": "assistant", "content": "42"}) with patch("verl.utils.reward_score.gsm8k.compute_score", return_value=1.0): should_terminate, response, reward, metadata = await self.interaction.generate_response( instance_id, messages ) assert should_terminate is True assert reward == 1.0 # Finalize interaction await self.interaction.finalize_interaction(instance_id) assert instance_id not in self.interaction._instance_dict @pytest.mark.asyncio async def test_multiple_concurrent_interactions(self): """Test multiple concurrent interaction instances.""" ground_truth_1 = "42" ground_truth_2 = "24" # Start multiple interactions instance_id_1 = await self.interaction.start_interaction(ground_truth=ground_truth_1) instance_id_2 = await 
self.interaction.start_interaction(ground_truth=ground_truth_2) assert len(self.interaction._instance_dict) == 2 assert instance_id_1 in self.interaction._instance_dict assert instance_id_2 in self.interaction._instance_dict # Test responses for both instances messages_1 = [{"role": "assistant", "content": "42"}] messages_2 = [{"role": "assistant", "content": "24"}] with patch("verl.utils.reward_score.gsm8k.compute_score", side_effect=[1.0, 1.0]): should_terminate_1, _, reward_1, _ = await self.interaction.generate_response(instance_id_1, messages_1) should_terminate_2, _, reward_2, _ = await self.interaction.generate_response(instance_id_2, messages_2) assert should_terminate_1 is True assert should_terminate_2 is True assert reward_1 == 1.0 assert reward_2 == 1.0 # Finalize both interactions await self.interaction.finalize_interaction(instance_id_1) await self.interaction.finalize_interaction(instance_id_2) assert len(self.interaction._instance_dict) == 0 @pytest.mark.asyncio async def test_edge_case_empty_messages(self): """Test edge case with empty messages list.""" instance_id = "test_instance" ground_truth = "42" # Setup instance await self.interaction.start_interaction(instance_id=instance_id, ground_truth=ground_truth) messages = [] with patch("verl.utils.reward_score.gsm8k.compute_score", return_value=0.0): should_terminate, response, reward, metadata = await self.interaction.generate_response( instance_id, messages ) assert should_terminate is False assert reward == 0.0 assert self.interaction._instance_dict[instance_id]["response"] == "" @pytest.mark.asyncio async def test_edge_case_message_without_content(self): """Test edge case with message without content field.""" instance_id = "test_instance" ground_truth = "42" # Setup instance await self.interaction.start_interaction(instance_id=instance_id, ground_truth=ground_truth) messages = [ {"role": "assistant"} # Missing content field ] with patch("verl.utils.reward_score.gsm8k.compute_score", return_value=0.0): should_terminate, response, reward, metadata = await self.interaction.generate_response( instance_id, messages ) assert should_terminate is False assert reward == 0.0 assert self.interaction._instance_dict[instance_id]["response"] is None def test_inheritance_from_base_interaction(self): """Test that Gsm8kInteraction properly inherits from BaseInteraction.""" from verl.interactions.base import BaseInteraction assert isinstance(self.interaction, BaseInteraction) # Test that all required methods are implemented assert hasattr(self.interaction, "start_interaction") assert hasattr(self.interaction, "generate_response") assert hasattr(self.interaction, "calculate_score") assert hasattr(self.interaction, "finalize_interaction") # Test that methods are callable assert callable(self.interaction.start_interaction) assert callable(self.interaction.generate_response) assert callable(self.interaction.calculate_score) assert callable(self.interaction.finalize_interaction) def test_name_attribute_initialization(self): """Test name attribute initialization with different configs.""" # Test with explicit name in config config_with_name = {"name": "custom_gsm8k"} interaction_with_name = Gsm8kInteraction(config_with_name) assert interaction_with_name.name == "custom_gsm8k" # Test with default name when not provided in config config_without_name = {} interaction_without_name = Gsm8kInteraction(config_without_name) assert interaction_without_name.name == "interaction_agent" # Default from BaseInteraction # Test that name is accessible as 
attribute assert hasattr(self.interaction, "name") assert self.interaction.name == "gsm8k" ================================================ FILE: verl_distillation/tests/interactions/test_interaction_registry.py ================================================ # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import tempfile import pytest from omegaconf import OmegaConf from verl.interactions.base import BaseInteraction from verl.interactions.gsm8k_interaction import Gsm8kInteraction from verl.interactions.utils.interaction_registry import ( get_interaction_class, initialize_interactions_from_config, ) class TestInteractionRegistry: def test_get_interaction_class(self): """Test getting interaction class by name.""" # Test getting base interaction class base_cls = get_interaction_class("verl.interactions.base.BaseInteraction") assert base_cls == BaseInteraction # Test getting gsm8k interaction class gsm8k_cls = get_interaction_class("verl.interactions.gsm8k_interaction.Gsm8kInteraction") assert gsm8k_cls == Gsm8kInteraction def test_initialize_single_interaction_from_config(self): """Test initializing single interaction from config.""" # Create temporary config file config_content = { "interaction": [ { "name": "test_gsm8k", "class_name": "verl.interactions.gsm8k_interaction.Gsm8kInteraction", "config": {}, } ] } with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as f: OmegaConf.save(config_content, f.name) temp_config_path = f.name try: interaction_map = initialize_interactions_from_config(temp_config_path) # Check that interaction was created assert len(interaction_map) == 1 assert "test_gsm8k" in interaction_map assert isinstance(interaction_map["test_gsm8k"], Gsm8kInteraction) assert interaction_map["test_gsm8k"].name == "test_gsm8k" finally: os.unlink(temp_config_path) def test_initialize_multiple_interactions_from_config(self): """Test initializing multiple interactions from config.""" config_content = { "interaction": [ { "name": "gsm8k_solver", "class_name": "verl.interactions.gsm8k_interaction.Gsm8kInteraction", "config": {}, }, { "name": "base_agent", "class_name": "verl.interactions.base.BaseInteraction", "config": {"custom_param": "test_value"}, }, ] } with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as f: OmegaConf.save(config_content, f.name) temp_config_path = f.name try: interaction_map = initialize_interactions_from_config(temp_config_path) # Check that both interactions were created assert len(interaction_map) == 2 assert "gsm8k_solver" in interaction_map assert "base_agent" in interaction_map # Check types assert isinstance(interaction_map["gsm8k_solver"], Gsm8kInteraction) assert isinstance(interaction_map["base_agent"], BaseInteraction) # Check names were injected assert interaction_map["gsm8k_solver"].name == "gsm8k_solver" assert interaction_map["base_agent"].name == "base_agent" # Check custom config was passed assert 
interaction_map["base_agent"].config.get("custom_param") == "test_value" finally: os.unlink(temp_config_path) def test_initialize_interaction_without_explicit_name(self): """Test that interaction name is derived from class name when not specified.""" config_content = { "interaction": [{"class_name": "verl.interactions.gsm8k_interaction.Gsm8kInteraction", "config": {}}] } with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as f: OmegaConf.save(config_content, f.name) temp_config_path = f.name try: interaction_map = initialize_interactions_from_config(temp_config_path) # Check that interaction name was derived from class name assert len(interaction_map) == 1 assert "gsm8k" in interaction_map # Should be "gsm8k" after removing "interaction" suffix assert isinstance(interaction_map["gsm8k"], Gsm8kInteraction) assert interaction_map["gsm8k"].name == "gsm8k" finally: os.unlink(temp_config_path) def test_initialize_empty_config(self): """Test initializing from empty config.""" config_content = {"interaction": []} with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as f: OmegaConf.save(config_content, f.name) temp_config_path = f.name try: interaction_map = initialize_interactions_from_config(temp_config_path) assert len(interaction_map) == 0 finally: os.unlink(temp_config_path) def test_invalid_class_name(self): """Test handling of invalid class name.""" config_content = { "interaction": [{"name": "invalid", "class_name": "invalid.module.InvalidClass", "config": {}}] } with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as f: OmegaConf.save(config_content, f.name) temp_config_path = f.name try: with pytest.raises(ModuleNotFoundError): initialize_interactions_from_config(temp_config_path) finally: os.unlink(temp_config_path) def test_duplicate_interaction_names(self): """Test handling of duplicate interaction names.""" config_content = { "interaction": [ {"name": "duplicate", "class_name": "verl.interactions.base.BaseInteraction", "config": {}}, { "name": "duplicate", "class_name": "verl.interactions.gsm8k_interaction.Gsm8kInteraction", "config": {}, }, ] } with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as f: OmegaConf.save(config_content, f.name) temp_config_path = f.name try: with pytest.raises(ValueError, match="Duplicate interaction name 'duplicate' found"): initialize_interactions_from_config(temp_config_path) finally: os.unlink(temp_config_path) def test_auto_name_generation_edge_cases(self): """Test automatic name generation for various class name patterns.""" config_content = { "interaction": [ {"class_name": "verl.interactions.base.BaseInteraction", "config": {}}, {"class_name": "verl.interactions.gsm8k_interaction.Gsm8kInteraction", "config": {}}, ] } with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as f: OmegaConf.save(config_content, f.name) temp_config_path = f.name try: interaction_map = initialize_interactions_from_config(temp_config_path) # Check that names were generated correctly assert len(interaction_map) == 2 assert "base" in interaction_map # BaseInteraction -> base assert "gsm8k" in interaction_map # Gsm8kInteraction -> gsm8k finally: os.unlink(temp_config_path) ================================================ FILE: verl_distillation/tests/kill_github_tests.sh ================================================ #!/bin/bash if [ "$#" -ne 1 ]; then echo "Usage: $0 YOUR_GITHUB_TOKEN" echo "Please provide exactly one input argument for your github token." 
exit 1 fi # Set your GitHub repository details OWNER="volcengine" REPO="verl" TOKEN=$1 # API URL for workflow runs API_URL="https://api.github.com/repos/$OWNER/$REPO/actions/runs?status=queued" # Check required commands command -v jq >/dev/null 2>&1 || { echo "jq is required but not installed. Aborting."; exit 1; } # Get queued workflow runs response=$(curl -s -H "Authorization: token $TOKEN" -H "Accept: application/vnd.github.v3+json" "$API_URL") # Run this for debugging # echo $response # Extract run IDs queued_run_ids=$(echo "$response" | jq -r '.workflow_runs[] | .id') if [ -z "$queued_run_ids" ]; then echo "No queued workflow runs found." exit 0 fi # Cancel each queued run for run_id in $queued_run_ids; do echo "Cancelling run $run_id" cancel_url="https://api.github.com/repos/$OWNER/$REPO/actions/runs/$run_id/cancel" curl -s -X POST -H "Authorization: token $TOKEN" -H "Accept: application/vnd.github.v3+json" "$cancel_url" done echo "Cancelled all queued workflow runs." ================================================ FILE: verl_distillation/tests/models/test_engine.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os os.environ["NCCL_DEBUG"] = "WARN" from functools import partial import numpy as np import pytest import ray import torch import torch.distributed as dist import torch.multiprocessing as mp from transformers import AutoConfig, AutoModelForCausalLM, AutoModelForTokenClassification, Qwen3Config, Qwen3MoeConfig from verl import DataProto from verl.single_controller.ray import RayClassWithInitArgs, RayResourcePool, RayWorkerGroup from verl.trainer.config import CheckpointConfig from verl.utils.model import compute_position_id_with_mask, create_random_mask from verl.utils.torch_functional import logprobs_from_logits_naive from verl.workers.config import ( ActorConfig, CriticConfig, FSDPEngineConfig, FSDPOptimizerConfig, HFModelConfig, McoreEngineConfig, McoreOptimizerConfig, ) from verl.workers.roles import ActorWorker, CriticWorker from verl.workers.roles.utils.losses import ppo_loss, sft_loss @pytest.mark.parametrize("strategy", ["megatron", "fsdp", "fsdp2"]) def test_actor_engine(strategy): ray.init() path = os.path.expanduser("~/models/Qwen/Qwen2.5-0.5B-Instruct") model_config = HFModelConfig(path=path) if strategy == "megatron": engine_config = McoreEngineConfig( forward_only=False, use_mbridge=False, tensor_model_parallel_size=2, pipeline_model_parallel_size=2, context_parallel_size=2, ) optimizer_config = McoreOptimizerConfig(lr_decay_steps=10) elif strategy in ["fsdp", "fsdp2"]: engine_config = FSDPEngineConfig( forward_only=False, fsdp_size=4, strategy=strategy, ulysses_sequence_parallel_size=2 ) optimizer_config = FSDPOptimizerConfig() else: raise NotImplementedError(f"strategy {strategy} is not supported") config = ActorConfig( model_config=model_config, engine=engine_config, strategy=strategy, ppo_micro_batch_size_per_gpu=256, ppo_mini_batch_size=4, optim=optimizer_config, 
use_dynamic_bsz=True, rollout_n=1, ) ray_cls_with_init = RayClassWithInitArgs(cls=ray.remote(ActorWorker), config=config) resource_pool = RayResourcePool(process_on_nodes=[8]) wg = RayWorkerGroup(resource_pool=resource_pool, ray_cls_with_init=ray_cls_with_init) # init model wg.init_model() batch_size = 8 seqlen = 32 response_length = seqlen // 2 torch.manual_seed(1) np.random.seed(1) input_ids = torch.randint(0, model_config.hf_config.vocab_size, (batch_size, seqlen)) attention_mask = create_random_mask( input_ids=input_ids, max_ratio_of_valid_token=0.8, max_ratio_of_left_padding=0.2, min_ratio_of_valid_token=0.6 ) position_ids = compute_position_id_with_mask(attention_mask) global_token_num = torch.sum(attention_mask, dim=-1).tolist() print(input_ids.float().mean(), attention_mask.float().mean()) responses = input_ids[:, response_length:] response_mask = attention_mask[:, response_length:] assert torch.all(response_mask[:, 0] == 1) data = DataProto.from_single_dict( { "input_ids": input_ids, "attention_mask": attention_mask, "position_ids": position_ids, "responses": responses, "response_mask": response_mask, }, meta_info={"temperature": 1.0, "global_token_num": global_token_num}, ) sft_loss_ = partial(sft_loss, config=config) # eval output = wg.compute_log_prob(data) # load hf model and compare results with hf model hf_model = AutoModelForCausalLM.from_pretrained(path, torch_dtype=torch.bfloat16) hf_output = hf_model(input_ids, attention_mask=attention_mask) hf_logprobs = logprobs_from_logits_naive( hf_output.logits[:, -response_length - 1 : -1, :].float(), input_ids[:, -response_length:] ) hf_logprobs_mean = torch.mean(hf_logprobs * response_mask) mcore_logprobs_mean = torch.mean(output.batch["old_log_probs"] * response_mask) torch.testing.assert_close(hf_logprobs_mean, mcore_logprobs_mean, atol=1e-3, rtol=1e-2) data = data.union(output) wg.set_loss_fn(sft_loss_) # train for one step metrics = wg.update_actor(data) print(metrics) # add ppo data data.batch["advantages"] = torch.rand_like(responses, dtype=torch.float32) data.batch["ref_log_prob"] = torch.rand_like(responses, dtype=torch.float32) # set ppo loss ppo_loss_ = partial(ppo_loss, config=config) wg.set_loss_fn(ppo_loss_) # update again ppo_metrics = wg.update_actor(data) print(ppo_metrics) ray.shutdown() def create_model(): from transformers import Qwen3Config config = Qwen3Config(num_hidden_layers=2, num_labels=1) model = AutoModelForTokenClassification.from_config(config) assert model.config.num_labels == 1 path = os.path.expanduser("~/models/test_model") model.save_pretrained(path) config.save_pretrained(path) return path @pytest.mark.parametrize("strategy", ["megatron", "fsdp", "fsdp2"]) def test_critic_engine(strategy): ray.init() path = create_model() model_config = HFModelConfig(path=path, load_tokenizer=False) if strategy == "megatron": engine_config = McoreEngineConfig( forward_only=False, use_mbridge=False, tensor_model_parallel_size=2, pipeline_model_parallel_size=2, context_parallel_size=2, ) optimizer_config = McoreOptimizerConfig(lr_decay_steps=10) elif strategy in ["fsdp", "fsdp2"]: engine_config = FSDPEngineConfig( forward_only=False, fsdp_size=4, strategy=strategy, ulysses_sequence_parallel_size=2 ) optimizer_config = FSDPOptimizerConfig() else: raise NotImplementedError(f"strategy {strategy} is not supported") config = CriticConfig( model_config=model_config, engine=engine_config, strategy=strategy, ppo_micro_batch_size_per_gpu=256, ppo_mini_batch_size=4, optim=optimizer_config, use_dynamic_bsz=True, 
rollout_n=1, ) ray_cls_with_init = RayClassWithInitArgs(cls=ray.remote(CriticWorker), config=config) resource_pool = RayResourcePool(process_on_nodes=[8]) wg = RayWorkerGroup(resource_pool=resource_pool, ray_cls_with_init=ray_cls_with_init) # init model wg.init_model() batch_size = 8 seqlen = 32 response_length = seqlen // 2 torch.manual_seed(1) np.random.seed(1) input_ids = torch.randint(0, model_config.hf_config.vocab_size, (batch_size, seqlen)) attention_mask = create_random_mask( input_ids=input_ids, max_ratio_of_valid_token=0.8, max_ratio_of_left_padding=0.2, min_ratio_of_valid_token=0.6 ) position_ids = compute_position_id_with_mask(attention_mask) global_token_num = torch.sum(attention_mask, dim=-1).tolist() print(input_ids.float().mean(), attention_mask.float().mean()) responses = input_ids[:, response_length:] response_mask = attention_mask[:, response_length:] assert torch.all(response_mask[:, 0] == 1) data = DataProto.from_single_dict( { "input_ids": input_ids, "attention_mask": attention_mask, "position_ids": position_ids, "responses": responses, "response_mask": response_mask, }, meta_info={"temperature": 1.0, "global_token_num": global_token_num}, ) # eval output = wg.compute_values(data) # load hf model and compare results with hf model with torch.device("cuda"): hf_model = AutoModelForTokenClassification.from_pretrained( path, torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2" ) hf_output = hf_model(input_ids.cuda(), attention_mask=attention_mask.cuda()) hf_values = hf_output.logits[:, -response_length - 1 : -1, :].float().squeeze(-1).cpu() hf_values_mean = torch.mean(hf_values * response_mask) engine_values = torch.mean(output.batch["values"] * response_mask) torch.testing.assert_close(hf_values_mean, engine_values, atol=1e-2, rtol=1e-2) data = data.union(output) # add ppo data data.batch["values"] = torch.rand_like(responses, dtype=torch.float32) data.batch["returns"] = torch.rand_like(responses, dtype=torch.float32) # update again ppo_metrics = wg.update_critic(data) print(ppo_metrics) ray.shutdown() def create_actor_model(tmp_path, config): model = AutoModelForCausalLM.from_config(config) path = os.path.join(tmp_path, "test_model") model.save_pretrained(path) config.save_pretrained(path) return path def _worker(rank: int, world_size: int, rendezvous_file: str, strategy: str, model_path: str): torch.cuda.set_device(rank) dist.init_process_group( backend="nccl", init_method=f"file://{rendezvous_file}", rank=rank, world_size=world_size, ) ref_model_config = AutoConfig.from_pretrained(model_path) with torch.device("meta"): ref_model = AutoModelForCausalLM.from_config(ref_model_config) from verl.workers.engine import BaseEngine, EngineRegistry # construct configs model_config = HFModelConfig(path=model_path, load_tokenizer=False) if strategy == "megatron": engine_config = McoreEngineConfig( forward_only=False, use_mbridge=True, tensor_model_parallel_size=2, pipeline_model_parallel_size=2, context_parallel_size=1, ) optimizer_config = McoreOptimizerConfig(lr_decay_steps=10) elif strategy in ["fsdp", "fsdp2"]: engine_config = FSDPEngineConfig( forward_only=False, fsdp_size=4, strategy=strategy, ulysses_sequence_parallel_size=2 ) optimizer_config = FSDPOptimizerConfig() else: raise NotImplementedError(f"strategy {strategy} is not supported") checkpoint_config = CheckpointConfig() # build model engine engine: BaseEngine = EngineRegistry.new( model_type="language_model", backend=engine_config.strategy, model_config=model_config, engine_config=engine_config, 
optimizer_config=optimizer_config, checkpoint_config=checkpoint_config, ) engine.initialize() # get per tensor parameter per_tensor_params = engine.get_per_tensor_param() ref_state_dict = ref_model.state_dict() # load ground truth and compare for key, value in per_tensor_params: assert key in ref_state_dict, f"{key} not in ref_state_dict" assert value.shape == ref_state_dict[key].shape, ( f"{key} shape not equal, {value.shape} != {ref_state_dict[key].shape}" ) if rank == 0: print(key, value.shape) dist.barrier() dist.destroy_process_group() @pytest.mark.parametrize("world_size", [8]) @pytest.mark.parametrize("config", [Qwen3Config(num_hidden_layers=2), Qwen3MoeConfig(num_hidden_layers=2)]) @pytest.mark.parametrize("strategy", ["megatron", "fsdp", "fsdp2"]) def test_per_tensor_generator(world_size, tmp_path, config, strategy): rendezvous_file = str(tmp_path / "rdzv_mask") os.makedirs(os.path.dirname(rendezvous_file), exist_ok=True) # create a model model_path = create_actor_model(tmp_path, config) # spawn workers mp.spawn( fn=_worker, args=(world_size, rendezvous_file, strategy, model_path), nprocs=world_size, join=True, ) ================================================ FILE: verl_distillation/tests/models/test_transformer.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from flash_attn.bert_padding import index_first_axis, pad_input, rearrange, unpad_input from transformers import ( ApertusConfig, AutoModelForCausalLM, AutoModelForTokenClassification, GemmaConfig, LlamaConfig, MistralConfig, Qwen2Config, ) from verl.utils.model import compute_position_id_with_mask, create_random_mask from verl.utils.torch_functional import log_probs_from_logits_all_rmpad, masked_mean # TODO(sgm): add more models for test # we only need one scale for each model test_configs = [ LlamaConfig(num_hidden_layers=1), MistralConfig(num_hidden_layers=1), GemmaConfig(num_hidden_layers=1), Qwen2Config(num_hidden_layers=1), ApertusConfig(num_hidden_layers=1), ] def test_hf_casual_models(): batch_size = 4 seqlen = 128 response_length = 127 for config in test_configs: # config = AutoConfig.from_pretrained(test_case) with torch.device("cuda"): model = AutoModelForCausalLM.from_config( config=config, torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2" ) model = model.to(device="cuda") input_ids = torch.randint(low=0, high=config.vocab_size, size=(batch_size, seqlen), device="cuda") attention_mask = create_random_mask( input_ids=input_ids, max_ratio_of_left_padding=0.1, max_ratio_of_valid_token=0.8, min_ratio_of_valid_token=0.5, ) position_ids = compute_position_id_with_mask( attention_mask ) # TODO(sgm): we can construct the position_ids_rmpad here input_ids_rmpad, indices, *_ = unpad_input( input_ids.unsqueeze(-1), attention_mask ) # input_ids_rmpad (total_nnz, ...) 
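# note (added for clarity): unpad_input flattens the (batch, seqlen) batch into a single packed sequence of valid tokens; the transpose below reshapes it to (1, total_nnz) so one flash-attention varlen forward covers all sequences without padding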
input_ids_rmpad = input_ids_rmpad.transpose(0, 1) # (1, total_nnz) # unpad the position_ids to align the rotary position_ids_rmpad = index_first_axis( rearrange(position_ids.unsqueeze(-1), "b s ... -> (b s) ..."), indices ).transpose(0, 1) # input with input_ids_rmpad and position_ids to enable flash attention varlen logits_rmpad = model( input_ids_rmpad, position_ids=position_ids_rmpad, use_cache=False ).logits # (1, total_nnz, vocab_size) origin_logits = model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, use_cache=False ).logits origin_logits_rmpad, origin_logits_indices, *_ = unpad_input(origin_logits, attention_mask) logits_rmpad = logits_rmpad.squeeze(0) log_probs = log_probs_from_logits_all_rmpad( input_ids_rmpad=input_ids_rmpad, logits_rmpad=logits_rmpad, indices=indices, batch_size=batch_size, seqlen=seqlen, response_length=response_length, ) # (batch, seqlen) origin_log_probs = log_probs_from_logits_all_rmpad( input_ids_rmpad=input_ids_rmpad, logits_rmpad=origin_logits_rmpad, indices=origin_logits_indices, batch_size=batch_size, seqlen=seqlen, response_length=response_length, ) # (batch, seqlen) torch.testing.assert_close( masked_mean(log_probs, attention_mask[:, -response_length - 1 : -1]), masked_mean(origin_log_probs, attention_mask[:, -response_length - 1 : -1]), atol=1e-2, rtol=1e-5, ) print("Check pass") def test_hf_value_models(): batch_size = 4 seqlen = 128 for config in test_configs: # config = AutoConfig.from_pretrained(test_case) config.num_labels = 1 config.classifier_dropout = 0 config.hidden_dropout = 0 with torch.device("cuda"): model = AutoModelForTokenClassification.from_config( config=config, torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2" ) model = model.to(device="cuda") input_ids = torch.randint(low=0, high=config.vocab_size, size=(batch_size, seqlen), device="cuda") attention_mask = create_random_mask( input_ids=input_ids, max_ratio_of_left_padding=0.1, max_ratio_of_valid_token=0.8, min_ratio_of_valid_token=0.5, ) position_ids = compute_position_id_with_mask( attention_mask ) # TODO(sgm): we can construct the position_ids_rmpad here input_ids_rmpad, indices, *_ = unpad_input( input_ids.unsqueeze(-1), attention_mask ) # input_ids_rmpad (total_nnz, ...) input_ids_rmpad = input_ids_rmpad.transpose(0, 1) # (1, total_nnz) # unpad the position_ids to align the rotary position_ids_rmpad = index_first_axis( rearrange(position_ids.unsqueeze(-1), "b s ... -> (b s) ..."), indices ).transpose(0, 1) origin_logits = model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, use_cache=False ).logits # input with input_ids_rmpad and position_ids to enable flash attention varlen rmpad_logits = model( input_ids_rmpad, position_ids=position_ids_rmpad, use_cache=False ).logits # (1, total_nnz, 1) rmpad_logits = rmpad_logits.squeeze(0) pad_logits = pad_input(rmpad_logits, indices, batch_size, seqlen=seqlen) torch.testing.assert_close( masked_mean(pad_logits, attention_mask[:, :, None]), masked_mean(origin_logits, attention_mask[:, :, None]), atol=1e-2, rtol=1e-5, ) print("Value model check pass") def test_attn_implementation_override(): """Test that attn_implementation override config is properly respected.""" # Test case 1: Test the actual extraction logic (no network required) test_cases = [ ({}, "flash_attention_2"), # Default case ({"attn_implementation": "eager"}, "eager"), # Override case ({"attn_implementation": "sdpa"}, "sdpa"), # Another override ({"other_config": "value"}, "flash_attention_2"), # No attn_implementation key ] for override_config, expected in test_cases: actual = override_config.get("attn_implementation", "flash_attention_2") assert actual == expected, f"Expected {expected}, got {actual} for config {override_config}" # Test case 2: Test with local config creation (simulate FSDP worker behavior) # Test default behavior override_config_default = {} attn_implementation_default = override_config_default.get("attn_implementation", "flash_attention_2") assert attn_implementation_default == "flash_attention_2" # Test override behavior override_config_eager = {"attn_implementation": "eager"} attn_implementation_eager = override_config_eager.get("attn_implementation", "flash_attention_2") assert attn_implementation_eager == "eager" # Test that we can create a config with specific attn_implementation config_with_eager = LlamaConfig(num_hidden_layers=1, _attn_implementation="eager") assert config_with_eager._attn_implementation == "eager" config_with_flash = LlamaConfig(num_hidden_layers=1, _attn_implementation="flash_attention_2") assert config_with_flash._attn_implementation == "flash_attention_2" print("✓ All attn_implementation override config tests passed") def test_fsdp_worker_attn_implementation_integration(): """Test integration of attn_implementation with FSDP worker logic.""" # Mock the FSDP worker configuration scenario mock_override_config = {"attn_implementation": "eager"} # Test the exact logic used in FSDP workers attn_implementation = mock_override_config.get("attn_implementation", "flash_attention_2") assert attn_implementation == "eager" # Test with empty config (should default) mock_override_config_empty = {} attn_implementation_default = mock_override_config_empty.get("attn_implementation", "flash_attention_2") assert attn_implementation_default == "flash_attention_2" # Test that the parameter would be passed correctly to both AutoConfig and Model expected_calls = [ ("AutoConfig.from_pretrained", {"attn_implementation": attn_implementation}), ("AutoModel.from_pretrained", {"attn_implementation": attn_implementation}), ] # Verify the parameter extraction works as expected for call_name, expected_params in expected_calls: assert expected_params["attn_implementation"] == "eager" print("✓ FSDP worker integration test passed") if __name__ == "__main__": test_hf_casual_models() test_hf_value_models() test_attn_implementation_override() test_fsdp_worker_attn_implementation_integration() 
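# --- Minimal standalone sketch (added for illustration, not in the original repo):
# the unpad/pad round-trip from flash_attn.bert_padding that the rmpad tests above
# are built on. unpad_input keeps only valid tokens; pad_input zero-fills the
# padded positions back. Uses the torch / unpad_input / pad_input imports above.
def _demo_unpad_pad_roundtrip():
    batch_size, seqlen, hidden = 2, 8, 4
    x = torch.randn(batch_size, seqlen, hidden)
    attention_mask = torch.tensor(
        [[1, 1, 1, 1, 1, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]], dtype=torch.int32
    )
    # keep only the 8 valid tokens, flattened to (total_nnz, hidden)
    x_unpad, indices, *_ = unpad_input(x, attention_mask)
    assert x_unpad.shape[0] == int(attention_mask.sum())
    # restore the (batch, seqlen, hidden) layout; pads come back as zeros
    x_repad = pad_input(x_unpad, indices, batch_size, seqlen=seqlen)
    assert torch.allclose(x_repad[attention_mask.bool()], x_unpad)
    assert (x_repad[~attention_mask.bool()] == 0).all()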
================================================ FILE: verl_distillation/tests/models/test_transformers_ulysses.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import contextlib import copy from dataclasses import dataclass import pytest import torch import torch.distributed import transformers from flash_attn.bert_padding import index_first_axis, rearrange, unpad_input from packaging import version from torch.distributed import init_device_mesh from transformers import AutoModelForCausalLM, LlamaConfig, PretrainedConfig, Qwen2Config from verl.models.transformers.monkey_patch import apply_monkey_patch from verl.protocol import DataProto from verl.utils.distributed import initialize_global_process_group from verl.utils.model import compute_position_id_with_mask, create_random_mask from verl.utils.ulysses import ( gather_outputs_and_unpad, get_ulysses_sequence_parallel_world_size, set_ulysses_sequence_parallel_group, ulysses_pad_and_slice_inputs, ) from verl.workers.sharding_manager.fsdp_ulysses import FSDPUlyssesShardingManager # TODO(sgm): add more models for test # we only need one scale for each model @dataclass class SequenceParallelConfig: config: PretrainedConfig sp_size: int is_valid: bool def test_configs(): configs = [ SequenceParallelConfig( LlamaConfig(num_hidden_layers=2, num_attention_heads=32, num_key_value_heads=32), sp_size=8, is_valid=True ), SequenceParallelConfig( Qwen2Config(num_hidden_layers=2, num_attention_heads=28, num_key_value_heads=4, hidden_size=3584), sp_size=4, is_valid=True, ), SequenceParallelConfig( Qwen2Config(num_hidden_layers=2, num_attention_heads=28, num_key_value_heads=4, hidden_size=3584), sp_size=8, is_valid=False, ), SequenceParallelConfig( Qwen2Config(num_hidden_layers=2, num_attention_heads=32, num_key_value_heads=4), sp_size=4, is_valid=True ), SequenceParallelConfig( Qwen2Config(num_hidden_layers=2, num_attention_heads=32, num_key_value_heads=4), sp_size=8, is_valid=True ), ] if version.parse(transformers.__version__) >= version.parse("4.56.0"): from transformers import ApertusConfig configs.append( SequenceParallelConfig( ApertusConfig(num_hidden_layers=2, num_attention_heads=32, num_key_value_heads=32, hidden_size=4096), sp_size=8, is_valid=True, ) ) return configs def sync_model_parameters_global(layer): # synchronize weights for p in layer.parameters(): torch.distributed.broadcast(tensor=p.data, src=0) @pytest.mark.parametrize("test_config", test_configs()) def test_hf_casual_fwd_bwd(test_config): if not torch.distributed.is_initialized(): initialize_global_process_group() context = contextlib.nullcontext() if test_config.is_valid else pytest.raises(AssertionError) with context: world_size = torch.distributed.get_world_size() _hf_casual_fwd_bwd(test_config.config, test_config.sp_size, world_size // test_config.sp_size) # TODO: seems not work, will cause `socketStartConnect: Connect to xxx failed : Software caused connection abort` # 
torch.distributed.destroy_process_group() def _hf_casual_fwd(config, sp_size, dp_size): assert torch.cuda.device_count() >= 2, "need at least 2 gpus for test" ulysses_device_mesh = init_device_mesh( device_type="cuda", mesh_shape=(dp_size, sp_size), mesh_dim_names=("dp", "sp") ) sharding_manager = FSDPUlyssesShardingManager(ulysses_device_mesh) batch_size = 1 seqlen = 128 # response_length = 127 # patch before load with torch.device("cuda"): model = AutoModelForCausalLM.from_config( config=config, torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2" ) apply_monkey_patch(model, sp_size) model = model.to(device="cuda") sync_model_parameters_global(model) # different rank will generate different input_ids following fsdp input_ids = torch.randint(low=0, high=config.vocab_size, size=(batch_size, seqlen), device="cuda") attention_mask = create_random_mask( input_ids=input_ids, max_ratio_of_left_padding=0, max_ratio_of_valid_token=0.9, min_ratio_of_valid_token=0.8 ) position_ids = compute_position_id_with_mask( attention_mask ) # TODO(sgm): we can construct the position_ids_rmpad here model_inputs = { "input_ids": input_ids.cuda(), "attention_mask": attention_mask.cuda(), "position_ids": position_ids.int().cuda(), } model_inputs = DataProto.from_dict(model_inputs) # 1. perform ulysses forward with sharding_manager: model_inputs = sharding_manager.preprocess_data(model_inputs) input_ids = model_inputs.batch["input_ids"] attention_mask = model_inputs.batch["attention_mask"] position_ids = model_inputs.batch["position_ids"] input_ids_rmpad, indices, *_ = unpad_input( input_ids.unsqueeze(-1), attention_mask ) # input_ids_rmpad (total_nnz, ...) input_ids_rmpad = input_ids_rmpad.transpose(0, 1) # (1, total_nnz) # unpad the position_ids to align the rotary position_ids_rmpad = index_first_axis( rearrange(position_ids.unsqueeze(-1), "b s ... -> (b s) ..."), indices ).transpose(0, 1) # slice input tensor for ulysses # input_ids are padded and sliced # position_ids are only padded but not sliced input_ids_rmpad_sliced, position_ids_rmpad_padded, pad_size = ulysses_pad_and_slice_inputs( input_ids_rmpad, position_ids_rmpad, sp_size=get_ulysses_sequence_parallel_world_size() ) # input with input_ids_rmpad and position_ids to enable flash attention varlen logits_split_in_seq = model( input_ids_rmpad_sliced, position_ids=position_ids_rmpad_padded, use_cache=False ).logits # (1, total_nnz/n, vocab_size) # all_gather output logits_full = gather_outputs_and_unpad(logits_split_in_seq, gather_dim=1, unpad_dim=1, padding_size=pad_size) # 2. 
perform normal forward set_ulysses_sequence_parallel_group(None) logits_rmpad_local = model( input_ids_rmpad, position_ids=position_ids_rmpad, use_cache=False ).logits # (1, total_nnz, vocab_size) mean_local = logits_rmpad_local.mean() mean_full = logits_full.mean() torch.testing.assert_close(mean_local, mean_full, rtol=1e-2, atol=1e-5) def _hf_casual_fwd_bwd(config, sp_size, dp_size): assert torch.cuda.device_count() >= 2, "need at least 2 gpus for test" ulysses_device_mesh = init_device_mesh( device_type="cuda", mesh_shape=(dp_size, sp_size), mesh_dim_names=("dp", "sp") ) sharding_manager = FSDPUlyssesShardingManager(ulysses_device_mesh) batch_size = 1 seqlen = 128 # response_length = 127 # patch before load with torch.device("cuda"): model = AutoModelForCausalLM.from_config( config=config, torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2" ) apply_monkey_patch(model, sp_size) model = model.to(device="cuda") sync_model_parameters_global(model) # different rank will generate different input_ids following fsdp input_ids = torch.randint(low=0, high=config.vocab_size, size=(batch_size, seqlen), device="cuda") attention_mask = create_random_mask( input_ids=input_ids, max_ratio_of_left_padding=0, max_ratio_of_valid_token=0.9, min_ratio_of_valid_token=0.8 ) position_ids = compute_position_id_with_mask( attention_mask ) # TODO(sgm): we can construct the position_ids_rmpad here model_inputs = { "input_ids": input_ids.cuda(), "attention_mask": attention_mask.cuda(), "position_ids": position_ids.int().cuda(), } model_inputs = DataProto.from_dict(model_inputs) # 1. perform ulysses forward with sharding_manager: model_inputs = sharding_manager.preprocess_data(model_inputs) input_ids = model_inputs.batch["input_ids"] attention_mask = model_inputs.batch["attention_mask"] position_ids = model_inputs.batch["position_ids"] input_ids_rmpad, indices, *_ = unpad_input( input_ids.unsqueeze(-1), attention_mask ) # input_ids_rmpad (total_nnz, ...) input_ids_rmpad = input_ids_rmpad.transpose(0, 1) # (1, total_nnz) # unpad the position_ids to align the rotary position_ids_rmpad = index_first_axis( rearrange(position_ids.unsqueeze(-1), "b s ... -> (b s) ..."), indices ).transpose(0, 1) # slice input tensor for ulysses # input_ids are padded and sliced # position_ids are only padded but not sliced input_ids_rmpad_sliced, position_ids_rmpad_padded, pad_size = ulysses_pad_and_slice_inputs( input_ids_rmpad, position_ids_rmpad, sp_size=get_ulysses_sequence_parallel_world_size() ) # input with input_ids_rmpad and position_ids to enable flash attention varlen logits_split_in_seq = model( input_ids_rmpad_sliced, position_ids=position_ids_rmpad_padded, use_cache=False ).logits # (1, total_nnz/n, vocab_size) # all_gather output logits_full = gather_outputs_and_unpad(logits_split_in_seq, gather_dim=1, unpad_dim=1, padding_size=pad_size) # 2. perform normal forward set_ulysses_sequence_parallel_group(None) input_ids_full = copy.deepcopy(input_ids_rmpad) position_ids_full = copy.deepcopy(position_ids_rmpad) model_no_sp = copy.deepcopy(model) logits_rmpad_local = model_no_sp( input_ids_full, position_ids=position_ids_full, use_cache=False ).logits # (1, total_nnz, vocab_size) mean_local = logits_rmpad_local.mean() mean_full = logits_full.mean() mean_full.backward() mean_local.backward() # 3. 
check the gradients grad = model.model.layers[0].self_attn.q_proj.weight.grad grad_full = model_no_sp.model.layers[0].self_attn.q_proj.weight.grad torch.testing.assert_close(mean_local, mean_full, rtol=1e-2, atol=3e-5) # The check should be less strict because the gradient is not an averaged value. torch.testing.assert_close(grad, grad_full, rtol=1e-2, atol=1e-3) if __name__ == "__main__": pytest.main([__file__, "-svv"]) ================================================ FILE: verl_distillation/tests/single_controller/__init__.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: verl_distillation/tests/single_controller/base/test_decorator.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import pytest import verl.single_controller.base.decorator as decorator_module from verl.single_controller.base.decorator import ( DISPATCH_MODE_FN_REGISTRY, Dispatch, _check_dispatch_mode, get_predefined_dispatch_fn, register_dispatch_mode, update_dispatch_mode, ) @pytest.fixture def reset_dispatch_registry(): # Store original state original_registry = DISPATCH_MODE_FN_REGISTRY.copy() yield # Reset registry after test decorator_module.DISPATCH_MODE_FN_REGISTRY.clear() decorator_module.DISPATCH_MODE_FN_REGISTRY.update(original_registry) def test_register_new_dispatch_mode(reset_dispatch_registry): # Test registration def dummy_dispatch(worker_group, *args, **kwargs): return args, kwargs def dummy_collect(worker_group, output): return output register_dispatch_mode("TEST_MODE", dummy_dispatch, dummy_collect) # Verify enum extension _check_dispatch_mode(Dispatch.TEST_MODE) # Verify registry update assert get_predefined_dispatch_fn(Dispatch.TEST_MODE) == { "dispatch_fn": dummy_dispatch, "collect_fn": dummy_collect, } # Clean up Dispatch.remove("TEST_MODE") def test_update_existing_dispatch_mode(reset_dispatch_registry): # Store original implementation original_mode = Dispatch.ONE_TO_ALL # New implementations def new_dispatch(worker_group, *args, **kwargs): return args, kwargs def new_collect(worker_group, output): return output # Test update update_dispatch_mode(original_mode, new_dispatch, new_collect) # Verify update assert get_predefined_dispatch_fn(original_mode)["dispatch_fn"] == new_dispatch assert get_predefined_dispatch_fn(original_mode)["collect_fn"] == new_collect ================================================ FILE: verl_distillation/tests/single_controller/check_worker_alive/main.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
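# --- Editorial sketch (not part of this file): the register/update pattern that
# --- test_decorator.py above exercises, reduced to a plain dict. The names below
# --- are illustrative stand-ins, not the verl API, and the duplicate/missing-key
# --- guards are assumptions about the intended semantics.
_SKETCH_REGISTRY = {}

def _sketch_register_mode(name, dispatch_fn, collect_fn):
    # A brand-new mode must not collide with an existing entry.
    assert name not in _SKETCH_REGISTRY, f"{name} is already registered"
    _SKETCH_REGISTRY[name] = {"dispatch_fn": dispatch_fn, "collect_fn": collect_fn}

def _sketch_update_mode(name, dispatch_fn, collect_fn):
    # Updating replaces the dispatch/collect pair of an existing mode in place.
    assert name in _SKETCH_REGISTRY, f"{name} is not registered"
    _SKETCH_REGISTRY[name] = {"dispatch_fn": dispatch_fn, "collect_fn": collect_fn}
# --- End editorial sketch.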
import os import sys import time import ray from verl.single_controller.base.decorator import Dispatch, register from verl.single_controller.base.worker import Worker from verl.single_controller.ray.base import RayClassWithInitArgs, RayResourcePool, RayWorkerGroup @ray.remote class TestActor(Worker): def __init__(self) -> None: super().__init__() @register(dispatch_mode=Dispatch.ONE_TO_ALL, blocking=False) def foo(self, wait_time): time.sleep(wait_time) sys.exit(1) if __name__ == "__main__": wait_time = int(os.getenv("WAIT_TIME", "10")) ray.init() # test single-node-no-partition print("test single-node-no-partition") resource_pool = RayResourcePool([2], use_gpu=False) class_with_args = RayClassWithInitArgs(cls=TestActor) print("create worker group") wg = RayWorkerGroup(resource_pool, class_with_args, name_prefix="test") wg.start_worker_aliveness_check(1) time.sleep(1) print(time.time(), "start foo") _ = wg.foo(wait_time) print("foo started") print( time.time(), f"wait 6x wait time {wait_time * 6} to let the signal return to the process but still not exceed the process wait time", ) time.sleep(wait_time * 6) ray.shutdown() ================================================ FILE: verl_distillation/tests/single_controller/detached_worker/README.md ================================================ # Detached Worker ## How to run (Only on a single node) - Start a local ray cluster: ```bash ray start --head --port=6379 ``` - Run the server ```bash python3 server.py ``` - In another terminal, run the client ```bash python3 client.py ``` ================================================ FILE: verl_distillation/tests/single_controller/detached_worker/client.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
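# Editorial note (from the README above): the detached-worker flow is
# 1) `ray start --head --port=6379` brings up a local cluster,
# 2) server.py creates a *detached* RayWorkerGroup and prints its worker names,
# 3) this client reattaches to those named workers via RayWorkerGroup.from_detached
#    and issues RPCs against the still-running server processes.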
""" In client, we can get the server handler and send RPC request """ import ray import torch from server import Trainer from tensordict import TensorDict from verl import DataProto from verl.single_controller.ray import RayClassWithInitArgs, RayWorkerGroup def compute_position_id_with_mask(mask): return torch.clip(torch.cumsum(mask, dim=-1) - 1, min=0, max=None) if __name__ == "__main__": ray.init(address="auto", namespace="verl") # get the worker group using names worker_names = ["trainerTrainer_0:0", "trainerTrainer_0:1"] cls_with_init_args = RayClassWithInitArgs(cls=Trainer) worker_group = RayWorkerGroup.from_detached(worker_names=worker_names, ray_cls_with_init=cls_with_init_args) batch_size = 16 sequence_length = 1024 # give Trainer some data to train input_ids = torch.randint(low=0, high=256, size=(batch_size, sequence_length), dtype=torch.int64, device="cuda") attention_mask = torch.ones_like(input_ids) position_ids = compute_position_id_with_mask(attention_mask) data = DataProto( batch=TensorDict( {"input_ids": input_ids, "attention_mask": attention_mask, "position_ids": position_ids}, batch_size=batch_size, ), meta_info={}, ) output = worker_group.train_model(data) print(output) ================================================ FILE: verl_distillation/tests/single_controller/detached_worker/run.sh ================================================ #!/bin/bash ray start --head --port=6379 python3 server.py python3 client.py ray stop --force ================================================ FILE: verl_distillation/tests/single_controller/detached_worker/server.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Server starts a Trainer. Client sends data to the server to train. 
""" import os os.environ["MEGATRON_USE_CUDA_TIMER"] = "0" os.environ["MEGATRON_START_PROCESS_TIMER"] = "False" os.environ["NCCL_DEBUG"] = "WARN" import ray import torch from megatron.core import parallel_state as mpu from megatron.core import tensor_parallel from megatron.core.models.gpt.gpt_model import ModelType from omegaconf import OmegaConf from tensordict import TensorDict from torch import nn from transformers import LlamaConfig from verl import DataProto from verl.models.llama.megatron import ParallelLlamaForCausalLMRmPadPP from verl.single_controller.base import Worker from verl.single_controller.base.decorator import Dispatch, make_nd_compute_dataproto_dispatch_fn, register from verl.single_controller.ray import RayClassWithInitArgs, RayResourcePool, RayWorkerGroup from verl.utils.megatron.optimizer import get_megatron_optimizer, init_megatron_optim_config from verl.utils.megatron_utils import get_model, mcore_model_parallel_config @ray.remote class Trainer(Worker): def __init__(self): super().__init__() if not torch.distributed.is_initialized(): rank = int(os.environ["LOCAL_RANK"]) torch.distributed.init_process_group(backend="nccl") torch.cuda.set_device(rank) mpu.initialize_model_parallel( tensor_model_parallel_size=2, pipeline_model_parallel_size=1, virtual_pipeline_model_parallel_size=None, pipeline_model_parallel_split_rank=None, use_sharp=False, context_parallel_size=1, expert_model_parallel_size=1, nccl_communicator_config_path=None, ) tensor_parallel.model_parallel_cuda_manual_seed(10) is_collect = ( mpu.get_tensor_model_parallel_rank() == 0 and mpu.get_pipeline_model_parallel_rank() == mpu.get_pipeline_model_parallel_world_size() - 1 and mpu.get_context_parallel_rank() == 0 ) self._register_dispatch_collect_info( mesh_name="train", dp_rank=mpu.get_data_parallel_rank(), is_collect=is_collect ) @register(dispatch_mode=Dispatch.ONE_TO_ALL) def init_model(self): actor_model_config = LlamaConfig( vocab_size=256, hidden_size=2048, intermediate_size=5504, num_hidden_layers=24, num_attention_heads=16, num_key_value_heads=16, ) megatron_config = mcore_model_parallel_config(sequence_parallel=True, params_dtype=torch.bfloat16) self.megatron_config = megatron_config def megatron_actor_model_provider(pre_process, post_process): # vpp is not supported yet because it will hang for some reason. 
Need debugging # this_megatron_config = copy.deepcopy(megatron_config) # this_megatron_config.virtual_pipeline_model_parallel_rank = vpp_rank parallel_model = ParallelLlamaForCausalLMRmPadPP( config=actor_model_config, megatron_config=megatron_config, pre_process=pre_process, post_process=post_process, ) parallel_model.cuda() return parallel_model actor_module = get_model( model_provider_func=megatron_actor_model_provider, model_type=ModelType.encoder_or_decoder, wrap_with_ddp=True, ) actor_module = nn.ModuleList(actor_module) optim_config = OmegaConf.create({"lr": 1e-6, "clip_grad": 1.0}) optim_config = init_megatron_optim_config(optim_config) self.optimizer_config = optim_config actor_optimizer = get_megatron_optimizer(model=actor_module, config=optim_config) self.model = actor_module[0] self.optimizer = actor_optimizer @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="train")) def train_model(self, data: DataProto) -> DataProto: input_ids = data.batch["input_ids"] attention_mask = data.batch["attention_mask"] position_ids = data.batch["position_ids"] self.optimizer.zero_grad() self.model.zero_grad_buffer( zero_buffer=(not self.optimizer_config.use_distributed_optimizer) ) # use use_contiguous_buffers_in_local_ddp and no overlap_dp_param_comm # update for 1 iteration output = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids).logits output.mean().backward() update_successful, grad_norm, num_zeros_in_grad = self.optimizer.step( self.megatron_config, self.megatron_config.timers ) return DataProto(batch=TensorDict({"loss": output.detach()}, batch_size=output.shape[0])) if __name__ == "__main__": ray.init(address="auto", namespace="verl") resource_pool = RayResourcePool(process_on_nodes=[2], detached=True) cls_with_init_args = RayClassWithInitArgs(cls=Trainer) worker_group = RayWorkerGroup( resource_pool=resource_pool, ray_cls_with_init=cls_with_init_args, name_prefix="trainer", detached=True, ) worker_group.init_model() worker_names = worker_group.worker_names print(worker_names) ================================================ FILE: verl_distillation/tests/single_controller/test_auto_padding_on_cpu.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import numpy as np import ray import torch from verl import DataProto from verl.protocol import DataProtoConfig from verl.single_controller.base import Worker from verl.single_controller.base.decorator import Dispatch, register from verl.single_controller.ray.base import RayClassWithInitArgs, RayResourcePool, RayWorkerGroup # or set env var VERL_AUTO_PADDING = "1" / "true" DataProtoConfig.auto_padding = True @ray.remote class Actor(Worker): def __init__(self) -> None: super().__init__() @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO) def add(self, data: DataProto): data.batch["a"] += self.rank return data def test_auto_padding(): ray.init(num_cpus=100) chunk_size = 4 actor_cls = RayClassWithInitArgs(cls=Actor) resource_pool = RayResourcePool(process_on_nodes=[chunk_size], use_gpu=False) actor_wg = RayWorkerGroup(resource_pool=resource_pool, ray_cls_with_init=actor_cls) # test locally first for test_size in range(4, 20): local_data = DataProto.from_dict({"a": torch.zeros(test_size)}, {"na": np.zeros(test_size, dtype=object)}) # print(f"before padding, local_data = {local_data}") padding_size = (chunk_size - (test_size % chunk_size)) if (test_size % chunk_size > 0) else 0 local_data.padding(padding_size) # print(f"after padding, local_data = {local_data}") assert len(local_data) == test_size + padding_size, ( f"expecting padded length to be {test_size + padding_size}, but got {len(local_data)}" ) chunked = local_data.chunk(chunk_size) assert len(chunked) == chunk_size, f"during test_size = {test_size}, expecting {chunk_size}, got {chunked}" for dp in chunked: assert len(dp) == test_size // chunk_size + bool(test_size % chunk_size), ( f"test size = {test_size}, expecting dp to be length of " f"{test_size // chunk_size + bool(test_size % chunk_size)}, but got {len(dp)}: {dp} {chunked}" ) # test with RayWorkerGroup method decorated as dispatch_mode=Dispatch.DP_COMPUTE_PROTO data = DataProto.from_dict({"a": torch.zeros(10)}, {"na": np.array([str(i) for i in range(10)], dtype=object)}) output = actor_wg.add(data) print(output.batch["a"]) assert len(output) == 10, "Failed in args split and padding." data = DataProto.from_dict({"a": torch.zeros(10)}, {"na": np.array([str(i) for i in range(10)], dtype=object)}) output = actor_wg.add(data=data) print(output.batch["a"]) assert len(output) == 10, "Failed in kwargs split and padding." data = DataProto.from_dict({"a": torch.zeros(1)}, {"na": np.array([str(i) for i in range(1)], dtype=object)}) output = actor_wg.add(data) print(output.batch["a"]) assert len(output) == 1, "Failed in args split and padding." data = DataProto.from_dict({"a": torch.zeros(1)}, {"na": np.array([str(i) for i in range(1)], dtype=object)}) output = actor_wg.add(data=data) print(output.batch["a"]) assert len(output) == 1, "Failed in kwargs split and padding." data = DataProto.from_dict({"a": torch.zeros(8)}, {"na": np.array([str(i) for i in range(8)], dtype=object)}) output = actor_wg.add(data) print(output.batch["a"]) assert len(output) == 8, "Failed in args split and padding." data = DataProto.from_dict({"a": torch.zeros(8)}, {"na": np.array([str(i) for i in range(8)], dtype=object)}) output = actor_wg.add(data=data) print(output.batch["a"]) assert len(output) == 8, "Failed in kwargs split and padding." 
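# Editorial note: the padding arithmetic above rounds every batch up to a
# multiple of chunk_size. Worked example with chunk_size=4 and test_size=10:
#   padding_size = 4 - (10 % 4) = 2, so the padded length is 12 and each of
#   the 4 chunks holds 10 // 4 + bool(10 % 4) == 3 rows.
# When test_size is already a multiple of chunk_size (e.g. 8), padding_size
# is 0 and the chunks are exact.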
# test data proto specific config DataProtoConfig.auto_padding = False data = DataProto.from_dict( {"a": torch.zeros(10)}, {"na": np.array([str(i) for i in range(10)], dtype=object)}, auto_padding=True ) output = actor_wg.add(data) print(output.batch["a"]) assert len(output) == 10, "Failed in args split and padding." data = DataProto.from_dict( {"a": torch.zeros(10)}, {"na": np.array([str(i) for i in range(10)], dtype=object)}, auto_padding=True ) output = actor_wg.add(data=data) print(output.batch["a"]) assert len(output) == 10, "Failed in kwargs split and padding." data = DataProto.from_single_dict( {"a": torch.zeros(1), "na": np.array([str(i) for i in range(1)], dtype=object)}, auto_padding=True ) output = actor_wg.add(data) print(output.batch["a"]) assert len(output) == 1, "Failed in args split and padding." data = DataProto.from_single_dict( {"a": torch.zeros(1), "na": np.array([str(i) for i in range(1)], dtype=object)}, auto_padding=True ) output = actor_wg.add(data=data) print(output.batch["a"]) assert len(output) == 1, "Failed in kwargs split and padding." data = DataProto.from_single_dict({"a": torch.zeros(8), "na": np.array([str(i) for i in range(8)], dtype=object)}) output = actor_wg.add(data) print(output.batch["a"]) assert len(output) == 8, "Failed in args split and padding." data = DataProto.from_single_dict({"a": torch.zeros(8), "na": np.array([str(i) for i in range(8)], dtype=object)}) output = actor_wg.add(data=data) print(output.batch["a"]) assert len(output) == 8, "Failed in kwargs split and padding." ray.shutdown() if __name__ == "__main__": test_auto_padding() ================================================ FILE: verl_distillation/tests/single_controller/test_colocated_workers.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import ray from verl import DataProto from verl.single_controller.base import Worker from verl.single_controller.base.decorator import Dispatch, register from verl.single_controller.ray.base import ( RayClassWithInitArgs, RayResourcePool, RayWorkerGroup, create_colocated_worker_cls, ) @ray.remote class Actor(Worker): def __init__(self) -> None: super().__init__() @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO) def add(self, data: DataProto): data.batch["a"] += self.rank return data @ray.remote class Critic(Worker): def __init__(self, config) -> None: super().__init__() self.config = config @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO) async def sub(self, data: DataProto): data.batch["a"] -= self.config["b"] return data def test_colocated_workers(): ray.init() import torch data = DataProto.from_dict({"a": torch.zeros(10)}) # create separate workers on the same resource pool actor_cls = RayClassWithInitArgs(cls=Actor) critic_cls = RayClassWithInitArgs(cls=Critic, config={"b": 10}) resource_pool = RayResourcePool(process_on_nodes=[2]) actor_wg = RayWorkerGroup(resource_pool=resource_pool, ray_cls_with_init=actor_cls) critic_wg = RayWorkerGroup(resource_pool=resource_pool, ray_cls_with_init=critic_cls) expected_actor_output = actor_wg.add(data) expected_critic_output = critic_wg.sub(data) # create colocated workers cls_dict = {"actor": actor_cls, "critic": critic_cls} ray_cls_with_init = create_colocated_worker_cls(cls_dict) wg_dict = RayWorkerGroup(resource_pool=resource_pool, ray_cls_with_init=ray_cls_with_init) spawn_wg = wg_dict.spawn(prefix_set=cls_dict.keys()) colocated_actor_wg = spawn_wg["actor"] colocated_critic_wg = spawn_wg["critic"] actor_output = colocated_actor_wg.add(data) critic_output = colocated_critic_wg.sub(data) torch.testing.assert_close(expected_actor_output.batch, actor_output.batch, atol=0, rtol=0) torch.testing.assert_close(expected_critic_output.batch, critic_output.batch, atol=0, rtol=0) ray.shutdown() ================================================ FILE: verl_distillation/tests/single_controller/test_colocated_workers_fused.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
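# Editorial note on test_colocated_workers.py above: Actor and Critic first run
# as two separate RayWorkerGroups sharing one RayResourcePool to produce baseline
# outputs; create_colocated_worker_cls then packs both roles into a single Ray
# actor class, spawn(prefix_set=...) recovers one worker group per role, and the
# colocated results must match the baselines exactly (atol=0, rtol=0). The test
# below repeats the same check for create_colocated_worker_cls_fused.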
import ray from verl import DataProto from verl.single_controller.base import Worker from verl.single_controller.base.decorator import Dispatch, register from verl.single_controller.ray.base import ( RayClassWithInitArgs, RayResourcePool, RayWorkerGroup, create_colocated_worker_cls_fused, ) @ray.remote class Actor(Worker): def __init__(self) -> None: super().__init__() @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO) def add(self, data: DataProto): data.batch["a"] += self.rank return data @ray.remote class Critic(Worker): def __init__(self, config) -> None: super().__init__() self.config = config @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO) def sub(self, data: DataProto): data.batch["a"] -= self.config["b"] return data def test_colocated_workers_fused(): ray.init() import torch data = DataProto.from_dict({"a": torch.zeros(10)}) # create separate workers on the same resource pool actor_cls = RayClassWithInitArgs(cls=Actor) critic_cls = RayClassWithInitArgs(cls=Critic, config={"b": 10}) resource_pool = RayResourcePool(process_on_nodes=[2]) actor_wg = RayWorkerGroup(resource_pool=resource_pool, ray_cls_with_init=actor_cls) critic_wg = RayWorkerGroup(resource_pool=resource_pool, ray_cls_with_init=critic_cls) expected_actor_output = actor_wg.add(data) expected_critic_output = critic_wg.sub(data) # create colocated workers cls_dict = {"actor": actor_cls, "critic": critic_cls} ray_cls_with_init = create_colocated_worker_cls_fused(cls_dict) wg_dict = RayWorkerGroup(resource_pool=resource_pool, ray_cls_with_init=ray_cls_with_init) spawn_wg = wg_dict.spawn(prefix_set=cls_dict.keys()) colocated_actor_wg = spawn_wg["actor"] colocated_critic_wg = spawn_wg["critic"] actor_output = colocated_actor_wg.add(data) critic_output = colocated_critic_wg.sub(data) torch.testing.assert_close(expected_actor_output.batch, actor_output.batch, atol=0, rtol=0) torch.testing.assert_close(expected_critic_output.batch, critic_output.batch, atol=0, rtol=0) ray.shutdown() ================================================ FILE: verl_distillation/tests/single_controller/test_data_transfer.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" In this test, we instantiate a data parallel worker with 8 GPUs """ import ray import tensordict import torch from codetiming import Timer from torch import distributed as dist from verl import DataProto from verl.single_controller.base import Worker from verl.single_controller.base.decorator import Dispatch, register from verl.single_controller.ray import RayClassWithInitArgs, RayResourcePool, RayWorkerGroup from verl.utils.ray_utils import parallel_put @ray.remote class DummyWorker(Worker): def __init__(self): super().__init__() dist.init_process_group() @register(dispatch_mode=Dispatch.DP_COMPUTE, blocking=False) def do_nothing(self, data): for key in data.batch.keys(): data.batch[key] += 1 if tensordict.__version__ >= "0.5.0": data.batch = data.batch.consolidate() return data def test_data_transfer(): ray.init() # construct resource pool resource_pool = RayResourcePool([8]) cls_with_init = RayClassWithInitArgs(cls=DummyWorker) # construct worker group wg = RayWorkerGroup(resource_pool, cls_with_init) # this is real dataset size batch_size = 4096 seqlen = 32768 data_dict = {} for i in range(2): data_dict[str(i)] = torch.randint(0, 10000, (batch_size, seqlen)) data = DataProto.from_dict(tensors=data_dict) print(data) # we manually split data here and send to each worker data_list = data.chunk(wg.world_size) for i in range(wg.world_size): # consolidate is necessary if tensordict.__version__ >= "0.5.0": data_list[i].batch = data_list[i].batch.consolidate() with Timer(name="ray.pickle", initial_text=True): for i in range(wg.world_size): ray.cloudpickle.pickle.dumps(data_list[i]) with Timer(name="raw.pickle", initial_text=True): import pickle for i in range(wg.world_size): pickle.dumps(data_list[i]) # we put in advance with Timer(name="put", initial_text=True): # takes around 40 seconds data_list_ref = parallel_put(data_list) # for i in range(wg.world_size): # data_list[i] = ray.put(data_list[i]) with Timer(name="launch", initial_text=True): output_ref = wg.do_nothing(data_list_ref) with Timer(name="get", initial_text=True): # takes around 40 seconds output_lst = ray.get(output_ref) for input_data, output_data in zip(data_list, output_lst, strict=True): for key in input_data.batch.keys(): assert torch.all(torch.eq(input_data.batch[key] + 1, output_data.batch[key])), ( input_data.batch[key], output_data.batch[key], key, ) ray.shutdown() ================================================ FILE: verl_distillation/tests/single_controller/test_decorator_on_cpu.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import asyncio import time import pytest import ray import torch from tensordict import TensorDict from verl.protocol import DataProto, DataProtoFuture from verl.single_controller.base.decorator import Dispatch, register from verl.single_controller.base.worker import Worker from verl.single_controller.ray import RayClassWithInitArgs, RayResourcePool, RayWorkerGroup # Pytest fixture for Ray setup/teardown @pytest.fixture def ray_init_shutdown(): ray.init(num_cpus=100) yield ray.shutdown() # Define a simple worker for testing @ray.remote class DecoratorTestWorker(Worker): def __init__(self, initial_value=0): super().__init__() self.value = initial_value # Simulate some setup if needed time.sleep(0.1) # Ensure worker init completes # Test method for synchronous DP compute (default behavior) @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO) def dp_compute(self, data: DataProto) -> DataProto: time.sleep(0.1) # Simulate work rank_value = torch.tensor(self.rank, device=data.batch["input"].device, dtype=data.batch["input"].dtype) data.batch["output"] = data.batch["input"] + self.value + rank_value return data # Test async def method with DP compute (default behavior) @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO, blocking=False) async def async_dp_compute(self, data: DataProto) -> DataProto: # Simulate async work await asyncio.sleep(0.1) # Simulate async work rank_value = torch.tensor(self.rank, device=data.batch["input"].device, dtype=data.batch["input"].dtype) data.batch["output_async"] = data.batch["input"] * 2 + self.value + rank_value return data # Test function for synchronous DP compute def test_decorator_dp_compute(ray_init_shutdown): """ Tests the default behavior of a synchronous decorated method with DP_COMPUTE_PROTO. Verifies the result correctness. """ num_workers = 2 resource_pool = RayResourcePool([num_workers], use_gpu=False, max_colocate_count=1) # Use CPU for simplicity cls_with_args = RayClassWithInitArgs(cls=DecoratorTestWorker, initial_value=10) worker_group = RayWorkerGroup( resource_pool, cls_with_args, name_prefix=f"decorator_test_sync_dp_{int(time.time())}" ) # Prepare input data (size 4, for 2 workers) input_tensor = torch.arange(4, dtype=torch.float32) data = DataProto(batch=TensorDict({"input": input_tensor}, batch_size=[4])) # Call the decorated method output = worker_group.dp_compute(data) # Assert the result correctness assert isinstance(output, DataProto), "Expected DataProto result" assert "output" in output.batch.keys() assert len(output) == len(data), "Output length should match input length" # Expected output calculation for DP_COMPUTE_PROTO with 2 workers # Worker 0 gets data[0:2], Worker 1 gets data[2:4] # Worker 0 adds initial_value(10) + rank(0) = 10 # Worker 1 adds initial_value(10) + rank(1) = 11 expected_output_part1 = torch.tensor([0, 1], dtype=torch.float32) + 10 + 0 expected_output_part2 = torch.tensor([2, 3], dtype=torch.float32) + 10 + 1 expected_output = torch.cat([expected_output_part1, expected_output_part2]) torch.testing.assert_close(output.batch["output"], expected_output, msg="Sync DP compute output data mismatch") # Test function for async def method with DP compute def test_decorator_async_function(ray_init_shutdown): """ Tests the decorator with an `async def` method using DP_COMPUTE_PROTO. Verifies that the call returns a future and the result is correct after .get(). 
""" num_workers = 2 resource_pool = RayResourcePool([num_workers], use_gpu=False, max_colocate_count=1) cls_with_args = RayClassWithInitArgs(cls=DecoratorTestWorker, initial_value=5) worker_group = RayWorkerGroup( resource_pool, cls_with_args, name_prefix=f"decorator_test_async_dp_{int(time.time())}" ) # Prepare input data (size 4, for 2 workers) input_tensor = torch.arange(4, dtype=torch.float32) data = DataProto(batch=TensorDict({"input": input_tensor}, batch_size=[4])) # Call the async decorated method - this should return a future future_output: DataProtoFuture = worker_group.async_dp_compute(data) # Assert that the call returned a future assert isinstance(future_output, DataProtoFuture), "Expected DataProtoFuture for async def call" # Get the result (this should block) result_data = future_output.get() # Assert the result correctness assert isinstance(result_data, DataProto) assert "output_async" in result_data.batch.keys() assert len(result_data) == len(data), "Output length should match input length" # Expected output calculation for DP_COMPUTE_PROTO with 2 workers # Worker 0 gets data[0:2], Worker 1 gets data[2:4] # Worker 0 calculates: input * 2 + initial_value(5) + rank(0) # Worker 1 calculates: input * 2 + initial_value(5) + rank(1) expected_output_part1 = (torch.tensor([0, 1], dtype=torch.float32) * 2) + 5 + 0 expected_output_part2 = (torch.tensor([2, 3], dtype=torch.float32) * 2) + 5 + 1 expected_output = torch.cat([expected_output_part1, expected_output_part2]) torch.testing.assert_close( result_data.batch["output_async"], expected_output, msg="Async DP compute output data mismatch" ) ================================================ FILE: verl_distillation/tests/single_controller/test_device_mesh_register.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import ray import torch from verl import DataProto from verl.single_controller.base import Worker from verl.single_controller.base.decorator import make_nd_compute_dataproto_dispatch_fn, register @ray.remote class TestActor(Worker): def __init__(self): super().__init__() import torch.distributed torch.distributed.init_process_group(backend="nccl") self.infer_device_mesh = torch.distributed.device_mesh.init_device_mesh( device_type="cuda", mesh_shape=[2, 4], mesh_dim_names=["dp", "tp"] ) self.train_device_mesh = torch.distributed.device_mesh.init_device_mesh( device_type="cuda", mesh_shape=[2, 2, 2], mesh_dim_names=["pp", "dp", "tp"] ) self._register_dispatch_collect_info( "infer", dp_rank=self.infer_device_mesh["dp"].get_local_rank(), is_collect=self.infer_device_mesh["tp"].get_local_rank() == 0, ) self._register_dispatch_collect_info( "train", dp_rank=self.train_device_mesh["dp"].get_local_rank(), is_collect=self.train_device_mesh["tp"].get_local_rank() == 0 and self.train_device_mesh["pp"].get_local_rank() == 1, ) @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="infer")) def generate_data_proto(self, data: DataProto): tp_rank = self.infer_device_mesh["tp"].get_local_rank() dp_rank = self.infer_device_mesh["dp"].get_local_rank() data.batch["a"] += (tp_rank + 1) * dp_rank return data @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="train")) def train_data_proto(self, data: DataProto): tp_rank = self.train_device_mesh["tp"].get_local_rank() dp_rank = self.train_device_mesh["dp"].get_local_rank() pp_rank = self.train_device_mesh["pp"].get_local_rank() data.batch["a"] += (tp_rank + 1) * (dp_rank + 2) * (pp_rank + 3) # tp rank 0, pp rank 1, dp rank 0, output data added: 8 + 3 = 11 # tp rank 0, pp rank 1, dp rank 1, output data added: 12 + 4 = 16 return data def test_dist_global_info_wg(): # create a worker group with size 8 # register a infer dist info with tp=4, dp=2 # register a train dist info with tp=2, dp=2, pp=2 # test the correctness of data dispatch and computation from verl.single_controller.ray import RayClassWithInitArgs, RayResourcePool, RayWorkerGroup ray.init() ray_cls = RayClassWithInitArgs(TestActor) resource_pool = RayResourcePool(process_on_nodes=[8]) wg = RayWorkerGroup(resource_pool=resource_pool, ray_cls_with_init=ray_cls) infer_input_data_proto = DataProto.from_single_dict(data={"a": torch.tensor([1, 2])}) infer_output_data_proto = wg.generate_data_proto(infer_input_data_proto) assert wg._dispatch_info["infer"] == [0, 0, 0, 0, 1, 1, 1, 1] assert torch.all(torch.eq(infer_output_data_proto.batch["a"], torch.tensor([1, 3]))) train_input_data_proto = DataProto.from_single_dict(data={"a": torch.tensor([3, 4])}) train_output_data_proto = wg.train_data_proto(train_input_data_proto) assert wg._dispatch_info["train"] == [0, 0, 1, 1, 0, 0, 1, 1] assert torch.all(torch.eq(train_output_data_proto.batch["a"], torch.tensor([11, 16]))) ray.shutdown() if __name__ == "__main__": test_dist_global_info_wg() ================================================ FILE: verl_distillation/tests/single_controller/test_driverfunc_to_worker.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import ray import torch from tensordict import TensorDict from verl import DataProto from verl.single_controller.base.worker import Worker from verl.single_controller.ray import RayWorkerGroup from verl.single_controller.ray.base import RayClassWithInitArgs, RayResourcePool os.environ["RAY_DEDUP_LOGS"] = "0" os.environ["NCCL_DEBUG"] = "WARN" @ray.remote class ModelActor(Worker): def __init__(self): pass class HackSelf: def __init__(self): pass def get_aux_metrics(self, test_proto): sequence_ids = test_proto.batch["sequence_ids"] decode_count = [] for i in range(sequence_ids.size(0)): decode_count.append(len(sequence_ids[i].tolist())) ret_proto = DataProto( batch=TensorDict( {"sequence_ids": sequence_ids, "decode_count": torch.tensor(decode_count)}, batch_size=sequence_ids.size(0) ) ) return ret_proto def test(): # construct model ray.init() # create 2 workers, each hold a GPU resource_pool = RayResourcePool([2], use_gpu=True, name_prefix="a") class_with_args = RayClassWithInitArgs(cls=ModelActor) shard_wg = RayWorkerGroup(resource_pool, class_with_args) test_bs = 8 test_proto = DataProto( TensorDict( { "sequence_ids": torch.ones([test_bs, 2048], dtype=torch.int64), }, batch_size=test_bs, ), meta_info={"query_length": 1536}, ) # Sharding among different ranks ret_proto1 = shard_wg.execute_with_func_generator(get_aux_metrics, test_proto) # compare execute on driver hs = HackSelf() ret_proto2 = get_aux_metrics(hs, test_proto) torch.testing.assert_close(ret_proto1.batch["decode_count"], ret_proto2.batch["decode_count"]) ray.shutdown() ================================================ FILE: verl_distillation/tests/single_controller/test_fused_workers_on_cpu.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import ray from verl.single_controller.base import Worker from verl.single_controller.base.decorator import Dispatch, register from verl.single_controller.ray.base import ( RayClassWithInitArgs, RayResourcePool, RayWorkerGroup, create_colocated_worker_raw_cls, ) @ray.remote class Actor(Worker): def __init__(self) -> None: super().__init__() @register(dispatch_mode=Dispatch.ONE_TO_ALL) def add(self, x): x += self.rank return x @ray.remote class Critic(Worker): def __init__(self, val) -> None: super().__init__() self.val = val @register(dispatch_mode=Dispatch.ALL_TO_ALL) def sub(self, x): x -= self.val return x actor_cls = RayClassWithInitArgs(cls=Actor) critic_cls = RayClassWithInitArgs(cls=Critic, val=10) cls_dict = {"actor": actor_cls, "critic": critic_cls} FusedBaseClass = create_colocated_worker_raw_cls(cls_dict) @ray.remote class HybridWorker(FusedBaseClass): @register(dispatch_mode=Dispatch.ONE_TO_ALL) def foo(self, x): return self.critic.sub(self.actor.add(x)) def test_fused_workers(): ray.init(num_cpus=100) # create separate workers on the same resource pool process_on_nodes = [2] resource_pool = RayResourcePool(process_on_nodes=process_on_nodes, use_gpu=False) # create colocated workers hybrid_cls_with_init = RayClassWithInitArgs(cls=HybridWorker) hybrid_cls_with_init.fused_worker_used = True fused_wg = RayWorkerGroup(resource_pool=resource_pool, ray_cls_with_init=hybrid_cls_with_init) fused_wg.fuse(cls_dict.keys()) x = fused_wg.actor.add(0.1) print(x) y = fused_wg.critic.sub(x) print(y) z = fused_wg.foo(0.1) print(z) for i, j in zip(y, z, strict=True): assert i == j ray.shutdown() if __name__ == "__main__": test_fused_workers() ================================================ FILE: verl_distillation/tests/single_controller/test_high_level_scheduling_api.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import gc import time import ray from verl.single_controller.base.worker import Worker from verl.single_controller.ray.base import RayClassWithInitArgs, RayResourcePool, RayWorkerGroup, merge_resource_pool @ray.remote class TestActor(Worker): # TODO: passing *args and **kwargs is bug-prone and not very convincing def __init__(self, cuda_visible_devices=None) -> None: super().__init__(cuda_visible_devices) def get_node_id(self): return ray.get_runtime_context().get_node_id() def test(): ray.init() # test single-node-no-partition print("test single-node-no-partition") resource_pool = RayResourcePool([8], use_gpu=True) class_with_args = RayClassWithInitArgs(cls=TestActor) print("create actor worker group") actor_wg = RayWorkerGroup(resource_pool, class_with_args, name_prefix="high_level_api_actor") print("create critic worker group") critic_wg = RayWorkerGroup(resource_pool, class_with_args, name_prefix="high_level_api_critic") print("create rm worker group") rm_wg = RayWorkerGroup(resource_pool, class_with_args, name_prefix="high_level_api_rm") print("create ref worker group") ref_wg = RayWorkerGroup(resource_pool, class_with_args, name_prefix="high_level_api_ref") assert actor_wg.execute_all_sync("get_cuda_visible_devices") == [str(i) for i in range(8)] assert critic_wg.execute_all_sync("get_cuda_visible_devices") == [str(i) for i in range(8)] assert rm_wg.execute_all_sync("get_cuda_visible_devices") == [str(i) for i in range(8)] assert ref_wg.execute_all_sync("get_cuda_visible_devices") == [str(i) for i in range(8)] del actor_wg del critic_wg del rm_wg del ref_wg gc.collect() # make sure ray actors are deleted [ray.util.remove_placement_group(pg) for pg in resource_pool.get_placement_groups()] print("wait 5s to remove placement_group") time.sleep(5) # test single-node-multi-partition print("test single-node-multi-partition") rm_resource_pool = RayResourcePool([4], use_gpu=True, name_prefix="rm") ref_resource_pool = RayResourcePool([4], use_gpu=True, name_prefix="ref") total_resource_pool = merge_resource_pool(rm_resource_pool, ref_resource_pool) assert rm_resource_pool.world_size == 4 assert ref_resource_pool.world_size == 4 assert total_resource_pool.world_size == 8 actor_wg = RayWorkerGroup(total_resource_pool, class_with_args, name_prefix="high_level_api_actor") critic_wg = RayWorkerGroup(total_resource_pool, class_with_args, name_prefix="high_level_api_critic") rm_wg = RayWorkerGroup(rm_resource_pool, class_with_args, name_prefix="high_level_api_rm") ref_wg = RayWorkerGroup(ref_resource_pool, class_with_args, name_prefix="high_level_api_ref") assert actor_wg.execute_all_sync("get_cuda_visible_devices") == [str(i) for i in range(8)] assert critic_wg.execute_all_sync("get_cuda_visible_devices") == [str(i) for i in range(8)] assert rm_wg.execute_all_sync("get_cuda_visible_devices") == [str(i) for i in range(4)] assert ref_wg.execute_all_sync("get_cuda_visible_devices") == [str(i) for i in range(4, 8)] ray.shutdown() ================================================ FILE: verl_distillation/tests/single_controller/test_nested_worker.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import ray from verl.single_controller.base.decorator import Dispatch, register from verl.single_controller.base.worker import Worker from verl.single_controller.ray.base import RayClassWithInitArgs, RayResourcePool, RayWorkerGroup class TestActor(Worker): # TODO: passing *args and **kwargs is bug-prone and not very convincing def __init__(self, x) -> None: super().__init__() self.a = x @register(dispatch_mode=Dispatch.ONE_TO_ALL) def get(self): return self.a + self.rank class TestHighLevelActor(Worker): def __init__(self, x=None) -> None: super().__init__() self.test_actor = TestActor(x=x) @register(dispatch_mode=Dispatch.ONE_TO_ALL) def get(self): return self.test_actor.get() def test_nested_worker(): ray.init(num_cpus=100) # create 4 workers, each holds a GPU resource_pool = RayResourcePool([4], use_gpu=True) class_with_args = RayClassWithInitArgs(cls=ray.remote(TestActor), x=2) worker_group = RayWorkerGroup( resource_pool=resource_pool, ray_cls_with_init=class_with_args, name_prefix="worker_group_basic" ) output = worker_group.get() assert output == [2, 3, 4, 5] class_with_args = RayClassWithInitArgs(cls=ray.remote(TestHighLevelActor), x=2) high_level_worker_group = RayWorkerGroup( resource_pool=resource_pool, ray_cls_with_init=class_with_args, name_prefix="worker_group_basic_2" ) output_1 = high_level_worker_group.get() assert output_1 == [2, 3, 4, 5] ray.shutdown() ================================================ FILE: verl_distillation/tests/single_controller/test_ray_collectives.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Test for using ray collective groups. Suppose we have Actor and Rollout: Actor contains 4 workers and Rollout contains 2 workers. We establish Actor-to-Rollout relationships using collective groups: Actor ranks 0, 1 -> Rollout rank 0; Actor ranks 2, 3 -> Rollout rank 1. Then we initiate 4 p2p comms from Actor to Rollout. """ import ray import ray.util.collective as collective import torch from verl.single_controller.base import Worker from verl.single_controller.base.decorator import Dispatch, register from verl.single_controller.ray import RayClassWithInitArgs, RayResourcePool, RayWorkerGroup @ray.remote class Actor(Worker): @register(Dispatch.ONE_TO_ALL) def init(self): remote_rank = self.rank // 2 self.group_name = f"A{self.rank}_R{remote_rank}" collective.init_collective_group(world_size=2, rank=0, backend="nccl", group_name=self.group_name) @register(Dispatch.ONE_TO_ALL, blocking=False) def send_tensors(self): tensor = torch.ones(size=(4,), dtype=torch.float32, device="cuda") * self.rank collective.send(tensor=tensor, dst_rank=1, group_name=self.group_name) @ray.remote class Rollout(Worker): @register(Dispatch.ONE_TO_ALL) def init(self): self.remote_first_rank = self.rank * 2 self.remote_second_rank = self.remote_first_rank + 1 self.first_group_name = f"A{self.remote_first_rank}_R{self.rank}" self.second_group_name = f"A{self.remote_second_rank}_R{self.rank}" collective.init_collective_group(world_size=2, rank=1, backend="nccl", group_name=self.first_group_name) collective.init_collective_group(world_size=2, rank=1, backend="nccl", group_name=self.second_group_name) @register(Dispatch.ONE_TO_ALL, blocking=False) def receive_tensors(self): self.tensor1 = torch.randn(size=(4,), dtype=torch.float32, device="cuda") self.tensor2 = torch.randn(size=(4,), dtype=torch.float32, device="cuda") collective.recv(self.tensor1, src_rank=0, group_name=self.first_group_name) collective.recv(self.tensor2, src_rank=0, group_name=self.second_group_name) @register(Dispatch.ONE_TO_ALL) def get_tensors(self): return {f"src_{self.remote_first_rank}": self.tensor1, f"src_{self.remote_second_rank}": self.tensor2} def test_ray_collective_group(): ray.init() actor_resource_pool = RayResourcePool([4]) rollout_resource_pool = RayResourcePool([2]) actor_cls = RayClassWithInitArgs(cls=Actor) rollout_cls = RayClassWithInitArgs(cls=Rollout) actor_wg = RayWorkerGroup( resource_pool=actor_resource_pool, ray_cls_with_init=actor_cls, name_prefix="collective_group_actor" ) rollout_wg = RayWorkerGroup( resource_pool=rollout_resource_pool, ray_cls_with_init=rollout_cls, name_prefix="collective_group_rollout" ) actor_wg.init() rollout_wg.init() out1 = actor_wg.send_tensors() out2 = rollout_wg.receive_tensors() # block to wait ray.get(out1) ray.get(out2) output = rollout_wg.get_tensors() rollout_0_output = output[0] rollout_1_output = output[1] output = rollout_0_output | rollout_1_output print(output) for i in range(4): assert torch.sum(output[f"src_{i}"]).item() == 4 * i ray.shutdown() if __name__ == "__main__": test_ray_collective_group() ================================================ FILE: verl_distillation/tests/single_controller/test_ray_local_envs_on_cpu.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ e2e test verl.single_controller.ray """ import os import ray from verl.single_controller.base.worker import Worker from verl.single_controller.ray.base import RayClassWithInitArgs, RayResourcePool, RayWorkerGroup @ray.remote class TestActor(Worker): def __init__(self) -> None: super().__init__() def getenv(self, key): val = os.getenv(key, f"{key} not set") return val def test_basics(): ray.init(num_cpus=100) # create 4 CPU-only workers (no GPUs) resource_pool = RayResourcePool([4], use_gpu=False) class_with_args = RayClassWithInitArgs(cls=TestActor) worker_group = RayWorkerGroup( resource_pool=resource_pool, ray_cls_with_init=class_with_args, name_prefix="worker_group_basic" ) output = worker_group.execute_all_sync("getenv", key="RAY_LOCAL_WORLD_SIZE") assert output == ["4", "4", "4", "4"] ray.shutdown() def test_customized_worker_env(): ray.init(num_cpus=100) # create 4 CPU-only workers (no GPUs) resource_pool = RayResourcePool([4], use_gpu=False) class_with_args = RayClassWithInitArgs(cls=TestActor) worker_group = RayWorkerGroup( resource_pool=resource_pool, ray_cls_with_init=class_with_args, name_prefix="worker_group_customized", worker_env={ "test_key": "test_value", # a new key will be appended }, ) output = worker_group.execute_all_sync("getenv", key="test_key") assert output == ["test_value", "test_value", "test_value", "test_value"] try: worker_group = RayWorkerGroup( resource_pool=resource_pool, ray_cls_with_init=class_with_args, name_prefix="worker_group_error", worker_env={ "WORLD_SIZE": "100", # overriding a system env var results in an error }, ) except ValueError as e: assert "WORLD_SIZE" in str(e) else: raise ValueError("test failed") ray.shutdown() if __name__ == "__main__": test_basics() test_customized_worker_env() ================================================ FILE: verl_distillation/tests/single_controller/test_ray_utils_on_cpu.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
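# NOTE: a minimal sketch of the contract exercised by the tests below, assuming
# verl.utils.ray_utils.parallel_put simply ray.put()s each element of a non-empty
# list concurrently and returns one ObjectRef per element (illustrative only, not
# the verl implementation; the default worker cap of 8 is an assumption):
#
#     from concurrent.futures import ThreadPoolExecutor
#
#     def parallel_put_sketch(data, max_workers=None):
#         assert len(data) > 0  # empty input raises, as test_parallel_put_empty expects
#         n_workers = min(max_workers or 8, len(data))
#         with ThreadPoolExecutor(max_workers=n_workers) as pool:
#             return list(pool.map(ray.put, data))  # one ObjectRef per element, order preserved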
import pytest import ray from verl.utils.ray_utils import parallel_put # Initialize Ray for testing if not already done globally @pytest.fixture() def init_ray(): ray.init(num_cpus=4) yield ray.shutdown() def test_parallel_put_basic(init_ray): data = [1, "hello", {"a": 2}, [3, 4]] refs = parallel_put(data) assert len(refs) == len(data) retrieved_data = [ray.get(ref) for ref in refs] assert retrieved_data == data def test_parallel_put_empty(init_ray): data = [] with pytest.raises(AssertionError): _ = parallel_put(data) def test_parallel_put_workers(init_ray): data = list(range(20)) # Test with specific number of workers refs = parallel_put(data, max_workers=4) assert len(refs) == len(data) retrieved_data = [ray.get(ref) for ref in refs] assert retrieved_data == data # Test with default workers (should cap) refs_default = parallel_put(data) assert len(refs_default) == len(data) retrieved_data_default = [ray.get(ref) for ref in refs_default] assert retrieved_data_default == data ================================================ FILE: verl_distillation/tests/single_controller/test_rvdz.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import ray @ray.remote class TestWorker: def __init__(self, rank, world_size, group_name): self.rank = rank self.world_size = world_size self.group_name = group_name self.communicator = None def init(self): from verl.utils.rendezvous.ray_backend import create_nccl_communicator_in_ray self.communicator = create_nccl_communicator_in_ray(self.rank, self.world_size, self.group_name) def test(self): if self.communicator is None: return None return self.communicator.rank_id() def test_rvdz(): ray.init() group_name = "test_group" world_size = 2 workers = [TestWorker.options(num_gpus=1).remote(rank, world_size, group_name) for rank in range(world_size)] ray.get([worker.init.remote() for worker in workers]) ranks = ray.get([worker.test.remote() for worker in workers]) assert ranks == [0, 1], f"expecting [0, 1], got {ranks}" ray.shutdown() ================================================ FILE: verl_distillation/tests/single_controller/test_worker_group_basics.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
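# NOTE: the file below registers worker methods with custom dispatch. A minimal
# sketch of the contract, assuming a dispatch_fn maps the caller's (args, kwargs)
# to per-worker lists of length world_size and a collect_fn merges the per-worker
# outputs (repeat_to_all_dispatch_fn is illustrative, not part of verl):
#
#     def repeat_to_all_dispatch_fn(worker_group, *args, **kwargs):
#         # replicate every caller argument once per worker
#         args = tuple([a] * worker_group.world_size for a in args)
#         kwargs = {k: [v] * worker_group.world_size for k, v in kwargs.items()}
#         return args, kwargs
#
#     @register(dispatch_mode={"dispatch_fn": repeat_to_all_dispatch_fn, "collect_fn": collect_all_to_all})
#     def echo(self, x):
#         return x + self.rank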
""" e2e test verl.single_controller.ray """ import ray import torch from verl.single_controller.base.decorator import Dispatch, Execute, collect_all_to_all, register from verl.single_controller.base.worker import Worker from verl.single_controller.ray.base import RayClassWithInitArgs, RayResourcePool, RayWorkerGroup def two_to_all_dispatch_fn(worker_group, *args, **kwargs): """ Assume the input is a list of 2. Duplicate the input interleaved and pass to each worker. """ for arg in args: assert len(arg) == 2 for i in range(worker_group.world_size - 2): arg.append(arg[i % 2]) for k, v in kwargs.items(): assert len(v) == 2 for i in range(worker_group.world_size - 2): v.append(v[i % 2]) return args, kwargs @ray.remote class TestActor(Worker): # TODO: pass *args and **kwargs is bug prone and not very convincing def __init__(self, x) -> None: super().__init__() self._x = x def foo(self, y): return self._x + y @register(dispatch_mode=Dispatch.ALL_TO_ALL, execute_mode=Execute.RANK_ZERO) def foo_rank_zero(self, x, y): return self._x + y + x @register(Dispatch.ONE_TO_ALL, blocking=False) def foo_one_to_all(self, x, y): return self._x + y + x @register(Dispatch.ALL_TO_ALL, blocking=False) def foo_all_to_all(self, x, y): return self._x + y + x @register(dispatch_mode={"dispatch_fn": two_to_all_dispatch_fn, "collect_fn": collect_all_to_all}) def foo_custom(self, x, y): return self._x + y + x @ray.remote(num_gpus=0.1) def remote_call_wg(worker_names): class_with_args = RayClassWithInitArgs(cls=TestActor, x=2) worker_group = RayWorkerGroup.from_detached( worker_names=worker_names, ray_cls_with_init=class_with_args, name_prefix=None ) print(worker_group.worker_names) output_ref = worker_group.foo_custom(x=[1, 2], y=[5, 6]) assert output_ref == [8, 10, 8, 10] output_ref = worker_group.foo_rank_zero(x=1, y=2) assert output_ref == 5 return worker_group.worker_names def add_one(data): data = data.to("cuda") data += 1 data = data.to("cpu") return data def test_basics(): ray.init(num_cpus=100) # create 4 workers, each hold a GPU resource_pool = RayResourcePool([4], use_gpu=True) class_with_args = RayClassWithInitArgs(cls=TestActor, x=2) worker_group = RayWorkerGroup( resource_pool=resource_pool, ray_cls_with_init=class_with_args, name_prefix="worker_group_basic" ) print(worker_group.worker_names) # this will wait for all the results output = worker_group.execute_all_sync("foo", y=3) assert output == [5, 5, 5, 5] # this is a list of object reference. It won't block. output_ref = worker_group.execute_all_async("foo", y=4) print(output_ref) assert ray.get(output_ref) == [6, 6, 6, 6] output_ref = worker_group.foo_one_to_all(x=1, y=2) assert ray.get(output_ref) == [5, 5, 5, 5] output_ref = worker_group.foo_all_to_all(x=[1, 2, 3, 4], y=[5, 6, 7, 8]) assert ray.get(output_ref) == [8, 10, 12, 14] print(ray.get(remote_call_wg.remote(worker_group.worker_names))) output = worker_group.execute_func_rank_zero(add_one, torch.ones(2, 2)) torch.testing.assert_close(output, torch.ones(2, 2) + 1) ray.shutdown() if __name__ == "__main__": test_basics() ================================================ FILE: verl_distillation/tests/single_controller/test_worker_group_torch.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os os.environ["RAY_DEDUP_LOGS"] = "0" os.environ["NCCL_DEBUG"] = "WARN" import ray import torch import torch.distributed from verl.single_controller.base.worker import Worker from verl.single_controller.ray.base import RayClassWithInitArgs, RayResourcePool, RayWorkerGroup @ray.remote class TestAllGatherActor(Worker): def __init__(self, size) -> None: super().__init__() self.size = size def init(self): torch.distributed.init_process_group() self.tensor = torch.zeros(size=(self.size,), dtype=torch.int64, device="cuda") self.tensor += self.rank def all_gather(self): world_size = self._world_size output = torch.zeros( size=(self.tensor.shape[0] * world_size,), dtype=self.tensor.dtype, device=self.tensor.device ) torch.distributed.all_gather_into_tensor(output, self.tensor, async_op=False) return output @ray.remote class TestAllGatherActorV2(Worker): def __init__(self, size) -> None: super().__init__() self.size = size torch.distributed.init_process_group() self.tensor = torch.zeros(size=(self.size,), dtype=torch.int64, device="cuda") self.tensor += self.rank def all_gather(self): world_size = self._world_size output = torch.zeros( size=(self.tensor.shape[0] * world_size,), dtype=self.tensor.dtype, device=self.tensor.device ) torch.distributed.all_gather_into_tensor(output, self.tensor, async_op=False) return output def test_all_gather_torch(): """ In this test, we instantiate 4 GPUs in a group and test the all_gather """ ray.init() # create 4 workers, each hold a GPU resource_pool = RayResourcePool([4], use_gpu=True) class_with_args = RayClassWithInitArgs(cls=TestAllGatherActor, size=2) worker_group = RayWorkerGroup(resource_pool, class_with_args, name_prefix="worker_group_torch") worker_group.execute_all_sync("init") output = worker_group.execute_all_sync("all_gather") for i in range(1, len(output)): assert torch.all(output[i] == output[0]) output = output[0].cpu() print(output) assert torch.all(output == torch.tensor([0, 0, 1, 1, 2, 2, 3, 3], dtype=torch.int64)) ray.shutdown() def test_all_gather_torch_v2(): """ In this test, we instantiate 4 GPUs in a group and test the all_gather """ ray.init() # create 4 workers, each hold a GPU resource_pool = RayResourcePool([4], use_gpu=True) class_with_args = RayClassWithInitArgs(cls=TestAllGatherActorV2, size=2) worker_group = RayWorkerGroup(resource_pool, class_with_args, name_prefix="worker_group_torch") output = worker_group.execute_all_sync("all_gather") for i in range(1, len(output)): assert torch.all(output[i] == output[0]) output = output[0].cpu() print(output) assert torch.all(output == torch.tensor([0, 0, 1, 1, 2, 2, 3, 3], dtype=torch.int64)) ray.shutdown() ================================================ FILE: verl_distillation/tests/special_distributed/README.md ================================================ This folder is reserved for unit tests (instead of end-to-end tests) that require multiple GPUs. ================================================ FILE: verl_distillation/tests/special_distributed/run_all.sh ================================================ # Copyright 2024 Bytedance Ltd. 
and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #!/usr/bin/env bash set -e -x torchrun --nproc-per-node=4 --standalone tests/special_distributed/test_tensor_dict.py ================================================ FILE: verl_distillation/tests/special_distributed/test_fsdp_ckpt.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import shutil import tempfile import torch import torch.distributed from torch.distributed import init_device_mesh from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp import MixedPrecision, ShardingStrategy from transformers import AutoModelForCausalLM, AutoTokenizer, Qwen2Config from verl.utils.checkpoint.fsdp_checkpoint_manager import FSDPCheckpointManager from verl.utils.distributed import initialize_global_process_group from verl.utils.fsdp_utils import MixedPrecisionPolicy, apply_fsdp2 def create_random_input_ids(batch_size, seq_len, vocab_size): from flash_attn.bert_padding import unpad_input from verl.utils.model import compute_position_id_with_mask, create_random_mask input_ids = torch.randint(0, vocab_size, (batch_size, seq_len), device="cuda") attention_mask = create_random_mask( input_ids, max_ratio_of_left_padding=0.1, min_ratio_of_valid_token=0.5, max_ratio_of_valid_token=0.7 ) position_ids = compute_position_id_with_mask(attention_mask) input_ids = unpad_input(input_ids.unsqueeze(-1), attention_mask)[0].transpose(0, 1) position_ids = unpad_input(position_ids.unsqueeze(-1), attention_mask)[0].transpose(0, 1) return input_ids, position_ids def test_fsdp_ckpt(strategy="fsdp"): assert torch.cuda.device_count() >= 2, "need at least 2 gpus for test" local_rank, rank, world_size = initialize_global_process_group() device_mesh = init_device_mesh("cuda", mesh_shape=(world_size,), mesh_dim_names=("dp",)) model_name = os.path.expanduser("~/models/Qwen/Qwen2.5-0.5B-Instruct") config = Qwen2Config(num_hidden_layers=1) with torch.device("cuda"): model = AutoModelForCausalLM.from_config( config=config, torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2" ) model = model.to(device="cuda") # Wrap model with FSDP if strategy == "fsdp": mixed_precision = MixedPrecision( param_dtype=torch.bfloat16, reduce_dtype=torch.float32, buffer_dtype=torch.float32 ) model = FSDP( model, use_orig_params=False, device_id=torch.cuda.current_device(), sharding_strategy=ShardingStrategy.FULL_SHARD, 
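# FULL_SHARD shards parameters, gradients, and optimizer state across all ranks (ZeRO-3-style), gathering full parameters only transiently for each forward/backward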
mixed_precision=mixed_precision, device_mesh=device_mesh, ) else: mp_policy = MixedPrecisionPolicy( param_dtype=torch.bfloat16, reduce_dtype=torch.float32, cast_forward_inputs=True ) fsdp_kwargs = { "mesh": device_mesh, "mp_policy": mp_policy, } apply_fsdp2(model, fsdp_kwargs, {}) optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4) lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9) # Create checkpoint manager tokenizer = AutoTokenizer.from_pretrained(model_name) checkpoint_manager = FSDPCheckpointManager( model=model, optimizer=optimizer, lr_scheduler=lr_scheduler, tokenizer=tokenizer ) # Generate sample input batch_size = 10 seq_len = 1024 vocab_size = config.vocab_size # First input for initial update input_ids1, position_ids1 = create_random_input_ids(batch_size, seq_len, vocab_size) # Second input for verification input_ids2, position_ids2 = create_random_input_ids(batch_size, seq_len, vocab_size) # Step 1: Initial update and save checkpoint outputs1 = model(input_ids=input_ids1, position_ids=position_ids1) loss1 = outputs1.logits.mean() loss1.backward() optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Save checkpoint after first update temp_dir = tempfile.mkdtemp() checkpoint_path = os.path.join(temp_dir, "checkpoint") checkpoint_manager.save_checkpoint(local_path=checkpoint_path, hdfs_path=None, global_step=0) saved_state_dict = model.state_dict() # Step 2: Second update and forward pass outputs2 = model(input_ids=input_ids2, position_ids=position_ids2) loss2 = outputs2.logits.mean() loss2.backward() optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Record logits after second update with torch.no_grad(): logits_before_load = model(input_ids=input_ids2, position_ids=position_ids2).logits # Step 3: Load checkpoint and repeat second update checkpoint_manager.load_checkpoint(checkpoint_path) loaded_state_dict = model.state_dict() for key in loaded_state_dict: assert key in saved_state_dict, f"Key {key} not found in saved state dict" torch.testing.assert_close(loaded_state_dict[key], saved_state_dict[key], atol=0.0, rtol=0.0) # Repeat the second update with same input outputs3 = model(input_ids=input_ids2, position_ids=position_ids2) loss3 = outputs3.logits.mean() loss3.backward() optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Record logits after loaded checkpoint and update with torch.no_grad(): logits_after_load = model(input_ids=input_ids2, position_ids=position_ids2).logits # Step 4: Verify outputs match torch.testing.assert_close(logits_before_load, logits_after_load, atol=0.0, rtol=0.0) print("Checkpoint save/load test passed!") # Cleanup shutil.rmtree(temp_dir) torch.distributed.barrier() torch.distributed.destroy_process_group() if __name__ == "__main__": strategy = os.environ.get("STRATEGY", "fsdp") os.environ["FLASH_ATTENTION_DETERMINISTIC"] = "1" test_fsdp_ckpt(strategy=strategy) ================================================ FILE: verl_distillation/tests/special_distributed/test_mcore_config_converter.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import megatron.core.parallel_state as mpu import torch from megatron.core.transformer import MLATransformerConfig, TransformerConfig from transformers import AutoConfig, PretrainedConfig from verl.models.mcore import hf_to_mcore_config from verl.utils.distributed import destroy_global_process_group, initialize_global_process_group TEST_MODELS = [ "Qwen/Qwen2.5-7B", # Qwen2 dense "Qwen/Qwen3-8B", # Qwen3 dense "deepseek-ai/deepseek-coder-1.3b-instruct", # deepseek dense "Qwen/Qwen2-57B-A14B", # Qwen2 moe "Qwen/Qwen3-30B-A3B", # Qwen3 moe # "mistralai/Mixtral-8x7B-v0.1", # Mixtral # require authentication "deepseek-ai/DeepSeek-V3-Base", # Deepseek V3 ] def check_config_converter_results(tf_config: TransformerConfig | MLATransformerConfig, hf_config: PretrainedConfig): assert tf_config.num_layers == hf_config.num_hidden_layers, ( f"Number of layers mismatch: {tf_config.num_layers} != {hf_config.num_hidden_layers}" ) assert tf_config.hidden_size == hf_config.hidden_size, ( f"Hidden size mismatch: {tf_config.hidden_size} != {hf_config.hidden_size}" ) assert tf_config.num_attention_heads == hf_config.num_attention_heads, ( f"Number of attention heads mismatch: {tf_config.num_attention_heads} != {hf_config.num_attention_heads}" ) assert tf_config.num_query_groups == hf_config.num_key_value_heads, ( f"Number of query groups mismatch: {tf_config.num_query_groups} != {hf_config.num_key_value_heads}" ) assert tf_config.ffn_hidden_size == hf_config.intermediate_size, ( f"FFN hidden size mismatch: {tf_config.ffn_hidden_size} != {hf_config.intermediate_size}" ) assert tf_config.attention_dropout == hf_config.attention_dropout, ( f"Attention dropout mismatch: {tf_config.attention_dropout} != {hf_config.attention_dropout}" ) assert tf_config.hidden_dropout == getattr(hf_config, "hidden_dropout", 0.0), ( f"Hidden dropout mismatch: {tf_config.hidden_dropout} != {getattr(hf_config, 'hidden_dropout', 0.0)}" ) if getattr(hf_config, "head_dim", None) is not None: assert tf_config.kv_channels == getattr(hf_config, "head_dim", None), ( f"Head dim mismatch: {tf_config.kv_channels} != {getattr(hf_config, 'head_dim', None)}" ) assert tf_config.layernorm_epsilon == hf_config.rms_norm_eps, ( f"Layernorm epsilon mismatch: {tf_config.layernorm_epsilon} != {hf_config.rms_norm_eps}" ) def modify_hf_config(name: str, hf_config: PretrainedConfig): if name == "deepseek-ai/DeepSeek-V3-Base": hf_config.num_nextn_predict_layers = 0 hf_config.quantization_config = None return hf_config def test_mcore_config_converter(): """ Test the conversion of Hugging Face model configurations to MCore configurations. 
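The converted TransformerConfig/MLATransformerConfig is checked field-by-field against the source HF config by check_config_converter_results above: layer count, hidden size, attention heads, KV groups (num_query_groups vs num_key_value_heads), FFN size, attention/hidden dropout, head dim (kv_channels), and RMSNorm epsilon.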
""" local_rank, rank, world_size = initialize_global_process_group() mpu.initialize_model_parallel( tensor_model_parallel_size=2, pipeline_model_parallel_size=2, virtual_pipeline_model_parallel_size=None, pipeline_model_parallel_split_rank=None, use_sharp=False, context_parallel_size=2, expert_model_parallel_size=1, expert_tensor_parallel_size=None, nccl_communicator_config_path=None, ) for model_name in TEST_MODELS: print(f"testing {model_name}") hf_config = AutoConfig.from_pretrained(os.path.expanduser(f"~/models/configs/{model_name}/config.json")) hf_config = modify_hf_config(model_name, hf_config) tf_config = hf_to_mcore_config(hf_config, torch.bfloat16) check_config_converter_results(tf_config, hf_config) destroy_global_process_group() if __name__ == "__main__": test_mcore_config_converter() ================================================ FILE: verl_distillation/tests/special_distributed/test_tensor_dict.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os os.environ["NCCL_DEBUG"] = "WARN" import numpy as np import torch import torch.distributed from verl.protocol import DataProto, all_gather_data_proto from verl.utils.distributed import initialize_global_process_group def test_all_gather_data_proto(): device_mesh = torch.distributed.device_mesh.init_device_mesh("cuda", mesh_shape=[2, 2], mesh_dim_names=["dp", "tp"]) global_rank = torch.distributed.get_rank() obs = torch.tensor([[1 * global_rank, 2 * global_rank + 1], [3 * global_rank, 4 * global_rank + 1]]) labels = ["a", "b"] if global_rank % 2 == 0 else ["b", "a"] labels = np.array(labels, dtype=object) data = DataProto.from_dict(tensors={"obs": obs}, non_tensors={"labels": labels}, meta_info={"info": "test_info"}) all_gather_data_proto(data=data, process_group=device_mesh.get_group("dp")) if global_rank == 0: expected_obs = torch.tensor([[0, 1], [0, 1], [2, 5], [6, 9]], device="cuda") expected_labels = ["a", "b", "a", "b"] elif global_rank == 1: expected_obs = torch.tensor([[1, 3], [3, 5], [3, 7], [9, 13]], device="cuda") expected_labels = ["b", "a", "b", "a"] elif global_rank == 2: expected_obs = torch.tensor([[0, 1], [0, 1], [2, 5], [6, 9]], device="cuda") expected_labels = ["a", "b", "a", "b"] elif global_rank == 3: expected_obs = torch.tensor([[1, 3], [3, 5], [3, 7], [9, 13]], device="cuda") expected_labels = ["b", "a", "b", "a"] torch.testing.assert_close(data.batch["obs"], expected_obs, atol=0, rtol=0) assert (data.non_tensor_batch["labels"] == expected_labels).all() assert data.meta_info == {"info": "test_info"} def test_vocab_parallel_entropy(): from megatron.core import parallel_state as mpu from verl.utils.megatron.tensor_parallel import vocab_parallel_entropy from verl.utils.profiler import log_gpu_memory_usage from verl.utils.torch_functional import entropy_from_logits mpu.initialize_model_parallel( tensor_model_parallel_size=2, pipeline_model_parallel_size=1, virtual_pipeline_model_parallel_size=None ) batch_size = 2 seqlen 
= 128 vocab_size = 155136 logits = torch.randn(batch_size * seqlen, vocab_size, device="cuda", requires_grad=True) target = torch.randint(low=0, high=vocab_size, size=(batch_size * seqlen,), device="cuda", dtype=torch.int64) # broadcast across tp torch.distributed.broadcast( logits, mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group() ) torch.distributed.broadcast( target, mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group() ) tp_rank = mpu.get_tensor_model_parallel_rank() vocab_size_per_tp = vocab_size // mpu.get_tensor_model_parallel_world_size() # get the local logits of each tp vocab_parallel_logits = ( logits.clone().detach()[:, tp_rank * vocab_size_per_tp : (tp_rank + 1) * vocab_size_per_tp].requires_grad_() ) logits.grad = None vocab_parallel_logits.grad = None log_gpu_memory_usage("begin") output_entropy = vocab_parallel_entropy(vocab_parallel_logits) log_gpu_memory_usage("after forward") grad_output = torch.randn_like(output_entropy) output_entropy.backward(grad_output) log_gpu_memory_usage("after backward") target_entropy = entropy_from_logits(logits) torch.testing.assert_close(output_entropy, target_entropy) target_entropy.backward(grad_output) torch.testing.assert_close( logits.grad[:, tp_rank * vocab_size_per_tp : (tp_rank + 1) * vocab_size_per_tp], vocab_parallel_logits.grad ) # make sure logits is not altered torch.testing.assert_close( logits[:, tp_rank * vocab_size_per_tp : (tp_rank + 1) * vocab_size_per_tp], vocab_parallel_logits ) if mpu.get_tensor_model_parallel_rank() == 0: print("test_vocab_parallel_entropy passes") mpu.destroy_model_parallel() if __name__ == "__main__": local_rank, rank, world_size = initialize_global_process_group() test_all_gather_data_proto() test_vocab_parallel_entropy() ================================================ FILE: verl_distillation/tests/special_e2e/README.md ================================================ This folder is reserved for end-to-end tests that typically require multiple GPUs. ================================================ FILE: verl_distillation/tests/special_e2e/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: verl_distillation/tests/special_e2e/check_custom_rwd_fn.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
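# NOTE: the vocab-parallel entropy test above relies on the identity
# H(p) = logsumexp(logits) - sum_i(softmax(logits)_i * logit_i),
# which follows from log p_i = logit_i - logsumexp(logits) and sum_i p_i = 1.
# A self-contained reference, under the assumption that entropy_from_logits
# computes per-row Shannon entropy over the last dimension (sketch, not verl's code):
#
#     import torch
#
#     def entropy_from_logits_reference(logits: torch.Tensor) -> torch.Tensor:
#         p = torch.softmax(logits, dim=-1)
#         return torch.logsumexp(logits, dim=-1) - (p * logits).sum(dim=-1)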
import argparse def check_congratulations_in_file(output_file): with open(output_file) as f: output = f.read() success_message = "Congratulations!!! You have called my_reward_function successfully!!!" assert success_message in output, f"Success message of my_reward_function not found in {output_file}" print("Check passes") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--output_file", required=True, type=str) args = parser.parse_args() check_congratulations_in_file(args.output_file) ================================================ FILE: verl_distillation/tests/special_e2e/check_results.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import numpy as np def extract_reward_from_line(line): # TODO: this function needs error handling try: key_vals = line.split(" - ") for key_val in key_vals: key, val = key_val.split(":") if key == "critic/rewards/mean": reward = float(val) return reward return -np.inf except Exception: return -np.inf if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--output_file", required=True, type=str) parser.add_argument("--target", type=float, default=0.2, help="target reward score") args = parser.parse_args() with open(args.output_file) as f: output = f.read().split("\n") best_reward = -np.inf for line in output: if line.startswith("step"): reward = extract_reward_from_line(line) if reward > best_reward: best_reward = reward print(f"Best reward is {best_reward}") assert best_reward > args.target, f"Best reward must be greater than {args.target}. best_reward: {best_reward}" print("Check passes") ================================================ FILE: verl_distillation/tests/special_e2e/envs/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .digit_completion import DigitCompletion __all__ = ["DigitCompletion"] ================================================ FILE: verl_distillation/tests/special_e2e/envs/digit_completion/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from transformers import AutoTokenizer, LlamaConfig from .task import DigitCompletion, generate_ground_truth_response from .tokenizer import CharTokenizer AutoTokenizer.register(LlamaConfig, CharTokenizer, exist_ok=True) __all__ = ["DigitCompletion", "generate_ground_truth_response", "CharTokenizer"] ================================================ FILE: verl_distillation/tests/special_e2e/envs/digit_completion/task.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Task and environment definition for digit completion.""" import numpy as np class DigitCompletion: """ The implementation of a simple digit completion task. The prompt is an arithmetic sequence of numbers with a fixed common difference. The task is to complete the next N numbers. If the max number is exceeded, the next number is taken modulo the max number. For example, - prompt = [1, 2, 3] - N = 5 - max_number = 6 the response should be [4, 5, 6, 7%6, 8%6] = [4, 5, 6, 1, 2] Note that the tokenizer is char-level to increase the difficulty. """ def __init__(self, max_number: int, max_diff: int, max_num_in_response: int, seed=0): """ Args: max_number: the maximum number allowed in the arithmetic sequence max_diff: the maximum diff. The actual common diff will be sampled from [0, max_diff] max_num_in_response: the maximum count of numbers in the response """ super().__init__() self.max_number = max_number self.max_diff = max_diff self.max_num_in_response = max_num_in_response assert self.max_num_in_response < 10 assert self.max_number > 0 assert self.max_diff > 0 self.max_number_length = len(str(max_number)) # prompt format: {num1},{num2}:{max_number},{num_to_complete} self._prompt_length = self.max_number_length * 2 + 4 + self.max_number_length # no negative is allowed self.np_rng = np.random.default_rng(seed=seed) def __str__(self): return ( f"Prompt length: {self.prompt_length}. Response length: {self.response_length}, " f"Max number: {self.max_number}. 
Max diff: {self.max_diff}, " f"Max number in response: {self.max_num_in_response}" ) def get_state(self): return {"rng": self.np_rng} def set_state(self, state): assert "rng" in state, "rng must be inside state" self.np_rng = state["rng"] @property def prompt_length(self): return self._prompt_length @property def response_length(self): # number length + comma length + [EOS] # The actual number times 1.5 to allow 'U' return (self.max_num_in_response * self.max_number_length + (self.max_num_in_response - 1) + 1) * 2 def add(self, a, b): return (a + b) % self.max_number def get_all_prompts(self): all_prompts = [] for first_num in range(self.max_number + 1): for diff in range(0, self.max_diff + 1): second_num = self.add(first_num, diff) for num_to_complete in range(self.max_num_in_response + 1): prompt = str(first_num) + "," + str(second_num) + f":{self.max_number},{num_to_complete}" all_prompts.append(prompt) return all_prompts def sample_str_prompts(self): # step 1: sample initial numbers first_num = self.np_rng.integers(self.max_number + 1) diff = self.np_rng.integers(self.max_diff + 1) second_num = self.add(first_num, diff) num_to_complete = self.np_rng.integers(self.max_num_in_response + 1) prompt = str(first_num) + "," + str(second_num) + f":{self.max_number},{num_to_complete}" return prompt def sample_batch_str_prompts(self, batch_size): str_prompts = [] for _ in range(batch_size): str_prompts.append(self.sample_str_prompts()) return str_prompts def compute_attention_mask(prompts, pad_token_id): mask = np.ones_like(prompts) mask[prompts == pad_token_id] = 0 return mask def compute_position_id_with_mask(mask): return np.clip(np.cumsum(mask, axis=-1) - 1, a_min=0, a_max=None) def generate_ground_truth_response(prompt: str): """Generate ground truth response given a prompt.""" num, info = prompt.split(":") num1, num2 = num.split(",") max_number, num_to_gen = info.split(",") num1 = int(num1) num2 = int(num2) max_number = int(max_number) num_to_gen = int(num_to_gen) diff = (num2 - num1) % max_number results = [] last_num = num2 for _ in range(num_to_gen): curr = (last_num + diff) % max_number results.append(str(curr)) last_num = curr response = ",".join(results) return response def compute_reward(prompt: str, response: str, sequence_reward=1.0): """We compute dense reward here so that we can directly train RL without SFT""" response_length = len(response) ground_truth_response = generate_ground_truth_response(prompt) per_token_reward = sequence_reward / (len(ground_truth_response) + 1) # including [EOS] # pad reward = np.zeros(response_length, dtype=np.float32) # this assumes that each char is a token # assign reward until mismatches ground_truth_idx = 0 for i in range(response_length): if ground_truth_idx == len(ground_truth_response): break ground_truth_response_token = ground_truth_response[ground_truth_idx] response_token = response[i] if ground_truth_response_token == response_token: reward[i] = per_token_reward ground_truth_idx += 1 else: # no matches break return reward, {"ground_truth_response": ground_truth_response} if __name__ == "__main__": task = DigitCompletion(max_number=20, max_diff=3, max_num_in_response=5) print(task.sample_str_prompts()) prompt = "7,8:20,0" response = "" print(compute_reward(prompt, response)) prompt = "7,8:20,0" response = "E000" print(compute_reward(prompt, response)) prompt = "9,10:20,2" response = "11,12,13" print(compute_reward(prompt, response)) ================================================ FILE: 
verl_distillation/tests/special_e2e/envs/digit_completion/tokenizer.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Copied from https://github.com/dariush-bahrami/character-tokenizer/blob/master/charactertokenizer/core.py CharacterTokenizer for Hugging Face Transformers. This is heavily inspired by the CanineTokenizer in the transformers package. """ import json import os from pathlib import Path from typing import Optional, Sequence from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer class CharTokenizer(PreTrainedTokenizer): def __init__(self, characters: Sequence[str], model_max_length: int, chat_template, **kwargs): """Character tokenizer for Hugging Face transformers. Args: characters (Sequence[str]): List of desired characters. Any character which is not included in this list will be replaced by the special unk token "U" with id=3. The special tokens and their corresponding ids are: "S" (sep): 0 "E" (eos): 1 "P" (pad): 2 "U" (unk): 3 An id (starting at 4) will be assigned to each character. model_max_length (int): Model maximum sequence length. """ eos_token_str = "E" sep_token_str = "S" pad_token_str = "P" unk_token_str = "U" self.characters = characters self.model_max_length = model_max_length eos_token = AddedToken(eos_token_str, lstrip=False, rstrip=False) sep_token = AddedToken(sep_token_str, lstrip=False, rstrip=False) pad_token = AddedToken(pad_token_str, lstrip=False, rstrip=False) unk_token = AddedToken(unk_token_str, lstrip=False, rstrip=False) self._vocab_str_to_int = { sep_token_str: 0, eos_token_str: 1, pad_token_str: 2, unk_token_str: 3, **{ch: i + 4 for i, ch in enumerate(characters)}, } self._vocab_int_to_str = {v: k for k, v in self._vocab_str_to_int.items()} super().__init__( eos_token=eos_token, sep_token=sep_token, pad_token=pad_token, unk_token=unk_token, add_prefix_space=False, model_max_length=model_max_length, **kwargs, ) self.chat_template = chat_template @property def vocab_size(self) -> int: return len(self._vocab_str_to_int) def get_vocab(self): return self._vocab_str_to_int def _tokenize(self, text: str) -> list[str]: return list(text) def _convert_token_to_id(self, token: str) -> int: return self._vocab_str_to_int.get(token, self._vocab_str_to_int["U"]) def _convert_id_to_token(self, index: int) -> str: return self._vocab_int_to_str[index] def convert_tokens_to_string(self, tokens): return "".join(tokens) def build_inputs_with_special_tokens( self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None ) -> list[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] result = cls + token_ids_0 + sep if token_ids_1 is not None: result += token_ids_1 + sep return result def get_special_tokens_mask( self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None, already_has_special_tokens: bool = False, ) -> list[int]: if already_has_special_tokens: return
super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True, ) result = [1] + ([0] * len(token_ids_0)) + [1] if token_ids_1 is not None: result += ([0] * len(token_ids_1)) + [1] return result def get_config(self) -> dict: return { "char_ords": [ord(ch) for ch in self.characters], "model_max_length": self.model_max_length, "chat_template": self.chat_template, } @classmethod def from_config(cls, config: dict): cfg = {} cfg["characters"] = [chr(i) for i in config["char_ords"]] cfg["model_max_length"] = config["model_max_length"] cfg["chat_template"] = config["chat_template"] return cls(**cfg) def save_pretrained(self, save_directory: str | os.PathLike, **kwargs): cfg_file = Path(save_directory) / "tokenizer_config.json" cfg = self.get_config() with open(cfg_file, "w") as f: json.dump(cfg, f, indent=4) @classmethod def from_pretrained(cls, save_directory: str | os.PathLike, **kwargs): cfg_file = Path(save_directory) / "tokenizer_config.json" with open(cfg_file) as f: cfg = json.load(f) return cls.from_config(cfg) ================================================ FILE: verl_distillation/tests/special_e2e/generation/run_gen_qwen05.sh ================================================ #!/usr/bin/env bash # Tested with 1 & 4 GPUs set -xeuo pipefail MODEL_ID=${MODEL_ID:-Qwen/Qwen2.5-0.5B-Instruct} NGPUS_PER_NODE=${NGPUS_PER_NODE:-4} OUTPUT_PATH=${OUTPUT_PATH:-$HOME/data/gen/qwen_05_gen_test.parquet} GEN_TP=${GEN_TP:-2} # Default tensor parallel size to 2 python3 -m verl.trainer.main_generation \ trainer.nnodes=1 \ trainer.n_gpus_per_node="${NGPUS_PER_NODE}" \ data.path="${HOME}/data/gsm8k/test.parquet" \ data.prompt_key=prompt \ data.n_samples=1 \ data.output_path="${OUTPUT_PATH}" \ model.path="${MODEL_ID}" \ +model.trust_remote_code=True \ rollout.temperature=1.0 \ rollout.top_k=50 \ rollout.top_p=0.7 \ rollout.prompt_length=2048 \ rollout.response_length=1024 \ rollout.tensor_model_parallel_size="${GEN_TP}" \ rollout.gpu_memory_utilization=0.8 ================================================ FILE: verl_distillation/tests/special_e2e/generation/run_gen_qwen05_server.sh ================================================ #!/usr/bin/env bash # Tested with 1 & 4 GPUs set -xeuo pipefail MODEL_ID=${MODEL_ID:-$HOME/models/Qwen/Qwen2.5-0.5B-Instruct} NGPUS_PER_NODE=${NGPUS_PER_NODE:-8} OUTPUT_PATH=${OUTPUT_PATH:-$HOME/data/gen/qwen_05_gen_test.parquet} GEN_TP=${GEN_TP:-2} # Default tensor parallel size to 2 python3 -m verl.trainer.main_generation_server \ trainer.nnodes=1 \ trainer.n_gpus_per_node="${NGPUS_PER_NODE}" \ actor_rollout_ref.model.path="${MODEL_ID}" \ actor_rollout_ref.model.trust_remote_code=True \ actor_rollout_ref.rollout.temperature=1.0 \ actor_rollout_ref.rollout.top_k=50 \ actor_rollout_ref.rollout.top_p=0.7 \ actor_rollout_ref.rollout.prompt_length=2048 \ actor_rollout_ref.rollout.response_length=1024 \ actor_rollout_ref.rollout.tensor_model_parallel_size="${GEN_TP}" \ actor_rollout_ref.rollout.gpu_memory_utilization=0.9 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.n=4 \ data.train_files="${HOME}/data/gsm8k/test.parquet" \ data.prompt_key=prompt \ +data.output_path="${OUTPUT_PATH}" \ ================================================ FILE: verl_distillation/tests/special_e2e/ppo_trainer/expert_parallel/qwen2moe_minimal.json ================================================ { "num_hidden_layers": 2, "max_window_layers": 2 } ================================================ FILE: 
verl_distillation/tests/special_e2e/ppo_trainer/run_function_reward.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail NUM_GPUS=${NUM_GPUS:-8} MODEL_ID=${MODEL_ID:-Qwen/Qwen2.5-0.5B} MODEL_PATH=${MODEL_PATH:-${HOME}/models/${MODEL_ID}} #huggingface-cli download "${MODEL_ID}" --local-dir "${MODEL_PATH}" TRAIN_FILES=${TRAIN_FILES:-$HOME/data/gsm8k/train.parquet} VAL_FILES=${VAL_FILES:-$HOME/data/gsm8k/test.parquet} MAX_PROMPT_LEN=${MAX_PROMPT_LEN:-512} MAX_RESPONSE_LEN=${MAX_RESPONSE_LEN:-512} ENGINE=${ENGINE:-vllm} ROLLOUT_MODE=${ROLLOUT_MODE:-sync} RETURN_RAW_CHAT="False" SKIP_TOKENIZER_INIT=${SKIP_TOKENIZER_INIT:-False} if [ "$ROLLOUT_MODE" = "async" ]; then RETURN_RAW_CHAT="True" SKIP_TOKENIZER_INIT="True" fi GPU_MEMORY_UTILIZATION=${GPU_MEMORY_UTILIZATION:-0.8} ACTOR_FSDP_PARAM_OFFLOAD=${ACTOR_FSDP_PARAM_OFFLOAD:-False} ACTOR_FSDP_OPTIMIZER_OFFLOAD=${ACTOR_FSDP_OPTIMIZER_OFFLOAD:-False} REF_FSDP_PARAM_OFFLOAD=${REF_FSDP_PARAM_OFFLOAD:-True} RM_PAD=${RM_PAD:-True} FUSED_KERNELS=${FUSED_KERNELS:-False} FUSED_KERNEL_BACKEND=${FUSED_KERNEL_BACKEND:-torch} # or 'triton' for triton backend ADV_ESTIMATOR=${ADV_ESTIMATOR:-gae} LOSS_MODE=${LOSS_MODE:-vanilla} USE_KL=${USE_KL:-False} CUSTOM_REWARD_FN=${CUSTOM_REWARD_FN:-False} ENABLE_CHUNKED_PREFILL=${ENABLE_CHUNKED_PREFILL:-True} # For vLLM VLM placeholder issue: https://github.com/vllm-project/vllm/issues/15185 STRATEGY=${STRATEGY:-fsdp} # LoRA config LORA_RANK=${LORA_RANK:-0} LORA_ALPHA=${LORA_ALPHA:-${LORA_RANK}} LORA_TARGET=${LORA_TARGET:-"all-linear"} LORA_EXCLUDE=${LORA_EXCLUDE:-"DONT_EXCLUDE"} USE_SHM=${USE_SHM:-False} LOAD_FORMAT=${LOAD_FORMAT:-dummy} LAYERED_SUMMON=${LAYERED_SUMMON:-False} # Validation VAL_BEFORE_TRAIN=${VAL_BEFORE_TRAIN:-False} TEST_FREQ=${TEST_FREQ:--1} # Save & Resume RESUME_MODE=${RESUME_MODE:-disable} SAVE_FREQ=${SAVE_FREQ:--1} TOTAL_TRAIN_STEPS=${TOTAL_TRAIN_STEPS:-1} # whether to save hf_model SAVE_HF_MODEL=${SAVE_HF_MODEL:-False} FSDP_SIZE=${FSDP_SIZE:--1} SP_SIZE=${SP_SIZE:-1} if [ "${SAVE_HF_MODEL}" = "True" ]; then CHECKPOINT_CONTENTS="['model','hf_model','optimizer','extra']" else CHECKPOINT_CONTENTS="['model','optimizer','extra']" fi train_traj_micro_bsz_per_gpu=2 # b n_resp_per_prompt=4 # g train_traj_micro_bsz=$((train_traj_micro_bsz_per_gpu * NUM_GPUS)) # b * n train_traj_mini_bsz=$((train_traj_micro_bsz * 2)) # 2 * b * n train_prompt_mini_bsz=$((train_traj_mini_bsz * n_resp_per_prompt)) # 2 * b * n / g train_prompt_bsz=$((train_prompt_mini_bsz * 2)) # 4 * b * n / g reward_fn_name=null reward_fn_file_path=null output_file="$(pwd)/output.txt" if [ "${CUSTOM_REWARD_FN}" = "True" ]; then reward_fn_name="my_reward_function" reward_fn_file_path="$(pwd)/my_reward_function.py" rm -rf "${reward_fn_file_path}" cat <<EOF > "$reward_fn_file_path" def ${reward_fn_name}(data_source, solution_str, ground_truth, extra_info=None): print(f"Congratulations!!! 
You have called ${reward_fn_name} successfully!!!") return 0.1 EOF rm -rf "${output_file}" fi exp_name="${VERL_EXP_NAME:-$(basename "${MODEL_ID,,}")-function-reward-minimal}" python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator="${ADV_ESTIMATOR}" \ data.train_files="${TRAIN_FILES}" \ data.val_files="${VAL_FILES}" \ data.train_batch_size="${train_prompt_bsz}" \ data.max_prompt_length="${MAX_PROMPT_LEN}" \ data.max_response_length="${MAX_RESPONSE_LEN}" \ data.return_raw_chat=${RETURN_RAW_CHAT} \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.model.use_shm=${USE_SHM} \ actor_rollout_ref.model.lora_rank=${LORA_RANK} \ actor_rollout_ref.model.lora_alpha=${LORA_ALPHA} \ actor_rollout_ref.model.target_modules=${LORA_TARGET} \ actor_rollout_ref.model.exclude_modules=${LORA_EXCLUDE} \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding="${RM_PAD}" \ actor_rollout_ref.model.use_fused_kernels=${FUSED_KERNELS} \ actor_rollout_ref.model.fused_kernel_options.impl_backend=${FUSED_KERNEL_BACKEND} \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=${train_traj_micro_bsz_per_gpu} \ actor_rollout_ref.actor.strategy=${STRATEGY} \ actor_rollout_ref.actor.fsdp_config.param_offload=${ACTOR_FSDP_PARAM_OFFLOAD} \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=${ACTOR_FSDP_OPTIMIZER_OFFLOAD} \ actor_rollout_ref.actor.fsdp_config.fsdp_size=${FSDP_SIZE} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size="${SP_SIZE}" \ actor_rollout_ref.actor.checkpoint.save_contents=${CHECKPOINT_CONTENTS} \ actor_rollout_ref.actor.use_kl_loss="${USE_KL}" \ actor_rollout_ref.actor.policy_loss.loss_mode="${LOSS_MODE}" \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=${train_traj_micro_bsz_per_gpu} \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name="${ENGINE}" \ actor_rollout_ref.rollout.mode="${ROLLOUT_MODE}" \ actor_rollout_ref.rollout.load_format=${LOAD_FORMAT} \ actor_rollout_ref.rollout.layered_summon=${LAYERED_SUMMON} \ actor_rollout_ref.rollout.skip_tokenizer_init="${SKIP_TOKENIZER_INIT}" \ actor_rollout_ref.rollout.gpu_memory_utilization="${GPU_MEMORY_UTILIZATION}" \ actor_rollout_ref.rollout.enable_chunked_prefill="${ENABLE_CHUNKED_PREFILL}" \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=${train_traj_micro_bsz_per_gpu} \ actor_rollout_ref.ref.fsdp_config.param_offload="${REF_FSDP_PARAM_OFFLOAD}" \ critic.optim.lr=1e-5 \ critic.model.use_remove_padding="${RM_PAD}" \ critic.model.path="${MODEL_PATH}" \ critic.model.enable_gradient_checkpointing=False \ critic.ppo_micro_batch_size_per_gpu=${train_traj_micro_bsz_per_gpu} \ critic.model.fsdp_config.param_offload=False \ critic.model.fsdp_config.optimizer_offload=False \ custom_reward_function.path="${reward_fn_file_path}"\ custom_reward_function.name="${reward_fn_name}"\ algorithm.use_kl_in_reward="${USE_KL}" \ algorithm.kl_penalty=kl \ algorithm.kl_ctrl.kl_coef=0.001 \ trainer.critic_warmup=0 \ trainer.logger=console \ trainer.project_name='verl-test' \ trainer.experiment_name="${exp_name}" \ trainer.nnodes=1 \ trainer.n_gpus_per_node="${NUM_GPUS}" \ trainer.val_before_train="${VAL_BEFORE_TRAIN}" \ trainer.test_freq="${TEST_FREQ}" \ trainer.save_freq="${SAVE_FREQ}" \ trainer.resume_mode="${RESUME_MODE}" \ trainer.total_epochs=2 \ trainer.device=cuda \ trainer.total_training_steps="${TOTAL_TRAIN_STEPS}" $@ \ | tee "${output_file}" if [ "${CUSTOM_REWARD_FN}" = "True" ]; then 
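    # Verify that the generated custom reward function was actually invoked during
    # training: check_custom_rwd_fn.py asserts its success message appears in the
    # tee'd log, and a non-zero exit code from the check is propagated below.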
python3 tests/special_e2e/check_custom_rwd_fn.py --output_file="${output_file}" check_exit_code=$? rm -rf "${reward_fn_file_path}" rm -rf "${output_file}" # Return the exit code of check_custom_rwd_fn.py if it fails if [ $check_exit_code -ne 0 ]; then exit $check_exit_code fi fi ================================================ FILE: verl_distillation/tests/special_e2e/ppo_trainer/run_model_reward.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail NUM_GPUS=${NUM_GPUS:-8} MODEL_ID=${MODEL_ID:-Qwen/Qwen2.5-0.5B} MODEL_PATH=${MODEL_PATH:-${HOME}/models/${MODEL_ID}} #huggingface-cli download "${MODEL_ID}" --local-dir "${MODEL_PATH}" TRAIN_FILES=${TRAIN_FILES:-$HOME/data/gsm8k/train.parquet} VAL_FILES=${VAL_FILES:-$HOME/data/gsm8k/test.parquet} RM_PAD=${RM_PAD:-True} FUSED_KERNELS=${FUSED_KERNELS:-False} FUSED_KERNEL_BACKEND=${FUSED_KERNEL_BACKEND:-torch} # or 'triton' for triton backend SP_SIZE=${SP_SIZE:-1} SEQ_BALANCE=${SEQ_BALANCE:-False} LIGER=${LIGER:-False} # Validation VAL_BEFORE_TRAIN=${VAL_BEFORE_TRAIN:-False} TEST_FREQ=${TEST_FREQ:--1} # Save & Resume RESUME_MODE=${RESUME_MODE:-disable} SAVE_FREQ=${SAVE_FREQ:--1} TOTAL_TRAIN_STEPS=${TOTAL_TRAIN_STEPS:-1} train_traj_micro_bsz_per_gpu=2 # b n_resp_per_prompt=4 # g train_traj_micro_bsz=$((train_traj_micro_bsz_per_gpu * NUM_GPUS)) # b * n train_traj_mini_bsz=$((train_traj_micro_bsz * 2)) # 2 * b * n train_prompt_mini_bsz=$((train_traj_mini_bsz * n_resp_per_prompt)) # 2 * b * n / g train_prompt_bsz=$((train_prompt_mini_bsz * 2)) # 4 * b * n / g train_max_token_num_per_gpu=32768 infer_max_token_num_per_gpu=32768 exp_name="$(basename "${MODEL_ID,,}")-model-reward-minimal" python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=gae \ data.train_files="${TRAIN_FILES}" \ data.val_files="${VAL_FILES}" \ data.train_batch_size=${train_prompt_bsz} \ data.max_prompt_length=512 \ data.max_response_length=512 \ data.return_raw_chat=True \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.model.use_liger="${LIGER}" \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding="${RM_PAD}" \ actor_rollout_ref.model.use_fused_kernels=${FUSED_KERNELS} \ actor_rollout_ref.model.fused_kernel_options.impl_backend=${FUSED_KERNEL_BACKEND} \ actor_rollout_ref.actor.optim.lr_warmup_steps_ratio=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.use_dynamic_bsz="${SEQ_BALANCE}" \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${train_max_token_num_per_gpu} \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=${train_traj_micro_bsz_per_gpu} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size="${SP_SIZE}" \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_max_token_num_per_gpu} \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=${train_traj_micro_bsz_per_gpu} \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.8 \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_max_token_num_per_gpu} \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=${train_traj_micro_bsz_per_gpu} \ critic.optim.lr=1e-5 \ critic.ulysses_sequence_parallel_size="${SP_SIZE}" \ critic.model.use_remove_padding="${RM_PAD}" \ critic.optim.lr_warmup_steps_ratio=0.05 \ 
    critic.model.path="${MODEL_PATH}" \
    critic.model.enable_gradient_checkpointing=False \
    critic.use_dynamic_bsz="${SEQ_BALANCE}" \
    critic.ppo_max_token_len_per_gpu=${train_max_token_num_per_gpu} \
    critic.ppo_micro_batch_size_per_gpu=${train_traj_micro_bsz_per_gpu} \
    critic.model.fsdp_config.param_offload=False \
    critic.model.fsdp_config.optimizer_offload=False \
    reward_model.enable=True \
    reward_model.ulysses_sequence_parallel_size="${SP_SIZE}" \
    reward_model.model.path="${MODEL_PATH}" \
    reward_model.model.use_remove_padding="${RM_PAD}" \
    reward_model.model.fsdp_config.param_offload=True \
    reward_model.use_dynamic_bsz="${SEQ_BALANCE}" \
    reward_model.forward_max_token_len_per_gpu=${infer_max_token_num_per_gpu} \
    reward_model.micro_batch_size_per_gpu=${train_traj_micro_bsz_per_gpu} \
    algorithm.use_kl_in_reward=False \
    trainer.critic_warmup=0 \
    trainer.logger=console \
    trainer.project_name='verl-test' \
    trainer.experiment_name="${exp_name}" \
    trainer.nnodes=1 \
    trainer.n_gpus_per_node="${NUM_GPUS}" \
    trainer.val_before_train="${VAL_BEFORE_TRAIN}" \
    trainer.test_freq="${TEST_FREQ}" \
    trainer.save_freq="${SAVE_FREQ}" \
    trainer.resume_mode="${RESUME_MODE}" \
    trainer.total_epochs=2 \
    trainer.total_training_steps="${TOTAL_TRAIN_STEPS}" $@


================================================
FILE: verl_distillation/tests/special_e2e/ppo_trainer/run_single_gpu.sh
================================================
PYTHONUNBUFFERED=1 python3 -m verl.trainer.main_ppo \
    data.train_files=$HOME/data/gsm8k/train.parquet \
    data.val_files=$HOME/data/gsm8k/test.parquet \
    data.train_batch_size=256 \
    data.max_prompt_length=512 \
    data.max_response_length=256 \
    actor_rollout_ref.model.path=Qwen/Qwen2.5-0.5B-Instruct \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.actor.ppo_mini_batch_size=64 \
    actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \
    actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=8 \
    actor_rollout_ref.rollout.tensor_model_parallel_size=1 \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \
    actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \
    critic.optim.lr=1e-5 \
    critic.model.path=Qwen/Qwen2.5-0.5B-Instruct \
    critic.ppo_micro_batch_size_per_gpu=4 \
    algorithm.kl_ctrl.kl_coef=0.001 \
    trainer.logger=console \
    trainer.val_before_train=False \
    trainer.n_gpus_per_node=1 \
    trainer.nnodes=1 \
    actor_rollout_ref.rollout.name=hf \
    trainer.total_training_steps=2


================================================
FILE: verl_distillation/tests/special_e2e/ppo_trainer/run_single_gpu_with_engine.sh
================================================
PYTHONUNBUFFERED=1 python3 -m verl.trainer.main_ppo \
    data.train_files=$HOME/data/gsm8k/train.parquet \
    data.val_files=$HOME/data/gsm8k/test.parquet \
    data.train_batch_size=256 \
    data.max_prompt_length=512 \
    data.max_response_length=256 \
    actor_rollout_ref.model.path=Qwen/Qwen2.5-0.5B-Instruct \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.actor.ppo_mini_batch_size=64 \
    actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \
    actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=8 \
    actor_rollout_ref.rollout.tensor_model_parallel_size=1 \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \
    actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \
    critic.optim.lr=1e-5 \
    critic.model.path=Qwen/Qwen2.5-0.5B-Instruct \
    critic.ppo_micro_batch_size_per_gpu=4 \
    algorithm.kl_ctrl.kl_coef=0.001 \
    trainer.logger=['console'] \
    trainer.val_before_train=False \
    trainer.n_gpus_per_node=1 \
    trainer.nnodes=1 \
    actor_rollout_ref.rollout.name=hf \
    trainer.use_legacy_worker_impl=disable \
    trainer.total_training_steps=2


================================================
FILE: verl_distillation/tests/special_e2e/run_dapo.sh
================================================
#!/usr/bin/env bash
set -xeuo pipefail

NUM_GPUS=${NUM_GPUS:-8}

MODEL_ID=${MODEL_ID:-Qwen/Qwen2.5-0.5B-Instruct}
MODEL_PATH=${MODEL_PATH:-${HOME}/models/${MODEL_ID}}
#huggingface-cli download "${MODEL_ID}" --local-dir "${MODEL_PATH}"

adv_estimator=grpo

kl_coef=0.0
use_kl_in_reward=False
use_kl_loss=False
kl_loss_coef=0.0

clip_ratio_low=0.2
clip_ratio_high=0.28

max_prompt_length=1024
max_response_length=2048
enable_overlong_buffer=True
overlong_buffer_len=128
overlong_penalty_factor=1.0

loss_agg_mode="token-mean"

enable_filter_groups=True
filter_groups_metric=seq_reward
max_num_gen_batches=10

train_traj_micro_bsz_per_gpu=2 # b
n_resp_per_prompt=4 # g

train_traj_micro_bsz=$((train_traj_micro_bsz_per_gpu * NUM_GPUS)) # b * n
train_traj_mini_bsz=$((train_traj_micro_bsz * 2)) # 2 * b * n
train_prompt_mini_bsz=$((train_traj_mini_bsz / n_resp_per_prompt)) # 2 * b * n / g
train_prompt_bsz=$((train_prompt_mini_bsz * 2)) # 4 * b * n / g
gen_prompt_bsz=$((train_prompt_bsz * 4))

exp_name="$(basename "${MODEL_ID,,}")-dapo-minimal"

python3 -m recipe.dapo.main_dapo \
    data.train_files="${HOME}/data/gsm8k/train.parquet" \
    data.val_files="${HOME}/data/gsm8k/test.parquet" \
    reward_model.reward_manager=dapo \
    algorithm.adv_estimator=${adv_estimator} \
    algorithm.use_kl_in_reward=${use_kl_in_reward} \
    algorithm.kl_ctrl.kl_coef=${kl_coef} \
    actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \
    actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \
    actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \
    actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \
    data.max_prompt_length=${max_prompt_length} \
    data.max_response_length=${max_response_length} \
    reward_model.overlong_buffer.enable=${enable_overlong_buffer} \
    reward_model.overlong_buffer.len=${overlong_buffer_len} \
    reward_model.overlong_buffer.penalty_factor=${overlong_penalty_factor} \
    actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \
    data.train_batch_size=${train_prompt_bsz} \
    data.gen_batch_size=${gen_prompt_bsz} \
    algorithm.filter_groups.enable=${enable_filter_groups} \
    algorithm.filter_groups.metric=${filter_groups_metric} \
    algorithm.filter_groups.max_num_gen_batches=${max_num_gen_batches} \
    actor_rollout_ref.model.path="${MODEL_PATH}" \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.model.use_remove_padding=True \
    actor_rollout_ref.model.use_fused_kernels=True \
    actor_rollout_ref.rollout.n=${n_resp_per_prompt} \
    actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \
    actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=${train_traj_micro_bsz_per_gpu} \
    actor_rollout_ref.actor.fsdp_config.param_offload=False \
    actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
    actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=${train_traj_micro_bsz_per_gpu} \
    actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
    actor_rollout_ref.rollout.name=vllm \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.8 \
    actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=${train_traj_micro_bsz_per_gpu} \
    actor_rollout_ref.ref.fsdp_config.param_offload=True \
    trainer.logger=console \
    trainer.project_name='verl-test' \
    trainer.experiment_name="${exp_name}" \
    trainer.n_gpus_per_node=${NUM_GPUS} \
    trainer.nnodes=1 \
    trainer.save_freq=-1 \
    trainer.total_epochs=2 \
trainer.resume_mode=disable \ trainer.val_before_train=False \ trainer.total_training_steps=1 $@ ================================================ FILE: verl_distillation/tests/special_e2e/run_fully_async_policy.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail # Test script for fully_async_policy E2E regression testing # This script runs fully async PPO training with both FSDP2 and Megatron backends # to ensure the asynchronous training mechanism works correctly NUM_GPUS=${NUM_GPUS:-8} ACTOR_STRATEGY=${ACTOR_STRATEGY:-"fsdp2"} # fsdp2 or megatron # Download model if not exists MODEL_ID=${MODEL_ID:-Qwen/Qwen2.5-0.5B-Instruct} MODEL_PATH=${MODEL_PATH:-${HOME}/models/${MODEL_ID}} huggingface-cli download "${MODEL_ID}" --local-dir "${MODEL_PATH}" rollout_mode="async" rollout_name="vllm" # sglang or vllm if [ "$rollout_mode" = "async" ]; then export VLLM_USE_V1=1 return_raw_chat="True" fi # Algorithm parameters adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 # Response length parameters max_prompt_length=1024 max_response_length=2048 enable_overlong_buffer=True overlong_buffer_len=128 overlong_penalty_factor=1.0 # Training parameters loss_agg_mode="token-mean" # Temperature parameters temperature=1.0 top_p=1.0 top_k=-1 val_top_p=0.7 # Fully async specific parameters n_gpus_rollout=4 n_gpus_training=4 train_prompt_bsz=0 gen_prompt_bsz=1 n_resp_per_prompt=16 train_prompt_mini_bsz=16 total_rollout_steps=$(((128))) test_freq=-1 staleness_threshold=0.1 trigger_parameter_sync_step=4 partial_rollout=True exp_name="$(basename "${MODEL_ID,,}")-fully-async-policy-${ACTOR_STRATEGY}-minimal" echo "Running fully_async_policy with ${ACTOR_STRATEGY} strategy" echo "Total GPUs: ${NUM_GPUS}, Rollout GPUs: ${n_gpus_rollout}, Training GPUs: ${n_gpus_training}" # Common parameters for both FSDP2 and Megatron common_params=( data.train_files="${HOME}/data/gsm8k/train.parquet" data.val_files="${HOME}/data/gsm8k/test.parquet" data.prompt_key=prompt data.truncation='left' data.max_prompt_length=${max_prompt_length} data.max_response_length=${max_response_length} data.train_batch_size=${train_prompt_bsz} data.gen_batch_size=${gen_prompt_bsz} data.return_raw_chat=${return_raw_chat} actor_rollout_ref.rollout.n=${n_resp_per_prompt} actor_rollout_ref.rollout.calculate_log_probs=True algorithm.adv_estimator=${adv_estimator} algorithm.use_kl_in_reward=${use_kl_in_reward} algorithm.kl_ctrl.kl_coef=${kl_coef} actor_rollout_ref.hybrid_engine=False actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} actor_rollout_ref.actor.clip_ratio_c=10.0 actor_rollout_ref.model.path="${MODEL_PATH}" actor_rollout_ref.actor.optim.lr=1e-6 actor_rollout_ref.actor.optim.lr_warmup_steps=-1 actor_rollout_ref.actor.optim.weight_decay=0.1 actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} actor_rollout_ref.actor.entropy_coeff=0 actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} actor_rollout_ref.rollout.gpu_memory_utilization=0.80 actor_rollout_ref.rollout.temperature=${temperature} actor_rollout_ref.rollout.top_p=${top_p} actor_rollout_ref.rollout.top_k=${top_k} actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} 
actor_rollout_ref.rollout.val_kwargs.do_sample=True actor_rollout_ref.rollout.val_kwargs.n=1 actor_rollout_ref.rollout.enable_chunked_prefill=True actor_rollout_ref.rollout.name=${rollout_name} actor_rollout_ref.rollout.mode=${rollout_mode} reward_model.reward_manager=dapo +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} +reward_model.reward_kwargs.overlong_buffer_cfg.log=False +reward_model.reward_kwargs.max_resp_len=${max_response_length} trainer.logger=['console'] trainer.project_name='verl-test-fully-async' trainer.experiment_name="${exp_name}" trainer.val_before_train=True trainer.save_freq=-1 trainer.resume_mode=disable trainer.nnodes=1 trainer.n_gpus_per_node=${n_gpus_training} rollout.nnodes=1 rollout.n_gpus_per_node=${n_gpus_rollout} rollout.total_rollout_steps=${total_rollout_steps} rollout.total_epochs=2 rollout.test_freq=${test_freq} # Fully async specific configurations async_training.staleness_threshold=${staleness_threshold} async_training.partial_rollout="${partial_rollout}" async_training.trigger_parameter_sync_step="${trigger_parameter_sync_step}" ) if [ "${ACTOR_STRATEGY}" == "fsdp2" ]; then echo "Running fully async training with FSDP2 strategy..." # FSDP2 specific parameters gen_tp=1 sp_size=1 fsdp_size=1 ref_offload=True actor_offload=False python3 -m recipe.fully_async_policy.fully_async_main \ "${common_params[@]}" \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.strategy=fsdp2 \ critic.strategy=fsdp2 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.use_dynamic_bsz=True \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=True \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=True \ actor_rollout_ref.actor.fsdp_config.param_offload=${actor_offload} \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=${actor_offload} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.ref.fsdp_config.param_offload=${ref_offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.actor.fsdp_config.fsdp_size=${fsdp_size} $@ elif [ "${ACTOR_STRATEGY}" == "megatron" ]; then echo "Running fully async training with Megatron strategy..." 
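    # A rough sanity check of the Megatron layout below (assuming the
    # n_gpus_training=4 / n_gpus_rollout=4 split configured above): train_tp=1
    # with train_pp=2 forms a 1x2 model-parallel grid, leaving a data-parallel
    # degree of 4 / (1 * 2) = 2 on the training side, while gen_tp=2 shards
    # each vLLM rollout engine across 2 of the 4 rollout GPUs.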
# Megatron specific parameters gen_tp=2 train_tp=1 train_pp=2 ref_offload=True actor_offload=False python3 -m recipe.fully_async_policy.fully_async_main \ --config-path=config \ --config-name='fully_async_ppo_megatron_trainer.yaml' \ "${common_params[@]}" \ actor_rollout_ref.actor.strategy=megatron \ critic.strategy=megatron \ actor_rollout_ref.actor.optim.lr_decay_steps=10000000 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=2 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.actor.megatron.param_offload=${actor_offload} \ actor_rollout_ref.actor.megatron.optimizer_offload=${actor_offload} \ actor_rollout_ref.actor.megatron.grad_offload=${actor_offload} \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=${train_pp} \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=${train_tp} \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=${train_pp} \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=${train_tp} \ actor_rollout_ref.ref.megatron.param_offload=${ref_offload} $@ else echo "Error: Unknown strategy ${ACTOR_STRATEGY}. Please use 'fsdp2' or 'megatron'" exit 1 fi echo "Fully async policy E2E test completed successfully with ${ACTOR_STRATEGY} strategy" ================================================ FILE: verl_distillation/tests/special_e2e/run_genrm_remote.sh ================================================ #!/usr/bin/env bash export no_proxy="localhost,127.0.0.1" set -x # Launch a vllm server CUDA_VISIBLE_DEVICES=0 vllm serve $HOME/models/verl-team/GenRM-CI-Test-1.5B \ --served_model_name genrm-demo --host localhost --port 30000 > /dev/null & SERVER_PID=$! # kill server when script exits cleanup() { echo "Cleaning up..." kill $SERVER_PID 2>/dev/null || true wait $SERVER_PID 2>/dev/null || true echo "Cleanup done" } trap cleanup EXIT # wait for server to start wait_for_server() { local max_attempts=60 local attempt=0 local sleep_time=10 while [ $attempt -lt $max_attempts ]; do if curl -s "http://localhost:30000/health" >/dev/null; then echo "Server is up and running!" return 0 fi echo "Waiting for server to start... (attempt $((attempt+1))/$max_attempts)" sleep $sleep_time ((attempt++)) done echo "Error: Failed to start server after $max_attempts attempts" >&2 return 1 } if ! 
wait_for_server; then exit 1 fi CUDA_VISIBLE_DEVICES=4,5,6,7 python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=${HOME}/data/gsm8k/train.parquet \ data.val_files=${HOME}/data/gsm8k/test.parquet \ data.train_batch_size=256 \ data.max_prompt_length=1024 \ data.max_response_length=2048 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=$HOME/models/Qwen/Qwen2.5-0.5B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=128 \ actor_rollout_ref.actor.use_dynamic_bsz=True \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.rollout.tensor_model_parallel_size=1 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.8 \ actor_rollout_ref.rollout.n=4 \ algorithm.use_kl_in_reward=False \ reward_model.reward_manager=batch \ custom_reward_function.path=recipe/genrm_remote/reward_function.py \ custom_reward_function.name=compute_score_batch \ trainer.critic_warmup=0 \ trainer.logger=console \ trainer.project_name='verl-test' \ trainer.experiment_name='qwen2.5-0.5b-gen-rm' \ trainer.n_gpus_per_node=4 \ trainer.val_before_train=False \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.total_epochs=10 \ trainer.resume_mode='disable' \ trainer.total_training_steps=1 ================================================ FILE: verl_distillation/tests/special_e2e/run_geo3k_fsdp_sgl_multiturn_w_tool.sh ================================================ # run on 8xH100 # make sure your current working directory is the root of the project set -x #huggingface-cli download Qwen/Qwen2.5-VL-3B-Instruct --local-dir $HOME/models/Qwen/Qwen2.5-VL-3B-Instruct ulimit -n 65535 PROJECT_DIR="$(pwd)" CONFIG_PATH="$PROJECT_DIR/examples/sglang_multiturn/config" FSDP_STRATEGY=${FSDP_STRATEGY:-fsdp} python3 -m verl.trainer.main_ppo \ --config-path="$CONFIG_PATH" \ --config-name='geo3k_multiturn_grpo' \ algorithm.adv_estimator=grpo \ data.train_batch_size=64 \ data.max_prompt_length=2048 \ data.max_response_length=2048 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.return_raw_chat=True \ actor_rollout_ref.model.path=$HOME/models/Qwen/Qwen2.5-VL-3B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=64 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=8 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.strategy=$FSDP_STRATEGY \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=8 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=sglang \ actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \ actor_rollout_ref.rollout.n=8 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=8 \ actor_rollout_ref.ref.strategy=$FSDP_STRATEGY \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger=console \ 
trainer.project_name='geo3k_async_rl' \ trainer.experiment_name=qwen2.5-vl-3b_function_rm-geo3k-sgl-multi-w-tool-$FSDP_STRATEGY-rebased-0619-verify-n8 \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=-1 \ data.train_files=$HOME/data/geo3k_verl_sgl_multi_turn_preprocessed/train.parquet \ data.val_files=$HOME/data/geo3k_verl_sgl_multi_turn_preprocessed/test.parquet \ actor_rollout_ref.rollout.multi_turn.tool_config_path="$PROJECT_DIR/examples/sglang_multiturn/config/tool_config/geo3k_tool_config.yaml" \ trainer.val_before_train=False \ trainer.total_training_steps=1 $@ ================================================ FILE: verl_distillation/tests/special_e2e/run_grpo_lora_with_merge.sh ================================================ #!/usr/bin/env bash # # An e2e test script for testing the GRPO LoRA training process # and processing the generated checkpoint using the merge_model.py script. set -xeuo pipefail MODEL_ID=${MODEL_ID:-Qwen/Qwen2.5-0.5B} MODEL_PATH=${MODEL_PATH:-${HOME}/models/${MODEL_ID}} if [ ! -d "$MODEL_PATH" ]; then echo "Downloading model to ${MODEL_PATH}..." # huggingface-cli download "${MODEL_ID}" --local-dir "${MODEL_PATH}" else echo "Model directory ${MODEL_PATH} already exists, skip downloading." fi BATCH_SIZE=16 EXP_NAME="qwen2.5_0.5b_grpo_lora" # step 1. train model with grpo-lora for 1 step python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=${BATCH_SIZE} \ data.max_prompt_length=512 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.shuffle=False \ actor_rollout_ref.model.path=${MODEL_PATH} \ actor_rollout_ref.model.use_shm=True \ actor_rollout_ref.model.lora_rank=64 \ actor_rollout_ref.model.lora_alpha=32 \ actor_rollout_ref.actor.optim.lr=3e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=${BATCH_SIZE} \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=1 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=40 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.rollout.load_format=safetensors \ actor_rollout_ref.rollout.layered_summon=True \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=40 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_gsm8k' \ trainer.experiment_name=${EXP_NAME} \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.total_training_steps=1 \ trainer.save_freq=1 \ trainer.test_freq=5 \ trainer.total_epochs=1 $@ # step 2. merge model python3 -m verl.model_merger merge \ --backend fsdp \ --local_dir checkpoints/verl_grpo_example_gsm8k/${EXP_NAME}/global_step_1/actor/ \ --target_dir checkpoints/verl_grpo_example_gsm8k/${EXP_NAME}/global_step_1/actor/hf # step 3. 
assert # make sure adapter_model.safetensors exists and its size is larger than 1MB file_path="checkpoints/verl_grpo_example_gsm8k/${EXP_NAME}/global_step_1/actor/hf/lora_adapter/adapter_model.safetensors" if [ ! -f "$file_path" ]; then echo "Error: File $file_path does not exist!" exit 1 fi file_size=$(stat -c %s "$file_path") min_size_mb=1 min_size=$((min_size_mb * 1024 * 1024)) # 1MB = 1048576 bytes if [ "$file_size" -lt "$min_size" ]; then echo "Error: File $file_path is too small! Current size: $((file_size/1024))KB, Required: ${min_size_mb}MB" exit 1 fi echo "Check passed: File exists and size is $(($file_size/1024/1024))MB" exit 0 ================================================ FILE: verl_distillation/tests/special_e2e/run_gsm8k_fsdp_sgl_multiturn_sf_tool.sh ================================================ # run on 8xH20 # make sure your current working directory is the root of the project set -x export PYTHONUNBUFFERED=1 export RAY_DEDUP_LOGS=0 export RUST_BACKTRACE=1 export HYDRA_FULL_ERROR=1 ulimit -n 65535 PROJECT_DIR="$(pwd)" CONFIG_PATH="$PROJECT_DIR/examples/sglang_multiturn/config" python3 -m verl.trainer.main_ppo \ --config-path="$CONFIG_PATH" \ --config-name='gsm8k_multiturn_sf_grpo' \ algorithm.adv_estimator=grpo \ data.train_batch_size=128 \ data.max_prompt_length=2048 \ data.max_response_length=16384 \ data.filter_overlong_prompts=False \ data.truncation='error' \ data.return_raw_chat=True \ data.train_files=$HOME/data/retool_dapo/train.parquet \ data.val_files=$HOME/data/retool_aime2024/train.parquet \ actor_rollout_ref.model.path=Qwen/Qwen3-4B \ actor_rollout_ref.actor.use_dynamic_bsz=True \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.model.use_liger=False \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ +actor_rollout_ref.model.enable_activation_offloading=True \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=128 \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=1 \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=32768 \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.actor.kl_loss_coef=0.0 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.fsdp_config.param_offload=True \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=True \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=sglang \ actor_rollout_ref.rollout.gpu_memory_utilization=0.8 \ actor_rollout_ref.rollout.n=8 \ actor_rollout_ref.rollout.multi_turn.tool_config_path="$PROJECT_DIR/examples/sglang_multiturn/config/tool_config/sandbox_fusion_tool_config.yaml" \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='retool_async_rl' \ trainer.experiment_name='qwen3-4b_function_rm-retool-async-sgl-no-sft-n8-v2505271300' \ trainer.val_before_train=False \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=100 \ trainer.test_freq=20 \ trainer.total_training_steps=1000 \ trainer.total_epochs=1 $@ ================================================ FILE: verl_distillation/tests/special_e2e/run_gsm8k_fsdp_sgl_multiturn_w_tool.sh ================================================ # run on 8xH100 # make sure your current working directory is the root of the project set -x #huggingface-cli download Qwen/Qwen2.5-3B-Instruct --local-dir $HOME/models/Qwen/Qwen2.5-3B-Instruct ulimit -n 65535 
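# Note: the raised open-file limit above matters here; multi-turn SGLang
# rollouts plus Ray can hold thousands of sockets/file descriptors open at
# once, and the common default ulimit of 1024 leads to "too many open files"
# errors mid-run.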
PROJECT_DIR="$(pwd)" CONFIG_PATH="$PROJECT_DIR/examples/sglang_multiturn/config" FSDP_STRATEGY=${FSDP_STRATEGY:-fsdp} python3 -m verl.trainer.main_ppo \ --config-path="$CONFIG_PATH" \ --config-name='gsm8k_multiturn_grpo' \ algorithm.adv_estimator=grpo \ data.train_batch_size=256 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.return_raw_chat=True \ actor_rollout_ref.model.path=$HOME/models/Qwen/Qwen2.5-3B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=16 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.strategy=$FSDP_STRATEGY \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=sglang \ actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \ actor_rollout_ref.rollout.n=8 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.ref.strategy=$FSDP_STRATEGY \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger=console \ trainer.project_name='gsm8k_async_rl' \ trainer.experiment_name=qwen2.5-3b_function_rm-gsm8k-sgl-multi-w-tool-$FSDP_STRATEGY-rebased-0427-verify-n16 \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=-1 \ data.train_files=$HOME/data/gsm8k_verl_sgl_multi_turn_preprocessed/train.parquet \ data.val_files=$HOME/data/gsm8k_verl_sgl_multi_turn_preprocessed/test.parquet \ actor_rollout_ref.rollout.multi_turn.tool_config_path="$PROJECT_DIR/examples/sglang_multiturn/config/tool_config/gsm8k_tool_config.yaml" \ trainer.val_before_train=False \ trainer.total_training_steps=1 $@ ================================================ FILE: verl_distillation/tests/special_e2e/run_one_step_off_policy.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail # Test script for one_step_off_policy E2E regression testing # This script runs one_step_off_policy with both FSDP2 and Megatron backends # to ensure the asynchronous training mechanism works correctly NUM_GPUS=${NUM_GPUS:-8} ACTOR_STRATEGY=${ACTOR_STRATEGY:-"fsdp2"} # fsdp2 or megatron # Download model if not exists MODEL_ID=${MODEL_ID:-Qwen/Qwen2.5-0.5B-Instruct} MODEL_PATH=${MODEL_PATH:-${HOME}/models/${MODEL_ID}} #huggingface-cli download "${MODEL_ID}" --local-dir "${MODEL_PATH}" # Algorithm parameters adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 # Response length parameters max_prompt_length=1024 max_response_length=2048 enable_overlong_buffer=True overlong_buffer_len=128 overlong_penalty_factor=1.0 # Training parameters loss_agg_mode="token-mean" train_prompt_bsz=8 n_resp_per_prompt=3 train_prompt_mini_bsz=4 # Temperature parameters temperature=1.0 top_p=1.0 top_k=-1 val_top_p=0.7 # One-step-off-policy specific parameters # Allocate 2 GPUs for rollout, remaining for training n_gpus_rollout=2 n_gpus_training=$((NUM_GPUS 
- n_gpus_rollout)) exp_name="$(basename "${MODEL_ID,,}")-one-step-off-policy-${ACTOR_STRATEGY}-minimal" echo "Running one_step_off_policy with ${ACTOR_STRATEGY} strategy" echo "Total GPUs: ${NUM_GPUS}, Rollout GPUs: ${n_gpus_rollout}, Training GPUs: ${n_gpus_training}" # Common parameters for both FSDP2 and Megatron common_params=( data.train_files="${HOME}/data/gsm8k/train.parquet" data.val_files="${HOME}/data/gsm8k/test.parquet" data.prompt_key=prompt data.truncation='left' data.max_prompt_length=${max_prompt_length} data.max_response_length=${max_response_length} data.train_batch_size=${train_prompt_bsz} actor_rollout_ref.rollout.n=${n_resp_per_prompt} algorithm.adv_estimator=${adv_estimator} algorithm.use_kl_in_reward=${use_kl_in_reward} algorithm.kl_ctrl.kl_coef=${kl_coef} actor_rollout_ref.hybrid_engine=False \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} actor_rollout_ref.actor.clip_ratio_c=10.0 actor_rollout_ref.model.path="${MODEL_PATH}" actor_rollout_ref.actor.optim.lr=1e-6 actor_rollout_ref.actor.optim.lr_warmup_steps=-1 actor_rollout_ref.actor.optim.weight_decay=0.1 actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} actor_rollout_ref.actor.entropy_coeff=0 actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} actor_rollout_ref.rollout.gpu_memory_utilization=0.80 actor_rollout_ref.rollout.temperature=${temperature} actor_rollout_ref.rollout.top_p=${top_p} actor_rollout_ref.rollout.top_k=${top_k} actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} actor_rollout_ref.rollout.val_kwargs.do_sample=True actor_rollout_ref.rollout.val_kwargs.n=1 actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.name=vllm \ reward_model.reward_manager=dapo +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} +reward_model.reward_kwargs.overlong_buffer_cfg.log=False +reward_model.reward_kwargs.max_resp_len=${max_response_length} trainer.logger=['console'] trainer.project_name='verl-test' trainer.experiment_name="${exp_name}" trainer.val_before_train=False trainer.test_freq=-1 trainer.save_freq=-1 trainer.total_epochs=2 trainer.total_training_steps=2 trainer.resume_mode=disable trainer.nnodes=1 trainer.n_gpus_per_node=${n_gpus_training} rollout.nnodes=1 rollout.n_gpus_per_node=${n_gpus_rollout} ) if [ "${ACTOR_STRATEGY}" == "fsdp2" ]; then echo "Running with FSDP2 strategy..." 
# FSDP2 specific parameters gen_tp=2 sp_size=2 fsdp_size=2 ref_offload=True actor_offload=False python3 -m recipe.one_step_off_policy.main_ppo \ "${common_params[@]}" \ actor_rollout_ref.actor.strategy=fsdp2 \ critic.strategy=fsdp2 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.use_dynamic_bsz=True \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=True \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=True \ actor_rollout_ref.actor.fsdp_config.param_offload=${actor_offload} \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=${actor_offload} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.ref.fsdp_config.param_offload=${ref_offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.actor.fsdp_config.fsdp_size=${fsdp_size} $@ elif [ "${ACTOR_STRATEGY}" == "megatron" ]; then echo "Running with Megatron strategy..." # Megatron specific parameters gen_tp=2 train_tp=1 train_pp=2 ref_offload=True actor_offload=False python3 -m recipe.one_step_off_policy.main_ppo \ --config-path=config \ --config-name='one_step_off_ppo_megatron_trainer.yaml' \ "${common_params[@]}" \ actor_rollout_ref.actor.strategy=megatron \ critic.strategy=megatron \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=2 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.actor.megatron.param_offload=${actor_offload} \ actor_rollout_ref.actor.megatron.optimizer_offload=${actor_offload} \ actor_rollout_ref.actor.megatron.grad_offload=${actor_offload} \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=${train_pp} \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=${train_tp} \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=${train_pp} \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=${train_tp} \ actor_rollout_ref.ref.megatron.param_offload=${ref_offload} $@ else echo "Error: Unknown strategy ${ACTOR_STRATEGY}. 
Please use 'fsdp2' or 'megatron'"
    exit 1
fi

echo "One-step-off-policy E2E test completed successfully with ${ACTOR_STRATEGY} strategy"


================================================
FILE: verl_distillation/tests/special_e2e/run_ppo_trainer_megatron.sh
================================================
#!/usr/bin/env bash
set -xeuo pipefail

export CUDA_DEVICE_MAX_CONNECTIONS=1 # For megatron communication/computation overlapping
export VERL_LOGGING_LEVEL=INFO
export VERL_PPO_LOGGING_LEVEL=INFO

NUM_GPUS=${NUM_GPUS:-8}

MODEL_ID=${MODEL_ID:-Qwen/Qwen2.5-0.5B}
MODEL_PATH=${MODEL_PATH:-${HOME}/models/${MODEL_ID}}
#huggingface-cli download "${MODEL_ID}" --local-dir "${MODEL_PATH}"

USE_DUMMY_MODEL=${USE_DUMMY_MODEL:-False}
DUMMY_MODEL_PATH=${DUMMY_MODEL_PATH:-${HOME}/dummy_models/${MODEL_ID}}

if [ "$USE_DUMMY_MODEL" = "True" ]; then
    if [ -z "${DUMMY_MODEL_CONFIG_PATH}" ]; then
        echo "[ERROR] DUMMY_MODEL_CONFIG_PATH not set"
        exit 1
    fi
    python scripts/init_random_model.py \
        --hf_model_path "${MODEL_PATH}" \
        --new_config_path "${DUMMY_MODEL_CONFIG_PATH}" \
        --output_path "${DUMMY_MODEL_PATH}"
    MODEL_PATH="${DUMMY_MODEL_PATH}"
fi

TRAIN_FILES=${TRAIN_FILES:-${HOME}/data/gsm8k/train.parquet}
VAL_FILES=${VAL_FILES:-${HOME}/data/gsm8k/test.parquet}

ADV_ESTIMATOR=${ADV_ESTIMATOR:-gae}

# Validation
VAL_BEFORE_TRAIN=${VAL_BEFORE_TRAIN:-False}
TEST_FREQ=${TEST_FREQ:--1}

# Save & Resume
RESUME_MODE=${RESUME_MODE:-disable}
SAVE_FREQ=${SAVE_FREQ:--1}
TOTAL_TRAIN_STEPS=${TOTAL_TRAIN_STEPS:-1}

USE_DYNAMIC_BSZ=${USE_DYNAMIC_BSZ:-True}
ppo_max_token_len_per_gpu=${PPO_MAX_TOKEN_LEN:-2400}
forward_max_token_len_per_gpu=${FWD_MAX_TOKEN_LEN:-4800}

train_traj_micro_bsz_per_gpu=${MICRO_BSZ:-2} # b
n_resp_per_prompt=4 # g

train_traj_micro_bsz=$((train_traj_micro_bsz_per_gpu * NUM_GPUS)) # b * n
train_traj_mini_bsz=$((train_traj_micro_bsz * 2)) # 2 * b * n
train_prompt_mini_bsz=$((train_traj_mini_bsz / n_resp_per_prompt)) # 2 * b * n / g
train_prompt_bsz=$((train_prompt_mini_bsz * 2)) # 4 * b * n / g

MAX_PROMPT_LENGTH=${MAX_PROMPT_LENGTH:-512}
MAX_RESPONSE_LENGTH=${MAX_RESPONSE_LENGTH:-512}

COMMON_PP=${COMMON_PP:-2}
COMMON_VPP=${COMMON_VPP:-2}
COMMON_CP=${COMMON_CP:-2}
COMMON_TP=${COMMON_TP:-2}
COMMON_EP=${COMMON_EP:-1}
COMMON_ETP=${COMMON_ETP:-1}

TRAIN_TP=${TRAIN_TP:-$COMMON_TP}
INFER_TP=${INFER_TP:-$COMMON_TP}

ACTOR_PP=${ACTOR_PP:-$COMMON_PP}
ACTOR_VPP=${ACTOR_VPP:-$COMMON_VPP}
ACTOR_CP=${ACTOR_CP:-$COMMON_CP}
ACTOR_TP=${ACTOR_TP:-$TRAIN_TP}
ACTOR_EP=${ACTOR_EP:-$COMMON_EP}
ACTOR_ETP=${ACTOR_ETP:-$COMMON_ETP}
ROLLOUT_TP=${ROLLOUT_TP:-$INFER_TP}
REF_PP=${REF_PP:-$COMMON_PP}
REF_VPP=${REF_VPP:-$COMMON_VPP}
REF_CP=${REF_CP:-$COMMON_CP}
REF_TP=${REF_TP:-$TRAIN_TP}
REF_EP=${REF_EP:-$COMMON_EP}
REF_ETP=${REF_ETP:-$COMMON_ETP}
CRITIC_PP=${CRITIC_PP:-$COMMON_PP}
CRITIC_VPP=${CRITIC_VPP:-$COMMON_VPP}
CRITIC_CP=${CRITIC_CP:-$COMMON_CP}
CRITIC_TP=${CRITIC_TP:-$TRAIN_TP}
CRITIC_EP=${CRITIC_EP:-$COMMON_EP}
CRITIC_ETP=${CRITIC_ETP:-$COMMON_ETP}
RM_PP=${RM_PP:-$COMMON_PP}
RM_VPP=${RM_VPP:-$COMMON_VPP}
RM_CP=${RM_CP:-$COMMON_CP}
RM_TP=${RM_TP:-$TRAIN_TP}
RM_EP=${RM_EP:-$COMMON_EP}
RM_ETP=${RM_ETP:-$COMMON_ETP}

ALL_OFFLOAD=${ALL_OFFLOAD:-False}
COMMON_PARAM_OFFLOAD=${COMMON_PARAM_OFFLOAD:-$ALL_OFFLOAD}
COMMON_GRAD_OFFLOAD=${COMMON_GRAD_OFFLOAD:-$ALL_OFFLOAD}
COMMON_OPTIMIZER_OFFLOAD=${COMMON_OPTIMIZER_OFFLOAD:-$ALL_OFFLOAD}

ACTOR_PARAM_OFFLOAD=${ACTOR_PARAM_OFFLOAD:-$COMMON_PARAM_OFFLOAD}
ACTOR_GRAD_OFFLOAD=${ACTOR_GRAD_OFFLOAD:-$COMMON_GRAD_OFFLOAD}
ACTOR_OPTIMIZER_OFFLOAD=${ACTOR_OPTIMIZER_OFFLOAD:-$COMMON_OPTIMIZER_OFFLOAD}
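# Note: the offload flags cascade from general to specific. ALL_OFFLOAD seeds
# the COMMON_* defaults above, which in turn seed the per-role
# (ACTOR_/REF_/CRITIC_/RM_) flags, so ALL_OFFLOAD=True offloads everything
# unless a more specific variable overrides it.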
REF_PARAM_OFFLOAD=${REF_PARAM_OFFLOAD:-$COMMON_PARAM_OFFLOAD} CRITIC_PARAM_OFFLOAD=${CRITIC_PARAM_OFFLOAD:-$COMMON_PARAM_OFFLOAD} CRITIC_GRAD_OFFLOAD=${CRITIC_GRAD_OFFLOAD:-$COMMON_GRAD_OFFLOAD} CRITIC_OPTIMIZER_OFFLOAD=${CRITIC_OPTIMIZER_OFFLOAD:-$COMMON_OPTIMIZER_OFFLOAD} RM_PARAM_OFFLOAD=${RM_PARAM_OFFLOAD:-$COMMON_PARAM_OFFLOAD} USE_MBRIDGE=${USE_MBRIDGE:-False} USE_FUSED_KERNELS=${USE_FUSED_KERNELS:-False} LR_WARMUP_STEPS=${LR_WARMUP_STEPS:-null} CHECKPOINT_CONTENTS=['model','hf_model','optimizer','extra'] SKIP_SAVE_HF_MODEL=${SKIP_SAVE_HF_MODEL:-0} if [ $SKIP_SAVE_HF_MODEL -eq 1 ]; then CHECKPOINT_CONTENTS=['model','optimizer','extra'] fi USE_DIST_CKPT=${USE_DIST_CKPT:-False} DIST_CKPT_PATH=${DIST_CKPT_PATH:-${HOME}/dist_ckpt/${MODEL_ID}} if [ "$USE_DIST_CKPT" = "True" ]; then if [ "$USE_DUMMY_MODEL" = "True" ]; then DIST_CKPT_PATH=${HOME}/dist_ckpt_dummy/${MODEL_ID} fi python scripts/converter_hf_to_mcore.py \ --hf_model_path "${MODEL_PATH}" \ --output_path "${DIST_CKPT_PATH}" fi ENGINE=${ENGINE:-"vllm"} exp_name="$(basename "${MODEL_ID,,}")-megatron-gsm8k-minimal" if [ "$ENGINE" = "vllm" ]; then MODE=${MODE:-"sync"} ROLLOUT_MODE_ARG="actor_rollout_ref.rollout.mode=${MODE}" if [ "$MODE" = "async" ]; then ROLLOUT_MODE_ARG="${ROLLOUT_MODE_ARG} data.return_raw_chat=True" fi else ROLLOUT_MODE_ARG="" fi OPTIM_MEMORY_EFFICIENT=${OPTIM_MEMORY_EFFICIENT:-False} PROFILE_ENABLE=${PROFILE_ENABLE:-False} PROFILE_STEPS=${PROFILE_STEPS:-[1]} PROFILE_RANKS_ALL=${PROFILE_RANKS_ALL:-True} PROFILE_RANKS=${PROFILE_RANKS:-[0,1,2,3]} DISCRETE=${DISCRETE:-True} # or True python3 -m verl.trainer.main_ppo --config-path=config \ --config-name='ppo_megatron_trainer.yaml'\ algorithm.adv_estimator="${ADV_ESTIMATOR}" \ data.train_files="${TRAIN_FILES}" \ data.val_files="${VAL_FILES}" \ data.train_batch_size=${train_prompt_bsz} \ data.max_prompt_length=${MAX_PROMPT_LENGTH} \ data.max_response_length=${MAX_RESPONSE_LENGTH} \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.model.use_fused_kernels=${USE_FUSED_KERNELS} \ actor_rollout_ref.actor.optim.lr_warmup_steps=$LR_WARMUP_STEPS \ +actor_rollout_ref.actor.optim.override_optimizer_config.optimizer_cpu_offload=$OPTIM_MEMORY_EFFICIENT \ +actor_rollout_ref.actor.optim.override_optimizer_config.overlap_cpu_optimizer_d2h_h2d=$OPTIM_MEMORY_EFFICIENT \ +actor_rollout_ref.actor.optim.override_optimizer_config.use_precision_aware_optimizer=$OPTIM_MEMORY_EFFICIENT \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=${train_traj_micro_bsz_per_gpu} \ actor_rollout_ref.actor.use_dynamic_bsz=${USE_DYNAMIC_BSZ} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${ppo_max_token_len_per_gpu} \ actor_rollout_ref.actor.megatron.use_mbridge=${USE_MBRIDGE} \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=$ACTOR_PP \ actor_rollout_ref.actor.megatron.virtual_pipeline_model_parallel_size=$ACTOR_VPP \ actor_rollout_ref.actor.megatron.context_parallel_size=$ACTOR_CP \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=$ACTOR_TP \ actor_rollout_ref.actor.megatron.expert_model_parallel_size=$ACTOR_EP \ actor_rollout_ref.actor.megatron.expert_tensor_parallel_size=$ACTOR_ETP \ actor_rollout_ref.actor.megatron.param_offload=${ACTOR_PARAM_OFFLOAD} \ actor_rollout_ref.actor.megatron.optimizer_offload=${ACTOR_OPTIMIZER_OFFLOAD} \ actor_rollout_ref.actor.megatron.grad_offload=${ACTOR_GRAD_OFFLOAD} \ 
actor_rollout_ref.actor.megatron.use_dist_checkpointing=${USE_DIST_CKPT} \ actor_rollout_ref.actor.megatron.dist_checkpointing_path=${DIST_CKPT_PATH} \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.checkpoint.save_contents=$CHECKPOINT_CONTENTS \ actor_rollout_ref.actor.profiler.enable=$PROFILE_ENABLE \ actor_rollout_ref.actor.profiler.ranks=$PROFILE_RANKS \ actor_rollout_ref.actor.profiler.all_ranks=$PROFILE_RANKS_ALL \ actor_rollout_ref.rollout.name="${ENGINE}" ${ROLLOUT_MODE_ARG}\ actor_rollout_ref.rollout.tensor_model_parallel_size=$ROLLOUT_TP \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ actor_rollout_ref.rollout.update_weights_bucket_megabytes=128 \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=${train_traj_micro_bsz_per_gpu} \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=${train_traj_micro_bsz_per_gpu} \ actor_rollout_ref.ref.megatron.use_mbridge=${USE_MBRIDGE} \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=$REF_PP \ actor_rollout_ref.ref.megatron.virtual_pipeline_model_parallel_size=$REF_VPP \ actor_rollout_ref.ref.megatron.context_parallel_size=$REF_CP \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=$REF_TP \ actor_rollout_ref.ref.megatron.expert_model_parallel_size=$REF_EP \ actor_rollout_ref.ref.megatron.expert_tensor_parallel_size=$REF_ETP \ actor_rollout_ref.ref.megatron.param_offload=${REF_PARAM_OFFLOAD} \ actor_rollout_ref.ref.megatron.use_dist_checkpointing=${USE_DIST_CKPT} \ actor_rollout_ref.ref.megatron.dist_checkpointing_path=${DIST_CKPT_PATH} \ critic.optim.lr=2e-5 \ critic.optim.lr_warmup_steps=$LR_WARMUP_STEPS \ +critic.optim.override_optimizer_config.optimizer_cpu_offload=$OPTIM_MEMORY_EFFICIENT \ +critic.optim.override_optimizer_config.overlap_cpu_optimizer_d2h_h2d=$OPTIM_MEMORY_EFFICIENT \ +critic.optim.override_optimizer_config.use_precision_aware_optimizer=$OPTIM_MEMORY_EFFICIENT \ critic.model.path="${MODEL_PATH}" \ critic.ppo_micro_batch_size_per_gpu=${train_traj_micro_bsz_per_gpu} \ critic.ppo_max_token_len_per_gpu=${forward_max_token_len_per_gpu} \ critic.megatron.use_mbridge=${USE_MBRIDGE} \ critic.megatron.pipeline_model_parallel_size=$CRITIC_PP \ critic.megatron.virtual_pipeline_model_parallel_size=$CRITIC_VPP \ critic.megatron.context_parallel_size=$CRITIC_CP \ critic.megatron.tensor_model_parallel_size=$CRITIC_TP \ critic.megatron.expert_model_parallel_size=$CRITIC_EP \ critic.megatron.expert_tensor_parallel_size=$CRITIC_ETP \ critic.megatron.param_offload=${CRITIC_PARAM_OFFLOAD} \ critic.megatron.optimizer_offload=${CRITIC_OPTIMIZER_OFFLOAD} \ critic.megatron.grad_offload=${CRITIC_GRAD_OFFLOAD} \ critic.megatron.use_dist_checkpointing=${USE_DIST_CKPT} \ critic.megatron.dist_checkpointing_path=${DIST_CKPT_PATH} \ critic.checkpoint.save_contents=$CHECKPOINT_CONTENTS \ critic.profiler.enable=$PROFILE_ENABLE \ critic.profiler.ranks=$PROFILE_RANKS \ critic.profiler.all_ranks=$PROFILE_RANKS_ALL \ reward_model.enable=True \ reward_model.model.path="${MODEL_PATH}" \ reward_model.micro_batch_size_per_gpu=${train_traj_micro_bsz_per_gpu} \ reward_model.megatron.use_mbridge=${USE_MBRIDGE} \ reward_model.megatron.pipeline_model_parallel_size=$RM_PP \ reward_model.megatron.virtual_pipeline_model_parallel_size=$RM_VPP \ reward_model.megatron.context_parallel_size=$RM_CP \ reward_model.megatron.tensor_model_parallel_size=$RM_TP \ 
    reward_model.megatron.expert_model_parallel_size=$RM_EP \
    reward_model.megatron.expert_tensor_parallel_size=$RM_ETP \
    reward_model.megatron.param_offload=${RM_PARAM_OFFLOAD} \
    reward_model.megatron.use_dist_checkpointing=${USE_DIST_CKPT} \
    reward_model.megatron.dist_checkpointing_path=${DIST_CKPT_PATH} \
    reward_model.profiler.enable=$PROFILE_ENABLE \
    reward_model.profiler.ranks=$PROFILE_RANKS \
    reward_model.profiler.all_ranks=$PROFILE_RANKS_ALL \
    algorithm.use_kl_in_reward=False \
    algorithm.kl_penalty=kl \
    algorithm.kl_ctrl.kl_coef=0.001 \
    trainer.critic_warmup=0 \
    trainer.logger=console \
    trainer.project_name='verl-test' \
    trainer.experiment_name="${exp_name}" \
    trainer.nnodes=1 \
    trainer.n_gpus_per_node=${NUM_GPUS} \
    trainer.val_before_train="${VAL_BEFORE_TRAIN}" \
    trainer.test_freq="${TEST_FREQ}" \
    trainer.save_freq="${SAVE_FREQ}" \
    trainer.resume_mode="${RESUME_MODE}" \
    trainer.total_epochs=2 \
    trainer.total_training_steps="${TOTAL_TRAIN_STEPS}" \
    global_profiler.profile_continuous_steps=True \
    global_profiler.tool=nsys \
    global_profiler.steps=$PROFILE_STEPS \
    global_profiler.global_tool_config.nsys.discrete=$DISCRETE $@


================================================
FILE: verl_distillation/tests/special_e2e/run_prime.sh
================================================
#!/usr/bin/env bash
set -xeuo pipefail

NUM_GPUS=${NUM_GPUS:-8}

MODEL_ID=${MODEL_ID:-Qwen/Qwen2.5-0.5B}
MODEL_PATH=${MODEL_PATH:-${HOME}/models/${MODEL_ID}}
#huggingface-cli download "${MODEL_ID}" --local-dir "${MODEL_PATH}"

TRAIN_FILES=${TRAIN_FILES:-${HOME}/data/gsm8k/train.parquet}
VAL_FILES=${VAL_FILES:-${HOME}/data/gsm8k/test.parquet}

train_traj_micro_bsz_per_gpu=2 # b
n_resp_per_prompt=4 # g

train_traj_micro_bsz=$((train_traj_micro_bsz_per_gpu * NUM_GPUS)) # b * n
train_traj_mini_bsz=$((train_traj_micro_bsz * 2)) # 2 * b * n
train_prompt_mini_bsz=$((train_traj_mini_bsz / n_resp_per_prompt)) # 2 * b * n / g
train_prompt_bsz=$((train_prompt_mini_bsz * 2)) # 4 * b * n / g

exp_name="$(basename "${MODEL_ID,,}")-prime-minimal"

python3 -m recipe.prime.main_prime \
    data.train_files="${TRAIN_FILES}" \
    data.val_files="${VAL_FILES}" \
    data.train_batch_size=${train_prompt_bsz} \
    data.max_prompt_length=512 \
    data.max_response_length=512 \
    data.filter_accuracy=True \
    data.accuracy_lower_bound=0.2 \
    data.accuracy_upper_bound=0.8 \
    data.oversample_factor=4 \
    data.return_raw_chat=True \
    actor_rollout_ref.model.path="${MODEL_PATH}" \
    actor_rollout_ref.actor.optim.lr=5e-7 \
    actor_rollout_ref.model.use_remove_padding=True \
    actor_rollout_ref.model.use_fused_kernels=True \
    actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \
    actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=${train_traj_micro_bsz_per_gpu} \
    actor_rollout_ref.model.enable_gradient_checkpointing=False \
    actor_rollout_ref.actor.fsdp_config.param_offload=False \
    actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
    actor_rollout_ref.actor.use_kl_loss=False \
    actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=${train_traj_micro_bsz_per_gpu} \
    actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
    actor_rollout_ref.rollout.name=vllm \
    actor_rollout_ref.rollout.n=${n_resp_per_prompt} \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.8 \
    actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=${train_traj_micro_bsz_per_gpu} \
    actor_rollout_ref.ref.fsdp_config.param_offload=True \
    algorithm.adv_estimator=rloo \
    algorithm.use_kl_in_reward=True \
    algorithm.kl_penalty=kl \
    algorithm.kl_ctrl.kl_coef=0.001 \
reward_model.model.path="${MODEL_PATH}" \ reward_model.micro_batch_size_per_gpu=${train_traj_micro_bsz_per_gpu} \ reward_model.model.update=before \ reward_model.model.beta_train=0.05 \ reward_model.model.optim.lr=1e-6 \ reward_model.model.optim.grad_clip=10.0 \ reward_model.model.input_tokenizer=null \ reward_model.mini_batch_size=${train_prompt_bsz} \ reward_model.reward_manager=prime \ trainer.val_before_train=False \ trainer.logger=console \ trainer.project_name='verl-test' \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node=${NUM_GPUS} \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.total_training_steps=1 $@ ================================================ FILE: verl_distillation/tests/special_e2e/run_r1_distill_qwen_aime24_eval.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail #huggingface-cli download deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B \ # --local-dir $HOME/models/deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B python3 -m verl.trainer.main_generation \ trainer.nnodes=1 \ trainer.n_gpus_per_node=8 \ data.path=$HOME/data/r1/test.parquet \ data.prompt_key=prompt \ data.batch_size=1024 \ data.n_samples=1 \ data.output_path=$HOME/data/r1/test-output-k1.parquet \ model.path=$HOME/models/deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B \ rollout.temperature=0.6 \ rollout.top_p=0.95 \ rollout.prompt_length=1024 \ rollout.response_length=32768 \ rollout.tensor_model_parallel_size=1 \ rollout.gpu_memory_utilization=0.95 \ rollout.max_num_batched_tokens=65536 \ rollout.enforce_eager=False \ rollout.free_cache_engine=True python3 -m recipe.r1.main_eval \ data.path=$HOME/data/r1/test-output-k1.parquet \ data.prompt_key=prompt \ data.response_key=responses \ custom_reward_function.path=recipe/r1/reward_score.py \ custom_reward_function.name=reward_func ================================================ FILE: verl_distillation/tests/special_e2e/run_spin.sh ================================================ set -e set -x NUM_GPUS=${NUM_GPUS:-8} exp_name="Qwen2.5-0.5B-Instruct-spin-minimal" MODEL_ID=${MODEL_ID:-Qwen/Qwen2.5-0.5B-Instruct} MODEL_PATH=${MODEL_PATH:-${HOME}/models/${MODEL_ID}} #huggingface-cli download "${MODEL_ID}" --local-dir "${MODEL_PATH}" CUDA_VISIBLE_DEVICES=${VISIBLE_DEVICES} python3 -m recipe.spin.main_spin \ data.train_files="${HOME}/data/gsm8k/train.parquet" \ data.val_files="${HOME}/data/gsm8k/test.parquet" \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ actor_rollout_ref.model.path=$MODEL_PATH \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=64 \ actor_rollout_ref.actor.ppo_micro_batch_size=8 \ actor_rollout_ref.rollout.log_prob_micro_batch_size=64 \ actor_rollout_ref.rollout.tensor_model_parallel_size=1 \ actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \ actor_rollout_ref.ref.log_prob_micro_batch_size=64 \ algorithm.kl_ctrl.kl_coef=0.001 \ trainer.logger=console \ trainer.val_before_train=False \ trainer.n_gpus_per_node=4 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=1 \ +trainer.log_freq=1 \ trainer.ref_update_freq=1 \ trainer.total_training_steps=1 \ trainer.total_epochs=1000 2>&1 | tee verl_demo.log ================================================ FILE: verl_distillation/tests/special_e2e/run_sppo.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail # in e2e_sppo.yml, we set NUM_GPUS=8 L20 NUM_GPUS=${NUM_GPUS:-8} exp_name="Qwen2.5-0.5B-Instruct-sppo-minimal" 
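# (The "L20" in the note above refers to the NVIDIA L20 GPUs the CI job runs on.)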
MODEL_ID=${MODEL_ID:-Qwen/Qwen2.5-0.5B-Instruct} MODEL_PATH=${MODEL_PATH:-${HOME}/models/${MODEL_ID}} #huggingface-cli download "${MODEL_ID}" --local-dir "${MODEL_PATH}" python3 -m recipe.sppo.main_sppo \ data.train_files="${HOME}/data/math/train.parquet" \ data.val_files="${HOME}/data/math/test.parquet" \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=512 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.return_raw_chat=True \ actor_rollout_ref.model.path="$MODEL_PATH" \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.model.use_fused_kernels=True \ actor_rollout_ref.actor.optim.lr_warmup_steps_ratio=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=16 \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \ actor_rollout_ref.rollout.tensor_model_parallel_size=1 \ actor_rollout_ref.rollout.name=sglang \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger=console \ trainer.val_before_train=False \ trainer.n_gpus_per_node=$NUM_GPUS \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.total_training_steps=1 \ trainer.total_epochs=2 $@ ================================================ FILE: verl_distillation/tests/special_e2e/run_test.sh ================================================ #!/bin/bash set -xeuo pipefail # Get the configuration name and engine name from arguments CONFIG_NAME="$1" ENGINE="${2:-vllm}" # Download model if needed #huggingface-cli download Qwen/Qwen2.5-0.5B --local-dir "$HOME/models/Qwen/Qwen2.5-0.5B" # Run the training with the specified configuration python3 -m verl.trainer.main_ppo \ --config-name "$CONFIG_NAME" "$@" ================================================ FILE: verl_distillation/tests/special_e2e/sft/compare_sft_engine_results.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
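# What this checker does: it loads the JSONL metric logs written by the file
# logger during the SFT engine sweep and asserts that each variant's first
# logged train/loss and train/grad_norm stay within tolerance of the
# single-GPU "golden" run (see compare_results below for the tolerances).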
import json
import os

import torch


def get_result(file):
    file = os.path.expanduser(file)
    result = []
    with open(file) as f:
        lines = f.readlines()
        for line in lines:
            result.append(json.loads(line))
    return result


def compare_results(golden_results, other_result):
    golden_loss = golden_results[0]["data"]["train/loss"]
    golden_grad_norm = golden_results[0]["data"]["train/grad_norm"]

    loss = other_result[0]["data"]["train/loss"]
    grad_norm = other_result[0]["data"]["train/grad_norm"]

    torch.testing.assert_close(golden_loss, loss, atol=1e-2, rtol=1e-2)
    torch.testing.assert_close(golden_grad_norm, grad_norm, atol=1e-4, rtol=1e-2)


if __name__ == "__main__":
    golden_results = get_result("~/verl/test/log/golden.jsonl")
    # get all other results
    other_results = {}
    # walk through all files in ~/verl/test/log
    for file in os.listdir(os.path.expanduser("~/verl/test/log/verl_sft_test")):
        if file.endswith(".jsonl"):
            other_results[file] = get_result(os.path.join(os.path.expanduser("~/verl/test/log/verl_sft_test"), file))
    # compare results
    for file, other_result in other_results.items():
        print(f"compare results {file}")
        compare_results(golden_results, other_result)
    print("All results are close to golden results")


================================================
FILE: verl_distillation/tests/special_e2e/sft/run_sft.sh
================================================
#!/usr/bin/env bash
set -xeuo pipefail

ENTRYPOINT=${ENTRYPOINT:-"-m verl.trainer.fsdp_sft_trainer"}

NUM_GPUS=${NUM_GPUS:-8}

MODEL_ID=${MODEL_ID:-Qwen/Qwen2.5-0.5B-Instruct}
MODEL_PATH=${MODEL_PATH:-${HOME}/models/${MODEL_ID}}
#huggingface-cli download "${MODEL_ID}" --local-dir "${MODEL_PATH}"

TRAIN_FILES=${TRAIN_FILES:-$HOME/data/gsm8k/train.parquet}
VAL_FILES=${VAL_FILES:-$HOME/data/gsm8k/test.parquet}

SP_SIZE=${SP_SIZE:-1}
LIGER=${LIGER:-False}
MULTITURN=${MULTITURN:-False}
LORA_RANK=${LORA_RANK:-0}
RM_PAD=${RM_PAD:-True}

TOTAL_TRAIN_STEP=${TOTAL_TRAIN_STEP:-1}
RESUME_MODE=${RESUME_MODE:-disable}
SAVE_FREQ=${SAVE_FREQ:-1}

micro_bsz=2

project_name="verl-test"
exp_name="$(basename "${MODEL_ID,,}")-sft-minimal"
ckpts_home=${ckpts_home:-$HOME/${project_name}/${exp_name}}

mkdir -p "${ckpts_home}"

torchrun --standalone --nnodes=1 --nproc_per_node=${NUM_GPUS} ${ENTRYPOINT} \
    data.train_files="${TRAIN_FILES}" \
    data.val_files="${VAL_FILES}" \
    data.prompt_key=extra_info \
    data.response_key=extra_info \
    data.prompt_dict_keys=['question'] \
    data.response_dict_keys=['answer'] \
    data.multiturn.enable="${MULTITURN}" \
    data.multiturn.messages_key=messages \
    optim.lr=1e-4 \
    data.micro_batch_size_per_gpu=${micro_bsz} \
    model.strategy=fsdp \
    model.partial_pretrain="${MODEL_PATH}" \
    model.lora_rank="${LORA_RANK}" \
    model.lora_alpha=16 \
    model.target_modules=all-linear \
    model.use_liger="${LIGER}" \
    ulysses_sequence_parallel_size="${SP_SIZE}" \
    use_remove_padding="${RM_PAD}" \
    trainer.default_local_dir="${ckpts_home}" \
    trainer.project_name="${project_name}" \
    trainer.experiment_name="${exp_name}" \
    trainer.total_training_steps=${TOTAL_TRAIN_STEP} \
    trainer.save_freq=${SAVE_FREQ} \
    trainer.checkpoint.save_contents=[model,optimizer,extra,hf_model] \
    trainer.max_ckpt_to_keep=1 \
    trainer.resume_mode=${RESUME_MODE} \
    trainer.logger=['console'] $@

rm -rf "${ckpts_home:?}/"*


================================================
FILE: verl_distillation/tests/special_e2e/sft/run_sft_engine_gsm8k.sh
================================================
#!/usr/bin/env bash
set -xeuo pipefail

ENTRYPOINT=${ENTRYPOINT:-"-m verl.trainer.sft_trainer"}

NUM_GPUS=${NUM_GPUS:-1}
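# This script is driven entirely by environment variables (BACKEND, SP_SIZE,
# FSDP_SIZE, FSDP_STRATEGY, TP_SIZE, PP_SIZE, VPP_SIZE, CP_SIZE, PAD_MODE,
# USE_REMOVE_PADDING, ...), so test_sft_engine_all.sh can sweep
# engine/parallelism combinations simply by re-invoking it.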
TRAIN_FILES=~/data/gsm8k_sft/train.parquet
VAL_FILES=~/data/gsm8k_sft/test.parquet

backend=${BACKEND:-fsdp}

project_name=verl_sft_test
RESUME_MODE=disable
ckpts_home=${ckpts_home:-~/verl/test/gsm8k-sft-${backend}}

MODEL_ID=${MODEL_ID:-Qwen/Qwen3-0.6B}
MODEL_PATH=${MODEL_PATH:-${HOME}/models/${MODEL_ID}}
#huggingface-cli download "${MODEL_ID}" --local-dir "${MODEL_PATH}"

SP_SIZE=${SP_SIZE:-1}
FSDP_SIZE=${FSDP_SIZE:-${NUM_GPUS}}
FSDP_STRATEGY=${FSDP_STRATEGY:-"fsdp"}

TP_SIZE=${TP_SIZE:-1}
PP_SIZE=${PP_SIZE:-1}
VPP_SIZE=${VPP_SIZE:-null}
CP_SIZE=${CP_SIZE:-1}

PAD_MODE=${PAD_MODE:-no_padding}

USE_REMOVE_PADDING=${USE_REMOVE_PADDING:-True}

FSDP_ENGINE_CONFIG="\
    engine=${backend} \
    optim=${backend} \
    optim.lr=1e-5 \
    optim.lr_warmup_steps_ratio=0.2 \
    optim.weight_decay=0.1 \
    optim.betas="[0.9,0.95]" \
    optim.clip_grad=1.0 \
    optim.min_lr_ratio=0.1 \
    optim.lr_scheduler_type=cosine \
    engine.ulysses_sequence_parallel_size=${SP_SIZE} \
    engine.strategy=${FSDP_STRATEGY} \
    engine.fsdp_size=${FSDP_SIZE}"

MEGATRON_ENGINE_CONFIG="\
    engine=${backend} \
    optim=${backend} \
    optim.lr=1e-5 \
    optim.lr_warmup_steps_ratio=0.2 \
    optim.weight_decay=0.1 \
    optim.betas="[0.9,0.95]" \
    optim.clip_grad=1.0 \
    optim.lr_warmup_init=0 \
    optim.lr_decay_style=cosine \
    optim.min_lr=1e-6 \
    engine.tensor_model_parallel_size=${TP_SIZE} \
    engine.pipeline_model_parallel_size=${PP_SIZE} \
    engine.virtual_pipeline_model_parallel_size=${VPP_SIZE} \
    engine.context_parallel_size=${CP_SIZE}"

if [ "$backend" = "fsdp" ]; then
    ENGINE_CONFIG="$FSDP_ENGINE_CONFIG"
    echo "Using fsdp engine"
    exp_name=gsm8k-${backend}-${FSDP_STRATEGY}-sp${SP_SIZE}-fsdp${FSDP_SIZE}-pad-${PAD_MODE}-use_remove_padding-${USE_REMOVE_PADDING}
else
    ENGINE_CONFIG="$MEGATRON_ENGINE_CONFIG"
    echo "Using megatron engine"
    exp_name=gsm8k-${backend}-tp${TP_SIZE}-pp${PP_SIZE}-vpp${VPP_SIZE}-cp${CP_SIZE}-pad-${PAD_MODE}-use_remove_padding-${USE_REMOVE_PADDING}
fi

mkdir -p "${ckpts_home}"

torchrun --standalone --nnodes=1 --nproc_per_node=${NUM_GPUS} ${ENTRYPOINT} \
    data.train_files="${TRAIN_FILES}" \
    data.val_files="${VAL_FILES}" \
    data.train_batch_size=256 \
    data.pad_mode=${PAD_MODE} \
    data.truncation=error \
    data.use_dynamic_bsz=True \
    data.max_token_len_per_gpu=8192 \
    data.messages_key=messages \
    model.path=$MODEL_PATH \
    model.use_remove_padding=${USE_REMOVE_PADDING} \
    ${ENGINE_CONFIG} \
    trainer.test_freq=after_each_epoch \
    trainer.save_freq=-1 \
    trainer.logger=['console','file'] \
    trainer.project_name="${project_name}" \
    trainer.experiment_name="${exp_name}" \
    trainer.total_epochs=2 \
    trainer.total_training_steps=2 \
    trainer.default_local_dir="${ckpts_home}" \
    trainer.resume_mode=${RESUME_MODE}
    # trainer.total_training_steps=${TOTAL_TRAIN_STEP} \
    # trainer.checkpoint.save_contents=[model,optimizer,extra,hf_model] \
    # trainer.max_ckpt_to_keep=1 \

rm -rf "${ckpts_home:?}/"*

================================================
FILE: verl_distillation/tests/special_e2e/sft/test_sft_engine_all.sh
================================================
#!/usr/bin/env bash
set -xeuo pipefail

rm -rf ~/verl/test/log
mkdir -p ~/verl/test/log

export VERL_FILE_LOGGER_ROOT=~/verl/test/log

# test with single gpu as golden
echo "run with single gpu as golden"
BACKEND=fsdp SP_SIZE=1 FSDP_SIZE=1 NUM_GPUS=1 FSDP_STRATEGY=fsdp VERL_FILE_LOGGER_PATH=~/verl/test/log/golden.jsonl bash tests/special_e2e/sft/run_sft_engine_gsm8k.sh

# test with fsdp 1
echo "run with sp1 fsdp_size2 num_gpus8 fsdp_strategy fsdp pad_mode no_padding"
BACKEND=fsdp SP_SIZE=1 FSDP_SIZE=2 NUM_GPUS=8 FSDP_STRATEGY=fsdp PAD_MODE=no_padding bash tests/special_e2e/sft/run_sft_engine_gsm8k.sh

echo "run with sp1 fsdp_size-1 num_gpus8 fsdp_strategy fsdp pad_mode no_padding"
BACKEND=fsdp SP_SIZE=1 FSDP_SIZE=-1 NUM_GPUS=8 FSDP_STRATEGY=fsdp PAD_MODE=no_padding bash tests/special_e2e/sft/run_sft_engine_gsm8k.sh

echo "run with sp2 fsdp_size-1 num_gpus8 fsdp_strategy fsdp pad_mode no_padding"
BACKEND=fsdp SP_SIZE=2 FSDP_SIZE=-1 NUM_GPUS=8 FSDP_STRATEGY=fsdp PAD_MODE=no_padding bash tests/special_e2e/sft/run_sft_engine_gsm8k.sh

echo "run with sp4 fsdp_size4 num_gpus8 fsdp_strategy fsdp pad_mode no_padding"
BACKEND=fsdp SP_SIZE=4 FSDP_SIZE=4 NUM_GPUS=8 FSDP_STRATEGY=fsdp PAD_MODE=no_padding bash tests/special_e2e/sft/run_sft_engine_gsm8k.sh

# test use_remove_padding and pad_mode no_padding
echo "run with sp1 fsdp_size-1 num_gpus8 fsdp_strategy fsdp pad_mode no_padding use_remove_padding False"
BACKEND=fsdp SP_SIZE=1 FSDP_SIZE=-1 NUM_GPUS=8 FSDP_STRATEGY=fsdp PAD_MODE=no_padding USE_REMOVE_PADDING=False bash tests/special_e2e/sft/run_sft_engine_gsm8k.sh

# test with fsdp 2
echo "run with sp1 fsdp_size1 num_gpus1 fsdp_strategy fsdp2"
BACKEND=fsdp SP_SIZE=1 FSDP_SIZE=1 NUM_GPUS=1 FSDP_STRATEGY=fsdp2 bash tests/special_e2e/sft/run_sft_engine_gsm8k.sh

echo "run with sp1 fsdp_size-1 num_gpus8 fsdp_strategy fsdp2"
BACKEND=fsdp SP_SIZE=1 FSDP_SIZE=-1 NUM_GPUS=8 FSDP_STRATEGY=fsdp2 bash tests/special_e2e/sft/run_sft_engine_gsm8k.sh

echo "run with sp2 fsdp_size-1 num_gpus8 fsdp_strategy fsdp2"
BACKEND=fsdp SP_SIZE=2 FSDP_SIZE=-1 NUM_GPUS=8 FSDP_STRATEGY=fsdp2 bash tests/special_e2e/sft/run_sft_engine_gsm8k.sh

BACKEND=fsdp SP_SIZE=1 FSDP_SIZE=2 NUM_GPUS=8 FSDP_STRATEGY=fsdp2 bash tests/special_e2e/sft/run_sft_engine_gsm8k.sh

BACKEND=fsdp SP_SIZE=4 FSDP_SIZE=4 NUM_GPUS=8 FSDP_STRATEGY=fsdp2 bash tests/special_e2e/sft/run_sft_engine_gsm8k.sh

# test with megatron
echo "run with tp1 pp1 cp1 num_gpus1"
BACKEND=megatron TP_SIZE=1 PP_SIZE=1 CP_SIZE=1 NUM_GPUS=1 bash tests/special_e2e/sft/run_sft_engine_gsm8k.sh

echo "run with tp2 pp2 vpp2 cp1 num_gpus8"
BACKEND=megatron TP_SIZE=2 PP_SIZE=2 VPP_SIZE=2 CP_SIZE=1 NUM_GPUS=8 bash tests/special_e2e/sft/run_sft_engine_gsm8k.sh

# TODO: switch to the following test once cp is fixed
# BACKEND=megatron TP_SIZE=2 PP_SIZE=2 VPP_SIZE=2 CP_SIZE=1 NUM_GPUS=8 bash tests/special_e2e/sft/run_sft_engine_gsm8k.sh >& ~/verl/test/log/gsm8k-tp2_pp2_vpp2_cp1_num_gpus8.log

python3 tests/special_e2e/sft/compare_sft_engine_results.py

rm -rf ~/verl/test/log

================================================
FILE: verl_distillation/tests/special_e2e/sft/test_sp_loss_match.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
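# This test runs every micro batch through the model twice -- once with Ulysses
# sequence parallelism and padding removal disabled, once with both enabled --
# and asserts that the rank-averaged losses agree. The acceptance criterion
# used below is, informally:
#
#   rel_diff = |loss_ref - loss_sp| / (|loss_ref| + 1e-8) < 1e-2
#
# A self-contained sketch of the same check (hypothetical loss values,
# independent of the trainer):
#
#   import torch
#   loss_ref, loss_sp = torch.tensor(2.3041), torch.tensor(2.3056)
#   rel_diff = (loss_ref - loss_sp).abs() / (loss_ref.abs() + 1e-8)
#   assert rel_diff.item() < 1e-2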
import torch import torch.distributed from tensordict import TensorDict from torch.distributed.device_mesh import init_device_mesh from verl.trainer.fsdp_sft_trainer import FSDPSFTTrainer from verl.utils.distributed import initialize_global_process_group def test_trainer_forward_consistency(trainer: FSDPSFTTrainer, total_steps: int = 4): """Test consistency between original forward pass and SP+rmpad forward passes. Args: trainer: The FSDPSFTTrainer instance to test total_steps: Number of steps to test (default: 4) """ if trainer.device_mesh.get_rank() == 0: print("\nStarting debug comparison between original and SP+rmpad forward passes...") print(f"Sequence parallel size: {trainer.config.ulysses_sequence_parallel_size}") print(f"Remove padding: {trainer.use_remove_padding}\n") steps_remaining = total_steps for epoch in range(1): # Just one epoch for testing trainer.train_sampler.set_epoch(epoch=epoch) for data in trainer.train_dataloader: data = TensorDict(data, batch_size=trainer.config.data.train_batch_size).cuda() trainer.fsdp_model.train() micro_batches = data.split(trainer.config.data.micro_batch_size_per_gpu) for idx, micro_batch in enumerate(micro_batches): if trainer.device_mesh.get_rank() == 0: print(f"\nProcessing micro batch {idx + 1}/{len(micro_batches)}") # Compute losses using both methods # Disable SP and rmpad trainer.use_remove_padding = False old_sp = trainer.config.ulysses_sequence_parallel_size trainer.config.ulysses_sequence_parallel_size = 1 loss_ref = trainer._compute_loss_and_backward(micro_batch.copy(), do_backward=False) # Do SP and rmpad trainer.config.ulysses_sequence_parallel_size = old_sp trainer.use_remove_padding = True loss_sp = trainer._compute_loss_and_backward(micro_batch.copy(), do_backward=False) # Collect losses across all ranks loss_ref_all = loss_ref.clone() loss_sp_all = loss_sp.clone() torch.distributed.all_reduce(loss_ref_all, op=torch.distributed.ReduceOp.AVG) torch.distributed.all_reduce(loss_sp_all, op=torch.distributed.ReduceOp.AVG) # Calculate relative difference of averaged losses rel_diff = torch.abs(loss_ref_all - loss_sp_all) / (torch.abs(loss_ref_all) + 1e-8) if trainer.device_mesh.get_rank() == 0: print("\nComparison Results (Averaged across ranks):") print(f"Reference Loss: {loss_ref_all.item():.6f}") print(f"SP+rmpad Loss: {loss_sp_all.item():.6f}") print(f"Relative Difference: {rel_diff.item():.6f}") assert rel_diff.item() < 1e-2, "Significant difference detected between averaged losses!" print("Loss difference is within the acceptable range.") steps_remaining -= 1 if steps_remaining == 0: break if steps_remaining == 0: break break if trainer.device_mesh.get_rank() == 0: print("\nDebug comparison completed successfully.") def create_trainer(config): """Create and initialize a trainer instance with the given config. 
    Args:
        config: Configuration object with training parameters

    Returns:
        FSDPSFTTrainer: Initialized trainer instance
    """
    local_rank, rank, world_size = initialize_global_process_group()

    device_mesh = init_device_mesh(device_type="cuda", mesh_shape=(world_size,), mesh_dim_names=("fsdp",))
    dp_size = world_size // config.ulysses_sequence_parallel_size
    ulysses_device_mesh = init_device_mesh(
        device_type="cuda", mesh_shape=(dp_size, config.ulysses_sequence_parallel_size), mesh_dim_names=("dp", "sp")
    )

    # build tokenizer and datasets first
    from verl.trainer.fsdp_sft_trainer import create_sft_dataset
    from verl.utils import hf_tokenizer
    from verl.utils.fs import copy_to_local

    local_model_path = copy_to_local(src=config.model.partial_pretrain, verbose=True)
    tokenizer = hf_tokenizer(local_model_path, trust_remote_code=config.model.trust_remote_code)
    train_dataset = create_sft_dataset(
        config.data.train_files, config.data, tokenizer, max_samples=config.data.get("train_max_samples", -1)
    )
    val_dataset = create_sft_dataset(
        config.data.val_files, config.data, tokenizer, max_samples=config.data.get("val_max_samples", -1)
    )

    return FSDPSFTTrainer(
        config=config,
        device_mesh=device_mesh,
        ulysses_device_mesh=ulysses_device_mesh,
        tokenizer=tokenizer,
        train_dataset=train_dataset,
        val_dataset=val_dataset,
    )


def main(config):
    """Main function to run trainer tests.

    Args:
        config: Configuration object with training parameters
    """
    trainer = create_trainer(config)
    test_trainer_forward_consistency(trainer)


if __name__ == "__main__":
    import hydra
    from omegaconf import DictConfig

    @hydra.main(config_path="../../../verl/trainer/config", config_name="sft_trainer")
    def hydra_entry(cfg: DictConfig) -> None:
        main(cfg)

    hydra_entry()

================================================
FILE: verl_distillation/tests/special_npu/run_qwen2_5_05b_dapo.sh
================================================
#!/usr/bin/env bash
set -xeuo pipefail

NUM_GPUS=${NUM_GPUS:-16}

MODEL_ID=${MODEL_ID:-Qwen/Qwen2.5-0.5B-Instruct}
MODEL_PATH=${MODEL_PATH:-${HOME}/models/${MODEL_ID}}

adv_estimator=grpo

kl_coef=0.0
use_kl_in_reward=False
use_kl_loss=False
kl_loss_coef=0.0

clip_ratio_low=0.2
clip_ratio_high=0.28

max_prompt_length=1024
max_response_length=2048
enable_overlong_buffer=True
overlong_buffer_len=128
overlong_penalty_factor=1.0

loss_agg_mode="token-mean"

enable_filter_groups=True
filter_groups_metric=seq_reward
max_num_gen_batches=10

# batch-size bookkeeping: b = trajectories per GPU per micro batch,
# n = number of GPUs, g = responses sampled per prompt
train_traj_micro_bsz_per_gpu=2 # b
n_resp_per_prompt=4 # g

train_traj_micro_bsz=$((train_traj_micro_bsz_per_gpu * NUM_GPUS)) # b * n
train_traj_mini_bsz=$((train_traj_micro_bsz * 2)) # 2 * b * n
train_prompt_mini_bsz=$((train_traj_mini_bsz / n_resp_per_prompt)) # 2 * b * n / g
train_prompt_bsz=$((train_prompt_mini_bsz * 2)) # 4 * b * n / g
gen_prompt_bsz=$((train_prompt_bsz * 4))

exp_name="$(basename "${MODEL_ID,,}")-dapo-minimal"

python3 -m recipe.dapo.main_dapo \
    data.train_files="${HOME}/data/gsm8k/train.parquet" \
    data.val_files="${HOME}/data/gsm8k/test.parquet" \
    reward_model.reward_manager=dapo \
    algorithm.adv_estimator=${adv_estimator} \
    algorithm.use_kl_in_reward=${use_kl_in_reward} \
    algorithm.kl_ctrl.kl_coef=${kl_coef} \
    actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \
    actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \
    actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \
    actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \
    data.max_prompt_length=${max_prompt_length} \
    data.max_response_length=${max_response_length} \
    reward_model.overlong_buffer.enable=${enable_overlong_buffer} \
reward_model.overlong_buffer.len=${overlong_buffer_len} \ reward_model.overlong_buffer.penalty_factor=${overlong_penalty_factor} \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ data.train_batch_size=${train_prompt_bsz} \ data.gen_batch_size=${gen_prompt_bsz} \ algorithm.filter_groups.enable=${enable_filter_groups} \ algorithm.filter_groups.metric=${filter_groups_metric} \ algorithm.filter_groups.max_num_gen_batches=${max_num_gen_batches} \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.model.use_fused_kernels=True \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=${train_traj_micro_bsz_per_gpu} \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=${train_traj_micro_bsz_per_gpu} \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.8 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=${train_traj_micro_bsz_per_gpu} \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ actor_rollout_ref.actor.fsdp_config.forward_prefetch=True \ actor_rollout_ref.ref.fsdp_config.forward_prefetch=True \ actor_rollout_ref.actor.entropy_checkpointing=True \ actor_rollout_ref.ref.entropy_checkpointing=True \ actor_rollout_ref.actor.entropy_from_logits_with_chunking=True \ actor_rollout_ref.ref.entropy_from_logits_with_chunking=True \ actor_rollout_ref.actor.use_torch_compile=False \ actor_rollout_ref.ref.use_torch_compile=False \ trainer.logger=console \ trainer.project_name='verl-test' \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node=${NUM_GPUS} \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=-1 \ trainer.total_epochs=1 \ trainer.resume_mode=disable \ trainer.val_before_train=False \ trainer.total_training_steps=2 \ trainer.device=npu $@ ================================================ FILE: verl_distillation/tests/special_npu/run_qwen2_5_05b_grpo.sh ================================================ set -x MODEL_ID=${MODEL_ID:-Qwen/Qwen2.5-0.5B-Instruct} MODEL_PATH=${MODEL_PATH:-${HOME}/models/${MODEL_ID}} python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=128 \ data.max_prompt_length=512 \ data.max_response_length=128 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.actor.optim.lr=5e-7 \ actor_rollout_ref.model.use_remove_padding=False \ actor_rollout_ref.actor.ppo_mini_batch_size=64 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=20 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.actor.use_torch_compile=False \ actor_rollout_ref.ref.use_torch_compile=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=40 \ actor_rollout_ref.rollout.enable_chunked_prefill=False \ 
actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=40 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.kl_ctrl.kl_coef=0.001 \ trainer.critic_warmup=0 \ trainer.logger=console \ trainer.project_name='verl_grpo_example_gsm8k' \ trainer.experiment_name='qwen2_7b_function_rm' \ trainer.n_gpus_per_node=16 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=-1 \ trainer.total_epochs=1 \ trainer.total_training_steps=2 \ trainer.device=npu $@ ================================================ FILE: verl_distillation/tests/special_npu/run_qwen2_5_05b_grpo_mindspeed.sh ================================================ set -x MODEL_ID=${MODEL_ID:-Qwen/Qwen2.5-0.5B-Instruct} MODEL_PATH=${MODEL_PATH:-${HOME}/models/${MODEL_ID}} USE_DIST_CKPT=${USE_DIST_CKPT:-False} DIST_CKPT_PATH=${DIST_CKPT_PATH:-${HOME}/dist_ckpt/qwen2_5_05b_grpo_mindspeed} if [ "$USE_DIST_CKPT" = "True" ]; then if [ "$USE_DUMMY_MODEL" = "True" ]; then DIST_CKPT_PATH=${HOME}/dist_ckpt_dummy/${MODEL_ID} fi python scripts/converter_hf_to_mcore.py \ --hf_model_path "${MODEL_PATH}" \ --output_path "${DIST_CKPT_PATH}" fi python3 -m verl.trainer.main_ppo --config-path=config \ --config-name='ppo_megatron_trainer.yaml' \ algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=128 \ data.max_prompt_length=512 \ data.max_response_length=128 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=${MODEL_ID} \ actor_rollout_ref.actor.optim.lr=5e-7 \ actor_rollout_ref.actor.ppo_mini_batch_size=64 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=20 \ actor_rollout_ref.actor.strategy=megatron \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=2 \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=2 \ actor_rollout_ref.actor.megatron.expert_model_parallel_size=1 \ actor_rollout_ref.actor.megatron.use_dist_checkpointing=True \ actor_rollout_ref.actor.megatron.dist_checkpointing_path=${DIST_CKPT_PATH} \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.use_torch_compile=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=40 \ actor_rollout_ref.rollout.enable_chunked_prefill=False \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=40 \ actor_rollout_ref.ref.strategy=megatron \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=2 \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=2 \ actor_rollout_ref.ref.megatron.expert_model_parallel_size=1 \ actor_rollout_ref.ref.megatron.use_dist_checkpointing=True \ actor_rollout_ref.ref.megatron.dist_checkpointing_path=${DIST_CKPT_PATH} \ actor_rollout_ref.ref.use_torch_compile=False \ algorithm.kl_ctrl.kl_coef=0.001 \ trainer.critic_warmup=0 \ trainer.logger=console \ trainer.project_name='verl_grpo_example_gsm8k' \ trainer.experiment_name='qwen2_7b_function_rm' \ trainer.n_gpus_per_node=16 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=-1 \ trainer.total_epochs=1 \ trainer.total_training_steps=2 \ 
trainer.device=npu \ +actor_rollout_ref.actor.megatron.override_transformer_config.use_flash_attn=True $@ ================================================ FILE: verl_distillation/tests/special_npu/run_qwen2_5_05b_sft_peft_sp2.sh ================================================ set -x mkdir -p ./save_ckpts MODEL_ID=${MODEL_ID:-Qwen/Qwen2.5-0.5B-Instruct} MODEL_PATH=${MODEL_PATH:-${HOME}/models/${MODEL_ID}} torchrun --standalone --nnodes=1 --nproc_per_node=8 \ -m verl.trainer.fsdp_sft_trainer \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.prompt_key=extra_info \ data.response_key=extra_info \ optim.lr=1e-4 \ data.prompt_dict_keys=['question'] \ +data.response_dict_keys=['answer'] \ data.micro_batch_size_per_gpu=32 \ model.partial_pretrain="${MODEL_PATH}" \ trainer.default_local_dir=./save_ckpts \ trainer.project_name=gsm8k-sft \ trainer.experiment_name=gsm8k-sft-qwen-2.5-0.5b-instruct \ trainer.logger=console \ trainer.total_epochs=1 \ trainer.total_training_steps=2 \ model.lora_rank=32 \ model.lora_alpha=16 \ model.target_modules=all-linear \ model.strategy=fsdp \ ulysses_sequence_parallel_size=2 \ use_remove_padding=true \ trainer.device=npu rm -rf ./outputs ./save_ckpts ================================================ FILE: verl_distillation/tests/special_npu/run_qwen2_5_vl_3b_npu.sh ================================================ set -x ENGINE=${1:-vllm} # Some models are optimized by vllm ascend. While in some case, e.g. rlhf training, # the optimized model may not be suitable. In this case, set this value to 0 to disable the optimized model. export USE_OPTIMIZED_MODEL=0 MODEL_ID=${MODEL_ID:-Qwen/Qwen2.5-VL-3B-Instruct} MODEL_PATH=${MODEL_PATH:-${HOME}/models/${MODEL_ID}} python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/geo3k/train.parquet \ data.val_files=$HOME/data/geo3k/test.parquet \ data.train_batch_size=512 \ data.max_prompt_length=1024 \ data.max_response_length=2048 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.image_key=images \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=32 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=2 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.01 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.use_torch_compile=False \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.ref.use_torch_compile=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=$ENGINE \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.enable_chunked_prefill=False \ actor_rollout_ref.rollout.enforce_eager=True \ actor_rollout_ref.rollout.free_cache_engine=True \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger=console \ trainer.project_name='verl_grpo_example_geo3k' \ trainer.experiment_name='qwen2_5_vl_3b_function_rm' \ trainer.n_gpus_per_node=16 \ 
trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=-1 \ trainer.total_epochs=1 \ trainer.total_training_steps=2 \ trainer.device=npu $@ ================================================ FILE: verl_distillation/tests/special_npu/run_qwen3_06b_ppo.sh ================================================ set -x # TODO (FightingZhen) Env VLLM_USE_V1=1 is not supported in vllm==0.7.3 # export VLLM_USE_V1=1 MODEL_ID=${MODEL_ID:-Qwen/Qwen2.5-0.5B-Instruct} # TODO: change to Qwen3-0.6B when CI server is ready MODEL_PATH=${MODEL_PATH:-${HOME}/models/${MODEL_ID}} python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=gae \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=128 \ data.max_prompt_length=512 \ data.max_response_length=128 \ data.shuffle=False \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=64 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=8 \ actor_rollout_ref.actor.fsdp_config.param_offload=True \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=True \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=2 \ actor_rollout_ref.actor.use_dynamic_bsz=True \ actor_rollout_ref.actor.use_torch_compile=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=8 \ actor_rollout_ref.rollout.tensor_model_parallel_size=1 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.8 \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=True \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.enforce_eager=False \ critic.optim.lr=1e-5 \ critic.model.use_remove_padding=True \ critic.model.path="${MODEL_PATH}" \ critic.model.enable_gradient_checkpointing=True \ critic.ppo_micro_batch_size_per_gpu=8 \ critic.ulysses_sequence_parallel_size=2 \ critic.model.fsdp_config.param_offload=True \ critic.model.fsdp_config.optimizer_offload=True \ critic.use_dynamic_bsz=True \ trainer.critic_warmup=0 \ trainer.logger='["console"]' \ trainer.project_name='verl_ppo_example_gsm8k_qwen3' \ trainer.experiment_name='qwen3_06b_fsdp' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=-1 \ trainer.total_epochs=1 \ trainer.total_training_steps=2 \ trainer.device=npu $@ ================================================ FILE: verl_distillation/tests/special_sanity/check_api_docs.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fail CI if any function or class that is publicly exported via ``__all__`` lacks a docstring. 
Usage ----- # Check specific modules or packages python check_docstrings.py mypkg.core mypkg.utils # Check an entire source tree (all top-level packages under cwd) python check_docstrings.py """ from __future__ import annotations import argparse import importlib import inspect import pkgutil import sys from pathlib import Path from types import ModuleType from typing import Iterable _ALLOW_LIST = [ "verl.third_party.vllm.LLM", "verl.third_party.vllm.parallel_state", "verl.utils.profiler.WorkerProfiler", "verl.utils.profiler.WorkerProfilerExtension", "verl.utils.profiler.log_gpu_memory_usage", "verl.utils.profiler.log_print", "verl.utils.profiler.mark_annotate", "verl.utils.profiler.mark_end_range", "verl.utils.profiler.mark_start_range", "verl.models.mcore.qwen2_5_vl.get_vision_model_config", "verl.models.mcore.qwen2_5_vl.get_vision_projection_config", "verl.models.mcore.mbridge.freeze_moe_router", "verl.models.mcore.mbridge.make_value_model", "verl.utils.transformers_compat.flash_attn_supports_top_left_mask", ] def iter_submodules(root: ModuleType) -> Iterable[ModuleType]: """Yield *root* and every sub-module inside it.""" yield root def print_pkg_error(pkg_name): print(f"[warn] Skipping {pkg_name!r}", file=sys.stderr) if getattr(root, "__path__", None): # only packages have __path__ for mod_info in pkgutil.walk_packages(root.__path__, prefix=f"{root.__name__}.", onerror=print_pkg_error): try: yield importlib.import_module(mod_info.name) except Exception as exc: print(f"[warn] Skipping {mod_info.name!r}: {exc}", file=sys.stderr) def names_missing_doc(mod: ModuleType) -> list[str]: """Return fully-qualified names that need docstrings.""" missing: list[str] = [] public = getattr(mod, "__all__", []) for name in public: obj = getattr(mod, name, None) if f"{mod.__name__}.{name}" in _ALLOW_LIST: continue if obj is None: # Exported but not found in the module: flag it anyway. missing.append(f"{mod.__name__}.{name} (not found)") continue if inspect.isfunction(obj) or inspect.isclass(obj): doc = inspect.getdoc(obj) if not doc or not doc.strip(): missing.append(f"{mod.__name__}.{name}") return missing def check_module(qualname: str) -> list[str]: """Import *qualname* and check it (and sub-modules).""" try: module = importlib.import_module(qualname) except ModuleNotFoundError as exc: print(f"[error] Cannot import '{qualname}': {exc}", file=sys.stderr) return [qualname] missing: list[str] = [] for submod in iter_submodules(module): missing.extend(names_missing_doc(submod)) return missing def autodiscover_packages() -> list[str]: """Detect top-level packages under CWD when no argument is given.""" pkgs: list[str] = [] for p in Path.cwd().iterdir(): if p.is_dir() and (p / "__init__.py").exists(): pkgs.append(p.name) return pkgs def main() -> None: parser = argparse.ArgumentParser(description=__doc__) parser.add_argument( "modules", nargs="*", help="Fully-qualified module or package names (defaults to every top-level package found in CWD).", ) args = parser.parse_args() targets = args.modules or autodiscover_packages() if not targets: raise ValueError("[error] No modules specified and none detected automatically.") all_missing: list[str] = [] for modname in targets: all_missing.extend(check_module(modname)) if all_missing: print("\nMissing docstrings:") for name in sorted(all_missing): print(f" - {name}") raise ValueError("Missing docstrings detected. 
Please enhance them with docs accordingly.") print("✅ All exported functions/classes have docstrings.") if __name__ == "__main__": main() ================================================ FILE: verl_distillation/tests/special_sanity/check_dataproto_usage.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This CI test is used for checking whether DataProto is used in the code of some directory """ import os from argparse import ArgumentParser from pathlib import Path SEARCH_WHITELIST = [] SEARCH_KEYWORDS = ["DataProto"] if __name__ == "__main__": parser = ArgumentParser() parser.add_argument("--directory", "-d", required=True, type=str) args = parser.parse_args() directory_in_str = args.directory pathlist = Path(directory_in_str).glob("**/*.py") for path in pathlist: path_in_str = str(path.absolute()) # judge whether current path is in pre-defined search whitelist or not. path_in_whitelist = False for sw in SEARCH_WHITELIST: # for easy debugging in non-linux system sw = sw.replace("/", os.sep) if sw in path_in_str: print(f"[SKIP] File {path_in_str} is in device api usage check whitelist, checking is skipped.") path_in_whitelist = True break if path_in_whitelist: continue with open(path_in_str, encoding="utf-8") as f: file_content = f.read() find_invalid_device_management = False for sk in SEARCH_KEYWORDS: if sk in file_content: find_invalid_device_management = True break print( f"[CHECK] File {path_in_str} is detected for DataProto usage check, check result: " f"{'success' if not find_invalid_device_management else f'failed, because detect {sk}'}." ) assert not find_invalid_device_management, ( f"file {path_in_str} contains DataProto usage, please use TensorDict directly!" ) ================================================ FILE: verl_distillation/tests/special_sanity/check_device_api_usage.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This CI test is used for checking whether device api usage is irregular, suggest using api in `verl/utils/device.py`. Search targets include .py files in verl/recipe and verl/verl. Some files that must contain ".cuda", "cuda" or "nccl" keyword is pre-defined in whitelist below. 
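
For example, instead of hard-coding torch.cuda / "cuda" / "nccl", code is expected to go
through the device abstraction layer (a sketch; see verl/utils/device.py for the actual API):

    from verl.utils.device import get_device_name, get_torch_device
    device_name = get_device_name()    # e.g. "cuda" on GPU hosts, "npu" on Ascend
    get_torch_device().empty_cache()   # dispatches to torch.cuda / torch.npu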
""" import os from argparse import ArgumentParser from pathlib import Path # directory or file path must contain keyword ".cuda" or "cuda" CUDA_KEYWORD_CHECK_WHITELIST = [ "verl/utils/device.py", "recipe/prime/prime_ray_trainer.py", # appear in default device_name "recipe/spin/spin_trainer.py", # appear in default device_name "recipe/sppo/sppo_ray_trainer.py", # appear in default device_name "recipe/one_step_off_policy/ray_trainer.py", # appear in default device_name "recipe/transfer_queue/ray_trainer.py", # appear in default device_name "verl/utils/profiler/nvtx_profile.py", # appear in NsightSystemsProfiler "verl/utils/kernel/linear_cross_entropy.py", # appear in nvidia nvtx "verl/utils/rendezvous/ray_backend.py", # appear in cupy importance "verl/single_controller/ray/base.py", # appear in default device_name "verl/trainer/ppo/ray_trainer.py", # appear in default device_name "verl/utils/reward_score/sandbox_fusion/utils.py", # appear in sandbox language type "verl/workers/reward_model/megatron/reward_model.py", # appear in default device_name "verl/third_party/torch/distributed/_state_dict_utils.py", # torch monkey patch fixes "verl/third_party/torch/distributed/checkpoint/state_dict.py", # torch monkey patch fixes "verl/workers/engine/base.py", # appear in default device_name "verl/workers/engine/fsdp/transformer_impl.py", # appear in default device_name "verl/workers/rollout/vllm_rollout/vllm_async_server.py", # appear in config.cudagraph_capture_sizes "verl/workers/rollout/sglang_rollout/async_sglang_server.py", # manually set CUDA_VISIBLE_DEVICES ] # directory or file path must contain keyword "nccl" NCCL_KEYWORD_CHECK_WHITELIST = [ "verl/utils/device.py", "verl/third_party/sglang/parallel_state.py", # appear in default backend "verl/recipe/fully_async_policy/param_sync.py", # fully_async_policy in default backend ] SEARCH_WHITELIST = CUDA_KEYWORD_CHECK_WHITELIST + NCCL_KEYWORD_CHECK_WHITELIST SEARCH_KEYWORDS = [".cuda", '"cuda"', '"nccl"'] if __name__ == "__main__": parser = ArgumentParser() parser.add_argument("--directory", "-d", required=True, type=str) args = parser.parse_args() directory_in_str = args.directory pathlist = Path(directory_in_str).glob("**/*.py") for path in pathlist: path_in_str = str(path.absolute()) # judge whether current path is in pre-defined search whitelist or not. path_in_whitelist = False for sw in SEARCH_WHITELIST: # for easy debugging in non-linux system sw = sw.replace("/", os.sep) if sw in path_in_str: print(f"[SKIP] File {path_in_str} is in device api usage check whitelist, checking is skipped.") path_in_whitelist = True break if path_in_whitelist: continue with open(path_in_str, encoding="utf-8") as f: file_content = f.read() find_invalid_device_management = False for sk in SEARCH_KEYWORDS: if sk in file_content: find_invalid_device_management = True break print( f"[CHECK] File {path_in_str} is detected for device api usage check, check result: " f"{'success' if not find_invalid_device_management else f'failed, because detect {sk}'}." ) assert not find_invalid_device_management, ( f'file {path_in_str} contains .cuda/"cuda"/"nccl" usage, please use api in ' f"verl/utils/device.py directly." ) ================================================ FILE: verl_distillation/tests/special_sanity/check_docs_time_info.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Check that every .md and .rst file under docs/ contains the substring "Last updated", with an allow-list for exceptions. """ import sys from pathlib import Path # === CONFIGURATION === # Relative paths (to docs/) or glob patterns to skip checking ALLOW_LIST = { "docs/README.md", # you can list individual files "docs/legacy/*.rst", # or glob patterns "docs/index.rst", "docs/start/install.rst", "docs/start/quickstart.rst", "docs/README_vllm0.7.md", } # The folder to scan DOCS_DIR = Path("docs") # === SCRIPT === def is_allowed(path: Path) -> bool: """ Return True if `path` matches any entry in ALLOW_LIST. """ rel = str(path) for pattern in ALLOW_LIST: if Path(rel).match(pattern): return True return False def main(): if not DOCS_DIR.exists(): print(f"Error: Documentation directory '{DOCS_DIR}' does not exist.", file=sys.stderr) sys.exit(1) missing = [] # Gather all .md and .rst files under docs/ for ext in ("*.md", "*.rst"): for path in DOCS_DIR.rglob(ext): if is_allowed(path): continue text = path.read_text(encoding="utf-8", errors="ignore") if "Last updated" not in text: missing.append(path) # Report if missing: print("\nThe following files are missing the 'Last updated' string:\n") for p in missing: print(f" - {p}") print(f"\nTotal missing: {len(missing)}\n", file=sys.stderr) raise AssertionError( "Some documentation files lack a 'Last updated' line. Please include info such as " "'Last updated: mm/dd/yyyy' to indicate the last update time of the document." ) else: print("✅ All checked files contain 'Last updated'.") if __name__ == "__main__": main() ================================================ FILE: verl_distillation/tests/special_sanity/check_docstrings.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Python script to check docstrings for functions and classes in specified files. Checks that every public function and class has proper docstring documentation. 
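Only top-level public names are inspected: anything whose name starts with "_" is skipped,
and functions nested inside another function are ignored (tracked via function_nesting_level
below). For example, a top-level `def build_trainer(cfg):` without a docstring is flagged,
while `_helper()` or an inner closure is not.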
""" import ast import os import sys class DocstringChecker(ast.NodeVisitor): """AST visitor to check for missing docstrings in functions and classes.""" def __init__(self, filename: str): self.filename = filename self.missing_docstrings: list[tuple[str, str, int]] = [] self.current_class = None self.function_nesting_level = 0 def visit_FunctionDef(self, node: ast.FunctionDef): """Visit function definitions and check for docstrings.""" if not node.name.startswith("_") and self.function_nesting_level == 0: if not self._has_docstring(node): func_name = f"{self.current_class}.{node.name}" if self.current_class else node.name self.missing_docstrings.append((func_name, self.filename, node.lineno)) self.function_nesting_level += 1 self.generic_visit(node) self.function_nesting_level -= 1 def visit_AsyncFunctionDef(self, node: ast.AsyncFunctionDef): """Visit async function definitions and check for docstrings.""" if not node.name.startswith("_") and self.function_nesting_level == 0: if not self._has_docstring(node): func_name = f"{self.current_class}.{node.name}" if self.current_class else node.name self.missing_docstrings.append((func_name, self.filename, node.lineno)) self.function_nesting_level += 1 self.generic_visit(node) self.function_nesting_level -= 1 def visit_ClassDef(self, node: ast.ClassDef): """Visit class definitions and check for docstrings.""" if not node.name.startswith("_"): if not self._has_docstring(node): self.missing_docstrings.append((node.name, self.filename, node.lineno)) old_class = self.current_class self.current_class = node.name self.generic_visit(node) self.current_class = old_class def _has_docstring(self, node) -> bool: """Check if a node has a docstring.""" return ast.get_docstring(node) is not None def check_file_docstrings(filepath: str) -> list[tuple[str, str, int]]: """Check docstrings in a single file.""" try: with open(filepath, encoding="utf-8") as f: content = f.read() tree = ast.parse(content, filename=filepath) checker = DocstringChecker(filepath) checker.visit(tree) return checker.missing_docstrings except Exception as e: print(f"Error processing {filepath}: {e}") return [] def main(): """Main function to check docstrings in specified files.""" files_to_check = [ "verl/trainer/ppo/ray_trainer.py", "verl/trainer/main_ppo.py", "verl/trainer/ppo/reward.py", "verl/utils/reward_score/__init__.py", "verl/trainer/ppo/core_algos.py", "verl/experimental/agent_loop/agent_loop.py", "verl/workers/sharding_manager/fsdp_vllm.py", "verl/workers/sharding_manager/fsdp_ulysses.py", ] script_dir = os.path.dirname(os.path.abspath(__file__)) repo_path = os.path.dirname(os.path.dirname(script_dir)) if not os.path.exists(repo_path): print(f"Repository path {repo_path} does not exist!") sys.exit(1) os.chdir(repo_path) all_missing_docstrings = [] print("Checking docstrings in specified files...") print("=" * 60) for file_path in files_to_check: if not os.path.exists(file_path): print(f"Warning: File {file_path} does not exist!") continue print(f"Checking {file_path}...") missing = check_file_docstrings(file_path) all_missing_docstrings.extend(missing) if missing: print(f" Found {len(missing)} missing docstrings") else: print(" All functions and classes have docstrings ✓") print("=" * 60) if all_missing_docstrings: print(f"\nSUMMARY: Found {len(all_missing_docstrings)} functions/classes missing docstrings:") print("-" * 60) by_file = {} for name, filepath, lineno in all_missing_docstrings: if filepath not in by_file: by_file[filepath] = [] by_file[filepath].append((name, lineno)) 
for filepath in sorted(by_file.keys()): print(f"\n{filepath}:") for name, lineno in sorted(by_file[filepath], key=lambda x: x[1]): print(f" - {name} (line {lineno})") print(f"\nTotal missing docstrings: {len(all_missing_docstrings)}") raise Exception(f"Found {len(all_missing_docstrings)} functions/classes without proper docstrings!") else: print("\n✅ All functions and classes have proper docstrings!") if __name__ == "__main__": main() ================================================ FILE: verl_distillation/tests/special_sanity/check_license.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from argparse import ArgumentParser from pathlib import Path from typing import Iterable license_head_bytedance = "Copyright 2024 Bytedance Ltd. and/or its affiliates" license_head_bytedance_25 = "Copyright 2025 Bytedance Ltd. and/or its affiliates" # Add custom license headers below license_head_prime = "Copyright 2024 PRIME team and/or its affiliates" license_head_individual = "Copyright 2025 Individual Contributor:" license_head_sglang = "Copyright 2023-2024 SGLang Team" license_head_modelbest = "Copyright 2025 ModelBest Inc. and/or its affiliates" license_head_amazon = "Copyright 2025 Amazon.com Inc and/or its affiliates" license_head_facebook = "Copyright (c) 2016- Facebook, Inc" license_head_meituan = "Copyright 2025 Meituan Ltd. and/or its affiliates" license_headers = [ license_head_bytedance, license_head_bytedance_25, license_head_prime, license_head_individual, license_head_sglang, license_head_modelbest, license_head_amazon, license_head_facebook, license_head_meituan, ] def get_py_files(path_arg: Path) -> Iterable[Path]: """get py files under a dir. if already py file return it Args: path_arg (Path): path to scan for py files Returns: Iterable[Path]: list of py files """ if path_arg.is_dir(): return path_arg.glob("**/*.py") elif path_arg.is_file() and path_arg.suffix == ".py": return [path_arg] return [] if __name__ == "__main__": parser = ArgumentParser() parser.add_argument( "--directories", "-d", required=True, type=Path, nargs="+", help="List of directories to check for license headers", ) args = parser.parse_args() # Collect all Python files from specified directories pathlist = set(path for path_arg in args.directories for path in get_py_files(path_arg)) for path in pathlist: # because path is object not string path_in_str = str(path.absolute()) print(path_in_str) with open(path_in_str, encoding="utf-8") as f: file_content = f.read() has_license = False for lh in license_headers: if lh in file_content: has_license = True break assert has_license, f"file {path_in_str} does not contain license" ================================================ FILE: verl_distillation/tests/special_sanity/check_pr_description.py ================================================ # Copyright 2025 Bytedance Ltd. 
and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

#!/usr/bin/env python3

import json
import os

# Number of lines to check
NUM_LINES = 5


# Custom exception types for clear error handling
class TemplateFileError(Exception):
    pass


class PRBodyLoadError(Exception):
    pass


class PRDescriptionError(Exception):
    pass


# Path to the PR template file
template_file = os.path.join(os.getenv("GITHUB_WORKSPACE", "."), ".github", "PULL_REQUEST_TEMPLATE.md")


def load_template(path):
    """
    Load only the first NUM_LINES of the PR template file as a list of lines,
    each stripped of leading and trailing whitespace.
    """
    lines = []
    try:
        with open(path, encoding="utf-8") as f:
            for _ in range(NUM_LINES):
                line = f.readline()
                if not line:
                    break
                lines.append(line.strip())
        return lines
    except Exception as e:
        raise TemplateFileError(f"Failed to read PR template (first {NUM_LINES} lines) at {path}: {e}") from e


def load_pr_body(event_path):
    try:
        with open(event_path, encoding="utf-8") as f:
            payload = json.load(f)
        return payload.get("pull_request", {}).get("body", "") or ""
    except Exception as e:
        raise PRBodyLoadError(f"Failed to read PR body from {event_path}: {e}") from e


def check_pr_description(body, template_lines):
    """
    Compare the first NUM_LINES lines of the PR body to the template lines.
    If they match exactly, the placeholder was not modified.
    """
    pr_lines = body.splitlines(keepends=True)
    pr_first = [x.strip() for x in pr_lines[:NUM_LINES]]
    if pr_first == template_lines:
        raise PRDescriptionError(
            "It looks like you haven't updated the '### What does this PR do?' section. Please replace "
            "the placeholder text with a concise description of what your PR does."
        )
    else:
        # debug output: show what was compared
        print(pr_first)
        print(template_lines)


def main():
    event_path = os.getenv("GITHUB_EVENT_PATH")
    if not event_path:
        raise OSError("GITHUB_EVENT_PATH is not set.")
    template_lines = load_template(template_file)
    pr_body = load_pr_body(event_path)
    check_pr_description(pr_body, template_lines)
    print("✅ '### What does this PR do?' section has been filled out.")


if __name__ == "__main__":
    main()

================================================
FILE: verl_distillation/tests/special_sanity/check_pr_title.py
================================================
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
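# The accepted title grammar is, informally:
#
#   [<1/N or BREAKING prefix, optional>][module(,module)*] type: description
#
# A few titles that would pass or fail this checker (illustrative examples,
# not taken from real PRs):
#
#   "[fsdp] feat: support fused optimizer offload"        -> valid
#   "[BREAKING][rollout, vllm] refactor: new engine API"  -> valid
#   "[1/N] [ci] chore: split sanity workflows"            -> valid
#   "fix memory leak"                        -> rejected (no [module] prefix)
#   "[kernels] feat: faster RoPE"            -> rejected (module not allowed)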
import os import re # Get PR title from environment pr_title = os.environ.get("PR_TITLE", "").strip() # Define rules allowed_modules = ["fsdp", "megatron", "sglang", "vllm", "rollout", "trainer"] allowed_modules += ["tests", "training_utils", "recipe", "hardware", "deployment"] allowed_modules += ["ray", "worker", "single_controller", "misc", "docker", "ci"] allowed_modules += ["perf", "model", "algo", "env", "tool", "ckpt", "doc", "data", "cfg"] allowed_types = ["feat", "fix", "refactor", "chore", "test"] # Check for [1/N] prefix and extract the rest of the title progress_match = re.match(r"^\[\d/[\dNn]\]\s*(.+)$", pr_title, re.IGNORECASE) if progress_match: pr_title = progress_match.group(1).strip() # Check for [BREAKING] prefix and extract the rest of the title breaking_match = re.match(r"^\[BREAKING\]\s*(.+)$", pr_title, re.IGNORECASE) if breaking_match: core_pr_title = breaking_match.group(1).strip() is_breaking = True else: core_pr_title = pr_title is_breaking = False # Build dynamic regex pattern for modules (now working on core_pr_title) re_modules_pattern = re.compile(r"^\[([a-z_,\s]+)\]", re.IGNORECASE) re_modules = re_modules_pattern.match(core_pr_title) if not re_modules: print(f"❌ Invalid PR title: '{pr_title}'") print("Expected format: [BREAKING][module] type: description") print(f"Allowed modules: {', '.join(allowed_modules)}") raise Exception("Invalid PR title") else: modules = re.findall(r"[a-z_]+", re_modules.group(1).lower()) if not all(module in allowed_modules for module in modules): invalid_modules = [module for module in modules if module not in allowed_modules] print(f"❌ Invalid modules: {', '.join(invalid_modules)}") print(f"Allowed modules: {', '.join(allowed_modules)}") raise Exception("Invalid PR title") types_pattern = "|".join(re.escape(t) for t in allowed_types) re_types_pattern = re.compile(rf"^\[[a-z_,\s]+\]\s+({types_pattern}):\s+.+$", re.IGNORECASE) match = re_types_pattern.match(core_pr_title) if not match: print(f"❌ Invalid PR title: '{pr_title}'") print("Expected format: [BREAKING][module] type: description") print(f"Allowed types: {', '.join(allowed_types)}") raise Exception("Invalid PR title") change_type = match.group(1).lower() # Build the success message breaking_info = " (BREAKING CHANGE)" if is_breaking else "" print(f"✅ PR title is valid: {pr_title}, modules: {modules}, type: {change_type}{breaking_info}") ================================================ FILE: verl_distillation/tests/special_sanity/test_config_docs.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
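# The checker below enforces the comment style used by verl's trainer configs:
# every key needs a comment line directly above it, no trailing inline
# comments, and a blank line after each key line. A minimal passing snippet
# (hypothetical config, for illustration only):
#
#   # learning rate for the actor optimizer
#   lr: 1e-6
#
#   # number of warmup steps; -1 delegates to lr_warmup_steps_ratio
#   lr_warmup_steps: -1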
import re
from pathlib import Path


def validate_yaml_format(yaml_lines):
    errors = []
    i = 0
    while i < len(yaml_lines):
        line = yaml_lines[i]
        stripped = line.strip()

        # Skip empty lines
        if stripped == "":
            i += 1
            continue

        # Match YAML keys like "field:" or "field: value"
        key_match = re.match(r"^(\s*)([a-zA-Z0-9_]+):", line)
        if key_match:
            # Check if there's a comment above
            if i == 0 or not yaml_lines[i - 1].strip().startswith("#"):
                errors.append(f"Missing comment above line {i + 1}: {line.strip()}")

            # Check for inline comment
            if "#" in line and not stripped.startswith("#"):
                comment_index = line.index("#")
                colon_index = line.index(":")
                if comment_index > colon_index:
                    errors.append(f"Inline comment found on line {i + 1}: {line.strip()}")

            # Check for a blank line after this key line
            if i + 1 < len(yaml_lines):
                next_line = yaml_lines[i + 1]
                next_stripped = next_line.strip()
                # Every key line must be followed by a blank line
                if next_stripped != "":
                    errors.append(f"Missing blank line after line {i + 1}: {line.strip()}")

        i += 1
    return errors


def test_trainer_config_doc():
    yamls_to_inspect = [
        "verl/trainer/config/ppo_trainer.yaml",
        "verl/trainer/config/actor/actor.yaml",
        "verl/trainer/config/actor/dp_actor.yaml",
        "verl/trainer/config/critic/critic.yaml",
        "verl/trainer/config/critic/dp_critic.yaml",
        "verl/trainer/config/ref/ref.yaml",
        "verl/trainer/config/ref/dp_ref.yaml",
        "verl/trainer/config/rollout/rollout.yaml",
    ]
    success = True
    for yaml_to_inspect in yamls_to_inspect:
        yaml_path = Path(yaml_to_inspect)  # path to your YAML file
        with open(yaml_path) as f:
            lines = f.readlines()

        validation_errors = validate_yaml_format(lines)
        if validation_errors:
            success = False
            print("YAML documentation format check failed:")
            print(f"Please read the top block of {yaml_to_inspect} to see format rules:\n")
            for err in validation_errors:
                print(" -", err)
    if not success:
        raise Exception("Please fix documentation format.")
    else:
        print("YAML format check passed ✅")

================================================
FILE: verl_distillation/tests/special_sanity/test_import.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


def test_import():
    import verl

    print(verl.__version__)


def test_single_controller_import():
    import verl.single_controller

    print(verl.single_controller.__version__)

================================================
FILE: verl_distillation/tests/special_sanity/type_coverage_check.py
================================================
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Custom type annotation check tool. To inspect the type annotation for functions in the entire codebase, please run: find verl -type f -name "*.py" | xargs -n 1 python3 tests/special_sanity/type_coverage_check.py --all-lines --debug --target-file """ import argparse import ast import linecache import subprocess from pathlib import Path def get_changed_files() -> list[Path]: result = subprocess.run( ["git", "diff", "--name-only", "--diff-filter=AM", "origin/main...HEAD"], stdout=subprocess.PIPE, text=True ) return [Path(f) for f in result.stdout.splitlines() if f.endswith(".py")] def get_changed_lines(file_path: Path) -> set[int]: result = subprocess.run( ["git", "diff", "-U0", "origin/main...HEAD", "--", str(file_path)], stdout=subprocess.PIPE, text=True, ) lines: set[int] = set() for line in result.stdout.splitlines(): if line.startswith("@@"): for part in line.split(): try: if part.startswith("+") and "," in part: start, count = map(int, part[1:].split(",")) lines.update(range(start, start + count)) elif part.startswith("+") and "," not in part: lines.add(int(part[1:])) except Exception: # (vermouth1992) There are many edge cases here because + can be in the changed program pass return lines CHECK_SUCCESS = 0 CHECK_WARNING = 1 CHECK_FAILURE = -1 def should_check_type(arg_name: str) -> bool: if arg_name in ("self", "cls"): return False if arg_name.startswith("*"): return False return True def has_type_annotations(node: ast.AST, debug: bool = False) -> int: if isinstance(node, ast.FunctionDef): is_private = node.name.startswith("_") has_ann = ( all(arg.annotation is not None for arg in node.args.args if should_check_type(arg.arg)) and node.returns is not None ) if has_ann or is_private: return CHECK_SUCCESS else: if debug: print(node, [(arg.annotation, arg.arg) for arg in node.args.args if should_check_type(arg.arg)]) return CHECK_FAILURE return CHECK_SUCCESS def check_file( file_path: Path, changed_lines: set[int], debug: bool = False ) -> tuple[int, int, list[tuple[Path, int, str]], list[tuple[Path, int, str]]]: with open(file_path) as f: source: str = f.read() tree = ast.parse(source, filename=str(file_path)) annotated = 0 total = 0 warning_lines: list[tuple[Path, int, str]] = [] failure_lines: list[tuple[Path, int, str]] = [] for node in ast.walk(tree): if hasattr(node, "lineno") and node.lineno in changed_lines: if isinstance(node, ast.FunctionDef | ast.Assign | ast.AnnAssign): total += 1 result = has_type_annotations(node, debug) if result == CHECK_SUCCESS or result == CHECK_WARNING: annotated += 1 if result == CHECK_WARNING: warning_lines.append( (file_path, node.lineno, linecache.getline(str(file_path), node.lineno).strip()) ) else: source_line = linecache.getline(str(file_path), node.lineno).strip() failure_lines.append((file_path, node.lineno, source_line)) return annotated, total, warning_lines, failure_lines def main() -> None: parser = argparse.ArgumentParser() parser.add_argument( "--threshold", type=float, default=0.3, help="Minimum ratio of annotated lines required (0.0 - 1.0)" ) parser.add_argument("--target-file", type=str, default=None, help="Path to the Python source 
file to analyse")
    parser.add_argument(
        "--all-lines",
        action="store_true",
        help="Check all lines in the file instead of only changed lines based on git",
    )
    parser.add_argument("--debug", action="store_true", help="Add debugging logs")
    args = parser.parse_args()

    total_changed = 0
    total_annotated = 0

    all_warnings: list[tuple[Path, int, str]] = []
    all_failures: list[tuple[Path, int, str]] = []

    target_files = [args.target_file] if args.target_file is not None else get_changed_files()

    for fpath in target_files:
        if "tests/" in str(fpath):
            continue
        if args.all_lines:
            # Treat every line of the file as "changed"; use a set to match the
            # return type of get_changed_lines and close the file handle properly
            with open(fpath) as f:
                changed_lines = set(range(1, len(f.readlines()) + 1))
        else:
            changed_lines = get_changed_lines(fpath)
        annotated, total, warning_lines, failure_lines = check_file(fpath, changed_lines, args.debug)
        total_annotated += annotated
        total_changed += total
        all_warnings.extend(warning_lines)
        all_failures.extend(failure_lines)

    ratio = (total_annotated / total_changed) if total_changed else 1.0

    print(
        f"🔍 Type coverage on {'all' if args.all_lines else 'changed'} lines: "
        f"{total_annotated}/{total_changed} = {ratio:.2%}. Files inspected: {target_files}"
    )

    if all_warnings:
        print("\n⚠️ Suggested improvements: Lines missing type annotations for inputs and outputs:\n")
        for fname, lineno, line in all_warnings:
            print(f"{fname}:{lineno}: {line}")

    if all_failures:
        print("❌ [ERROR] Lines missing type annotations for inputs and outputs:\n")
        for fname, lineno, line in all_failures:
            print(f"{fname}:{lineno}: {line}")

    if ratio < args.threshold:
        print(
            f"Please add type annotations for inputs and outputs to meet threshold {args.threshold}. "
            f"Cases exempt from checking:"
        )
        print("1. Private methods.")
        print("2. Args with name in ('self', 'cls'), or *args / **kwargs")
        print("3. Files under tests/")
        raise Exception(f"\n❌ Type coverage below threshold ({args.threshold:.0%}).")
    else:
        if all_warnings or all_failures:
            print("")
        print("✅ Type annotation coverage acceptable.\n")


if __name__ == "__main__":
    main()



================================================
FILE: verl_distillation/tests/special_sanity/validate_imported_docs.py
================================================
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
validate_imported_docs.py

Assert that every function or class *explicitly imported* (via
`from <module> import <name>`) in a given Python file has a docstring.
"""

from __future__ import annotations

import argparse
import ast
import importlib
import inspect
import pathlib
import sys


def _parse_args() -> argparse.Namespace:
    p = argparse.ArgumentParser(description="Verify that imported functions/classes have docstrings.")
    p.add_argument(
        "--target-file",
        default="verl/trainer/ppo/ray_trainer.py",
        help="Path to the Python source file to analyse (e.g. verl/trainer/ppo/ray_trainer.py)",
    )
    p.add_argument(
        "--allow-list",
        nargs="*",
        default=["omegaconf.open_dict"],
        help="A list of third-party dependencies that do not have proper docs :(",
    )
    p.add_argument(
        "--project-root",
        default=".",
        help="Directory to prepend to PYTHONPATH so local packages resolve (default: .)",
    )
    p.add_argument(
        "--quiet",
        action="store_true",
        help="Suppress success message (still prints errors).",
    )
    return p.parse_args()


def _import_attr(module_name: str, attr_name: str):
    """Import `module_name` then return `getattr(module, attr_name)`."""
    module = importlib.import_module(module_name)
    return getattr(module, attr_name)


def _check_file(py_file: pathlib.Path, project_root: pathlib.Path, allow_list: list[str]) -> list[str]:
    """Return a list of error strings (empty == success)."""
    # Ensure local packages resolve
    sys.path.insert(0, str(project_root.resolve()))

    tree = ast.parse(py_file.read_text(), filename=str(py_file))
    problems: list[str] = []

    for node in ast.walk(tree):
        if not isinstance(node, ast.ImportFrom):
            continue

        # Reconstruct relative imports (level > 0) with their leading dots
        module_name = "." * node.level + (node.module or "")

        for alias in node.names:
            if alias.name == "*":
                problems.append(
                    f"{py_file}:{node.lineno} - wildcard import `from {module_name} import *` cannot be verified."
                )
                continue

            imported_name = alias.name
            try:
                obj = _import_attr(module_name, imported_name)
            except Exception:  # pragma: no cover – wide net for import quirks
                # For some reason the module cannot be imported, skip for now
                # problems.append(
                #     f"{py_file}:{node.lineno} - could not resolve "
                #     f"`{imported_name}` from `{module_name}` ({exc})"
                # )
                continue

            if f"{module_name}.{imported_name}" in allow_list:
                continue

            if inspect.isfunction(obj) or inspect.isclass(obj):
                doc = inspect.getdoc(obj)
                if not (doc and doc.strip()):
                    kind = "class" if inspect.isclass(obj) else "function"
                    problems.append(
                        f"{py_file}:{node.lineno} - {kind} `{module_name}.{imported_name}` is missing a docstring."
                    )

    return problems


def main() -> None:
    args = _parse_args()
    target_path = pathlib.Path(args.target_file).resolve()
    project_root = pathlib.Path(args.project_root).resolve()

    if not target_path.is_file():
        raise Exception(f"❌ Target file not found: {target_path}")

    errors = _check_file(target_path, project_root, args.allow_list)

    if errors:
        print("Docstring verification failed:\n")
        print("\n".join(f" • {e}" for e in errors))
        raise Exception("❌ Docstring verification failed.")

    if not args.quiet:
        print(f"✅ All explicitly imported functions/classes in {target_path} have docstrings.")


if __name__ == "__main__":
    main()



================================================
FILE: verl_distillation/tests/special_sanity/validate_structure.py
================================================
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

#!/usr/bin/env python3
"""
Validate that test file subfolders mirror the top-level package layout.

Usage examples
--------------
# Typical run (defaults: impl_root=verl, tests_root=tests)
python validate_structure.py

# Custom layout and extra allowed folders
python validate_structure.py \
    --impl-root verl \
    --tests-root tests \
    --allow-dirs special_e2e special_sanity special_standalone special_distributed
"""

from __future__ import annotations

import argparse
import sys
from pathlib import Path


def discover_allowed_modules(impl_root: Path, extra: list[str]) -> set[str]:
    """Return the set of first-level directories that tests may live under."""
    allowed = {p.name for p in impl_root.iterdir() if p.is_dir()}
    allowed.update(extra)
    return allowed


def find_violations(tests_root: Path, allowed: set[str], allowed_files: list[str]) -> list[str]:
    """Return a list of error strings for test files in the wrong place."""
    errors: list[str] = []
    for test_file in tests_root.rglob("test*.py"):
        if str(test_file) in allowed_files:
            continue
        rel_parts = test_file.relative_to(tests_root).parts
        if len(rel_parts) < 2:
            errors.append(f"{test_file}: must be inside one of {sorted(allowed)} (not at tests root)")
            continue

        first_folder = rel_parts[0]
        if first_folder not in allowed:
            errors.append(
                f"{test_file}: subfolder '{first_folder}' under tests/ is not an allowed module. "
                f"The valid ones are: {sorted(allowed)}"
            )
    return errors


def main() -> None:
    parser = argparse.ArgumentParser(description="Check that test files follow the tests/<module>/… layout.")
    parser.add_argument(
        "--impl-root",
        type=Path,
        default="verl",
        help="Implementation root (default: verl)",
    )
    parser.add_argument(
        "--tests-root",
        type=Path,
        default="tests",
        help="Root of test tree (default: tests)",
    )
    parser.add_argument(
        "--allow-dirs",
        nargs="*",
        default=["special_e2e", "special_sanity", "special_standalone", "special_distributed"],
        help="Extra top-level test folders that are exempt from the rule",
    )
    parser.add_argument(
        "--allow-files",
        nargs="*",
        default=[
            "tests/test_protocol_on_cpu.py",
            "tests/test_base_config_on_cpu.py",
            "tests/test_protocol_v2_on_cpu.py",
        ],
        help="Specific test files that are exempt from the rule",
    )
    args = parser.parse_args()

    if not args.impl_root.is_dir():
        raise Exception(f"Implementation root '{args.impl_root}' does not exist.")
    if not args.tests_root.is_dir():
        raise Exception(f"Tests root '{args.tests_root}' does not exist.")

    allowed = discover_allowed_modules(args.impl_root, args.allow_dirs)
    violations = find_violations(args.tests_root, allowed, args.allow_files)

    if violations:
        print("❌ Test layout violations found:\n", file=sys.stderr)
        for err in violations:
            print(" -", err, file=sys.stderr)
        print(
            f"\nGuideline:\n  Place each test file under tests/<module>/…\n  where <module> is "
            f"one of the top-level packages inside '{args.impl_root}', or is explicitly listed via --allow-dirs.\n",
            file=sys.stderr,
        )
        raise Exception("❌ Test layout violations found.")

    print("✅ Tests folder structure looks good.")


if __name__ == "__main__":
    main()



================================================
FILE: verl_distillation/tests/special_standalone/README.md
================================================
The standalone test folder is reserved for tests that require a dedicated environment (e.g. memory stress tests)



================================================
FILE: verl_distillation/tests/special_standalone/test_memory_buffers.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Test memory buffers
- We start with two models with the same weights
- We use a memory buffer to make one of the models and then compare the parameters
"""

import gc

import torch
from transformers import LlamaConfig, LlamaModel


def test_memory_buffers():
    llama_config = LlamaConfig(
        vocab_size=256,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=2,
        num_attention_heads=16,
        num_key_value_heads=16,
    )

    model = LlamaModel(config=llama_config).cuda()
    model_copy = LlamaModel(config=llama_config).cuda()
    model_copy.load_state_dict(model.state_dict())

    norm_factor = 1024**3

    t_before = torch.cuda.get_device_properties(0).total_memory / norm_factor
    r_before = torch.cuda.memory_reserved(0) / norm_factor
    a_before = torch.cuda.memory_allocated(0) / norm_factor
    print(f"Before Total memory: {t_before} GB, reserved: {r_before} GB, allocated: {a_before} GB")

    gc.collect()
    torch.cuda.empty_cache()

    # Measure again only after garbage collection and cache release, so the
    # "After" numbers actually reflect the cleanup
    t = torch.cuda.get_device_properties(0).total_memory / norm_factor
    r = torch.cuda.memory_reserved(0) / norm_factor
    a = torch.cuda.memory_allocated(0) / norm_factor

    print(f"After Total memory: {t} GB, reserved: {r} GB, allocated: {a} GB")

    change_ratio = (a - a_before) / a_before
    assert abs(change_ratio) < 0.01, f"make sure the allocated change is less than 1%, got {change_ratio}"

    for (name1, param1), (name2, param2) in zip(model.named_parameters(), model_copy.named_parameters(), strict=True):
        assert name1 == name2
        assert torch.eq(param1.data, param2.data).all(), f"{param1.data}, {param2.data}, {name1}"


if __name__ == "__main__":
    test_memory_buffers()



================================================
FILE: verl_distillation/tests/test_base_config_on_cpu.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
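# The tests below rely on BaseConfig supporting dict-style reads. A minimal,
# self-contained sketch of the assumed semantics (an illustration only, not
# verl's actual BaseConfig implementation):
class _GetattrBackedConfigSketch:
    """Dict-style access that simply delegates to attribute lookup."""

    def __getitem__(self, key):
        # getattr raises AttributeError for unknown names and TypeError for
        # non-string keys, which is exactly what the three tests below expect
        return getattr(self, key)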
import pytest from verl.base_config import BaseConfig @pytest.fixture def base_config_mock(): """Fixture to create a mock BaseConfig instance with test attributes.""" mock_config = BaseConfig() mock_config.test_attr = "test_value" return mock_config def test_getitem_success(base_config_mock): """Test __getitem__ with existing attribute (happy path).""" assert base_config_mock["test_attr"] == "test_value" def test_getitem_nonexistent_attribute(base_config_mock): """Test __getitem__ with non-existent attribute (exception path 1).""" with pytest.raises(AttributeError): _ = base_config_mock["nonexistent_attr"] def test_getitem_invalid_key_type(base_config_mock): """Test __getitem__ with invalid key type (exception path 2).""" with pytest.raises(TypeError): _ = base_config_mock[123] # type: ignore ================================================ FILE: verl_distillation/tests/test_protocol_on_cpu.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import numpy as np import pytest import tensordict import torch from packaging.version import parse as parse_version from tensordict import TensorDict from verl import DataProto from verl.protocol import ( deserialize_single_tensor, deserialize_tensordict, serialize_single_tensor, serialize_tensordict, union_numpy_dict, union_tensor_dict, ) from verl.utils import tensordict_utils as tu def test_union_tensor_dict(): obs = torch.randn(100, 10) data1 = TensorDict({"obs": obs, "act": torch.randn(100, 3)}, batch_size=[100]) data2 = TensorDict({"obs": obs, "next_obs": torch.randn(100, 10), "rew": torch.randn(100)}, batch_size=[100]) data_with_copied_obs = TensorDict( {"obs": obs.clone(), "next_obs": torch.randn(100, 10), "rew": torch.randn(100)}, batch_size=[100] ) union_tensor_dict(data1, data2) with pytest.raises(AssertionError): union_tensor_dict(data1, data_with_copied_obs) def test_union_numpy_dict(): """ A comprehensive test suite for union_numpy_dict, covering standard use cases, N-dimensional arrays, object-dtype arrays, and NaN value handling. """ arr_3d = np.arange(8).reshape((2, 2, 2)) union_numpy_dict({"a": arr_3d}, {"a": arr_3d}) arr1 = np.array([1, "hello", np.array([2, 3])], dtype=object) arr2 = np.array([1, "hello", np.array([2, 3])], dtype=object) union_numpy_dict({"a": arr1}, {"a": arr2}) # --- Test Case 1: The original test with mixed object/float types --- # This test case from the original test file is preserved. 
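    # Background for this case (an inference, not documented behavior):
    # np.equal on object-dtype arrays falls back to Python `==`, and
    # float("nan") is never equal to itself, so a naive elementwise comparison
    # would reject identical arrays. The deep comparison is expected to treat
    # NaNs at the same position as equal, while the string "nan" stays an
    # ordinary, comparable string.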
data = np.random.random(100) # This array intentionally mixes float('nan') and the string 'nan' nan_data = [float("nan") for _ in range(99)] nan_data.append("nan") nan_data_arr = np.array(nan_data, dtype=object) dict1 = {"a": data, "b": nan_data_arr} dict2_same = {"a": data.copy(), "b": nan_data_arr.copy()} dict3_different = {"a": np.random.random(100)} union_numpy_dict(dict1, dict2_same) # Should pass with pytest.raises(AssertionError): union_numpy_dict(dict1, dict3_different) # --- Test Case 2: Standard 3D arrays (fixes the core bug) --- arr_3d = np.arange(24, dtype=np.int32).reshape((2, 3, 4)) dict_3d_1 = {"nd_array": arr_3d} dict_3d_2_same = {"nd_array": arr_3d.copy()} dict_3d_3_different = {"nd_array": arr_3d + 1} union_numpy_dict(dict_3d_1, dict_3d_2_same) # Should pass with pytest.raises(AssertionError, match="`nd_array` in tensor_dict1 and tensor_dict2 are not the same object."): union_numpy_dict(dict_3d_1, dict_3d_3_different) # --- Test Case 3: Nested 2D and 4D object-dtype arrays --- sub_arr1 = np.array([1, 2]) sub_arr2 = np.array([3.0, 4.0]) # 2D object array arr_2d_obj = np.array([[sub_arr1, "text"], [sub_arr2, None]], dtype=object) arr_2d_obj_diff = np.array([[sub_arr1, "text"], [sub_arr2, "other"]], dtype=object) union_numpy_dict({"data": arr_2d_obj}, {"data": arr_2d_obj.copy()}) # Should pass with pytest.raises(AssertionError): union_numpy_dict({"data": arr_2d_obj}, {"data": arr_2d_obj_diff}) # 4D object array to ensure deep recursion is robust arr_4d_obj = np.array([[[[sub_arr1]]], [[[sub_arr2]]]], dtype=object) arr_4d_obj_diff = np.array([[[[sub_arr1]]], [[[np.array([9, 9])]]]], dtype=object) union_numpy_dict({"data": arr_4d_obj}, {"data": arr_4d_obj.copy()}) # Should pass with pytest.raises(AssertionError): union_numpy_dict({"data": arr_4d_obj}, {"data": arr_4d_obj_diff}) # --- Test Case 4: Explicit NaN value comparison --- # This verifies that our new _deep_equal logic correctly handles NaNs. nan_arr = np.array([1.0, np.nan, 3.0]) dict_nan_1 = {"data": nan_arr} dict_nan_2_same = {"data": np.array([1.0, np.nan, 3.0])} # A new array with same values dict_nan_3_different_val = {"data": np.array([1.0, 2.0, 3.0])} dict_nan_4_different_pos = {"data": np.array([np.nan, 1.0, 3.0])} # NaNs in the same position should be considered equal for merging. union_numpy_dict(dict_nan_1, dict_nan_2_same) # Should pass with pytest.raises(AssertionError): union_numpy_dict(dict_nan_1, dict_nan_3_different_val) with pytest.raises(AssertionError): union_numpy_dict(dict_nan_1, dict_nan_4_different_pos) # --- Test Case 5: Circular reference handling --- # Create two separate, but structurally identical, circular references. # This should pass without a RecursionError. circ_arr_1 = np.array([None], dtype=object) circ_arr_1[0] = circ_arr_1 circ_arr_2 = np.array([None], dtype=object) circ_arr_2[0] = circ_arr_2 union_numpy_dict({"data": circ_arr_1}, {"data": circ_arr_2}) # Should pass # Create a circular reference and a non-circular one. # This should fail with an AssertionError because they are different. 
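    # Assumed mechanics: a deep-equality check typically memoizes visited id()
    # pairs so that two arrays that each contain themselves compare structurally
    # without a RecursionError; a self-referential array and an array holding
    # None should still come out different.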
    non_circ_arr = np.array([None], dtype=object)
    with pytest.raises(AssertionError):
        union_numpy_dict({"data": circ_arr_1}, {"data": non_circ_arr})


def test_tensor_dict_constructor():
    obs = torch.randn(100, 10)
    act = torch.randn(100, 10, 3)
    data = DataProto.from_dict(tensors={"obs": obs, "act": act})

    assert data.batch.batch_size == torch.Size([100])

    with pytest.raises(AssertionError):
        data = DataProto.from_dict(tensors={"obs": obs, "act": act}, num_batch_dims=2)

    with pytest.raises(AssertionError):
        data = DataProto.from_dict(tensors={"obs": obs, "act": act}, num_batch_dims=3)


def test_tensor_dict_make_iterator():
    obs = torch.randn(100, 10)
    labels = [random.choice(["abc", "cde"]) for _ in range(100)]
    dataset = DataProto.from_dict(tensors={"obs": obs}, non_tensors={"labels": labels})

    data_iter_1 = dataset.make_iterator(mini_batch_size=10, epochs=2, seed=1)
    data_list_1 = []
    for data in data_iter_1:
        data_list_1.append(data)

    data_iter_2 = dataset.make_iterator(mini_batch_size=10, epochs=2, seed=1)
    data_list_2 = []
    for data in data_iter_2:
        data_list_2.append(data)

    for data1, data2 in zip(data_list_1, data_list_2, strict=True):
        assert isinstance(data1, DataProto)
        assert isinstance(data2, DataProto)
        result = torch.all(torch.eq(data1.batch["obs"], data2.batch["obs"]))
        if not result.item():
            print(data1.batch["obs"])
            print(data2.batch["obs"])
            raise AssertionError()
        non_tensor_result = np.all(np.equal(data1.non_tensor_batch["labels"], data2.non_tensor_batch["labels"]))
        if not non_tensor_result.item():
            print(data1.non_tensor_batch["labels"])
            print(data2.non_tensor_batch["labels"])
            # Fail the test on label mismatch, mirroring the tensor branch above
            raise AssertionError()


def test_reorder():
    obs = torch.tensor([1, 2, 3, 4, 5, 6])
    labels = ["a", "b", "c", "d", "e", "f"]
    data = DataProto.from_dict(tensors={"obs": obs}, non_tensors={"labels": labels}, meta_info={"name": "abdce"})
    data.reorder(torch.tensor([3, 4, 2, 0, 1, 5]))

    assert torch.all(torch.eq(data.batch["obs"], torch.tensor([4, 5, 3, 1, 2, 6])))
    assert np.all(data.non_tensor_batch["labels"] == np.array(["d", "e", "c", "a", "b", "f"]))
    assert data.meta_info == {"name": "abdce"}


def test_chunk_concat():
    obs = torch.tensor([1, 2, 3, 4, 5, 6])
    labels = ["a", "b", "c", "d", "e", "f"]
    data = DataProto.from_dict(tensors={"obs": obs}, non_tensors={"labels": labels}, meta_info={"name": "abdce"})

    with pytest.raises(AssertionError):
        data.chunk(5)

    data_split = data.chunk(2)
    assert len(data_split) == 2
    assert torch.all(torch.eq(data_split[0].batch["obs"], torch.tensor([1, 2, 3])))
    assert np.all(data_split[0].non_tensor_batch["labels"] == np.array(["a", "b", "c"]))
    assert data_split[0].meta_info == {"name": "abdce"}

    assert torch.all(torch.eq(data_split[1].batch["obs"], torch.tensor([4, 5, 6])))
    assert np.all(data_split[1].non_tensor_batch["labels"] == np.array(["d", "e", "f"]))
    assert data_split[1].meta_info == {"name": "abdce"}

    concat_data = DataProto.concat(data_split)
    assert torch.all(torch.eq(concat_data.batch["obs"], data.batch["obs"]))
    assert np.all(concat_data.non_tensor_batch["labels"] == data.non_tensor_batch["labels"])
    assert concat_data.meta_info == data.meta_info


def test_concat_metrics_from_multiple_workers():
    """Test that concat() properly merges metrics from all workers in distributed training."""
    # Simulate 3 workers each with their own metrics
    obs1 = torch.tensor([1, 2])
    obs2 = torch.tensor([3, 4])
    obs3 = torch.tensor([5, 6])

    # Each worker has different metrics (as list of dict format)
    worker1_metrics = [{"loss": 0.5, "accuracy": 0.9}]
    worker2_metrics = [{"loss": 0.6, "accuracy": 0.85}]
    worker3_metrics = [{"loss": 0.55, "accuracy": 0.88}]
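    # Expected merge rule, as encoded by the assertions below: concat() walks
    # the workers in order, collects each [{name: value}] metrics entry, and
    # flattens everything into a single {name: [values...]} mapping, e.g.
    #   [{"loss": 0.5}] + [{"loss": 0.6}] + [{"loss": 0.55}] -> {"loss": [0.5, 0.6, 0.55]}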
data1 = DataProto.from_dict(tensors={"obs": obs1}, meta_info={"metrics": worker1_metrics, "config_flag": True}) data2 = DataProto.from_dict(tensors={"obs": obs2}, meta_info={"metrics": worker2_metrics, "config_flag": True}) data3 = DataProto.from_dict(tensors={"obs": obs3}, meta_info={"metrics": worker3_metrics, "config_flag": True}) # Concat all workers' data concat_data = DataProto.concat([data1, data2, data3]) # Verify tensors are concatenated assert torch.all(torch.eq(concat_data.batch["obs"], torch.tensor([1, 2, 3, 4, 5, 6]))) # Verify ALL workers' metrics are flattened to dict of lists expected_metrics = {"loss": [0.5, 0.6, 0.55], "accuracy": [0.9, 0.85, 0.88]} assert concat_data.meta_info["metrics"] == expected_metrics # Verify config flags are preserved from first worker assert concat_data.meta_info["config_flag"] is True def test_concat_with_empty_and_non_list_meta_info(): """Test concat() handles edge cases: empty meta_info, non-list values, and None.""" obs1 = torch.tensor([1, 2]) obs2 = torch.tensor([3, 4]) # Worker 1 has metrics, worker 2 doesn't data1 = DataProto.from_dict(tensors={"obs": obs1}, meta_info={"metrics": [{"loss": 0.5}], "flag": True}) data2 = DataProto.from_dict(tensors={"obs": obs2}, meta_info={"flag": True}) concat_data = DataProto.concat([data1, data2]) # Should flatten worker1's metrics to dict of lists assert concat_data.meta_info["metrics"] == {"loss": [0.5]} assert concat_data.meta_info["flag"] is True # Test with non-list meta_info value data3 = DataProto.from_dict(tensors={"obs": obs1}, meta_info={"single_value": 42}) data4 = DataProto.from_dict(tensors={"obs": obs2}, meta_info={"single_value": 42}) concat_data2 = DataProto.concat([data3, data4]) assert concat_data2.meta_info["single_value"] == 42 def test_concat_first_worker_missing_metrics(): """Test that metrics from other workers are preserved even when first worker has no metrics. This is a critical edge case - the old buggy implementation only checked data[0].meta_info and would lose all metrics if the first worker didn't have any. """ obs1 = torch.tensor([1, 2]) obs2 = torch.tensor([3, 4]) obs3 = torch.tensor([5, 6]) # First worker has NO metrics, but workers 2 and 3 do data1 = DataProto.from_dict(tensors={"obs": obs1}, meta_info={"config_flag": True}) data2 = DataProto.from_dict(tensors={"obs": obs2}, meta_info={"metrics": {"loss": 0.6}, "config_flag": True}) data3 = DataProto.from_dict(tensors={"obs": obs3}, meta_info={"metrics": {"loss": 0.55}, "config_flag": True}) concat_data = DataProto.concat([data1, data2, data3]) # Should flatten metrics from workers 2 and 3 into dict of lists expected_metrics = {"loss": [0.6, 0.55]} assert concat_data.meta_info["metrics"] == expected_metrics assert concat_data.meta_info["config_flag"] is True def test_concat_non_list_metrics(): """Test that concat() handles non-list metrics (single dict) correctly. In some cases, metrics might be a single dict instead of a list. The implementation should flatten them into a dict of lists. 
""" obs1 = torch.tensor([1, 2]) obs2 = torch.tensor([3, 4]) # Metrics as single dict (not wrapped in list) data1 = DataProto.from_dict(tensors={"obs": obs1}, meta_info={"metrics": {"loss": 0.5, "accuracy": 0.9}}) data2 = DataProto.from_dict(tensors={"obs": obs2}, meta_info={"metrics": {"loss": 0.6, "accuracy": 0.85}}) concat_data = DataProto.concat([data1, data2]) # Should flatten to dict of lists expected_metrics = {"loss": [0.5, 0.6], "accuracy": [0.9, 0.85]} assert concat_data.meta_info["metrics"] == expected_metrics def test_concat_merge_different_non_metric_keys(): """Test that concat() merges non-metric meta_info keys from all workers. When different workers have different non-metric keys, all keys should be preserved. This prevents silent data loss and aligns with the docstring stating meta_info is "merged". """ obs1 = torch.tensor([1, 2]) obs2 = torch.tensor([3, 4]) obs3 = torch.tensor([5, 6]) # Each worker has some unique non-metric keys data1 = DataProto.from_dict(tensors={"obs": obs1}, meta_info={"config": "A", "shared_key": "X"}) data2 = DataProto.from_dict(tensors={"obs": obs2}, meta_info={"extra_key": "B", "shared_key": "X"}) data3 = DataProto.from_dict(tensors={"obs": obs3}, meta_info={"another_key": "C", "shared_key": "X"}) concat_data = DataProto.concat([data1, data2, data3]) # All unique keys should be preserved assert concat_data.meta_info["config"] == "A" assert concat_data.meta_info["extra_key"] == "B" assert concat_data.meta_info["another_key"] == "C" assert concat_data.meta_info["shared_key"] == "X" def test_concat_conflicting_non_metric_keys(): """Test that concat() raises an assertion error when non-metric keys have conflicting values. This ensures data integrity by catching cases where workers have different values for what should be the same configuration parameter. 
""" obs1 = torch.tensor([1, 2]) obs2 = torch.tensor([3, 4]) # Same key "config" but different values data1 = DataProto.from_dict(tensors={"obs": obs1}, meta_info={"config": "A"}) data2 = DataProto.from_dict(tensors={"obs": obs2}, meta_info={"config": "B"}) # Should raise an assertion error due to conflicting values with pytest.raises(AssertionError, match="Conflicting values for meta_info key 'config'"): DataProto.concat([data1, data2]) def test_pop(): obs = torch.randn(100, 10) act = torch.randn(100, 3) dataset = DataProto.from_dict({"obs": obs, "act": act}, meta_info={"2": 2, "1": 1}) poped_dataset = dataset.pop(batch_keys=["obs"], meta_info_keys=["2"]) assert poped_dataset.batch.keys() == {"obs"} assert poped_dataset.meta_info.keys() == {"2"} assert dataset.batch.keys() == {"act"} assert dataset.meta_info.keys() == {"1"} def test_repeat(): # Create a DataProto object with some batch and non-tensor data obs = torch.tensor([[1, 2], [3, 4], [5, 6]]) labels = ["a", "b", "c"] data = DataProto.from_dict(tensors={"obs": obs}, non_tensors={"labels": labels}, meta_info={"info": "test_info"}) # Test interleave=True repeated_data_interleave = data.repeat(repeat_times=2, interleave=True) expected_obs_interleave = torch.tensor([[1, 2], [1, 2], [3, 4], [3, 4], [5, 6], [5, 6]]) expected_labels_interleave = ["a", "a", "b", "b", "c", "c"] assert torch.all(torch.eq(repeated_data_interleave.batch["obs"], expected_obs_interleave)) assert (repeated_data_interleave.non_tensor_batch["labels"] == expected_labels_interleave).all() assert repeated_data_interleave.meta_info == {"info": "test_info"} # Test interleave=False repeated_data_no_interleave = data.repeat(repeat_times=2, interleave=False) expected_obs_no_interleave = torch.tensor([[1, 2], [3, 4], [5, 6], [1, 2], [3, 4], [5, 6]]) expected_labels_no_interleave = ["a", "b", "c", "a", "b", "c"] assert torch.all(torch.eq(repeated_data_no_interleave.batch["obs"], expected_obs_no_interleave)) assert (repeated_data_no_interleave.non_tensor_batch["labels"] == expected_labels_no_interleave).all() assert repeated_data_no_interleave.meta_info == {"info": "test_info"} def test_dataproto_pad_unpad(): obs = torch.tensor([[1, 2], [3, 4], [5, 6]]) labels = ["a", "b", "c"] data = DataProto.from_dict(tensors={"obs": obs}, non_tensors={"labels": labels}, meta_info={"info": "test_info"}) from verl.protocol import pad_dataproto_to_divisor, unpad_dataproto padded_data, pad_size = pad_dataproto_to_divisor(data, size_divisor=2) assert pad_size == 1 expected_obs = torch.tensor([[1, 2], [3, 4], [5, 6], [1, 2]]) expected_labels = ["a", "b", "c", "a"] assert torch.all(torch.eq(padded_data.batch["obs"], expected_obs)) assert (padded_data.non_tensor_batch["labels"] == expected_labels).all() assert padded_data.meta_info == {"info": "test_info"} unpadd_data = unpad_dataproto(padded_data, pad_size=pad_size) assert torch.all(torch.eq(unpadd_data.batch["obs"], obs)) assert (unpadd_data.non_tensor_batch["labels"] == labels).all() assert unpadd_data.meta_info == {"info": "test_info"} padded_data, pad_size = pad_dataproto_to_divisor(data, size_divisor=3) assert pad_size == 0 expected_obs = torch.tensor([[1, 2], [3, 4], [5, 6]]) expected_labels = ["a", "b", "c"] assert torch.all(torch.eq(padded_data.batch["obs"], expected_obs)) assert (padded_data.non_tensor_batch["labels"] == expected_labels).all() assert padded_data.meta_info == {"info": "test_info"} unpadd_data = unpad_dataproto(padded_data, pad_size=pad_size) assert torch.all(torch.eq(unpadd_data.batch["obs"], obs)) assert 
(unpadd_data.non_tensor_batch["labels"] == labels).all() assert unpadd_data.meta_info == {"info": "test_info"} padded_data, pad_size = pad_dataproto_to_divisor(data, size_divisor=7) assert pad_size == 4 expected_obs = torch.tensor([[1, 2], [3, 4], [5, 6], [1, 2], [3, 4], [5, 6], [1, 2]]) expected_labels = ["a", "b", "c", "a", "b", "c", "a"] assert torch.all(torch.eq(padded_data.batch["obs"], expected_obs)) assert (padded_data.non_tensor_batch["labels"] == expected_labels).all() assert padded_data.meta_info == {"info": "test_info"} unpadd_data = unpad_dataproto(padded_data, pad_size=pad_size) assert torch.all(torch.eq(unpadd_data.batch["obs"], obs)) assert (unpadd_data.non_tensor_batch["labels"] == labels).all() assert unpadd_data.meta_info == {"info": "test_info"} def test_dataproto_fold_unfold(): from verl.protocol import DataProto, fold_batch_dim, unfold_batch_dim obs = torch.tensor([[1, 2], [3, 4], [5, 6]]) labels = ["a", "b", "c"] data = DataProto.from_dict(tensors={"obs": obs}, non_tensors={"labels": labels}, meta_info={"info": "test_info"}) data1 = data.repeat(repeat_times=2, interleave=True) data2 = fold_batch_dim(data1, new_batch_size=3) torch.testing.assert_close(data2.batch["obs"], torch.tensor([[[1, 2], [1, 2]], [[3, 4], [3, 4]], [[5, 6], [5, 6]]])) assert (data2.non_tensor_batch["labels"] == [["a", "a"], ["b", "b"], ["c", "c"]]).all() data2.reorder(indices=torch.tensor([1, 2, 0])) data3 = unfold_batch_dim(data2, batch_dims=2) torch.testing.assert_close(data3.batch["obs"], torch.tensor([[3, 4], [3, 4], [5, 6], [5, 6], [1, 2], [1, 2]])) assert (data3.non_tensor_batch["labels"] == ["b", "b", "c", "c", "a", "a"]).all() assert data3.meta_info == {"info": "test_info"} def test_torch_save_data_proto(): obs = torch.tensor([[1, 2], [3, 4], [5, 6]]) labels = ["a", "b", "c"] data = DataProto.from_dict(tensors={"obs": obs}, non_tensors={"labels": labels}, meta_info={"info": "test_info"}) data.save_to_disk("test_data.pt") loaded_data = DataProto.load_from_disk("test_data.pt") assert torch.all(torch.eq(loaded_data.batch["obs"], data.batch["obs"])) assert (loaded_data.non_tensor_batch["labels"] == data.non_tensor_batch["labels"]).all() assert loaded_data.meta_info == data.meta_info import os os.remove("test_data.pt") def test_len(): obs = torch.tensor([[1, 2], [3, 4], [5, 6]]) labels = np.array(["a", "b", "c"], dtype=object) data = DataProto.from_dict(tensors={"obs": obs}, non_tensors={"labels": labels}, meta_info={"info": "test_info"}) assert len(data) == 3 data = DataProto(batch=None, non_tensor_batch={"labels": labels}, meta_info={"info": "test_info"}) assert len(data) == 3 data = DataProto(batch=None, non_tensor_batch={}, meta_info={"info": "test_info"}) assert len(data) == 0 data = DataProto(batch=None, non_tensor_batch=None, meta_info={"info": "test_info"}) assert len(data) == 0 def test_dataproto_index(): data_len = 100 idx_num = 10 obs = torch.randn(data_len, 10) labels = [random.choice(["abc", "cde"]) for _ in range(data_len)] data = DataProto.from_dict(tensors={"obs": obs}, non_tensors={"labels": labels}) labels_np = np.array(labels) idx_np_int = np.random.randint(0, data_len, size=(idx_num,)) result_np_int = data[idx_np_int] assert result_np_int.batch.keys() == data.batch.keys() assert result_np_int.non_tensor_batch.keys() == data.non_tensor_batch.keys() assert result_np_int.batch["obs"].shape[0] == idx_num assert result_np_int.non_tensor_batch["labels"].shape[0] == idx_num assert np.array_equal(result_np_int.batch["obs"].cpu().numpy(), obs[idx_np_int].numpy()) assert 
np.array_equal(result_np_int.non_tensor_batch["labels"], labels_np[idx_np_int]) idx_torch_int = torch.randint(0, data_len, size=(idx_num,)) result_torch_int = data[idx_torch_int] assert result_torch_int.batch.keys() == data.batch.keys() assert result_torch_int.non_tensor_batch.keys() == data.non_tensor_batch.keys() assert result_torch_int.batch["obs"].shape[0] == idx_num assert result_torch_int.non_tensor_batch["labels"].shape[0] == idx_num assert np.array_equal(result_torch_int.batch["obs"].cpu().numpy(), obs[idx_torch_int].cpu().numpy()) assert np.array_equal(result_torch_int.non_tensor_batch["labels"], labels_np[idx_torch_int.cpu().numpy()]) idx_list_int = [np.random.randint(0, data_len) for _ in range(idx_num)] result_list_int = data[idx_list_int] assert result_list_int.batch.keys() == data.batch.keys() assert result_list_int.non_tensor_batch.keys() == data.non_tensor_batch.keys() assert result_list_int.batch["obs"].shape[0] == idx_num assert result_list_int.non_tensor_batch["labels"].shape[0] == idx_num assert np.array_equal(result_list_int.batch["obs"].cpu().numpy(), obs[idx_list_int].cpu().numpy()) assert np.array_equal(result_list_int.non_tensor_batch["labels"], labels_np[idx_list_int]) idx_np_bool = np.random.randint(0, 2, size=(data_len,), dtype=bool) result_np_bool = data[idx_np_bool] assert result_np_bool.batch.keys() == data.batch.keys() assert result_np_bool.non_tensor_batch.keys() == data.non_tensor_batch.keys() assert result_np_bool.batch["obs"].shape[0] == idx_np_bool.sum() assert result_np_bool.non_tensor_batch["labels"].shape[0] == idx_np_bool.sum() assert np.array_equal(result_np_bool.batch["obs"].cpu().numpy(), obs[idx_np_bool].cpu().numpy()) assert np.array_equal(result_np_bool.non_tensor_batch["labels"], labels_np[idx_np_bool]) idx_torch_bool = torch.randint(0, 2, size=(data_len,), dtype=torch.bool) result_torch_bool = data[idx_torch_bool] assert result_torch_bool.batch.keys() == data.batch.keys() assert result_torch_bool.non_tensor_batch.keys() == data.non_tensor_batch.keys() assert result_torch_bool.batch["obs"].shape[0] == idx_torch_bool.sum().item() assert result_torch_bool.non_tensor_batch["labels"].shape[0] == idx_torch_bool.sum().item() assert np.array_equal(result_torch_bool.batch["obs"].cpu().numpy(), obs[idx_torch_bool].cpu().numpy()) assert np.array_equal(result_torch_bool.non_tensor_batch["labels"], labels_np[idx_torch_bool]) idx_list_bool = [np.random.randint(0, 2, dtype=bool) for _ in range(data_len)] result_list_bool = data[idx_list_bool] assert result_list_bool.batch.keys() == data.batch.keys() assert result_list_bool.non_tensor_batch.keys() == data.non_tensor_batch.keys() assert result_list_bool.batch["obs"].shape[0] == sum(idx_list_bool) assert result_list_bool.non_tensor_batch["labels"].shape[0] == sum(idx_list_bool) assert np.array_equal(result_list_bool.batch["obs"].cpu().numpy(), obs[idx_list_bool].cpu().numpy()) assert np.array_equal(result_list_bool.non_tensor_batch["labels"], labels_np[idx_list_bool]) def test_old_vs_new_from_single_dict(): class CustomProto(DataProto): """Uses the new, fixed from_single_dict.""" pass class OriginProto(DataProto): """Mimics the *old* from_single_dict (always returns a DataProto).""" @classmethod def from_single_dict(cls, data, meta_info=None, auto_padding=False): tensors, non_tensors = {}, {} for k, v in data.items(): if torch.is_tensor(v): tensors[k] = v else: non_tensors[k] = v # always calls DataProto.from_dict, ignoring `cls` return DataProto.from_dict( tensors=tensors, non_tensors=non_tensors, 
meta_info=meta_info, auto_padding=auto_padding, ) sample = {"x": torch.tensor([0])} orig = OriginProto.from_single_dict(sample) # old behavior: always DataProto, not a CustomOriginProto assert type(orig) is DataProto assert type(orig) is not OriginProto cust = CustomProto.from_single_dict(sample) # new behavior: respects subclass assert type(cust) is CustomProto def test_dataproto_no_batch(): labels = ["a", "b", "c"] data = DataProto.from_dict(non_tensors={"labels": labels}, meta_info={"info": "test_info"}) selected = data.select(non_tensor_batch_keys=["labels"]) assert (selected.non_tensor_batch["labels"] == labels).all() pop_data = data.pop(non_tensor_batch_keys=["labels"]) assert (pop_data.non_tensor_batch["labels"] == labels).all() assert data.non_tensor_batch == {} def test_sample_level_repeat(): # Create a DataProto object with some batch and non-tensor data obs = torch.tensor([[1, 2], [3, 4], [5, 6]]) labels = ["a", "b", "c"] data = DataProto.from_dict(tensors={"obs": obs}, non_tensors={"labels": labels}, meta_info={"info": "test_info"}) # list repeated_data_interleave = data.sample_level_repeat(repeat_times=[3, 1, 2]) expected_obs_interleave = torch.tensor([[1, 2], [1, 2], [1, 2], [3, 4], [5, 6], [5, 6]]) expected_labels_interleave = ["a", "a", "a", "b", "c", "c"] assert torch.all(torch.eq(repeated_data_interleave.batch["obs"], expected_obs_interleave)) assert (repeated_data_interleave.non_tensor_batch["labels"] == expected_labels_interleave).all() assert repeated_data_interleave.meta_info == {"info": "test_info"} # torch.tensor repeated_data_no_interleave = data.sample_level_repeat(repeat_times=torch.tensor([1, 2, 3])) expected_obs_no_interleave = torch.tensor([[1, 2], [3, 4], [3, 4], [5, 6], [5, 6], [5, 6]]) expected_labels_no_interleave = ["a", "b", "b", "c", "c", "c"] assert torch.all(torch.eq(repeated_data_no_interleave.batch["obs"], expected_obs_no_interleave)) assert (repeated_data_no_interleave.non_tensor_batch["labels"] == expected_labels_no_interleave).all() assert repeated_data_no_interleave.meta_info == {"info": "test_info"} def test_dataproto_unfold_column_chunks(): obs1 = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) obs2 = torch.tensor([[1, 2], [5, 6], [9, 10]]) labels = ["a", "b", "c"] data = DataProto.from_dict( tensors={"obs1": obs1, "obs2": obs2}, non_tensors={"labels": labels}, meta_info={"name": "abc"} ) ret = data.unfold_column_chunks(2, split_keys=["obs1"]) expect_obs1 = torch.tensor([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]]) expect_obs2 = torch.tensor([[1, 2], [1, 2], [5, 6], [5, 6], [9, 10], [9, 10]]) expect_labels = ["a", "a", "b", "b", "c", "c"] assert torch.all(torch.eq(ret.batch["obs1"], expect_obs1)) assert torch.all(torch.eq(ret.batch["obs2"], expect_obs2)) assert (ret.non_tensor_batch["labels"] == expect_labels).all() assert ret.meta_info == {"name": "abc"} obs1 = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) obs2 = torch.tensor([[1, 2], [5, 6], [9, 10]]) labels = [["a1", "a2"], ["b1", "b2"], ["c1", "c2"]] data = DataProto.from_dict( tensors={"obs1": obs1, "obs2": obs2}, non_tensors={"labels": labels}, meta_info={"name": "abc"} ) ret = data.unfold_column_chunks(2, split_keys=["obs1", "labels"]) expect_obs1 = torch.tensor([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]]) expect_obs2 = torch.tensor([[1, 2], [1, 2], [5, 6], [5, 6], [9, 10], [9, 10]]) expect_labels = [["a1"], ["a2"], ["b1"], ["b2"], ["c1"], ["c2"]] assert torch.all(torch.eq(ret.batch["obs1"], expect_obs1)) assert 
torch.all(torch.eq(ret.batch["obs2"], expect_obs2)) assert (ret.non_tensor_batch["labels"] == expect_labels).all() assert ret.meta_info == {"name": "abc"} obs1 = torch.tensor( [[[1, 1], [2, 2], [3, 3], [4, 4]], [[5, 5], [6, 6], [7, 7], [8, 8]], [[9, 9], [10, 10], [11, 11], [12, 12]]] ) obs2 = torch.tensor([[[1, 1], [2, 2]], [[5, 5], [6, 6]], [[9, 9], [10, 10]]]) labels = ["a", "b", "c"] data = DataProto.from_dict( tensors={"obs1": obs1, "obs2": obs2}, non_tensors={"labels": labels}, meta_info={"name": "abc"} ) ret = data.unfold_column_chunks(2, split_keys=["obs1"]) expect_obs1 = torch.tensor( [ [[1, 1], [2, 2]], [[3, 3], [4, 4]], [[5, 5], [6, 6]], [[7, 7], [8, 8]], [[9, 9], [10, 10]], [[11, 11], [12, 12]], ] ) expect_obs2 = torch.tensor( [[[1, 1], [2, 2]], [[1, 1], [2, 2]], [[5, 5], [6, 6]], [[5, 5], [6, 6]], [[9, 9], [10, 10]], [[9, 9], [10, 10]]] ) expect_labels = ["a", "a", "b", "b", "c", "c"] assert torch.all(torch.eq(ret.batch["obs1"], expect_obs1)) assert torch.all(torch.eq(ret.batch["obs2"], expect_obs2)) assert (ret.non_tensor_batch["labels"] == expect_labels).all() assert ret.meta_info == {"name": "abc"} def test_dataproto_chunk_after_index(): data_len = 4 obs = torch.randn(data_len, 4) labels = [f"label_{i}" for i in range(data_len)] data = DataProto.from_dict(tensors={"obs": obs}, non_tensors={"labels": labels}, meta_info={"name": "abc"}) # Test with boolean numpy array bool_mask = np.array([True, False, True, False]) selected = data[bool_mask] assert isinstance(selected.batch.batch_size, torch.Size) assert all(isinstance(d, int) for d in selected.batch.batch_size) # int or List[int] # Test with integer numpy array int_mask = np.array([0, 2]) selected = data[int_mask] assert isinstance(selected.batch.batch_size, torch.Size) assert all(isinstance(d, int) for d in selected.batch.batch_size) # Test with boolean list list_mask = [True, False, True, False] selected = data[list_mask] assert isinstance(selected.batch.batch_size, torch.Size) assert all(isinstance(d, int) for d in selected.batch.batch_size) # Test with list list_mask = [0, 2] selected = data[list_mask] assert isinstance(selected.batch.batch_size, torch.Size) assert all(isinstance(d, int) for d in selected.batch.batch_size) # Test with torch tensor (bool) torch_bool_mask = torch.tensor([True, False, True, False]) selected = data[torch_bool_mask] assert isinstance(selected.batch.batch_size, torch.Size) assert all(isinstance(d, int) for d in selected.batch.batch_size) # Test with torch tensor (int) torch_int_mask = torch.tensor([0, 2]) selected = data[torch_int_mask] assert isinstance(selected.batch.batch_size, torch.Size) assert all(isinstance(d, int) for d in selected.batch.batch_size) @pytest.mark.skipif( parse_version(tensordict.__version__) < parse_version("0.10"), reason="requires at least tensordict 0.10" ) def test_to_tensordict(): obs = torch.tensor([1, 2, 3, 4, 5, 6]) labels = ["a", "b", "c", "d", "e", "f"] data = DataProto.from_dict(tensors={"obs": obs}, non_tensors={"labels": labels}, meta_info={"name": "abdce"}) output = data.to_tensordict() assert torch.all(torch.eq(output["obs"], obs)).item() assert output["labels"] == labels assert output["name"] == "abdce" @pytest.mark.skipif( parse_version(tensordict.__version__) < parse_version("0.10"), reason="requires at least tensordict 0.10" ) def test_from_tensordict(): tensor_dict = { "obs": torch.tensor([1, 2, 3, 4, 5, 6]), "labels": ["a", "b", "c", "d", "e", "f"], } non_tensor_dict = {"name": "abdce"} tensordict = tu.get_tensordict(tensor_dict, non_tensor_dict) 
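    # Round-trip expectation (inferred from the assertions below rather than a
    # spec of from_tensordict): tensor entries land in data.batch, per-sample
    # list entries in data.non_tensor_batch, and scalar non-tensor entries such
    # as "name" in data.meta_info.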
data = DataProto.from_tensordict(tensordict) assert data.non_tensor_batch["labels"].tolist() == tensor_dict["labels"] assert torch.all(torch.eq(data.batch["obs"], tensor_dict["obs"])).item() assert data.meta_info["name"] == "abdce" def test_serialize_deserialize_single_tensor(): """Test serialization and deserialization of a single tensor""" # Create test tensor original_tensor = torch.randn(3, 4, 5) # Serialize dtype, shape, data = serialize_single_tensor(original_tensor) # Deserialize reconstructed_tensor = deserialize_single_tensor((dtype, shape, data)) # Verify results assert torch.allclose(original_tensor, reconstructed_tensor) assert original_tensor.shape == reconstructed_tensor.shape assert original_tensor.dtype == reconstructed_tensor.dtype def test_serialize_deserialize_tensordict_regular_tensors(): """Test serialization and deserialization of TensorDict with regular tensors""" # Create test data batch_size = (5, 3) tensor1 = torch.randn(*batch_size, 4) tensor2 = torch.randint(0, 10, (*batch_size, 2)) # Create TensorDict original_tensordict = TensorDict({"tensor1": tensor1, "tensor2": tensor2}, batch_size=batch_size) # Serialize batch_size_serialized, device, encoded_items = serialize_tensordict(original_tensordict) # Deserialize reconstructed_tensordict = deserialize_tensordict((batch_size_serialized, device, encoded_items)) # Verify results assert original_tensordict.batch_size == reconstructed_tensordict.batch_size assert set(original_tensordict.keys()) == set(reconstructed_tensordict.keys()) for key in original_tensordict.keys(): original_tensor = original_tensordict[key] reconstructed_tensor = reconstructed_tensordict[key] assert torch.allclose(original_tensor, reconstructed_tensor) assert original_tensor.shape == reconstructed_tensor.shape assert original_tensor.dtype == reconstructed_tensor.dtype def test_serialize_deserialize_tensordict_nested_tensors(): """Test serialization and deserialization of TensorDict with nested tensors""" # Create nested tensor tensor_list = [torch.randn(2, 3), torch.randn(3, 4), torch.randn(1, 5)] nested_tensor = torch.nested.as_nested_tensor(tensor_list) # Create regular tensor for comparison regular_tensor = torch.randn(3, 4, 5) # Create TensorDict original_tensordict = TensorDict({"nested": nested_tensor, "regular": regular_tensor}, batch_size=(3,)) # Serialize batch_size_serialized, device, encoded_items = serialize_tensordict(original_tensordict) # Deserialize reconstructed_tensordict = deserialize_tensordict((batch_size_serialized, device, encoded_items)) # Verify results assert original_tensordict.batch_size == reconstructed_tensordict.batch_size assert set(original_tensordict.keys()) == set(reconstructed_tensordict.keys()) # Verify regular tensor original_regular = original_tensordict["regular"] reconstructed_regular = reconstructed_tensordict["regular"] assert torch.allclose(original_regular, reconstructed_regular) assert original_regular.shape == reconstructed_regular.shape assert original_regular.dtype == reconstructed_regular.dtype # Verify nested tensor original_nested = original_tensordict["nested"] reconstructed_nested = reconstructed_tensordict["nested"] # Check if it's a nested tensor assert original_nested.is_nested assert reconstructed_nested.is_nested # Check layout assert original_nested.layout == reconstructed_nested.layout # Check each tensor after unbinding original_unbind = original_nested.unbind() reconstructed_unbind = reconstructed_nested.unbind() assert len(original_unbind) == len(reconstructed_unbind) for orig, 
recon in zip(original_unbind, reconstructed_unbind, strict=False): assert torch.allclose(orig, recon) assert orig.shape == recon.shape assert orig.dtype == recon.dtype def test_serialize_deserialize_tensordict_mixed_types(): """Test serialization and deserialization of TensorDict with mixed tensor types""" # Create tensors with different data types float_tensor = torch.randn(2, 3).float() double_tensor = torch.randn(2, 3).double() int_tensor = torch.randint(0, 10, (2, 3)).int() long_tensor = torch.randint(0, 10, (2, 3)).long() bool_tensor = torch.tensor([[True, False], [False, True]]) bfloat16_tensor = torch.randn(2, 3).bfloat16() # Add fp8 tensor (if available) # Note: FP8 is not natively supported in all PyTorch versions # We'll check if it's available and conditionally include it has_fp8 = hasattr(torch, "float8_e5m2") or hasattr(torch, "float8_e4m3fn") if has_fp8: try: # Try to create an FP8 tensor (implementation may vary) # This is a placeholder - actual FP8 support might require specific hardware fp8_tensor = torch.randn(2, 3) if hasattr(torch, "float8_e5m2"): fp8_tensor = fp8_tensor.to(torch.float8_e5m2) elif hasattr(torch, "float8_e4m3fn"): fp8_tensor = fp8_tensor.to(torch.float8_e4m3fn) except Exception: has_fp8 = False # Create nested tensor tensor_list = [ torch.randn(2, 3), torch.randn(3, 4), ] nested_tensor = torch.nested.as_nested_tensor(tensor_list) # Create TensorDict with all available types tensordict_data = { "float": float_tensor, "double": double_tensor, "int": int_tensor, "long": long_tensor, "bool": bool_tensor, "bfloat16": bfloat16_tensor, "nested": nested_tensor, } # Conditionally add fp8 tensor if available if has_fp8: tensordict_data["fp8"] = fp8_tensor original_tensordict = TensorDict( tensordict_data, batch_size=(2,), ) # Serialize batch_size_serialized, device, encoded_items = serialize_tensordict(original_tensordict) # Deserialize reconstructed_tensordict = deserialize_tensordict((batch_size_serialized, device, encoded_items)) # Verify results assert original_tensordict.batch_size == reconstructed_tensordict.batch_size assert set(original_tensordict.keys()) == set(reconstructed_tensordict.keys()) for key in original_tensordict.keys(): original_tensor = original_tensordict[key] reconstructed_tensor = reconstructed_tensordict[key] if original_tensor.is_nested: # For nested tensors, check each tensor after unbinding original_unbind = original_tensor.unbind() reconstructed_unbind = reconstructed_tensor.unbind() assert len(original_unbind) == len(reconstructed_unbind) for orig, recon in zip(original_unbind, reconstructed_unbind, strict=False): assert torch.allclose(orig, recon, equal_nan=True) assert orig.shape == recon.shape assert orig.dtype == recon.dtype else: # For regular tensors, compare directly assert torch.all(original_tensor == reconstructed_tensor) assert original_tensor.shape == reconstructed_tensor.shape assert original_tensor.dtype == reconstructed_tensor.dtype def test_serialize_deserialize_tensordict_with_device(): """Test serialization and deserialization of TensorDict with device information""" # Create test data batch_size = (2, 3) tensor1 = torch.randn(*batch_size, 4) tensor2 = torch.randint(0, 10, (*batch_size, 2)) # Create TensorDict with device information device = "cuda" if torch.cuda.is_available() else "cpu" original_tensordict = TensorDict({"tensor1": tensor1, "tensor2": tensor2}, batch_size=batch_size, device=device) # Serialize batch_size_serialized, device_serialized, encoded_items = serialize_tensordict(original_tensordict) # 
Deserialize reconstructed_tensordict = deserialize_tensordict((batch_size_serialized, device_serialized, encoded_items)) # Verify results assert original_tensordict.batch_size == reconstructed_tensordict.batch_size assert str(original_tensordict.device) == str(reconstructed_tensordict.device) assert set(original_tensordict.keys()) == set(reconstructed_tensordict.keys()) for key in original_tensordict.keys(): original_tensor = original_tensordict[key] reconstructed_tensor = reconstructed_tensordict[key] assert torch.allclose(original_tensor.cpu(), reconstructed_tensor.cpu()) assert original_tensor.shape == reconstructed_tensor.shape assert original_tensor.dtype == reconstructed_tensor.dtype ================================================ FILE: verl_distillation/tests/test_protocol_v2_on_cpu.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Replace DataProto with raw TensorDict """ import copy import random import numpy as np import pytest import torch from verl.utils import tensordict_utils as tu def test_union_tensor_dict(): obs = torch.randn(100, 10) meta_info1 = {"top_p": 0.8} meta_info2 = {"top_p": 0.9} data1 = {"obs": obs, "act": torch.randn(100, 3), "data_sources": ["gsm8k"] * 100} data2 = {"obs": obs, "next_obs": torch.randn(100, 10), "rew": torch.randn(100), "data_sources": ["gsm8k"] * 100} data_with_copied_obs = {"obs": obs.clone(), "next_obs": torch.randn(100, 10), "rew": torch.randn(100)} data1 = tu.get_tensordict(tensor_dict=data1) data2 = tu.get_tensordict(tensor_dict=data2) data_with_copied_obs = tu.get_tensordict(data_with_copied_obs) tu.union_tensor_dict(data1, data2) with pytest.raises(AssertionError): # conflict in tensor values tu.union_tensor_dict(data1, data_with_copied_obs) data1 = tu.assign_non_tensor_dict(data1, meta_info1) tu.union_tensor_dict(data1, data2) # works ok data2 = tu.assign_non_tensor_dict(data2, meta_info2) with pytest.raises(AssertionError): # conflict in NonTensorData tu.union_tensor_dict(data1, data2) data1.pop("top_p") data2.pop("top_p") data2["data_sources"][0] = "math" with pytest.raises(AssertionError): # conflict in NonTensorData tu.union_tensor_dict(data1, data2) def test_tensor_dict_constructor(): obs = torch.ones(100, 10) act = torch.zeros(100, 10, 3) data_source = ["gsm8k"] * 100 non_tensor_dict = {"name": "abdce"} data = tu.get_tensordict( tensor_dict={"obs": obs, "act": act, "data_source": data_source}, non_tensor_dict=non_tensor_dict ) assert data.batch_size == torch.Size([100]) # test slicing assert torch.all(torch.eq(data[0]["obs"], torch.ones(10))).item() assert torch.all(torch.eq(data[0]["act"], torch.zeros(10, 3))).item() assert data[0]["data_source"] == "gsm8k" assert torch.all(torch.eq(data[0:2]["obs"], torch.ones(2, 10))).item() assert torch.all(torch.eq(data[0:2]["act"], torch.zeros(2, 10, 3))).item() assert data[0:2]["data_source"] == ["gsm8k"] * 2 # test non tensor data assert data["name"] == "abdce" def test_index_select_tensor_dict(): 
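    # This test mixes a jagged nested tensor with an ordinary padded tensor.
    # The assumption exercised below: index_select_tensor_dict has to rebuild
    # the jagged field from the selected rows (dim-0 fancy indexing is not
    # generally available for torch.jagged layouts), while the padded tensor
    # can simply be indexed as padded_tensor[indices].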
vocab_size = 128 a = torch.randint(low=0, high=vocab_size, size=(11,)) b = torch.randint(low=0, high=vocab_size, size=(13,)) c = torch.randint(low=0, high=vocab_size, size=(12,)) d = torch.randint(low=0, high=vocab_size, size=(15,)) input_ids = [a, b, c, d] input_ids = torch.nested.as_nested_tensor(input_ids, layout=torch.jagged) padded_tensor = torch.randn(4, 10) non_tensor_dict = {"global_batch_size": "4"} data = tu.get_tensordict( tensor_dict={ "input_ids": input_ids, "padded_tensor": padded_tensor, }, non_tensor_dict=non_tensor_dict, ) assert data.batch_size == torch.Size([4]) # test index select indices = torch.tensor([1, 3]) selected_data = tu.index_select_tensor_dict(data, indices) assert selected_data.batch_size == torch.Size([2]) target_input_ids = torch.nested.as_nested_tensor([input_ids[idx] for idx in indices], layout=torch.jagged) target_select_data = tu.get_tensordict( tensor_dict={ "input_ids": target_input_ids, "padded_tensor": padded_tensor[indices], }, non_tensor_dict=non_tensor_dict, ) tu.assert_tensordict_eq(selected_data, target_select_data) def test_tensordict_with_images(): # each sample contains a sequence with multiple images of different sizes vocab_size = 128 a = torch.randint(low=0, high=vocab_size, size=(11,)) b = torch.randint(low=0, high=vocab_size, size=(13,)) input_ids = [a, b] input_ids = torch.nested.as_nested_tensor(input_ids, layout=torch.jagged) # must be numpy # TODO(vermouth1992). We may use nested tensor too. But this requires nested over nested a_images = [ torch.randint(low=0, high=255, size=(3, 256, 256), dtype=torch.uint8).numpy(), torch.randint(low=0, high=255, size=(3, 128, 128), dtype=torch.uint8).numpy(), ] b_images = [ torch.randint(low=0, high=255, size=(3, 256, 256), dtype=torch.uint8).numpy(), torch.randint(low=0, high=255, size=(3, 128, 128), dtype=torch.uint8).numpy(), torch.randint(low=0, high=255, size=(3, 64, 64), dtype=torch.uint8).numpy(), ] images = [a_images, b_images] data = tu.get_tensordict({"input_ids": input_ids, "images": images}) assert np.all(np.equal(data[0]["images"][0], a_images[0])) assert torch.all(torch.eq(data[0]["input_ids"], a)) def test_tensordict_with_packing(): vocab_size = 128 a = torch.randint(low=0, high=vocab_size, size=(11,)) b = torch.randint(low=0, high=vocab_size, size=(13,)) input_ids = [a, b] input_ids = torch.nested.as_nested_tensor(input_ids, layout=torch.jagged) data = tu.get_tensordict({"input_ids": input_ids}) # test cu_seqlens cu_seqlens = torch.tensor([0, 11, 24]) assert torch.all(torch.eq(cu_seqlens, data["input_ids"].offsets())) # test index assert torch.all(torch.eq(data["input_ids"][0], a)) assert torch.all(torch.eq(data["input_ids"][1], b)) assert torch.all(torch.eq(data[0]["input_ids"], a)) assert torch.all(torch.eq(data[1]["input_ids"], b)) data_lst = data.chunk(2) assert torch.all(torch.eq(data_lst[0]["input_ids"][0], a)) assert torch.all(torch.eq(data_lst[1]["input_ids"][0], b)) def test_tensordict_eq(): obs = torch.tensor([1, 2, 3, 4, 5, 6]) data_sources = ["abc", "def", "abc", "def", "pol", "klj"] non_tensor_dict = {"train_sample_kwargs": {"top_p": 1.0}, "val_sample_kwargs": {"top_p": 0.7}} data = tu.get_tensordict({"obs": obs, "data_sources": data_sources}, non_tensor_dict=non_tensor_dict) obs = torch.tensor([1, 2, 3, 4, 5, 6]) data_sources = ["abc", "def", "abc", "def", "pol", "klj"] non_tensor_dict = {"train_sample_kwargs": {"top_p": 1.0}, "val_sample_kwargs": {"top_p": 0.7}} data1 = tu.get_tensordict({"obs": obs, "data_sources": data_sources}, non_tensor_dict=non_tensor_dict) 
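# data and data1 hold separately constructed but identical contents, so equality must hold; each mutation below should make assert_tensordict_eq raise.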
tu.assert_tensordict_eq(data, data1) data2 = copy.deepcopy(data1) data2["obs"][0] += 1 with pytest.raises(AssertionError): tu.assert_tensordict_eq(data, data2) data2 = copy.deepcopy(data1) data2["data_sources"][0] = "math" with pytest.raises(AssertionError): tu.assert_tensordict_eq(data, data2) data2 = copy.deepcopy(data1) data2["train_sample_kwargs"]["top_p"] = 0.9 with pytest.raises(AssertionError): tu.assert_tensordict_eq(data, data2) tensor_list = [ torch.tensor([1, 2, 3, 3, 2]), torch.tensor([4, 5]), torch.tensor([7, 8, 10, 14]), torch.tensor([10, 11, 12]), torch.tensor([13, 14, 15, 18]), torch.tensor([16, 17]), ] obs = torch.nested.as_nested_tensor(tensor_list, layout=torch.jagged) data_sources = ["abc", "def", "abc", "def", "pol", "klj"] non_tensor_dict = {"train_sample_kwargs": {"top_p": 1.0}, "val_sample_kwargs": {"top_p": 0.7}} data3 = tu.get_tensordict({"obs": obs, "data_sources": data_sources}, non_tensor_dict=non_tensor_dict) tensor_list[0] = torch.tensor([1, 2, 3, 3, 2]) obs = torch.nested.as_nested_tensor(tensor_list, layout=torch.jagged) data4 = tu.get_tensordict({"obs": obs, "data_sources": data_sources}, non_tensor_dict=non_tensor_dict) tu.assert_tensordict_eq(data3, data4) tensor_list[0] = torch.tensor([1, 2, 4]) obs = torch.nested.as_nested_tensor(tensor_list, layout=torch.jagged) data5 = tu.get_tensordict({"obs": obs, "data_sources": data_sources}, non_tensor_dict=non_tensor_dict) with pytest.raises(AssertionError): tu.assert_tensordict_eq(data3, data5) tensor_list[0] = torch.tensor([4, 5]) tensor_list[1] = torch.tensor([1, 2, 3, 3, 2]) obs = torch.nested.as_nested_tensor(tensor_list, layout=torch.jagged) data6 = tu.get_tensordict({"obs": obs, "data_sources": data_sources}, non_tensor_dict=non_tensor_dict) with pytest.raises(AssertionError): tu.assert_tensordict_eq(data3, data6) def test_tensor_dict_make_iterator(): obs = torch.tensor([1, 2, 3, 4, 5, 6]) data_sources = ["abc", "def", "abc", "def", "pol", "klj"] non_tensor_dict = {"train_sample_kwargs": {"top_p": 1.0}, "val_sample_kwargs": {"top_p": 0.7}} dataset = tu.get_tensordict({"obs": obs, "data_sources": data_sources}, non_tensor_dict=non_tensor_dict) dataloader = tu.make_iterator( dataset, mini_batch_size=2, epochs=2, seed=0, dataloader_kwargs={"shuffle": False, "drop_last": False} ) expected_tensor_dict = [dataset[0:2], dataset[2:4], dataset[4:6], dataset[0:2], dataset[2:4], dataset[4:6]] i = 0 for d in dataloader: tu.assert_tensordict_eq(d, expected_tensor_dict[i]) i += 1 data_iter_1 = tu.make_iterator(dataset, mini_batch_size=3, epochs=1, seed=1, dataloader_kwargs={"shuffle": True}) data_list_1 = [] for data in data_iter_1: data_list_1.append(data) data_iter_2 = tu.make_iterator(dataset, mini_batch_size=3, epochs=1, seed=1, dataloader_kwargs={"shuffle": True}) data_list_2 = [] for data in data_iter_2: data_list_2.append(data) for data1, data2 in zip(data_list_1, data_list_2, strict=True): tu.assert_tensordict_eq(data1, data2) def test_reorder(): obs = torch.tensor([1, 2, 3, 4, 5, 6]) labels = ["a", "b", "c", "d", "e", "f"] non_tensor_dict = {"name": "abdce"} data = tu.get_tensordict(tensor_dict={"obs": obs, "labels": labels}, non_tensor_dict=non_tensor_dict) data = data[torch.tensor([3, 4, 2, 0, 1, 5])] assert torch.all(torch.eq(data["obs"], torch.tensor([4, 5, 3, 1, 2, 6]))) assert np.all(data["labels"] == np.array(["d", "e", "c", "a", "b", "f"])) assert data["name"] == "abdce" def test_chunk_concat(): obs = torch.tensor([1, 2, 3, 4, 5, 6]) labels = ["a", "b", "c", "d", "e", "f"] data = 
tu.get_tensordict({"obs": obs, "labels": labels}, non_tensor_dict={"name": "abcde"}) data_split = data.tensor_split(indices_or_sections=5, dim=0) expected_idx_lst = [[0, 1], [2], [3], [4], [5]] for d, expected_idx in zip(data_split, expected_idx_lst, strict=False): tu.assert_tensordict_eq(d, data[expected_idx]) data_split = data.chunk(2) assert len(data_split) == 2 assert torch.all(torch.eq(data_split[0]["obs"], torch.tensor([1, 2, 3]))) assert np.all(data_split[0]["labels"] == np.array(["a", "b", "c"])) assert data_split[0]["name"] == "abcde" assert torch.all(torch.eq(data_split[1]["obs"], torch.tensor([4, 5, 6]))) assert np.all(data_split[1]["labels"] == np.array(["d", "e", "f"])) assert data_split[1]["name"] == "abcde" concat_data = torch.cat(data_split, dim=0) assert torch.all(torch.eq(concat_data["obs"], data["obs"])) assert np.all(concat_data["labels"] == data["labels"]) assert concat_data["name"] == data["name"] def test_pop(): obs = torch.randn(100, 10) act = torch.randn(100, 3) dataset = tu.get_tensordict({"obs": obs, "act": act}, non_tensor_dict={"2": 2, "1": 1}) poped_dataset = tu.pop(dataset, keys=["obs", "2"]) assert poped_dataset.batch_size[0] == 100 assert poped_dataset.keys() == {"obs", "2"} assert dataset.keys() == {"act", "1"} def test_repeat(): # Create a DataProto object with some batch and non-tensor data obs = torch.tensor([[1, 2], [3, 4], [5, 6]]) labels = ["a", "b", "c"] data = tu.get_tensordict({"obs": obs, "labels": labels}, non_tensor_dict={"info": "test_info"}) # Test interleave=True repeated_data_interleave = data.repeat_interleave(repeats=2) expected_obs_interleave = torch.tensor([[1, 2], [1, 2], [3, 4], [3, 4], [5, 6], [5, 6]]) expected_labels_interleave = ["a", "a", "b", "b", "c", "c"] assert torch.all(torch.eq(repeated_data_interleave["obs"], expected_obs_interleave)) assert repeated_data_interleave["labels"] == expected_labels_interleave assert repeated_data_interleave["info"] == "test_info" # Test interleave=False repeated_data_no_interleave = data.repeat(2) expected_obs_no_interleave = torch.tensor([[1, 2], [3, 4], [5, 6], [1, 2], [3, 4], [5, 6]]) expected_labels_no_interleave = ["a", "b", "c", "a", "b", "c"] assert torch.all(torch.eq(repeated_data_no_interleave["obs"], expected_obs_no_interleave)) assert repeated_data_no_interleave["labels"] == expected_labels_no_interleave assert repeated_data_no_interleave["info"] == "test_info" def test_dataproto_pad_unpad(): obs = torch.tensor([[1, 2], [3, 4], [5, 6]]) labels = ["a", "b", "c"] data = tu.get_tensordict(tensor_dict={"obs": obs, "labels": labels}, non_tensor_dict={"info": "test_info"}) padded_data, pad_size = tu.pad_to_divisor(data, size_divisor=2) assert pad_size == 1 expected_obs = torch.tensor([[1, 2], [3, 4], [5, 6], [1, 2]]) expected_labels = ["a", "b", "c", "a"] assert torch.all(torch.eq(padded_data["obs"], expected_obs)) assert padded_data["labels"] == expected_labels assert padded_data["info"] == "test_info" unpadd_data = tu.unpad(padded_data, pad_size=pad_size) assert torch.all(torch.eq(unpadd_data["obs"], obs)) assert unpadd_data["labels"] == labels assert unpadd_data["info"] == "test_info" padded_data, pad_size = tu.pad_to_divisor(data, size_divisor=3) assert pad_size == 0 expected_obs = torch.tensor([[1, 2], [3, 4], [5, 6]]) expected_labels = ["a", "b", "c"] assert torch.all(torch.eq(padded_data["obs"], expected_obs)) assert padded_data["labels"] == expected_labels assert padded_data["info"] == "test_info" unpadd_data = tu.unpad(padded_data, pad_size=pad_size) assert 
torch.all(torch.eq(unpadd_data["obs"], obs)) assert unpadd_data["labels"] == labels assert unpadd_data["info"] == "test_info" padded_data, pad_size = tu.pad_to_divisor(data, size_divisor=7) assert pad_size == 4 expected_obs = torch.tensor([[1, 2], [3, 4], [5, 6], [1, 2], [3, 4], [5, 6], [1, 2]]) expected_labels = ["a", "b", "c", "a", "b", "c", "a"] assert torch.all(torch.eq(padded_data["obs"], expected_obs)) assert padded_data["labels"] == expected_labels assert padded_data["info"] == "test_info" unpadd_data = tu.unpad(padded_data, pad_size=pad_size) assert torch.all(torch.eq(unpadd_data["obs"], obs)) assert unpadd_data["labels"] == labels assert unpadd_data["info"] == "test_info" def test_torch_save_data_proto(): obs = torch.tensor([[1, 2], [3, 4], [5, 6]]) labels = ["a", "b", "c"] data = tu.get_tensordict({"obs": obs, "labels": labels}, non_tensor_dict={"info": "test_info"}) filename = "test_data.pt" torch.save(data, filename) loaded_data = torch.load(filename, weights_only=False) assert torch.all(torch.eq(loaded_data["obs"], data["obs"])) assert loaded_data["labels"] == data["labels"] assert loaded_data["info"] == data["info"] import os os.remove(filename) def test_len(): obs = torch.tensor([[1, 2], [3, 4], [5, 6]]) labels = np.array(["a", "b", "c"], dtype=object) data = tu.get_tensordict({"obs": obs, "labels": labels.tolist()}, non_tensor_dict={"info": "test_info"}) assert len(data) == 3 data = tu.get_tensordict({"labels": labels.tolist()}, non_tensor_dict={"info": "test_info"}) assert len(data) == 3 data_item = data[0] assert len(data_item) == 0 data = tu.get_tensordict({}, non_tensor_dict={"info": "test_info"}) assert len(data) == 0 def test_dataproto_index(): data_len = 100 idx_num = 10 obs = torch.randn(data_len, 10) labels = [random.choice(["abc", "cde"]) for _ in range(data_len)] data = tu.get_tensordict({"obs": obs, "labels": labels}) labels_np = np.array(labels) idx_np_int = np.random.randint(0, data_len, size=(idx_num,)) result_np_int = data[idx_np_int] assert result_np_int.keys() == data.keys() assert result_np_int["obs"].shape[0] == idx_num assert len(result_np_int["labels"]) == idx_num assert np.array_equal(result_np_int["obs"].cpu().numpy(), obs[idx_np_int].numpy()) assert np.array_equal(result_np_int["labels"], labels_np[idx_np_int]) idx_torch_int = torch.randint(0, data_len, size=(idx_num,)) result_torch_int = data[idx_torch_int] assert result_torch_int.keys() == data.keys() assert result_torch_int["obs"].shape[0] == idx_num assert len(result_torch_int["labels"]) == idx_num assert np.array_equal(result_torch_int["obs"].cpu().numpy(), obs[idx_torch_int].cpu().numpy()) assert np.array_equal(result_torch_int["labels"], labels_np[idx_torch_int.cpu().numpy()]) idx_list_int = [np.random.randint(0, data_len) for _ in range(idx_num)] result_list_int = data[idx_list_int] assert result_list_int.keys() == data.keys() assert result_list_int["obs"].shape[0] == idx_num assert len(result_list_int["labels"]) == idx_num assert np.array_equal(result_list_int["obs"].cpu().numpy(), obs[idx_list_int].cpu().numpy()) assert np.array_equal(result_list_int["labels"], labels_np[idx_list_int]) # idx_np_bool = np.random.randint(0, 2, size=(data_len,), dtype=bool) # result_np_bool = data[idx_np_bool] # assert result_np_bool.keys() == data.keys() # assert result_np_bool["obs"].shape[0] == idx_np_bool.sum() # assert len(result_np_bool["labels"]) == idx_np_bool.sum() # assert np.array_equal(result_np_bool["obs"].cpu().numpy(), obs[idx_np_bool].cpu().numpy()) # assert 
np.array_equal(result_np_bool["labels"], labels_np[idx_np_bool]) idx_torch_bool = torch.randint(0, 2, size=(data_len,), dtype=torch.bool) result_torch_bool = data[idx_torch_bool] assert result_torch_bool.keys() == data.keys() assert result_torch_bool["obs"].shape[0] == idx_torch_bool.sum().item() assert len(result_torch_bool["labels"]) == idx_torch_bool.sum().item() assert np.array_equal(result_torch_bool["obs"].cpu().numpy(), obs[idx_torch_bool].cpu().numpy()) assert np.array_equal(result_torch_bool["labels"], labels_np[idx_torch_bool]) # idx_list_bool = [np.random.randint(0, 2, dtype=bool) for _ in range(data_len)] # result_list_bool = data[idx_list_bool] # assert result_list_bool.keys() == data.keys() # assert result_list_bool["obs"].shape[0] == sum(idx_list_bool) # assert len(result_list_bool["labels"]) == sum(idx_list_bool) # assert np.array_equal(result_list_bool["obs"].cpu().numpy(), obs[idx_list_bool].cpu().numpy()) # assert np.array_equal(result_list_bool["labels"], labels_np[idx_list_bool]) def test_select(): obs = torch.randn(100, 10) act = torch.randn(100, 3) dataset = tu.get_tensordict({"obs": obs, "act": act}, non_tensor_dict={"2": 2, "1": 1}) subset = dataset.select("obs", "2") assert torch.all(torch.eq(subset["obs"], dataset["obs"])) assert subset["2"] == dataset["2"] assert "act" not in subset.keys() assert "1" not in subset.keys() def test_dataproto_no_batch(): labels = ["a", "b", "c"] data = tu.get_tensordict(tensor_dict={"labels": labels}, non_tensor_dict={"info": "test_info"}) selected = data.select("labels") assert selected["labels"] == labels pop_data = tu.pop(data, keys=["labels"]) assert pop_data["labels"] == labels assert "labels" not in data def test_sample_level_repeat(): # Create a DataProto object with some batch and non-tensor data obs = torch.tensor([[1, 2], [3, 4], [5, 6]]) labels = ["a", "b", "c"] data = tu.get_tensordict({"obs": obs, "labels": labels}, non_tensor_dict={"info": "test_info"}) # list repeated_data_interleave = data.repeat_interleave(repeats=torch.tensor([3, 1, 2])) expected_obs_interleave = torch.tensor([[1, 2], [1, 2], [1, 2], [3, 4], [5, 6], [5, 6]]) expected_labels_interleave = ["a", "a", "a", "b", "c", "c"] assert torch.all(torch.eq(repeated_data_interleave["obs"], expected_obs_interleave)) assert repeated_data_interleave["labels"] == expected_labels_interleave assert repeated_data_interleave["info"] == "test_info" # torch.tensor repeated_data_no_interleave = data.repeat_interleave(repeats=torch.tensor([1, 2, 3])) expected_obs_no_interleave = torch.tensor([[1, 2], [3, 4], [3, 4], [5, 6], [5, 6], [5, 6]]) expected_labels_no_interleave = ["a", "b", "b", "c", "c", "c"] assert torch.all(torch.eq(repeated_data_no_interleave["obs"], expected_obs_no_interleave)) assert repeated_data_no_interleave["labels"] == expected_labels_no_interleave assert repeated_data_no_interleave["info"] == "test_info" def test_dataproto_chunk_after_index(): data_len = 4 obs = torch.randn(data_len, 4) labels = [f"label_{i}" for i in range(data_len)] data = tu.get_tensordict(tensor_dict={"obs": obs, "labels": labels}, non_tensor_dict={"name": "abc"}) # Test with boolean numpy array bool_mask = torch.tensor([True, False, True, False]) selected = data[bool_mask] assert isinstance(selected.batch_size, torch.Size) assert all(isinstance(d, int) for d in selected.batch_size) # int or List[int] # Test with integer numpy array int_mask = torch.tensor([0, 2]) selected = data[int_mask] assert isinstance(selected.batch_size, torch.Size) assert all(isinstance(d, int) for d in 
selected.batch_size) # Test with boolean list list_mask = [True, False, True, False] selected = data[list_mask] assert isinstance(selected.batch_size, torch.Size) assert all(isinstance(d, int) for d in selected.batch_size) # Test with list list_mask = [0, 2] selected = data[list_mask] assert isinstance(selected.batch_size, torch.Size) assert all(isinstance(d, int) for d in selected.batch_size) # Test with torch tensor (bool) torch_bool_mask = torch.tensor([True, False, True, False]) selected = data[torch_bool_mask] assert isinstance(selected.batch_size, torch.Size) assert all(isinstance(d, int) for d in selected.batch_size) # Test with torch tensor (int) torch_int_mask = torch.tensor([0, 2]) selected = data[torch_int_mask] assert isinstance(selected.batch_size, torch.Size) assert all(isinstance(d, int) for d in selected.batch_size) ================================================ FILE: verl_distillation/tests/trainer/__init__.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Tests for the trainer module. """ ================================================ FILE: verl_distillation/tests/trainer/config/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: verl_distillation/tests/trainer/config/legacy_ppo_megatron_trainer.yaml ================================================ data: tokenizer: null train_files: ~/data/rlhf/gsm8k/train.parquet val_files: ~/data/rlhf/gsm8k/test.parquet train_max_samples: -1 # set to -1 to use full dataset val_max_samples: -1 # set to -1 to use full dataset prompt_key: prompt reward_fn_key: data_source max_prompt_length: 512 max_response_length: 512 train_batch_size: 1024 val_batch_size: null # DEPRECATED: Validation datasets are sent to inference engines as a whole batch, which schedule memory themselves return_raw_input_ids: False # This should be set to true when the policy and RM tokenizers differ return_raw_chat: False return_full_prompt: False shuffle: True seed: null # An integer seed to use when shuffling the data. If not set or set to `null`, the data shuffling will not be seeded, resulting in a different data order on each run. filter_overlong_prompts: False # for large-scale datasets, filtering overlong prompts could be time-consuming.
You can set the filter_overlong_prompts_workers to use multiprocessing to speed this up. filter_overlong_prompts_workers: 1 truncation: error trust_remote_code: False # main_ppo will check this config to determine whether to use remote code for the tokenizer custom_cls: path: null name: null sampler: class_path: null class_name: null dataloader_num_workers: 8 return_multi_modal_inputs: True actor_rollout_ref: hybrid_engine: True nccl_timeout: 600 # seconds, default is 10 minutes for torch, you can set it to a larger value if you have long-running operations like 32B or 72B models using megatron model: path: ~/models/deepseek-llm-7b-chat custom_chat_template: null external_lib: null override_config: model_config: {} moe_config: freeze_moe_router: False enable_gradient_checkpointing: False gradient_checkpointing_kwargs: ## Activation Checkpointing activations_checkpoint_method: null # 'uniform', 'block'; not used with 'selective' # 'uniform' divides the total number of transformer layers and checkpoints the input activation of each chunk # 'block' checkpoints the specified number of layers per pipeline stage at the specified granularity activations_checkpoint_granularity: null # 'selective' or 'full' # 'full' will checkpoint the entire transformer layer and 'selective' only checkpoints the memory-intensive part of attention activations_checkpoint_num_layers: null # not used with 'selective' trust_remote_code: False actor: strategy: megatron # This is for backward-compatibility ppo_mini_batch_size: 256 ppo_micro_batch_size: null # will be deprecated, use ppo_micro_batch_size_per_gpu ppo_micro_batch_size_per_gpu: null use_dynamic_bsz: False ppo_max_token_len_per_gpu: 16384 # n * ${data.max_prompt_length} + ${data.max_response_length} use_torch_compile: True # False to disable torch compile # pg_losses2 = -advantages * torch.clamp(ratio, 1 - cliprange_low, 1 + cliprange_high) clip_ratio: 0.2 # default value if clip_ratio_low and clip_ratio_high are not specified clip_ratio_low: 0.2 clip_ratio_high: 0.2 clip_ratio_c: 3.0 # lower bound of the value for Dual-clip PPO from https://arxiv.org/pdf/1912.09729 loss_agg_mode: "token-mean" # / "seq-mean-token-sum" / "seq-mean-token-mean" # NOTE: "token-mean" is the default behavior entropy_coeff: 0 use_kl_loss: False # True for GRPO kl_loss_coef: 0.001 # for grpo kl_loss_type: low_var_kl # for grpo ppo_epochs: 1 data_loader_seed: null shuffle: False policy_loss: # policy loss config loss_mode: "vanilla" # Loss function mode: vanilla / clip-cov / kl-cov / gpg from https://arxiv.org/abs/2505.22617 clip_cov_ratio: 0.0002 # Ratio of tokens to be clipped for clip-cov loss clip_cov_lb: 1.0 # Lower bound for clip-cov loss clip_cov_ub: 5.0 # Upper bound for clip-cov loss kl_cov_ratio: 0.0002 # Ratio of tokens to which the KL penalty is applied for kl-cov loss ppo_kl_coef: 0.1 # KL divergence penalty coefficient optim: optimizer: adam lr: 1e-6 clip_grad: 1.0 total_training_steps: -1 # must be overridden by the program lr_warmup_init: 0.0 # initial learning rate for warmup, default to 0.0 lr_warmup_steps: null # Prioritized. None, 0 or negative values mean delegating to lr_warmup_steps_ratio. lr_warmup_steps_ratio: 0.
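# Worked example (presumed semantics, per the comments above): with lr_warmup_steps left null and lr_warmup_steps_ratio: 0.1, a run whose injected total_training_steps resolves to 10000 warms up for 1000 steps; any positive lr_warmup_steps would take priority over the ratio.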
# the total steps will be injected during runtime lr_decay_steps: null lr_decay_style: constant # select from constant/linear/cosine/inverse_square_root min_lr: 0.0 # minimum learning rate, default to 0.0 weight_decay: 0.01 weight_decay_incr_style: constant # select from constant/linear/cosine lr_wsd_decay_style: exponential # select from constant/exponential/cosine lr_wsd_decay_steps: null use_checkpoint_opt_param_scheduler: False # use checkpoint optimizer parameter scheduler megatron: param_offload: False grad_offload: False optimizer_offload: False tensor_model_parallel_size: 1 expert_model_parallel_size: 1 expert_tensor_parallel_size: null pipeline_model_parallel_size: 1 virtual_pipeline_model_parallel_size: null # change VPP interface for parallelism tests context_parallel_size: 1 sequence_parallel: True use_distributed_optimizer: True use_dist_checkpointing: False dist_checkpointing_path: null seed: 42 override_transformer_config: {} # additional transformer config like: num_layers_in_first(/last)_pipeline_stage use_mbridge: False profile: # profile the actor model in `update_policy` use_profile: False # open it when you want to profile the actor model profile_ranks: null # list, you can specify the ranks to profile step_start: -1 # start step in update_policy step_end: -1 # end step save_path: null # the path to save the profile result load_weight: True checkpoint: async_save: False # save checkpoint asynchronously # What to include in saved checkpoints # with 'hf_model' you can save whole model as hf format, now only use sharded model checkpoint to save space save_contents: ['model', 'optimizer', 'extra'] # For more flexibility, you can specify the contents to load from the checkpoint. load_contents: ${actor_rollout_ref.actor.checkpoint.save_contents} ref: strategy: ${actor_rollout_ref.actor.strategy} use_torch_compile: ${actor_rollout_ref.actor.use_torch_compile} megatron: param_offload: False tensor_model_parallel_size: 1 expert_model_parallel_size: 1 expert_tensor_parallel_size: null pipeline_model_parallel_size: 1 virtual_pipeline_model_parallel_size: null # change VPP interface for parallelism tests context_parallel_size: 1 sequence_parallel: True use_distributed_optimizer: True use_dist_checkpointing: False dist_checkpointing_path: null seed: ${actor_rollout_ref.actor.megatron.seed} override_transformer_config: ${actor_rollout_ref.actor.megatron.override_transformer_config} use_mbridge: ${actor_rollout_ref.actor.megatron.use_mbridge} profile: use_profile: False profile_ranks: null step_start: -1 step_end: -1 save_path: null load_weight: True log_prob_micro_batch_size: null # will be deprecated, use log_prob_micro_batch_size_per_gpu log_prob_micro_batch_size_per_gpu: null log_prob_use_dynamic_bsz: ${actor_rollout_ref.actor.use_dynamic_bsz} log_prob_max_token_len_per_gpu: ${actor_rollout_ref.actor.ppo_max_token_len_per_gpu} rollout: name: vllm mode: sync # sync: LLM, async: AsyncLLM temperature: 1.0 top_k: -1 # 0 for hf rollout, -1 for vllm rollout top_p: 1 prompt_length: ${data.max_prompt_length} # for xperf_gpt response_length: ${data.max_response_length} # for vllm rollout dtype: bfloat16 # should align with FSDP gpu_memory_utilization: 0.5 ignore_eos: False enforce_eager: False free_cache_engine: True load_format: dummy tensor_model_parallel_size: 2 max_num_batched_tokens: 8192 max_model_len: null max_num_seqs: 1024 log_prob_micro_batch_size: null # will be deprecated, use log_prob_micro_batch_size_per_gpu log_prob_micro_batch_size_per_gpu: null log_prob_use_dynamic_bsz: 
${actor_rollout_ref.actor.use_dynamic_bsz} log_prob_max_token_len_per_gpu: ${actor_rollout_ref.actor.ppo_max_token_len_per_gpu} disable_log_stats: True enable_chunked_prefill: True # could get higher throughput # for hf rollout do_sample: True layer_name_map: qkv_layer_name: qkv gate_proj_layer_name: gate_up # number of responses (i.e. num sample times) n: 1 engine_kwargs: # inference engine parameters, please refer vllm/sglang official doc for detail vllm: {} sglang: {} val_kwargs: # sampling parameters for validation top_k: -1 # 0 for hf rollout, -1 for vllm rollout top_p: 1.0 temperature: 0 n: 1 do_sample: False # default eager for validation # Multi-turn interaction config for tools or chat. multi_turn: # set to True for multi-turn tool interaction tasks; should set rollout.name to sglang as well enable: False # null for no limit (default max_length // 3) max_assistant_turns: null # null for no tool tool_config_path: null # null for no limit (default max_length // 3) max_user_turns: null # max parallel call for tools in single turn max_parallel_calls: 1 # max length of tool response max_tool_response_length: 256 # truncate side of tool response: left, middle, right tool_response_truncate_side: middle # null for no interaction interaction_config_path: null # - When set to True, the model's default chat template is used for multi-turn rollout, which typically matches production behavior. # - When set to False, the token ids recorded for training are used instead; unlike the default chat template, these always include the model's full output, # which may contain additional content such as reasoning content. This maintains the consistency between training and rollout, but it will lead to longer prompts. use_inference_chat_template: False # Tokenization is performed turn by turn and the resulting token ids are concatenated to form the full conversation. # To ensure this matches the result of tokenizing the entire conversation at once, a sanity check is run at the end of each multi-turn rollout to compare the two sets of token ids. # Some models are known to produce different tokenization results when tokenizing turn by turn vs. all at once. This behavior has already been validated for them. # To reduce excessive warnings, you can turn off the sanity check for these models if you are using their default chat template: # Qwen/QwQ-32B, Qwen/Qwen3-xxB # - disable: disable tokenization sanity check # - strict: enable strict tokenization sanity check (default) # - ignore_strippable: ignore strippable tokens when checking tokenization sanity tokenization_sanity_check_mode: strict # Format of the multi-turn interaction. Options: hermes, llama3_json, ...
format: hermes # [Experimental] agent-loop-based rollout configs agent: # Number of agent loop workers num_workers: 8 custom_async_server: path: null name: null # supports logging rollout probs for debugging purposes calculate_log_probs: False # Nsight system profiler configs profiler: # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs _target_: verl.utils.profiler.ProfilerConfig discrete: False all_ranks: False ranks: [] critic: rollout_n: ${actor_rollout_ref.rollout.n} strategy: ${actor_rollout_ref.actor.strategy} nccl_timeout: 600 # seconds, default is 10 minutes for torch, you can set it to a larger value if you have long-running operations like 32B or 72B models using megatron optim: optimizer: adam lr: 1e-6 clip_grad: 1.0 total_training_steps: -1 # must be overridden by the program lr_warmup_init: 0.0 # initial learning rate for warmup, default to 0.0 lr_warmup_steps: null # Prioritized. None, 0 or negative values mean delegating to lr_warmup_steps_ratio. lr_warmup_steps_ratio: 0. # the total steps will be injected during runtime lr_decay_steps: null lr_decay_style: constant # select from constant/linear/cosine/inverse_square_root min_lr: 0.0 # minimum learning rate, default to 0.0 weight_decay: 0.01 weight_decay_incr_style: constant # select from constant/linear/cosine lr_wsd_decay_style: exponential # select from constant/exponential/cosine lr_wsd_decay_steps: null use_checkpoint_opt_param_scheduler: False # use checkpoint optimizer parameter scheduler model: path: ~/models/deepseek-llm-7b-chat tokenizer_path: ${actor_rollout_ref.model.path} override_config: model_config: {} moe_config: freeze_moe_router: False external_lib: ${actor_rollout_ref.model.external_lib} trust_remote_code: False enable_gradient_checkpointing: False gradient_checkpointing_kwargs: ## Activation Checkpointing activations_checkpoint_method: null activations_checkpoint_granularity: null activations_checkpoint_num_layers: null megatron: param_offload: False grad_offload: False optimizer_offload: False tensor_model_parallel_size: 1 expert_model_parallel_size: 1 expert_tensor_parallel_size: null pipeline_model_parallel_size: 1 virtual_pipeline_model_parallel_size: null # change VPP interface for parallelism tests context_parallel_size: 1 sequence_parallel: True use_distributed_optimizer: True use_dist_checkpointing: False dist_checkpointing_path: null seed: ${actor_rollout_ref.actor.megatron.seed} override_transformer_config: ${actor_rollout_ref.actor.megatron.override_transformer_config} use_mbridge: ${actor_rollout_ref.actor.megatron.use_mbridge} load_weight: True ppo_mini_batch_size: ${actor_rollout_ref.actor.ppo_mini_batch_size} ppo_micro_batch_size: null # will be deprecated, use ppo_micro_batch_size_per_gpu ppo_micro_batch_size_per_gpu: null use_dynamic_bsz: ${actor_rollout_ref.actor.use_dynamic_bsz} ppo_max_token_len_per_gpu: 32768 # (${actor_rollout_ref.actor.ppo_max_token_len_per_gpu}) * 2 forward_max_token_len_per_gpu: ${critic.ppo_max_token_len_per_gpu} ppo_epochs: ${actor_rollout_ref.actor.ppo_epochs} data_loader_seed: ${actor_rollout_ref.actor.data_loader_seed} shuffle: ${actor_rollout_ref.actor.shuffle} cliprange_value: 0.5 loss_agg_mode: ${actor_rollout_ref.actor.loss_agg_mode} checkpoint: async_save: False # save checkpoint asynchronously # What to include in saved checkpoints # with 'hf_model' you can save whole model as hf format, now only use sharded model checkpoint to save space save_contents: ['model', 'optimizer', 'extra'] load_contents:
${critic.checkpoint.save_contents} # Nsight system profiler configs profiler: # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs _target_: verl.utils.profiler.ProfilerConfig discrete: False all_ranks: False ranks: [] reward_model: enable: False strategy: ${actor_rollout_ref.actor.strategy} nccl_timeout: 600 # seconds, default is 10 minutes for torch, you can set it to a larger value if you have long-running operations like 32B or 72B model using megatron megatron: param_offload: False tensor_model_parallel_size: 1 expert_model_parallel_size: 1 expert_tensor_parallel_size: null pipeline_model_parallel_size: 1 virtual_pipeline_model_parallel_size: null # change VPP interface for parallelism tests context_parallel_size: 1 sequence_parallel: True use_distributed_optimizer: False use_dist_checkpointing: False dist_checkpointing_path: null seed: ${actor_rollout_ref.actor.megatron.seed} override_transformer_config: {} use_mbridge: ${actor_rollout_ref.actor.megatron.use_mbridge} model: input_tokenizer: ${actor_rollout_ref.model.path} # set this to null if the chat template is identical path: ~/models/FsfairX-LLaMA3-RM-v0.1 trust_remote_code: False external_lib: ${actor_rollout_ref.model.external_lib} load_weight: True micro_batch_size: null # will be deprecated, use micro_batch_size_per_gpu micro_batch_size_per_gpu: null use_dynamic_bsz: ${critic.use_dynamic_bsz} forward_max_token_len_per_gpu: ${critic.forward_max_token_len_per_gpu} max_length: null reward_manager: naive launch_reward_fn_async: False # custom reward function executed async on CPU, during log_prob sandbox_fusion: url: null # faas url to run code in cloud sandbox max_concurrent: 64 # max concurrent requests to sandbox memory_limit_mb: 1024 # Max memory limit for each sandbox process in MB # Nsight system profiler configs profiler: # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs _target_: verl.utils.profiler.ProfilerConfig discrete: False all_ranks: False ranks: [] custom_reward_function: path: null name: compute_score algorithm: # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs _target_: verl.trainer.config.AlgoConfig gamma: 1.0 lam: 1.0 adv_estimator: gae norm_adv_by_std_in_grpo: True use_kl_in_reward: False kl_penalty: kl # how to estimate kl divergence kl_ctrl: # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs _target_: verl.trainer.config.KLControlConfig type: fixed kl_coef: 0.001 horizon: 10000 target_kl: 0.1 use_pf_ppo: False pf_ppo: reweight_method: pow # ["pow", "max_min", "max_random"] weight_pow: 2.0 trainer: balance_batch: True total_epochs: 30 total_training_steps: null profile_steps: null # [1,2,5] or [] or null project_name: verl_examples experiment_name: gsm8k logger: ['console', 'wandb'] log_val_generations: 0 nnodes: 1 n_gpus_per_node: 8 save_freq: -1 esi_redundant_time: 0 # auto: find the last ckpt to resume. 
If none is found, start from scratch resume_mode: auto # or disable or resume_path if resume_from_path is set resume_from_path: null del_local_ckpt_after_load: False val_before_train: True test_freq: -1 critic_warmup: 0 default_hdfs_dir: null default_local_dir: checkpoints/${trainer.project_name}/${trainer.experiment_name} max_actor_ckpt_to_keep: null max_critic_ckpt_to_keep: null # The timeout for the ray worker group to wait for the register center to be ready ray_wait_register_center_timeout: 300 device: cuda # see ppo_trainer.yaml for more details controller_nsight_options: trace: "cuda,nvtx,cublas,ucx" cuda-memory-usage: "true" cuda-graph-trace: "graph" worker_nsight_options: trace: "cuda,nvtx,cublas,ucx" cuda-memory-usage: "true" cuda-graph-trace: "graph" capture-range: "cudaProfilerApi" capture-range-end: null kill: none npu_profile: options: save_path: ./profiler_data roles: ["all"] level: level1 with_memory: False record_shapes: False with_npu: True with_cpu: True with_module: False with_stack: False analysis: True ray_kwargs: ray_init: num_cpus: null # `None` means using all CPUs, which might cause a hang if CPUs are limited in systems like SLURM. In that case, set it to an allowed number. timeline_json_file: null ================================================ FILE: verl_distillation/tests/trainer/config/legacy_ppo_trainer.yaml ================================================ # Format checks enforced on CI: # 1. Comments must appear above each field. # 2. There must be a blank line between each field. # 3. Inline comments (after a field on the same line) are not allowed. # 4. Indentation level is respected for nested fields. # dataset config data: # Tokenizer class or path. If null, it will be inferred from the model. tokenizer: null # Whether to use shared memory for data loading. use_shm: False # Training set parquet. Can be a list or a single file. # The program will read all files into memory, so it can't be too large (< 100GB). # The path can be either a local path or an HDFS path. # For HDFS path, we provide utils to download it to DRAM and convert it to a local path. train_files: ~/data/rlhf/gsm8k/train.parquet # Validation parquet. Can be a list or a single file. val_files: ~/data/rlhf/gsm8k/test.parquet # Maximum number of samples to be used. # Set to -1 to use full dataset, otherwise, randomly # select the specified number of samples from train dataset train_max_samples: -1 # Maximum number of samples to be used. # Set to -1 to use full dataset, otherwise, randomly # select the specified number of samples from val dataset val_max_samples: -1 # The field in the dataset where the prompt is located. Default is 'prompt'. prompt_key: prompt # The field used to select the reward function (if using different ones per example). reward_fn_key: data_source # Maximum prompt length. All prompts will be left-padded to this length. # An error will be reported if the length is too long. max_prompt_length: 512 # Maximum response length. Rollout in RL algorithms (e.g. PPO) generates up to this length. max_response_length: 512 # Batch size sampled for one training iteration of different RL algorithms. train_batch_size: 1024 # Batch size used during validation. Can be null. val_batch_size: null # Whether to return the original input_ids without adding chat template. # This is used when the reward model's chat template differs from the policy. # If using a model-based RM with different templates, this should be True.
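# Hypothetical illustration: a policy whose prompts were rendered with its own chat template, scored by an RM that expects a different template, needs the raw ids so the text can be decoded and re-templated on the RM side.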
return_raw_input_ids: False # Whether to return the original chat (prompt) without applying chat template. return_raw_chat: False # Whether to return the full prompt with chat template. return_full_prompt: False # Whether to shuffle the data in the dataloader. shuffle: True # An integer seed to use when shuffling the data. If not set or set to # `null`, the data shuffling will not be seeded, resulting in a different data order on each run. seed: null # num dataloader workers dataloader_num_workers: 8 # Whether to shuffle the validation set. validation_shuffle: False # Whether to filter overlong prompts. filter_overlong_prompts: False # Number of workers for filtering overlong prompts. # For large-scale datasets, filtering can be time-consuming. # Use multiprocessing to speed up. Default is 1. filter_overlong_prompts_workers: 1 # Truncate the input_ids or prompt if they exceed max_prompt_length. # Options: 'error', 'left', or 'right'. Default is 'error'. truncation: error # The field in the multi-modal dataset where the image is located. Default is 'images'. image_key: images # The field in the multi-modal dataset where the video is located. video_key: videos # If the remote tokenizer has a Python file, this flag determines whether to allow using it. trust_remote_code: False # Optional: specify a custom dataset class path and name if overriding default loading behavior. custom_cls: # The path to the file containing your customized dataset class. If not specified, pre-implemented dataset will be used. path: null # The name of the dataset class within the specified file. name: null # Whether to return multi-modal inputs in the dataset. Set to False if rollout generates new multi-modal inputs. return_multi_modal_inputs: True # Data generation configuration for augmenting the dataset. datagen: # The path to the file containing your customized data generation class. # E.g. 'pkg://verl.experimental.dynamic_dataset.dynamicgen_dataset' path: null # The class name of the data generation class within the specified file. # E.g. 'MockDataGenerator' name: null # settings related to data sampler sampler: # the path to the module containing a curriculum class which implements the # AbstractSampler interface class_path: null # the name of the curriculum class like `MySampler` class_name: null # Additional kwargs when calling tokenizer.apply_chat_template apply_chat_template_kwargs: {} # config for actor, rollout and reference model actor_rollout_ref: # Whether it's a hybrid engine, currently only supports hybrid engine hybrid_engine: true # common configs for the model model: _target_: verl.workers.config.HFModelConfig # Huggingface model path. This can be either local path or HDFS path. path: ~/models/deepseek-llm-7b-chat # Custom chat template for the model. custom_chat_template: null # Whether to use shared memory (SHM) for accelerating the loading of model weights use_shm: false # Additional Python packages to register huggingface models/tokenizers. external_lib: null # Used to override model's original configurations, mainly dropout override_config: {} # Enable gradient checkpointing for actor enable_gradient_checkpointing: true # Enable activation offloading for actor enable_activation_offload: false # Whether to remove padding tokens in inputs during training use_remove_padding: false # Set to positive value to enable LoRA (e.g., 32) lora_rank: 0 # LoRA scaling factor lora_alpha: 16 # Target modules to apply LoRA. 
Options: "all-linear" (not recommended for VLMs) or # [q_proj,k_proj,v_proj,o_proj,gate_proj,up_proj,down_proj] target_modules: all-linear # Exclude modules from applying LoRA. Similar usage to target_modules and Peft. # Example: '.*visual.*' for excluding the ViT in Qwen2.5-VL, as currently vllm does not support ViT LoRA. exclude_modules: null # Whether to use Liger for linear layer fusion use_liger: false # Whether to use custom fused kernels (e.g., FlashAttention, fused MLP) use_fused_kernels: false # Options for fused kernels. If use_fused_kernels is true, this will be used. fused_kernel_options: # Implementation backend for fused kernels. Options: "triton" or "torch". impl_backend: torch # Whether to enable loading a remote code model trust_remote_code: false # actor configs actor: # fsdp, fsdp2 or megatron. fsdp backend used here. strategy: fsdp # Split each sample into sub-batches of this size for PPO ppo_mini_batch_size: 256 # [Deprecated] Global micro batch size ppo_micro_batch_size: null # Local per-GPU micro batch size ppo_micro_batch_size_per_gpu: null # Whether to automatically adjust batch size at runtime use_dynamic_bsz: false # Max tokens per GPU in one PPO batch; affects gradient accumulation # Typically it should be: n * ${data.max_prompt_length} + ${data.max_response_length} ppo_max_token_len_per_gpu: 16384 # Gradient clipping for actor updates grad_clip: 1.0 # PPO clip ratio clip_ratio: 0.2 # Lower bound for asymmetric clipping (used in dual-clip PPO) clip_ratio_low: 0.2 # Upper bound for asymmetric clipping (used in dual-clip PPO) clip_ratio_high: 0.2 # policy loss config policy_loss: # Loss function mode: vanilla / clip-cov / kl-cov / gpg from https://arxiv.org/abs/2505.22617 loss_mode: "vanilla" # Ratio of tokens to be clipped for clip-cov loss clip_cov_ratio: 0.0002 # Lower bound for clip-cov loss clip_cov_lb: 1.0 # Upper bound for clip-cov loss clip_cov_ub: 5.0 # Ratio of tokens to which the KL penalty is applied for kl-cov loss kl_cov_ratio: 0.0002 # KL divergence penalty coefficient ppo_kl_coef: 0.1 # Constant C in Dual-clip PPO; clips when advantage < 0 and ratio > C clip_ratio_c: 3.0 # Loss aggregation mode: "token-mean", "seq-mean-token-sum", or "seq-mean-token-mean" loss_agg_mode: token-mean # Entropy regularization coefficient in PPO loss entropy_coeff: 0 # Whether to use KL loss instead of KL reward penalty. True for GRPO use_kl_loss: false # Whether to use torch.compile() use_torch_compile: true # KL loss coefficient when use_kl_loss is enabled. For GRPO kl_loss_coef: 0.001 # Type of KL divergence loss. Options: "kl"(k1), "abs", "mse"(k2), "low_var_kl"(k3), "full" kl_loss_type: low_var_kl # Number of PPO epochs per batch ppo_epochs: 1 # Shuffle training data across PPO epochs shuffle: false # Sequence parallelism size for Ulysses-style model parallelism ulysses_sequence_parallel_size: 1 # calculate entropy with chunking to reduce memory peak entropy_from_logits_with_chunking: False # recompute entropy entropy_checkpointing: False # checkpoint configs checkpoint: # What to include in saved checkpoints # with 'hf_model' you can save whole model as hf format, now only use sharded model checkpoint to save space save_contents: ['model', 'optimizer', 'extra'] # For more flexibility, you can specify the contents to load from the checkpoint.
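# e.g. (illustrative, following the comment above) save_contents: ['model', 'optimizer', 'extra', 'hf_model'] would additionally export an HF-format copy of the model.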
load_contents: ${actor_rollout_ref.actor.checkpoint.save_contents} # optimizer configs optim: # Learning rate lr: 1e-6 # Warmup steps; negative value delegates to lr_warmup_steps_ratio lr_warmup_steps: -1 # Warmup steps ratio (used if lr_warmup_steps is negative) lr_warmup_steps_ratio: 0.0 # Minimum LR ratio for cosine schedule min_lr_ratio: 0.0 # Number of cosine cycles in LR schedule num_cycles: 0.5 # LR scheduler type: "constant" or "cosine" lr_scheduler_type: constant # Total training steps (must be overridden at runtime) total_training_steps: -1 # Weight decay weight_decay: 0.01 # configs for FSDP fsdp_config: # policy for wrapping the model wrap_policy: # Minimum number of parameters to trigger wrapping a layer with FSDP min_num_params: 0 # Whether to offload model parameters to CPU (trades speed for memory) param_offload: false # Whether to offload optimizer state to CPU optimizer_offload: false # Only for FSDP2: offload param/grad/optimizer during train offload_policy: false # Only for FSDP2: Reshard after forward pass to reduce memory footprint reshard_after_forward: true # Number of GPUs in each FSDP shard group; -1 means auto fsdp_size: -1 # Only for FSDP1: FSDP1 configuration, prefetch the next forward-pass all-gather # before the current forward computation. forward_prefetch: False # Reference model config. # Reference model will be enabled when actor.use_kl_loss or/and algorithm.use_kl_in_reward is/are True. ref: # actor_rollout_ref.ref: FSDP config same as actor. For models larger than 7B, it’s recommended to turn on offload for ref by default strategy: ${actor_rollout_ref.actor.strategy} # config for FSDP strategy fsdp_config: # whether to offload parameters in FSDP param_offload: False # whether to perform reshard after model forward to save memory. # only for fsdp2, [True, False, int between 1 and fsdp_size] reshard_after_forward: True # Only for FSDP1: FSDP1 configuration, prefetch the next forward-pass all-gather # before the current forward computation. forward_prefetch: False # the wrap policy for FSDP model wrap_policy: # minimum number of params in a wrapped module min_num_params: 0 # whether to enable torch.compile use_torch_compile: ${actor_rollout_ref.actor.use_torch_compile} # [Will be deprecated, use log_prob_micro_batch_size_per_gpu] # The batch size for one forward pass in the computation of log_prob. Global batch size. log_prob_micro_batch_size: null # The batch size for one forward pass in the computation of log_prob. Local batch size per GPU. log_prob_micro_batch_size_per_gpu: null # enable dynamic batch size (sequence packing) for log_prob computation log_prob_use_dynamic_bsz: ${actor_rollout_ref.actor.use_dynamic_bsz} # the max token length per GPU log_prob_max_token_len_per_gpu: ${actor_rollout_ref.actor.ppo_max_token_len_per_gpu} # sequence parallel size ulysses_sequence_parallel_size: ${actor_rollout_ref.actor.ulysses_sequence_parallel_size} # calculate entropy with chunking to reduce memory peak entropy_from_logits_with_chunking: False # recompute entropy entropy_checkpointing: False # Rollout model config. rollout: # actor_rollout_ref.rollout.name: hf/vllm/sglang. name: vllm # sync: LLM, async: AsyncLLM mode: sync # Sampling temperature for rollout. temperature: 1.0 # Top-k sampling parameter. -1 for vLLM rollout, 0 for HF rollout. top_k: -1 # Top-p sampling parameter. Default 1.0. 
top_p: 1 # typically the same as data max prompt length prompt_length: ${data.max_prompt_length} # typically the same as data max response length response_length: ${data.max_response_length} # for vllm rollout # Rollout model parameters type. Align with actor model's FSDP/Megatron type. dtype: bfloat16 # Fraction of GPU memory used by vLLM/SGLang for KV cache. gpu_memory_utilization: 0.5 # Whether to ignore EOS and continue generating after EOS is hit. ignore_eos: False # Whether to disable CUDA graph. Default True to allow cache freeing. enforce_eager: False # Whether to free engine KVCache after generation. Set enforce_eager=True when enabled. free_cache_engine: True # Which loader to use for rollout model weights: dummy_dtensor, hf, megatron, etc. # safetensors (for huge model, and set use_shm=True); dummy_dtensor: randomly init model weight load_format: dummy # for huge model, layered summon can save memory (prevent OOM) but make it slower layered_summon: False # TP size for rollout. Only effective for vLLM. tensor_model_parallel_size: 2 # max number of tokens in a batch max_num_batched_tokens: 8192 # max length for rollout max_model_len: null # max length of sequences max_num_seqs: 1024 # [Will be deprecated, use log_prob_micro_batch_size_per_gpu] The batch size for one forward pass in the computation of log_prob. Global batch size. log_prob_micro_batch_size: null # The batch size for one forward pass in the computation of log_prob. Local batch size per GPU. log_prob_micro_batch_size_per_gpu: null # enable dynamic batch size (sequence packing) for log_prob computation log_prob_use_dynamic_bsz: ${actor_rollout_ref.actor.use_dynamic_bsz} # max token length for log_prob computation log_prob_max_token_len_per_gpu: ${actor_rollout_ref.actor.ppo_max_token_len_per_gpu} # disable logging statistics disable_log_stats: True # may get higher throughput when set to True. When activated, Please increase max_num_batched_tokens or decrease max_model_len. enable_chunked_prefill: True # for hf rollout # Whether to sample during training rollout. False uses greedy sampling. do_sample: True # number of responses (i.e. num sample times). > 1 for grpo n: 1 # Whether to wake up inference engine in multi-stage to reduce peak memory during training-rollout transition. multi_stage_wake_up: false # Extra inference engine arguments, please refer vllm/sglang official doc for detail engine_kwargs: # vllm engine config vllm: {} # sglang engine config sglang: {} # Sampling parameters used during validation. val_kwargs: # sampling parameters for validation # Top-k sampling parameter. -1 for vLLM rollout, 0 for HF rollout. top_k: -1 # Top-p sampling parameter. Default 1.0. top_p: 1.0 # Sampling temperature for rollout. temperature: 0 # whether to repeat n times for validation n: 1 # Whether to sample during training rollout. False uses greedy sampling. do_sample: False # Multi-turn interaction config for tools or chat. 
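# Minimal sketch of a tool-use setup (hypothetical values): enable: True with rollout.name set to sglang and tool_config_path pointing at a tool-registry YAML; the keys below then bound turn counts and tool-response length.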
multi_turn: # set to True for multi-turn tool interaction tasks; should set rollout.name to sglang as well enable: False # null for no limit (default max_length // 3) max_assistant_turns: null # null for no tool tool_config_path: null # null for no limit (default max_length // 3) max_user_turns: null # max parallel call for tools in single turn max_parallel_calls: 1 # max length of tool response max_tool_response_length: 256 # truncate side of tool response: left, middle, right tool_response_truncate_side: middle # null for no interaction interaction_config_path: null # - When set to True, the model's default chat template is used for multi-turn rollout, which typically matches production behavior. # - When set to False, the token ids recorded for training are used instead; unlike the default chat template, these always include the model's full output, # which may contain additional content such as reasoning content. This maintains the consistency between training and rollout, but it will lead to longer prompts. use_inference_chat_template: False # Tokenization is performed turn by turn and the resulting token ids are concatenated to form the full conversation. # To ensure this matches the result of tokenizing the entire conversation at once, a sanity check is run at the end of each multi-turn rollout to compare the two sets of token ids. # Some models are known to produce different tokenization results when tokenizing turn by turn vs. all at once. This behavior has already been validated for them. # To reduce excessive warnings, you can turn off the sanity check for these models if you are using their default chat template: # Qwen/QwQ-32B, Qwen/Qwen3-xxB # - disable: disable tokenization sanity check # - strict: enable strict tokenization sanity check (default) # - ignore_strippable: ignore strippable tokens when checking tokenization sanity tokenization_sanity_check_mode: strict # Format of the multi-turn interaction. Options: hermes, llama3_json, ... format: hermes # supports logging rollout probs for debugging purposes calculate_log_probs: False # [Experimental] agent-loop-based rollout configs agent: # Number of agent loop workers num_workers: 8 # custom async server configs custom_async_server: # Path to the custom async server implementation path: null # Class name of the custom async server class (e.g. AsyncvLLMServer) name: null # profiler configs profiler: # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs _target_: verl.utils.profiler.ProfilerConfig # True for each task has its own database, False for all tasks in one training step share one database. discrete: False # Whether to profile all ranks. all_ranks: False # The ranks that will be profiled. [] or [0,1,...] ranks: [] # configs for the critic critic: # Number of rollouts per update (mirrors actor rollout_n) rollout_n: ${actor_rollout_ref.rollout.n} # fsdp or fsdp2 strategy used for critic model training strategy: ${actor_rollout_ref.actor.strategy} # optimizer configs optim: # Learning rate lr: 1e-5 # Warmup steps ratio; total steps will be injected at runtime lr_warmup_steps_ratio: 0.
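# Presumed semantics, as in the actor optimizer above: warmup length is derived as ratio * total_training_steps once total steps are injected at runtime, e.g. a ratio of 0.05 over 2000 injected steps warms up for 100 steps.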
# Minimum LR ratio for cosine schedule min_lr_ratio: 0.0 # LR scheduler type: "constant" or "cosine" lr_scheduler_type: constant # Total training steps (must be overridden at runtime) total_training_steps: -1 # Weight decay weight_decay: 0.01 # model config for the critic model: # Path to pretrained model weights path: ~/models/deepseek-llm-7b-chat # Whether to use shared memory for loading the model use_shm: False # Tokenizer path (defaults to actor's model path) tokenizer_path: ${actor_rollout_ref.model.path} # Hugging Face config override override_config: { } # External model implementation (optional) external_lib: ${actor_rollout_ref.model.external_lib} # Enable gradient checkpointing to save memory enable_gradient_checkpointing: True # Offload activations to CPU to reduce GPU memory usage enable_activation_offload: False # Use remove padding optimization (saves compute) use_remove_padding: False # Whether to trust remote code from Hugging Face models trust_remote_code: ${actor_rollout_ref.model.trust_remote_code} # FSDP-specific config fsdp_config: # Whether to offload model parameters to CPU param_offload: False # Whether to offload optimizer state to CPU optimizer_offload: False # Only for FSDP2: offload param/grad/optimizer during train offload_policy: False # Only for FSDP2: Reshard after forward pass to reduce memory footprint reshard_after_forward: True # Policy for wrapping layers with FSDP wrap_policy: # Minimum number of parameters to trigger wrapping min_num_params: 0 # Number of GPUs in each FSDP shard group; -1 means auto fsdp_size: -1 # Only for FSDP1: FSDP1 configuration, prefetch the next forward-pass all-gather # before the current forward computation. forward_prefetch: False # Set to positive value to enable LoRA (e.g., 32) lora_rank: 0 # LoRA scaling factor lora_alpha: 16 # LoRA target modules: "all-linear" or list of linear projection layers target_modules: all-linear # PPO mini-batch size per update ppo_mini_batch_size: ${actor_rollout_ref.actor.ppo_mini_batch_size} # [Deprecated] Global micro batch size ppo_micro_batch_size: null # Local per-GPU micro batch size ppo_micro_batch_size_per_gpu: null # Forward-only batch size (global) forward_micro_batch_size: ${critic.ppo_micro_batch_size} # Forward-only batch size (per GPU) forward_micro_batch_size_per_gpu: ${critic.ppo_micro_batch_size_per_gpu} # Whether to automatically adjust batch size at runtime use_dynamic_bsz: ${actor_rollout_ref.actor.use_dynamic_bsz} # Max tokens per GPU in one PPO batch (doubled for critic) ppo_max_token_len_per_gpu: 32768 # Max token length per GPU in forward pass forward_max_token_len_per_gpu: ${critic.ppo_max_token_len_per_gpu} # Sequence parallelism size for Ulysses-style model parallelism ulysses_sequence_parallel_size: 1 # Number of PPO epochs per batch ppo_epochs: ${actor_rollout_ref.actor.ppo_epochs} # Shuffle training data across PPO epochs shuffle: ${actor_rollout_ref.actor.shuffle} # Gradient clipping for critic updates grad_clip: 1.0 # PPO value function clipping range cliprange_value: 0.5 # Loss aggregation mode: "token-mean", "seq-mean-token-sum", or "seq-mean-token-mean" loss_agg_mode: ${actor_rollout_ref.actor.loss_agg_mode} # checkpoint configs checkpoint: # What to include in saved checkpoints # with 'hf_model' you can save whole model as hf format, now only use sharded model checkpoint to save space save_contents: ['model', 'optimizer', 'extra'] # What to include when loading checkpoints load_contents: ${critic.checkpoint.save_contents} # profiler configs # the 
corresponding dataclass is verl.utils.profiler.ProfilerConfig. profiler: # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs _target_: verl.utils.profiler.ProfilerConfig # True for each task has its own database, False for all tasks in one training step share one database. discrete: False # Whether to profile all ranks. all_ranks: False # The ranks that will be profiled. [] or [0,1,...] ranks: [] # configs for the reward model reward_model: # Whether to enable reward model. If False, we compute the reward only with the user-defined reward functions. # In GSM8K and Math examples, we disable reward model. # For RLHF alignment example using full_hh_rlhf, we utilize reward model to assess the responses. # If False, the following parameters are not effective enable: False # FSDP strategy: "fsdp" or "fsdp2" strategy: ${actor_rollout_ref.actor.strategy} # model config for reward scoring model: # Input tokenizer. If the reward model’s chat template is inconsistent with the policy, # we need to first decode to plaintext, then apply the rm’s chat_template. # Then score with RM. If chat_templates are consistent, it can be set to null. input_tokenizer: ${actor_rollout_ref.model.path} # RM’s HDFS path or local path. Note that RM only supports AutoModelForSequenceClassification. # Other model types need to define their own RewardModelWorker and pass it from the code. path: ~/models/FsfairX-LLaMA3-RM-v0.1 # Whether to use shared memory for loading the model use_shm: False # External model implementation (optional) external_lib: ${actor_rollout_ref.model.external_lib} # Use remove padding optimization (saves compute) use_remove_padding: False # Whether to use fused reward kernels for speedup use_fused_kernels: ${actor_rollout_ref.model.use_fused_kernels} # Whether to enable loading a remote code model, default to False trust_remote_code: False # FSDP-specific config fsdp_config: # Policy for wrapping layers with FSDP wrap_policy: # Minimum number of parameters to trigger wrapping min_num_params: 0 # Whether to offload model parameters to CPU param_offload: False # Only for FSDP2: Reshard after forward pass to reduce memory footprint reshard_after_forward: True # Number of GPUs in each FSDP shard group; -1 means auto fsdp_size: -1 # Only for FSDP1: FSDP1 configuration, prefetch the next forward-pass all-gather # before the current forward computation. forward_prefetch: False # [Deprecated] Global micro batch size micro_batch_size: null # Local per-GPU micro batch size micro_batch_size_per_gpu: null # Maximum sequence length to process for scoring max_length: null # Sequence parallelism size for Ulysses-style model parallelism ulysses_sequence_parallel_size: 1 # Whether to dynamically adjust batch size at runtime use_dynamic_bsz: ${critic.use_dynamic_bsz} # Maximum number of tokens per GPU in one forward pass forward_max_token_len_per_gpu: ${critic.forward_max_token_len_per_gpu} # Reward Manager. This defines the mechanism of computing rule-based reward and handling different reward sources. # Default is naive. If all verification functions are multiprocessing-safe, # the reward manager can be set to prime for parallel verification. 
reward_manager: naive # Whether to launch custom reward function asynchronously during log_prob launch_reward_fn_async: False # Cloud/local sandbox fusion configuration for custom reward logic sandbox_fusion: # Cloud/local function URL for sandbox execution url: null # Max concurrent requests allowed to sandbox max_concurrent: 64 # Max memory limit for each sandbox process in MB memory_limit_mb: 1024 # profiler configs profiler: # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs _target_: verl.utils.profiler.ProfilerConfig # True for each task has its own database, False for all tasks in one training step share one database. discrete: False # Whether to profile all ranks. all_ranks: False # The ranks that will be profiled. [] or [0,1,...] ranks: [] # custom reward function definition custom_reward_function: # The path to the file containing your customized reward function. # If not specified, pre-implemented reward functions will be used. path: null # The name of the reward function within the specified file. Default is 'compute_score'. name: compute_score # config for the algorithm algorithm: # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs _target_: verl.trainer.config.AlgoConfig # Discount factor for future rewards gamma: 1.0 # Trade-off between bias and variance in the GAE estimator lam: 1.0 # Advantage estimator type: "gae", "grpo", "reinforce_plus_plus", etc. adv_estimator: gae # Whether to normalize advantages by std (specific to GRPO) norm_adv_by_std_in_grpo: True # Whether to enable in-reward KL penalty use_kl_in_reward: False # How to estimate KL divergence: "kl", "abs", "mse", "low_var_kl", or "full" kl_penalty: kl # KL control configuration kl_ctrl: # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs _target_: verl.trainer.config.KLControlConfig # KL control type: "fixed" or "adaptive" type: fixed # Initial coefficient for KL penalty kl_coef: 0.001 # Horizon value for adaptive controller (if enabled) horizon: 10000 # Target KL divergence (used for adaptive controller) target_kl: 0.1 # Whether to enable preference feedback PPO use_pf_ppo: False # Preference feedback PPO settings pf_ppo: # Method for reweighting samples: "pow", "max_min", or "max_random" reweight_method: pow # Power used for weight scaling in "pow" method weight_pow: 2.0 # config for the trainer trainer: # Whether to balance batch sizes across distributed workers balance_batch: True # Number of epochs in training total_epochs: 30 # Total training steps (can be set explicitly or derived from epochs) total_training_steps: null # The steps that will be profiled. null means no profiling. null or [1,2,5,...] profile_steps: null # controller Nvidia Nsight Systems Options. Must set when profile_steps is not None. ## reference https://docs.nvidia.com/nsight-systems/UserGuide/index.html ## reference https://docs.ray.io/en/latest/ray-observability/user-guides/profiling.html controller_nsight_options: # Select the API(s) to be traced. trace: "cuda,nvtx,cublas,ucx" # Track the GPU memory usage by CUDA kernels. Must be string type "true" or "false". cuda-memory-usage: "true" # CUDA graphs will be traced as a whole cuda-graph-trace: "graph" # worker Nvidia Nsight Systems Options. Must set when profile_steps is not None. worker_nsight_options: # Select the API(s) to be traced. trace: "cuda,nvtx,cublas,ucx" # Track the GPU memory usage by CUDA kernels. Must be string type "true" or "false". 
cuda-memory-usage: "true" # CUDA graphs will be traced as a whole cuda-graph-trace: "graph" # Profiling only in a range of torch.cuda.profiler.start and stop. Do not change this config. capture-range: "cudaProfilerApi" # Specify the desired behavior when a capture range ends. # In verl we need the orch.cuda.profiler.start/stop pair to repeats n times. # valid values are "repeat-shutdown:n" or null. # For normal whole step profiling, n = len(profile_steps); # but for discrete profiling, n = len(profile_steps) * Number(subtasks). # Or you can just leave it null and the program will use n = len(profile_steps) * 6; capture-range-end: null # Send signal to the target application's process group. We let the program to exit by itself. kill: none # Config for npu profiler. Must set when profile_steps is not None and torch_npu is available. npu_profile: # Options for the npu profiler options: # Storage path of collected data. save_path: ./profiler_data # The roles that will be profiled. Only takes effect in discrete mode. # optional values: all, rollout_generate, actor_compute_log_prob, actor_update and ref_compute_log_prob. # "all" means all roles will be profiled. roles: ["all"] # Collection level, optional values: level_none, level0, level1, level2. level: level1 # Whether to enable memory analysis. with_memory: False # Whether to record tensor shape. record_shapes: False # Whether to record Device-side performance data. with_npu: True # Whether to record Host-side performance data. with_cpu: True # Whether to record Python call stack information. with_module: False # Whether to record operator call stack information. with_stack: False # Whether to automatically parse the data. analysis: True # Project name for experiment tracking (e.g., wandb) project_name: verl_examples # Experiment name for run identification in tracking tools experiment_name: gsm8k # Logging backends to use: "console", "wandb", etc. logger: [ 'console', 'wandb' ] # Number of generations to log during validation log_val_generations: 0 # Directory for logging rollout data; no dump if null rollout_data_dir: null # Directory for logging validation data; no dump if null validation_data_dir: null # Number of nodes used in the training nnodes: 1 # Number of GPUs per node n_gpus_per_node: 8 # Save frequency (by iteration) for model checkpoints save_freq: -1 # ESI refers to the elastic server instance used during training, similar to the training plan. For example, # if you purchase 10 hours of computing power, the ESI will automatically shut down after 10 hours of training. # To ensure a checkpoint is saved before ESI shuts down, the system will start saving a checkpoint in advance. # The advance time is calculated as: Advance Time = Longest historical step duration + Checkpoint save duration + esi_redundant_time. # Here, esi_redundant_time is a user-defined value that further extends the advance time for added safety. 
esi_redundant_time: 0 # Resume mode: "auto", "disable", or "resume_path" # "auto": resume from last checkpoint if available # "disable": start from scratch # "resume_path": resume from a user-defined path resume_mode: auto # Path to resume training from (only used when resume_mode is "resume_path") resume_from_path: null # Whether to run validation before training begins val_before_train: True # Whether to run validation only val_only: False # Validation frequency (in training iterations) test_freq: -1 # Number of iterations to warm up the critic before updating policy critic_warmup: 0 # Default path to distributed filesystem for saving checkpoints default_hdfs_dir: null # Whether to delete local checkpoints after loading del_local_ckpt_after_load: False # Default local directory for saving checkpoints default_local_dir: checkpoints/${trainer.project_name}/${trainer.experiment_name} # Maximum number of actor checkpoints to keep max_actor_ckpt_to_keep: null # Maximum number of critic checkpoints to keep max_critic_ckpt_to_keep: null # Timeout (in seconds) for Ray worker to wait for registration ray_wait_register_center_timeout: 300 # Device to run training on (e.g., "cuda", "cpu") device: cuda # configs related to ray ray_kwargs: # configs related to ray initialization ray_init: # Number of CPUs for Ray. Use a fixed number instead of null when using SLURM. num_cpus: null # Path to save Ray timeline JSON for performance profiling timeline_json_file: null ================================================ FILE: verl_distillation/tests/trainer/config/test_algo_config_on_cpu.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
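The adaptive variant of the kl_ctrl block above can be pictured with the classic PPO proportional update, where the coefficient is nudged toward target_kl over horizon steps. This is a minimal sketch under those assumptions, with illustrative names; it is not verl's implementation.

# Sketch: adaptive KL control driven by kl_coef / target_kl / horizon.
# Illustrative only; verl's controller may differ in details.
class AdaptiveKLController:
    def __init__(self, kl_coef: float, target_kl: float, horizon: int):
        self.kl_coef = kl_coef
        self.target_kl = target_kl
        self.horizon = horizon

    def update(self, current_kl: float, n_steps: int) -> None:
        # Proportional error vs. target, clipped for stability.
        proportional_error = max(min(current_kl / self.target_kl - 1.0, 0.2), -0.2)
        # Move kl_coef a fraction of the way, scaled by steps relative to horizon.
        self.kl_coef *= 1.0 + proportional_error * n_steps / self.horizon

ctl = AdaptiveKLController(kl_coef=0.001, target_kl=0.1, horizon=10000)
ctl.update(current_kl=0.15, n_steps=256)  # KL above target -> coefficient grows

With type: fixed, by contrast, kl_coef stays at its initial value for the whole run.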
================================================
FILE: verl_distillation/tests/trainer/config/test_algo_config_on_cpu.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
import torch
from omegaconf import OmegaConf

from verl.trainer.config import AlgoConfig, KLControlConfig
from verl.trainer.ppo.core_algos import (
    compute_gae_advantage_return,
    compute_grpo_outcome_advantage,
    get_adv_estimator_fn,
)
from verl.utils.config import omega_conf_to_dataclass


class TestAlgoConfig(unittest.TestCase):
    """Test the AlgoConfig dataclass and its integration with core algorithms."""

    def setUp(self):
        """Set up test fixtures."""
        # Create a sample algorithm config as DictConfig (similar to what comes from YAML)
        self.config_dict = {
            "_target_": "verl.trainer.config.AlgoConfig",
            "gamma": 0.99,
            "lam": 0.95,
            "adv_estimator": "gae",
            "norm_adv_by_std_in_grpo": True,
            "use_kl_in_reward": True,
            "kl_penalty": "kl",
            "kl_ctrl": {
                "_target_": "verl.trainer.config.KLControlConfig",
                "type": "adaptive",
                "kl_coef": 0.002,
                "horizon": 5000,
                "target_kl": 0.05,
            },
            "use_pf_ppo": True,
            "pf_ppo": {"reweight_method": "max_min", "weight_pow": 3.0},
        }
        self.omega_config = OmegaConf.create(self.config_dict)

    def test_dataclass_creation_from_dict(self):
        """Test creating AlgoConfig from dictionary."""
        config = omega_conf_to_dataclass(self.config_dict)
        self.assertIsInstance(config, AlgoConfig)
        self.assertEqual(config.gamma, 0.99)
        self.assertEqual(config.lam, 0.95)
        self.assertEqual(config.adv_estimator, "gae")
        self.assertTrue(config.norm_adv_by_std_in_grpo)
        self.assertTrue(config.use_kl_in_reward)
        self.assertEqual(config.kl_penalty, "kl")
        self.assertTrue(config.use_pf_ppo)

    def test_dataclass_creation_from_omega_config(self):
        """Test creating AlgoConfig from OmegaConf DictConfig."""
        config = omega_conf_to_dataclass(self.omega_config)
        self.assertIsInstance(config, AlgoConfig)
        self.assertEqual(config.gamma, 0.99)
        self.assertEqual(config.lam, 0.95)

    def test_nested_configs(self):
        """Test that nested configurations are properly converted."""
        config = omega_conf_to_dataclass(self.omega_config)
        # Test KL control config
        self.assertIsInstance(config.kl_ctrl, KLControlConfig)
        self.assertEqual(config.kl_ctrl.type, "adaptive")
        self.assertEqual(config.kl_ctrl.kl_coef, 0.002)
        self.assertEqual(config.kl_ctrl.horizon, 5000)
        self.assertEqual(config.kl_ctrl.target_kl, 0.05)
        # Test PF PPO config
        self.assertEqual(config.pf_ppo.get("reweight_method"), "max_min")
        self.assertEqual(config.pf_ppo.get("weight_pow"), 3.0)

    def test_default_values(self):
        """Test that default values are properly set."""
        minimal_config = {"gamma": 0.8}
        config = omega_conf_to_dataclass(minimal_config, AlgoConfig)
        self.assertEqual(config.gamma, 0.8)
        self.assertEqual(config.lam, 1.0)  # default value
        self.assertEqual(config.adv_estimator, "gae")  # default value
        self.assertTrue(config.norm_adv_by_std_in_grpo)  # default value
        self.assertFalse(config.use_kl_in_reward)  # default value
        self.assertEqual(config.kl_penalty, "kl")  # default value
        self.assertFalse(config.use_pf_ppo)  # default value

    def test_get_method_backward_compatibility(self):
        """Test the get method for backward compatibility."""
        config = omega_conf_to_dataclass(self.omega_config)
        # Test existing attribute
        self.assertEqual(config.get("gamma"), 0.99)
        self.assertEqual(config.get("gamma", 1.0), 0.99)
        # Test non-existing attribute
        self.assertIsNone(config.get("non_existing"))
        self.assertEqual(config.get("non_existing", "default"), "default")

    def test_post_init_nested_configs(self):
        """Test that __post_init__ properly initializes nested configs when None."""
        # Create config without nested configs
        minimal_config = AlgoConfig(gamma=0.9)
        # Check that nested configs are initialized
        self.assertIsNotNone(minimal_config.kl_ctrl)
        self.assertIsInstance(minimal_config.kl_ctrl, KLControlConfig)
        assert not minimal_config.pf_ppo

    def test_config_init_from_yaml(self):
        import os

        from hydra import compose, initialize_config_dir

        with initialize_config_dir(config_dir=os.path.abspath("verl/trainer/config")):
            cfg = compose(config_name="ppo_trainer")
        algo_config = omega_conf_to_dataclass(cfg.algorithm)
        from verl.trainer.config import AlgoConfig

        assert isinstance(algo_config, AlgoConfig)


class TestAlgoCompute(unittest.TestCase):
    """Test the AlgoConfig dataclass and its integration with core algorithms."""

    def setUp(self):
        """Set up test fixtures."""
        self.algo_config = AlgoConfig(
            gamma=0.99,
            lam=0.95,
            adv_estimator="gae",
            norm_adv_by_std_in_grpo=True,
            use_kl_in_reward=True,
            kl_penalty="kl",
            kl_ctrl=KLControlConfig(type="adaptive", kl_coef=0.002, horizon=5000, target_kl=0.05),
            use_pf_ppo=True,
            pf_ppo={"reweight_method": "max_min", "weight_pow": 3.0},
        )

    def test_advantage_estimator_with_cfg(self):
        """Test integration with advantage estimators from core_algos."""
        config = self.algo_config
        # Test GAE advantage estimator
        adv_fn = get_adv_estimator_fn(config.adv_estimator)
        self.assertIsNotNone(adv_fn)
        # Test with actual GAE computation
        batch_size, seq_len = 2, 5
        token_level_rewards = torch.randn(batch_size, seq_len)
        values = torch.randn(batch_size, seq_len)
        response_mask = torch.ones(batch_size, seq_len)
        advantages, returns = compute_gae_advantage_return(
            token_level_rewards=token_level_rewards,
            values=values,
            response_mask=response_mask,
            gamma=config.gamma,
            lam=config.lam,
        )
        self.assertEqual(advantages.shape, (batch_size, seq_len))
        self.assertEqual(returns.shape, (batch_size, seq_len))

    def test_grpo_advantage_estimator_with_cfg(self):
        """Test integration with GRPO advantage estimator."""
        grpo_config = AlgoConfig(adv_estimator="grpo", norm_adv_by_std_in_grpo=True)
        # Test GRPO advantage computation
        batch_size, seq_len = 4, 3
        token_level_rewards = torch.tensor([[1.0, 0.5, 0.0], [2.0, 1.0, 0.0], [0.5, 0.2, 0.0], [1.5, 0.8, 0.0]])
        response_mask = torch.ones(batch_size, seq_len)
        index = np.array([0, 0, 1, 1])  # Two groups
        advantages, returns = compute_grpo_outcome_advantage(
            token_level_rewards=token_level_rewards,
            response_mask=response_mask,
            index=index,
            norm_adv_by_std_in_grpo=grpo_config.norm_adv_by_std_in_grpo,
        )
        self.assertEqual(advantages.shape, (batch_size, seq_len))
        self.assertEqual(returns.shape, (batch_size, seq_len))


if __name__ == "__main__":
    unittest.main()
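The GAE computation these tests exercise admits a compact reference form: a backward recursion over time steps. The sketch below is a simplified reference under the textbook definition (delta_t = r_t + gamma * V_{t+1} - V_t; A_t = delta_t + gamma * lam * A_{t+1}); it ignores the multi-turn mask-skipping that verl's compute_gae_advantage_return handles, and is not verl's code.

# Sketch: reference GAE recursion over [batch, seq] tensors. Illustrative only.
import torch

def gae_reference(rewards, values, response_mask, gamma, lam):
    advantages = torch.zeros_like(rewards)
    gae = torch.zeros(rewards.shape[0])
    next_value = torch.zeros(rewards.shape[0])  # V beyond the last step is 0
    for t in reversed(range(rewards.shape[1])):
        delta = rewards[:, t] + gamma * next_value - values[:, t]
        gae = delta + gamma * lam * gae
        advantages[:, t] = gae
        next_value = values[:, t]
    returns = advantages + values
    return advantages * response_mask, returns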
================================================
FILE: verl_distillation/tests/trainer/config/test_legacy_config_on_cpu.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import unittest
import warnings

from hydra import compose, initialize_config_dir
from hydra.core.global_hydra import GlobalHydra
from omegaconf import OmegaConf

_BREAKING_CHANGES = [
    "critic.optim.lr",  # mcore critic lr init value 1e-6 -> 1e-5
    "actor_rollout_ref.actor.optim.lr_warmup_steps",  # None -> -1
    "critic.optim.lr_warmup_steps",  # None -> -1
    "actor_rollout_ref.rollout.name",  # vllm -> ???
    "actor_rollout_ref.actor.megatron.expert_tensor_parallel_size",
    "actor_rollout_ref.ref.megatron.expert_tensor_parallel_size",
    "critic.megatron.expert_tensor_parallel_size",
    "reward_model.megatron.expert_tensor_parallel_size",
]


class TestConfigComparison(unittest.TestCase):
    """Test that current configs match their legacy counterparts exactly."""

    ignored_keys = [
        "enable_gradient_checkpointing",
        "gradient_checkpointing_kwargs",
        "activations_checkpoint_method",
        "activations_checkpoint_granularity",
        "activations_checkpoint_num_layers",
        "discrete",
        "profiler",
        "profile",
        "use_profile",
        "npu_profile",
        "profile_steps",
        "worker_nsight_options",
        "controller_nsight_options",
    ]

    def _compare_configs_recursively(
        self, current_config, legacy_config, path="", legacy_allow_missing=True, current_allow_missing=False
    ):
        """Recursively compare two OmegaConf configs and assert they are identical.

        Args:
            legacy_allow_missing (bool): sometimes the legacy megatron config contains fewer keys
                and we allow that to happen
        """
        if isinstance(current_config, dict) and isinstance(legacy_config, dict):
            current_keys = set(current_config.keys())
            legacy_keys = set(legacy_config.keys())
            missing_in_current = legacy_keys - current_keys
            missing_in_legacy = current_keys - legacy_keys
            # Ignore specific keys that are allowed to be missing
            for key in self.ignored_keys:
                if key in missing_in_current:
                    missing_in_current.remove(key)
                if key in missing_in_legacy:
                    missing_in_legacy.remove(key)
            if missing_in_current:
                msg = f"Keys missing in current config at {path}: {missing_in_current}"
                if current_allow_missing:
                    warnings.warn(msg, stacklevel=1)
                else:
                    self.fail(f"Keys missing in current config at {path}: {missing_in_current}")
            if missing_in_legacy:
                # if the legacy config is allowed to miss keys, only warn; otherwise fail
                msg = f"Keys missing in legacy config at {path}: {missing_in_legacy}"
                if legacy_allow_missing:
                    warnings.warn(msg, stacklevel=1)
                else:
                    self.fail(msg)
            for key in current_keys:
                current_path = f"{path}.{key}" if path else key
                if key in legacy_config:
                    self._compare_configs_recursively(current_config[key], legacy_config[key], current_path)
        elif isinstance(current_config, list) and isinstance(legacy_config, list):
            self.assertEqual(
                len(current_config),
                len(legacy_config),
                f"List lengths differ at {path}: current={len(current_config)}, legacy={len(legacy_config)}",
            )
            for i, (current_item, legacy_item) in enumerate(zip(current_config, legacy_config, strict=True)):
                self._compare_configs_recursively(current_item, legacy_item, f"{path}[{i}]")
        elif path not in _BREAKING_CHANGES:
            self.assertEqual(
                current_config,
                legacy_config,
                f"Values differ at {path}: current={current_config}, legacy={legacy_config}",
            )

    def test_ppo_trainer_config_matches_legacy(self):
        """Test that ppo_trainer.yaml matches legacy_ppo_trainer.yaml exactly."""
        import os

        from hydra import compose, initialize_config_dir
        from hydra.core.global_hydra import GlobalHydra

        GlobalHydra.instance().clear()
        try:
            with initialize_config_dir(config_dir=os.path.abspath("verl/trainer/config")):
                current_config = compose(config_name="ppo_trainer")
                legacy_config = OmegaConf.load("tests/trainer/config/legacy_ppo_trainer.yaml")
                current_dict = OmegaConf.to_container(current_config, resolve=True)
                legacy_dict = OmegaConf.to_container(legacy_config, resolve=True)
                if "defaults" in current_dict:
                    del current_dict["defaults"]
                self._compare_configs_recursively(current_dict, legacy_dict)
        finally:
            GlobalHydra.instance().clear()

    def test_ppo_megatron_trainer_config_matches_legacy(self):
        """Test that ppo_megatron_trainer.yaml matches legacy_ppo_megatron_trainer.yaml exactly."""
        GlobalHydra.instance().clear()
        try:
            with initialize_config_dir(config_dir=os.path.abspath("verl/trainer/config")):
                current_config = compose(config_name="ppo_megatron_trainer")
                legacy_config = OmegaConf.load("tests/trainer/config/legacy_ppo_megatron_trainer.yaml")
                current_dict = OmegaConf.to_container(current_config, resolve=True)
                legacy_dict = OmegaConf.to_container(legacy_config, resolve=True)
                if "defaults" in current_dict:
                    del current_dict["defaults"]
                self._compare_configs_recursively(
                    current_dict, legacy_dict, legacy_allow_missing=True, current_allow_missing=False
                )
        finally:
            GlobalHydra.instance().clear()

    def test_load_component(self):
        """Test that each component config can be composed on its own."""
        GlobalHydra.instance().clear()
        configs_to_load = [
            ("verl/trainer/config/actor", "dp_actor"),
            ("verl/trainer/config/actor", "megatron_actor"),
            ("verl/trainer/config/ref", "dp_ref"),
            ("verl/trainer/config/ref", "megatron_ref"),
            ("verl/trainer/config/rollout", "rollout"),
        ]
        for config_dir, config_file in configs_to_load:
            try:
                with initialize_config_dir(config_dir=os.path.abspath(config_dir)):
                    compose(config_name=config_file)
            finally:
                GlobalHydra.instance().clear()


if __name__ == "__main__":
    unittest.main()
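These config tests all lean on one Hydra idiom: clear global state, compose from an absolute config directory, resolve interpolations, clear again. Isolated for reference (the path is the one the tests use; the pattern itself is standard Hydra, not verl-specific):

# Sketch: the compose/clear pattern used throughout these config tests.
import os
from hydra import compose, initialize_config_dir
from hydra.core.global_hydra import GlobalHydra
from omegaconf import OmegaConf

GlobalHydra.instance().clear()  # reset any prior Hydra state
try:
    with initialize_config_dir(config_dir=os.path.abspath("verl/trainer/config")):
        cfg = compose(config_name="ppo_trainer")
        plain = OmegaConf.to_container(cfg, resolve=True)  # resolve ${...} interpolations
finally:
    GlobalHydra.instance().clear()  # leave no state behind for the next test

Clearing in a finally block is what lets each test compose a different config without Hydra complaining about double initialization.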
================================================
FILE: verl_distillation/tests/trainer/ppo/__init__.py
================================================
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Tests for the PPO trainer module.
"""
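The next file exercises verl's advantage-estimator registry (register_adv_est / get_adv_estimator_fn). Stripped of verl specifics, the decorator-registry pattern it tests looks roughly like this; names here are illustrative, not verl's code:

# Sketch: a registry populated via a decorator factory.
REGISTRY: dict = {}

def register(name: str):
    def decorator(fn):
        # Re-registering the same function is a no-op; a different function
        # under the same name is a conflict.
        if name in REGISTRY and REGISTRY[name] is not fn:
            raise ValueError(f"{name} is already registered to a different function")
        REGISTRY[name] = fn
        return fn  # the decorator hands back the original function unchanged
    return decorator

@register("gae")
def gae_estimator():
    ...

This shape explains the tests that follow: duplicate registration of the same function passes, a conflicting registration raises ValueError, and the decorated function is returned intact.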
================================================
FILE: verl_distillation/tests/trainer/ppo/test_core_algos_on_cpu.py
================================================
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import random
import unittest

import numpy as np
import pytest
import torch

import verl.trainer.ppo.core_algos
from verl.trainer.ppo.core_algos import (
    compute_gae_advantage_return,
    compute_grpo_outcome_advantage,
    compute_grpo_vectorized_outcome_advantage,
    compute_rloo_outcome_advantage,
    compute_rloo_vectorized_outcome_advantage,
    get_adv_estimator_fn,
    register_adv_est,
)


def mock_test_fn():
    pass


class TestRegisterAdvEst(unittest.TestCase):
    def setUp(self):
        """Clear the registry before each test"""
        verl.trainer.ppo.core_algos.ADV_ESTIMATOR_REGISTRY.clear()
        verl.trainer.ppo.core_algos.ADV_ESTIMATOR_REGISTRY = {
            "gae": lambda x: x * 2,
            "vtrace": lambda x: x + 1,
        }
        self.ADV_ESTIMATOR_REGISTRY = verl.trainer.ppo.core_algos.ADV_ESTIMATOR_REGISTRY

    def tearDown(self) -> None:
        verl.trainer.ppo.core_algos.ADV_ESTIMATOR_REGISTRY.clear()
        return super().tearDown()

    def test_register_new_function(self):
        """Test registering a new function with a string name"""

        @register_adv_est("test_estimator")
        def test_fn():
            pass

        self.assertIn("test_estimator", self.ADV_ESTIMATOR_REGISTRY)
        self.assertEqual(self.ADV_ESTIMATOR_REGISTRY["test_estimator"], test_fn)

    def test_register_with_enum(self):
        """Test registering with an enum value (assuming AdvantageEstimator exists)"""
        from enum import Enum

        class AdvantageEstimator(Enum):
            TEST = "test_enum_estimator"

        @register_adv_est(AdvantageEstimator.TEST)
        def test_fn():
            pass

        self.assertIn("test_enum_estimator", self.ADV_ESTIMATOR_REGISTRY)
        self.assertEqual(self.ADV_ESTIMATOR_REGISTRY["test_enum_estimator"], test_fn)

    def test_duplicate_registration_same_function(self):
        """Test that registering the same function twice doesn't raise an error"""
        register_adv_est("duplicate_test")(mock_test_fn)
        register_adv_est("duplicate_test")(mock_test_fn)
        self.assertEqual(self.ADV_ESTIMATOR_REGISTRY["duplicate_test"], mock_test_fn)

    def test_duplicate_registration_different_function(self):
        """Test that registering different functions with same name raises ValueError"""

        @register_adv_est("conflict_test")
        def test_fn1():
            pass

        with self.assertRaises(ValueError):

            @register_adv_est("conflict_test")
            def test_fn2():
                pass

    def test_decorator_preserves_function(self):
        """Test that the decorator returns the original function"""

        def test_fn():
            return "original"

        decorated = register_adv_est("preserve_test")(test_fn)
        self.assertEqual(decorated(), "original")

    def test_multiple_registrations(self):
        """Test registering multiple different functions"""
        init_adv_count = len(self.ADV_ESTIMATOR_REGISTRY)

        @register_adv_est("estimator1")
        def fn1():
            pass

        @register_adv_est("estimator2")
        def fn2():
            pass

        self.assertEqual(len(self.ADV_ESTIMATOR_REGISTRY), 2 + init_adv_count)
        self.assertEqual(self.ADV_ESTIMATOR_REGISTRY["estimator1"], fn1)
        self.assertEqual(self.ADV_ESTIMATOR_REGISTRY["estimator2"], fn2)

    def test_get_adv_estimator_fn_valid_names(self):
        """Test that valid names return the correct function from registry."""
        # Test GAE
        gae_fn = get_adv_estimator_fn("gae")
        assert gae_fn(5) == 10  # 5 * 2 = 10
        # Test Vtrace
        vtrace_fn = get_adv_estimator_fn("vtrace")
        assert vtrace_fn(5) == 6  # 5 + 1 = 6

    def test_get_adv_estimator_fn_invalid_name(self):
        """Test that invalid names raise ValueError."""
        with pytest.raises(ValueError) as excinfo:
            get_adv_estimator_fn("invalid_name")
        assert "Unknown advantage estimator simply: invalid_name" in str(excinfo.value)

    def test_get_adv_estimator_fn_case_sensitive(self):
        """Test that name lookup is case-sensitive."""
        with pytest.raises(ValueError):
            get_adv_estimator_fn("GAE")  # Different case


def test_multi_turn_compute_gae_advantage_return():
    """Test multi-turn GAE skip observation tokens."""
    gamma = random.uniform(0.0, 1.0)
    lam = random.uniform(0.0, 1.0)
    rewards = torch.tensor([[0.0, 0.0, 0.1, 0.1, 0.1, 0.0, 0.0, 0.1, 1.0, 0.0, 0.0]], dtype=torch.float)
    values1 = torch.tensor(
        [
            [
                random.uniform(-100.0, 100.0),
                random.random(),
                4.0,
                5.0,
                6.0,
                random.uniform(-100.0, 0),
                random.random(),
                7.0,
                9.0,
                0.0,
                0.0,
            ]
        ],
        dtype=torch.float,
    )
    values2 = torch.tensor(
        [
            [
                random.random(),
                random.uniform(-100.0, 100.0),
                4.0,
                5.0,
                6.0,
                random.random(),
                random.uniform(0.0, 100.0),
                7.0,
                9.0,
                0.0,
                0.0,
            ]
        ],
        dtype=torch.float,
    )
    response_mask = torch.tensor([[0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0]], dtype=torch.float)
    adv1, ret1 = compute_gae_advantage_return(rewards, values1, response_mask, gamma, lam)
    adv2, ret2 = compute_gae_advantage_return(rewards, values2, response_mask, gamma, lam)
    ret1 *= response_mask
    ret2 *= response_mask
    assert torch.equal(adv1, adv2), f"{adv1=}, {adv2=}"
    assert torch.equal(ret1, ret2), f"{ret1=}, {ret2=}"
    print(f" [CORRECT] \n\n{adv1=}, \n\n{ret1=}")


def _make_group_index(batch_size: int, num_groups: int) -> np.ndarray:
    """Create a numpy index array ensuring each group has at least 2 samples."""
    assert num_groups * 2 <= batch_size, "batch_size must allow >=2 samples per group"
    counts: list[int] = [2] * num_groups
    remaining = batch_size - 2 * num_groups
    for _ in range(remaining):
        counts[random.randrange(num_groups)] += 1
    index = []
    for gid, c in enumerate(counts):
        index.extend([gid] * c)
    random.shuffle(index)
    return np.asarray(index, dtype=np.int64)


def _rand_mask(batch_size: int, seq_len: int) -> torch.Tensor:
    mask = torch.randint(0, 2, (batch_size, seq_len), dtype=torch.int64).float()
    rows_without_one = (mask.sum(dim=-1) == 0).nonzero(as_tuple=True)[0]
    if len(rows_without_one) > 0:
        mask[rows_without_one, -1] = 1.0
    return mask


@pytest.mark.parametrize(
    "batch_size,seq_len,num_groups,seed",
    [
        (64, 128, 5, 0),
        (128, 256, 8, 1),
        (512, 512, 10, 2),
    ],
)
def test_rloo_and_vectorized_equivalence(batch_size: int, seq_len: int, num_groups: int, seed: int):
    torch.manual_seed(seed)
    random.seed(seed)
    np.random.seed(seed)
    index = _make_group_index(batch_size, num_groups)
    response_mask = _rand_mask(batch_size, seq_len)
    base_rewards = torch.randn(batch_size, seq_len, dtype=torch.float32)
    token_level_rewards = base_rewards * response_mask
    adv1, ret1 = compute_rloo_outcome_advantage(
        token_level_rewards=token_level_rewards,
        response_mask=response_mask,
        index=index,
    )
    adv2, ret2 = compute_rloo_vectorized_outcome_advantage(
        token_level_rewards=token_level_rewards,
        response_mask=response_mask,
        index=index,
    )
    # Print concise diagnostics for visibility during test runs
    adv_max_diff = (adv1 - adv2).abs().max().item()
    ret_max_diff = (ret1 - ret2).abs().max().item()
    total_mask_tokens = int(response_mask.sum().item())
    print(
        f"[RLOO] seed={seed} groups={num_groups} shape={adv1.shape} "
        f"mask_tokens={total_mask_tokens} adv_max_diff={adv_max_diff:.3e} ret_max_diff={ret_max_diff:.3e}"
    )
    assert adv1.shape == adv2.shape == (batch_size, seq_len)
    assert ret1.shape == ret2.shape == (batch_size, seq_len)
    assert torch.allclose(adv1, adv2, rtol=1e-5, atol=1e-6)
    assert torch.allclose(ret1, ret2, rtol=1e-5, atol=1e-6)


@pytest.mark.parametrize(
    "batch_size,seq_len,num_groups,seed",
    [
        (64, 128, 5, 0),
        (128, 256, 8, 1),
        (512, 512, 10, 2),
    ],
)
def test_grpo_and_vectorized_equivalence(batch_size: int, seq_len: int, num_groups: int, seed: int):
    # Set seeds for reproducibility
    torch.manual_seed(seed)
    random.seed(seed)
    np.random.seed(seed)
    # Generate group indices (numpy array of shape [batch_size])
    index = _make_group_index(batch_size, num_groups)
    # Generate binary response mask (at least one valid token per row)
    response_mask = _rand_mask(batch_size, seq_len)
    # Generate token-level rewards and apply mask
    base_rewards = torch.randn(batch_size, seq_len, dtype=torch.float32)
    token_level_rewards = base_rewards * response_mask
    # Compute GRPO outcome advantage (original implementation)
    adv1, ret1 = compute_grpo_outcome_advantage(
        token_level_rewards=token_level_rewards,
        response_mask=response_mask,
        index=index,
    )
    # Compute GRPO outcome advantage (vectorized implementation)
    adv2, ret2 = compute_grpo_vectorized_outcome_advantage(
        token_level_rewards=token_level_rewards,
        response_mask=response_mask,
        index=index,
    )
    # Diagnostic info for visibility (same style as RLOO test)
    adv_max_diff = (adv1 - adv2).abs().max().item()
    ret_max_diff = (ret1 - ret2).abs().max().item()
    total_mask_tokens = int(response_mask.sum().item())
    print(
        f"[GRPO] seed={seed} groups={num_groups} shape={adv1.shape} "
        f"mask_tokens={total_mask_tokens} adv_max_diff={adv_max_diff:.3e} ret_max_diff={ret_max_diff:.3e}"
    )
    # Assert shape and numerical equivalence
    assert adv1.shape == adv2.shape == (batch_size, seq_len)
    assert ret1.shape == ret2.shape == (batch_size, seq_len)
    assert torch.allclose(adv1, adv2, rtol=1e-5, atol=1e-6)
    assert torch.allclose(ret1, ret2, rtol=1e-5, atol=1e-6)


if __name__ == "__main__":
    unittest.main()
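For orientation, the GRPO outcome advantage these equivalence tests check can be written as a short loop: sum each sequence's token rewards into a scalar, normalize within its group, then broadcast back over response tokens. A simplified reference under those definitions (not verl's implementation; in particular, verl's returned "returns" tensor may be defined differently):

# Sketch: group-relative (GRPO) outcome advantage, loop form. Illustrative only.
import numpy as np
import torch

def grpo_outcome_reference(token_level_rewards, response_mask, index, norm_by_std=True, eps=1e-6):
    scores = token_level_rewards.sum(dim=-1)  # one outcome reward per sequence
    advantages = torch.zeros_like(scores)
    for gid in np.unique(index):
        members = torch.from_numpy(index == gid)  # boolean membership mask
        group = scores[members]
        centered = group - group.mean()           # center within the group
        if norm_by_std:
            centered = centered / (group.std() + eps)
        advantages[members] = centered
    # broadcast each sequence's scalar advantage over its response tokens
    return advantages.unsqueeze(-1) * response_mask

The vectorized variant computes the same per-group mean/std with scatter-style ops instead of a Python loop, which is what the allclose assertions above verify.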
""" import unittest from unittest.mock import MagicMock, patch import numpy as np import torch from verl.trainer.ppo.metric_utils import ( bootstrap_metric, calc_maj_val, compute_data_metrics, compute_throughout_metrics, compute_timing_metrics, process_validation_metrics, ) from verl.utils.metric import ( reduce_metrics, ) class TestReduceMetrics(unittest.TestCase): """Tests for the reduce_metrics function.""" def test_reduce_metrics_basic(self): """Test that reduce_metrics correctly computes means.""" metrics = { "loss": [1.0, 2.0, 3.0], "accuracy": [0.0, 0.5, 1.0], } result = reduce_metrics(metrics) self.assertEqual(result["loss"], 2.0) self.assertEqual(result["accuracy"], 0.5) def test_reduce_metrics_empty(self): """Test that reduce_metrics handles empty lists.""" metrics = { "empty": [], } result = reduce_metrics(metrics) self.assertTrue(np.isnan(result["empty"])) def test_reduce_metrics_single_value(self): """Test that reduce_metrics works with single values.""" metrics = { "single": [5.0], } result = reduce_metrics(metrics) self.assertEqual(result["single"], 5.0) class TestComputeDataMetrics(unittest.TestCase): """Tests for the compute_data_metrics function.""" def setUp(self): """Set up common test data.""" # Create a mock DataProto object self.batch = MagicMock() self.batch.batch = { "token_level_scores": torch.tensor([[1.0, 2.0], [3.0, 4.0]]), "token_level_rewards": torch.tensor([[0.5, 1.0], [1.5, 2.0]]), "advantages": torch.tensor([[0.1, 0.2], [0.3, 0.4]]), "returns": torch.tensor([[1.1, 1.2], [1.3, 1.4]]), "responses": torch.zeros((2, 2)), # 2 samples, 2 tokens each "attention_mask": torch.tensor( [ [1, 1, 1, 1], # 2 prompt tokens, 2 response tokens [1, 1, 1, 1], ] ), "response_mask": torch.tensor( [ [1, 1], # 2 response tokens [1, 1], ] ), "values": torch.tensor([[0.9, 1.0], [1.1, 1.2]]), } def test_compute_data_metrics_with_critic(self): """Test compute_data_metrics with critic enabled.""" metrics = compute_data_metrics(self.batch, use_critic=True) # Check that all expected metrics are present self.assertIn("critic/score/mean", metrics) self.assertIn("critic/rewards/mean", metrics) self.assertIn("critic/advantages/mean", metrics) self.assertIn("critic/returns/mean", metrics) self.assertIn("critic/values/mean", metrics) self.assertIn("critic/vf_explained_var", metrics) self.assertIn("response_length/mean", metrics) self.assertIn("prompt_length/mean", metrics) # Check some specific values self.assertAlmostEqual(metrics["critic/score/mean"], 5.0) # Sum of token_level_scores self.assertAlmostEqual(metrics["critic/rewards/mean"], 2.5) # Sum of token_level_rewards def test_compute_data_metrics_without_critic(self): """Test compute_data_metrics with critic disabled.""" metrics = compute_data_metrics(self.batch, use_critic=False) # Check that critic-specific metrics are not present self.assertNotIn("critic/values/mean", metrics) self.assertNotIn("critic/vf_explained_var", metrics) # Check that other metrics are still present self.assertIn("critic/score/mean", metrics) self.assertIn("critic/rewards/mean", metrics) self.assertIn("response_length/mean", metrics) class TestComputeTimingMetrics(unittest.TestCase): """Tests for the compute_timing_metrics function.""" def setUp(self): """Set up common test data.""" # Create a mock DataProto object self.batch = MagicMock() self.batch.batch = { "responses": torch.zeros((2, 3)), # 2 samples, 3 response tokens each "attention_mask": torch.tensor( [ [1, 1, 1, 1, 1, 1], # 3 prompt tokens, 3 response tokens [1, 1, 1, 1, 1, 1], ] ), } # Mock the 
_compute_response_info function to return known values self.response_info = { "prompt_length": torch.tensor([3.0, 3.0]), "response_length": torch.tensor([3.0, 3.0]), "response_mask": torch.ones((2, 3)), } @patch("verl.trainer.ppo.metric_utils._compute_response_info") def test_compute_timing_metrics(self, mock_compute_response_info): """Test compute_timing_metrics with various timing data.""" mock_compute_response_info.return_value = self.response_info timing_raw = { "gen": 0.5, # 500ms "ref": 0.3, # 300ms "values": 0.2, # 200ms } metrics = compute_timing_metrics(self.batch, timing_raw) # Check raw timing metrics self.assertEqual(metrics["timing_s/gen"], 0.5) self.assertEqual(metrics["timing_s/ref"], 0.3) self.assertEqual(metrics["timing_s/values"], 0.2) # Check per-token timing metrics # gen uses only response tokens (6 tokens) self.assertAlmostEqual(metrics["timing_per_token_ms/gen"], 0.5 * 1000 / 6, places=5) # ref and values use all tokens (12 tokens) self.assertAlmostEqual(metrics["timing_per_token_ms/ref"], 0.3 * 1000 / 12, places=5) self.assertAlmostEqual(metrics["timing_per_token_ms/values"], 0.2 * 1000 / 12, places=5) class TestComputeThroughputMetrics(unittest.TestCase): """Tests for the compute_throughout_metrics function.""" def setUp(self): """Set up common test data.""" # Create a mock DataProto object self.batch = MagicMock() self.batch.meta_info = { "global_token_num": [100, 200, 300], # 600 tokens total } def test_compute_throughout_metrics(self): """Test compute_throughout_metrics with various timing data.""" timing_raw = { "step": 2.0, # 2 seconds per step } # Test with 1 GPU metrics = compute_throughout_metrics(self.batch, timing_raw, n_gpus=1) self.assertEqual(metrics["perf/total_num_tokens"], 600) self.assertEqual(metrics["perf/time_per_step"], 2.0) self.assertEqual(metrics["perf/throughput"], 600 / 2.0) # 300 tokens/sec # Test with 2 GPUs metrics = compute_throughout_metrics(self.batch, timing_raw, n_gpus=2) self.assertEqual(metrics["perf/total_num_tokens"], 600) self.assertEqual(metrics["perf/time_per_step"], 2.0) self.assertEqual(metrics["perf/throughput"], 600 / (2.0 * 2)) # 150 tokens/sec/GPU class TestBootstrapMetric(unittest.TestCase): """Tests for the bootstrap_metric function.""" def test_bootstrap_metric_basic(self): """Test bootstrap_metric with simple data and functions.""" data = [1, 2, 3, 4, 5] reduce_fns = [np.mean, np.max] # Use a fixed seed for reproducibility result = bootstrap_metric(data, subset_size=3, reduce_fns=reduce_fns, n_bootstrap=100, seed=42) # Check that we get two results (one for each reduce_fn) self.assertEqual(len(result), 2) # Each result should be a tuple of (mean, std) mean_result, max_result = result self.assertEqual(len(mean_result), 2) self.assertEqual(len(max_result), 2) # The mean of means should be close to the true mean (3.0) self.assertAlmostEqual(mean_result[0], 3.0, delta=0.3) # The mean of maxes should be close to the expected value for samples of size 3 # For samples of size 3 from [1,2,3,4,5], the expected max is around 4.0-4.5 self.assertGreater(max_result[0], 3.5) self.assertLess(max_result[0], 5.0) def test_bootstrap_metric_empty(self): """Test bootstrap_metric with empty data.""" with self.assertRaises(ValueError): bootstrap_metric([], subset_size=1, reduce_fns=[np.mean]) class TestCalcMajVal(unittest.TestCase): """Tests for the calc_maj_val function.""" def test_calc_maj_val_basic(self): """Test calc_maj_val with simple data.""" data = [ {"pred": "A", "val": 0.9}, {"pred": "B", "val": 0.8}, {"pred": "A", "val": 
0.7}, ] result = calc_maj_val(data, vote_key="pred", val_key="val") # "A" is the majority vote, so we should get the first "val" for "A" self.assertEqual(result, 0.9) def test_calc_maj_val_tie(self): """Test calc_maj_val with tied votes.""" data = [ {"pred": "A", "val": 0.9}, {"pred": "B", "val": 0.8}, {"pred": "B", "val": 0.7}, {"pred": "A", "val": 0.6}, ] # In case of a tie, the first key in sorted order wins # This depends on Python's dict implementation, but for this test # we just verify that one of the valid values is returned result = calc_maj_val(data, vote_key="pred", val_key="val") self.assertTrue(result in [0.9, 0.8]) class TestProcessValidationMetrics(unittest.TestCase): """Tests for the process_validation_metrics function.""" def test_process_validation_metrics_basic(self): """Test process_validation_metrics with simple data.""" data_sources = ["source1", "source1", "source2"] sample_inputs = ["prompt1", "prompt1", "prompt2"] infos_dict = { "score": [0.8, 0.9, 0.7], } result = process_validation_metrics(data_sources, sample_inputs, infos_dict, seed=42) # Check the structure of the result self.assertIn("source1", result) self.assertIn("source2", result) # Check that source1 has metrics for score self.assertIn("score", result["source1"]) # Check that mean@2 is present for source1/score self.assertIn("mean@2", result["source1"]["score"]) # Check the value of mean@2 for source1/score self.assertAlmostEqual(result["source1"]["score"]["mean@2"], 0.85) def test_process_validation_metrics_with_pred(self): """Test process_validation_metrics with prediction data.""" data_sources = ["source1", "source1", "source1"] sample_inputs = ["prompt1", "prompt1", "prompt1"] infos_dict = { "score": [0.8, 0.9, 0.7], "pred": ["A", "B", "A"], } result = process_validation_metrics(data_sources, sample_inputs, infos_dict, seed=42) # Check that majority voting metrics are present self.assertIn("maj@2/mean", result["source1"]["score"]) # For bootstrap with n=2, the majority vote could be either A or B # depending on the random sampling, so we don't check the exact value if __name__ == "__main__": unittest.main() ================================================ FILE: verl_distillation/tests/trainer/ppo/test_rollout_is.py ================================================ #!/usr/bin/env python3 # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Quick Sanity Test for Rollout Importance Sampling This is a standalone test script that can be run without pytest to quickly verify the rollout IS implementation is working correctly. 
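The bootstrap_metric behavior checked above boils down to: repeatedly resample subsets of size k with replacement, apply a reduction, and report the mean and spread of the reductions. A self-contained sketch of that idea (illustrative names; not verl's exact code):

# Sketch: bootstrap estimate of a metric at subset size k.
import numpy as np

def bootstrap(data, subset_size, reduce_fn, n_bootstrap=100, seed=42):
    rng = np.random.default_rng(seed)  # fixed seed for reproducibility
    stats = [
        reduce_fn(rng.choice(data, size=subset_size, replace=True))
        for _ in range(n_bootstrap)
    ]
    return float(np.mean(stats)), float(np.std(stats))

mean_of_means, spread = bootstrap([1, 2, 3, 4, 5], subset_size=3, reduce_fn=np.mean)
# mean_of_means is close to the true mean 3.0, which is what the test asserts.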
================================================
FILE: verl_distillation/tests/trainer/ppo/test_rollout_is.py
================================================
#!/usr/bin/env python3
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Quick Sanity Test for Rollout Importance Sampling

This is a standalone test script that can be run without pytest to quickly
verify the rollout IS implementation is working correctly.

For comprehensive integration tests, see:
tests/trainer/ppo/test_rollout_is_integration.py

Usage:
    python test_rollout_is.py

This tests:
- Basic rollout IS functionality (3 levels, 2 modes)
- Metrics completeness (32 total: 21 IS + 11 mismatch metrics)
- Veto mechanism
- Edge cases
"""

import torch

from verl.trainer.ppo.mismatch_helper import compute_mismatch_metrics, compute_rollout_importance_weights


def test_basic_rollout_is():
    """Test basic rollout IS functionality."""
    print("Testing basic rollout IS functionality...")
    # Create test data
    batch_size, seq_length = 4, 10
    device = "cuda" if torch.cuda.is_available() else "cpu"
    # Create slightly different log probs (simulating BF16 vs FP32 mismatch)
    old_log_prob = torch.randn(batch_size, seq_length, device=device)
    rollout_log_prob = old_log_prob + torch.randn(batch_size, seq_length, device=device) * 0.1
    eos_mask = torch.ones(batch_size, seq_length, device=device)

    # Test token-level truncate mode
    print("\n1. Testing token-level truncate mode...")
    weights_proto, modified_response_mask, metrics = compute_rollout_importance_weights(
        old_log_prob=old_log_prob,
        rollout_log_prob=rollout_log_prob,
        response_mask=eos_mask,
        rollout_is_level="token",
        rollout_is_mode="truncate",
        rollout_is_threshold=2.0,
        rollout_is_veto_threshold=1e-4,
    )
    weights = weights_proto.batch["rollout_is_weights"]
    print(f" Weights shape: {weights.shape}")
    print(f" Mean weight: {metrics['mismatch/rollout_is_mean']:.4f}")
    print(f" Max weight: {metrics['mismatch/rollout_is_max']:.4f}")
    print(f" Min weight: {metrics['mismatch/rollout_is_min']:.4f}")
    print(f" Veto fraction: {metrics['mismatch/rollout_is_veto_fraction']:.4f}")
    assert weights.shape == old_log_prob.shape
    assert weights.max() <= 2.0, "Weights should be capped at threshold"
    print(" ✓ Token-level truncate mode passed")

    # Test sequence-level mode
    print("\n2. Testing sequence-level mode...")
    weights_seq_proto, _, metrics_seq = compute_rollout_importance_weights(
        old_log_prob=old_log_prob,
        rollout_log_prob=rollout_log_prob,
        response_mask=eos_mask,
        rollout_is_level="sequence",
        rollout_is_mode="truncate",
        rollout_is_threshold=5.0,
        rollout_is_veto_threshold=1e-4,
    )
    weights_seq = weights_seq_proto.batch["rollout_is_weights"]
    print(f" Mean weight: {metrics_seq['mismatch/rollout_is_mean']:.4f}")
    print(f" Effective sample size: {metrics_seq['mismatch/rollout_is_eff_sample_size']:.4f}")
    # Check that all tokens in a sequence have the same weight
    for i in range(batch_size):
        seq_weights = weights_seq[i, eos_mask[i].bool()]
        assert torch.allclose(seq_weights, seq_weights[0]), "All tokens in sequence should have same weight"
    print(" ✓ Sequence-level mode passed")

    # Test geometric mean mode
    print("\n3. Testing geometric mean mode...")
    weights_geo_proto, _, metrics_geo = compute_rollout_importance_weights(
        old_log_prob=old_log_prob,
        rollout_log_prob=rollout_log_prob,
        response_mask=eos_mask,
        rollout_is_level="geometric",
        rollout_is_mode="mask",
        rollout_is_threshold=1.5,
        rollout_is_threshold_lower=0.5,
        rollout_is_veto_threshold=1e-4,
    )
    print(f" Mean weight: {metrics_geo['mismatch/rollout_is_mean']:.4f}")
    print(f" Masked fraction: {metrics_geo['mismatch/rollout_is_masked_fraction']:.4f}")
    print(" ✓ Geometric mean mode passed")

    # Test veto mechanism
    print("\n4. Testing veto mechanism...")
    # Create data with catastrophic outliers
    old_log_prob_veto = torch.randn(2, 5, device=device)
    rollout_log_prob_veto = old_log_prob_veto.clone()
    # Make one token have catastrophically low ratio
    rollout_log_prob_veto[0, 2] = old_log_prob_veto[0, 2] + 15.0  # ratio ~= 3e-7
    eos_mask_veto = torch.ones(2, 5, device=device)
    weights_veto_proto, modified_response_mask_veto, metrics_veto = compute_rollout_importance_weights(
        old_log_prob=old_log_prob_veto,
        rollout_log_prob=rollout_log_prob_veto,
        response_mask=eos_mask_veto,
        rollout_is_level="token",
        rollout_is_mode="truncate",
        rollout_is_threshold=2.0,
        rollout_is_veto_threshold=1e-4,
    )
    weights_veto = weights_veto_proto.batch["rollout_is_weights"]
    print(f" Veto fraction: {metrics_veto['mismatch/rollout_is_veto_fraction']:.4f}")
    # KEY FIX: Veto is applied via response_mask, not by zeroing weights
    # Check that weights are NON-ZERO (safety-bounded ratios preserved, not zeroed)
    assert weights_veto[0].sum() > 0, "Weights should be non-zero (not zeroed by veto)"
    # Check that response_mask has veto applied
    assert modified_response_mask_veto[0].sum() == 0, "Vetoed sequence should have response_mask zeroed"
    assert modified_response_mask_veto[1].sum() > 0, "Normal sequence should have response_mask unchanged"
    print(" ✓ Veto mechanism passed")

    # Test disabled IS (threshold=None)
    print("\n5. Testing disabled IS...")
    weights_disabled, modified_response_mask_disabled, metrics_disabled = compute_rollout_importance_weights(
        old_log_prob=old_log_prob,
        rollout_log_prob=rollout_log_prob,
        response_mask=eos_mask,
        rollout_is_threshold=None,
    )
    assert weights_disabled is None, "Should return None when threshold is None"
    assert torch.equal(modified_response_mask_disabled, eos_mask), "Should return original mask unchanged"
    assert len(metrics_disabled) == 0, "Should return empty metrics when disabled"
    print(" ✓ Disabled IS passed")

    print("\n✓ All tests passed!")


def test_metrics_completeness():
    """Test that all expected metrics are returned."""
    print("\nTesting metrics completeness...")
    batch_size, seq_length = 3, 8
    device = "cuda" if torch.cuda.is_available() else "cpu"
    old_log_prob = torch.randn(batch_size, seq_length, device=device)
    rollout_log_prob = old_log_prob + torch.randn(batch_size, seq_length, device=device) * 0.2
    eos_mask = torch.ones(batch_size, seq_length, device=device)
    _, _, metrics = compute_rollout_importance_weights(
        old_log_prob=old_log_prob,
        rollout_log_prob=rollout_log_prob,
        response_mask=eos_mask,
        rollout_is_level="token",
        rollout_is_mode="truncate",
        rollout_is_threshold=2.5,
    )
    # Expected IS metrics
    expected_is_metrics = [
        "mismatch/rollout_is_mean",
        "mismatch/rollout_is_max",
        "mismatch/rollout_is_min",
        "mismatch/rollout_is_std",
        "mismatch/rollout_is_eff_sample_size",
        "mismatch/rollout_is_veto_fraction",
        "mismatch/rollout_is_catastrophic_token_fraction",
        "mismatch/rollout_is_ratio_fraction_high",
        "mismatch/rollout_is_ratio_fraction_low",
    ]
    # Expected mismatch/diagnostic metrics (also included now)
    expected_mismatch_metrics = [
        "mismatch/mismatch_training_ppl",
        "mismatch/mismatch_training_log_ppl",
        "mismatch/mismatch_kl",
        "mismatch/mismatch_k3_kl",
        "mismatch/mismatch_rollout_ppl",
        "mismatch/mismatch_rollout_log_ppl",
        "mismatch/mismatch_log_ppl_diff",
        "mismatch/mismatch_log_ppl_abs_diff",
        "mismatch/mismatch_log_ppl_diff_max",
        "mismatch/mismatch_log_ppl_diff_min",
        "mismatch/mismatch_ppl_ratio",
    ]
    expected_metrics = expected_is_metrics + expected_mismatch_metrics
    missing_metrics = [m for m in expected_metrics if m not in metrics]
    if missing_metrics:
        print(f" ✗ Missing metrics: {missing_metrics}")
        return False
    print(f" ✓ All {len(expected_metrics)} expected metrics present")
    print(f" Total metrics returned: {len(metrics)}")
    return True


def test_mismatch_metrics():
    """Test mismatch metrics computation."""
    print("\nTesting mismatch metrics computation...")
    batch_size, seq_length = 4, 12
    device = "cuda" if torch.cuda.is_available() else "cpu"
    # Create test data with some mismatch
    old_log_prob = torch.randn(batch_size, seq_length, device=device) - 2.0  # training policy
    rollout_log_prob = torch.randn(batch_size, seq_length, device=device) - 1.5  # rollout policy (more confident)
    response_mask = torch.ones(batch_size, seq_length, device=device)
    # Test with rollout log probs
    metrics = compute_mismatch_metrics(
        old_log_prob=old_log_prob,
        rollout_log_prob=rollout_log_prob,
        response_mask=response_mask,
    )
    expected_metrics = [
        "mismatch_training_ppl",
        "mismatch_training_log_ppl",
        "mismatch_kl",
        "mismatch_k3_kl",
        "mismatch_rollout_ppl",
        "mismatch_rollout_log_ppl",
        "mismatch_log_ppl_diff",
        "mismatch_log_ppl_abs_diff",
        "mismatch_log_ppl_diff_max",
        "mismatch_log_ppl_diff_min",
        "mismatch_ppl_ratio",
    ]
    for metric in expected_metrics:
        assert metric in metrics, f"Missing metric: {metric}"
    print(f" Training PPL: {metrics['mismatch_training_ppl']:.4f}")
    print(f" Rollout PPL: {metrics['mismatch_rollout_ppl']:.4f}")
    print(f" KL divergence: {metrics['mismatch_kl']:.6f}")
    print(f" K3 KL: {metrics['mismatch_k3_kl']:.6f}")
    print(f" PPL ratio: {metrics['mismatch_ppl_ratio']:.4f}")
    print(f" ✓ All {len(expected_metrics)} mismatch metrics present")
    # Test without rollout log probs
    metrics_no_rollout = compute_mismatch_metrics(
        old_log_prob=old_log_prob,
        rollout_log_prob=None,
        response_mask=response_mask,
    )
    assert "mismatch_training_ppl" in metrics_no_rollout
    assert "mismatch_rollout_ppl" not in metrics_no_rollout
    print(" ✓ Mismatch metrics work without rollout log probs")


def test_mask_mode():
    """Test mask mode applies rejection via response_mask, keeps true IS weights."""
    print("\nTesting mask mode behavior...")
    batch_size = 2
    seq_length = 5
    device = "cuda" if torch.cuda.is_available() else "cpu"
    # Sequence 0: ratio ≈ 0.37 (below 0.5, should be rejected)
    # Sequence 1: ratio ≈ 1.65 (in [0.5, 2.0], should be accepted)
    old_log_prob = torch.tensor([[-2.0] * seq_length, [-2.0] * seq_length], device=device)
    rollout_log_prob = torch.tensor(
        [
            [-1.0] * seq_length,  # exp(-2.0 - (-1.0)) = exp(-1.0) ≈ 0.37
            [-2.5] * seq_length,  # exp(-2.0 - (-2.5)) = exp(0.5) ≈ 1.65
        ],
        device=device,
    )
    response_mask = torch.ones(batch_size, seq_length, device=device)
    weights_proto, modified_response_mask, metrics = compute_rollout_importance_weights(
        old_log_prob=old_log_prob,
        rollout_log_prob=rollout_log_prob,
        response_mask=response_mask,
        rollout_is_level="token",
        rollout_is_mode="mask",
        rollout_is_threshold=2.0,
        rollout_is_threshold_lower=0.5,
        rollout_is_veto_threshold=None,
    )
    weights = weights_proto.batch["rollout_is_weights"]
    # KEY FIX: Weights should be safety-bounded ratios (NOT zeroed)
    assert torch.all(weights[0, :] > 0), "Weights should remain as safety-bounded ratios (not zeroed)"
    assert torch.allclose(weights[0, 0], torch.tensor(0.368, device=device), atol=0.01), (
        "First seq ratio should be ≈0.37"
    )
    assert torch.allclose(weights[1, 0], torch.tensor(1.649, device=device), atol=0.01), (
        "Second seq ratio should be ≈1.65"
    )
    # Rejection should be applied via response_mask
    assert torch.all(modified_response_mask[0, :] == 0), "First sequence should be rejected via mask"
    assert torch.all(modified_response_mask[1, :] == 1), "Second sequence should be accepted"
    # Verify mask metrics exist
    assert "mismatch/rollout_is_masked_fraction" in metrics
    assert abs(metrics["mismatch/rollout_is_masked_fraction"] - 0.5) < 0.01, "Should reject 50% of tokens"
    print(f" First seq IS weight: {weights[0, 0]:.4f} (expected ≈0.37)")
    print(f" Second seq IS weight: {weights[1, 0]:.4f} (expected ≈1.65)")
    print(f" First seq mask: {modified_response_mask[0, 0]:.0f} (expected 0 - rejected)")
    print(f" Second seq mask: {modified_response_mask[1, 0]:.0f} (expected 1 - accepted)")
    print(f" Masked fraction: {metrics['mismatch/rollout_is_masked_fraction']:.2f}")
    print(" ✓ Mask mode correctly separates IS weights from rejection")


if __name__ == "__main__":
    print("=" * 60)
    print("Rollout Importance Sampling Test Suite")
    print("=" * 60)
    try:
        test_basic_rollout_is()
        test_metrics_completeness()
        test_mismatch_metrics()
        test_mask_mode()
        print("\n" + "=" * 60)
        print("ALL TESTS PASSED ✓")
        print("=" * 60)
    except Exception as e:
        print(f"\n✗ Test failed with error: {e}")
        import traceback

        traceback.print_exc()
        exit(1)
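The three aggregation levels the sanity tests exercise differ only in where the log-ratio is aggregated before exponentiation. A sketch under the definitions implied by the tests (log-ratio = old_log_prob - rollout_log_prob per token); this is illustrative shape-level code, not verl's implementation, which additionally applies safety bounds and the veto:

# Sketch: token / sequence / geometric IS weight aggregation.
import torch

def is_weights_sketch(old_log_prob, rollout_log_prob, response_mask, level):
    log_ratio = (old_log_prob - rollout_log_prob) * response_mask
    if level == "token":
        return torch.exp(log_ratio)  # one weight per token
    per_seq = log_ratio.sum(dim=-1)  # "sequence": product of token ratios
    if level == "geometric":
        # geometric mean of token ratios = exp(mean of log-ratios)
        per_seq = per_seq / response_mask.sum(dim=-1).clamp(min=1)
    # broadcast the per-sequence weight to every token, matching the
    # "all tokens in sequence should have same weight" assertion above
    return torch.exp(per_seq).unsqueeze(-1).expand_as(response_mask)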
""" # First compute IS weights (as trainer would do centrally) rollout_is_weights_proto, _, _ = compute_rollout_importance_weights( old_log_prob=sample_data["old_log_prob"], rollout_log_prob=sample_data["rollout_log_prob"], response_mask=sample_data["response_mask"], rollout_is_level="token", rollout_is_mode="truncate", rollout_is_threshold=2.0, rollout_is_veto_threshold=1e-4, ) rollout_is_weights = rollout_is_weights_proto.batch["rollout_is_weights"] # Policy loss function receives pre-computed IS weights pg_loss, pg_clipfrac, ppo_kl, pg_clipfrac_lower = compute_policy_loss_vanilla( old_log_prob=sample_data["old_log_prob"], log_prob=sample_data["log_prob"], advantages=sample_data["advantages"], response_mask=sample_data["response_mask"], loss_agg_mode="token-mean", config=config_with_rollout_is, rollout_is_weights=rollout_is_weights, ) # Check loss is valid assert isinstance(pg_loss, torch.Tensor) assert pg_loss.ndim == 0 # Scalar assert not torch.isnan(pg_loss) assert not torch.isinf(pg_loss) def test_rollout_is_weights_computation(self, sample_data): """Test rollout IS weights and metrics computation.""" weights_proto, _, metrics = compute_rollout_importance_weights( old_log_prob=sample_data["old_log_prob"], rollout_log_prob=sample_data["rollout_log_prob"], response_mask=sample_data["response_mask"], rollout_is_level="token", rollout_is_mode="truncate", rollout_is_threshold=2.0, rollout_is_veto_threshold=1e-4, ) # Check weights from verl.protocol import DataProto assert isinstance(weights_proto, DataProto) weights = weights_proto.batch["rollout_is_weights"] assert isinstance(weights, torch.Tensor) assert weights.shape == sample_data["old_log_prob"].shape # Check metrics are returned assert isinstance(metrics, dict) assert len(metrics) > 0 assert "mismatch/rollout_is_mean" in metrics def test_all_aggregation_levels(self, sample_data): """Test all three aggregation levels.""" levels = ["token", "sequence", "geometric"] for level in levels: _, _, metrics = compute_rollout_importance_weights( old_log_prob=sample_data["old_log_prob"], rollout_log_prob=sample_data["rollout_log_prob"], response_mask=sample_data["response_mask"], rollout_is_level=level, rollout_is_mode="truncate", rollout_is_threshold=2.0, ) assert "mismatch/rollout_is_mean" in metrics def test_both_bounding_modes(self, sample_data): """Test both truncate and mask modes.""" modes = ["truncate", "mask"] for mode in modes: _, _, metrics = compute_rollout_importance_weights( old_log_prob=sample_data["old_log_prob"], rollout_log_prob=sample_data["rollout_log_prob"], response_mask=sample_data["response_mask"], rollout_is_level="token", rollout_is_mode=mode, rollout_is_threshold=2.0, rollout_is_threshold_lower=0.5, ) assert "mismatch/rollout_is_mean" in metrics def test_mismatch_metrics(self, sample_data): """Test mismatch diagnostic metrics computation.""" metrics = compute_mismatch_metrics( old_log_prob=sample_data["old_log_prob"], rollout_log_prob=sample_data["rollout_log_prob"], response_mask=sample_data["response_mask"], ) # Check key metrics are present assert "mismatch_training_ppl" in metrics assert "mismatch_rollout_ppl" in metrics assert "mismatch_kl" in metrics assert isinstance(metrics["mismatch_kl"], float) def test_veto_mechanism(self): """Test veto mechanism with catastrophic outliers.""" batch_size, seq_length = 2, 5 device = "cuda" if torch.cuda.is_available() else "cpu" old_log_prob = torch.randn(batch_size, seq_length, device=device) rollout_log_prob = old_log_prob.clone() # Create catastrophic outlier in first 
sequence rollout_log_prob[0, 2] += 15.0 # Makes ratio ~3e-7 response_mask = torch.ones(batch_size, seq_length, device=device) _, _, metrics = compute_rollout_importance_weights( old_log_prob=old_log_prob, rollout_log_prob=rollout_log_prob, response_mask=response_mask, rollout_is_level="token", rollout_is_mode="truncate", rollout_is_threshold=2.0, rollout_is_veto_threshold=1e-4, ) # Should have vetoed one sequence assert metrics["mismatch/rollout_is_veto_fraction"] > 0 assert metrics["mismatch/rollout_is_veto_fraction"] <= 1.0 def test_metrics_only_mode(self, sample_data, config_with_rollout_is): """Test metrics-only mode: compute IS weights/metrics but don't apply to loss. This tests the use case where rollout_is_threshold is set (enables computation) but rollout_is=False (disables weight application to policy loss). """ # Compute IS weights (as trainer would do) rollout_is_weights_proto, _, is_metrics = compute_rollout_importance_weights( old_log_prob=sample_data["old_log_prob"], rollout_log_prob=sample_data["rollout_log_prob"], response_mask=sample_data["response_mask"], rollout_is_level="token", rollout_is_mode="truncate", rollout_is_threshold=2.0, ) # Metrics should be computed assert len(is_metrics) > 0 assert "mismatch/rollout_is_mean" in is_metrics # In metrics-only mode, we compute loss WITHOUT applying weights # (simulating rollout_is=False) pg_loss_no_weights, _, _, _ = compute_policy_loss_vanilla( old_log_prob=sample_data["old_log_prob"], log_prob=sample_data["log_prob"], advantages=sample_data["advantages"], response_mask=sample_data["response_mask"], loss_agg_mode="token-mean", config=config_with_rollout_is, rollout_is_weights=None, # Don't apply weights ) # Compare to loss WITH weights (rollout_is=True) rollout_is_weights = rollout_is_weights_proto.batch["rollout_is_weights"] pg_loss_with_weights, _, _, _ = compute_policy_loss_vanilla( old_log_prob=sample_data["old_log_prob"], log_prob=sample_data["log_prob"], advantages=sample_data["advantages"], response_mask=sample_data["response_mask"], loss_agg_mode="token-mean", config=config_with_rollout_is, rollout_is_weights=rollout_is_weights, ) # Losses should be different (weights have an effect) assert not torch.allclose(pg_loss_no_weights, pg_loss_with_weights) if __name__ == "__main__": pytest.main([__file__, "-v", "-s"]) ================================================ FILE: verl_distillation/tests/utils/_test_module.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
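# A hedged usage sketch of what the loader is expected to do with this module (the exact
# signature of load_extern_type is assumed here, not verified against
# verl.utils.import_utils):
#
#     TestClass = load_extern_type("tests/utils/_test_module.py", "TestClass")
#     assert TestClass("hello").get_value() == "hello"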
# Test module for import_utils.load_extern_type testing class TestClass: """A test class to be imported by load_extern_type""" def __init__(self, value=None): self.value = value or "default" def get_value(self): return self.value TEST_CONSTANT = "test_constant_value" def test_function(): return "test_function_result" ================================================ FILE: verl_distillation/tests/utils/dataset/test_create_rl_sampler_on_cpu.py ================================================ # Copyright 2025 Amazon.com Inc and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ test create_rl_sampler """ from collections.abc import Sized import pytest import torch from omegaconf import DictConfig, OmegaConf from torch.utils.data import Dataset, RandomSampler from verl.experimental.dataset.sampler import AbstractCurriculumSampler from verl.trainer.main_ppo import create_rl_sampler class RandomCurriculumSampler(AbstractCurriculumSampler): def __init__( self, data_source: Sized, data_config: DictConfig, ): train_dataloader_generator = torch.Generator() train_dataloader_generator.manual_seed(1) sampler = RandomSampler(data_source=data_source) self.sampler = sampler def __iter__(self): return self.sampler.__iter__() def __len__(self) -> int: return len(self.sampler) def update(self, batch) -> None: return class MockIncorrectSampler: """A fake sampler class that does not adhere to the AbstractCurriculumSampler interface.""" def __init__(self, data_source, data_config): pass class MockChatDataset(Dataset): def __init__(self): self.data = [ {"prompt": "What's your name?", "response": "My name is Assistant."}, {"prompt": "How are you?", "response": "I'm doing well, thank you."}, {"prompt": "What is the capital of France?", "response": "Paris."}, { "prompt": "Tell me a joke.", "response": "Why did the chicken cross the road? To get to the other side!", }, {"prompt": "What is 2+2?", "response": "4"}, ] def __getitem__(self, index): return self.data[index] def __len__(self): return len(self.data) def test_create_custom_curriculum_samper(): data_config = OmegaConf.create( { "dataloader_num_workers": 0, "sampler": { "class_path": "pkg://tests.utils.dataset.test_create_rl_sampler_on_cpu", "class_name": "RandomCurriculumSampler", }, } ) dataset = MockChatDataset() # doesn't raise create_rl_sampler(data_config, dataset) def test_create_custom_curriculum_samper_wrong_class(): data_config = OmegaConf.create( { "sampler": { "class_path": "pkg://tests.utils.dataset.test_create_rl_sampler_on_cpu", "class_name": "MockIncorrectSampler", } } ) dataset = MockChatDataset() # MockIncorrectSampler is not an instance of AbstractCurriculumSampler, so raises with pytest.raises(AssertionError): create_rl_sampler(data_config, dataset) ================================================ FILE: verl_distillation/tests/utils/dataset/test_multiturn_sft_dataset_on_cpu.py ================================================ # Copyright 2024 Bytedance Ltd. 
and/or its affiliates # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Test the MultiTurnSFTDataset implementation """ import os import pandas as pd import torch from transformers import AutoTokenizer from verl.utils.dataset.multiturn_sft_dataset import MultiTurnSFTDataset def test_multiturn_sft_dataset(): print("Starting test...") # Create a temporary parquet file with test data test_data = { "messages": [ [ {"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "What is 2+2?"}, {"role": "assistant", "content": "2+2 equals 4."}, {"role": "user", "content": "And what is 4+4?"}, {"role": "assistant", "content": "4+4 equals 8."}, ], [ {"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "Tell me a joke."}, {"role": "assistant", "content": "Why did the chicken cross the road?"}, {"role": "user", "content": "Why?"}, {"role": "assistant", "content": "To get to the other side!"}, ], ] } # Create test directory if it doesn't exist os.makedirs("test_data", exist_ok=True) test_file = "test_data/test.parquet" # Save test data to parquet df = pd.DataFrame(test_data) df.to_parquet(test_file) # Initialize tokenizer and dataset tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-7B-Instruct") config = {"max_length": 512, "truncation": "error", "multiturn": {"messages_key": "messages"}} dataset = MultiTurnSFTDataset(parquet_files=test_file, tokenizer=tokenizer, config=config) # Test 1: Dataset Length assert len(dataset) == 2, f"Expected dataset length 2, got {len(dataset)}" # Get items for testing item0 = dataset[0] # Math conversation item1 = dataset[1] # Joke conversation # Test 2: Required Keys and Types required_keys = ["input_ids", "attention_mask", "position_ids", "loss_mask"] for key in required_keys: assert key in item0, f"Missing key {key} in dataset item" assert isinstance(item0[key], torch.Tensor), f"Expected torch.Tensor for {key}" assert item0[key].dtype == torch.long, f"Expected torch.long for {key}, got {item0[key].dtype}" # Test 3: Shape Consistency assert item0["loss_mask"].shape == item0["input_ids"].shape, "Loss mask shape doesn't match input_ids shape" assert item0["attention_mask"].shape == item0["input_ids"].shape, ( "Attention mask shape doesn't match input_ids shape" ) assert item0["position_ids"].shape == item0["input_ids"].shape, "Position IDs shape doesn't match input_ids shape" # Test 4: Loss Mask Pattern - Math Conversation loss_mask0 = item0["loss_mask"] input_ids0 = item0["input_ids"] # Find assistant response positions assistant_positions0 = torch.where(loss_mask0 == 1)[0] assert len(assistant_positions0) > 0, "No assistant positions found in loss mask" # Decode and verify assistant responses assistant_text0 = tokenizer.decode(input_ids0[loss_mask0 == 1]) print(f"Math conversation assistant text: {assistant_text0}") assert "2+2 equals 4" in assistant_text0, "First assistant response not found" assert "4+4 equals 8" in assistant_text0, "Second assistant response not found" # Test 5: Loss Mask Pattern - Joke 
Conversation loss_mask1 = item1["loss_mask"] input_ids1 = item1["input_ids"] # Find assistant response positions assistant_positions1 = torch.where(loss_mask1 == 1)[0] assert len(assistant_positions1) > 0, "No assistant positions found in loss mask" # Decode and verify assistant responses assistant_text1 = tokenizer.decode(input_ids1[loss_mask1 == 1]) print(f"Joke conversation assistant text: {assistant_text1}") assert "chicken cross the road" in assistant_text1, "First assistant response not found" assert "other side" in assistant_text1, "Second assistant response not found" # Test 6: Attention Mask Pattern attention_mask0 = item0["attention_mask"] sequence_length = torch.sum(attention_mask0) assert sequence_length > 0, "No tokens marked as attended in attention mask" assert torch.all(attention_mask0[:sequence_length] == 1), "Incorrect attention mask pattern" if sequence_length < len(attention_mask0): assert torch.all(attention_mask0[sequence_length:] == 0), "Padding not properly masked" # Test 7: Position IDs Pattern position_ids0 = item0["position_ids"] assert torch.equal(position_ids0[:sequence_length], torch.arange(sequence_length)), ( "Position IDs not sequential for non-padded tokens" ) if sequence_length < len(position_ids0): assert torch.all(position_ids0[sequence_length:] == 0), "Padding position IDs not zero" # Test 8: Verify loss mask for assistant responses # Get the full conversation text full_text = tokenizer.decode(input_ids0) print(f"\nFull conversation text:\n{full_text}") # Get the assistant responses assistant_text = tokenizer.decode(input_ids0[loss_mask0 == 1]) print(f"\nAssistant responses (from loss mask):\n{assistant_text}") # Verify that loss mask is set for all assistant responses for msg in test_data["messages"][0]: # First conversation if msg["role"] == "assistant": # The content should appear in the masked text assert msg["content"] in assistant_text, f"Assistant message '{msg['content']}' not found in masked text" # The content should NOT appear in the non-masked text non_assistant_text = tokenizer.decode(input_ids0[loss_mask0 == 0]) assert msg["content"] not in non_assistant_text, ( f"Assistant message '{msg['content']}' found in non-assistant text" ) # Test 9: Verify non-assistant parts have loss_mask=0 # Get non-assistant text non_assistant_text = tokenizer.decode(input_ids0[loss_mask0 == 0]) print(f"\nNon-assistant text (from loss mask):\n{non_assistant_text}") # Verify that system and user messages are in the non-assistant text for msg in test_data["messages"][0]: # First conversation if msg["role"] in ["system", "user"]: assert msg["content"] in non_assistant_text, ( f"{msg['role'].title()} message '{msg['content']}' not found in non-assistant text" ) # And verify they're NOT in the assistant text assert msg["content"] not in assistant_text, ( f"{msg['role'].title()} message '{msg['content']}' found in assistant text" ) # Test 10: Verify padding behavior padding_config = {"max_length": 1024, "truncation": "error", "multiturn": {"messages_key": "messages"}} small_dataset = MultiTurnSFTDataset(parquet_files=test_file, tokenizer=tokenizer, config=padding_config) padded_item = small_dataset[0] # Get actual sequence length (before padding) actual_length = torch.sum(padded_item["attention_mask"]) # Verify padding tokens assert torch.all(padded_item["input_ids"][actual_length:] == tokenizer.pad_token_id), ( "Padding tokens not set correctly" ) assert torch.all(padded_item["attention_mask"][actual_length:] == 0), "Attention mask not set correctly for padding" 
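    # The same padding invariant applies to the loss mask: padded positions must never
    # contribute to the SFT loss, which the next assertion checks.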
    assert torch.all(padded_item["loss_mask"][actual_length:] == 0), "Loss mask not set correctly for padding"

    # test no-padding
    config = {
        "max_length": 512,
        "truncation": "error",
        "multiturn": {"messages_key": "messages"},
        "pad_mode": "no_padding",
    }
    dataset = MultiTurnSFTDataset(parquet_files=test_file, tokenizer=tokenizer, config=config)
    item0 = dataset[0]

    # Verify that the output contains expected keys for no-padding mode
    required_keys = ["input_ids", "position_ids", "loss_mask"]
    for key in required_keys:
        assert key in item0, f"Missing key {key} in no-padding mode dataset item"
        assert isinstance(item0[key], torch.Tensor), f"Expected torch.Tensor for {key} in no-padding mode"

    # make sure assistant_text matches with expected
    assistant_text = tokenizer.decode(item0["input_ids"][item0["loss_mask"] == 1])
    assert assistant_text == "2+2 equals 4.<|im_end|>\n4+4 equals 8.<|im_end|>\n"
    print("All tests passed!")


================================================
FILE: verl_distillation/tests/utils/dataset/test_rl_collate_fn_on_cpu.py
================================================
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch


def test_rl_collate_fn():
    from verl.utils.dataset.rl_dataset import collate_fn

    max_prompt_length = 5
    test_data = [
        {
            # test tensor
            "input_ids": torch.randint(0, 10, (max_prompt_length,)),
            # test fixed length (1) list within a batch
            "messages": [{"role": "user", "content": "Hi."}],
            # test variable length list within a batch
            "raw_prompt_ids": [1, 2, 3, 4],
            # test string
            "ability": "math",
            # test dict
            "reward_model": {"ground_truth": 5, "style": "rule"},
            # test empty dict
            "tools_kwargs": {},
        },
        {
            "input_ids": torch.randint(0, 10, (max_prompt_length,)),
            "messages": [{"role": "user", "content": "Hello."}],
            "raw_prompt_ids": [1, 2, 3],
            "ability": "toolcall",
            "reward_model": {
                "ground_truth": '[{"name": "rgb_to_cmyk", "arguments": {"r": 0, "g": 0, "b": 255}}]',
                "style": "rule",
            },
            "tools_kwargs": {},
        },
    ]
    batch_size = len(test_data)
    batch = collate_fn(test_data)

    # Tensor part
    assert batch["input_ids"].shape == (batch_size, max_prompt_length)
    assert isinstance(batch["input_ids"], torch.Tensor)

    # Non-tensor parts
    expected_types = {
        "messages": list,
        "raw_prompt_ids": list,
        "ability": str,
        "reward_model": dict,
        "tools_kwargs": dict,
    }
    for key, dtype in expected_types.items():
        assert batch[key].shape == (batch_size,), (
            f"Expected shape {(batch_size,)} for '{key}', but got {batch[key].shape}"
        )
        assert isinstance(batch[key][0], dtype), (
            f"'{key}' should contain elements of type {dtype}, but got {type(batch[key][0])}"
        )


================================================
FILE: verl_distillation/tests/utils/dataset/test_rl_dataset_on_cpu.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

import torch
from omegaconf import OmegaConf
from torch.utils.data import DataLoader


def get_gsm8k_data():
    # prepare test dataset
    local_folder = os.path.expanduser("~/verl-data/gsm8k/")
    local_path = os.path.join(local_folder, "train.parquet")
    os.makedirs(local_folder, exist_ok=True)
    return local_path


def test_rl_dataset():
    from verl.utils import hf_tokenizer
    from verl.utils.dataset.rl_dataset import RLHFDataset, collate_fn

    tokenizer = hf_tokenizer("deepseek-ai/deepseek-coder-1.3b-instruct")
    local_path = get_gsm8k_data()
    config = OmegaConf.create(
        {
            "prompt_key": "prompt",
            "max_prompt_length": 256,
            "filter_overlong_prompts": True,
            "filter_overlong_prompts_workers": 2,
        }
    )
    dataset = RLHFDataset(data_files=local_path, tokenizer=tokenizer, config=config)
    dataloader = DataLoader(dataset=dataset, batch_size=16, shuffle=True, drop_last=True, collate_fn=collate_fn)
    a = next(iter(dataloader))

    from verl import DataProto

    tensors = {}
    non_tensors = {}
    for key, val in a.items():
        if isinstance(val, torch.Tensor):
            tensors[key] = val
        else:
            non_tensors[key] = val

    data_proto = DataProto.from_dict(tensors=tensors, non_tensors=non_tensors)
    assert "input_ids" in data_proto.batch

    data = dataset[0]["input_ids"]
    output = tokenizer.batch_decode([data])[0]
    print(f"type: {type(output)}")
    print(f"\n\noutput: {output}")


def test_rl_dataset_with_max_samples():
    from verl.utils import hf_tokenizer
    from verl.utils.dataset.rl_dataset import RLHFDataset

    tokenizer = hf_tokenizer("deepseek-ai/deepseek-coder-1.3b-instruct")
    local_path = get_gsm8k_data()
    config = OmegaConf.create(
        {
            "prompt_key": "prompt",
            "max_prompt_length": 256,
            "filter_overlong_prompts": True,
            "filter_overlong_prompts_workers": 2,
            "max_samples": 5,
        }
    )
    dataset = RLHFDataset(data_files=local_path, tokenizer=tokenizer, config=config, max_samples=5)
    assert len(dataset) == 5


def test_image_rl_data():
    from verl.utils import hf_processor, hf_tokenizer
    from verl.utils.dataset.rl_dataset import RLHFDataset, collate_fn

    tokenizer = hf_tokenizer("Qwen/Qwen2-VL-2B-Instruct")
    processor = hf_processor("Qwen/Qwen2-VL-2B-Instruct")
    config = OmegaConf.create(
        {
            "prompt_key": "prompt",
            "max_prompt_length": 1024,
            "filter_overlong_prompts": True,
            "filter_overlong_prompts_workers": 1,
        }
    )
    dataset = RLHFDataset(
        data_files=os.path.expanduser("~/data/geo3k/train.parquet"),
        tokenizer=tokenizer,
        config=config,
        processor=processor,
    )
    dataloader = DataLoader(dataset=dataset, batch_size=16, shuffle=True, drop_last=True, collate_fn=collate_fn)
    a = next(iter(dataloader))

    from verl import DataProto

    tensors = {}
    non_tensors = {}
    for key, val in a.items():
        if isinstance(val, torch.Tensor):
            tensors[key] = val
        else:
            non_tensors[key] = val

    data_proto = DataProto.from_dict(tensors=tensors, non_tensors=non_tensors)
    assert "multi_modal_data" in data_proto.non_tensor_batch, data_proto
    assert "multi_modal_inputs" in data_proto.non_tensor_batch, data_proto

    data = dataset[0]["input_ids"]
    output = tokenizer.batch_decode([data])[0]
    print(f"type: {type(output)}")
    print(f"\n\noutput: {output}")


================================================
FILE:
verl_distillation/tests/utils/dataset/test_sft_dataset_on_cpu.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from verl.utils import hf_tokenizer from verl.utils.dataset.sft_dataset import SFTDataset def get_gsm8k_data(): # prepare test dataset local_folder = os.path.expanduser("~/verl-data/gsm8k/") local_path = os.path.join(local_folder, "train.parquet") return local_path def test_sft_cot_dataset(): tokenizer = hf_tokenizer("deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct") local_path = get_gsm8k_data() from omegaconf import OmegaConf dataset = SFTDataset( parquet_files=local_path, tokenizer=tokenizer, config=OmegaConf.create( { "prompt_key": "prompt", "prompt_dict_keys": ["content"], "response_key": "extra_info", "response_dict_keys": ["answer"], "max_length": 512, } ), ) data = dataset[0]["input_ids"] output = tokenizer.batch_decode([data])[0] assert len(output) > 1 assert isinstance(output, str) def test_sft_dataset(): tokenizer = hf_tokenizer("deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct") local_path = get_gsm8k_data() from omegaconf import OmegaConf dataset = SFTDataset( parquet_files=local_path, tokenizer=tokenizer, config=OmegaConf.create( { "prompt_key": "extra_info", "prompt_dict_keys": ["question"], "response_key": "extra_info", "response_dict_keys": ["answer"], "max_length": 512, } ), ) data = dataset[0]["input_ids"] output = tokenizer.batch_decode([data])[0] assert len(output) > 1 assert isinstance(output, str) def test_sft_dataset_with_max_samples(): tokenizer = hf_tokenizer("deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct") local_path = get_gsm8k_data() from omegaconf import OmegaConf dataset = SFTDataset( parquet_files=local_path, tokenizer=tokenizer, config=OmegaConf.create( { "prompt_key": "extra_info", "prompt_dict_keys": ["question"], "response_key": "extra_info", "response_dict_keys": ["answer"], "max_length": 512, } ), max_samples=5, ) assert len(dataset) == 5 ================================================ FILE: verl_distillation/tests/utils/debug/test_metrics.py ================================================ # Copyright 2025 Individual Contributor: TomQunChaoA # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
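# calculate_debug_metrics (verl.utils.debug.metrics) diagnoses rollout/training mismatch
# from rollout_log_probs vs. old_log_probs under loss_mask; the exact statistics it emits
# are assumed rather than verified here -- the test below only checks that the comparison
# is marked valid (training/rollout_probs_diff_valid == 1) when both tensors and the mask
# are present.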
import unittest import torch from verl.protocol import DataProto from verl.utils.debug.metrics import calculate_debug_metrics class TestMetrics(unittest.TestCase): def test_calculate_debug_metrics(self): data = DataProto.from_dict( { "rollout_log_probs": torch.tensor( [ [-1.5085, -0.1200, -0.6650, -0.4823, -0.1426, -1.5557, -2.8532, -0.3919, -0.4294, -0.4700], [-0.0585, -0.0573, -0.4681, -0.5187, -0.7451, -1.2737, -0.0682, -0.4284, -0.5754, -0.0611], ] ), "old_log_probs": torch.tensor( [ [-1.8636, -0.7863, -0.2136, -0.4376, -2.0257, -0.2579, -1.1547, -0.5203, -0.3802, -0.9872], [-0.3507, -0.5426, -0.2725, -0.4637, -0.3577, -0.3733, -1.7560, -1.9542, -0.4229, -1.3098], ] ), "loss_mask": torch.tensor([[1, 0, 0, 0, 1, 1, 0, 1, 1, 0], [1, 0, 1, 0, 1, 1, 1, 0, 1, 1]]), "responses": torch.zeros((2, 10)), } ) metrics = calculate_debug_metrics(data) print(metrics) assert metrics["training/rollout_probs_diff_valid"] == 1 if __name__ == "__main__": unittest.main() ================================================ FILE: verl_distillation/tests/utils/megatron/test_pipeline_parallel.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytest from verl.model_merger.megatron_model_merger import get_dynamic_pipeline_shards from verl.utils.megatron.pipeline_parallel import make_batch_generator def test_make_batch_generator_no_vpp(): batches = [1, 2, 3] vpp_size = 1 generator = make_batch_generator(batches, vpp_size) assert list(generator) == batches def test_make_batch_generator_with_vpp(): batches = [{"data": 1}, {"data": 2}] vpp_size = 2 generators = make_batch_generator(batches, vpp_size) assert isinstance(generators, list) assert len(generators) == vpp_size # Check each generator yields the original batches for gen in generators: assert list(gen) == batches def test_make_batch_generator_empty(): batches = [] vpp_size = 1 generator = make_batch_generator(batches, vpp_size) assert list(generator) == [] vpp_size = 3 generators = make_batch_generator(batches, vpp_size) assert len(generators) == vpp_size for gen in generators: assert list(gen) == [] @pytest.mark.parametrize( "layer_num,pp_size,gt", [ (61, 8, [6, 8, 8, 8, 8, 8, 8, 7]), (61, 7, [8, 9, 9, 9, 9, 9, 8]), (61, 1, [61]), (61, 0, ValueError), (10, 16, ValueError), ], ) def test_get_dynamic_pipeline_shards(layer_num, pp_size, gt): if isinstance(gt, list): shards = get_dynamic_pipeline_shards(layer_num, pp_size) assert len(shards) == len(gt) == pp_size, f"Expected {pp_size} shards, got {len(shards)}" assert all([shard == gt[i] for i, shard in enumerate(shards)]), f"Expected shards {gt}, got {shards}" elif issubclass(gt, Exception): with pytest.raises(gt): shards = get_dynamic_pipeline_shards(layer_num, pp_size) ================================================ FILE: verl_distillation/tests/utils/reward_score/reward_score/test_sandbox_fusion_on_cpu.py ================================================ # Copyright 2025 Bytedance Ltd. 
and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import multiprocessing import os import time from concurrent.futures import ProcessPoolExecutor from unittest.mock import patch import pytest # Import the function to be tested from verl.utils.reward_score.sandbox_fusion.utils import check_correctness # Get SANDBOX_URL from environment variable SANDBOX_URL = os.environ.get("SANDBOX_FUSION_URL") # Define skip condition and reason skip_reason = "SANDBOX_FUSION_URL environment variable not set" skip_condition = not SANDBOX_URL # --- Test code (for real API calls) --- CODE_SUCCESS = """ import sys data = sys.stdin.read() if data == 'input1': print('output1\\n', end='') elif data == 'input2': print('output2\\n', end='') else: print('unexpected input', end='') """ CODE_WRONG_OUTPUT = """ print('wrong_output\\n', end='') """ CODE_COMPILE_ERROR = """ a=b """ CODE_RUNTIME_ERROR = """ import sys print("About to raise error", file=sys.stderr) raise ValueError("This is a runtime error") """ CODE_TIMEOUT = """ import time import sys print("Sleeping...", file=sys.stderr) time.sleep(10) # Sleep time should be longer than the timeout set in the test print("Finished sleeping", file=sys.stderr) """ # --- Test input/output data --- INPUT_OUTPUT_VALID = {"inputs": ["input1", "input2"], "outputs": ["output1\n", "output2\n"]} INPUT_OUTPUT_SINGLE = {"inputs": ["input1"], "outputs": ["output1\n"]} INPUT_OUTPUT_MISMATCH = {"inputs": ["input1"], "outputs": ["output1\n", "output2\n"]} INPUT_OUTPUT_INVALID_MISSING_KEY = {"inputs": ["input1"]} # --- Integration test cases (calling real API) --- @pytest.mark.skipif(skip_condition, reason=skip_reason) def test_integration_success_correct(): """Integration test: Code is correct, output is correct""" results, metadata_list = check_correctness(SANDBOX_URL, INPUT_OUTPUT_VALID, CODE_SUCCESS) assert results == [True, True] assert metadata_list[0]["status"] == "success" assert metadata_list[0]["stdout"] == "output1\n" assert metadata_list[1]["status"] == "success" assert metadata_list[1]["stdout"] == "output2\n" @pytest.mark.skipif(skip_condition, reason=skip_reason) def test_integration_success_wrong_output(): """Integration test: Code runs successfully, but output is wrong""" results, metadata_list = check_correctness(SANDBOX_URL, INPUT_OUTPUT_VALID, CODE_WRONG_OUTPUT) assert results == [False, False] assert metadata_list[0]["status"] == "wrong_answer" assert metadata_list[0]["stdout"] == "wrong_output\n" assert metadata_list[1]["status"] == "wrong_answer" @pytest.mark.skipif(skip_condition, reason=skip_reason) def test_integration_compile_error(): """Integration test: Code causes compile error""" results, metadata_list = check_correctness(SANDBOX_URL, INPUT_OUTPUT_VALID, CODE_COMPILE_ERROR, language="cpp") assert results == [-4, -4] assert metadata_list[0]["status"] == "compile_error" assert metadata_list[1]["status"] == "compile_error" @pytest.mark.skipif(skip_condition, reason=skip_reason) def test_integration_runtime_error(): """Integration test: Code causes runtime 
error""" results, metadata_list = check_correctness(SANDBOX_URL, INPUT_OUTPUT_SINGLE, CODE_RUNTIME_ERROR) assert results == [-2] assert metadata_list[0]["status"] == "runtime_error" # More assertions can be added based on the actual API response, e.g., exit_code, stderr @pytest.mark.skipif(skip_condition, reason=skip_reason) def test_integration_runtime_timeout(): """Integration test: Code causes runtime timeout""" test_timeout = 5 # Set a timeout shorter than the sleep time in CODE_TIMEOUT results, metadata_list = check_correctness(SANDBOX_URL, INPUT_OUTPUT_SINGLE, CODE_TIMEOUT, timeout=test_timeout) assert results == [-3] assert metadata_list[0]["status"] == "timeout" # More assertions can be added based on the actual API response, e.g., run_status @pytest.mark.skipif(skip_condition, reason=skip_reason) def test_integration_concurrency_high_load(): """Integration test: High concurrency (100 cases) against real API with mixed results (success, wrong answer, timeout)""" concurrency_level = 100 # Indices for different expected outcomes wrong_answer_indices = {10, 25, 50} timeout_indices = {5, 30, 60, 90} # Indices where we expect a timeout # Generate 100 input/output pairs and code high_load_inputs = [] high_load_outputs = [] expected_results_map = {} # Store expected result for each index for i in range(concurrency_level): if i in timeout_indices: # Use a special input to trigger timeout in the code high_load_inputs.append(f"input_timeout_{i}") # Output doesn't matter for timeout, but keep it consistent high_load_outputs.append(f"output_{i}\n") expected_results_map[i] = -3 # Expect timeout elif i in wrong_answer_indices: high_load_inputs.append(f"input_{i}") # Intentionally set wrong expected output high_load_outputs.append(f"wrong_output_{i}\n") expected_results_map[i] = False # Expect wrong answer else: high_load_inputs.append(f"input_{i}") # Correct expected output high_load_outputs.append(f"output_{i}\n") expected_results_map[i] = True # Expect success high_load_in_outs = {"inputs": high_load_inputs, "outputs": high_load_outputs} # Code that handles normal inputs, and sleeps on specific "timeout" inputs code_mixed_concurrent = """ import sys import time data = sys.stdin.read() if data.startswith('input_timeout_'): time.sleep(20) # Sleep longer than the test timeout print(f"output_{data.split('_')[-1]}\\n", end='') # Still print something in case it finishes early elif data.startswith('input_'): print(f"output_{data.split('_')[-1]}\\n", end='') else: print("unknown_input\\n", end='') """ # Set a reasonable timeout per case (must be less than the sleep time in the code) test_timeout = 15 # Allow slightly more time due to potential API load, but less than 20s sleep start_time = time.time() results, metadata_list = check_correctness( SANDBOX_URL, high_load_in_outs, code_mixed_concurrent, # Use the new code timeout=test_timeout, ) end_time = time.time() duration = end_time - start_time print( f"\nHigh concurrency test ({concurrency_level} cases with {len(wrong_answer_indices)} wrong answers, " f"{len(timeout_indices)} timeouts) duration: {duration:.2f} seconds" ) # Verify results against the expected map assert len(results) == concurrency_level, f"Expected {concurrency_level} results, got {len(results)}" correct_count = 0 wrong_count = 0 timeout_count = 0 unexpected_results = [] for i, r in enumerate(results): expected = expected_results_map[i] if r == expected: if expected is True: correct_count += 1 elif expected is False: wrong_count += 1 elif expected == -3: timeout_count += 1 else: 
unexpected_results.append((i, r, f"Expected {expected}")) print( f"Correct results (True): {correct_count}/" f"{concurrency_level - len(wrong_answer_indices) - len(timeout_indices)}" ) print(f"Expected wrong answers (False, correctly identified): {wrong_count}/{len(wrong_answer_indices)}") print(f"Expected timeouts (-3, correctly identified): {timeout_count}/{len(timeout_indices)}") if unexpected_results: print("Unexpected results found:") for idx, res, expected_str in unexpected_results[:10]: # Print first 10 unexpected print(f" Index {idx}: Got {res}, {expected_str}. Metadata: {metadata_list[idx]}") raise AssertionError(f"Found {len(unexpected_results)} unexpected results.") assert correct_count == concurrency_level - len(wrong_answer_indices) - len(timeout_indices), ( "Incorrect number of successful results" ) assert wrong_count == len(wrong_answer_indices), "Incorrect number of identified wrong answers" assert timeout_count == len(timeout_indices), "Incorrect number of identified timeouts" # Verify metadata count and basic status of one of each type assert len(metadata_list) == concurrency_level # Find the first correct index first_correct_index = next( i for i in range(concurrency_level) if i not in wrong_answer_indices and i not in timeout_indices ) assert metadata_list[first_correct_index]["status"] == "success" assert metadata_list[first_correct_index]["stdout"] == f"output_{first_correct_index}\n" # Check the status of the first intentionally wrong case first_wrong_index = min(wrong_answer_indices) assert metadata_list[first_wrong_index]["status"] == "wrong_answer" assert metadata_list[first_wrong_index]["stdout"] == f"output_{first_wrong_index}\n" assert metadata_list[first_wrong_index]["expected_output"] == f"wrong_output_{first_wrong_index}\n" # Check the status of the first intentionally timeout case first_timeout_index = min(timeout_indices) assert metadata_list[first_timeout_index]["status"] == "timeout" # For timeout, stdout might be None or empty depending on when the timeout occurred # assert metadata_list[first_timeout_index]["stdout"] is None or metadata_list[first_timeout_index]["stdout"] == "" # --- Unit test cases (using mock) --- @patch("verl.utils.reward_score.sandbox_fusion.utils.call_sandbox_api") def test_unit_concurrency_order(mock_call_sandbox_api): sandbox_url = "mock_url" generation = "print(input())" language = "python" timeout = 5 in_outs = {"inputs": ["input1", "input2", "input3"], "outputs": ["output1", "output2", "output3"]} def side_effect(*args, **kwargs): stdin = kwargs.get("stdin") if stdin == "input1": return ( {"status": "Success", "run_result": {"status": "Finished", "stdout": "output1", "return_code": 0}}, None, ) elif stdin == "input2": time.sleep(0.1) return ( {"status": "Success", "run_result": {"status": "Finished", "stdout": "output2", "return_code": 0}}, None, ) elif stdin == "input3": return ( {"status": "Success", "run_result": {"status": "Finished", "stdout": "output3", "return_code": 0}}, None, ) else: return (None, "Unknown input in mock") mock_call_sandbox_api.side_effect = side_effect results, metadata_list = check_correctness(sandbox_url, in_outs, generation, timeout, language) assert results == [True, True, True] assert len(metadata_list) == 3 assert metadata_list[0]["case_index"] == 0 assert metadata_list[0]["status"] == "success" assert metadata_list[1]["case_index"] == 1 assert metadata_list[1]["status"] == "success" assert metadata_list[2]["case_index"] == 2 assert metadata_list[2]["status"] == "success" assert 
mock_call_sandbox_api.call_count == 3 @patch("verl.utils.reward_score.sandbox_fusion.utils.call_sandbox_api") def test_unit_api_timeout_error_concurrent(mock_call_sandbox_api): sandbox_url = "mock_url" generation = "print(input())" language = "python" timeout = 5 in_outs = {"inputs": ["input1", "input2_timeout", "input3"], "outputs": ["output1", "output2", "output3"]} api_error_message = "API Call Failed: Gateway Timeout (504) on attempt 3/3" def side_effect(*args, **kwargs): stdin = kwargs.get("stdin") if stdin == "input1": return ( {"status": "Success", "run_result": {"status": "Finished", "stdout": "output1", "return_code": 0}}, None, ) elif stdin == "input2_timeout": return (None, api_error_message) elif stdin == "input3": return ( {"status": "Success", "run_result": {"status": "Finished", "stdout": "output3", "return_code": 0}}, None, ) else: return (None, "Unknown input in mock") mock_call_sandbox_api.side_effect = side_effect results, metadata_list = check_correctness(sandbox_url, in_outs, generation, timeout, language) assert results == [True, -1, True] assert len(metadata_list) == 3 assert metadata_list[0]["status"] == "success" assert metadata_list[1]["status"] == "api_error" assert metadata_list[1]["api_request_error"] == api_error_message assert metadata_list[2]["status"] == "success" assert mock_call_sandbox_api.call_count == 3 # --- Constants for the new concurrency test --- # Define a low global concurrency limit to test the semaphore's effect MAX_GLOBAL_CONCURRENCY_LIMIT_TEST = 5 # Define the number of processes used in the test NUM_PROCESSES_TEST = 4 # Define the number of tasks processed by check_correctness in each process (i.e., internal # ThreadPoolExecutor's concurrency potential) NUM_TASKS_PER_PROCESS_TEST = 3 # Simulate API call duration to ensure calls can overlap SIMULATED_API_CALL_DURATION_TEST = 0.2 # seconds # --- Mock API call function for concurrency tracking --- # This function will replace the real call_sandbox_api and use shared variables to track concurrency def _mock_api_call_for_concurrency_tracking( active_calls_counter, # multiprocessing.Value max_calls_tracker, # multiprocessing.Value call_lock, # multiprocessing.Lock # Standard call_sandbox_api parameters sandbox_fusion_url, code, stdin, compile_timeout, run_timeout, memory_limit_mb, language, ): # entry_time = time.time() # For detailed logging with call_lock: active_calls_counter.value += 1 if active_calls_counter.value > max_calls_tracker.value: max_calls_tracker.value = active_calls_counter.value # Optional debug log: # print(f"[PID:{os.getpid()}-TID:{threading.get_ident()}] API Call Start. Active: " # f"{active_calls_counter.value}, Max Observed: {max_calls_tracker.value}, Input: {stdin}") time.sleep(SIMULATED_API_CALL_DURATION_TEST) # Simulate actual work duration # exit_time = time.time() # For detailed logging with call_lock: active_calls_counter.value -= 1 # Optional debug log: # print(f"[PID:{os.getpid()}-TID:{threading.get_ident()}] API Call End. 
Active: " # f"{active_calls_counter.value}, Input: {stdin}, Duration: {exit_time - entry_time:.2f}s") # Return a simulated successful API response return { "status": "Success", "run_result": {"status": "Finished", "stdout": f"mock_output_for_{stdin}", "return_code": 0}, }, None # --- Worker function for ProcessPoolExecutor --- # This function runs in each child process of ProcessPoolExecutor def _process_pool_worker_for_concurrency_test( sandbox_url, in_outs, generation, memory_limit_mb, language, timeout, mp_semaphore_for_check_correctness, active_calls_counter, max_calls_tracker, call_lock, ): # Corrected lambda to accept keyword arguments matching call_sandbox_api's usage curried_mock_api_call = ( lambda sandbox_fusion_url, code, stdin, compile_timeout, run_timeout, memory_limit_mb, language: ( _mock_api_call_for_concurrency_tracking( active_calls_counter, max_calls_tracker, call_lock, sandbox_fusion_url, code, stdin, compile_timeout, run_timeout, memory_limit_mb, language, ) ) ) # ---- START DEBUG PRINTS ---- import os import verl.utils.reward_score.sandbox_fusion.utils print( f"[Worker PID:{os.getpid()}] Original call_sandbox_api: " f"{verl.utils.reward_score.sandbox_fusion.utils.call_sandbox_api}", flush=True, ) # ---- END DEBUG PRINTS ---- with patch( "verl.utils.reward_score.sandbox_fusion.utils.call_sandbox_api", side_effect=curried_mock_api_call ) as mock_obj: # ---- START DEBUG PRINTS ---- print( f"[Worker PID:{os.getpid()}] Patched call_sandbox_api: " f"{verl.utils.reward_score.sandbox_fusion.utils.call_sandbox_api}", flush=True, ) print(f"[Worker PID:{os.getpid()}] Mock object: {mock_obj}", flush=True) # ---- END DEBUG PRINTS ---- results, metadata_list = check_correctness( sandbox_fusion_url=sandbox_url, in_outs=in_outs, generation=generation, timeout=timeout, memory_limit_mb=memory_limit_mb, language=language, concurrent_semaphore=mp_semaphore_for_check_correctness, # Pass multiprocessing.Semaphore ) # print(f"Process {os.getpid()} finished check_correctness. Processed {len(results)} tasks.") return len(results) # Return the number of processed tasks for basic validation # --- The actual test case for multiprocess concurrency control --- def test_multiprocess_global_concurrency_limit_with_semaphore(): """ Tests that the global concurrent_semaphore (multiprocessing.Semaphore) correctly limits the number of concurrent calls to call_sandbox_api across multiple processes, each potentially running multiple threads via check_correctness's internal ThreadPoolExecutor. """ manager = multiprocessing.Manager() active_calls_counter = manager.Value("i", 0) # Current active mock API calls max_calls_tracker = manager.Value("i", 0) # Observed maximum concurrent mock API calls call_lock = manager.Lock() # Lock to protect counters # Create a multiprocessing.Semaphore instance, this is the global semaphore we are testing. # It will be passed to check_correctness and used by _process_single_case to limit calls to call_sandbox_api. 
global_mp_semaphore = manager.Semaphore(MAX_GLOBAL_CONCURRENCY_LIMIT_TEST) mock_sandbox_url = "mock_url_for_concurrency_test" mock_generation = "pass" # Specific code content is not important as API call is mocked mock_memory_limit_mb = 1024 mock_language = "python" mock_timeout = 5 # Timeout setting, not critical for mock calls # Input/output data for each process # NUM_TASKS_PER_PROCESS_TEST tasks will be handled by check_correctness's internal ThreadPoolExecutor process_in_outs = { "inputs": [f"task_input_{i}" for i in range(NUM_TASKS_PER_PROCESS_TEST)], "outputs": [f"task_output_{i}" for i in range(NUM_TASKS_PER_PROCESS_TEST)], } futures = [] total_tasks_expected_to_run = NUM_PROCESSES_TEST * NUM_TASKS_PER_PROCESS_TEST test_start_time = time.time() with ProcessPoolExecutor(max_workers=NUM_PROCESSES_TEST) as executor: for i in range(NUM_PROCESSES_TEST): future = executor.submit( _process_pool_worker_for_concurrency_test, # Worker function mock_sandbox_url, process_in_outs, mock_generation, mock_memory_limit_mb, mock_language, mock_timeout, global_mp_semaphore, # Global semaphore to test active_calls_counter, # Shared variables for tracking max_calls_tracker, call_lock, ) futures.append(future) # Wait for all processes to complete and collect results num_tasks_processed_per_worker = [f.result() for f in futures] test_end_time = time.time() total_execution_time = test_end_time - test_start_time # Print some test statistics for debugging and validation print("\n--- Global Concurrency Test Stats ---") print(f"Semaphore Limit (MAX_GLOBAL_CONCURRENCY_LIMIT_TEST): {MAX_GLOBAL_CONCURRENCY_LIMIT_TEST}") print(f"Number of Processes (NUM_PROCESSES_TEST): {NUM_PROCESSES_TEST}") print(f"Tasks per Process (NUM_TASKS_PER_PROCESS_TEST): {NUM_TASKS_PER_PROCESS_TEST}") print(f"Total Tasks Submitted: {total_tasks_expected_to_run}") print(f"Simulated API Call Duration: {SIMULATED_API_CALL_DURATION_TEST}s") print(f"Total Test Execution Time: {total_execution_time:.2f}s") print(f"Max Concurrent Mock API Calls Observed: {max_calls_tracker.value}") # print(f"Tasks processed per worker: {num_tasks_processed_per_worker}") # Verify that all submitted tasks have been processed assert sum(num_tasks_processed_per_worker) == total_tasks_expected_to_run, ( "Mismatch in the number of tasks processed." ) # Verify that the mock API was called at least once assert max_calls_tracker.value > 0, "The mocked API call_sandbox_api was not called." # Core assertion: Observed maximum concurrent calls should not exceed the semaphore's limit assert max_calls_tracker.value <= MAX_GLOBAL_CONCURRENCY_LIMIT_TEST, ( f"Observed concurrency ({max_calls_tracker.value}) exceeded semaphore limit " f"({MAX_GLOBAL_CONCURRENCY_LIMIT_TEST})." ) # Optional: Rough check on execution time to verify semaphore is working to limit concurrency # Theoretical minimum execution time = (Total tasks / Concurrency limit) * Single task duration # Actual time will be longer due to various overheads min_expected_duration = ( total_tasks_expected_to_run * SIMULATED_API_CALL_DURATION_TEST ) / MAX_GLOBAL_CONCURRENCY_LIMIT_TEST # print(f"Minimum Expected Execution Time (approx): {min_expected_duration:.2f}s") # Allow some margin, e.g., 80% of theoretical minimum time assert total_execution_time >= min_expected_duration * 0.8, ( f"Total execution time ({total_execution_time:.2f}s) was unexpectedly short, suggesting the " f"semaphore might not be effectively limiting concurrency as expected " f"(min expected: {min_expected_duration * 0.8:.2f}s)." 
    )


def test_unit_invalid_input_format():
    """Unit test: Invalid in_outs format passed"""
    results, metadata_list = check_correctness(SANDBOX_URL, None, CODE_SUCCESS)
    assert results == [-1]
    assert metadata_list[0]["error"] == "Invalid input/output data"

    results, metadata_list = check_correctness(SANDBOX_URL, {}, CODE_SUCCESS)
    assert results == [-1]
    assert metadata_list[0]["error"] == "Invalid input/output data"

    results, metadata_list = check_correctness(SANDBOX_URL, INPUT_OUTPUT_INVALID_MISSING_KEY, CODE_SUCCESS)
    assert results == [-1]
    assert metadata_list[0]["error"] == "Invalid input/output data"


@pytest.mark.skipif(skip_condition, reason=skip_reason)
def test_unit_input_output_mismatch():
    """Unit test: Mismatch between the number of inputs and outputs"""
    results, metadata_list = check_correctness(SANDBOX_URL, INPUT_OUTPUT_MISMATCH, CODE_SUCCESS)
    assert results == [-1]
    assert len(metadata_list) == 1
    assert metadata_list[0]["error"] == "Input/output count mismatch"


@pytest.mark.skipif(skip_condition, reason=skip_reason)
def test_integration_concurrency_all_timeout():
    """Integration test: High concurrency (100 cases) against real API, all causing timeout"""
    concurrency_level = 100
    # A DP over a (X+1) x (Y+1) grid; with the input below it is far too slow to finish
    # within the per-case timeout, so every case is expected to time out.
    code_slow_dp = """
def knight_moves(X, Y):
    MOD = 10**9 + 7
    dp = [[0] * (Y + 1) for _ in range(X + 1)]
    dp[0][0] = 1
    for i in range(1, X + 1):
        for j in range(1, Y + 1):
            dp[i][j] = (dp[i - 1][j] + dp[i][j - 1]) % MOD
    return dp[X][Y]

def solve():
    X, Y = map(int, input().split())
    print(knight_moves(X, Y))

if __name__ == "__main__":
    solve()
"""
    # Generate 100 input/output pairs; the large input "324 384429" forces every case
    # into the slow DP path and hence into a timeout
    timeout_inputs = ["324 384429" for i in range(concurrency_level)]
    timeout_outputs = [f"output_{i}\n" for i in range(concurrency_level)]
    timeout_in_outs = {"inputs": timeout_inputs, "outputs": timeout_outputs}

    # Set a timeout for the test cases
    test_timeout = 10

    start_time = time.time()
    results, metadata_list = check_correctness(SANDBOX_URL, timeout_in_outs, code_slow_dp, timeout=test_timeout)
    end_time = time.time()
    duration = end_time - start_time
    print(f"\nHigh concurrency all timeout test ({concurrency_level} cases) duration: {duration:.2f} seconds")

    # Verify all results are -3 (timeout)
    assert len(results) == concurrency_level, f"Expected {concurrency_level} results, got {len(results)}"
    all_timed_out = all(r == -3 for r in results)
    if not all_timed_out:
        non_timeout_indices = [i for i, r in enumerate(results) if r != -3]
        print(f"Indices that did not time out: {non_timeout_indices}")
        # Print metadata for the first few non-timeout cases for debugging
        for i in non_timeout_indices[:5]:
            print(f"Metadata for non-timeout case {i}: {metadata_list[i]}")
    assert all_timed_out, f"Not all {concurrency_level} concurrent tests resulted in timeout (-3). Results: {results}"

    # Verify metadata count and status of the first case
    assert len(metadata_list) == concurrency_level
    assert metadata_list[0]["status"] == "timeout"


@pytest.mark.skipif(skip_condition, reason=skip_reason)
def test_fn_name_success_single_case():
    """Tests successful execution for a single test case with fn_name.
from livecodebench/code_generation_lite test 510 """ generation_code = """ class Solution: def occurrencesOfElement(self, nums: List[int], queries: List[int], x: int) -> List[int]: positions = defaultdict(list) for idx, num in enumerate(nums): positions[num].append(idx) x_positions = positions[x] answer = [] for k in queries: if k > len(x_positions): answer.append(-1) else: answer.append(x_positions[k-1]) return answer """ in_outs = { "fn_name": "occurrencesOfElement", "inputs": ["[1, 3, 1, 7]\n[1, 3, 2, 4]\n1", "[1, 2, 3]\n[10]\n5"], "outputs": ["[0, -1, 2, -1]", "[-1]"], } # Use a short timeout for fast tests results, metadata_list = check_correctness(SANDBOX_URL, in_outs, generation_code, timeout=5) # from verl.utils.reward_score.prime_code import apps_check_correctness # results, metadata_list = apps_check_correctness(in_outs=in_outs, generation=generation_code, # timeout=50000, debug=True) assert results == [True, True] assert "error" not in metadata_list[0] assert metadata_list[0].get("status") != "compile_error" assert metadata_list[0].get("status") != "runtime_error" @pytest.mark.skipif(skip_condition, reason=skip_reason) def test_none_and_empty_stdin_passed_correctly(): """ Tests that when stdin data is set to an empty string or None, it is still is passed correctly to Sandbox Fusion as an empty string. """ echo_code = """ import sys print(f"You said '{sys.stdin.readline().strip()}'") """ in_outs = { "inputs": [None, "", "hello"], "outputs": ["You said ''", "You said ''", "You said 'hello'"], } # Use a short timeout for fast tests results, metadata_list = check_correctness(SANDBOX_URL, in_outs, echo_code, timeout=5) assert results == [True, True, True] assert "error" not in metadata_list[0] assert metadata_list[0].get("status") != "compile_error" assert metadata_list[0].get("status") != "runtime_error" @pytest.mark.skipif(skip_condition, reason=skip_reason) def test_assert_case_success(): """Tests successful execution for assert case. 
from KodCode """ generation_code = """ from typing import List, Tuple def merge_intervals(intervals: List[Tuple[int, int]]) -> List[Tuple[int, int]]: if not intervals: return [] # Sort intervals by the start time intervals.sort(key=lambda x: x[0]) merged = [intervals[0]] for current in intervals[1:]: last = merged[-1] # If intervals overlap, merge them if current[0] <= last[1]: merged[-1] = (last[0], max(last[1], current[1])) else: merged.append(current) return merged """ test_cases = { "fn_name": "merge_intervals", "assert_case": [ "assert merge_intervals([(0, 1), (3, 5), (4, 7), (6, 8), (10, 12)," " (12, 14)]) == [(0, 1), (3, 8), (10, 14)]", "assert merge_intervals([(1, 2), (2, 3), (3, 4)]) == [(1, 4)]", "assert merge_intervals([(1, 2), (3, 4), (5, 6)]) == [(1, 2), (3, 4), (5, 5)]", ], } assert_cases = test_cases.get("assert_case") test_cases.setdefault("inputs", ["" for _ in assert_cases]) test_cases.setdefault("outputs", [None for _ in assert_cases]) # Use a short timeout for fast tests results, metadata_list = check_correctness(SANDBOX_URL, test_cases, generation_code, timeout=5) assert results == [True, True, -2] for i in range(2): assert "error" not in metadata_list[i] assert metadata_list[i].get("status") == "success" assert metadata_list[i].get("expected_output") is None assert metadata_list[i].get("status") != "runtime_error" assert "error" not in metadata_list[2] assert metadata_list[2].get("status") != "success" assert metadata_list[2].get("expected_output") is None assert metadata_list[2].get("status") == "runtime_error" ================================================ FILE: verl_distillation/tests/utils/reward_score/test_sandbox_on_cpu.py ================================================ # Copyright 2024 PRIME team and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import asyncio import json import os import pytest from verl.utils.reward_score import default_compute_score, prime_code, sandbox_fusion from verl.utils.reward_score.prime_code import apps_check_correctness from verl.workers.reward_manager.prime import parallel_compute_score_async prime_math_answers = [ """\\begin{bmatrix}\n -7 & 6 & -8 \\\\\n 11 & -9 & 12 \\\\\n 15 & -16 & 19 \n \\end{bmatrix}""", """\\frac{\\sqrt{505}}{7}""", """x^2 + y^2 + 4x - 6y + 13""", ] prime_math_gts = [ """\\begin{pmatrix}\n -7 & 6 & -8 \\\\\n 11 & -9 & 12 \\\\\n 15 & -16 & 19\n \\end{pmatrix}""", # mat test """\\frac{\\sqrt{505}}{7}""", # frac test """(x + 2)^2 + (y - 3)^2 """, # symbolic test ] prime_code_answers = [ """import sys from collections import deque def main(): data = sys.stdin.read().split() it = iter(data) # Read start and target positions x0, y0, x1, y1 = int(next(it)), int(next(it)), int(next(it)), int(next(it)) n = int(next(it)) allowed = set() # The total number of allowed cells is at most 10^5. 
for _ in range(n): r = int(next(it)) a = int(next(it)) b = int(next(it)) for c in range(a, b + 1): allowed.add((r, c)) # Directions for the king (8 neighboring cells) directions = [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1)] start = (x0, y0) target = (x1, y1) # BFS initialization queue = deque() queue.append((x0, y0, 0)) # Mark the starting cell as visited by removing it from allowed set. allowed.discard(start) while queue: x, y, moves = queue.popleft() if (x, y) == target: print(moves) return for dx, dy in directions: nx, ny = x + dx, y + dy if (nx, ny) in allowed: allowed.remove((nx, ny)) queue.append((nx, ny, moves + 1)) print(-1) if __name__ == '__main__': main() """ ] * 2 prime_code_gts = [ """{\n \"inputs\": [\n \"5 7 6 11\\n3\\n5 3 8\\n6 7 11\\n5 2 5\\n\",\n \"3 4 3 10\\n3\\n3 1 4\\n4 5 9\\n3 10 10\\n\",\n \"1 1 2 10\\n2\\n1 1 3\\n2 6 10\\n\",\n \"9 8 7 8\\n9\\n10 6 6\\n10 6 6\\n7 7 8\\n9 5 6\\n8 9 9\\n9 5 5\\n9 8 8\\n8 5 6\\n9 10 10\\n\",\n \"6 15 7 15\\n9\\n6 15 15\\n7 14 14\\n6 15 15\\n9 14 14\\n7 14 16\\n6 15 15\\n6 15 15\\n7 14 14\\n8 15 15\\n\",\n \"13 16 20 10\\n18\\n13 16 16\\n20 10 10\\n19 10 10\\n12 15 15\\n20 10 10\\n18 11 11\\n19 10 10\\n19 10 10\\n20 10 10\\n19 10 10\\n20 10 10\\n20 10 10\\n19 10 10\\n18 11 11\\n13 16 16\\n12 15 15\\n19 10 10\\n19 10 10\\n\",\n \"89 29 88 30\\n16\\n87 31 31\\n14 95 95\\n98 88 89\\n96 88 88\\n14 97 97\\n13 97 98\\n100 88 88\\n88 32 32\\n99 88 89\\n90 29 29\\n87 31 31\\n15 94 96\\n89 29 29\\n88 32 32\\n97 89 89\\n88 29 30\\n\",\n \"30 14 39 19\\n31\\n35 7 11\\n37 11 12\\n32 13 13\\n37 5 6\\n46 13 13\\n37 14 14\\n31 13 13\\n43 13 19\\n45 15 19\\n46 13 13\\n32 17 17\\n41 14 19\\n30 14 14\\n43 13 17\\n34 16 18\\n44 11 19\\n38 13 13\\n40 12 20\\n37 16 18\\n46 16 18\\n34 10 14\\n36 9 10\\n36 15 19\\n38 15 19\\n42 13 19\\n33 14 15\\n35 15 19\\n33 17 18\\n39 12 20\\n36 5 7\\n45 12 12\\n\",\n \"2 1 1 1\\n2\\n1 1 2\\n2 1 2\\n\",\n \"1 1 1 2\\n5\\n1000000000 1 10000\\n19920401 1188 5566\\n1000000000 1 10000\\n1 1 10000\\n5 100 200\\n\",\n \"1 1 1000000000 2\\n5\\n1000000000 1 10000\\n19920401 1188 5566\\n1000000000 1 10000\\n1 1 10000\\n5 100 200\\n\"\n ],\n \"outputs\": [\n \"4\\n\",\n \"6\\n\",\n \"-1\\n\",\n \"2\\n\",\n \"1\\n\",\n \"-1\\n\",\n \"1\\n\",\n \"9\\n\",\n \"1\\n\",\n \"1\\n\",\n \"-1\\n\"\n ]\n}""", # A correct sample # noqa: E501 """{\n \"inputs\": [\n \"5 7 6 11\\n3\\n5 3 8\\n6 7 11\\n5 2 5\\n\",\n \"3 4 3 10\\n3\\n3 1 4\\n4 5 9\\n3 10 10\\n\",\n \"1 1 2 10\\n2\\n1 1 3\\n2 6 10\\n\",\n \"9 8 7 8\\n9\\n10 6 6\\n10 6 6\\n7 7 8\\n9 5 6\\n8 9 9\\n9 5 5\\n9 8 8\\n8 5 6\\n9 10 10\\n\",\n \"6 15 7 15\\n9\\n6 15 15\\n7 14 14\\n6 15 15\\n9 14 14\\n7 14 16\\n6 15 15\\n6 15 15\\n7 14 14\\n8 15 15\\n\",\n \"13 16 20 10\\n18\\n13 16 16\\n20 10 10\\n19 10 10\\n12 15 15\\n20 10 10\\n18 11 11\\n19 10 10\\n19 10 10\\n20 10 10\\n19 10 10\\n20 10 10\\n20 10 10\\n19 10 10\\n18 11 11\\n13 16 16\\n12 15 15\\n19 10 10\\n19 10 10\\n\",\n \"89 29 88 30\\n16\\n87 31 31\\n14 95 95\\n98 88 89\\n96 88 88\\n14 97 97\\n13 97 98\\n100 88 88\\n88 32 32\\n99 88 89\\n90 29 29\\n87 31 31\\n15 94 96\\n89 29 29\\n88 32 32\\n97 89 89\\n88 29 30\\n\",\n \"30 14 39 19\\n31\\n35 7 11\\n37 11 12\\n32 13 13\\n37 5 6\\n46 13 13\\n37 14 14\\n31 13 13\\n43 13 19\\n45 15 19\\n46 13 13\\n32 17 17\\n41 14 19\\n30 14 14\\n43 13 17\\n34 16 18\\n44 11 19\\n38 13 13\\n40 12 20\\n37 16 18\\n46 16 18\\n34 10 14\\n36 9 10\\n36 15 19\\n38 15 19\\n42 13 19\\n33 14 15\\n35 15 19\\n33 17 18\\n39 12 20\\n36 5 7\\n45 12 12\\n\",\n \"2 1 1 1\\n2\\n1 1 2\\n2 1 
2\\n\",\n \"1 1 1 2\\n5\\n1000000000 1 10000\\n19920401 1188 5566\\n1000000000 1 10000\\n1 1 10000\\n5 100 200\\n\",\n \"1 1 1000000000 2\\n5\\n1000000000 1 10000\\n19920401 1188 5566\\n1000000000 1 10000\\n1 1 10000\\n5 100 200\\n\"\n ],\n \"outputs\": [\n \"4\\n\",\n \"6\\n\",\n \"-1\\n\",\n \"-1\\n\",\n \"1\\n\",\n \"-1\\n\",\n \"1\\n\",\n \"9\\n\",\n \"1\\n\",\n \"1\\n\",\n \"-1\\n\"\n ]\n}""", # noqa: E501 ] # A failed sample with first several in-out passed prime_code_scores = [1.0, 0.9] def test_parallelism(): """ Test if process pool works properly """ sequences_str = [] ground_truth = [] data_sources = [] while len(sequences_str) < 32: sequences_str.extend(prime_code_answers) ground_truth.extend(prime_code_gts) data_sources.extend(["codecontests"] * len(prime_code_answers)) sequences_str.extend(prime_math_answers) ground_truth.extend(prime_math_gts) data_sources.extend(["numina_aops_forum"] * len(prime_math_answers)) scores = asyncio.run( parallel_compute_score_async(default_compute_score, sequences_str, ground_truth, data_sources, num_processes=16) ) print(scores) def test_prime_code(): """ Test PRIME code sandbox. """ data_source = "codecontests" for completion, ground_truth, score_ in zip(prime_code_answers, prime_code_gts, prime_code_scores, strict=True): score = default_compute_score(data_source, completion, ground_truth) assert float(score) == score_ # Use the pytest.mark.skipif decorator to skip the test @pytest.mark.skipif(not os.environ.get("SANDBOX_FUSION_URL"), reason="SANDBOX_FUSION_URL environment variable not set") def test_prime_code_sandbox_fusion(): """ Test PRIME code on sandbox fusion. Skips if SANDBOX_FUSION_URL is not set. """ data_source = "codecontests" # Get the URL from the environment variable, as skipif ensures it is set at this point sandbox_fusion_url = os.environ.get("SANDBOX_FUSION_URL") # Removed the previous 'if not sandbox_url' check block for completion, ground_truth, score_ in zip(prime_code_answers, prime_code_gts, prime_code_scores, strict=True): score = default_compute_score( data_source, completion, ground_truth, extra_info={"sandbox_fusion_url": sandbox_fusion_url} ) # <-- Use the URL obtained from the environment variable assert float(score) == score_ @pytest.mark.skipif(not os.environ.get("SANDBOX_FUSION_URL"), reason="SANDBOX_FUSION_URL environment variable not set") def test_continuous_score_consistency(): """ Verify that continuous score calculation is consistent between prime_code and sandbox_fusion. Uses a test case where the first 9 out of 11 sub-cases pass (expected score 0.9). """ completion = prime_code_answers[1] # Use the second sample ground_truth = prime_code_gts[1] # Use the second sample (9/11 pass, first 9 pass) expected_continuous_score = 0.9 # 1. Calculate score using prime_code (default) with continuous=True prime_score, _ = sandbox_fusion.compute_score( os.environ.get("SANDBOX_FUSION_URL"), None, completion, ground_truth, continuous=True ) # 2. Calculate score using sandbox_fusion with continuous=True # Ensure the extra_info key triggers the sandbox_fusion path in default_compute_score fusion_score, _ = prime_code.compute_score(completion, ground_truth, continuous=True) # 3. 
Assert scores are equal (using pytest.approx for float comparison) assert float(prime_score) == pytest.approx(expected_continuous_score) assert float(fusion_score) == pytest.approx(expected_continuous_score) assert float(prime_score) == pytest.approx(float(fusion_score)) print(f"Continuous Score (Prime Code): {prime_score}") print(f"Continuous Score (Sandbox Fusion): {fusion_score}") def test_check_correctness(): completion = prime_code_answers[0] ground_truth = json.loads(prime_code_gts[0]) ground_truth_single = {"inputs": ground_truth["inputs"][:1], "outputs": ground_truth["outputs"][:1]} res, meta = apps_check_correctness(in_outs=ground_truth_single, generation=completion, timeout=5, debug=False) print(res, meta) def test_prime_math(): data_source = "numina_aops_forum" for completion, ground_truth in zip(prime_math_answers, prime_math_gts, strict=True): score = default_compute_score(data_source, completion, ground_truth) assert float(score) == 1.0 ================================================ FILE: verl_distillation/tests/utils/test_activation_offload.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import shutil import tempfile import pytest import torch import torch.distributed import torch.multiprocessing as mp from torch.distributed import init_device_mesh from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp import MixedPrecision, ShardingStrategy from transformers import AutoModelForCausalLM, AutoTokenizer, Qwen2Config from verl.utils.activation_offload import enable_activation_offloading from verl.utils.checkpoint.fsdp_checkpoint_manager import FSDPCheckpointManager from verl.utils.fsdp_utils import MixedPrecisionPolicy, apply_fsdp2, get_fsdp_wrap_policy def create_random_input_ids(batch_size, seq_len, vocab_size): from flash_attn.bert_padding import unpad_input from verl.utils.model import compute_position_id_with_mask, create_random_mask input_ids = torch.randint(0, vocab_size, (batch_size, seq_len), device="cuda") attention_mask = create_random_mask( input_ids, max_ratio_of_left_padding=0.1, min_ratio_of_valid_token=0.5, max_ratio_of_valid_token=0.7 ) position_ids = compute_position_id_with_mask(attention_mask) input_ids = unpad_input(input_ids.unsqueeze(-1), attention_mask)[0].transpose(0, 1) position_ids = unpad_input(position_ids.unsqueeze(-1), attention_mask)[0].transpose(0, 1) return input_ids, position_ids def _fsdp_activation_offloading_test(rank, world_size, rendezvous_file, strategy="fsdp"): torch.cuda.set_device(rank) torch.distributed.init_process_group( backend="nccl", init_method=f"file://{rendezvous_file}", rank=rank, world_size=world_size, ) device_mesh = init_device_mesh("cuda", mesh_shape=(world_size,), mesh_dim_names=("dp",)) model_name = "Qwen/Qwen2.5-0.5B-Instruct" config = Qwen2Config(num_hidden_layers=4) with torch.device("cuda"): model = AutoModelForCausalLM.from_config( config=config, torch_dtype=torch.bfloat16, 
attn_implementation="flash_attention_2"
        )
    model = model.to(device="cuda")

    # Wrap model with FSDP
    mixed_precision = MixedPrecision(param_dtype=torch.bfloat16, reduce_dtype=torch.float32, buffer_dtype=torch.float32)
    if strategy == "fsdp":
        model = FSDP(
            model,
            use_orig_params=False,
            device_id=torch.cuda.current_device(),
            sharding_strategy=ShardingStrategy.FULL_SHARD,
            mixed_precision=mixed_precision,
            device_mesh=device_mesh,
            auto_wrap_policy=get_fsdp_wrap_policy(module=model),
        )
    else:
        mp_policy = MixedPrecisionPolicy(
            param_dtype=torch.bfloat16, reduce_dtype=torch.float32, cast_forward_inputs=True
        )
        fsdp_kwargs = {
            "mesh": device_mesh,
            "mp_policy": mp_policy,
        }
        apply_fsdp2(model, fsdp_kwargs, {})

    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9)

    # Create checkpoint manager
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    checkpoint_manager = FSDPCheckpointManager(
        model=model, optimizer=optimizer, lr_scheduler=lr_scheduler, tokenizer=tokenizer
    )

    # Generate sample input
    batch_size = 2
    seq_len = 32
    vocab_size = 32000
    # First input for initial update
    input_ids1, position_ids1 = create_random_input_ids(batch_size, seq_len, vocab_size)
    # Second input for verification
    input_ids2, position_ids2 = create_random_input_ids(batch_size, seq_len, vocab_size)

    # Step 1: Initial update and save checkpoint
    outputs1 = model(input_ids=input_ids1, position_ids=position_ids1)
    loss1 = outputs1.logits.mean()
    loss1.backward()
    optimizer.step()
    lr_scheduler.step()
    optimizer.zero_grad()

    # Save checkpoint after first update
    temp_dir = tempfile.mkdtemp()
    checkpoint_path = os.path.join(temp_dir, "checkpoint")
    checkpoint_manager.save_checkpoint(local_path=checkpoint_path, hdfs_path=None, global_step=0)

    # Step 2: Second update and forward pass
    outputs2 = model(input_ids=input_ids2, position_ids=position_ids2)
    loss2 = outputs2.logits.mean()
    loss2.backward()
    optimizer.step()
    lr_scheduler.step()
    optimizer.zero_grad()

    # Record logits after second update
    with torch.no_grad():
        logits_without_offloading = model(input_ids=input_ids2, position_ids=position_ids2).logits

    # Step 3: wrap module with activation offloading and load checkpoint
    enable_activation_offloading(model, strategy=strategy)
    checkpoint_manager.load_checkpoint(checkpoint_path)

    # Step 4: Repeat the second update with the same input
    outputs3 = model(input_ids=input_ids2, position_ids=position_ids2)
    loss3 = outputs3.logits.mean()
    loss3.backward()
    optimizer.step()
    lr_scheduler.step()
    optimizer.zero_grad()

    # Record logits after loaded checkpoint and update
    with torch.no_grad():
        logits_with_offloading = model(input_ids=input_ids2, position_ids=position_ids2).logits

    # Step 5: Verify outputs match
    torch.testing.assert_close(logits_without_offloading, logits_with_offloading, atol=0.0, rtol=0.0)
    print(f"Activation offloading for {strategy} test passed on {world_size} GPUs!")

    # Cleanup
    shutil.rmtree(temp_dir)
    torch.distributed.barrier()
    torch.distributed.destroy_process_group()


@pytest.mark.parametrize("world_size", (2, 4))
@pytest.mark.parametrize("strategy", ("fsdp", "fsdp2"))
def test_activation_offloading(world_size, strategy, tmp_path):
    rendezvous_file = str(tmp_path / "rdzv_file")
    os.makedirs(os.path.dirname(rendezvous_file), exist_ok=True)
    mp.spawn(
        fn=_fsdp_activation_offloading_test,
        args=(world_size, rendezvous_file, strategy),
        nprocs=world_size,
        join=True,
    )

================================================
FILE: verl_distillation/tests/utils/test_config_on_cpu.py
================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from dataclasses import dataclass, field from omegaconf import OmegaConf from verl.base_config import BaseConfig from verl.utils import omega_conf_to_dataclass @dataclass class TestDataclass(BaseConfig): hidden_size: int = 0 activation: str = "relu" @dataclass class TestTrainConfig(BaseConfig): batch_size: int = 0 model: TestDataclass = field(default_factory=TestDataclass) override_config: dict = field(default_factory=dict) _cfg_str = """train_config: _target_: tests.utils.test_config_on_cpu.TestTrainConfig batch_size: 32 model: hidden_size: 768 activation: relu override_config: {}""" class TestConfigOnCPU(unittest.TestCase): """Test cases for configuration utilities on CPU. Test Plan: 1. Test basic OmegaConf to dataclass conversion for simple nested structures 2. Test nested OmegaConf to dataclass conversion for complex hierarchical configurations 3. Verify all configuration values are correctly converted and accessible """ def setUp(self): self.config = OmegaConf.create(_cfg_str) def test_omega_conf_to_dataclass(self): sub_cfg = self.config.train_config.model cfg = omega_conf_to_dataclass(sub_cfg, TestDataclass) self.assertEqual(cfg.hidden_size, 768) self.assertEqual(cfg.activation, "relu") assert isinstance(cfg, TestDataclass) def test_nested_omega_conf_to_dataclass(self): cfg = omega_conf_to_dataclass(self.config.train_config, TestTrainConfig) self.assertEqual(cfg.batch_size, 32) self.assertEqual(cfg.model.hidden_size, 768) self.assertEqual(cfg.model.activation, "relu") assert isinstance(cfg, TestTrainConfig) assert isinstance(cfg.model, TestDataclass) class TestPrintCfgCommand(unittest.TestCase): """Test suite for the print_cfg.py command-line tool.""" def test_command_with_override(self): """Test that the command runs without error when overriding config values.""" import subprocess # Run the command result = subprocess.run( ["python3", "scripts/print_cfg.py"], capture_output=True, text=True, ) # Verify the command exited successfully self.assertEqual(result.returncode, 0, f"Command failed with stderr: {result.stderr}") # Verify the output contains expected config information self.assertIn("critic", result.stdout) self.assertIn("profiler", result.stdout) if __name__ == "__main__": unittest.main() ================================================ FILE: verl_distillation/tests/utils/test_flops_counter.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. import math import pytest from verl.utils.flops_counter import FlopsCounter VALID_CONFIG_TYPE = {"llama", "qwen2", "qwen3", "qwen3_moe", "deepseek_v3", "mistral", "gemma3_text", "apertus"} class Config: def __init__(self, config_dict): for key, value in config_dict.items(): setattr(self, key, value) CONFIG = { "llama": { "config": { # llama2-7B "model_type": "llama", "vocab_size": 32000, "hidden_size": 4096, "intermediate_size": 11008, "num_hidden_layers": 32, "num_attention_heads": 32, "num_key_value_heads": 32, }, "batch_seqlens_tuple": ([512, 1024, 2048], [4096, 4096, 4096]), # 6*(vocab*hidden*2+layer*(hidden*(q+k+v+head*head_dim)+ hidden*inter*3))*token_sum + # 12*sum(seqlen^2)*layer*head*head_dim # 6*(32000*4096*2+32*(4096*4096*4+4096*11008*3))*(512+1024+2048) + # 12*(512*512+1024*1024+2048*2048)*32*4096 # 6*(32000*4096*2+32*(4096*4096*4+4096*11008*3))*(4096+4096+4096) + # 12*(4096*4096+4096*4096+4096*4096)*32*4096 "expected_flops_tuple": (153555818250240 / 1e12, 575955114393600 / 1e12), }, "qwen2": { "config": { # Qwen/Qwen2.5-7B-Instruct "model_type": "qwen2", "vocab_size": 152064, "hidden_size": 3584, "intermediate_size": 18944, "num_hidden_layers": 28, "num_attention_heads": 28, "num_key_value_heads": 4, }, "batch_seqlens_tuple": ([512, 1024, 2048], [4096, 4096, 4096]), # 6*(vocab*hidden*2+layer*(hidden*(q+k+v+head*head_dim)+ hidden*inter*3))*token_sum + # 12*sum(seqlen^2)*layer*head*head_dim # 6*(152064*3584*2+28*(3584*(3584+512+512+3584)+3584*18944*3))*(512+1024+2048) + # 12*(512*512+1024*1024+2048*2048)*28*3584 # 6*(152064*3584*2+28*(3584*(3584+512+512+3584)+3584*18944*3))*(4096+4096+4096) + # 12*(4096*4096+4096*4096+4096*4096)*28*3584 "expected_flops_tuple": (170388331954176 / 1e12, 622070178250752 / 1e12), }, "qwen3": { "config": { # Qwen/Qwen3-8B "model_type": "qwen3", "vocab_size": 151936, "hidden_size": 4096, "intermediate_size": 12288, "num_hidden_layers": 36, "num_attention_heads": 32, "num_key_value_heads": 8, "head_dim": 128, }, "batch_seqlens_tuple": ([512, 1024, 2048], [4096, 4096, 4096]), # 6*(vocab*hidden*2+layer*(hidden*(q+k+v+head*head_dim)+ hidden*inter*3))*token_sum + # 12*sum(seqlen^2)*layer*head*head_dim # 6*(151936*4096*2+36*(4096*(128*32+128*8*2+128*32)+4096*12288*3))*(512+1024+2048) + # 12*(512*512+1024*1024+2048*2048)*36*128*32 # 6*(151936*4096*2+36*(4096*(128*32+128*8*2+128*32)+4096*12288*3))*(4096+4096+4096) + # 12*(4096*4096+4096*4096+4096*4096)*36*128*32 "expected_flops_tuple": (185867930959872 / 1e12, 692924253732864 / 1e12), }, "qwen3_moe": { "config": { # Qwen/Qwen3-30B-A3B-Base "model_type": "qwen3_moe", "hidden_size": 2048, "vocab_size": 151936, "num_hidden_layers": 48, "num_key_value_heads": 4, "num_attention_heads": 32, "head_dim": 128, "moe_intermediate_size": 768, "num_experts_per_tok": 8, "num_experts": 128, }, "batch_seqlens_tuple": ([512, 1024, 2048], [4096, 4096, 4096]), # 6*(vocab*hidden*2+layer*(hidden*(q+k+v+head*head_dim)+hidden*inter*top_k_exp*3 + # hidden*num_experts))*token_sum + 12*sum(seqlen^2)*layer*head*head_dim # 6*(151936*2048*2+48*(2048*(128*32+128*4*2+128*32)+2048*768*8*3+2048*128))*(512+1024+2048) + # 12*(512*512+1024*1024+2048*2048)*48*128*32 # 6*(151936*2048*2+48*(2048*(128*32+128*4*2+128*32)+2048*768*8*3+2048*128))*(4096+4096+4096) + # 12*(4096*4096+4096*4096+4096*4096)*48*128*32 "expected_flops_tuple": (85087060230144 / 1e12, 365944098521088 / 1e12), }, "deepseek_v3": { "config": { # 
deepseek-ai/DeepSeek-Prover-V2-671B "model_type": "deepseek_v3", "hidden_size": 7168, "vocab_size": 129280, "moe_intermediate_size": 2048, "num_hidden_layers": 61, "first_k_dense_replace": 3, "num_attention_heads": 128, "n_routed_experts": 256, "num_experts_per_tok": 8, "n_shared_experts": 1, "kv_lora_rank": 512, "qk_rope_head_dim": 64, "v_head_dim": 128, "intermediate_size": 18432, "qk_nope_head_dim": 128, "q_lora_rank": 1536, }, "batch_seqlens_tuple": ([512, 1024, 2048], [4096, 4096, 4096]), # (1536*7168+128*192*1536+7168*(512+64)+128*(128+128)*512+128*128*7168) = 187105280 # 6*(129280*7168*2+ 3*(7168*18432*3+187105280)+ 58*(187105280+7168*256+7168*2048*9*3))*(512+1024+2048) + # 12*(512*512+1024*1024+2048*2048)*61*192*128 # 6*(129280*7168*2+ 3*(7168*18432*3+187105280)+ 58*(187105280+7168*256+7168*2048*9*3))*(4096+4096+4096) + # 12*(4096*4096+4096*4096+4096*4096)*61*192*128 "expected_flops_tuple": (906535995703296 / 1e12, 3674028304760832 / 1e12), }, "mistral": { "config": { # mistralai/Mistral-Small-24B-Instruct-2501 "model_type": "mistral", "vocab_size": 131072, "hidden_size": 5120, "intermediate_size": 32768, "num_hidden_layers": 40, "num_attention_heads": 32, "num_key_value_heads": 8, "head_dim": 128, }, "batch_seqlens_tuple": ([512, 1024, 2048], [4096, 4096, 4096]), # Mistral uses same architecture as Llama, with GQA # 6*(vocab*hidden*2+layer*(hidden*(q+k+v+head*head_dim)+ hidden*inter*3))*token_sum + # 12*sum(seqlen^2)*layer*head*head_dim # vocab part: 131072*5120*2 = 1342177280 # attn part per layer: 5120*(128*32+128*8+128*8+128*32) = 5120*10240 = 52428800 # mlp part per layer: 5120*32768*3 = 503316480 # total per layer: 52428800 + 503316480 = 555745280 # all layers: 1342177280 + 40*555745280 = 23571988480 # For batch [512, 1024, 2048], tokens_sum = 3584: # dense flops: 6 * 23571988480 * 3584 = 506892040273920 # attn flops: 12 * 5505024 * 40 * 128 * 32 = 10823317585920 # total: 517715357859840 / 1e12 = 517.71535785984 # For batch [4096, 4096, 4096], tokens_sum = 12288: # dense flops: 6 * 23571988480 * 12288 = 1737915566653440 # attn flops: 12 * 50331648 * 40 * 128 * 32 = 98956046499840 # total: 1836871613153280 / 1e12 = 1836.87161315328 "expected_flops_tuple": (517715357859840 / 1e12, 1836871613153280 / 1e12), }, "gemma3_text": { "config": { # Gemma3-12B-IT-TextOnly "model_type": "gemma3_text", "vocab_size": 262208, "hidden_size": 3840, "intermediate_size": 15360, "num_hidden_layers": 48, "num_attention_heads": 16, "num_key_value_heads": 8, "head_dim": 256, "sliding_window": 1024, "layer_types": None, # Will be auto-generated based on sliding_window_pattern "sliding_window_pattern": 6, # Every 6th layer is full attention }, "batch_seqlens_tuple": ([512, 1024, 2048], [4096, 4096, 4096]), # Gemma3 has alternating sliding window attention # With sliding_window_pattern=6: layers 5,11,17,23,29,35,41,47 use full attention (8 layers) # Other 40 layers use sliding window attention with window_size=1024 # # Non-attention FLOPs: # vocab part: 262208*3840*2 = 2013757440 # attn part per layer: 3840*(256*16+256*8+256*8+256*16) = 3840*12288 = 47185920 # mlp part per layer: 3840*15360*3 = 176947200 # total per layer: 47185920 + 176947200 = 224133120 # all layers: 2013757440 + 48*224133120 = 12772147200 # # For batch [512, 1024, 2048], tokens_sum = 3584: # dense flops: 6 * 12772147200 * 3584 = 274652253388800 # seqlen_square_sum: 180355072 (calculated with sliding window logic) # attn flops: 12 * 180355072 * 256 * 16 = 8864812498944 # total: 283517065887744 / 1e12 = 283.517065887744 # # For batch 
[4096, 4096, 4096], tokens_sum = 12288: # dense flops: 6 * 12772147200 * 12288 = 941664868761600 # seqlen_square_sum: 905969664 (calculated with sliding window logic) # attn flops: 12 * 905969664 * 256 * 16 = 44530220924928 # total: 986195089686528 / 1e12 = 986.195089686528 "expected_flops_tuple": (283517065887744 / 1e12, 986195089686528 / 1e12), }, "apertus": { "config": { # swiss-ai/Apertus-8B "model_type": "apertus", "vocab_size": 131072, "hidden_size": 4096, "intermediate_size": 21504, "num_hidden_layers": 32, "num_attention_heads": 32, "num_key_value_heads": 32, "hidden_act": "xielu", # head_dim will be derived as 4096 / 32 = 128 }, "batch_seqlens_tuple": ([512, 1024, 2048], [4096, 4096, 4096]), # Calculation for Apertus (hidden_act="xielu" -> MLP uses [k_mlp=2]*H*I params; qk_norm=True -> [k_qkn=2]*H): # V=131072, H=4096, I=21504, L=32, k_mlp=2 (XIELU), k_qkn=2 (QK norm), S=6 # S*(2*V*H + L*(4*H**2 + k_mlp*H*I + k_qkn*H)) * (SUM[seqlen]) + 12*SUM[seqlen**2]*L*H "expected_flops_tuple": (199154680725504 / 1e12, 732294071451648 / 1e12), }, } @pytest.mark.parametrize( "config_type", ["llama", "qwen2", "qwen3", "qwen3_moe", "deepseek_v3", "mistral", "gemma3_text", "apertus"], ) def test_flops_counter(config_type: str): test_config = CONFIG[config_type] config = Config(test_config["config"]) flops_counter = FlopsCounter(config) for batch_seqlens, expected_flops in zip( test_config["batch_seqlens_tuple"], test_config["expected_flops_tuple"], strict=True ): # set delta time to 1 to get the flops counted_flops, _ = flops_counter.estimate_flops(batch_seqlens, 1) print(f"Expect flops for {test_config['config']} is {expected_flops}, but get {counted_flops}") assert math.isclose(counted_flops, expected_flops), ( f"Expect flops for {test_config['config']} is {expected_flops}, but get {counted_flops}" ) ================================================ FILE: verl_distillation/tests/utils/test_fs_on_cpu.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
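# For reference, the cache layout asserted in test_copy_from_hdfs_with_mocks below is
# cache_dir / md5(remote_path) / basename(remote_path). A minimal sketch of that
# derivation, assuming (unverified here) that verl's fs.md5_encode is an md5 hexdigest
# of the path string; the helper name is ours, not verl's:
import hashlib
import os


def _expected_cache_path_sketch(cache_dir: str, remote_path: str) -> str:
    # Mirror of: cache_dir / md5(remote_path) / basename(remote_path)
    digest = hashlib.md5(remote_path.encode()).hexdigest()
    return os.path.join(cache_dir, digest, os.path.basename(remote_path))


# _expected_cache_path_sketch("/tmp/cache", "hdfs://test/path/file.txt")
# -> "/tmp/cache/<32-hex-digest>/file.txt"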
import os from pathlib import Path import verl.utils.fs as fs def test_record_and_check_directory_structure(tmp_path): # Create test directory structure test_dir = tmp_path / "test_dir" test_dir.mkdir() (test_dir / "file1.txt").write_text("test") (test_dir / "subdir").mkdir() (test_dir / "subdir" / "file2.txt").write_text("test") # Create structure record record_file = fs._record_directory_structure(test_dir) # Verify record file exists assert os.path.exists(record_file) # Initial check should pass assert fs._check_directory_structure(test_dir, record_file) is True # Modify structure and verify check fails (test_dir / "new_file.txt").write_text("test") assert fs._check_directory_structure(test_dir, record_file) is False def test_copy_from_hdfs_with_mocks(tmp_path, monkeypatch): # Mock HDFS dependencies monkeypatch.setattr(fs, "is_non_local", lambda path: True) # side_effect will simulate the copy by creating parent dirs + empty file def fake_copy(src: str, dst: str, *args, **kwargs): dst_path = Path(dst) dst_path.parent.mkdir(parents=True, exist_ok=True) dst_path.write_bytes(b"") # touch an empty file monkeypatch.setattr(fs, "copy", fake_copy) # Mock actual HDFS copy # Test parameters test_cache = tmp_path / "cache" hdfs_path = "hdfs://test/path/file.txt" # Test initial copy local_path = fs.copy_to_local(hdfs_path, cache_dir=test_cache) expected_path = os.path.join(test_cache, fs.md5_encode(hdfs_path), os.path.basename(hdfs_path)) assert local_path == expected_path assert os.path.exists(local_path) def test_always_recopy_flag(tmp_path, monkeypatch): # Mock HDFS dependencies monkeypatch.setattr(fs, "is_non_local", lambda path: True) copy_call_count = 0 def fake_copy(src: str, dst: str, *args, **kwargs): nonlocal copy_call_count copy_call_count += 1 dst_path = Path(dst) dst_path.parent.mkdir(parents=True, exist_ok=True) dst_path.write_bytes(b"") monkeypatch.setattr(fs, "copy", fake_copy) # Mock actual HDFS copy test_cache = tmp_path / "cache" hdfs_path = "hdfs://test/path/file.txt" # Initial copy (always_recopy=False) fs.copy_to_local(hdfs_path, cache_dir=test_cache) assert copy_call_count == 1 # Force recopy (always_recopy=True) fs.copy_to_local(hdfs_path, cache_dir=test_cache, always_recopy=True) assert copy_call_count == 2 # Subsequent normal call (always_recopy=False) fs.copy_to_local(hdfs_path, cache_dir=test_cache) assert copy_call_count == 2 # Should not increment ================================================ FILE: verl_distillation/tests/utils/test_groupwise.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
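# The tests below pin down the factorization contract of as_torch_index: arbitrary
# labels map to contiguous group ids 0..G-1, with equal labels sharing an id. A
# pure-Python reference sketch of that behavior (first-appearance ordering is our
# assumption; the real helper additionally normalizes near-integer floats and
# accepts numpy arrays):
def _factorize_sketch(labels):
    # dict.setdefault returns the existing id, or assigns the next free one.
    ids = {}
    return [ids.setdefault(lab, len(ids)) for lab in labels]


# _factorize_sketch([2, 2, 5, 7, 5, 2]) -> [0, 0, 1, 2, 1, 0]  (3 distinct groups)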
import os

os.environ.setdefault("VERL_FORCE_DEVICE", "cpu")  # ensure CPU for tests

import numpy as np
import pytest
import torch

from verl.utils import as_torch_index, group_mean_std


def test_as_torch_index_basic_integers():
    g = as_torch_index([2, 2, 5, 7, 5, 2])
    assert g.dtype == torch.long
    assert g.device.type == "cpu"
    # Values should be contiguous 0..G-1, keeping equal labels equal
    assert g.tolist()[0] == g.tolist()[1]
    assert len(torch.unique(g)) == 3  # {2,5,7} -> 3 groups


def test_as_torch_index_near_integer_floats():
    arr = np.array([1.0000001, 2.0, 1.0, 3.0000000001], dtype=np.float64)
    g = as_torch_index(arr)  # should round to integers then factorize
    assert g.dtype == torch.long
    assert len(torch.unique(g)) == 3  # {1,2,3}


def test_as_torch_index_factorization_mixed():
    labels = ["a", "b", "a", "c", "0042", 42]
    g = as_torch_index(labels)
    # "0042" and 42 should NOT be the same group (strings are not coerced here)
    assert g.tolist()[4] != g.tolist()[5]
    assert len(torch.unique(g)) == 5


def test_group_mean_std_simple():
    # groups: 0 -> [1, 3], 1 -> [2]
    scores = torch.tensor([1.0, 2.0, 3.0], dtype=torch.float32)
    gidx = as_torch_index([0, 1, 0])
    mean_g, std_g, cnt_g = group_mean_std(scores, gidx)
    # group 0: mean = (1+3)/2 = 2
    # sample std (unbiased) = sqrt( (sum(x^2) - sum(x)^2/n) / (n-1) )
    #                       = sqrt( ((1^2+3^2) - (1+3)^2/2) / (2-1) ) = sqrt(10 - 8) = sqrt(2)
    assert torch.allclose(mean_g, torch.tensor([2.0, 0.0]))
    assert torch.allclose(cnt_g, torch.tensor([2.0, 1.0]))
    # singleton group -> std = 1.0
    assert mean_g[1].item() == 0.0
    assert std_g[1].item() == 1.0
    assert pytest.approx(std_g[0].item(), rel=1e-6) == (2.0**0.5)


def test_group_mean_std_empty():
    scores = torch.tensor([], dtype=torch.float32)
    gidx = torch.tensor([], dtype=torch.long)
    mean_g, std_g, cnt_g = group_mean_std(scores, gidx)
    assert mean_g.numel() == 0 and std_g.numel() == 0 and cnt_g.numel() == 0

================================================
FILE: verl_distillation/tests/utils/test_import_utils_on_cpu.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
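# load_extern_type, exercised below, pulls a named symbol out of an arbitrary .py
# file. A minimal sketch of the standard importlib recipe such a helper plausibly
# wraps (the wrapper name here is ours, not verl's):
import importlib.util


def _load_symbol_sketch(path: str, name: str):
    # Build a module spec from the file, execute the module, fetch the attribute.
    spec = importlib.util.spec_from_file_location("_extern_module", path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)  # propagates syntax/runtime errors in the file
    return getattr(module, name)  # raises AttributeError if the symbol is absent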
import os import pytest from verl.utils.import_utils import load_extern_type # Path to the test module TEST_MODULE_PATH = os.path.join(os.path.dirname(__file__), "_test_module.py") def test_load_extern_type_class(): """Test loading a class from an external file""" TestClass = load_extern_type(TEST_MODULE_PATH, "TestClass") # Verify the class was loaded correctly assert TestClass is not None assert TestClass.__name__ == "TestClass" # Test instantiation and functionality instance = TestClass() assert instance.value == "default" # Test with a custom value custom_instance = TestClass("custom") assert custom_instance.get_value() == "custom" def test_load_extern_type_function(): """Test loading a function from an external file""" test_function = load_extern_type(TEST_MODULE_PATH, "test_function") # Verify the function was loaded correctly assert test_function is not None assert callable(test_function) # Test function execution result = test_function() assert result == "test_function_result" def test_load_extern_type_constant(): """Test loading a constant from an external file""" constant = load_extern_type(TEST_MODULE_PATH, "TEST_CONSTANT") # Verify the constant was loaded correctly assert constant is not None assert constant == "test_constant_value" def test_load_extern_type_nonexistent_file(): """Test behavior when file doesn't exist""" with pytest.raises(FileNotFoundError): load_extern_type("/nonexistent/path.py", "SomeType") def test_load_extern_type_nonexistent_type(): """Test behavior when type doesn't exist in the file""" with pytest.raises(AttributeError): load_extern_type(TEST_MODULE_PATH, "NonExistentType") def test_load_extern_type_none_path(): """Test behavior when file path is None""" result = load_extern_type(None, "SomeType") assert result is None def test_load_extern_type_invalid_module(): """Test behavior when module has syntax errors""" # Create a temporary file with syntax errors import tempfile with tempfile.NamedTemporaryFile(suffix=".py", mode="w+", delete=False) as temp_file: temp_file.write("This is not valid Python syntax :") temp_path = temp_file.name try: with pytest.raises(RuntimeError): load_extern_type(temp_path, "SomeType") finally: # Clean up the temporary file if os.path.exists(temp_path): os.remove(temp_path) ================================================ FILE: verl_distillation/tests/utils/test_linear_cross_entropy.py ================================================ # # SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

import torch

import verl.utils.torch_functional as verl_F
from verl.utils.experimental.torch_functional import FusedLinearForPPO
from verl.utils.kernel.linear_cross_entropy import linear_cross_entropy
from verl.utils.torch_functional import logprobs_from_logits

compute_entropy_from_logits = torch.compile(verl_F.entropy_from_logits, dynamic=True)
fused_linear_for_ppo = FusedLinearForPPO()
fused_linear_for_ppo.compile(dynamic=True)

# Environment variables are strings; cast to int so the `<= 5` check and
# `range(MAX_TEST_CASES)` below also work when the variable is set.
MAX_TEST_CASES = int(os.environ.get("MAX_TEST_CASES", 5))


def run_torch_entropy(
    hidden: torch.Tensor, weight: torch.Tensor, labels: torch.Tensor, temperature: float, reduction="none"
) -> list[torch.Tensor]:
    hidden = hidden.squeeze(0).to(torch.float32)
    weight = weight.transpose(0, 1).to(torch.float32)
    logits = torch.matmul(hidden, weight)  # [num_tokens, vocab_size]
    logits /= temperature
    pd = torch.nn.functional.softmax(logits, dim=-1)  # [num_tokens, vocab_size]
    entropy_a = torch.logsumexp(logits, dim=-1)  # [num_tokens]
    entropy_b = torch.sum(pd * logits, dim=-1)  # [num_tokens]
    entropy = entropy_a - entropy_b
    logprobs = torch.nn.functional.cross_entropy(logits, labels.squeeze(0), reduction=reduction)  # [num_tokens]
    logprobs = torch.neg(logprobs)
    return logprobs, entropy


def run_verl_original_entropy(
    hidden: torch.Tensor,
    weight: torch.Tensor,
    labels: torch.Tensor,
    temperature: float,
) -> list[torch.Tensor]:
    hidden = hidden.squeeze(0).to(torch.float32)
    weight = weight.transpose(0, 1).to(torch.float32)
    logits = torch.matmul(hidden, weight)  # [num_tokens, vocab_size]
    logits /= temperature
    # compute entropy
    entropy = compute_entropy_from_logits(logits)  # ((total_nnz / sp) + pad)
    # if use_sp: ((total_nnz / sp) + pad) ; if not use_sp: (batch, seqlen)
    logprobs = logprobs_from_logits(logits=logits, labels=labels, inplace_backward=False)
    return logprobs, entropy


# To be tested
def run_verl_torch_fused_entropy(
    hidden: torch.Tensor,
    weight: torch.Tensor,
    labels: torch.Tensor,
    temperature: float,
):
    hidden = hidden.to(torch.float32)
    weight = weight.to(torch.float32)
    logprobs, entropy = fused_linear_for_ppo(
        hidden,
        weight,
        labels,
        temperature=temperature,
    )
    return logprobs.squeeze(0), entropy.squeeze(0)


class TestLinearCrossEntropy:
    def __init__(self, test_case_idx: int, temperature: float = 1.5) -> None:
        self.test_case_idx = test_case_idx
        self.temperature = temperature

    def cleanup(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_peak_memory_stats()
        import gc

        gc.collect()
        torch.cuda.synchronize()

    def generate_hyper(self):
        global MAX_TEST_CASES
        self.dtype = torch.bfloat16
        if self.test_case_idx == 0:
            self.batch_size = 1
            self.num_tokens = 1937
            self.hidden_size = 3584
            self.vocab_size = 152064
        elif self.test_case_idx == 1:
            self.batch_size = 1
            self.num_tokens = 2169
            self.hidden_size = 896
            self.vocab_size = 151936
        elif self.test_case_idx == 2:
            self.batch_size = 1
            self.num_tokens = 1530
            self.hidden_size = 2048
            self.vocab_size = 32256
        elif self.test_case_idx == 3:
            self.batch_size = 1
            self.num_tokens = 1388
            self.hidden_size = 4096
            self.vocab_size = 102400
        elif self.test_case_idx == 4:
            self.batch_size = 1
            self.num_tokens = 8192
            self.hidden_size = 4096
            self.vocab_size = 102400
else: raise ValueError(f"Invalid test case index: {self.test_case_idx}") assert MAX_TEST_CASES <= 5, "MAX_TEST_CASES should be less than or equal to 5." def generate_forward_inputs(self): hidden = ( torch.empty((self.batch_size, self.num_tokens, self.hidden_size), dtype=self.dtype, device="cuda") .uniform_(-0.5, 0.5) .requires_grad_() ) weight = ( torch.empty((self.vocab_size, self.hidden_size), dtype=self.dtype, device="cuda") .uniform_(-0.5, 0.5) .requires_grad_() ) labels = torch.randint(0, self.vocab_size, (self.batch_size, self.num_tokens), device="cuda") return hidden, weight, labels def generate_backward_inputs(self): g_entropy = torch.empty((self.num_tokens,), dtype=self.dtype, device="cuda").uniform_(-0.5, 0.5) g_logprobs = torch.empty((self.num_tokens,), dtype=self.dtype, device="cuda").uniform_(-1, 1) return g_entropy, g_logprobs def verify_correctness(self, iterations=5): self.cleanup() self.generate_hyper() torch_forward_latency = list() torch_backward_latency = list() verl_forward_latency = list() verl_backward_latency = list() verl_fused_forward_latency = list() verl_fused_backward_latency = list() kernel_forward_latency = list() kernel_backward_latency = list() start_event = torch.cuda.Event(enable_timing=True) end_event = torch.cuda.Event(enable_timing=True) for i in range(iterations): print(f"[INFO]: Iteration {i + 1} / {iterations}...", end="\r") hidden, weight, labels = self.generate_forward_inputs() start_event.record() (torch_logprobs, torch_entropy) = run_torch_entropy(hidden, weight, labels, self.temperature) end_event.record() torch.cuda.synchronize() torch_forward_latency.append(start_event.elapsed_time(end_event)) start_event.record() (verl_logprobs, verl_entropy) = run_verl_original_entropy(hidden, weight, labels, self.temperature) end_event.record() torch.cuda.synchronize() verl_forward_latency.append(start_event.elapsed_time(end_event)) start_event.record() (verl_fused_logprobs, verl_fused_entropy) = run_verl_torch_fused_entropy( hidden, weight, labels, self.temperature ) end_event.record() torch.cuda.synchronize() verl_fused_forward_latency.append(start_event.elapsed_time(end_event)) start_event.record() (kernel_logprobs, kernel_entropy) = linear_cross_entropy(hidden, weight, labels, self.temperature) end_event.record() torch.cuda.synchronize() kernel_forward_latency.append(start_event.elapsed_time(end_event)) torch.testing.assert_close(torch_logprobs, verl_logprobs, atol=1e-4, rtol=1e-4) torch.testing.assert_close(torch_entropy, verl_entropy, atol=1e-4, rtol=1e-4) torch.testing.assert_close(torch_logprobs, verl_fused_logprobs, atol=1e-4, rtol=1e-4) torch.testing.assert_close(torch_entropy, verl_fused_entropy, atol=1e-4, rtol=1e-4) torch.testing.assert_close(verl_logprobs, verl_fused_logprobs, atol=1e-4, rtol=1e-4) torch.testing.assert_close(verl_entropy, verl_fused_entropy, atol=1e-4, rtol=1e-4) torch.testing.assert_close(torch_logprobs, kernel_logprobs, atol=1e-3, rtol=2e-4) torch.testing.assert_close(torch_entropy, kernel_entropy, atol=5e-3, rtol=5e-4) torch.testing.assert_close(verl_logprobs, kernel_logprobs, atol=1e-3, rtol=2e-4) torch.testing.assert_close(verl_entropy, kernel_entropy, atol=5e-3, rtol=5e-4) torch.testing.assert_close(verl_fused_logprobs, kernel_logprobs, atol=1e-3, rtol=2e-4) torch.testing.assert_close(verl_fused_entropy, kernel_entropy, atol=5e-3, rtol=5e-4) # backward g_entropy, g_logprobs = self.generate_backward_inputs() start_event.record() (d_torch_hidden, d_torch_weight) = torch.autograd.grad( (torch_entropy, torch_logprobs), 
(hidden, weight), (g_entropy, g_logprobs), retain_graph=False ) end_event.record() torch.cuda.synchronize() torch_backward_latency.append(start_event.elapsed_time(end_event)) start_event.record() (d_verl_hidden, d_verl_weight) = torch.autograd.grad( (verl_entropy, verl_logprobs), (hidden, weight), (g_entropy, g_logprobs), retain_graph=False ) end_event.record() torch.cuda.synchronize() verl_backward_latency.append(start_event.elapsed_time(end_event)) start_event.record() (d_verl_fused_hidden, d_verl_fused_weight) = torch.autograd.grad( (verl_fused_entropy, verl_fused_logprobs), (hidden, weight), (g_entropy, g_logprobs), retain_graph=False ) end_event.record() torch.cuda.synchronize() verl_fused_backward_latency.append(start_event.elapsed_time(end_event)) start_event.record() (d_kernel_hidden, d_kernel_weight) = torch.autograd.grad( (kernel_entropy, kernel_logprobs), (hidden, weight), (g_entropy, g_logprobs), retain_graph=False ) end_event.record() torch.cuda.synchronize() kernel_backward_latency.append(start_event.elapsed_time(end_event)) torch.testing.assert_close(d_torch_hidden, d_verl_hidden, atol=1e-2, rtol=1e-4) torch.testing.assert_close(d_torch_weight, d_verl_weight, atol=1e-2, rtol=1e-4) torch.testing.assert_close(d_torch_hidden, d_verl_fused_hidden, atol=1e-2, rtol=1e-4) torch.testing.assert_close(d_torch_weight, d_verl_fused_weight, atol=1e-2, rtol=1e-4) torch.testing.assert_close(d_verl_hidden, d_verl_fused_hidden, atol=1e-2, rtol=1e-4) torch.testing.assert_close(d_verl_weight, d_verl_fused_weight, atol=1e-2, rtol=1e-4) torch.testing.assert_close(d_torch_hidden, d_verl_hidden, atol=1e-2, rtol=1e-4) torch.testing.assert_close(d_torch_weight, d_verl_weight, atol=1e-2, rtol=1e-4) torch.testing.assert_close(d_torch_hidden, d_kernel_hidden, atol=2e-2, rtol=4e-2) torch.testing.assert_close(d_torch_weight, d_kernel_weight, atol=2e-2, rtol=4e-2) torch.testing.assert_close(d_verl_hidden, d_kernel_hidden, atol=2e-2, rtol=4e-2) torch.testing.assert_close(d_verl_weight, d_kernel_weight, atol=2e-2, rtol=4e-2) torch.testing.assert_close(d_verl_fused_hidden, d_kernel_hidden, atol=2e-2, rtol=4e-2) torch.testing.assert_close(d_verl_fused_weight, d_kernel_weight, atol=2e-2, rtol=4e-2) # remove first latency torch_forward_latency = torch_forward_latency[1:] torch_backward_latency = torch_backward_latency[1:] verl_forward_latency = verl_forward_latency[1:] verl_backward_latency = verl_backward_latency[1:] verl_fused_forward_latency = verl_fused_forward_latency[1:] verl_fused_backward_latency = verl_fused_backward_latency[1:] kernel_forward_latency = kernel_forward_latency[1:] kernel_backward_latency = kernel_backward_latency[1:] print("\n[INFO]: Verified forward & backward correctness.") print( f"[INFO]: Forward pass: Torch implementation average time: " f"{sum(torch_forward_latency) / len(torch_forward_latency):.2f} ms" ) print( f"[INFO]: Backward pass: torch implementation average time: " f"{sum(torch_backward_latency) / len(torch_backward_latency):.2f} ms" ) print( f"[INFO]: Forward pass: VeRL implementation average time: " f"{sum(verl_forward_latency) / len(verl_forward_latency):.2f} ms" ) print( f"[INFO]: Backward pass: VeRL implementation average time: " f"{sum(verl_backward_latency) / len(verl_backward_latency):.2f} ms" ) print( f"[INFO]: Forward pass: VeRL Fused Entropy implementation average time: " f"{sum(verl_fused_forward_latency) / len(verl_fused_forward_latency):.2f} ms" ) print( f"[INFO]: Backward pass: VeRL Fused Entropy implementation average time: " 
f"{sum(verl_fused_backward_latency) / len(verl_fused_backward_latency):.2f} ms" ) print( f"[INFO]: Forward pass: Kernel implementation average time: " f"{sum(kernel_forward_latency) / len(kernel_forward_latency):.2f} ms" ) print( f"[INFO]: Backward pass: kernel implementation average time: " f"{sum(kernel_backward_latency) / len(kernel_backward_latency):.2f} ms" ) def check_storage(self, method_name, run_forward): self.cleanup() self.generate_hyper() hidden, weight, labels = self.generate_forward_inputs() torch.cuda.reset_peak_memory_stats() (logprobs, entropy) = run_forward(hidden, weight, labels, self.temperature) torch.cuda.synchronize() torch_max_memory = torch.cuda.max_memory_allocated() / 1024 / 1024 print(f"[INFO]: {method_name} Forward pass peak memory: {torch_max_memory:.2f} MB") g_entropy, g_logprobs = self.generate_backward_inputs() torch.cuda.reset_peak_memory_stats() (d_torch_hidden, d_torch_weight) = torch.autograd.grad( (entropy, logprobs), (hidden, weight), (g_entropy, g_logprobs), retain_graph=False ) torch.cuda.synchronize() torch_backward_max_memory = torch.cuda.max_memory_allocated() / 1024 / 1024 print(f"[INFO]: {method_name} Backward pass peak memory: {torch_backward_max_memory:.2f} MB") def check_storage_all(self): self.check_storage("Torch", run_torch_entropy) self.check_storage("VeRL", run_verl_original_entropy) self.check_storage("VeRL Torch Fused", run_verl_torch_fused_entropy) self.check_storage("Kernel", linear_cross_entropy) if __name__ == "__main__": # torch.cuda.memory._record_memory_history() for test_case_idx in range(MAX_TEST_CASES): print(f"[INFO] Running test case {test_case_idx}") test = TestLinearCrossEntropy(test_case_idx) test.verify_correctness() test.check_storage_all() # torch.cuda.memory._dump_snapshot("test_linear_cross_entropy.pkl") ================================================ FILE: verl_distillation/tests/utils/test_mlflow_key_sanitization.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from unittest.mock import patch from verl.utils.tracking import _MlflowLoggingAdapter class TestMlflowLoggingAdapter(unittest.TestCase): def test_sanitize_key_and_warning(self): adapter = _MlflowLoggingAdapter() data = {"valid_key": 1.0, "invalid@key!": 2.0, "another/valid-key": 3.0, "bad key#": 4.0} # Patch mlflow.log_metrics to capture the metrics actually sent with ( patch("mlflow.log_metrics") as mock_log_metrics, patch.object(adapter, "logger") as mock_logger, ): adapter.log(data, step=5) # Check that keys are sanitized sent_metrics = mock_log_metrics.call_args[1]["metrics"] self.assertIn("invalid_at_key_", sent_metrics) # @ becomes _at_, ! 
becomes _ self.assertIn("bad key_", sent_metrics) # # becomes _, space remains self.assertNotIn("invalid@key!", sent_metrics) self.assertNotIn("bad key#", sent_metrics) # Check that a warning was logged for each sanitized key warning_msgs = [str(call) for call in mock_logger.warning.call_args_list] self.assertTrue(any("invalid@key!" in msg and "invalid_at_key_" in msg for msg in warning_msgs)) self.assertTrue(any("bad key#" in msg and "bad key_" in msg for msg in warning_msgs)) if __name__ == "__main__": unittest.main() ================================================ FILE: verl_distillation/tests/utils/test_model_on_cpu.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from types import SimpleNamespace # Or use a mock object library import pytest from verl.utils.model import update_model_config # Parametrize with different override scenarios @pytest.mark.parametrize( "override_kwargs", [ {"param_a": 5, "new_param": "plain_added"}, {"param_a": 2, "nested_params": {"sub_param_x": "updated_x", "sub_param_z": True}}, ], ) def test_update_model_config(override_kwargs): """ Tests that update_model_config correctly updates attributes, handling both plain and nested overrides via parametrization. """ # Create a fresh mock config object for each test case mock_config = SimpleNamespace( param_a=1, nested_params=SimpleNamespace(sub_param_x="original_x", sub_param_y=100), other_param="keep_me" ) # Apply the updates using the parametrized override_kwargs update_model_config(mock_config, override_kwargs) # Assertions to check if the config was updated correctly if "nested_params" in override_kwargs: # Case 2: Nested override override_nested = override_kwargs["nested_params"] assert mock_config.nested_params.sub_param_x == override_nested["sub_param_x"], "Nested sub_param_x mismatch" assert mock_config.nested_params.sub_param_y == 100, "Nested sub_param_y should be unchanged" assert hasattr(mock_config.nested_params, "sub_param_z"), "Expected nested sub_param_z to be added" assert mock_config.nested_params.sub_param_z == override_nested["sub_param_z"], "Value of sub_param_z mismatch" else: # Case 1: Plain override (nested params untouched) assert mock_config.nested_params.sub_param_x == "original_x", "Nested sub_param_x should be unchanged" assert mock_config.nested_params.sub_param_y == 100, "Nested sub_param_y should be unchanged" assert not hasattr(mock_config.nested_params, "sub_param_z"), "Nested sub_param_z should not exist" ================================================ FILE: verl_distillation/tests/utils/test_nvtx_profile.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from unittest.mock import MagicMock, patch from verl.utils import omega_conf_to_dataclass from verl.utils.profiler.config import NsightToolConfig, ProfilerConfig from verl.utils.profiler.nvtx_profile import NsightSystemsProfiler class TestProfilerConfig(unittest.TestCase): def test_config_init(self): import os from hydra import compose, initialize_config_dir with initialize_config_dir(config_dir=os.path.abspath("verl/trainer/config")): cfg = compose(config_name="ppo_trainer") for config in [ cfg.actor_rollout_ref.actor.profiler, cfg.actor_rollout_ref.rollout.profiler, cfg.actor_rollout_ref.ref.profiler, cfg.critic.profiler, cfg.reward_model.profiler, ]: profiler_config = omega_conf_to_dataclass(config) self.assertEqual(profiler_config.tool, config.tool) self.assertEqual(profiler_config.enable, config.enable) self.assertEqual(profiler_config.all_ranks, config.all_ranks) self.assertEqual(profiler_config.ranks, config.ranks) self.assertEqual(profiler_config.save_path, config.save_path) self.assertEqual(profiler_config.ranks, config.ranks) assert isinstance(profiler_config, ProfilerConfig) with self.assertRaises(AttributeError): _ = profiler_config.non_existing_key assert config.get("non_existing_key") == profiler_config.get("non_existing_key") assert config.get("non_existing_key", 1) == profiler_config.get("non_existing_key", 1) def test_frozen_config(self): """Test that modifying frozen keys in ProfilerConfig raises exceptions.""" from dataclasses import FrozenInstanceError from verl.utils.profiler.config import ProfilerConfig # Create a new ProfilerConfig instance config = ProfilerConfig(all_ranks=False, ranks=[0]) with self.assertRaises(FrozenInstanceError): config.all_ranks = True with self.assertRaises(FrozenInstanceError): config.ranks = [1, 2, 3] with self.assertRaises(TypeError): config["all_ranks"] = True with self.assertRaises(TypeError): config["ranks"] = [1, 2, 3] class TestNsightSystemsProfiler(unittest.TestCase): """Test suite for NsightSystemsProfiler functionality. Test Plan: 1. Initialization: Verify profiler state after creation 2. Basic Profiling: Test start/stop functionality 3. Discrete Mode: TODO: Test discrete profiling behavior 4. Annotation: Test the annotate decorator in both normal and discrete modes 5. 
Config Validation: Verify proper config initialization from OmegaConf """ def setUp(self): self.config = ProfilerConfig(enable=True, all_ranks=True) self.rank = 0 self.profiler = NsightSystemsProfiler(self.rank, self.config, tool_config=NsightToolConfig(discrete=False)) def test_initialization(self): self.assertEqual(self.profiler.this_rank, True) self.assertEqual(self.profiler.this_step, False) def test_start_stop_profiling(self): with patch("torch.cuda.profiler.start") as mock_start, patch("torch.cuda.profiler.stop") as mock_stop: # Test start self.profiler.start() self.assertTrue(self.profiler.this_step) mock_start.assert_called_once() # Test stop self.profiler.stop() self.assertFalse(self.profiler.this_step) mock_stop.assert_called_once() # def test_discrete_profiling(self): # discrete_config = ProfilerConfig(discrete=True, all_ranks=True) # profiler = NsightSystemsProfiler(self.rank, discrete_config) # with patch("torch.cuda.profiler.start") as mock_start, patch("torch.cuda.profiler.stop") as mock_stop: # profiler.start() # self.assertTrue(profiler.this_step) # mock_start.assert_not_called() # Shouldn't start immediately in discrete mode # profiler.stop() # self.assertFalse(profiler.this_step) # mock_stop.assert_not_called() # Shouldn't stop immediately in discrete mode def test_annotate_decorator(self): mock_self = MagicMock() mock_self.profiler = self.profiler mock_self.profiler.this_step = True decorator = mock_self.profiler.annotate(message="test") @decorator def test_func(self, *args, **kwargs): return "result" with ( patch("torch.cuda.profiler.start") as mock_start, patch("torch.cuda.profiler.stop") as mock_stop, patch("verl.utils.profiler.nvtx_profile.mark_start_range") as mock_start_range, patch("verl.utils.profiler.nvtx_profile.mark_end_range") as mock_end_range, ): result = test_func(mock_self) self.assertEqual(result, "result") mock_start_range.assert_called_once() mock_end_range.assert_called_once() mock_start.assert_not_called() # Not discrete mode mock_stop.assert_not_called() # Not discrete mode # def test_annotate_discrete_mode(self): # discrete_config = ProfilerConfig(discrete=True, all_ranks=True) # profiler = NsightSystemsProfiler(self.rank, discrete_config) # mock_self = MagicMock() # mock_self.profiler = profiler # mock_self.profiler.this_step = True # @NsightSystemsProfiler.annotate(message="test") # def test_func(self, *args, **kwargs): # return "result" # with ( # patch("torch.cuda.profiler.start") as mock_start, # patch("torch.cuda.profiler.stop") as mock_stop, # patch("verl.utils.profiler.nvtx_profile.mark_start_range") as mock_start_range, # patch("verl.utils.profiler.nvtx_profile.mark_end_range") as mock_end_range, # ): # result = test_func(mock_self) # self.assertEqual(result, "result") # mock_start_range.assert_called_once() # mock_end_range.assert_called_once() # mock_start.assert_called_once() # Should start in discrete mode # mock_stop.assert_called_once() # Should stop in discrete mode if __name__ == "__main__": unittest.main() ================================================ FILE: verl_distillation/tests/utils/test_rollout_skip_on_cpu.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import shutil
import tempfile
from pathlib import Path
from unittest.mock import MagicMock

import pytest
import torch

from verl.utils.rollout_skip import DataProto, RolloutSkip

len_prompt = 50
len_response = 100


def temp_dir():
    # Create a temporary directory
    temp_dir = Path(tempfile.mkdtemp())
    yield temp_dir
    # Cleanup
    shutil.rmtree(temp_dir)


def build_generate_fn(gen_bs, n):
    len_tokenizer = 1024

    def iterate():
        while True:
            prompt = torch.randint(len_tokenizer, size=(gen_bs, len_prompt)).repeat_interleave(n, dim=0)
            generate = torch.randint(len_tokenizer, size=(gen_bs * n, len_response))
            data = DataProto.from_dict(tensors={"prompt": prompt, "response": generate})
            yield data

    mock_infer_engine = iterate()

    def fn(batch, **kwargs):
        # Simulate the inference engine returning the next batch
        return next(mock_infer_engine)

    return fn


@pytest.fixture(params=[(32, 4), (64, 4), (64, 8)])
def mock_rollout_wg(request):
    gen_bs, n = request.param
    # Hold on to a single temp_dir generator instance: next() yields the dump
    # path, and exhausting the same instance during teardown runs its
    # shutil.rmtree on that same path. Calling next(temp_dir()) twice would
    # create two unrelated directories and leak the one used for dumping.
    dump_dir_gen = temp_dir()
    rollout_wg = MagicMock()
    config = MagicMock()
    config.actor_rollout_ref.rollout = {
        "n": n,
        "skip_dump_dir": next(dump_dir_gen),
    }
    config.data = {"gen_batch_size": gen_bs}
    rollout_wg.generate_sequences = build_generate_fn(gen_bs, n)
    yield config, rollout_wg
    # Cleanup
    next(dump_dir_gen, None)


class TestRolloutSkip:
    def test_initialization(self, capsys):
        """Test that RolloutSkip initializes correctly"""
        config = MagicMock()
        config.actor_rollout_ref.rollout = {
            "n": 16,
            "skip_dump_dir": "tmp/rollout_dump",
        }
        config.data = {"gen_batch_size": 128}
        mock_rollout_wg = MagicMock()
        skip = RolloutSkip(config, mock_rollout_wg)
        assert skip.n == 16
        assert skip.gbs == 128
        assert str(skip.dumped_dir) == "tmp/rollout_dump"
        assert skip._rollout_wg == mock_rollout_wg
        skip.wrap_generate_sequences()
        captured = capsys.readouterr()
        assert "Successfully patched" in captured.out

    def test_generate_without_wrap(self, mock_rollout_wg):
        """Test that generate_sequences works without wrapping"""
        config, rollout_wg = mock_rollout_wg
        _ = RolloutSkip(config, rollout_wg)
        _result = rollout_wg.generate_sequences(MagicMock())
        for _ in range(10):
            result = rollout_wg.generate_sequences(MagicMock())
            assert isinstance(result, DataProto)
            # * make sure the data is different
            assert torch.abs(_result.batch["prompt"] - result.batch["prompt"]).sum() > 0
            assert torch.abs(_result.batch["response"] - result.batch["response"]).sum() > 0
            _result = result

    def test_dump(self, mock_rollout_wg, capsys):
        config, rollout_wg = mock_rollout_wg
        skip = RolloutSkip(config, rollout_wg)
        skip.wrap_generate_sequences()
        result = rollout_wg.generate_sequences(MagicMock())
        # * check if dump is OK
        assert skip.curr_path_dump.exists()
        captured = capsys.readouterr()
        assert "Successfully dump data in" in captured.out
        # * get file size, estimate file size
        file_size = skip.curr_path_dump.stat().st_size
        est_file_size = (len_prompt + len_response) * skip.gbs * skip.n * result.batch["prompt"].dtype.itemsize
        assert file_size >= est_file_size, "Dumped file size is smaller than expected"

    def test_generate_with_wrap(self, mock_rollout_wg, capsys):
        """Test that generate_sequences works with wrapping"""
        config, rollout_wg = mock_rollout_wg
        skip = RolloutSkip(config, rollout_wg)
        skip.wrap_generate_sequences()
        _result = rollout_wg.generate_sequences(MagicMock())
        for _ in range(10):
            result = rollout_wg.generate_sequences(MagicMock())
            assert isinstance(result, DataProto)
            # * make sure the data is identical, i.e. replayed from the dump
            assert torch.abs(_result.batch["prompt"] - result.batch["prompt"]).sum() == 0
            assert torch.abs(_result.batch["response"] - result.batch["response"]).sum() == 0
            captured = capsys.readouterr()
            assert "Successfully load pre-generated data from" in captured.out
            _result = result


================================================
FILE: verl_distillation/tests/utils/test_rollout_trace_on_cpu.py
================================================
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys
from unittest.mock import MagicMock, patch

import pytest

from verl.utils.rollout_trace import RolloutTraceConfig, rollout_trace_attr, rollout_trace_op


@pytest.fixture(autouse=True)
def reset_rollout_trace_config_singleton():
    """Fixture to reset the RolloutTraceConfig singleton before each test."""
    RolloutTraceConfig.reset()


@pytest.fixture
def mock_weave_client():
    """Mocks the weave module and its client, yielding the mock client."""
    mock_weave = MagicMock()
    mock_client = MagicMock()
    mock_call = MagicMock()
    mock_client.create_call.return_value = mock_call
    mock_weave.init.return_value = mock_client
    # Also mock the call_context if it's used internally by the decorator
    mock_weave.trace.context.call_context.return_value = MagicMock()
    with patch.dict(sys.modules, {"weave": mock_weave, "weave.trace.context": mock_weave.trace.context}):
        yield mock_client


class TracedClass:
    @rollout_trace_op
    # @weave.op
    # @mlflow.trace
    async def my_method(self, a, b="default"):
        return f"result: {a}, {b}"

    @rollout_trace_op
    # @weave.op
    # @mlflow.trace
    async def middle_method(self, a, b="default"):
        await self.my_method("test_a1", b="test_b1")
        return f"result: {a}, {b}"

    @rollout_trace_op
    # @mlflow.trace
    async def my_method_with_exception(self):
        raise ValueError("Test Exception")

    async def upper_method(self):
        await self.my_method("test_a0", b="test_b0")
        await self.middle_method("test_a2", b="test_b2")
        return True


class UntracedClass:
    @rollout_trace_op
    async def my_method(self, x):
        return x * 2


async def test_rollout_trace_on_untraced_class():
    """Tests that the decorator works correctly when no backend is configured."""
    instance = UntracedClass()
    assert await instance.my_method(10) == 20


async def test_rollout_trace_with_tracer(mock_weave_client):
    """Tests that the decorator calls the tracer's methods correctly."""
    RolloutTraceConfig.init(project_name="my-project", experiment_name="my-experiment", backend="weave")
    instance = TracedClass()
    assert RolloutTraceConfig.get_client() is mock_weave_client
    result = await instance.my_method("test_a", b="test_b")
    assert result == "result: test_a, test_b"
    mock_weave_client.create_call.assert_called_once()
    call_kwargs = mock_weave_client.create_call.call_args.kwargs
    assert call_kwargs["op"] == "TracedClass.my_method"
    expected_inputs = {"a": "test_a", "b": "test_b"}
    assert call_kwargs["inputs"] == expected_inputs
    mock_call = mock_weave_client.create_call.return_value
    mock_weave_client.finish_call.assert_called_once_with(mock_call, output=result)


async def test_rollout_trace_with_exception(mock_weave_client):
    """Tests that `finish` is called with the exception when one is raised."""
    RolloutTraceConfig.init(project_name="my-project", experiment_name="my-experiment", backend="weave")
    instance = TracedClass()
    with pytest.raises(ValueError, match="Test Exception"):
        await instance.my_method_with_exception()
    mock_weave_client.create_call.assert_called_once()
    mock_call = mock_weave_client.create_call.return_value
    mock_weave_client.finish_call.assert_called_once()
    # Check that finish_call was called with the exception
    args, kwargs = mock_weave_client.finish_call.call_args
    assert args[0] == mock_call
    assert "exception" in kwargs
    assert isinstance(kwargs["exception"], ValueError)


async def test_rollout_trace_with_dummy_backend(mock_weave_client):
    """Tests that the tracer is not called when the backend is 'dummy'."""
    RolloutTraceConfig.init(project_name="my-project", experiment_name="my-experiment", backend="dummy")
    instance = TracedClass()
    await instance.my_method("test_a")
    mock_weave_client.create_call.assert_not_called()


@pytest.mark.skipif(
    os.environ.get("RUN_WEAVE_INTEGRATION_TESTS", "false").lower() != "true",
    reason="Skipping weave integration test. Set RUN_WEAVE_INTEGRATION_TESTS=true to run.",
)
async def test_rollout_trace_with_real_weave_backend():
    """Integration test with a real weave backend."""
    # This assumes that the weave environment (e.g., project) is configured
    RolloutTraceConfig.init(project_name="my-project", experiment_name="my-experiment", backend="weave")
    instance = TracedClass()
    with rollout_trace_attr(step=1, sample_index=2, rollout_n=3):
        await instance.upper_method()
        with pytest.raises(ValueError, match="Test Exception"):
            await instance.my_method_with_exception()
    print("\nWeave integration test ran successfully. Check your weave project for the trace.")


@pytest.mark.skipif(
    os.environ.get("RUN_MLFLOW_INTEGRATION_TESTS", "false").lower() != "true",
    reason="Skipping mlflow integration test. Set RUN_MLFLOW_INTEGRATION_TESTS=true to run.",
)
async def test_rollout_trace_with_real_mlflow_backend():
    """Integration test with a real mlflow backend."""
    # This assumes that the mlflow environment (e.g., project) is configured
    RolloutTraceConfig.init(project_name="my-project", experiment_name="my-experiment", backend="mlflow")
    instance = TracedClass()
    with rollout_trace_attr(step=1, sample_index=2, rollout_n=3, name="agent_run"):
        assert await instance.upper_method()
        # with pytest.raises(ValueError, match="Test Exception"):
        #     await instance.my_method_with_exception()
    print("\nMlflow integration test ran successfully. Check your mlflow project for the trace.")


================================================
FILE: verl_distillation/tests/utils/test_seqlen_balancing.py
================================================
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch import torch.distributed as dist import torch.multiprocessing as mp from verl import DataProto from verl.utils.model import create_random_mask from verl.utils.seqlen_balancing import ( ceildiv, get_reverse_idx, prepare_dynamic_batch, rearrange_micro_batches, restore_dynamic_batch, ) def test_seqlen_balancing(): input_ids = torch.randint(low=0, high=10, size=(20, 100)) attention_mask = create_random_mask( input_ids=input_ids, max_ratio_of_left_padding=0.1, max_ratio_of_valid_token=0.9, min_ratio_of_valid_token=0.5 ) data = {"input_ids": input_ids, "attention_mask": attention_mask} dataproto = DataProto.from_single_dict(data) micro_batches, micro_bsz_idx_lst = rearrange_micro_batches(dataproto.batch, max_token_len=300) batch = torch.cat(micro_batches) micro_bsz_idx = [] for idx in micro_bsz_idx_lst: micro_bsz_idx.extend(idx) reverse_idx_map = get_reverse_idx(micro_bsz_idx) reverse_idx_map = torch.tensor(reverse_idx_map) new_batch = batch[reverse_idx_map] torch.testing.assert_close(new_batch, dataproto.batch) def test_dynamic_batch(): input_ids = torch.randint(low=0, high=10, size=(20, 100)) attention_mask = create_random_mask( input_ids=input_ids, max_ratio_of_left_padding=0.1, max_ratio_of_valid_token=0.9, min_ratio_of_valid_token=0.5 ) data = {"input_ids": input_ids, "attention_mask": attention_mask} dataproto = DataProto.from_single_dict(data) micro_batches, micro_bsz_idx_lst = prepare_dynamic_batch(dataproto, max_token_len=300) input_ids = torch.cat([micro_batch.batch["input_ids"] for micro_batch in micro_batches], dim=0) input_ids = restore_dynamic_batch(input_ids, micro_bsz_idx_lst) torch.testing.assert_close(input_ids, dataproto.batch["input_ids"]) def _worker(rank, world_size, init_method, max_token_len, use_same_dp, min_mb): # 1) init process group & CUDA torch.cuda.set_device(rank) dist.init_process_group( backend="nccl", init_method=init_method, world_size=world_size, rank=rank, ) # 2) build a small random batch (each rank different length to force mismatch) torch.manual_seed(42 + rank) input_ids = torch.randint(0, 10, (20 + rank * 5, 100), device=f"cuda:{rank}") attention_mask = create_random_mask( input_ids=input_ids, max_ratio_of_left_padding=0.1, max_ratio_of_valid_token=0.9, min_ratio_of_valid_token=0.5, ) dp = {"input_ids": input_ids, "attention_mask": attention_mask} proto = DataProto.from_single_dict(dp) batch = proto.batch # 3) call rearrange_micro_batches with one of the two params under test micros, idx_lst = rearrange_micro_batches( batch, max_token_len=max_token_len, dp_group=dist.group.WORLD, same_micro_num_in_dp=use_same_dp, min_num_micro_batch=min_mb, ) # 4) check the enforced counts seq_len_effective: torch.Tensor = batch["attention_mask"].sum(dim=1) total_seqlen = seq_len_effective.sum().item() local = min(len(seq_len_effective), ceildiv(total_seqlen, max_token_len)) if min_mb is not None: expected = max(local, min_mb) assert len(micros) == expected if use_same_dp: # gather all local_counts counts = [torch.zeros(1, device=f"cuda:{rank}") for _ in range(world_size)] counts[rank].fill_(local) dist.all_gather(counts, counts[rank]) expected = 
max(int(c.item()) for c in counts) assert len(micros) == expected else: # if neither, we get the local natural count assert len(micros) == local # 5) reconstruction sanity: concat→reverse_idx→orig flat = torch.cat(micros, dim=0) idx = [] for sub in idx_lst: idx.extend(sub) inv = get_reverse_idx(idx) inv = torch.tensor(inv, device=flat.device) reconstructed = flat[inv] torch.testing.assert_close(reconstructed, batch) dist.destroy_process_group() def test_dataproto_split_uneven(): """Test DataProto.split with uneven splits""" # Create test data with 10 items input_ids = torch.randint(low=0, high=10, size=(10, 5)) attention_mask = torch.ones(10, 5) data = {"input_ids": input_ids, "attention_mask": attention_mask} dataproto = DataProto.from_single_dict(data) # Test split with size 3 (should create chunks of [3, 3, 3, 1]) splits = dataproto.split(3) assert len(splits) == 4 assert len(splits[0]) == 3 assert len(splits[1]) == 3 assert len(splits[2]) == 3 assert len(splits[3]) == 1 reconstructed = DataProto.concat(splits) torch.testing.assert_close(reconstructed.batch["input_ids"], dataproto.batch["input_ids"]) torch.testing.assert_close(reconstructed.batch["attention_mask"], dataproto.batch["attention_mask"]) # Test split with size equal to length (should create one chunk) splits = dataproto.split(10) assert len(splits) == 1 assert len(splits[0]) == 10 # Test split with size larger than length (should create one chunk with all data) splits = dataproto.split(15) assert len(splits) == 1 assert len(splits[0]) == 10 # Test with non-tensor batch data import numpy as np data_with_non_tensor = { "input_ids": input_ids, "attention_mask": attention_mask, "labels": np.array([f"label_{i}" for i in range(10)], dtype=object), } dataproto_with_non_tensor = DataProto.from_single_dict(data_with_non_tensor) splits = dataproto_with_non_tensor.split(3) assert len(splits) == 4 assert len(splits[0]) == 3 assert len(splits[1]) == 3 assert len(splits[2]) == 3 assert len(splits[3]) == 1 # Verify non-tensor data integrity reconstructed = DataProto.concat(splits) np.testing.assert_array_equal( reconstructed.non_tensor_batch["labels"], dataproto_with_non_tensor.non_tensor_batch["labels"] ) def test_seqlen_balancing_distributed_params(tmp_path): world_size = 2 init_file = tmp_path / "dist_init" init_file.write_text("") # empty file init_method = f"file://{init_file}" # test min_num_micro_batch only mp.spawn( _worker, args=(world_size, init_method, 300, False, 4), nprocs=world_size, join=True, ) # test same_micro_num_in_dp only mp.spawn( _worker, args=(world_size, init_method, 300, True, None), nprocs=world_size, join=True, ) ================================================ FILE: verl_distillation/tests/utils/test_special_linear_cross_entropy_tp.py ================================================ # # SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Copyright 2024 Bytedance Ltd. 
and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

import torch
import torch.distributed as dist

try:
    from verl.utils.kernel.linear_cross_entropy import linear_cross_entropy
except ImportError:
    # FIXME: remove these manually included paths
    import sys

    sys.path.append(os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../")))
finally:
    from verl.utils.kernel.linear_cross_entropy import linear_cross_entropy

import verl.utils.torch_functional as verl_F

compute_entropy_from_logits = torch.compile(verl_F.entropy_from_logits, dynamic=True)

# Environment overrides arrive as strings, so coerce them explicitly:
# a non-empty string such as "False" is truthy, and range() rejects str.
MAX_TEST_CASES = int(os.environ.get("MAX_TEST_CASES", 5))
VERIFY_TORCH_SELF = str(os.environ.get("VERIFY_TORCH_SELF", False)).lower() in ("1", "true")
LOW_MEMORY = str(os.environ.get("LOW_MEMORY", False)).lower() in ("1", "true")
LOW_MEMORY_DIV_FACTOR = int(os.environ.get("LOW_MEMORY_DIV_FACTOR", 16))


def run_torch_entropy(
    hidden: torch.Tensor, weight: torch.Tensor, labels: torch.Tensor, temperature: float, reduction="none"
) -> tuple[torch.Tensor, torch.Tensor]:
    # [num_tokens, vocab_size]
    if len(hidden.shape) > 2:
        hidden = hidden.view(-1, hidden.shape[-1])  # [num_tokens, hidden_size]
    if len(labels.shape) > 1:
        labels = labels.view(-1)
    logits = torch.matmul(
        hidden.to(torch.float32),
        weight.to(torch.float32) if weight.size(0) == hidden.size(1) else weight.T.to(torch.float32),
    )
    logits /= temperature
    pd = torch.nn.functional.softmax(logits, dim=-1)  # [num_tokens, vocab_size]
    entropy_a = torch.logsumexp(logits, dim=-1)  # [num_tokens]
    entropy_b = torch.sum(pd * logits, dim=-1)  # [num_tokens]
    entropy = entropy_a - entropy_b
    logprobs = torch.nn.functional.cross_entropy(logits, labels, reduction=reduction)  # [num_tokens]
    logprobs = torch.neg(logprobs)
    return logprobs, entropy


class TorchEntropyTP(torch.autograd.Function):
    """
    it is used for testing the correctness of the kernel
    it is not efficient and is not recommended to use in practice
    """

    @staticmethod
    def forward(
        ctx,
        hidden: torch.Tensor,
        weight: torch.Tensor,
        labels: torch.Tensor,
        temperature: float,
        dist_process_group: torch.distributed.ProcessGroup,
    ):
        # weight has shape [vocab_size, hidden_size], hidden has shape [num_tokens, hidden_size]
        ctx.original_hidden_shape = hidden.shape
        if len(hidden.shape) > 2:
            hidden = hidden.view(-1, hidden.shape[-1])  # [num_tokens, hidden_size]
        if len(labels.shape) > 1:
            labels = labels.view(-1)
        logits = torch.matmul(hidden.to(torch.float32), weight.to(torch.float32).T)  # [num_tokens, vocab_size]
        logits /= temperature
        whole_logits = torch.empty(
            (logits.shape[0], logits.shape[1] * dist.get_world_size(dist_process_group)),
            dtype=logits.dtype,
            device=logits.device,
        )
        whole_logits_ref = [
            whole_logits[:, i * logits.shape[1] : (i + 1) * logits.shape[1]]
            for i in range(dist.get_world_size(dist_process_group))
        ]
        dist.all_gather(whole_logits_ref, logits, group=dist_process_group)
        pd = torch.nn.functional.softmax(whole_logits, dim=-1)
        entropy_a = torch.logsumexp(whole_logits, dim=-1)  # [num_tokens]
        entropy_b = torch.sum(pd * whole_logits, dim=-1)  # [num_tokens]
        entropy = entropy_a - entropy_b
        logprobs =
torch.nn.functional.cross_entropy(whole_logits, labels, reduction="none") logprobs = torch.neg(logprobs) ctx.save_for_backward(hidden, weight, labels, whole_logits, entropy_b) ctx.dist_process_group = dist_process_group ctx.temperature = temperature return logprobs, entropy @staticmethod def backward(ctx, g_logprobs: torch.Tensor, g_entropy: torch.Tensor): hidden, weight, labels, whole_logits, entropy_b = ctx.saved_tensors dist_process_group = ctx.dist_process_group temperature = ctx.temperature batch_size, hidden_size = hidden.shape vocab_size, hidden_size = weight.shape rank = dist.get_rank(dist_process_group) # Compute softmax probabilities maximum, _ = torch.max(whole_logits, dim=-1, keepdim=True) exp_logits = torch.exp(whole_logits - maximum) accumulate = exp_logits.sum(dim=-1, keepdim=True) pd = exp_logits / accumulate # Gradient for entropy # entropy = entropy_a - entropy_b # entropy_a = log(sum(exp(logits))) # entropy_b = sum(pd * logits) # d_entropy_a/d_logits = pd # d_entropy_b/d_logits = pd * (logits - b.unsqueeze(1) + 1) # d_entropy/d_logits = d_entropy_a - d_entropy_b # d_entropy/d_logits = pd - pd * (logits - b.unsqueeze(1) + 1) # d_entropy/d_logits = -pd * (logits - b.unsqueeze(1)) d_logits_entropy = g_entropy.unsqueeze(1) * (-pd * (whole_logits - entropy_b.unsqueeze(1))) # Gradient for logprobs # logprobs = -cross_entropy = -log(pd[labels]) # d_logprobs/d_logits = (pd - one_hot(labels)) one_hot = torch.zeros_like(whole_logits) one_hot.scatter_(1, labels.unsqueeze(1), 1) g_logprobs = torch.neg(g_logprobs) d_logits_logprobs = g_logprobs.unsqueeze(1) * (pd - one_hot) # NOTE: This will lead to wrong result # d_logits_logprobs = g_logprobs.unsqueeze(1) * (pd - 1) * one_hot # Combine gradients d_logits = d_logits_entropy + d_logits_logprobs d_logits /= temperature # Get local slice of gradients local_d_logits = d_logits[:, rank * vocab_size : (rank + 1) * vocab_size] # Compute gradients for hidden and weight d_hidden = torch.matmul(local_d_logits, weight.to(torch.float32)) d_weight = torch.matmul(local_d_logits.T, hidden.to(torch.float32)) d_hidden = d_hidden.view(ctx.original_hidden_shape) return d_hidden, d_weight, None, None, None run_torch_entropy_tp = TorchEntropyTP.apply class TestLinearCrossEntropy_TensorParallel: def __init__(self): dist.init_process_group(backend="nccl") self.group = dist.group.WORLD self.local_rank = dist.get_rank(self.group) self.world_size = dist.get_world_size(self.group) device = torch.device(f"cuda:{self.local_rank}") torch.cuda.set_device(device) print(f"[INFO]: Local rank: {self.local_rank}, World size: {self.world_size}") def initialize(self, test_case_idx: int, temperature: float = 1.5): self.test_case_idx = test_case_idx self.temperature = temperature def shutdown(self): dist.destroy_process_group() def cleanup(self): torch.cuda.empty_cache() torch.cuda.reset_peak_memory_stats() import gc gc.collect() torch.cuda.synchronize() def generate_hyper(self): global LOW_MEMORY, LOW_MEMORY_DIV_FACTOR, MAX_TEST_CASES self.dtype = torch.bfloat16 if self.test_case_idx == 0: self.batch_size = 1 self.num_tokens = 1937 self.hidden_size = 3584 self.vocab_size = 152064 elif self.test_case_idx == 1: self.batch_size = 1 self.num_tokens = 2169 self.hidden_size = 896 self.vocab_size = 151936 elif self.test_case_idx == 2: self.batch_size = 1 self.num_tokens = 1530 self.hidden_size = 2048 self.vocab_size = 32256 elif self.test_case_idx == 3: self.batch_size = 1 self.num_tokens = 1388 self.hidden_size = 4096 self.vocab_size = 102400 elif self.test_case_idx == 4: 
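# test case 4 reuses case 3's hidden/vocab shape (4096 / 102400) but raises num_tokens to 8192, so it mainly stresses longer sequences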
self.batch_size = 1 self.num_tokens = 8192 self.hidden_size = 4096 self.vocab_size = 102400 else: raise ValueError(f"Invalid test case index: {self.test_case_idx}") if LOW_MEMORY: self.vocab_size = int(self.vocab_size / LOW_MEMORY_DIV_FACTOR) assert MAX_TEST_CASES <= 5, "MAX_TEST_CASES should be less than or equal to 5." def generate_forward_inputs(self): hidden = ( torch.empty((self.batch_size, self.num_tokens, self.hidden_size), dtype=self.dtype, device="cuda") .uniform_(-0.5, 0.5) .requires_grad_() ) weight = ( torch.empty((self.vocab_size, self.hidden_size), dtype=self.dtype, device="cuda") .uniform_(-0.5, 0.5) .requires_grad_() ) labels = torch.randint(0, self.vocab_size, (self.batch_size, self.num_tokens), device="cuda") return hidden, weight, labels def generate_backward_inputs(self): g_entropy = torch.empty((self.num_tokens,), dtype=self.dtype, device="cuda").uniform_(-0.5, 0.5) g_logprobs = torch.empty((self.num_tokens,), dtype=self.dtype, device="cuda").uniform_(-1, 1) return g_entropy, g_logprobs def verify_torch_itself(self, iterations: int = 5): self.cleanup() self.generate_hyper() for i in range(iterations): hidden, weight, labels = self.generate_forward_inputs() # NOTE: we need to manually synchronize hidden and labels among Process Group dist.broadcast(hidden, src=0, group=self.group) dist.broadcast(labels, src=0, group=self.group) # forward pass # Create a tensor to hold the gathered weights from all ranks # weight has shape [vocab_size, hidden_size] # We want to gather along the first dimension to get [vocab_size * world_size, hidden_size] # Create a single contiguous tensor to hold all gathered weights whole_weight = torch.empty( (self.vocab_size * self.world_size, self.hidden_size), dtype=weight.dtype, device=weight.device ) # Create views into the tensor for each rank's portion whole_weight_views = [ whole_weight[i * self.vocab_size : (i + 1) * self.vocab_size] for i in range(self.world_size) ] # Perform all_gather operation using the views dist.all_gather(whole_weight_views, weight, group=self.group) # Set requires_grad for autograd whole_weight.requires_grad_() (single_logprobs, single_entropy) = run_torch_entropy(hidden, whole_weight, labels, self.temperature) (tp_logprobs, tp_entropy) = run_torch_entropy_tp(hidden, weight, labels, self.temperature, self.group) torch.testing.assert_close(single_logprobs, tp_logprobs, atol=1e-4, rtol=1e-4) torch.testing.assert_close(single_entropy, tp_entropy, atol=1e-4, rtol=1e-4) # backward pass g_entropy, g_logprobs = self.generate_backward_inputs() # NOTE: we need to manually synchronize g_entropy and g_logprobs among Process Group dist.broadcast(g_entropy, src=0, group=self.group) dist.broadcast(g_logprobs, src=0, group=self.group) (single_d_hidden, single_d_weight) = torch.autograd.grad( (single_entropy, single_logprobs), (hidden, whole_weight), (g_entropy, g_logprobs), retain_graph=False ) (tp_d_hidden, tp_d_weight) = torch.autograd.grad( (tp_entropy, tp_logprobs), (hidden, weight), (g_entropy, g_logprobs), retain_graph=False ) # NOTE: all-reduce on hidden is conducted outside the kernel dist.all_reduce(tp_d_hidden, op=dist.ReduceOp.SUM, group=self.group) torch.testing.assert_close(tp_d_hidden, single_d_hidden, atol=1e-2, rtol=1e-4) # Extract the corresponding slice from single_d_weight for comparison # tp_d_weight has shape [vocab_size, hidden_size] # single_d_weight has shape [vocab_size * world_size, hidden_size] torch.testing.assert_close( tp_d_weight, single_d_weight[self.local_rank * self.vocab_size : (self.local_rank + 
1) * self.vocab_size], atol=1e-2, rtol=1e-4, ) # atol=1e-3, rtol=1e-4) if self.local_rank == 0: print("[PASS] torch TP correctness is verified") def check_torch_storage(self): self.cleanup() self.generate_hyper() hidden, weight, labels = self.generate_forward_inputs() # NOTE: we need to manually synchronize hidden and labels among Process Group dist.broadcast(hidden, src=0, group=self.group) dist.broadcast(labels, src=0, group=self.group) torch.cuda.reset_peak_memory_stats() (tp_logprobs, tp_entropy) = run_torch_entropy_tp(hidden, weight, labels, self.temperature, self.group) torch.cuda.synchronize() forward_max_memory = torch.cuda.max_memory_allocated() / 1024 / 1024 g_entropy, g_logprobs = self.generate_backward_inputs() # NOTE: we need to manually synchronize g_entropy and g_logprobs among Process Group dist.broadcast(g_entropy, src=0, group=self.group) dist.broadcast(g_logprobs, src=0, group=self.group) torch.cuda.reset_peak_memory_stats() (d_tp_hidden, d_tp_weight) = torch.autograd.grad( (tp_entropy, tp_logprobs), (hidden, weight), (g_entropy, g_logprobs), retain_graph=False ) torch.cuda.synchronize() backward_max_memory = torch.cuda.max_memory_allocated() / 1024 / 1024 # NOTE: all-reduce on hidden is conducted outside the kernel dist.all_reduce(d_tp_hidden, op=dist.ReduceOp.SUM, group=self.group) if self.local_rank == 0: print(f"[INFO]: Torch Forward pass peak memory: {forward_max_memory:.2f} MB") print(f"[INFO]: Torch Backward pass peak memory: {backward_max_memory:.2f} MB") def verify_kernel_correctness(self, iterations: int = 5): self.cleanup() self.generate_hyper() torch_forward_latency = list() torch_backward_latency = list() kernel_forward_latency = list() kernel_backward_latency = list() start_event = torch.cuda.Event(enable_timing=True) end_event = torch.cuda.Event(enable_timing=True) for i in range(iterations): hidden, weight, labels = self.generate_forward_inputs() # NOTE: we need to manually synchronize hidden and labels among Process Group dist.broadcast(hidden, src=0, group=self.group) dist.broadcast(labels, src=0, group=self.group) start_event.record() (torch_logprobs, torch_entropy) = run_torch_entropy_tp(hidden, weight, labels, self.temperature, self.group) end_event.record() torch.cuda.synchronize() torch_forward_latency.append(start_event.elapsed_time(end_event)) start_event.record() (kernel_logprobs, kernel_entropy) = linear_cross_entropy( hidden, weight, labels, self.temperature, "none", self.group ) end_event.record() torch.cuda.synchronize() kernel_forward_latency.append(start_event.elapsed_time(end_event)) torch.testing.assert_close(torch_logprobs, kernel_logprobs, atol=1e-1, rtol=1e-2) torch.testing.assert_close(torch_entropy, kernel_entropy, atol=1e-1, rtol=1e-2) # backward pass g_entropy, g_logprobs = self.generate_backward_inputs() # NOTE: we need to manually synchronize g_entropy and g_logprobs among Process Group dist.broadcast(g_entropy, src=0, group=self.group) dist.broadcast(g_logprobs, src=0, group=self.group) start_event.record() (torch_d_hidden, torch_d_weight) = torch.autograd.grad( (torch_entropy, torch_logprobs), (hidden, weight), (g_entropy, g_logprobs), retain_graph=False ) end_event.record() torch.cuda.synchronize() torch_backward_latency.append(start_event.elapsed_time(end_event)) # NOTE: all-reduce on hidden is conducted outside the kernel dist.all_reduce(torch_d_hidden, op=dist.ReduceOp.SUM, group=self.group) start_event.record() (kernel_d_hidden, kernel_d_weight) = torch.autograd.grad( (kernel_entropy, kernel_logprobs), (hidden, weight), 
(g_entropy, g_logprobs), retain_graph=False ) end_event.record() torch.cuda.synchronize() kernel_backward_latency.append(start_event.elapsed_time(end_event)) # NOTE: all-reduce on hidden is conducted outside the kernel dist.all_reduce(kernel_d_hidden, op=dist.ReduceOp.SUM, group=self.group) torch.testing.assert_close(torch_d_hidden, kernel_d_hidden, atol=2e-2, rtol=4e-2) torch.testing.assert_close(torch_d_weight, kernel_d_weight, atol=2e-2, rtol=4e-2) # remove first latency torch_forward_latency = torch_forward_latency[1:] torch_backward_latency = torch_backward_latency[1:] kernel_forward_latency = kernel_forward_latency[1:] kernel_backward_latency = kernel_backward_latency[1:] if self.local_rank == 0: print("\n[PASS]: Verified kernel forward & backward correctness.") print( f"[INFO]: Forward pass: Torch implementation average time: " f"{sum(torch_forward_latency) / len(torch_forward_latency):.2f} ms" ) print( f"[INFO]: Backward pass: torch implementation average time: " f"{sum(torch_backward_latency) / len(torch_backward_latency):.2f} ms" ) print( f"[INFO]: Forward pass: Kernel implementation average time: " f"{sum(kernel_forward_latency) / len(kernel_forward_latency):.2f} ms" ) print( f"[INFO]: Backward pass: kernel implementation average time: " f"{sum(kernel_backward_latency) / len(kernel_backward_latency):.2f} ms" ) def check_kernel_storage(self): self.cleanup() self.generate_hyper() hidden, weight, labels = self.generate_forward_inputs() # NOTE: we need to manually synchronize hidden and labels among Process Group dist.broadcast(hidden, src=0, group=self.group) dist.broadcast(labels, src=0, group=self.group) torch.cuda.reset_peak_memory_stats() (kernel_logprobs, kernel_entropy) = linear_cross_entropy( hidden, weight, labels, self.temperature, "none", self.group ) torch.cuda.synchronize() kernel_max_memory = torch.cuda.max_memory_allocated() / 1024 / 1024 g_entropy, g_logprobs = self.generate_backward_inputs() # NOTE: we need to manually synchronize g_entropy and g_logprobs among Process Group dist.broadcast(g_entropy, src=0, group=self.group) dist.broadcast(g_logprobs, src=0, group=self.group) torch.cuda.reset_peak_memory_stats() (d_kernel_hidden, d_kernel_weight) = torch.autograd.grad( (kernel_entropy, kernel_logprobs), (hidden, weight), (g_entropy, g_logprobs), retain_graph=False ) torch.cuda.synchronize() kernel_backward_max_memory = torch.cuda.max_memory_allocated() / 1024 / 1024 # NOTE: all-reduce on hidden is conducted outside the kernel dist.all_reduce(d_kernel_hidden, op=dist.ReduceOp.SUM, group=self.group) if self.local_rank == 0: print(f"[INFO]: Kernel Forward pass peak memory: {kernel_max_memory:.2f} MB") print(f"[INFO]: Kernel Backward pass peak memory: {kernel_backward_max_memory:.2f} MB") if __name__ == "__main__": # TP command: torchrun --standalone --nnodes=1 --nproc-per-node=2 tests/kernels/test_linear_cross_entropy_tp.py # Check if running with torchrun (distributed mode) assert int(os.environ["WORLD_SIZE"]) > 1, ( "[ERROR]: This test is designed to run in distributed mode with torchrun. Please use torchrun to " "execute this script." 
) torch.manual_seed(233376 + int(os.environ.get("RANK", 0))) # set_backward_method(BackwardEnum._Total_Fuse_MN) # set_backward_method(BackwardEnum._Split_Dlogits_N) test = TestLinearCrossEntropy_TensorParallel() for test_case_idx in range(MAX_TEST_CASES): print(f"[INFO] Running test case {test_case_idx}") test.initialize(test_case_idx) if VERIFY_TORCH_SELF: test.verify_torch_itself() test.check_torch_storage() test.verify_kernel_correctness() test.check_kernel_storage() test.shutdown() ================================================ FILE: verl_distillation/tests/utils/test_special_mstx_profile.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from unittest.mock import MagicMock, patch from verl.utils.profiler.config import NPUToolConfig, ProfilerConfig from verl.utils.profiler.mstx_profile import NPUProfiler class TestNPUProfilerInitialization(unittest.TestCase): def setUp(self): NPUProfiler._define_count = 0 def test_init_with_default_config(self): tool_config = NPUToolConfig() profiler = NPUProfiler(rank=0, config=None, tool_config=tool_config) self.assertFalse(profiler.enable) self.assertFalse(hasattr(profiler, "profile_npu")) def test_init_with_disabled_config(self): config = ProfilerConfig(enable=False) tool_config = NPUToolConfig() profiler = NPUProfiler(rank=0, config=config, tool_config=tool_config) self.assertFalse(profiler.enable) self.assertFalse(hasattr(profiler, "profile_npu")) def test_init_with_all_ranks_true(self): config = ProfilerConfig(enable=True, all_ranks=True) tool_config = NPUToolConfig() profiler = NPUProfiler(rank=0, config=config, tool_config=tool_config) self.assertTrue(profiler.this_rank) def test_init_with_ranks_list(self): config = ProfilerConfig(enable=True, ranks=[1, 2]) tool_config = NPUToolConfig() profiler = NPUProfiler(rank=1, config=config, tool_config=tool_config) self.assertTrue(profiler.this_rank) def test_init_with_rank_not_in_ranks(self): config = ProfilerConfig(enable=True, ranks=[1, 2]) tool_config = NPUToolConfig() profiler = NPUProfiler(rank=3, config=config, tool_config=tool_config) self.assertFalse(profiler.this_rank) class TestNPUProfilerStart(unittest.TestCase): def setUp(self): NPUProfiler._define_count = 0 self.config = ProfilerConfig(enable=True, ranks=[0]) self.tool_config = NPUToolConfig(discrete=False) @patch("verl.utils.profiler.mstx_profile.get_npu_profiler") def test_start_when_enabled_and_this_rank(self, mock_get_profiler): profiler = NPUProfiler(rank=0, config=self.config, tool_config=self.tool_config) profiler.start(role="worker", profile_step="1") self.assertTrue(profiler.this_step) self.assertEqual(NPUProfiler._define_count, 1) mock_get_profiler.assert_called_once() @patch("verl.utils.profiler.mstx_profile.get_npu_profiler") def test_start_when_not_this_rank(self, mock_get_profiler): profiler = NPUProfiler(rank=1, config=self.config, tool_config=self.tool_config) profiler.start() self.assertFalse(profiler.this_step) 
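# NPUProfiler._define_count is a class-level counter shared by all instances; it should stay at 0 here because rank 1 is not among the profiled ranks, so start() never opened a profiling session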
self.assertEqual(NPUProfiler._define_count, 0) mock_get_profiler.assert_not_called() @patch("verl.utils.profiler.mstx_profile.get_npu_profiler") def test_start_discrete_mode_does_not_increase_count(self, mock_get_profiler): tool_config = NPUToolConfig(discrete=True) profiler = NPUProfiler(rank=0, config=self.config, tool_config=tool_config) profiler.start() self.assertEqual(NPUProfiler._define_count, 0) mock_get_profiler.assert_not_called() @patch("verl.utils.profiler.mstx_profile.get_npu_profiler") def test_multiple_start_calls_do_not_increase_count(self, mock_get_profiler): profiler = NPUProfiler(rank=0, config=self.config, tool_config=self.tool_config) profiler.start() profiler.start() self.assertEqual(NPUProfiler._define_count, 1) mock_get_profiler.assert_called_once() class TestNPUProfilerStartStopInteraction(unittest.TestCase): def setUp(self): NPUProfiler._define_count = 0 self.config = ProfilerConfig(enable=True, ranks=[0]) self.tool_config = NPUToolConfig(discrete=False) @patch("verl.utils.profiler.mstx_profile.get_npu_profiler") def test_start_stop_cycle(self, mock_get_profiler): mock_profile_npu = MagicMock() mock_get_profiler.return_value = mock_profile_npu profiler = NPUProfiler(rank=0, config=self.config, tool_config=self.tool_config) profiler.start() self.assertEqual(NPUProfiler._define_count, 1) self.assertEqual(mock_profile_npu.start.call_count, 1) profiler.stop() self.assertEqual(NPUProfiler._define_count, 0) self.assertEqual(mock_profile_npu.step.call_count, 1) self.assertEqual(mock_profile_npu.stop.call_count, 1) @patch("verl.utils.profiler.mstx_profile.get_npu_profiler") def test_multiple_instances_share_define_count(self, mock_get_profiler): mock_profile_npu = MagicMock() mock_get_profiler.return_value = mock_profile_npu profiler1 = NPUProfiler(rank=0, config=self.config, tool_config=self.tool_config) profiler2 = NPUProfiler(rank=0, config=self.config, tool_config=self.tool_config) profiler1.start() profiler2.start() self.assertEqual(NPUProfiler._define_count, 1) self.assertEqual(mock_profile_npu.start.call_count, 1) profiler1.stop() self.assertEqual(NPUProfiler._define_count, 0) class TestNPUProfilerAnnotate(unittest.TestCase): def setUp(self): self.config = ProfilerConfig(enable=True, all_ranks=True) self.tool_config = NPUToolConfig(discrete=False) self.rank = 0 def test_annotate_decorator_applied_correctly(self): mock_worker = MagicMock() mock_worker.profiler = NPUProfiler(rank=self.rank, config=self.config, tool_config=self.tool_config) mock_worker.profiler.this_step = True mock_mark_range = "mocked_range_handle" with ( patch("verl.utils.profiler.mstx_profile.mark_start_range") as mock_start_patch, patch("verl.utils.profiler.mstx_profile.mark_end_range") as mock_end_patch, ): mock_start_patch.return_value = mock_mark_range with patch("verl.utils.profiler.mstx_profile.get_npu_profiler") as mock_get_profiler: decorator = mock_worker.profiler.annotate(message="test") @decorator def test_func(self, *args, **kwargs): return "result" result = test_func(mock_worker) self.assertEqual(result, "result") mock_start_patch.assert_called_once_with(message="test") mock_end_patch.assert_called_once_with(mock_mark_range) mock_get_profiler.assert_not_called() def test_annotate_when_profiler_disabled(self): disabled_config = ProfilerConfig(enable=False) mock_worker = MagicMock() mock_worker.profiler = NPUProfiler(rank=self.rank, config=disabled_config, tool_config=self.tool_config) with ( patch("verl.utils.profiler.mstx_profile.mark_start_range") as mock_start_patch, 
patch("verl.utils.profiler.mstx_profile.mark_end_range") as mock_end_patch, patch("verl.utils.profiler.mstx_profile.get_npu_profiler") as mock_get_profiler, ): decorator = mock_worker.profiler.annotate(message="test") @decorator def test_func(self, *args, **kwargs): return "result" result = test_func(mock_worker) self.assertEqual(result, "result") mock_start_patch.assert_not_called() mock_end_patch.assert_not_called() mock_get_profiler.assert_not_called() def test_annotate_when_this_step_disabled(self): mock_worker = MagicMock() mock_worker.profiler = NPUProfiler(rank=self.rank, config=self.config, tool_config=self.tool_config) mock_worker.profiler.this_step = False with ( patch("verl.utils.profiler.mstx_profile.mark_start_range") as mock_start_patch, patch("verl.utils.profiler.mstx_profile.mark_end_range") as mock_end_patch, patch("verl.utils.profiler.mstx_profile.get_npu_profiler") as mock_get_profiler, ): decorator = mock_worker.profiler.annotate(message="test") @decorator def test_func(self, *args, **kwargs): return "result" result = test_func(mock_worker) self.assertEqual(result, "result") mock_start_patch.assert_not_called() mock_end_patch.assert_not_called() mock_get_profiler.assert_not_called() def test_annotate_discrete_mode_enabled(self): discrete_tool_config = NPUToolConfig(discrete=True) mock_worker = MagicMock() mock_worker.profiler = NPUProfiler(rank=self.rank, config=self.config, tool_config=discrete_tool_config) mock_worker.profiler.this_step = True mock_mark_range = "mocked_range_handle" mock_profile_npu = MagicMock() with ( patch("verl.utils.profiler.mstx_profile.mark_start_range") as mock_start_patch, patch("verl.utils.profiler.mstx_profile.mark_end_range") as mock_end_patch, patch("verl.utils.profiler.mstx_profile.get_npu_profiler") as mock_get_profiler, ): mock_start_patch.return_value = mock_mark_range mock_get_profiler.return_value = mock_profile_npu decorator = mock_worker.profiler.annotate(message="test", role="test_role") @decorator def test_func(self, *args, **kwargs): return "result" result = test_func(mock_worker) self.assertEqual(result, "result") mock_start_patch.assert_called_once_with(message="test") mock_end_patch.assert_called_once_with(mock_mark_range) mock_get_profiler.assert_called_once_with( contents=mock_worker.profiler.profile_contents, profile_level=mock_worker.profiler.profile_level, profile_save_path=mock_worker.profiler.profile_save_path, analysis=mock_worker.profiler.analysis, role="test_role", ) mock_profile_npu.start.assert_called_once() mock_profile_npu.step.assert_called_once() mock_profile_npu.stop.assert_called_once() def test_annotate_with_default_message(self): mock_worker = MagicMock() mock_worker.profiler = NPUProfiler(rank=self.rank, config=self.config, tool_config=self.tool_config) mock_worker.profiler.this_step = True mock_mark_range = "mocked_range_handle" with ( patch("verl.utils.profiler.mstx_profile.mark_start_range") as mock_start_patch, patch("verl.utils.profiler.mstx_profile.mark_end_range") as mock_end_patch, ): mock_start_patch.return_value = mock_mark_range decorator = mock_worker.profiler.annotate() @decorator def test_func(self, *args, **kwargs): return "result" test_func(mock_worker) mock_start_patch.assert_called_once_with(message="test_func") mock_end_patch.assert_called_once_with(mock_mark_range) if __name__ == "__main__": unittest.main() ================================================ FILE: verl_distillation/tests/utils/test_temp_env_on_cpu.py ================================================ # Copyright 2024 
Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import pytest from verl.utils.py_functional import temp_env_var @pytest.fixture(autouse=True) def clean_env(): """Fixture to clean up environment variables before and after each test.""" # Store original environment state original_env = dict(os.environ) # Clean up any test variables that might exist test_vars = ["TEST_VAR", "TEST_VAR_2", "EXISTING_VAR"] for var in test_vars: if var in os.environ: del os.environ[var] # Yield control to the test function yield # Restore original environment state after test os.environ.clear() os.environ.update(original_env) def test_set_new_env_var(): """Test setting a new environment variable that didn't exist before.""" # Ensure variable doesn't exist assert "TEST_VAR" not in os.environ with temp_env_var("TEST_VAR", "test_value"): # Variable should be set inside context assert os.environ["TEST_VAR"] == "test_value" assert "TEST_VAR" in os.environ # Variable should be removed after context assert "TEST_VAR" not in os.environ def test_restore_existing_env_var(): """Test restoring an environment variable that already existed.""" # Set up existing variable os.environ["EXISTING_VAR"] = "original_value" with temp_env_var("EXISTING_VAR", "temporary_value"): # Variable should be temporarily changed assert os.environ["EXISTING_VAR"] == "temporary_value" # Variable should be restored to original value assert os.environ["EXISTING_VAR"] == "original_value" def test_env_var_restored_on_exception(): """Test that environment variables are restored even when exceptions occur.""" # Set up existing variable os.environ["EXISTING_VAR"] = "original_value" with pytest.raises(ValueError): with temp_env_var("EXISTING_VAR", "temporary_value"): # Verify variable is set assert os.environ["EXISTING_VAR"] == "temporary_value" # Raise exception raise ValueError("Test exception") # Variable should still be restored despite exception assert os.environ["EXISTING_VAR"] == "original_value" def test_nested_context_managers(): """Test nested temp_env_var context managers.""" # Set up original variable os.environ["TEST_VAR"] = "original" with temp_env_var("TEST_VAR", "level1"): assert os.environ["TEST_VAR"] == "level1" with temp_env_var("TEST_VAR", "level2"): assert os.environ["TEST_VAR"] == "level2" # Should restore to level1 assert os.environ["TEST_VAR"] == "level1" # Should restore to original assert os.environ["TEST_VAR"] == "original" def test_multiple_different_vars(): """Test setting multiple different environment variables.""" # Set up one existing variable os.environ["EXISTING_VAR"] = "existing_value" with temp_env_var("EXISTING_VAR", "modified"): with temp_env_var("TEST_VAR", "new_value"): assert os.environ["EXISTING_VAR"] == "modified" assert os.environ["TEST_VAR"] == "new_value" # Check restoration assert os.environ["EXISTING_VAR"] == "existing_value" assert "TEST_VAR" not in os.environ def test_empty_string_value(): """Test setting environment variable to empty string.""" with temp_env_var("TEST_VAR", 
""): assert os.environ["TEST_VAR"] == "" assert "TEST_VAR" in os.environ # Should be removed after context assert "TEST_VAR" not in os.environ def test_overwrite_with_empty_string(): """Test overwriting existing variable with empty string.""" os.environ["EXISTING_VAR"] = "original" with temp_env_var("EXISTING_VAR", ""): assert os.environ["EXISTING_VAR"] == "" # Should restore original value assert os.environ["EXISTING_VAR"] == "original" def test_context_manager_returns_none(): """Test that context manager yields None.""" with temp_env_var("TEST_VAR", "value") as result: assert result is None assert os.environ["TEST_VAR"] == "value" ================================================ FILE: verl_distillation/tests/utils/test_timeout_decorator_cpu.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import multiprocessing import sys import threading import time import pytest # Import pytest from verl.utils.py_functional import timeout_limit as timeout # --- Test Task Functions --- TEST_TIMEOUT_SECONDS = 1.5 # Timeout duration for tests LONG_TASK_DURATION = TEST_TIMEOUT_SECONDS + 0.5 # Duration slightly longer than timeout @timeout(seconds=TEST_TIMEOUT_SECONDS) # Keep global decorator for mp tests def quick_task(x): """A task that completes quickly.""" time.sleep(0.1) return "quick_ok" @timeout(seconds=TEST_TIMEOUT_SECONDS) # Keep global decorator for mp tests def slow_task(x): """A task that takes longer than the timeout.""" time.sleep(LONG_TASK_DURATION) return "slow_finished" # This return value indicates it didn't time out # REMOVE global decorator here def task_raises_value_error(): # Now truly not globally decorated """A task that intentionally raises a ValueError.""" raise ValueError("Specific value error from task") # --- Top-level function for signal test in subprocess --- # Keep this decorated globally for the specific subprocess test case @timeout(seconds=TEST_TIMEOUT_SECONDS, use_signals=True) def top_level_decorated_quick_task_signal(): """A pickleable top-level function decorated with signal timeout.""" # Assuming this calls the logic of quick_task directly for the test purpose time.sleep(0.1) return "quick_ok_signal_subprocess" # Different return for clarity if needed # --- Top-level function for signal test in subprocess --- # Keep this decorated globally for the specific subprocess test case @timeout(seconds=TEST_TIMEOUT_SECONDS, use_signals=True) def top_level_decorated_slow_task_signal(): """A pickleable top-level function decorated with signal timeout.""" time.sleep(LONG_TASK_DURATION) return "slow_finished" # --- NEW: Top-level helper function to run target in process --- def run_target_and_put_in_queue(target_func, q): """ Top-level helper function to run a target function and put its result or exception into a queue. This function is pickleable and can be used as the target for multiprocessing.Process. 
""" try: result = target_func() q.put(("success", result)) except Exception as e: q.put(("error", e)) # Use a module-level fixture to set the start method on macOS @pytest.fixture(scope="module", autouse=True) # Changed scope to module def set_macos_start_method(): if sys.platform == "darwin": # Force fork method on macOS to avoid pickling issues with globally decorated functions # when running tests via pytest discovery. current_method = multiprocessing.get_start_method(allow_none=True) # Only set if not already set or if set to something else (less likely in test run) if current_method is None or current_method != "fork": try: multiprocessing.set_start_method("fork", force=True) except RuntimeError: # Might fail if context is already started, ignore in that case. pass def test_quick_task(): # Renamed from test_multiprocessing_quick_task """Tests timeout handles a quick task correctly.""" # Call the globally decorated function directly result = quick_task(1) assert result == "quick_ok" # Use pytest assert def test_slow_task_timeout(): # Renamed from test_multiprocessing_slow_task_timeout """Tests timeout correctly raises TimeoutError for a slow task.""" # Call the globally decorated function directly within pytest.raises with pytest.raises(TimeoutError) as excinfo: # Use pytest.raises slow_task(1) # Check the error message from the multiprocessing implementation assert f"timed out after {TEST_TIMEOUT_SECONDS} seconds" in str(excinfo.value) # Use pytest assert def test_internal_exception(): # Renamed from test_multiprocessing_internal_exception """Tests timeout correctly propagates internal exceptions.""" # Apply the default timeout decorator dynamically to the undecorated function decorated_task = timeout(seconds=TEST_TIMEOUT_SECONDS)(task_raises_value_error) # Apply decorator dynamically with pytest.raises(ValueError) as excinfo: # Use pytest.raises decorated_task() # Call the dynamically decorated function assert str(excinfo.value) == "Specific value error from task" # Use pytest assert # --- Test the signal implementation (use_signals=True) --- # Note: As per py_functional.py, use_signals=True currently falls back to # multiprocessing on POSIX. These tests verify that behavior. def test_signal_quick_task_main_process(): # Removed self """Tests signal timeout handles a quick task correctly in the main process.""" # Apply the signal decorator dynamically def plain_quick_task_logic(): time.sleep(0.1) return "quick_ok_signal" decorated_task = timeout(seconds=TEST_TIMEOUT_SECONDS, use_signals=True)(plain_quick_task_logic) assert decorated_task() == "quick_ok_signal" # Use pytest assert def test_signal_slow_task_main_process_timeout(): # Removed self """Tests signal timeout correctly raises TimeoutError for a slow task in the main process.""" # Apply the signal decorator dynamically def plain_slow_task_logic(): time.sleep(LONG_TASK_DURATION) return "slow_finished_signal" decorated_task = timeout(seconds=TEST_TIMEOUT_SECONDS, use_signals=True)(plain_slow_task_logic) with pytest.raises(TimeoutError) as excinfo: # Use pytest.raises decorated_task() # Check the error message (falls back to multiprocessing message on POSIX) assert f"timed out after {TEST_TIMEOUT_SECONDS} seconds" in str(excinfo.value) # Use pytest assert @pytest.mark.skip(reason="this test won't pass. Just to show why use_signals should not be used") def test_signal_in_thread_does_not_timeout(): """ Tests that signal-based timeout does NOT work reliably in a child thread. 
The TimeoutError from the signal handler is not expected to be raised. """ result_container = [] # Use a list to store result from thread exception_container = [] # Use a list to store exception from thread @timeout(seconds=TEST_TIMEOUT_SECONDS, use_signals=True) def slow_task_in_thread(): try: print("Thread: Starting slow task...") time.sleep(LONG_TASK_DURATION) print("Thread: Slow task finished.") return "slow_finished_in_thread" except Exception as e: # Catch any exception within the thread's target function print(f"Thread: Caught exception: {e}") exception_container.append(e) return None # Indicate failure def thread_target(): try: # Run the decorated function inside the thread res = slow_task_in_thread() if res is not None: result_container.append(res) except Exception as e: # This might catch exceptions happening *outside* the decorated function # but still within the thread target, though less likely here. print(f"Thread Target: Caught exception: {e}") exception_container.append(e) thread = threading.Thread(target=thread_target) print("Main: Starting thread...") thread.start() # Wait longer than the timeout + task duration to ensure the thread finishes # regardless of whether timeout worked or not. thread.join(timeout=LONG_TASK_DURATION + 1) assert len(exception_container) == 1 assert isinstance(exception_container[0], TimeoutError) assert not result_container def test_in_thread_timeout(): result_container = [] # Use a list to store result from thread exception_container = [] # Use a list to store exception from thread @timeout(seconds=TEST_TIMEOUT_SECONDS, use_signals=False) def slow_task_in_thread(): try: print("Thread: Starting slow task...") time.sleep(LONG_TASK_DURATION) print("Thread: Slow task finished.") return "slow_finished_in_thread" except Exception as e: # Catch any exception within the thread's target function print(f"Thread: Caught exception: {e}") exception_container.append(e) return None # Indicate failure def thread_target(): try: # Run the decorated function inside the thread res = slow_task_in_thread() if res is not None: result_container.append(res) except Exception as e: # This might catch exceptions happening *outside* the decorated function # but still within the thread target, though less likely here. print(f"Thread Target: Caught exception: {e}") exception_container.append(e) thread = threading.Thread(target=thread_target) print("Main: Starting thread...") thread.start() # Wait longer than the timeout + task duration to ensure the thread finishes # regardless of whether timeout worked or not. thread.join(timeout=LONG_TASK_DURATION + 1) assert len(exception_container) == 1 assert isinstance(exception_container[0], TimeoutError) assert not result_container ================================================ FILE: verl_distillation/tests/utils/test_torch_functional.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
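# The tests below pin down the semantics of masked_mean and its distributed
# variants. As a minimal single-process sketch (an illustration of the expected
# behavior, not verl's implementation), masked_mean reduces to
# sum(value * mask) / sum(mask):
#
#     import torch
#     v = torch.tensor([1.0, 2.0, 3.0, 4.0])
#     m = torch.tensor([1.0, 0.0, 0.0, 1.0])
#     torch.testing.assert_close((v * m).sum() / m.sum(), torch.tensor(2.5))
#
# distributed_mean_max_min_std is expected to return identical statistics on
# every rank, with the std using the Bessel-corrected (n - 1) denominator, as
# the expected values computed in _worker_mean below assume.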
import os import pytest import torch import torch.distributed as dist import torch.multiprocessing as mp from verl.utils.torch_functional import distributed_masked_mean, distributed_mean_max_min_std, masked_mean def _worker_mean(rank: int, world_size: int, rendezvous_file: str): # 1) set GPU and init NCCL torch.cuda.set_device(rank) dist.init_process_group( backend="nccl", init_method=f"file://{rendezvous_file}", rank=rank, world_size=world_size, ) # each rank holds tensor [rank+1] local = torch.tensor([float(rank + 1)], device=f"cuda:{rank}") mean, gmax, gmin, gstd = distributed_mean_max_min_std(local, True, True, True) values = [float(i + 1) for i in range(world_size)] exp_mean = sum(values) / len(values) exp_max = max(values) exp_min = min(values) var = sum((x - exp_mean) ** 2 for x in values) / (len(values) - 1) exp_std = var**0.5 # all ranks should see the same result assert torch.allclose(mean.cpu(), torch.tensor(exp_mean)), f"mean@{rank}" assert torch.allclose(gmax.cpu(), torch.tensor(exp_max)), f"max@{rank}" assert torch.allclose(gmin.cpu(), torch.tensor(exp_min)), f"min@{rank}" assert torch.allclose(gstd.cpu(), torch.tensor(exp_std)), f"std@{rank}" dist.destroy_process_group() @pytest.mark.parametrize( "value,mask,gt", [ ([1.0, 2.0, 3.0, 4.0], [1, 0, 0, 1], 2.5), ([1.0, 2.0, float("nan"), 4.0], [1, 0, 0, 1], 2.5), ([1.0, 2.0, float("nan"), 4.0], [1, 0, 1, 0], float("nan")), ], ) def test_masked_mean(value, mask, gt): res = masked_mean(torch.tensor(value), torch.tensor(mask)) gt = torch.tensor(gt) assert torch.allclose(res, gt) or (torch.isnan(res) and torch.isnan(gt)) @pytest.mark.parametrize("world_size", [2, 4]) def test_distributed_mean_max_min_std(world_size, tmp_path): rendezvous_file = str(tmp_path / "rdzv_mean") os.makedirs(os.path.dirname(rendezvous_file), exist_ok=True) mp.spawn( fn=_worker_mean, args=(world_size, rendezvous_file), nprocs=world_size, join=True, ) def _worker_mask(rank: int, world_size: int, rendezvous_file: str): torch.cuda.set_device(rank) dist.init_process_group( backend="nccl", init_method=f"file://{rendezvous_file}", rank=rank, world_size=world_size, ) # build per‐rank tensor and mask local_tensor = torch.tensor([rank * 2 + 1.0, rank * 2 + 2.0], device=f"cuda:{rank}") if rank == 0: mask = torch.tensor([1, 0], device=f"cuda:{rank}", dtype=torch.float32) else: mask = torch.tensor([0, 1], device=f"cuda:{rank}", dtype=torch.float32) gmean = distributed_masked_mean(local_tensor, mask) valid_values = [1.0] + [2 * i + 2.0 for i in range(1, world_size)] expected_mean = sum(valid_values) / len(valid_values) assert torch.allclose(gmean.cpu(), torch.tensor(expected_mean)), f"masked_mean@{rank}" dist.destroy_process_group() @pytest.mark.parametrize("world_size", [2, 4]) def test_distributed_masked_mean(world_size, tmp_path): rendezvous_file = str(tmp_path / "rdzv_mask") os.makedirs(os.path.dirname(rendezvous_file), exist_ok=True) mp.spawn( fn=_worker_mask, args=(world_size, rendezvous_file), nprocs=world_size, join=True, ) ================================================ FILE: verl_distillation/tests/workers/actor/test_special_dp_actor.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch import torch.nn as nn from tensordict import TensorDict from transformers import AutoModelForCausalLM, Qwen3Config from verl import DataProto from verl.workers.actor.dp_actor import DataParallelPPOActor from verl.workers.config import FSDPActorConfig, OptimizerConfig class MockTransformerModel(nn.Module): """Mock transformer model for testing DataParallelPPOActor""" def __init__(self, vocab_size=1000, hidden_size=64): super().__init__() self.vocab_size = vocab_size self.hidden_size = hidden_size self.embedding = nn.Embedding(vocab_size, hidden_size) self.transformer = nn.TransformerEncoder( nn.TransformerEncoderLayer(d_model=hidden_size, nhead=4, batch_first=True), num_layers=2 ) self.lm_head = nn.Linear(hidden_size, vocab_size) def forward(self, input_ids, attention_mask=None, position_ids=None, use_cache=False, **kwargs): batch_size, seq_len = input_ids.shape embeddings = self.embedding(input_ids) hidden_states = self.transformer(embeddings) logits = self.lm_head(hidden_states) class MockOutput: def __init__(self, logits): self.logits = logits return MockOutput(logits) class TestDataParallelPPOActor(unittest.TestCase): """Test DataParallelPPOActor compute_log_prob and update_policy methods""" @classmethod def setUpClass(cls): """Set up distributed environment""" if not torch.distributed.is_initialized(): torch.distributed.init_process_group( backend="nccl" if torch.cuda.is_available() else "gloo", init_method="env://" ) cls.rank = torch.distributed.get_rank() cls.world_size = torch.distributed.get_world_size() if torch.cuda.is_available(): torch.cuda.set_device(cls.rank) cls.device = torch.device(f"cuda:{cls.rank}") else: cls.device = torch.device("cpu") def setUp(self): """Set up test fixtures""" self.config = FSDPActorConfig( strategy="fsdp2", ppo_mini_batch_size=4, ppo_micro_batch_size_per_gpu=2, ppo_epochs=1, clip_ratio=0.2, entropy_coeff=0.01, grad_clip=1.0, use_dynamic_bsz=False, use_torch_compile=False, # Disable torch.compile for testing ulysses_sequence_parallel_size=1, optim=OptimizerConfig(lr=1e-6), ) self.mock_model = MockTransformerModel(vocab_size=1000, hidden_size=64).to(self.device) self.mock_optimizer = torch.optim.Adam(self.mock_model.parameters(), lr=1e-4) self.actor = DataParallelPPOActor( config=self.config, actor_module=self.mock_model, actor_optimizer=self.mock_optimizer ) @classmethod def tearDownClass(cls): """Clean up distributed environment""" if torch.distributed.is_initialized(): torch.distributed.destroy_process_group() def _create_test_data_for_compute_log_prob(self): """Create test DataProto for compute_log_prob method""" batch_size = 2 prompt_length = 8 response_length = 4 total_length = prompt_length + response_length vocab_size = 1000 input_ids = torch.randint(0, vocab_size, (batch_size, total_length)).to(self.device) attention_mask = torch.ones(batch_size, total_length).to(self.device) position_ids = torch.arange(total_length).unsqueeze(0).expand(batch_size, -1).to(self.device) responses = input_ids[:, -response_length:] # Last part is the response tensor_dict = TensorDict( { "input_ids": input_ids, "attention_mask": 
attention_mask, "position_ids": position_ids, "responses": responses, }, batch_size=[batch_size], ) meta_info = {"micro_batch_size": batch_size, "temperature": 1.0, "use_dynamic_bsz": False} return DataProto(batch=tensor_dict, meta_info=meta_info) def _create_test_data_for_update_policy(self): """Create test DataProto for update_policy method""" batch_size = 4 # Must match ppo_mini_batch_size prompt_length = 8 response_length = 4 total_length = prompt_length + response_length vocab_size = 1000 input_ids = torch.randint(0, vocab_size, (batch_size, total_length)).to(self.device) attention_mask = torch.ones(batch_size, total_length).to(self.device) position_ids = torch.arange(total_length).unsqueeze(0).expand(batch_size, -1).to(self.device) responses = input_ids[:, -response_length:] response_mask = torch.ones(batch_size, response_length).to(self.device) old_log_probs = torch.randn(batch_size, response_length).to(self.device) * 0.1 # Small values advantages = torch.randn(batch_size, response_length).to(self.device) * 0.5 tensor_dict = TensorDict( { "input_ids": input_ids, "attention_mask": attention_mask, "position_ids": position_ids, "responses": responses, "response_mask": response_mask, "old_log_probs": old_log_probs, "advantages": advantages, }, batch_size=[batch_size], ) meta_info = {"temperature": 1.0} return DataProto(batch=tensor_dict, meta_info=meta_info) def test_compute_log_prob(self): """Test compute_log_prob method""" data = self._create_test_data_for_compute_log_prob() log_probs, entropies = self.actor.compute_log_prob(data, calculate_entropy=True) batch_size = data.batch["responses"].shape[0] response_length = data.batch["responses"].shape[1] self.assertIsInstance(log_probs, torch.Tensor) self.assertEqual(log_probs.shape, (batch_size, response_length)) self.assertTrue(torch.all(torch.isfinite(log_probs))) self.assertIsInstance(entropies, torch.Tensor) self.assertEqual(entropies.shape, (batch_size, response_length)) self.assertTrue(torch.all(torch.isfinite(entropies))) self.assertTrue(torch.all(entropies >= 0)) # Entropy should be non-negative def test_compute_log_prob_without_entropy(self): """Test compute_log_prob method without entropy calculation""" data = self._create_test_data_for_compute_log_prob() log_probs, entropies = self.actor.compute_log_prob(data, calculate_entropy=False) batch_size = data.batch["responses"].shape[0] response_length = data.batch["responses"].shape[1] self.assertIsInstance(log_probs, torch.Tensor) self.assertEqual(log_probs.shape, (batch_size, response_length)) self.assertTrue(torch.all(torch.isfinite(log_probs))) self.assertIsNone(entropies) def test_update_policy(self): """Test update_policy method""" data = self._create_test_data_for_update_policy() metrics = self.actor.update_policy(data) self.assertIsInstance(metrics, dict) expected_metric_keys = [ "actor/pg_loss", "actor/pg_clipfrac", "actor/ppo_kl", "actor/pg_clipfrac_lower", "actor/grad_norm", ] for key in expected_metric_keys: self.assertIn(key, metrics) if isinstance(metrics[key], list): self.assertTrue(all(torch.isfinite(torch.tensor(v)) for v in metrics[key])) else: self.assertIsInstance(metrics[key], (float, int)) self.assertTrue(torch.isfinite(torch.tensor(metrics[key]))) def test_dataparallelppoactor_initialization(self): """Test DataParallelPPOActor initialization""" self.assertIsNotNone(self.actor.actor_module) self.assertIsNotNone(self.actor.actor_optimizer) self.assertEqual(self.actor.config, self.config) self.assertEqual(self.actor.config.strategy, "fsdp2") 
self.assertEqual(self.actor.config.ppo_mini_batch_size, 4) self.assertEqual(self.actor.config.clip_ratio, 0.2) def test_dataparallelppoactor_with_qwen3_model(self): """Test DataParallelPPOActor with real Qwen3ForCausalLM model""" qwen_config = Qwen3Config( vocab_size=1000, hidden_size=64, intermediate_size=128, num_hidden_layers=2, num_attention_heads=4, num_key_value_heads=2, max_position_embeddings=512, torch_dtype=torch.float32, use_cache=False, ) with torch.device(self.device): qwen_model = AutoModelForCausalLM.from_config(config=qwen_config, torch_dtype=torch.float32).to(self.device) qwen_optimizer = torch.optim.Adam(qwen_model.parameters(), lr=1e-4) qwen_actor = DataParallelPPOActor(config=self.config, actor_module=qwen_model, actor_optimizer=qwen_optimizer) data = self._create_test_data_for_compute_log_prob() log_probs, entropies = qwen_actor.compute_log_prob(data, calculate_entropy=True) batch_size = data.batch["responses"].shape[0] response_length = data.batch["responses"].shape[1] self.assertIsInstance(log_probs, torch.Tensor) self.assertEqual(log_probs.shape, (batch_size, response_length)) self.assertTrue(torch.all(torch.isfinite(log_probs))) self.assertIsInstance(entropies, torch.Tensor) self.assertEqual(entropies.shape, (batch_size, response_length)) self.assertTrue(torch.all(torch.isfinite(entropies))) self.assertTrue(torch.all(entropies >= 0)) policy_data = self._create_test_data_for_update_policy() metrics = qwen_actor.update_policy(policy_data) self.assertIsInstance(metrics, dict) expected_metric_keys = [ "actor/pg_loss", "actor/pg_clipfrac", "actor/ppo_kl", "actor/pg_clipfrac_lower", "actor/grad_norm", ] for key in expected_metric_keys: self.assertIn(key, metrics) if isinstance(metrics[key], list): self.assertTrue(all(torch.isfinite(torch.tensor(v)) for v in metrics[key])) else: self.assertIsInstance(metrics[key], (float, int)) self.assertTrue(torch.isfinite(torch.tensor(metrics[key]))) if __name__ == "__main__": unittest.main() ================================================ FILE: verl_distillation/tests/workers/config/test_actor_config_on_cpu.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
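# The config dicts in these tests carry a `_target_` key naming the dataclass
# to instantiate. A minimal sketch of how such a key is typically resolved
# (an illustrative assumption -- omega_conf_to_dataclass is the real entry
# point and additionally recurses into nested configs such as `optim`):
import importlib

def _resolve_target_sketch(config: dict):
    # Split "verl.workers.config.ActorConfig" into module path and class name,
    # import the module, and return the class plus the remaining kwargs.
    module_name, _, class_name = config["_target_"].rpartition(".")
    cls = getattr(importlib.import_module(module_name), class_name)
    kwargs = {k: v for k, v in config.items() if k != "_target_"}
    return cls, kwargs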
import os import unittest from verl.utils.config import omega_conf_to_dataclass from verl.workers.config import ( ActorConfig, FSDPActorConfig, McoreActorConfig, OptimizerConfig, ) class TestActorConfig(unittest.TestCase): """Test the ActorConfig dataclass and its variants.""" def test_config_inheritance(self): """Test that the inheritance hierarchy works correctly.""" megatron_dict = { "_target_": "verl.workers.config.McoreActorConfig", "strategy": "megatron", "ppo_mini_batch_size": 256, "ppo_micro_batch_size_per_gpu": 256, "clip_ratio": 0.2, "optim": { "_target_": "verl.workers.config.McoreOptimizerConfig", "lr": 0.1, }, } fsdp_dict = { "_target_": "verl.workers.config.FSDPActorConfig", "strategy": "fsdp", "ppo_mini_batch_size": 256, "ppo_micro_batch_size_per_gpu": 256, "clip_ratio": 0.2, "optim": { "_target_": "verl.workers.config.FSDPOptimizerConfig", "lr": 0.1, }, } megatron_config = omega_conf_to_dataclass(megatron_dict) fsdp_config = omega_conf_to_dataclass(fsdp_dict) self.assertIsInstance(megatron_config, ActorConfig) self.assertIsInstance(fsdp_config, ActorConfig) self.assertEqual(megatron_config.ppo_mini_batch_size, fsdp_config.ppo_mini_batch_size) self.assertEqual(megatron_config.clip_ratio, fsdp_config.clip_ratio) def test_actor_config_from_yaml(self): """Test creating ActorConfig from YAML file.""" from hydra import compose, initialize_config_dir with initialize_config_dir(config_dir=os.path.abspath("verl/trainer/config/actor")): cfg = compose(config_name="actor", overrides=["strategy=fsdp", "ppo_micro_batch_size_per_gpu=128"]) config = omega_conf_to_dataclass(cfg) self.assertIsInstance(config, ActorConfig) self.assertEqual(config.strategy, "fsdp") def test_fsdp_actor_config_from_yaml(self): """Test creating FSDPActorConfig from YAML file.""" from hydra import compose, initialize_config_dir with initialize_config_dir(config_dir=os.path.abspath("verl/trainer/config/actor")): cfg = compose(config_name="dp_actor", overrides=["strategy=fsdp2", "ppo_micro_batch_size_per_gpu=128"]) config = omega_conf_to_dataclass(cfg) self.assertIsInstance(config, FSDPActorConfig) self.assertEqual(config.strategy, "fsdp2") def test_megatron_actor_config_from_yaml(self): """Test creating McoreActorConfig from YAML file.""" from hydra import compose, initialize_config_dir with initialize_config_dir(config_dir=os.path.abspath("verl/trainer/config/actor")): cfg = compose(config_name="megatron_actor", overrides=["ppo_micro_batch_size_per_gpu=128"]) config = omega_conf_to_dataclass(cfg) self.assertIsInstance(config, McoreActorConfig) self.assertEqual(config.strategy, "megatron") def test_config_get_method(self): """Test the get method for backward compatibility.""" config_dict = { "_target_": "verl.workers.config.ActorConfig", "strategy": "fsdp", "ppo_mini_batch_size": 256, "ppo_micro_batch_size_per_gpu": 256, "optim": { "_target_": "verl.workers.config.OptimizerConfig", "lr": 0.1, }, } config = omega_conf_to_dataclass(config_dict) self.assertEqual(config.get("strategy"), "fsdp") self.assertEqual(config.get("ppo_mini_batch_size"), 256) self.assertIsNone(config.get("non_existing")) self.assertEqual(config.get("non_existing", "default"), "default") def test_config_dict_like_access(self): """Test dictionary-like access to config fields.""" config_dict = { "_target_": "verl.workers.config.ActorConfig", "strategy": "fsdp", "ppo_mini_batch_size": 256, "ppo_micro_batch_size_per_gpu": 256, "optim": { "_target_": "verl.workers.config.OptimizerConfig", "lr": 0.1, }, } config = omega_conf_to_dataclass(config_dict) 
self.assertEqual(config["strategy"], "fsdp") self.assertEqual(config["ppo_mini_batch_size"], 256) field_names = list(config) self.assertIn("strategy", field_names) self.assertIn("ppo_mini_batch_size", field_names) self.assertGreater(len(config), 0) def test_frozen_fields_modification_raises_exception(self): """Test that modifying frozen fields raises an exception.""" config_dict = { "_target_": "verl.workers.config.ActorConfig", "strategy": "fsdp", "ppo_mini_batch_size": 256, "ppo_micro_batch_size_per_gpu": 256, "optim": { "_target_": "verl.workers.config.OptimizerConfig", "lr": 0.1, }, } config = omega_conf_to_dataclass(config_dict) with self.assertRaises(AttributeError): config.strategy = "megatron" with self.assertRaises(AttributeError): config.clip_ratio = 0.5 config.ppo_mini_batch_size = 512 # This should work since it's not in frozen fields anymore self.assertEqual(config.ppo_mini_batch_size, 512) def test_actor_config_validation_exceptions(self): """Test that ActorConfig.__post_init__ raises appropriate validation exceptions.""" optim = OptimizerConfig(lr=0.1) with self.assertRaises((ValueError, AssertionError)) as cm: ActorConfig( strategy="fsdp", loss_agg_mode="invalid-mode", use_dynamic_bsz=True, optim=optim, ppo_micro_batch_size_per_gpu=4, ) self.assertIn("Invalid loss_agg_mode", str(cm.exception)) with self.assertRaises((ValueError, AssertionError)) as cm: ActorConfig( strategy="fsdp", use_dynamic_bsz=False, ppo_micro_batch_size=4, ppo_micro_batch_size_per_gpu=2, optim=optim, ) self.assertIn("You have set both", str(cm.exception)) with self.assertRaises((ValueError, AssertionError)) as cm: ActorConfig( strategy="fsdp", use_dynamic_bsz=False, ppo_micro_batch_size=None, ppo_micro_batch_size_per_gpu=None, optim=optim, ) self.assertIn("Please set at least one", str(cm.exception)) config = ActorConfig( strategy="fsdp", use_dynamic_bsz=True, ppo_micro_batch_size=None, ppo_micro_batch_size_per_gpu=None, optim=optim, ) self.assertIsNotNone(config) # Should not raise an exception def test_fsdp_actor_config_validation_exceptions(self): """Test that FSDPActorConfig.validate() raises appropriate validation exceptions.""" optim = OptimizerConfig(lr=0.1) config = FSDPActorConfig( strategy="fsdp", ulysses_sequence_parallel_size=2, use_dynamic_bsz=True, # Skip batch size validation to focus on FSDP validation optim=optim, ) model_config = {"use_remove_padding": False} with self.assertRaises(ValueError) as cm: config.validate(n_gpus=8, train_batch_size=256, model_config=model_config) self.assertIn("you must enable `use_remove_padding`", str(cm.exception)) def test_actor_config_validate_method_exceptions(self): """Test that ActorConfig.validate() raises appropriate validation exceptions.""" optim = OptimizerConfig(lr=0.1) config = ActorConfig( strategy="fsdp", use_dynamic_bsz=False, ppo_mini_batch_size=256, ppo_micro_batch_size=8, ppo_micro_batch_size_per_gpu=None, # Ensure only one batch size setting is used optim=optim, ) with self.assertRaises(ValueError) as cm: config.validate(n_gpus=8, train_batch_size=128) self.assertIn("train_batch_size", str(cm.exception)) with self.assertRaises(ValueError) as cm: config.validate(n_gpus=16, train_batch_size=512) self.assertIn("must be >= n_gpus", str(cm.exception)) if __name__ == "__main__": unittest.main() ================================================ FILE: verl_distillation/tests/workers/config/test_critic_config_on_cpu.py ================================================ # Copyright 2024 Bytedance Ltd. 
and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from pathlib import Path import pytest from hydra import compose, initialize_config_dir from verl.utils.config import omega_conf_to_dataclass from verl.utils.profiler import ProfilerConfig from verl.workers.config import ( CriticConfig, FSDPCriticConfig, FSDPOptimizerConfig, McoreCriticConfig, McoreOptimizerConfig, OptimizerConfig, ) class TestCriticConfig: """Test suite for critic configuration dataclasses.""" @pytest.fixture def config_dir(self): """Get the path to the config directory.""" return Path(__file__).parent.parent.parent.parent / "verl" / "trainer" / "config" / "critic" def test_megatron_critic_config_instantiation_from_yaml(self, config_dir): """Test that McoreCriticConfig can be instantiated from megatron_critic.yaml.""" yaml_path = config_dir / "megatron_critic.yaml" assert yaml_path.exists(), f"Config file not found: {yaml_path}" with initialize_config_dir(config_dir=os.path.abspath("verl/trainer/config/critic")): test_config = compose(config_name="megatron_critic", overrides=["ppo_micro_batch_size_per_gpu=1"]) megatron_config_obj = omega_conf_to_dataclass(test_config) assert isinstance(megatron_config_obj, McoreCriticConfig) assert isinstance(megatron_config_obj, CriticConfig) expected_attrs = [ "strategy", "rollout_n", "optim", "model", "ppo_mini_batch_size", "ppo_max_token_len_per_gpu", "cliprange_value", "get", "nccl_timeout", "megatron", "load_weight", ] for attr in expected_attrs: assert hasattr(megatron_config_obj, attr), f"Missing attribute: {attr}" assert callable(megatron_config_obj.get) assert megatron_config_obj.strategy == "megatron" def test_fsdp_critic_config_instantiation_from_yaml(self, config_dir): """Test that FSDPCriticConfig can be instantiated from dp_critic.yaml.""" yaml_path = config_dir / "dp_critic.yaml" assert yaml_path.exists(), f"Config file not found: {yaml_path}" with initialize_config_dir(config_dir=os.path.abspath("verl/trainer/config/critic")): test_config = compose(config_name="dp_critic", overrides=["ppo_micro_batch_size_per_gpu=1"]) fsdp_config_obj = omega_conf_to_dataclass(test_config) assert isinstance(fsdp_config_obj, FSDPCriticConfig) assert isinstance(fsdp_config_obj, CriticConfig) expected_attrs = [ "strategy", "rollout_n", "optim", "model", "ppo_mini_batch_size", "ppo_max_token_len_per_gpu", "cliprange_value", "get", "forward_micro_batch_size", "forward_micro_batch_size_per_gpu", "ulysses_sequence_parallel_size", "grad_clip", ] for attr in expected_attrs: assert hasattr(fsdp_config_obj, attr), f"Missing attribute: {attr}" assert callable(fsdp_config_obj.get) assert fsdp_config_obj.strategy == "fsdp" def test_config_inheritance_hierarchy(self): """Test that the inheritance hierarchy is correct.""" megatron_config = McoreCriticConfig(ppo_micro_batch_size_per_gpu=1, optim=McoreOptimizerConfig(lr=0.1)) assert isinstance(megatron_config, CriticConfig) assert isinstance(megatron_config, McoreCriticConfig) fsdp_config = FSDPCriticConfig(ppo_micro_batch_size_per_gpu=1, 
optim=FSDPOptimizerConfig(lr=0.1)) assert isinstance(fsdp_config, CriticConfig) assert isinstance(fsdp_config, FSDPCriticConfig) critic_config = CriticConfig(ppo_micro_batch_size_per_gpu=1, strategy="fsdp2", optim=OptimizerConfig(lr=0.1)) assert isinstance(critic_config, CriticConfig) assert not isinstance(critic_config, McoreCriticConfig) assert not isinstance(critic_config, FSDPCriticConfig) def test_config_dict_interface(self): """Test that configs provide dict-like interface from BaseConfig.""" optim = OptimizerConfig(lr=0.1) config = CriticConfig(ppo_micro_batch_size_per_gpu=1, strategy="fsdp2", optim=optim) assert "strategy" in config assert config["strategy"] == "fsdp2" assert config.get("strategy") == "fsdp2" assert config.get("nonexistent_key", "default") == "default" keys = list(config) assert "strategy" in keys assert "rollout_n" in keys assert len(config) > 0 def test_frozen_fields_immutability(self): """Test that frozen fields raise exceptions when modified after creation.""" critic_config = CriticConfig(ppo_micro_batch_size_per_gpu=1, strategy="fsdp2", optim=OptimizerConfig(lr=0.1)) frozen_fields = ["rollout_n", "strategy", "cliprange_value"] for field_name in frozen_fields: with pytest.raises((AttributeError, TypeError, ValueError)): setattr(critic_config, field_name, "modified_value") megatron_config = McoreCriticConfig(ppo_micro_batch_size_per_gpu=1, optim=McoreOptimizerConfig(lr=0.1)) megatron_frozen_fields = ["nccl_timeout", "load_weight", "data_loader_seed"] for field_name in megatron_frozen_fields: with pytest.raises((AttributeError, TypeError, ValueError)): setattr(megatron_config, field_name, "modified_value") fsdp_config = FSDPCriticConfig(ppo_micro_batch_size_per_gpu=1, optim=FSDPOptimizerConfig(lr=0.1)) fsdp_frozen_fields = ["ulysses_sequence_parallel_size", "grad_clip"] for field_name in fsdp_frozen_fields: with pytest.raises((AttributeError, TypeError, ValueError)): setattr(fsdp_config, field_name, "modified_value") def test_batch_size_fields_modifiable(self): """Test that batch size fields can be modified after creation.""" optim = OptimizerConfig(lr=0.1) critic_config = CriticConfig(ppo_micro_batch_size_per_gpu=1, strategy="fsdp2", optim=optim) critic_config.ppo_mini_batch_size = 8 critic_config.ppo_micro_batch_size = 4 critic_config.ppo_micro_batch_size_per_gpu = 2 assert critic_config.ppo_mini_batch_size == 8 assert critic_config.ppo_micro_batch_size == 4 assert critic_config.ppo_micro_batch_size_per_gpu == 2 fsdp_config = FSDPCriticConfig(ppo_micro_batch_size_per_gpu=1, optim=FSDPOptimizerConfig(lr=0.1)) fsdp_config.forward_micro_batch_size = 16 fsdp_config.forward_micro_batch_size_per_gpu = 8 assert fsdp_config.forward_micro_batch_size == 16 assert fsdp_config.forward_micro_batch_size_per_gpu == 8 def test_profiler_config_type_validation(self): """Test that profiler field has correct type and validation.""" optim = OptimizerConfig(lr=0.1) critic_config = CriticConfig(ppo_micro_batch_size_per_gpu=1, strategy="fsdp2", optim=optim) assert isinstance(critic_config.profiler, ProfilerConfig) assert critic_config.profiler.all_ranks is False assert critic_config.profiler.ranks == [] custom_profiler = ProfilerConfig(all_ranks=True, ranks=[0, 1]) critic_config_custom = CriticConfig( profiler=custom_profiler, ppo_micro_batch_size_per_gpu=1, strategy="fsdp2", optim=optim ) assert isinstance(critic_config_custom.profiler, ProfilerConfig) assert critic_config_custom.profiler.all_ranks is True assert critic_config_custom.profiler.ranks == [0, 1] profiler1 = 
ProfilerConfig(enable=True, ranks=[0, 1]) profiler2 = ProfilerConfig(all_ranks=True, ranks=[1, 2]) union_result = profiler1.union(profiler2) assert union_result.enable is True assert union_result.all_ranks is True assert set(union_result.ranks) == {0, 1, 2} intersect_result = profiler1.intersect(profiler2) assert intersect_result.all_ranks is False assert intersect_result.ranks == [1] def test_critic_config_validation_logic(self): """Test the __post_init__ validation logic for CriticConfig.""" optim = OptimizerConfig(lr=0.1) valid_config = CriticConfig( strategy="fsdp2", ppo_micro_batch_size_per_gpu=2, use_dynamic_bsz=False, optim=optim ) assert valid_config.ppo_micro_batch_size_per_gpu == 2 valid_config2 = CriticConfig( strategy="fsdp2", ppo_micro_batch_size_per_gpu=None, ppo_micro_batch_size=4, ppo_mini_batch_size=8, use_dynamic_bsz=False, optim=optim, ) assert valid_config2.ppo_micro_batch_size == 4 dynamic_config = CriticConfig( strategy="fsdp2", ppo_micro_batch_size_per_gpu=2, use_dynamic_bsz=True, optim=optim ) assert dynamic_config.use_dynamic_bsz is True with pytest.raises(ValueError, match="You have set both.*micro_batch_size.*AND.*micro_batch_size_per_gpu"): CriticConfig( strategy="fsdp2", ppo_micro_batch_size=4, ppo_micro_batch_size_per_gpu=2, use_dynamic_bsz=False, optim=optim, ) with pytest.raises( ValueError, match="Please set at least one of.*micro_batch_size.*or.*micro_batch_size_per_gpu" ): CriticConfig( strategy="fsdp2", ppo_micro_batch_size=None, ppo_micro_batch_size_per_gpu=None, use_dynamic_bsz=False, optim=optim, ) def test_micro_batch_size_divisibility_validation(self): """Test micro batch size divisibility validation in __post_init__.""" optim = OptimizerConfig(lr=0.1) valid_config = CriticConfig( strategy="fsdp2", ppo_micro_batch_size_per_gpu=2, ppo_mini_batch_size=8, use_dynamic_bsz=False, optim=optim ) assert valid_config.ppo_mini_batch_size == 8 assert valid_config.ppo_micro_batch_size_per_gpu == 2 valid_config_with_mbs = CriticConfig( strategy="fsdp2", ppo_mini_batch_size=8, ppo_micro_batch_size=4, use_dynamic_bsz=False, optim=optim ) assert valid_config_with_mbs.ppo_mini_batch_size == 8 assert valid_config_with_mbs.ppo_micro_batch_size == 4 with pytest.raises(ValueError, match="ppo_mini_batch_size.*must be divisible by.*ppo_micro_batch_size"): CriticConfig( strategy="fsdp2", ppo_mini_batch_size=7, ppo_micro_batch_size=4, use_dynamic_bsz=False, optim=optim ) dynamic_config = CriticConfig( strategy="fsdp2", ppo_mini_batch_size=7, ppo_micro_batch_size=4, use_dynamic_bsz=True, optim=optim ) assert dynamic_config.use_dynamic_bsz is True def test_fsdp_sequence_parallelism_validation(self): """Test FSDP sequence parallelism validation in FSDPCriticConfig.__post_init__.""" valid_config = FSDPCriticConfig( ppo_micro_batch_size_per_gpu=2, ulysses_sequence_parallel_size=2, model={"use_remove_padding": True}, optim=FSDPOptimizerConfig(lr=0.1), ) assert valid_config.ulysses_sequence_parallel_size == 2 with pytest.raises( ValueError, match="When using sequence parallelism for critic, you must enable.*use_remove_padding" ): FSDPCriticConfig( ppo_micro_batch_size_per_gpu=2, ulysses_sequence_parallel_size=2, model={"use_remove_padding": False}, optim=FSDPOptimizerConfig(lr=0.1), ) valid_config_no_sp = FSDPCriticConfig( ppo_micro_batch_size_per_gpu=2, ulysses_sequence_parallel_size=1, model={"use_remove_padding": False}, optim=FSDPOptimizerConfig(lr=0.1), ) assert valid_config_no_sp.ulysses_sequence_parallel_size == 1 ================================================ FILE: 
verl_distillation/tests/workers/config/test_engine_config_on_cpu.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytest from verl.workers.config.engine import FSDPEngineConfig, McoreEngineConfig class TestMcoreEngineConfig: def test_default_values(self): config = McoreEngineConfig() assert config.tensor_model_parallel_size == 1 assert config.sequence_parallel is False # Should be auto-corrected assert config.seed == 42 def test_post_init_validation(self): # Test TP size 1 forces sequence_parallel=False config = McoreEngineConfig(tensor_model_parallel_size=1) assert config.sequence_parallel is False # Test TP >1 keeps sequence_parallel=True config = McoreEngineConfig(tensor_model_parallel_size=2) assert config.sequence_parallel is True def test_mutable_fields(self): config = McoreEngineConfig() config.sequence_parallel = True # Should be mutable with pytest.raises(AttributeError): config.tensor_model_parallel_size = 2 # Frozen field @pytest.mark.parametrize("offload_field", ["param_offload", "grad_offload", "optimizer_offload"]) def test_offload_flags(self, offload_field): config = McoreEngineConfig(**{offload_field: True}) assert getattr(config, offload_field) is True class TestFSDPEngineConfigCPU: def test_default_values(self): config = FSDPEngineConfig() assert config.param_offload is False assert config.optimizer_offload is False assert config.fsdp_size == -1 @pytest.mark.parametrize( "offload_params", [{"param_offload": True}, {"optimizer_offload": True}, {"param_offload": True, "optimizer_offload": True}], ) def test_offload_combinations(self, offload_params): config = FSDPEngineConfig(**offload_params) assert config.param_offload == offload_params.get("param_offload", False) assert config.optimizer_offload == offload_params.get("optimizer_offload", False) def test_wrap_policy_configuration(self): test_policy = {"layer_class": "TransformerBlock"} config = FSDPEngineConfig(wrap_policy=test_policy) assert config.wrap_policy == test_policy ================================================ FILE: verl_distillation/tests/workers/config/test_optim_config_on_cpu.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
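# These tests exercise scheduler-type validation on FSDPOptimizerConfig,
# including the warmup_style spelling that maps onto lr_scheduler_type.
# A minimal sketch of that validation style (hypothetical, not verl's
# actual dataclass):
from dataclasses import dataclass

@dataclass
class _SchedulerConfigSketch:
    lr: float
    lr_scheduler_type: str = "constant"

    def __post_init__(self):
        # Reject unknown scheduler types eagerly, as the tests below expect.
        allowed = ("constant", "cosine")
        if self.lr_scheduler_type not in allowed:
            raise ValueError(f"invalid lr_scheduler_type {self.lr_scheduler_type!r}; expected one of {allowed}")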
import pytest from verl.workers.config.optimizer import FSDPOptimizerConfig class TestFSDPOptimizerConfigCPU: def test_default_configuration(self): config = FSDPOptimizerConfig(lr=0.1) assert config.min_lr_ratio is None assert config.lr_scheduler_type == "constant" assert config.num_cycles == 0.5 @pytest.mark.parametrize("lr_scheduler_type", ["constant", "cosine"]) def test_valid_lr_scheduler_types(self, lr_scheduler_type): config = FSDPOptimizerConfig(lr_scheduler_type=lr_scheduler_type, lr=0.1) assert config.lr_scheduler_type == lr_scheduler_type @pytest.mark.parametrize("warmup_style", ["constant", "cosine"]) def test_valid_warmup_style_types(self, warmup_style): config = FSDPOptimizerConfig(warmup_style=warmup_style, lr=0.1) assert config.lr_scheduler_type == warmup_style def test_invalid_lr_scheduler_type(self): with pytest.raises((ValueError, AssertionError)): FSDPOptimizerConfig(lr_scheduler_type="invalid_style", lr=0.1) def test_invalid_warmup_style_type(self): with pytest.raises((ValueError, AssertionError)): FSDPOptimizerConfig(warmup_style="invalid_style", lr=0.1) @pytest.mark.parametrize("num_cycles", [0.1, 1.0, 2.5]) def test_num_cycles_configuration(self, num_cycles): config = FSDPOptimizerConfig(num_cycles=num_cycles, lr=0.1) assert config.num_cycles == num_cycles ================================================ FILE: verl_distillation/tests/workers/critic/test_special_dp_critic.py ================================================ #!/usr/bin/env python3 # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
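# Note: like the dp_actor test earlier in this suite, the worker tests below
# initialize torch.distributed with init_method="env://", so they expect
# launcher-provided environment variables (RANK, WORLD_SIZE, MASTER_ADDR,
# MASTER_PORT). An illustrative invocation (assumed, not taken from this
# repo's docs):
#
#   torchrun --nproc_per_node=2 -m pytest tests/workers/critic/test_special_dp_critic.py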
import tempfile import unittest from unittest.mock import Mock, patch import torch import torch.distributed from omegaconf import OmegaConf from tensordict import TensorDict from transformers import AutoConfig from verl import DataProto from verl.workers.config import FSDPCriticConfig, FSDPOptimizerConfig from verl.workers.config.critic import FSDPCriticModelCfg from verl.workers.config.engine import FSDPEngineConfig from verl.workers.fsdp_workers import CriticWorker class TestCriticWorker(unittest.TestCase): @classmethod def setUpClass(cls): """Set up distributed environment""" if not torch.distributed.is_initialized(): torch.distributed.init_process_group( backend="nccl" if torch.cuda.is_available() else "gloo", init_method="env://" ) cls.rank = torch.distributed.get_rank() cls.world_size = torch.distributed.get_world_size() if torch.cuda.is_available(): torch.cuda.set_device(cls.rank) cls.device = torch.device(f"cuda:{cls.rank}") else: cls.device = torch.device("cpu") @classmethod def tearDownClass(cls): """Clean up distributed environment""" if torch.distributed.is_initialized(): torch.distributed.destroy_process_group() def setUp(self): """Set up test fixtures""" self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") self.temp_dir = tempfile.mkdtemp() config = AutoConfig.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct") config.save_pretrained(self.temp_dir) self.config = FSDPCriticConfig( strategy="fsdp2", ppo_mini_batch_size=4, ppo_micro_batch_size_per_gpu=2, forward_micro_batch_size_per_gpu=2, ppo_epochs=1, cliprange_value=0.5, grad_clip=1.0, use_dynamic_bsz=False, ulysses_sequence_parallel_size=1, rollout_n=1, optim=FSDPOptimizerConfig(lr=1e-6), model=FSDPCriticModelCfg( path="Qwen/Qwen2.5-0.5B-Instruct", tokenizer_path="Qwen/Qwen2.5-0.5B-Instruct", fsdp_config=FSDPEngineConfig(fsdp_size=-1), use_remove_padding=False, ), ) assert self.world_size <= 4 // 2 def tearDown(self): """Clean up test fixtures""" import shutil shutil.rmtree(self.temp_dir, ignore_errors=True) def _create_test_data_for_compute_values(self, batch_size=2, seq_len=10, response_len=5): """Create test data for compute_values method""" input_ids = torch.randint(0, 1000, (batch_size, seq_len), dtype=torch.long) attention_mask = torch.ones(batch_size, seq_len, dtype=torch.long) position_ids = torch.arange(seq_len).unsqueeze(0).expand(batch_size, -1) responses = torch.randint(0, 1000, (batch_size, response_len), dtype=torch.long) response_mask = torch.ones(batch_size, response_len, dtype=torch.float) batch = TensorDict( { "input_ids": input_ids, "attention_mask": attention_mask, "position_ids": position_ids, "responses": responses, "response_mask": response_mask, }, batch_size=[batch_size], ) data = DataProto( batch=batch, meta_info={"micro_batch_size": 2, "max_token_len": seq_len, "use_dynamic_bsz": False} ) return data def _create_test_data_for_update_critic(self, batch_size=2, seq_len=10, response_len=5): """Create test data for update_critic method""" input_ids = torch.randint(0, 1000, (batch_size, seq_len), dtype=torch.long) attention_mask = torch.ones(batch_size, seq_len, dtype=torch.long) position_ids = torch.arange(seq_len).unsqueeze(0).expand(batch_size, -1) responses = torch.randint(0, 1000, (batch_size, response_len), dtype=torch.long) response_mask = torch.ones(batch_size, response_len, dtype=torch.float) values = torch.randn(batch_size, response_len, dtype=torch.float) returns = torch.randn(batch_size, response_len, dtype=torch.float) batch = TensorDict( { "input_ids": input_ids, 
"attention_mask": attention_mask, "position_ids": position_ids, "responses": responses, "response_mask": response_mask, "values": values, "returns": returns, }, batch_size=[batch_size], ) data = DataProto( batch=batch, meta_info={"global_token_num": [response_len] * batch_size, "batch_seqlens": [response_len] * batch_size}, ) return data def test_init_model(self): """Test CriticWorker.init_model() method""" worker = CriticWorker(self.config) worker.init_model() self.assertIsNotNone(worker.critic_module) self.assertIsNotNone(worker.critic_optimizer) self.assertIsNotNone(worker.critic) self.assertIsNotNone(worker.checkpoint_manager) def test_compute_values(self): """Test CriticWorker.compute_values() method""" worker = CriticWorker(self.config) worker.init_model() data = self._create_test_data_for_compute_values() result = worker.compute_values(data) self.assertIsInstance(result, DataProto) self.assertIn("values", result.batch) values = result.batch["values"] batch_size, response_len = 2, 5 self.assertEqual(values.shape, (batch_size, response_len)) self.assertTrue(torch.isfinite(values).all()) def test_update_critic(self): """Test CriticWorker.update_critic() method""" worker = CriticWorker(self.config) worker.init_model() data = self._create_test_data_for_update_critic() result = worker.update_critic(data) self.assertIsInstance(result, DataProto) self.assertIn("metrics", result.meta_info) metrics = result.meta_info["metrics"] expected_keys = ["critic/vf_loss", "critic/vf_clipfrac", "critic/vpred_mean", "critic/grad_norm"] for key in expected_keys: self.assertIn(key, metrics) for key, value in metrics.items(): if isinstance(value, list | tuple): for v in value: self.assertTrue(torch.isfinite(torch.tensor(v)).all()) else: self.assertTrue(torch.isfinite(torch.tensor(value)).all()) @patch("transformers.AutoConfig.from_pretrained") def test_critic_attn_implementation_override_functionality(self, mock_config_from_pretrained): """Test that CriticWorker correctly uses attn_implementation from override_config""" # Mock the AutoConfig return value mock_config = Mock() mock_config.tie_word_embeddings = False mock_config.architectures = ["LlamaForCausalLM"] mock_config.num_labels = 1 mock_config_from_pretrained.return_value = mock_config # Test different attn_implementation values test_cases = [ ("eager", "eager"), ("sdpa", "sdpa"), ("flash_attention_2", "flash_attention_2"), (None, "flash_attention_2"), # Default case ] for override_value, expected_value in test_cases: mock_config_from_pretrained.reset_mock() # Create config with override_config config_dict = { "model": { "path": "/test/model/path", "tokenizer_path": "/test/tokenizer/path", "fsdp_config": { "fsdp_size": 1, "param_offload": False, "optimizer_offload": False, }, }, "optim": {"lr": 1e-4, "type": "AdamW"}, "strategy": "fsdp", "ppo_mini_batch_size": 1, "ppo_epochs": 1, "rollout_n": 1, "checkpoint": {"save_contents": [], "load_contents": []}, } # Add override_config with attn_implementation if specified if override_value is not None: config_dict["model"]["override_config"] = {"attn_implementation": override_value} # Convert to OmegaConf test_config = OmegaConf.create(config_dict) # Test the extraction logic that should happen in CriticWorker._build_critic_model_optimizer override_config = OmegaConf.to_container(OmegaConf.create(test_config.model.get("override_config", {}))) extracted_attn_implementation = override_config.get("attn_implementation", "flash_attention_2") # Verify the extraction works correctly self.assertEqual( 
extracted_attn_implementation, expected_value, f"Expected {expected_value}, got {extracted_attn_implementation} for override_value {override_value}", ) def test_critic_model_config_structure(self): """Test that critic model config properly incorporates override settings""" # Test configuration scenarios test_scenarios = [ {"name": "default_flash_attention", "override_config": {}, "expected_attn": "flash_attention_2"}, {"name": "eager_override", "override_config": {"attn_implementation": "eager"}, "expected_attn": "eager"}, {"name": "sdpa_override", "override_config": {"attn_implementation": "sdpa"}, "expected_attn": "sdpa"}, { "name": "mixed_config", "override_config": {"attn_implementation": "eager", "dropout": 0.1, "num_labels": 1}, "expected_attn": "eager", }, ] for scenario in test_scenarios: with self.subTest(scenario=scenario["name"]): # Simulate the config processing logic from CriticWorker override_config = scenario["override_config"] # Test the extraction logic extracted_attn = override_config.get("attn_implementation", "flash_attention_2") # Verify correct extraction self.assertEqual(extracted_attn, scenario["expected_attn"], f"Failed for scenario {scenario['name']}") # Verify other configs are preserved if "dropout" in override_config: self.assertEqual(override_config["dropout"], 0.1) def test_critic_hydra_config_compatibility(self): """Test that Hydra +prefix configurations work correctly for CriticWorker""" # Simulate Hydra configuration with +prefix for critic # This would come from: +critic.model.override_config.attn_implementation=eager hydra_config_dict = { "critic": {"model": {"path": "/test/model/path", "override_config": {"attn_implementation": "eager"}}} } omegaconf = OmegaConf.create(hydra_config_dict) # Extract override config as would be done in CriticWorker override_model_config = OmegaConf.to_container( OmegaConf.create(omegaconf.critic.model.get("override_config", {})) ) # Test extraction attn_implementation = override_model_config.get("attn_implementation", "flash_attention_2") self.assertEqual(attn_implementation, "eager") def test_critic_backward_compatibility(self): """Test that CriticWorker maintains backward compatibility with existing configurations""" # Test cases for backward compatibility compatibility_tests = [ {"name": "no_override_config", "config": {}, "expected": "flash_attention_2"}, {"name": "empty_override_config", "config": {"override_config": {}}, "expected": "flash_attention_2"}, { "name": "other_overrides_only", "config": {"override_config": {"dropout": 0.1, "hidden_size": 768}}, "expected": "flash_attention_2", }, ] for test in compatibility_tests: with self.subTest(test=test["name"]): override_config = test["config"].get("override_config", {}) attn_implementation = override_config.get("attn_implementation", "flash_attention_2") self.assertEqual( attn_implementation, test["expected"], f"Backward compatibility failed for {test['name']}" ) def test_critic_and_actor_independent_configuration(self): """Test that critic and actor can have independent attention implementation configurations""" # Simulate a complete training configuration with both actor and critic complete_config = { "actor_rollout_ref": {"model": {"override_config": {"attn_implementation": "eager"}}}, "critic": {"model": {"override_config": {"attn_implementation": "sdpa"}}}, } omegaconf = OmegaConf.create(complete_config) # Extract actor config actor_override = OmegaConf.to_container( OmegaConf.create(omegaconf.actor_rollout_ref.model.get("override_config", {})) ) actor_attn = 
actor_override.get("attn_implementation", "flash_attention_2") # Extract critic config critic_override = OmegaConf.to_container(OmegaConf.create(omegaconf.critic.model.get("override_config", {}))) critic_attn = critic_override.get("attn_implementation", "flash_attention_2") # Verify independent configuration self.assertEqual(actor_attn, "eager") self.assertEqual(critic_attn, "sdpa") self.assertNotEqual(actor_attn, critic_attn) # Ensure they are indeed different if __name__ == "__main__": unittest.main() ================================================ FILE: verl_distillation/tests/workers/reward_manager/test_registry_on_cpu.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytest # Assuming REWARD_MANAGER_REGISTRY is defined somewhere in the module from verl.workers.reward_manager.registry import REWARD_MANAGER_REGISTRY, get_reward_manager_cls, register @pytest.fixture def setup(): """Setup test cases with a mock registry.""" REWARD_MANAGER_REGISTRY.clear() REWARD_MANAGER_REGISTRY.update({"manager1": "Manager1Class", "manager2": "Manager2Class"}) return REWARD_MANAGER_REGISTRY def test_get_existing_manager(setup): """Test getting an existing reward manager class.""" assert get_reward_manager_cls("manager1") == "Manager1Class" assert get_reward_manager_cls("manager2") == "Manager2Class" def test_get_nonexistent_manager(setup): """Test getting a non-existent reward manager raises ValueError.""" with pytest.raises(ValueError) as excinfo: get_reward_manager_cls("unknown_manager") assert "Unknown reward manager: unknown_manager" in str(excinfo.value) def test_case_sensitivity(setup): """Test that manager names are case-sensitive.""" with pytest.raises(ValueError): get_reward_manager_cls("MANAGER1") with pytest.raises(ValueError): get_reward_manager_cls("Manager1") def test_empty_registry(setup): """Test behavior when registry is empty.""" REWARD_MANAGER_REGISTRY.clear() with pytest.raises(ValueError) as excinfo: get_reward_manager_cls("any_manager") assert "Unknown reward manager: any_manager" in str(excinfo.value) def test_register_new_class(setup): """Test registering a new class with the decorator.""" @register("test_manager") class TestManager: pass assert "test_manager" in REWARD_MANAGER_REGISTRY assert REWARD_MANAGER_REGISTRY["test_manager"] == TestManager def test_register_different_classes_same_name(setup): """Test that registering different classes with same name raises ValueError.""" @register("conflict_manager") class Manager1: pass with pytest.raises(ValueError): @register("conflict_manager") class Manager2: pass assert REWARD_MANAGER_REGISTRY["conflict_manager"] == Manager1 def test_decorator_returns_original_class(setup): """Test that the decorator returns the original class unchanged.""" @register("return_test") class OriginalClass: def method(setup): return 42 assert OriginalClass().method() == 42 assert REWARD_MANAGER_REGISTRY["return_test"] == OriginalClass 
================================================ FILE: verl_distillation/tests/workers/rollout/perf/vllm_async_rollout.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Compare vLLM AsyncLLM backend: ExternalRayDistributedExecutor(remote call) vs RayDistributedExecutor(compiled graph) 1. Prepare openai/gsm8k dataset python3 examples/data_preprocess/gsm8k.py 2. Run perf test python3 tests/workers/rollout/perf/vllm_async_rollout.py >perf.log 2>&1 hardware: Nvidia 8*H20 packages: - torch==2.6.0 - vllm==0.8.5 [DEBUG] backend: sync, n_gpus_per_node: 8, batch_size: 2048, step: 0, step_time: 21.27 secs [DEBUG] backend: zeromq, n_gpus_per_node: 8, batch_size: 2048, step: 0, step_time: 23.40 secs [DEBUG] backend: ray, n_gpus_per_node: 8, batch_size: 2048, step: 0, step_time: 25.33 secs """ import os import time import ray from omegaconf import DictConfig from torch.utils.data import SequentialSampler from torchdata.stateful_dataloader import StatefulDataLoader from tests.experimental.agent_loop.agent_utils import AgentLoopManager, RayWorkerGroup, init_agent_loop_manager from verl.protocol import DataProto from verl.utils import hf_tokenizer from verl.utils.dataset import RLHFDataset from verl.utils.dataset.rl_dataset import collate_fn as default_collate_fn def init_config(n_gpus_per_node) -> DictConfig: import os from hydra import compose, initialize_config_dir with initialize_config_dir(config_dir=os.path.abspath("verl/trainer/config")): config = compose( config_name="ppo_trainer", overrides=[ "actor_rollout_ref.actor.use_dynamic_bsz=true", "actor_rollout_ref.actor.fsdp_config.param_offload=True", "actor_rollout_ref.actor.fsdp_config.optimizer_offload=True", ], ) config.trainer.n_gpus_per_node = n_gpus_per_node config.data.train_batch_size = 128 config.data.return_raw_chat = True config.actor_rollout_ref.model.path = "Qwen/Qwen2.5-7B-Instruct" config.actor_rollout_ref.rollout.mode = "async" config.actor_rollout_ref.rollout.tensor_model_parallel_size = 2 config.actor_rollout_ref.rollout.gpu_memory_utilization = 0.9 config.actor_rollout_ref.rollout.multi_turn.format = "hermes" config.actor_rollout_ref.rollout.prompt_length = 4096 config.actor_rollout_ref.rollout.response_length = 4096 config.actor_rollout_ref.rollout.n = 16 return config def initialize(config, backend) -> tuple[AgentLoopManager | RayWorkerGroup, StatefulDataLoader]: env_vars = { "NCCL_DEBUG": "WARN", "VLLM_USE_V1": "1", "VERL_VLLM_DISTRIBUTED_BACKEND": backend, } ray.init(runtime_env={"env_vars": env_vars}) # STEP 1: init async llm server server = init_agent_loop_manager(config) # STEP 2: create dataloader tokenizer = hf_tokenizer(config.actor_rollout_ref.model.path) dataset = RLHFDataset( data_files=os.path.expanduser("~/data/gsm8k/train.parquet"), tokenizer=tokenizer, config=config.data, ) dataloader = StatefulDataLoader( dataset=dataset, batch_size=config.data.get("gen_batch_size", config.data.train_batch_size), 
num_workers=config.data.get("dataloader_num_workers", 8), drop_last=True, collate_fn=default_collate_fn, sampler=SequentialSampler(dataset), ) return server, dataloader def perf_rollout(mode, backend, n_gpus_per_node, num_steps): config = init_config(n_gpus_per_node) config.actor_rollout_ref.rollout.mode = mode agent_loop_manager, dataloader = initialize(config, backend) for step, batch in enumerate(dataloader): batch: DataProto = DataProto.from_single_dict(batch) batch = batch.pop( batch_keys=["input_ids", "attention_mask", "position_ids"], non_tensor_batch_keys=["raw_prompt_ids", "raw_prompt"], ) t_start = time.time() gen_batch = agent_loop_manager.generate_sequences(batch) t_end = time.time() print( f"[DEBUG] backend: {backend}, n_gpus_per_node: {n_gpus_per_node}, batch_size: {len(gen_batch)}, " f"step: {step}, step_time: {t_end - t_start:.2f} secs" ) if step + 1 >= num_steps: break ray.shutdown() if __name__ == "__main__": num_steps = 1 n_gpus_per_node = 8 # test_cases = [("sync", "sync"), ("async", "zeromq"), ("async", "ray")] test_cases = [("async", "zeromq"), ("async", "ray")] for mode, backend in test_cases: perf_rollout(mode=mode, backend=backend, n_gpus_per_node=n_gpus_per_node, num_steps=num_steps) ================================================ FILE: verl_distillation/tests/workers/rollout/resource/tool_configs/mcp_server.json ================================================ { "mcpServers": { "Tavily Expert": { "url": "https://tavily.api.tadata.com/mcp/tavily/your_expert", "auth_token": "your_tavily_token" } } } ================================================ FILE: verl_distillation/tests/workers/rollout/resource/tool_configs/mcp_tool_config ================================================ tools: - class_name: verl.tools.mcp_search_tool.MCPSearchTool config: rate_limit: 120 timeout: 120 type: mcp mcp: mcp_servers_config_path: ./resource/tool_configs/mcp_server.json # optional tool_selected_list: - tavily_search_tool ================================================ FILE: verl_distillation/tests/workers/rollout/resource/tool_configs/sandbox_fusion_tool_config ================================================ tools: - class_name: "verl.tools.sandbox_fusion_tools.SandboxFusionTool" config: sandbox_fusion_url: "https://xxx.apigateway-cn-beijing.volceapi.com/run_code" type: native tool_schema: type: "function" function: name: "code_interpreter" description: "A tool for executing code." parameters: type: "object" properties: code: type: "string" description: "The code to execute." required: ["code"] ================================================ FILE: verl_distillation/tests/workers/rollout/resource/tool_configs/search_tool_config ================================================ tools: - class_name: verl.tools.search_tool.SearchTool config: retrieval_service_url: http://127.0.0.1:8000/retrieve num_workers: 120 rate_limit: 120 timeout: 30 type: native tool_schema: type: function function: name: search description: Searches the web for relevant information based on the given query. parameters: type: object properties: query_list: type: array item: type: string description: A list of fully-formed semantic queries. The tool will return search results for each query. required: - query_list ================================================ FILE: verl_distillation/tests/workers/rollout/rollout_sglang/test_http_server_engine.py ================================================ # Copyright 2025 z.ai # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. 
and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # This file is adapted from multiple sources: # 1. THUDM/slime project # Original source: https://github.com/THUDM/slime/blob/main/slime/backends/sglang_utils/http_server_engine.py # Copyright 2025 z.ai # Licensed under the Apache License, Version 2.0 # 2. SGLang project # Original source: https://github.com/sgl-project/sglang/blob/main/python/sglang/srt/entrypoints/http_server_engine.py # Copyright 2023-2024 SGLang Team # Licensed under the Apache License, Version 2.0 # # Modifications made by z.ai and ModelBest Inc. include but are not limited to: # - Enhanced error handling and retry logic # - Added async support with connection pooling # - Extended functionality for distributed weight updates # - Improved logging and monitoring capabilities # - Additional configuration options and optimizations """Complete unit tests for HTTP Server Engine Adapters. This module contains comprehensive unit tests for both HttpServerEngineAdapter and AsyncHttpServerEngineAdapter classes, covering all public methods, error handling scenarios, edge cases, and boundary conditions using pytest and mock frameworks. Tests use real SGLang modules for integration testing while mocking external dependencies. """ import asyncio from unittest.mock import AsyncMock, Mock, patch import aiohttp import pytest import requests from sglang.srt.managers.io_struct import ( UpdateWeightsFromTensorReqInput, ) from sglang.srt.utils import MultiprocessingSerializer # Import the module under test from verl.workers.rollout.sglang_rollout.http_server_engine import ( AsyncHttpServerAdapter, HttpServerAdapter, launch_server_process, ) @pytest.fixture(scope="session") def event_loop(): """Create an event loop for the entire test session.""" loop = asyncio.new_event_loop() yield loop loop.close() @pytest.fixture def basic_adapter_kwargs(): """Provide basic kwargs for creating HTTP server adapters.""" return { "host": "localhost", "port": 8000, "node_rank": 0, "model_path": "/tmp/test_model", } @pytest.fixture def router_adapter_kwargs(): """Provide kwargs for creating adapters with router configuration.""" return { "router_ip": "192.168.1.1", "router_port": 8080, "host": "localhost", "port": 8000, "node_rank": 0, "model_path": "/tmp/test_model", } @pytest.fixture def non_master_adapter_kwargs(): """Provide kwargs for creating non-master node adapters.""" return { "host": "localhost", "port": 8000, "node_rank": 1, # Non-master "model_path": "/tmp/test_model", } @pytest.fixture def mock_launch_server_process(): """Mock the launch_server_process function for testing without actual server startup.""" from unittest.mock import patch with patch("verl.workers.rollout.sglang_rollout.http_server_engine.launch_server_process") as mock_launch: mock_process = Mock() mock_process.is_alive.return_value = True mock_process.pid = 12345 mock_launch.return_value = mock_process yield mock_launch @pytest.fixture def mock_multiprocessing_process(): """Create mock multiprocessing.Process for testing 
without actual process creation.""" from unittest.mock import patch with patch("verl.workers.rollout.sglang_rollout.http_server_engine.multiprocessing.Process") as mock_process_class: mock_process = Mock() mock_process.is_alive.return_value = True mock_process.pid = 12345 mock_process_class.return_value = mock_process yield mock_process @pytest.fixture def mock_requests_session(): """Create mock requests.Session for testing HTTP interactions.""" from unittest.mock import patch with patch("verl.workers.rollout.sglang_rollout.http_server_engine.requests.Session") as mock_session_class: mock_session = Mock() mock_response = Mock() mock_response.status_code = 200 mock_response.json.return_value = {"status": "success"} mock_session.get.return_value = mock_response mock_session.post.return_value = mock_response mock_session_class.return_value.__enter__.return_value = mock_session yield mock_session @pytest.fixture def mock_requests_post(): """Mock requests.post for testing HTTP POST requests.""" from unittest.mock import patch with patch("verl.workers.rollout.sglang_rollout.http_server_engine.requests.post") as mock_post: mock_response = Mock() mock_response.status_code = 200 mock_response.json.return_value = {"status": "success"} mock_post.return_value = mock_response yield mock_post @pytest.fixture def mock_requests_get(): """Mock requests.get for testing HTTP GET requests.""" from unittest.mock import patch with patch("verl.workers.rollout.sglang_rollout.http_server_engine.requests.get") as mock_get: mock_response = Mock() mock_response.status_code = 200 mock_response.json.return_value = {"status": "success"} mock_get.return_value = mock_response yield mock_get @pytest.fixture def mock_aiohttp_session(): """Create mock aiohttp.ClientSession for testing async HTTP interactions.""" mock_session = AsyncMock() mock_session.closed = False # Mock response mock_response = AsyncMock() mock_response.status = 200 mock_response.json = AsyncMock(return_value={"status": "success"}) mock_response.raise_for_status = Mock() # Mock context managers mock_session.get.return_value.__aenter__.return_value = mock_response mock_session.post.return_value.__aenter__.return_value = mock_response return mock_session @pytest.fixture def mock_kill_process_tree(): """Mock kill_process_tree function for testing cleanup without actual process termination.""" from unittest.mock import patch with patch("verl.workers.rollout.sglang_rollout.http_server_engine.kill_process_tree") as mock_kill: yield mock_kill # Test environment fixtures for real SGLang testing @pytest.fixture(scope="session") def sglang_test_model_path(): """Provide a test model path for SGLang tests. This can be overridden by environment variable SGLANG_TEST_MODEL_PATH for tests that need a real model. 
""" import os return os.getenv("SGLANG_TEST_MODEL_PATH", "/tmp/test_model") @pytest.fixture def real_adapter_kwargs(sglang_test_model_path): """Provide kwargs for creating adapters with real SGLang integration.""" return { "host": "localhost", "port": 8000, "node_rank": 0, "model_path": sglang_test_model_path, } @pytest.fixture(autouse=True) def mock_server_args_post_init(): """Mock ServerArgs.__post_init__ to skip model path validation.""" from unittest.mock import patch with patch( "verl.workers.rollout.sglang_rollout.http_server_engine.ServerArgs.__post_init__", return_value=None ) as mock_post_init: yield mock_post_init class TestLaunchServerProcess: """Test cases for launch_server_process function.""" def test_launch_server_process_success( self, mock_multiprocessing_process, mock_requests_session, real_adapter_kwargs ): """Test successful server process launch and health check.""" # Import real SGLang ServerArgs from sglang.srt.server_args import ServerArgs # Create server args using real ServerArgs server_args = ServerArgs(**real_adapter_kwargs) # Test with patch( "verl.workers.rollout.sglang_rollout.http_server_engine.multiprocessing.Process" ) as mock_process_class: mock_process_class.return_value = mock_multiprocessing_process with patch("verl.workers.rollout.sglang_rollout.http_server_engine.requests.Session") as mock_session_class: mock_session_class.return_value.__enter__.return_value = mock_requests_session result = launch_server_process(server_args, first_rank_in_node=True) # Assertions assert result == mock_multiprocessing_process mock_multiprocessing_process.start.assert_called_once() assert mock_requests_session.get.call_count >= 2 # health_generate and flush_cache def test_launch_server_process_non_master(self, mock_multiprocessing_process, non_master_adapter_kwargs): """Test server launch for non-master nodes (should return immediately).""" from sglang.srt.server_args import ServerArgs server_args = ServerArgs(**non_master_adapter_kwargs) with patch( "verl.workers.rollout.sglang_rollout.http_server_engine.multiprocessing.Process" ) as mock_process_class: mock_process_class.return_value = mock_multiprocessing_process result = launch_server_process(server_args, first_rank_in_node=True) assert result == mock_multiprocessing_process mock_multiprocessing_process.start.assert_not_called() def test_launch_server_process_timeout(self, mock_multiprocessing_process, real_adapter_kwargs): """Test timeout during server health check.""" from sglang.srt.server_args import ServerArgs server_args = ServerArgs(**real_adapter_kwargs) with patch( "verl.workers.rollout.sglang_rollout.http_server_engine.multiprocessing.Process" ) as mock_process_class: mock_process_class.return_value = mock_multiprocessing_process with patch("verl.workers.rollout.sglang_rollout.http_server_engine.requests.Session") as mock_session_class: mock_session = Mock() mock_session.get.side_effect = requests.RequestException("Connection failed") mock_session_class.return_value.__enter__.return_value = mock_session import itertools with patch( "verl.workers.rollout.sglang_rollout.http_server_engine.time.time", side_effect=itertools.chain([0], itertools.repeat(400)), # 第一次返回0,之后一直返回400 ): with pytest.raises(TimeoutError): launch_server_process(server_args, first_rank_in_node=True) mock_multiprocessing_process.terminate.assert_called_once() def test_launch_server_process_died(self, real_adapter_kwargs): """Test server process dies during startup.""" from sglang.srt.server_args import ServerArgs server_args = 
ServerArgs(**real_adapter_kwargs) with patch( "verl.workers.rollout.sglang_rollout.http_server_engine.multiprocessing.Process" ) as mock_process_class: mock_process = Mock() mock_process.is_alive.return_value = False mock_process_class.return_value = mock_process with pytest.raises(RuntimeError, match="Server process terminated unexpectedly"): launch_server_process(server_args, first_rank_in_node=True) class TestHttpServerEngineAdapter: """Test cases for HttpServerEngineAdapter class.""" def test_init_with_router_registration(self, mock_launch_server_process, mock_requests_post, router_adapter_kwargs): """Test initialization with router registration.""" adapter = HttpServerAdapter(**router_adapter_kwargs) assert adapter.router_ip == "192.168.1.1" assert adapter.router_port == 8080 assert adapter.process == mock_launch_server_process.return_value mock_requests_post.assert_called_once() def test_init_without_router(self, mock_launch_server_process, basic_adapter_kwargs): """Test initialization without router registration.""" adapter = HttpServerAdapter(**basic_adapter_kwargs) assert adapter.router_ip is None assert adapter.router_port is None assert adapter.process == mock_launch_server_process.return_value def test_register_with_router_failure(self, mock_launch_server_process, router_adapter_kwargs): """Test router registration failure handling.""" with patch("verl.workers.rollout.sglang_rollout.http_server_engine.requests.post") as mock_post: mock_post.side_effect = requests.RequestException("Connection failed") # Should not raise exception, just log error adapter = HttpServerAdapter(**router_adapter_kwargs) assert adapter.router_ip == "192.168.1.1" mock_post.assert_called_once() def test_make_request_success(self, mock_launch_server_process, basic_adapter_kwargs): """Test successful HTTP request.""" adapter = HttpServerAdapter(**basic_adapter_kwargs) with patch("verl.workers.rollout.sglang_rollout.http_server_engine.requests.post") as mock_post: mock_response = Mock() mock_response.status_code = 200 mock_response.json.return_value = {"status": "success"} mock_post.return_value = mock_response result = adapter._make_request("test_endpoint", {"param": "value"}) assert result == {"status": "success"} mock_post.assert_called_with( "http://localhost:8000/test_endpoint", json={"param": "value"}, timeout=adapter.timeout, ) def test_make_request_get_method(self, mock_launch_server_process, basic_adapter_kwargs): """Test HTTP GET request.""" adapter = HttpServerAdapter(**basic_adapter_kwargs) with patch("verl.workers.rollout.sglang_rollout.http_server_engine.requests.get") as mock_get: mock_response = Mock() mock_response.status_code = 200 mock_response.json.return_value = {"data": "test"} mock_get.return_value = mock_response result = adapter._make_request("test_endpoint", method="GET") assert result == {"data": "test"} mock_get.assert_called_with("http://localhost:8000/test_endpoint", timeout=adapter.timeout) def test_make_request_non_master(self, mock_launch_server_process): """Test request from non-master node returns empty dict.""" kwargs = {"host": "localhost", "port": 8000, "node_rank": 1, "model_path": "/tmp/test_model"} adapter = HttpServerAdapter(**kwargs) result = adapter._make_request("test_endpoint") assert result == {} def test_make_request_retry_logic(self, mock_launch_server_process, basic_adapter_kwargs): """Test retry logic for failed requests.""" adapter = HttpServerAdapter(max_attempts=3, **basic_adapter_kwargs) with 
patch("verl.workers.rollout.sglang_rollout.http_server_engine.requests.post") as mock_post: with patch("time.sleep") as mock_sleep: # First two calls fail, third succeeds mock_post.side_effect = [ requests.exceptions.Timeout(), requests.exceptions.ConnectionError(), Mock(status_code=200, json=lambda: {"success": True}), ] result = adapter._make_request("test_endpoint") assert result == {"success": True} assert mock_post.call_count == 3 assert mock_sleep.call_count == 2 def test_make_request_http_error(self, mock_launch_server_process, basic_adapter_kwargs): """Test HTTP error handling.""" adapter = HttpServerAdapter(**basic_adapter_kwargs) with patch("verl.workers.rollout.sglang_rollout.http_server_engine.requests.post") as mock_post: mock_response = Mock() mock_response.raise_for_status.side_effect = requests.exceptions.HTTPError("404 Not Found") mock_post.return_value = mock_response with pytest.raises(requests.exceptions.HTTPError): adapter._make_request("test_endpoint") def test_make_request_max_attempts_exceeded(self, mock_launch_server_process, basic_adapter_kwargs): """Test max retries exceeded.""" adapter = HttpServerAdapter(max_attempts=1, **basic_adapter_kwargs) with patch("verl.workers.rollout.sglang_rollout.http_server_engine.requests.post") as mock_post: with patch("time.sleep"): mock_post.side_effect = requests.exceptions.Timeout() with pytest.raises(RuntimeError, match="Failed to complete request"): adapter._make_request("test_endpoint") assert mock_post.call_count == 1 # Initial retry def test_update_weights_from_tensor_strict(self, mock_launch_server_process, basic_adapter_kwargs): import base64 from sglang.srt.managers.io_struct import UpdateWeightsFromTensorReqInput from verl.workers.rollout.sglang_rollout.http_server_engine import HttpServerAdapter basic_adapter_kwargs.setdefault("node_rank", 0) adapter = HttpServerAdapter(**basic_adapter_kwargs) with patch.object(adapter, "_make_request") as mock_request: mock_request.return_value = {"status": "updated"} req = UpdateWeightsFromTensorReqInput( serialized_named_tensors=[b"tensor1", b"tensor2"], load_format="safetensors", flush_cache=True, ) result = adapter.update_weights_from_tensor(req) assert result == {"status": "updated"} expected_b64_1 = base64.b64encode(b"tensor1").decode("utf-8") expected_b64_2 = base64.b64encode(b"tensor2").decode("utf-8") mock_request.assert_called_once_with( "update_weights_from_tensor", { "serialized_named_tensors": [expected_b64_1, expected_b64_2], "load_format": "safetensors", "flush_cache": True, }, ) def test_update_weights_from_tensor_empty(self, mock_launch_server_process, basic_adapter_kwargs): from sglang.srt.managers.io_struct import UpdateWeightsFromTensorReqInput from verl.workers.rollout.sglang_rollout.http_server_engine import HttpServerAdapter basic_adapter_kwargs.setdefault("node_rank", 0) adapter = HttpServerAdapter(**basic_adapter_kwargs) with patch.object(adapter, "_make_request") as mock_request: mock_request.return_value = {"status": "updated"} req = UpdateWeightsFromTensorReqInput( serialized_named_tensors=[], load_format="safetensors", flush_cache=True, ) result = adapter.update_weights_from_tensor(req) assert result == {"status": "updated"} mock_request.assert_called_once_with( "update_weights_from_tensor", { "serialized_named_tensors": [], "load_format": "safetensors", "flush_cache": True, }, ) def test_update_weights_from_tensor_none(self, mock_launch_server_process, basic_adapter_kwargs): from sglang.srt.managers.io_struct import UpdateWeightsFromTensorReqInput from 
verl.workers.rollout.sglang_rollout.http_server_engine import HttpServerAdapter basic_adapter_kwargs.setdefault("node_rank", 0) adapter = HttpServerAdapter(**basic_adapter_kwargs) with patch.object(adapter, "_make_request") as mock_request: mock_request.return_value = {"status": "updated"} req = UpdateWeightsFromTensorReqInput( serialized_named_tensors=None, load_format="safetensors", flush_cache=True, ) result = adapter.update_weights_from_tensor(req) assert result == {"status": "updated"} mock_request.assert_called_once_with( "update_weights_from_tensor", { "serialized_named_tensors": [], "load_format": "safetensors", "flush_cache": True, }, ) def test_generate(self, mock_launch_server_process, basic_adapter_kwargs): """Test generate method.""" adapter = HttpServerAdapter(**basic_adapter_kwargs) with patch.object(adapter, "_make_request") as mock_request: mock_request.return_value = {"text": "Generated text"} result = adapter.generate( prompt="Hello world", sampling_params={"temperature": 0.7}, return_logprob=True, ) assert result == {"text": "Generated text"} mock_request.assert_called_once_with( "generate", { "text": "Hello world", "sampling_params": {"temperature": 0.7}, "return_logprob": True, }, only_master=False, ) def test_flush_cache(self, mock_launch_server_process, basic_adapter_kwargs): """Test flush_cache method.""" adapter = HttpServerAdapter(**basic_adapter_kwargs) with patch("verl.workers.rollout.sglang_rollout.http_server_engine.requests.get") as mock_get: with patch("time.sleep") as mock_sleep: # First call fails, second succeeds mock_responses = [ Mock(status_code=503), # Service unavailable Mock(status_code=200, json=lambda: {"cache_flushed": True}), ] mock_get.side_effect = mock_responses result = adapter.flush_cache() assert result == {"cache_flushed": True} assert mock_get.call_count == 2 mock_sleep.assert_called_once() def test_flush_cache_non_master(self, mock_launch_server_process): """Test flush_cache for non-master node.""" kwargs = {"host": "localhost", "port": 8000, "node_rank": 1, "model_path": "/tmp/test_model"} adapter = HttpServerAdapter(**kwargs) result = adapter.flush_cache() assert result == {} def test_memory_management_methods(self, mock_launch_server_process, basic_adapter_kwargs): """Test memory release and resume methods.""" adapter = HttpServerAdapter(**basic_adapter_kwargs) with patch.object(adapter, "_make_request") as mock_request: mock_request.return_value = {"status": "success"} # Test release_memory_occupation result = adapter.release_memory_occupation(["weights", "kv_cache"]) assert result == {"status": "success"} mock_request.assert_called_with("release_memory_occupation", {"tags": ["weights", "kv_cache"]}) # Test resume_memory_occupation result = adapter.resume_memory_occupation(["weights"]) assert result == {"status": "success"} mock_request.assert_called_with("resume_memory_occupation", {"tags": ["weights"]}) def test_generation_control_methods(self, mock_launch_server_process, basic_adapter_kwargs): """Test generation control methods.""" adapter = HttpServerAdapter(**basic_adapter_kwargs) with patch.object(adapter, "_make_request") as mock_request: mock_request.return_value = {"status": "success"} def test_shutdown(self, mock_launch_server_process, mock_kill_process_tree, router_adapter_kwargs): """Test shutdown method.""" with patch("verl.workers.rollout.sglang_rollout.http_server_engine.requests.post") as mock_post: mock_response = Mock() mock_response.status_code = 200 mock_post.return_value = mock_response adapter = 
HttpServerAdapter(**router_adapter_kwargs)

            adapter.shutdown()

            # Should unregister from router
            assert mock_post.call_count == 2  # Once for registration, once for unregistration

            # Should kill process
            mock_kill_process_tree.assert_called_once_with(mock_launch_server_process.return_value.pid)

    def test_shutdown_with_errors(self, mock_launch_server_process, mock_kill_process_tree, router_adapter_kwargs):
        """Test shutdown method with errors."""
        with patch("verl.workers.rollout.sglang_rollout.http_server_engine.requests.post") as mock_post:
            # Mock registration success but unregistration failure
            mock_post.side_effect = [
                Mock(status_code=200),  # Registration success
                requests.RequestException("Unregistration failed"),  # Unregistration failure
            ]

            # Mock process kill failure
            mock_kill_process_tree.side_effect = Exception("Kill failed")

            adapter = HttpServerAdapter(**router_adapter_kwargs)

            # Should not raise exceptions
            adapter.shutdown()

            assert mock_post.call_count == 2
            mock_kill_process_tree.assert_called_once_with(mock_launch_server_process.return_value.pid)

    # Edge cases for HttpServerEngineAdapter
    def test_empty_and_none_parameters(self, mock_launch_server_process, basic_adapter_kwargs):
        """Test handling of empty and None parameters."""
        adapter = HttpServerAdapter(**basic_adapter_kwargs)

        with patch.object(adapter, "_make_request") as mock_request:
            mock_request.return_value = {"status": "success"}
            req = UpdateWeightsFromTensorReqInput(
                serialized_named_tensors=None,
                load_format=None,
                flush_cache=None,
            )

            # Test generate with all None parameters
            result = adapter.generate()
            assert result == {"status": "success"}

            # Test with empty lists
            result = adapter.update_weights_from_tensor(req)
            assert result == {"status": "success"}

            # Test with empty tags
            result = adapter.release_memory_occupation([])
            assert result == {"status": "success"}

    def test_large_payload_handling(self, mock_launch_server_process, basic_adapter_kwargs):
        """Test handling of large payloads."""
        adapter = HttpServerAdapter(**basic_adapter_kwargs)

        with patch.object(adapter, "_make_request") as mock_request:
            mock_request.return_value = {"status": "success"}

            # Test with large tensor list
            large_tensor_list = [MultiprocessingSerializer.serialize(f"tensor_{i}") for i in range(1000)]
            req = UpdateWeightsFromTensorReqInput(
                serialized_named_tensors=large_tensor_list,
                load_format="safetensors",
                flush_cache=True,
            )
            result = adapter.update_weights_from_tensor(req)
            assert result == {"status": "success"}

            # Test with large prompt
            large_prompt = "A" * 10000
            result = adapter.generate(prompt=large_prompt)
            assert result == {"status": "success"}

    def test_timeout_edge_cases(self, mock_launch_server_process):
        """Test various timeout scenarios."""
        # Test with very small timeout
        kwargs = {"host": "localhost", "port": 8000, "node_rank": 0, "model_path": "/tmp/test_model", "timeout": 0.001}
        adapter = HttpServerAdapter(**kwargs)

        with patch("verl.workers.rollout.sglang_rollout.http_server_engine.requests.post") as mock_post:
            mock_post.side_effect = requests.exceptions.Timeout()

            with pytest.raises(RuntimeError, match="Failed to complete request"):
                adapter._make_request("test_endpoint")

    def test_extreme_configuration_values(self, mock_launch_server_process):
        """Test extreme configuration values."""
        # Test with extreme values
        kwargs = {
            "host": "localhost",
            "port": 8000,
            "node_rank": 0,
            "model_path": "/tmp/test_model",
            "timeout": 0.001,  # Very small
            "max_attempts": 100,  # Very large
            "retry_delay": 0.001,  # Very small
        }
        adapter = HttpServerAdapter(**kwargs)

        assert
adapter.timeout == 0.001 assert adapter.max_attempts == 100 assert adapter.retry_delay == 0.001 class TestAsyncHttpServerEngineAdapter: """Test cases for AsyncHttpServerEngineAdapter class.""" def test_init(self, mock_launch_server_process, basic_adapter_kwargs): """Test async adapter initialization.""" adapter = AsyncHttpServerAdapter(max_connections=50, **basic_adapter_kwargs) assert adapter.max_connections == 50 @pytest.mark.asyncio async def test_make_async_request_success(self, mock_launch_server_process, basic_adapter_kwargs): """Test successful async HTTP request.""" # Instantiate adapter adapter = AsyncHttpServerAdapter(**basic_adapter_kwargs) mock_response = AsyncMock() mock_response.status = 200 mock_response.json = AsyncMock(return_value={"status": "success"}) mock_response.raise_for_status = Mock() mock_post_context_manager = AsyncMock() mock_post_context_manager.__aenter__.return_value = mock_response mock_session = AsyncMock(spec=aiohttp.ClientSession) mock_session.closed = False mock_session.post.return_value = mock_post_context_manager mock_session_cm = AsyncMock() mock_session_cm.__aenter__.return_value = mock_session with patch.object(adapter, "_get_session", return_value=mock_session_cm): result = await adapter._make_async_request("test_endpoint", {"param": "value"}) # Assert result is correct assert result == {"status": "success"} # Verify post was called mock_session.post.assert_called_once_with( "http://localhost:8000/test_endpoint", json={"param": "value"}, timeout=adapter.timeout ) @pytest.mark.asyncio async def test_make_async_request_get_method(self, mock_launch_server_process, basic_adapter_kwargs): """Test async GET request using aiohttp and proper context mocking.""" # Instantiate the async adapter adapter = AsyncHttpServerAdapter(**basic_adapter_kwargs) mock_response = AsyncMock() mock_response.status = 200 mock_response.json = AsyncMock(return_value={"data": "test"}) mock_response.raise_for_status = Mock() mock_get_context_manager = AsyncMock() mock_get_context_manager.__aenter__.return_value = mock_response mock_session = AsyncMock(spec=aiohttp.ClientSession) mock_session.closed = False mock_session.get.return_value = mock_get_context_manager mock_session_cm = AsyncMock() mock_session_cm.__aenter__.return_value = mock_session with patch.object(adapter, "_get_session", return_value=mock_session_cm): result = await adapter._make_async_request("test_endpoint", method="GET") # Validate assert result == {"data": "test"} mock_session.get.assert_called_once_with("http://localhost:8000/test_endpoint", timeout=adapter.timeout) @pytest.mark.asyncio async def test_make_async_request_non_master(self, mock_launch_server_process): """Test async request from non-master node.""" kwargs = {"host": "localhost", "port": 8000, "node_rank": 1, "model_path": "/tmp/test_model"} adapter = AsyncHttpServerAdapter(**kwargs) result = await adapter._make_async_request("test_endpoint") assert result == {} @pytest.mark.asyncio async def test_async_generate(self, mock_launch_server_process, basic_adapter_kwargs): """Test async generate method.""" adapter = AsyncHttpServerAdapter(**basic_adapter_kwargs) with patch.object(adapter, "_make_async_request", new_callable=AsyncMock) as mock_request: mock_request.return_value = {"text": "Generated text"} result = await adapter.generate( prompt="Hello world", sampling_params={"temperature": 0.7}, return_logprob=True, ) assert result == {"text": "Generated text"} mock_request.assert_called_once() @pytest.mark.asyncio async def 
test_async_memory_management(self, mock_launch_server_process, basic_adapter_kwargs):
        """Test async memory management methods."""
        adapter = AsyncHttpServerAdapter(**basic_adapter_kwargs)

        with patch.object(adapter, "_make_async_request", new_callable=AsyncMock) as mock_request:
            mock_request.return_value = {"status": "success"}

            # Test release_memory_occupation
            result = await adapter.release_memory_occupation(["weights"])
            assert result == {"status": "success"}
            mock_request.assert_called_with("release_memory_occupation", {"tags": ["weights"]})

            # Test resume_memory_occupation
            result = await adapter.resume_memory_occupation(["weights"])
            assert result == {"status": "success"}
            mock_request.assert_called_with("resume_memory_occupation", {"tags": ["weights"]})

            assert mock_request.call_count == 2  # one call for release and one for resume


class TestErrorRecovery:
    """Test error recovery mechanisms."""

    def test_flush_cache_recovery(self, mock_launch_server_process, basic_adapter_kwargs):
        """Test flush cache recovery from failures."""
        adapter = HttpServerAdapter(max_attempts=2, **basic_adapter_kwargs)

        with patch("verl.workers.rollout.sglang_rollout.http_server_engine.requests.get") as mock_get:
            # Simulate multiple failures then success
            mock_get.side_effect = [
                requests.exceptions.ConnectionError(),
                requests.exceptions.Timeout(),
                Mock(status_code=503),  # Service unavailable
                Mock(status_code=200, json=lambda: {"cache_flushed": True}),
            ]

            with patch("time.sleep"):
                result = adapter.flush_cache()

            assert result == {"cache_flushed": True}

    def test_flush_cache_max_attempts(self, mock_launch_server_process, basic_adapter_kwargs):
        """Test flush cache max retries exceeded."""
        adapter = HttpServerAdapter(max_attempts=1, **basic_adapter_kwargs)

        with patch("verl.workers.rollout.sglang_rollout.http_server_engine.requests.get") as mock_get:
            # All attempts fail
            mock_get.side_effect = requests.exceptions.ConnectionError()

            with patch("time.sleep"):
                result = adapter.flush_cache()

            assert result == {}  # Should return empty dict on failure

    def test_network_partition_recovery(self, mock_launch_server_process, basic_adapter_kwargs):
        """Test recovery from network partition scenarios."""
        adapter = HttpServerAdapter(max_attempts=3, **basic_adapter_kwargs)

        with patch("verl.workers.rollout.sglang_rollout.http_server_engine.requests.post") as mock_post:
            # Simulate network partition then recovery
            mock_post.side_effect = [
                requests.exceptions.ConnectionError("Network unreachable"),
                requests.exceptions.ConnectionError("Network unreachable"),
                Mock(status_code=200, json=lambda: {"recovered": True}),
            ]

            with patch("time.sleep"):
                result = adapter._make_request("test_endpoint")

            assert result == {"recovered": True}


class TestResourceManagement:
    """Test resource management and cleanup."""

    def test_resource_cleanup_on_exception(
        self, mock_launch_server_process, mock_kill_process_tree, basic_adapter_kwargs
    ):
        """Test resource cleanup when exceptions occur."""
        adapter = HttpServerAdapter(**basic_adapter_kwargs)

        # Simulate exception during operation
        with patch.object(adapter, "_make_request", side_effect=Exception("Test error")):
            try:
                adapter.generate(prompt="test")
            except Exception:
                pass

        # Cleanup should still work
        adapter.shutdown()
        mock_kill_process_tree.assert_called_once_with(mock_launch_server_process.return_value.pid)

    def test_multiple_shutdown_calls(self, mock_launch_server_process, basic_adapter_kwargs):
        """Test multiple shutdown calls are safe."""
        adapter = HttpServerAdapter(**basic_adapter_kwargs)

        # Multiple shutdown calls should be safe
        adapter.shutdown()
        adapter.shutdown()
        adapter.shutdown()


class TestDataTypeHandling:
    """Test handling of various data types."""

    def test_complex_data_structures(self, mock_launch_server_process, basic_adapter_kwargs):
        """Test handling of complex data structures."""
        adapter = HttpServerAdapter(**basic_adapter_kwargs)

        with patch.object(adapter, "_make_request") as mock_request:
            mock_request.return_value = {"status": "success"}

            # Test with complex sampling params
            complex_sampling_params = {
                "temperature": 0.7,
                "top_p": 0.9,
                "top_k": 50,
                "repetition_penalty": 1.1,
                "stop_sequences": ["</s>", "\n\n"],
                "max_tokens": 100,
                "logit_bias": {"token_123": 0.5, "token_456": -0.5},
                "nested_config": {
                    "beam_search": True,
                    "num_beams": 4,
                    "early_stopping": True,
                },
            }

            result = adapter.generate(
                prompt="Test prompt",
                sampling_params=complex_sampling_params,
            )
            assert result == {"status": "success"}

            # Verify the complex structure was passed through
            call_args = mock_request.call_args[0][1]
            assert call_args["sampling_params"] == complex_sampling_params


class TestIntegration:
    """Integration tests for both adapters."""

    def test_error_scenarios(self, mock_launch_server_process, basic_adapter_kwargs):
        """Test various error scenarios."""
        adapter = HttpServerAdapter(**basic_adapter_kwargs)

        # Test with None payload
        with patch.object(adapter, "_make_request") as mock_request:
            mock_request.return_value = {}
            result = adapter.generate()
            assert result == {}

        # Test with empty parameters
        with patch.object(adapter, "_make_request") as mock_request:
            mock_request.return_value = {}
            req = UpdateWeightsFromTensorReqInput(
                serialized_named_tensors=None,
                load_format=None,
                flush_cache=None,
            )
            result = adapter.update_weights_from_tensor(req)
            assert result == {}


================================================
FILE: verl_distillation/tests/workers/rollout/rollout_vllm/run_fsdp_vllm.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
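# Script flow: build an HF generate() baseline, shard the actor with FSDP
# (SHARDED_STATE_DICT), time llm.sync_model_weights() after a warmup loop, then
# generate with vLLM (TP=4) and print both responses for comparison.
# Assumes a distributed launch so initialize_global_process_group() succeeds,
# e.g. (assumed, not mandated by this file):
#   torchrun --nproc_per_node=8 tests/workers/rollout/rollout_vllm/run_fsdp_vllm.py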
import os import time import torch import torch.distributed as dist from torch.distributed.fsdp import CPUOffload, MixedPrecision from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.api import ShardedStateDictConfig, ShardingStrategy, StateDictType from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer from vllm import SamplingParams from verl.third_party.vllm import LLM from verl.utils.distributed import initialize_global_process_group def main(): assert torch.cuda.is_available(), "CUDA must be present to run FSDP vLLM example" local_rank, rank, world_size = initialize_global_process_group() local_cache_path = "~/.cache/verl/rlhf" local_cache_path = os.path.expanduser(local_cache_path) hdfs_path = "Qwen/Qwen2-7B-Instruct" from verl.utils.fs import copy_to_local local_model_path = copy_to_local(src=hdfs_path, cache_dir=local_cache_path) tokenizer = AutoTokenizer.from_pretrained(local_model_path, trust_remote_code=True) actor_model_config = AutoConfig.from_pretrained(local_model_path, trust_remote_code=True) with torch.device("cuda"): actor_model = AutoModelForCausalLM.from_pretrained(local_model_path, trust_remote_code=True) actor_model.to(torch.bfloat16) max_prompt_length = 16 response_length = 32 preencode_prompts = [ "The president of the United States is", "The capital of France is", "The future of AI is", ] tokenizer.pad_token = tokenizer.eos_token prompts = tokenizer(preencode_prompts, return_tensors="pt", padding=True) input_ids = prompts["input_ids"] attention_mask = prompts["attention_mask"] from verl.utils.torch_functional import pad_sequence_to_length input_ids = pad_sequence_to_length(input_ids, max_prompt_length, tokenizer.pad_token_id, left_pad=True).cuda() attention_mask = pad_sequence_to_length(attention_mask, max_prompt_length, 0, left_pad=True).cuda() from transformers import GenerationConfig generation_config = GenerationConfig(do_sample=False) actor_model.cuda() output = actor_model.generate( input_ids=input_ids, attention_mask=attention_mask, max_new_tokens=32, # max_length=max_length, eos_token_id=tokenizer.eos_token_id, pad_token_id=tokenizer.pad_token_id, generation_config=generation_config, # renormalize_logits=True, output_scores=False, # this is potentially very large return_dict_in_generate=True, use_cache=False, ) # may OOM when use_cache = True seq = output.sequences response = seq[:, max_prompt_length:] print(f"hf response: {tokenizer.batch_decode(response)}") tensor_model_parallel_size = 4 from torch.distributed.device_mesh import init_device_mesh device_mesh = init_device_mesh("cuda", mesh_shape=(world_size,), mesh_dim_names=["fsdp"]) mixed_precision = MixedPrecision(param_dtype=torch.bfloat16, reduce_dtype=torch.float32, buffer_dtype=torch.float32) fsdp_model = FSDP( actor_model, use_orig_params=True, auto_wrap_policy=None, device_id=torch.cuda.current_device(), sharding_strategy=ShardingStrategy.FULL_SHARD, mixed_precision=mixed_precision, cpu_offload=CPUOffload(offload_params=False), sync_module_states=False, device_mesh=device_mesh, ) FSDP.set_state_dict_type( fsdp_model, state_dict_type=StateDictType.SHARDED_STATE_DICT, state_dict_config=ShardedStateDictConfig() ) state_dict = fsdp_model.state_dict() sampling_params = SamplingParams( temperature=0, top_p=1, n=1, max_tokens=response_length, logprobs=1, ignore_eos=True, detokenize=False ) print(actor_model_config) llm = LLM( model=None, tokenizer=tokenizer, model_hf_config=actor_model_config, tensor_parallel_size=tensor_model_parallel_size, 
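        # NOTE: model=None together with load_format="dummy_dtensor" starts the
        # engine with placeholder weights; the real FSDP weights arrive via the
        # llm.sync_model_weights() calls below.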
enforce_eager=True, dtype="bfloat16", load_format="dummy_dtensor", gpu_memory_utilization=0.8, trust_remote_code=True, ) # Warmup iterations for _ in range(10): torch.cuda.synchronize() llm.sync_model_weights(actor_weights=state_dict, load_format="dtensor") torch.cuda.synchronize() dist.barrier() start_time = time.time() llm.sync_model_weights(actor_weights=state_dict, load_format="dtensor") torch.cuda.synchronize() dist.barrier() end_time = time.time() # Calculate elapsed time elapsed_time = end_time - start_time print(f"Time taken: {elapsed_time:.6f} seconds") input_ids = input_ids.cuda() attention_mask = attention_mask.cuda() idx_list = [] batch_size = input_ids.shape[0] pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id from verl.workers.rollout.vllm_rollout.vllm_rollout_spmd import _pre_process_inputs for i in range(batch_size): idx_list.append(_pre_process_inputs(pad_token_id, input_ids[i])) print("start generation") outputs = llm.generate(prompt_token_ids=idx_list, sampling_params=sampling_params, use_tqdm=False) vllm_output = outputs[0].cuda() if torch.distributed.get_rank() == 0: print(f"hf response: {tokenizer.batch_decode(response)}") print(f"vllm response: {tokenizer.batch_decode(vllm_output)}") if __name__ == "__main__": main() ================================================ FILE: verl_distillation/tests/workers/rollout/rollout_vllm/test_vllm_model_rope_scaling.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import os import torch import torch.distributed import torch.distributed as dist from omegaconf import OmegaConf from transformers import AutoTokenizer from verl import DataProto from verl.utils.config import omega_conf_to_dataclass from verl.utils.distributed import initialize_global_process_group from verl.utils.model import compute_position_id_with_mask from verl.workers.config import HFModelConfig, RolloutConfig from verl.workers.rollout.vllm_rollout.vllm_rollout_spmd import vLLMRollout def test_vllm_rollout_with_yarn_position_embeddings(): """ Test the vLLM rollout with yarn position embeddings. 
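
    The prompts built by prepare_input_dataproto() are ~32810 tokens, past the
    base 32768-token context window, so generation only succeeds when vLLM
    honors the YaRN rope_scaling from the model config. Expects 4 GPUs
    (tensor_model_parallel_size=4) under a distributed launcher such as torchrun.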
""" local_rank, rank, world_size = initialize_global_process_group() model_path = os.path.expanduser("~/models/OldKingMeister/Qwen2.5-1.5B-Instruct-YaRN") config = OmegaConf.create( { "name": "vllm", "prompt_length": 35000, "response_length": 512, "dtype": "bfloat16", "enforce_eager": True, "gpu_memory_utilization": 0.4, "enable_chunked_prefill": False, "free_cache_engine": False, "disable_log_stats": True, "max_model_len": 35000 + 512, "max_num_seqs": 1024, "load_format": "auto", "val_kwargs": { "top_k": -1, "top_p": 1.0, "temperature": 0, "n": 1, "do_sample": False, }, "tensor_model_parallel_size": 4, "calculate_log_probs": False, "do_sample": False, "temperature": 0.0, "max_num_batched_tokens": 35000 + 512, } ) tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True, padding_side="left") tokenizer.pad_token = tokenizer.eos_token # do_sample=False for temperate=0 deterministic input_dataproto = prepare_input_dataproto(tokenizer, config, validate=True, do_sample=False) rollout_config: RolloutConfig = omega_conf_to_dataclass(config, dataclass_type=RolloutConfig) model_config = HFModelConfig(path=model_path) model_config.tokenizer.pad_token = tokenizer.eos_token vllm_rollout = vLLMRollout( config=rollout_config, model_config=model_config, device_mesh=None, ) # rollout rollout_response = vllm_rollout.generate_sequences( prompts=input_dataproto, ) if rank == 0: print("VLLM Rollout Outputs:") print(tokenizer.batch_decode(rollout_response.batch["responses"][:], skip_special_tokens=False)) for response in rollout_response.batch["responses"]: assert "<|im_end|>" in tokenizer.decode(response, skip_special_tokens=False), ( "Response should contain <|im_end|> token" ) print("Checks passed.") del vllm_rollout gc.collect() torch.cuda.empty_cache() torch.cuda.ipc_collect() dist.barrier() torch.distributed.destroy_process_group() def prepare_input_dataproto(tokenizer, config, validate, do_sample=False): base_phrase = "Roses are red, sky is blue. " * 4096 preencode_prompts = [ # 32810 tokens > 32768 tokens [{"role": "user", "content": base_phrase + "Who won the Champions League in 2019?"}], [{"role": "user", "content": base_phrase + "The founder of Apple is"}], [{"role": "user", "content": base_phrase + "What's your name"}], ] formatted_prompts = [ tokenizer.apply_chat_template(conversation, tokenize=False, add_generation_prompt=True) for conversation in preencode_prompts ] prompts = tokenizer(formatted_prompts, return_tensors="pt", padding="max_length", max_length=config.prompt_length) input_dataproto = DataProto.from_dict( { "input_ids": prompts["input_ids"], "attention_mask": prompts["attention_mask"], "position_ids": compute_position_id_with_mask(prompts["attention_mask"]), }, meta_info={ "bos_token_id": tokenizer.bos_token_id, "eos_token_id": tokenizer.eos_token_id, "pad_token_id": tokenizer.pad_token_id, "validate": validate, "do_sample": do_sample, "response_length": config.response_length, "temperature": config.temperature, }, ) return input_dataproto if __name__ == "__main__": test_vllm_rollout_with_yarn_position_embeddings() ================================================ FILE: verl_distillation/tests/workers/rollout/rollout_vllm/test_vllm_spmd.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

import pytest
import torch
from torch.distributed.fsdp import CPUOffload, MixedPrecision
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.api import ShardedStateDictConfig, ShardingStrategy, StateDictType
from transformers import AutoModelForCausalLM, AutoTokenizer
from vllm import LLM, SamplingParams

from verl.utils.distributed import initialize_global_process_group
from verl.utils.torch_functional import pad_sequence_to_length


def levenshtein(s1, s2):
    m, n = len(s1), len(s2)
    # Initialize matrix of zeros
    dp = [[0] * (n + 1) for _ in range(m + 1)]
    # Initialize first column and first row of the matrix
    for i in range(m + 1):
        dp[i][0] = i  # Deletion from s1 to empty string
    for j in range(n + 1):
        dp[0][j] = j  # Insertion to s1 from empty string
    # Compute the Levenshtein distance matrix
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            cost = 0 if s1[i - 1] == s2[j - 1] else 1  # No cost if characters match
            dp[i][j] = min(
                dp[i - 1][j] + 1,  # Deletion
                dp[i][j - 1] + 1,  # Insertion
                dp[i - 1][j - 1] + cost,  # Substitution
            )
    return dp[m][n]


def are_lists_similar(a, b):
    if len(a) != len(b):
        print("The lists are of different lengths.")
        return False

    total_length = 0
    total_diff = 0

    for s1, s2 in zip(a, b, strict=True):
        max_len = max(len(s1), len(s2))
        total_length += max_len
        diff = levenshtein(s1, s2)
        total_diff += diff
        print(f"Comparing strings:\n{s1}\n{s2}\nDifference: {diff} characters\n")

    percentage_difference = (total_diff / total_length) * 100
    print(f"Total difference: {percentage_difference:.2f}%")

    return percentage_difference <= 15


@pytest.mark.skip("https://github.com/vllm-project/vllm/issues/16993")
def test_vllm_spmd():
    assert torch.cuda.device_count() >= 2, "At least 2 GPUs are required to run tp+dp tests."
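    # Flow: generate with the checkpoint weights vLLM loaded itself, hot-swap
    # the FSDP sharded state dict in via model.load_weights(), generate again,
    # and require both outputs to agree within are_lists_similar()'s 15%
    # Levenshtein budget. For intuition: levenshtein("kitten", "sitting") == 3
    # edits against a longer string of length 7, i.e. ~42.9%, which would fail.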
    local_rank, rank, world_size = initialize_global_process_group()

    # Initialize model and token
    local_cache_path = "~/.cache/verl/rlhf"
    local_cache_path = os.path.expanduser(local_cache_path)
    hdfs_path = "Qwen/Qwen2.5-1.5B-Instruct"

    from verl.utils.fs import copy_to_local

    local_model_path = copy_to_local(src=hdfs_path, cache_dir=local_cache_path)
    tokenizer = AutoTokenizer.from_pretrained(local_model_path, padding_side="left", trust_remote_code=True)
    actor_model = AutoModelForCausalLM.from_pretrained(local_model_path, trust_remote_code=True)
    actor_model.to(torch.bfloat16)

    # fill rollout config
    max_prompt_length = 16
    max_response_length = 32
    preencode_prompts = [
        "Who won the Champions League in 2019?",
        "The founder of Apple is",
        "What's your name?",
    ]
    tokenizer.pad_token = tokenizer.eos_token
    prompts = tokenizer(preencode_prompts, return_tensors="pt", padding=True)
    input_ids = prompts["input_ids"]
    attention_mask = prompts["attention_mask"]
    input_ids = pad_sequence_to_length(input_ids, max_prompt_length, tokenizer.pad_token_id, left_pad=True)
    attention_mask = pad_sequence_to_length(attention_mask, max_prompt_length, 0, left_pad=True)

    print("start generation")
    input_ids = input_ids.cuda()
    attention_mask = attention_mask.cuda()

    temperature = 0
    top_p = 1
    kwargs = dict(
        n=1, temperature=temperature, top_p=top_p, max_tokens=max_response_length, logprobs=1, ignore_eos=True
    )
    tensor_parallel_size = 4
    from torch.distributed.device_mesh import init_device_mesh

    device_mesh = init_device_mesh("cuda", mesh_shape=(world_size,), mesh_dim_names=["fsdp"])

    mixed_precision = MixedPrecision(param_dtype=torch.bfloat16, reduce_dtype=torch.float32, buffer_dtype=torch.float32)
    fsdp_model = FSDP(
        actor_model,
        use_orig_params=True,
        auto_wrap_policy=None,
        device_id=torch.cuda.current_device(),
        sharding_strategy=ShardingStrategy.FULL_SHARD,
        mixed_precision=mixed_precision,
        cpu_offload=CPUOffload(offload_params=False),
        sync_module_states=False,
        device_mesh=device_mesh,
    )

    FSDP.set_state_dict_type(
        fsdp_model, state_dict_type=StateDictType.SHARDED_STATE_DICT, state_dict_config=ShardedStateDictConfig()
    )
    state_dict = fsdp_model.state_dict()

    sampling_params = SamplingParams(**kwargs)

    llm = LLM(
        model=local_model_path,
        enable_sleep_mode=True,
        tensor_parallel_size=tensor_parallel_size,
        distributed_executor_backend="external_launcher",
        dtype="bfloat16",
        enforce_eager=True,
        gpu_memory_utilization=0.8,
        disable_custom_all_reduce=True,
        skip_tokenizer_init=False,
        enable_prefix_caching=True,
        trust_remote_code=True,
        seed=1,
    )

    outputs = llm.generate(preencode_prompts, sampling_params=sampling_params, use_tqdm=False)
    vllm_response_tokens = []
    for output in outputs:
        generated_text = output.outputs[0].text
        vllm_response_tokens.append(generated_text)

    world_size = torch.distributed.get_world_size()
    model = llm.llm_engine.model_executor.driver_worker.worker.model_runner.model
    model.load_weights(
        ((name, param.full_tensor() if world_size != 1 else param) for name, param in state_dict.items())
    )

    outputs = llm.generate(preencode_prompts, sampling_params=sampling_params, use_tqdm=False)
    verl_vllm_response_tokens = []
    for output in outputs:
        generated_text = output.outputs[0].text
        verl_vllm_response_tokens.append(generated_text)

    if torch.distributed.get_rank() == 0:
        print(f"vllm response: {vllm_response_tokens}")
        print(f"verl-vllm response: {verl_vllm_response_tokens}")
        assert are_lists_similar(vllm_response_tokens, verl_vllm_response_tokens), "Strings differ more than 15%:\n"
        print("Check Pass")

    torch.distributed.destroy_process_group()


if
__name__ == "__main__": test_vllm_spmd() ================================================ FILE: verl_distillation/tests/workers/rollout/test_hf_rollout.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import torch from omegaconf import OmegaConf from torch.distributed.fsdp import CPUOffload, MixedPrecision from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.api import ShardedStateDictConfig, ShardingStrategy, StateDictType from transformers import AutoModelForCausalLM, AutoTokenizer from verl import DataProto from verl.utils.distributed import initialize_global_process_group from verl.utils.fs import copy_to_local from verl.utils.model import compute_position_id_with_mask from verl.workers.rollout.hf_rollout import HFRollout BASE_HF_ROLLOUT_CONFIG = { "temperature": 1.0, "top_k": -1, "top_p": 1, "prompt_length": 64, "response_length": 64, "do_sample": True, "n": 1, "val_kwargs": { "top_k": -1, "top_p": 1.0, "temperature": 0, "n": 1, "do_sample": False, }, } def prepare_input_dataproto(tokenizer, config, validate): preencode_prompts = [ [{"role": "user", "content": "Who won the Champions League in 2019?"}], [{"role": "user", "content": "The founder of Apple is"}], [{"role": "user", "content": "What's your name"}], ] formatted_prompts = [ tokenizer.apply_chat_template(conversation, tokenize=False, add_generation_prompt=True) for conversation in preencode_prompts ] prompts = tokenizer(formatted_prompts, return_tensors="pt", padding="max_length", max_length=config.prompt_length) input_dataproto = DataProto.from_dict( { "input_ids": prompts["input_ids"], "attention_mask": prompts["attention_mask"], "position_ids": compute_position_id_with_mask(prompts["attention_mask"]), }, meta_info={ "bos_token_id": tokenizer.bos_token_id, "eos_token_id": tokenizer.eos_token_id, "pad_token_id": tokenizer.pad_token_id, "validate": validate, }, ) return input_dataproto def prepare_fsdp_model(model, world_size): from torch.distributed.device_mesh import init_device_mesh device_mesh = init_device_mesh("cuda", mesh_shape=(world_size,), mesh_dim_names=["fsdp"]) mixed_precision = MixedPrecision(param_dtype=torch.bfloat16, reduce_dtype=torch.float32, buffer_dtype=torch.float32) fsdp_model = FSDP( model, use_orig_params=True, auto_wrap_policy=None, device_id=torch.cuda.current_device(), sharding_strategy=ShardingStrategy.FULL_SHARD, mixed_precision=mixed_precision, cpu_offload=CPUOffload(offload_params=False), sync_module_states=False, device_mesh=device_mesh, ) FSDP.set_state_dict_type( fsdp_model, state_dict_type=StateDictType.SHARDED_STATE_DICT, state_dict_config=ShardedStateDictConfig() ) return fsdp_model def test_hf_rollout(n: int = 1, do_sample: bool = True, validate: bool = False): config = OmegaConf.create(BASE_HF_ROLLOUT_CONFIG) config.update({"n": n, "do_sample": do_sample}) assert torch.cuda.device_count() >= 2, "At least 2 GPUs is required to run tp+dp tests." 
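    # Flow: FSDP-wrap the actor, roll out n samples per prompt through HFRollout,
    # then check the generated batch size (input batch * n), that the response
    # attention mask zeroes out after the first EOS, and that position ids stay
    # consecutive across the prompt/response boundary.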
local_rank, rank, world_size = initialize_global_process_group() # Initialize model and tokenizer local_cache_path = "~/.cache/verl/rlhf" local_cache_path = os.path.expanduser(local_cache_path) hdfs_path = "Qwen/Qwen2-7B-Instruct" local_model_path = copy_to_local(src=hdfs_path, cache_dir=local_cache_path) tokenizer = AutoTokenizer.from_pretrained(local_model_path, padding_side="left", trust_remote_code=True) tokenizer.pad_token = tokenizer.eos_token # Initialize FSDP model actor_model = AutoModelForCausalLM.from_pretrained(local_model_path, trust_remote_code=True) actor_model.to(torch.bfloat16) fsdp_model = prepare_fsdp_model(actor_model, world_size) # Initialize HFRollout and start generate hf_rollout = HFRollout(fsdp_model, OmegaConf.create(config)) input = prepare_input_dataproto(tokenizer, config, validate).to(torch.cuda.current_device()) outputs = hf_rollout.generate_sequences(input) # check generated batch size is expected generated_batch_size = outputs.batch.batch_size[0] assert generated_batch_size == input.batch.batch_size[0] * config.n for i in range(generated_batch_size): prompt_tokens = outputs.batch["prompts"][i] prompt_mask = prompt_tokens != tokenizer.pad_token_id prompt_tokens = prompt_tokens[prompt_mask] decoded_prompt = tokenizer.decode(prompt_tokens, skip_special_tokens=False) response_tokens = outputs.batch["responses"][i] response_mask = response_tokens != tokenizer.pad_token_id response_tokens = response_tokens[response_mask] decoded_response = tokenizer.decode(response_tokens, skip_special_tokens=False) attention_mask = outputs.batch["attention_mask"][i] position_ids = outputs.batch["position_ids"][i] prompt_length = outputs.batch["prompts"].size(1) response_length = outputs.batch["responses"].size(1) assert attention_mask.size(0) == prompt_length + response_length assert position_ids.size(0) == prompt_length + response_length # check response attention mask is expected response_attention = attention_mask[prompt_length:] eos_positions = (outputs.batch["responses"][i] == tokenizer.pad_token_id).nonzero(as_tuple=True)[0] if len(eos_positions) > 0: first_eos_pos = eos_positions[0].item() assert response_attention[: first_eos_pos + 1].all(), "Response attention mask should be 1 until EOS" if first_eos_pos + 1 < response_length: assert not response_attention[first_eos_pos + 1 :].any(), ( "Response attention mask should be 0 after EOS" ) else: assert response_attention.all(), "Response attention mask should be all 1 if no EOS token" # check response position ids is expected prompt_positions = position_ids[:prompt_length] response_positions = position_ids[prompt_length:] valid_response_length = min(len(response_tokens), response_length) if valid_response_length > 0: assert response_positions[0] == prompt_positions[-1] + 1 for j in range(1, valid_response_length): assert response_positions[j] == response_positions[j - 1] + 1 # print generated text for inspection if torch.distributed.get_rank() == 0: print(f"prompt: {decoded_prompt}") print(f"response: {decoded_response}") print("=" * 30) if __name__ == "__main__": test_hf_rollout(n=2, do_sample=True, validate=False) # test_hf_rollout(n=1, do_sample=False, validate=True) # test_hf_rollout(n=1, do_sample=True, validate=False) ================================================ FILE: verl_distillation/tests/workers/rollout/test_sglang_async_rollout_mcp_tools.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. 
and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from tests/workers/rollout/test_sglang_async_rollout_sf_tools.py

import asyncio
import os
from copy import deepcopy
from unittest.mock import AsyncMock, MagicMock, patch

import numpy as np
import pytest
from tensordict import TensorDict
from transformers import AutoConfig, AutoTokenizer
from utils_sglang import get_rollout_config, prepare_inputs

from verl.protocol import DataProto
from verl.tools.mcp_search_tool import MCPSearchTool
from verl.tools.schemas import ToolResponse
from verl.tools.utils.mcp_clients.McpClientManager import MCPClientManager
from verl.utils.config import omega_conf_to_dataclass
from verl.workers.config import HFModelConfig, RolloutConfig
from verl.workers.rollout.schemas import AsyncRolloutRequest, AsyncRolloutRequestStateEnum, Message
from verl.workers.rollout.sglang_rollout.sglang_rollout import SGLangRollout

DEFAULT_USER_CONTENT_PREFIX = (
    "Answer the given question. You must conduct reasoning inside <think> and "
    "</think> first every time you get new information. After reasoning, if you find you lack "
    "some knowledge, you can call a search engine by <search> query </search> "
    "and it will return the top searched results between <information> and "
    "</information>. You can search as many times as you want. If you find no "
    "further external knowledge needed, you can directly provide the answer inside "
    "<answer> and </answer>, without detailed illustrations. For example, "
    "<answer> Beijing </answer>. Question: "
)
user_content = DEFAULT_USER_CONTENT_PREFIX.rstrip("\n") + "How's the weather lately?"
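
# The fixtures below script a two-turn tool-calling conversation: the assistant
# calls tavily_search_tool twice (today's, then tomorrow's Beijing weather),
# receives a mocked tool message after each call, and then answers directly.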
def get_search_messages(): user_prompt = { "role": "user", "content": user_content, } expect_turn_0_msg = { "role": "assistant", "content": "Let me search the web.", "tool_calls": [ { "id": "10", "type": "function", "function": { "name": "tavily_search_tool", "arguments": { "what_is_your_intent": "Search for the weather lately", "query": "the weather in Beijing today", "search_depth": "basic", "time_range": "day", "include_domains": ["google.com", "baidu.com"], "max_results": 2, }, }, } ], } expect_turn_1_msg = { "role": "assistant", "content": "Let me search again.", "tool_calls": [ { "type": "function", "function": { "name": "tavily_search_tool", "arguments": { "what_is_your_intent": "Search for the weather lately", "query": "the weather in Beijing tomorrow", "search_depth": "basic", "time_range": "day", "include_domains": ["google.com", "baidu.com"], "max_results": 2, }, }, } ], } expect_turn_2_msg = { "role": "assistant", "content": "Today is sunny and tomorrow will be cloudy in Beijing.", } # Mock search tool responses tool_return_0_msg = {"role": "tool", "content": [{"type": "text", "text": "Today's weather in Beijing is sunny."}]} tool_return_1_msg = { "role": "tool", "content": [{"type": "text", "text": "Tomorrow's weather in Beijing is cloudy."}], } user_prompts = [user_prompt] expect_turn_array = [expect_turn_0_msg, expect_turn_1_msg, expect_turn_2_msg] tool_return_array = [tool_return_0_msg, tool_return_1_msg] return user_prompts, expect_turn_array, tool_return_array class TestRolloutWithMCPSearchTools: local_model_path = os.path.expanduser("~/models/Qwen/Qwen2.5-0.5B") @pytest.fixture def qwen_tokenizer(self): tokenizer = AutoTokenizer.from_pretrained(self.local_model_path, padding_side="left") tokenizer.pad_token = tokenizer.eos_token return tokenizer # we only need this for tokenizer @pytest.fixture def qwen_model_config(self): config = AutoConfig.from_pretrained(self.local_model_path) return config @pytest.fixture def search_data(self, qwen_tokenizer): user_prompt, expect_turn_array, tool_return_array = get_search_messages() prompts = [[message] for message in user_prompt] preencode_turn_array = [ qwen_tokenizer.apply_chat_template([turn], tokenize=False, add_generation_prompt=False) for turn in expect_turn_array ] preencode_tool_return_array = [ ToolResponse(text=qwen_tokenizer.apply_chat_template([turn], tokenize=False, add_generation_prompt=True)) for turn in tool_return_array ] return prompts, preencode_turn_array, preencode_tool_return_array @pytest.fixture def search_rollout_config(self): max_prompt_length = 4096 max_response_length = 3000 dtype = "bfloat16" tensor_parallel_size = 1 tool_path = "./resource/tool_configs/mcp_tool_config" rollout_config = get_rollout_config( max_response_length, max_prompt_length, dtype, tensor_parallel_size, tool_path ) return rollout_config @pytest.fixture def search_data_proto(self, search_data, qwen_tokenizer): preencode_prompts, _, _ = search_data prompts = [ qwen_tokenizer.apply_chat_template(message, tokenize=False, add_generation_prompt=True) for message in preencode_prompts ] input_ids, attention_mask, position_ids = prepare_inputs(qwen_tokenizer, prompts, 1000) prompt_dict = TensorDict( { "input_ids": input_ids, "attention_mask": attention_mask, "position_ids": position_ids, }, batch_size=input_ids.shape[0], ) messages = np.asarray(preencode_prompts) tools_kwargs = np.array( [ { "tavily_search_tool": { "create_kwargs": {"ground_truth": "Today is sunny and tomorrow will be cloudy in Beijing."}, }, } ], dtype=object, ) index = 
np.array([0], dtype=object) prompts = DataProto( batch=prompt_dict, non_tensor_batch={"raw_prompt": messages, "tools_kwargs": tools_kwargs, "index": index} ) return prompts @pytest.fixture def mock_rollout(self, search_rollout_config, qwen_tokenizer, qwen_model_config): """Mock the rollout instance with sampling_params initialized.""" tool_schema = [ { "type": "function", "function": { "name": "tavily_search_tool", "description": "A powerful web search tool...", "parameters": { "type": "object", "properties": { "what_is_your_intent": { "type": "string", "description": "Describe your intent for using Tavily", }, "query": {"type": "string", "description": "Search query"}, "search_depth": { "type": "string", "description": "The depth of the search ('basic' or 'advanced')", }, "topic": { "type": "string", "description": "The category of the search ('general' or 'news')", }, "days": { "type": "integer", "description": "Number of days back to include in search results (only for " "'news' topic)", }, "time_range": { "type": "string", "description": "Time range for results ('day', 'week', 'month', 'year', 'd', " "'w', 'm', 'y')", }, "include_domains": { "type": "array", "description": "List of domains to specifically include in search results", }, "exclude_domains": { "type": "array", "description": "List of domains to specifically exclude from search results", }, "include_answer": { "type": "boolean", "description": "Whether to include an answer summary generated by an LLM", }, "include_raw_content": { "type": "boolean", "description": "Whether to include the cleaned and parsed HTML content of each result", }, "include_images": { "type": "boolean", "description": "Whether to include images from search results", }, "include_image_descriptions": { "type": "boolean", "description": "Whether to include descriptions with images", }, "max_results": { "type": "integer", "description": "Maximum number of results to return (5-20)", }, "async_search": { "type": "boolean", "description": "Whether to perform the search asynchronously", }, }, "required": ["what_is_your_intent", "query"], }, "strict": False, }, } ] with ( patch.object(MCPClientManager, "fetch_tool_schemas", return_value=tool_schema), patch.object(SGLangRollout, "_init_distributed_env", return_value=None), patch.object(SGLangRollout, "_init_inference_engine", return_value=None), patch.object(SGLangRollout, "_init_sampling_params", return_value=None), ): rollout_config: RolloutConfig = omega_conf_to_dataclass(search_rollout_config, dataclass_type=RolloutConfig) model_config = HFModelConfig(path=self.local_model_path) rollout = SGLangRollout( config=rollout_config, model_config=model_config, device_mesh=None, ) rollout.sampling_params = { "n": 1, "max_new_tokens": search_rollout_config.response_length, "presence_penalty": 0.0, "frequency_penalty": 0.0, "repetition_penalty": 1.0, } return rollout def test_tools_registration(self, mock_rollout): assert len(mock_rollout._tool_schemas) != 0 assert "tavily_search_tool" in mock_rollout._tool_map.keys() from verl.tools.mcp_search_tool import MCPSearchTool assert isinstance(mock_rollout._tool_map["tavily_search_tool"], MCPSearchTool) # depend on the tokenizer assert mock_rollout._tool_call_parser_type == "qwen25" def test_rollout_req_creation(self, mock_rollout, search_data_proto): req_list = mock_rollout._preprocess_prompt_to_async_rollout_requests(search_data_proto, n=1) assert len(req_list) == 1 assert req_list[0].state == AsyncRolloutRequestStateEnum.PENDING assert len(req_list[0].tool_schemas) == 1 
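
    # The over-size case below caps the dialogue at one assistant turn and has
    # the mocked engine report finish_reason "length", so the request must
    # complete as truncated output: the prompt plus a single assistant message,
    # with the search tool never executed and an empty reward-score list.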
    def test_over_size_case(self, mock_rollout, search_data_proto, search_data):
        mock_rollout.config.multi_turn.max_assistant_turns = 1
        req = mock_rollout._preprocess_prompt_to_async_rollout_requests(search_data_proto, n=1)[0]
        req = MagicMock(wraps=req, spec=AsyncRolloutRequest)
        req.finalize = MagicMock()
        req_list = [req]
        _, expect_turn_array, _ = search_data
        # mock a meta_info whose finish_reason is "length" to indicate the response was truncated
        mock_rollout._handle_engine_call = MagicMock()
        future = asyncio.Future()
        future.set_result(
            {
                "text": expect_turn_array[0],
                "meta_info": {
                    "id": "d1188d81cba840359df5b352b344bc8e",
                    "finish_reason": {"type": "length", "length": 3000},
                    "prompt_tokens": 132,
                    "completion_tokens": 100,
                    "cached_tokens": 0,
                    "e2e_latency": 2.23543,
                },
            }
        )
        mock_rollout._handle_engine_call.return_value = future
        mock_rollout._tp_rank = 0
        loop = asyncio.get_event_loop()
        output_req_list = loop.run_until_complete(
            asyncio.gather(
                *[mock_rollout._async_rollout_a_request(req, True, False) for req in req_list],
            )
        )
        assert len(output_req_list) == 1
        output_req = output_req_list[0]
        assert output_req.state == AsyncRolloutRequestStateEnum.COMPLETED
        assert output_req.reward_scores.get("tavily_search_tool") == []
        # we should only have two messages: one for the prompt, one for the response
        assert len(output_req.messages) == 2
        assert output_req.messages[1] == Message(
            role="assistant",
            content=expect_turn_array[0],
            tool_calls=None,
        )

    @patch.object(MCPSearchTool, "execute", new_callable=AsyncMock)
    def test_tool_call_basic_case(self, mock_execute, mock_rollout, search_data_proto, search_data):
        _, expect_turn_array, tool_return_array = search_data
        # Mock search tool execution to return predefined responses
        mock_execute.side_effect = [(msg, 0.0, {"status": "success"}) for msg in tool_return_array]
        mock_rollout.config.multi_turn.max_assistant_turns = 10
        req = mock_rollout._preprocess_prompt_to_async_rollout_requests(search_data_proto, n=1)[0]
        req = MagicMock(wraps=req, spec=AsyncRolloutRequest)
        req.finalize = MagicMock()
        req_list = [req]
        mock_rollout._handle_engine_call = MagicMock()
        futures = [asyncio.Future() for _ in expect_turn_array]
        for idx, (fut, turn) in enumerate(zip(futures, expect_turn_array, strict=True)):
            fut.set_result(
                {
                    "text": turn,
                    "meta_info": {
                        "id": "d1188d81cba840359df5b352b344bc8e",
                        "finish_reason": {"type": "tool_calls" if idx < len(expect_turn_array) - 1 else "stop"},
                        "prompt_tokens": len(turn),
                        "completion_tokens": 100,
                        "cached_tokens": 0,
                        "e2e_latency": 2.23543,
                    },
                }
            )
            if idx < len(expect_turn_array) - 1:
                assert mock_rollout._function_call_parser.has_tool_call(turn)
                assert mock_rollout._function_call_parser.parse_non_stream(turn)
        mock_rollout._handle_engine_call.side_effect = futures
        mock_rollout._tp_rank = 0
        loop = asyncio.get_event_loop()
        output_req_list = loop.run_until_complete(
            asyncio.gather(*[mock_rollout._async_rollout_a_request(req, True, False) for req in req_list])
        )
        # Verify conversation completed successfully with proper tool usage
        output_req = output_req_list[0]
        assert output_req.state == AsyncRolloutRequestStateEnum.COMPLETED
        assert "tavily_search_tool" in output_req.metrics
        assert output_req.metrics["tavily_search_tool"][0]["status"] == "success"
        assert mock_execute.await_count == 2
        assert len(output_req.messages) == 6
        # Verify tool response messages contain expected content
        search_counter = 0
        for msg in output_req.messages:
            if msg.role == "tool":
                assert msg.content == tool_return_array[search_counter].text
                search_counter += 1
        assert search_counter == 2
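
    # The batch case below gives each of the 100 cloned requests its own queue
    # of pre-resolved futures keyed by batch_data_id; a patched
    # _handle_engine_call pops the next future per request, so the concurrent
    # rollouts each replay the same three turns independently.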
@patch.object(MCPSearchTool, "execute", new_callable=AsyncMock) def test_tool_call_batch_case(self, mock_execute, mock_rollout, search_data_proto, search_data): _, expect_turn_array, tool_return_array = search_data # Mock tool execution for large batch (100 requests * 2 calls each) mock_execute.side_effect = [ (tool_return_array[0], 0.0, {"status": "success"}), (tool_return_array[1], 0.0, {"status": "success"}), ] * 100 mock_rollout.config.multi_turn.max_assistant_turns = 10 base_req = mock_rollout._preprocess_prompt_to_async_rollout_requests(search_data_proto, n=1)[0] req_nums = 100 req_list = [] req_turns_map = {} req_turns_counter = {} for i in range(req_nums): tmp_req = deepcopy(base_req) tmp_req.batch_data_id = i tmp_req.request_id = i req_list.append(MagicMock(wraps=tmp_req, spec=AsyncRolloutRequest)) futures = [asyncio.Future() for _ in expect_turn_array] for idx, (fut, turn) in enumerate(zip(futures, expect_turn_array, strict=True)): fut.set_result( { "text": turn, "meta_info": { "id": "dummy", "finish_reason": {"type": "tool_calls" if idx < len(expect_turn_array) - 1 else "stop"}, "prompt_tokens": len(turn), "completion_tokens": 100, }, } ) req_turns_map[i] = futures req_turns_counter[i] = 0 async def hacked_handle_engine_call(self, _req: AsyncRolloutRequest, *_args, **_kwargs): fut = req_turns_map[_req.batch_data_id][req_turns_counter[_req.batch_data_id]] req_turns_counter[_req.batch_data_id] += 1 return await fut with patch.object(SGLangRollout, "_handle_engine_call", new=hacked_handle_engine_call): mock_rollout._tp_rank = 0 loop = asyncio.get_event_loop() output_req_list = loop.run_until_complete( asyncio.gather(*[mock_rollout._async_rollout_a_request(r, True, False) for r in req_list]) ) # Verify all requests completed successfully assert len(output_req_list) == req_nums for out_req in output_req_list: assert out_req.state == AsyncRolloutRequestStateEnum.COMPLETED assert "tavily_search_tool" in out_req.metrics for metric in out_req.metrics["tavily_search_tool"]: assert metric["status"] == "success" assert len(out_req.messages) == 6 assert sum(1 for m in out_req.messages if m.role == "tool") == 2 assert mock_execute.await_count == 2 * req_nums ================================================ FILE: verl_distillation/tests/workers/rollout/test_sglang_async_rollout_multimodal_delta.py ================================================ # Copyright 2025 Amazon.com, Inc. or its affiliates # Copyright 2023-2024 SGLang Team # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
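
# This file checks incremental ("delta") tokenization for multimodal tool
# responses: each image appended via add_tool_response_messages must extend
# req.input_ids in place, and re-applying the chat template over the complete
# message list has to reproduce exactly the ids and multi_modal_inputs built up
# step by step. When all images are resized to a common size, the token delta
# contributed by each tool-response image should be constant.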
import os import pytest from verl.tools.schemas import ToolResponse from verl.utils.dataset.vision_utils import process_image from verl.utils.tokenizer import hf_processor from verl.workers.rollout.schemas import ( AsyncRolloutRequest, AsyncRolloutRequestStateEnum, TokenizationSanityCheckModeEnum, ) def _test_add_tool_response_messages_image_delta(processor, image_list, description_list, resize_image=False): assert len(image_list) == len(description_list) # Get the smallest dimensions across all images processed_images = [] for img_url in image_list: img = process_image(img_url) processed_images.append(img) min_width = min(img.size[0] for img in processed_images) min_height = min(img.size[1] for img in processed_images) min_size = (min_width, min_height) if resize_image: processed_images_resized = [] for img in processed_images: img = img.resize(min_size) processed_images_resized.append(img) processed_images = processed_images_resized # Initial message history system_prompt = ( "You will be provided with an image. Describe this image and then generate a new image for the next round" ) messages = [ { "role": "system", "content": system_prompt, }, { "role": "user", "content": [ {"type": "text", "text": "Here is the first image provided: "}, {"type": "image", "image": [processed_images[0]]}, ], }, ] # Initial multi_modal_data with one image multi_modal_data = {"image": [processed_images[0]], "video": []} # Minimal required fields for AsyncRolloutRequest req = AsyncRolloutRequest( batch_data_id=0, request_id="test-req-1", state=AsyncRolloutRequestStateEnum.PENDING, messages=messages, multi_modal_keys=["image", "video"], multi_modal_data=multi_modal_data.copy(), tool_schemas=[], tools_kwargs={}, interaction_kwargs={}, input_ids=None, prompt_ids=None, response_ids=None, attention_mask=None, prompt_attention_mask=None, response_attention_mask=None, position_ids=None, prompt_position_ids=None, response_position_ids=None, loss_mask=None, prompt_loss_mask=None, response_loss_mask=None, reward_scores={}, max_prompt_len=8192, max_response_len=8192, max_model_len=16384, metrics={}, use_inference_chat_template=True, tokenization_sanity_check_mode=TokenizationSanityCheckModeEnum.STRICT, generation_prompt_ids=None, base_conv_wo_gen_prompt_end_pos=0, base_conv_with_gen_prompt_end_pos=0, processing_class=processor, ) prev_generated_len = 0 # Add First Assistant Message and first tool response message(image) for idx, img in enumerate(processed_images): if idx == 0: continue _ = req.get_generation_prompt_ids(processor) req.add_assistant_message(processor, content=description_list[idx - 1]) before_tool_call_len = req.input_ids.shape[-1] req.add_tool_response_messages( processor, [ToolResponse(image=[img], text="Here is the new image you requested: ")] ) after_tool_call_len = req.input_ids.shape[-1] if prev_generated_len == 0: prev_generated_len = after_tool_call_len - before_tool_call_len else: if resize_image: assert after_tool_call_len - before_tool_call_len == prev_generated_len assert req.multi_modal_data["image"] == processed_images[: idx + 1] _ = req.get_generation_prompt_ids(processor) req.add_assistant_message(processor, content=description_list[-1]) messages = [msg.model_dump() for msg in req.messages] tools = [tool.model_dump() for tool in req.tool_schemas] if req.tool_schemas else None full_prompt_info = req._handle_apply_chat_template( processor, messages, multi_modal_data=req.multi_modal_data, tools=tools, add_generation_prompt=False, tokenize=True, return_dict=True, ) full_prompt_ids = 
full_prompt_info["input_ids"]
    assert full_prompt_ids.eq(req.input_ids).all()

    # Copy full_prompt_info before popping keys so the original BatchFeature
    # stays intact (np.array() would only keep the keys of a BatchFeature).
    full_prompt_multi_modal_inputs = full_prompt_info.copy()
    full_prompt_multi_modal_inputs.pop("input_ids", None)
    full_prompt_multi_modal_inputs.pop("attention_mask", None)
    for key in full_prompt_multi_modal_inputs:
        assert full_prompt_multi_modal_inputs[key].eq(req.multi_modal_inputs[key]).all()


@pytest.mark.skipif(
    hf_processor(os.path.expanduser("~/models/Qwen/Qwen2.5-VL-3B-Instruct")) is None,
    reason="Processor not available for Qwen/Qwen2.5-VL-3B-Instruct",
)
def test_add_tool_response_messages_image_delta():
    processor = hf_processor(os.path.expanduser("~/models/Qwen/Qwen2.5-VL-3B-Instruct"))
    # From Qwen2.5-VL-3B-Instruct HF example
    img_1_url = {"image": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg"}
    img_1_description = "A woman sits on the beach at sunset, smiling as she shares a high five with her large dog."
    # GitHub Logo
    img_2_url = {"image": "https://github.githubassets.com/assets/GitHub-Mark-ea2971cee799.png"}
    img_2_description = "A GitHub Logo image"
    # Octocat
    img_3_url = {"image": "https://octodex.github.com/images/orderedlistocat.png"}
    img_3_description = "An Octocat image"
    image_list = [img_1_url, img_2_url, img_3_url]
    description_list = [img_1_description, img_2_description, img_3_description]
    _test_add_tool_response_messages_image_delta(processor, image_list, description_list, resize_image=False)


@pytest.mark.skipif(
    hf_processor(os.path.expanduser("~/models/Qwen/Qwen2.5-VL-3B-Instruct")) is None,
    reason="Processor not available for Qwen/Qwen2.5-VL-3B-Instruct",
)
def test_add_tool_response_messages_image_delta_resize_image():
    processor = hf_processor(os.path.expanduser("~/models/Qwen/Qwen2.5-VL-3B-Instruct"))
    # From Qwen2.5-VL-3B-Instruct HF example
    img_1_url = {"image": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg"}
    img_1_description = "A woman sits on the beach at sunset, smiling as she shares a high five with her large dog."
    # GitHub Logo
    img_2_url = {"image": "https://github.githubassets.com/assets/GitHub-Mark-ea2971cee799.png"}
    img_2_description = "A GitHub Logo image"
    # Octocat
    img_3_url = {"image": "https://octodex.github.com/images/orderedlistocat.png"}
    img_3_description = "An Octocat image"
    image_list = [img_1_url, img_2_url, img_3_url]
    description_list = [img_1_description, img_2_description, img_3_description]
    _test_add_tool_response_messages_image_delta(processor, image_list, description_list, resize_image=True)


================================================
FILE: verl_distillation/tests/workers/rollout/test_sglang_async_rollout_search_tools.py
================================================
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright 2023-2024 SGLang Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
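
# This file mirrors the MCP search-tool test above but exercises the built-in
# SearchTool, whose single registered function takes a query_list argument; the
# tool itself is stubbed the same way, via
#
#     @patch.object(SearchTool, "execute", new_callable=AsyncMock)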
# Adapted from tests/workers/rollout/test_sglang_async_rollout_sf_tools.py

import asyncio
import os
from copy import deepcopy
from unittest.mock import AsyncMock, MagicMock, patch

import numpy as np
import pytest
from tensordict import TensorDict
from transformers import AutoConfig, AutoTokenizer
from utils_sglang import get_rollout_config, prepare_inputs

from verl.protocol import DataProto
from verl.tools.schemas import (
    OpenAIFunctionParametersSchema,
    OpenAIFunctionPropertySchema,
    OpenAIFunctionSchema,
    OpenAIFunctionToolSchema,
    ToolResponse,
)
from verl.tools.search_tool import SearchTool
from verl.utils.config import omega_conf_to_dataclass
from verl.workers.config import HFModelConfig, RolloutConfig
from verl.workers.rollout.schemas import AsyncRolloutRequest, AsyncRolloutRequestStateEnum, Message
from verl.workers.rollout.sglang_rollout.sglang_rollout import SGLangRollout

DEFAULT_USER_CONTENT_PREFIX = (
    "Answer the given question. You must conduct reasoning inside <think> and </think> "
    "first every time you get new information. After reasoning, if you find you lack "
    "some knowledge, you can call a search engine by <search> query </search> "
    "and it will return the top searched results between <information> and "
    "</information>. You can search as many times as you want. If you find no "
    "further external knowledge needed, you can directly provide the answer inside "
    "<answer> and </answer>, without detailed illustrations. For example, "
    "<answer> Beijing </answer>. Question: "
)
user_content = DEFAULT_USER_CONTENT_PREFIX.rstrip("\n") + "How's the weather lately?"


def get_search_messages():
    user_prompt = {
        "role": "user",
        "content": user_content,
    }
    expect_turn_0_msg = {
        "role": "assistant",
        "content": "Let me search the web.",
        "tool_calls": [{"type": "function", "function": {"name": "search", "arguments": {"query": "today's weather"}}}],
    }
    expect_turn_1_msg = {
        "role": "assistant",
        "content": "Let me search again.",
        "tool_calls": [
            {"type": "function", "function": {"name": "search", "arguments": {"query": "tomorrow's weather"}}}
        ],
    }
    expect_turn_2_msg = {
        "role": "assistant",
        "content": "Today is sunny and tomorrow will be cloudy in Beijing.",
    }
    # Mock search tool responses
    tool_return_0_msg = {"role": "tool", "content": "Today's weather in Beijing is sunny."}
    tool_return_1_msg = {"role": "tool", "content": "Tomorrow's weather in Beijing is cloudy."}
    user_prompts = [user_prompt]
    expect_turn_array = [expect_turn_0_msg, expect_turn_1_msg, expect_turn_2_msg]
    tool_return_array = [tool_return_0_msg, tool_return_1_msg]
    return user_prompts, expect_turn_array, tool_return_array


class TestRolloutWithSearchTools:
    local_model_path = os.path.expanduser("~/models/Qwen/Qwen2.5-0.5B")

    @pytest.fixture
    def qwen_tokenizer(self):
        tokenizer = AutoTokenizer.from_pretrained(self.local_model_path, padding_side="left")
        tokenizer.pad_token = tokenizer.eos_token
        return tokenizer

    # we only need this for the tokenizer
    @pytest.fixture
    def qwen_model_config(self):
        config = AutoConfig.from_pretrained(self.local_model_path)
        return config

    @pytest.fixture
    def search_data(self, qwen_tokenizer):
        user_prompt, expect_turn_array, tool_return_array = get_search_messages()
        prompts = [[message] for message in user_prompt]
        preencode_turn_array = [
            qwen_tokenizer.apply_chat_template([turn], tokenize=False, add_generation_prompt=False)
            for turn in expect_turn_array
        ]
        preencode_tool_return_array = [
            ToolResponse(text=qwen_tokenizer.apply_chat_template([turn], tokenize=False, add_generation_prompt=True))
            for turn in tool_return_array
        ]
        return prompts, preencode_turn_array, preencode_tool_return_array

    @pytest.fixture
    def
search_rollout_config(self): max_prompt_length = 4096 max_response_length = 3000 dtype = "bfloat16" tensor_parallel_size = 1 tool_path = "./resource/tool_configs/search_tool_config" rollout_config = get_rollout_config( max_response_length, max_prompt_length, dtype, tensor_parallel_size, tool_path ) return rollout_config @pytest.fixture def search_data_proto(self, search_data, qwen_tokenizer): preencode_prompts, _, _ = search_data prompts = [ qwen_tokenizer.apply_chat_template(message, tokenize=False, add_generation_prompt=True) for message in preencode_prompts ] input_ids, attention_mask, position_ids = prepare_inputs(qwen_tokenizer, prompts, 1000) prompt_dict = TensorDict( { "input_ids": input_ids, "attention_mask": attention_mask, "position_ids": position_ids, }, batch_size=input_ids.shape[0], ) messages = np.asarray(preencode_prompts) tools_kwargs = np.array( [ { "search": { "create_kwargs": { "ground_truth": "Today is sunny and tomorrow will be cloudy in Beijing.", "data_source": "searchR1_nq", }, }, } ], dtype=object, ) index = np.array([0], dtype=object) prompts = DataProto( batch=prompt_dict, non_tensor_batch={"raw_prompt": messages, "tools_kwargs": tools_kwargs, "index": index} ) return prompts @pytest.fixture def mock_rollout(self, search_rollout_config, qwen_tokenizer, qwen_model_config): """Mock the rollout instance with sampling_params initialized.""" with ( patch.object(SGLangRollout, "_init_distributed_env", return_value=None), patch.object(SGLangRollout, "_init_inference_engine", return_value=None), patch.object(SGLangRollout, "_init_sampling_params", return_value=None), ): rollout_config: RolloutConfig = omega_conf_to_dataclass(search_rollout_config, dataclass_type=RolloutConfig) model_config = HFModelConfig(path=self.local_model_path) rollout = SGLangRollout( config=rollout_config, model_config=model_config, device_mesh=None, ) rollout.sampling_params = { "n": 1, "max_new_tokens": search_rollout_config.response_length, "presence_penalty": 0.0, "frequency_penalty": 0.0, "repetition_penalty": 1.0, } return rollout @patch.object(SGLangRollout, "_init_distributed_env", return_value=None) @patch.object(SGLangRollout, "_init_inference_engine", return_value=None) @patch.object(SGLangRollout, "_init_sampling_params", return_value=None) def test_tools_registration( self, mock_env, mock_engine, mock_sampling, search_rollout_config, qwen_tokenizer, qwen_model_config ): rollout_config: RolloutConfig = omega_conf_to_dataclass(search_rollout_config, dataclass_type=RolloutConfig) model_config = HFModelConfig(path=self.local_model_path) rollout = SGLangRollout( config=rollout_config, model_config=model_config, device_mesh=None, ) assert len(rollout._tool_schemas) == 1 assert "search" in rollout._tool_map.keys() from verl.tools.search_tool import SearchTool assert isinstance(rollout._tool_map["search"], SearchTool) # depend on the tokenizer assert rollout._tool_call_parser_type == "qwen25" @patch.object(SGLangRollout, "_init_distributed_env", return_value=None) @patch.object(SGLangRollout, "_init_inference_engine", return_value=None) @patch.object(SGLangRollout, "_init_sampling_params", return_value=None) def test_rollout_req_creation( self, mock_env, mock_engine, mock_sampling, search_rollout_config, qwen_tokenizer, qwen_model_config, search_data_proto, ): rollout_config: RolloutConfig = omega_conf_to_dataclass(search_rollout_config, dataclass_type=RolloutConfig) model_config = HFModelConfig(path=self.local_model_path) rollout = SGLangRollout( config=rollout_config, 
model_config=model_config, device_mesh=None, ) req_list = rollout._preprocess_prompt_to_async_rollout_requests(search_data_proto, n=1) assert len(req_list) == 1 assert req_list[0].state == AsyncRolloutRequestStateEnum.PENDING assert len(req_list[0].tool_schemas) == 1 print(type(req_list[0].tool_schemas[0])) assert req_list[0].tool_schemas[0] == OpenAIFunctionToolSchema( type="function", function=OpenAIFunctionSchema( name="search", description="Searches the web for relevant information based on the given query.", parameters=OpenAIFunctionParametersSchema( type="object", properties={ "query_list": OpenAIFunctionPropertySchema( type="array", description="A list of fully-formed semantic queries. The tool will return search " "results for each query.", items={"type": "string"}, ) }, required=["query_list"], ), strict=False, ), ) def test_over_size_case(self, mock_rollout, search_data_proto, search_data): mock_rollout.config.multi_turn.max_assistant_turns = 1 req = mock_rollout._preprocess_prompt_to_async_rollout_requests(search_data_proto, n=1)[0] req = MagicMock(wraps=req, spec=AsyncRolloutRequest) req.finalize = MagicMock() req_list = [req] _, expect_turn_array, _ = search_data mock_rollout._handle_engine_call = MagicMock() future = asyncio.Future() future.set_result( { "text": expect_turn_array[0], "meta_info": { "id": "d1188d81cba840359df5b352b344bc8e", "finish_reason": {"type": "length", "length": 3000}, "prompt_tokens": 132, "completion_tokens": 100, "cached_tokens": 0, "e2e_latency": 2.23543, }, } ) mock_rollout._handle_engine_call.return_value = future mock_rollout._tp_rank = 0 loop = asyncio.get_event_loop() output_req_list = loop.run_until_complete( asyncio.gather( *[mock_rollout._async_rollout_a_request(req, True, False) for req in req_list], ) ) assert len(output_req_list) == 1 output_req = output_req_list[0] assert output_req.state == AsyncRolloutRequestStateEnum.COMPLETED assert output_req.reward_scores.get("search") == [] assert len(output_req.messages) == 2 assert output_req.messages[1] == Message( role="assistant", content=expect_turn_array[0], tool_calls=None, ) @patch.object(SearchTool, "execute", new_callable=AsyncMock) def test_tool_call_basic_case(self, mock_execute, mock_rollout, search_data_proto, search_data): _, expect_turn_array, tool_return_array = search_data # Mock search tool execution to return predefined responses mock_execute.side_effect = [(msg, 0.0, {"status": "success"}) for msg in tool_return_array] mock_rollout.config.multi_turn.max_assistant_turns = 10 mock_rollout._tool_map["search"].retrieval_service_url = "mock://dummy" req = mock_rollout._preprocess_prompt_to_async_rollout_requests(search_data_proto, n=1)[0] req = MagicMock(wraps=req, spec=AsyncRolloutRequest) req.finalize = MagicMock() req_list = [req] mock_rollout._handle_engine_call = MagicMock() futures = [asyncio.Future() for i in expect_turn_array] for idx, (i, turn) in enumerate(zip(futures, expect_turn_array, strict=True)): i.set_result( { "text": turn, "meta_info": { "id": "d1188d81cba840359df5b352b344bc8e", "finish_reason": {"type": "tool_calls" if idx < len(expect_turn_array) - 1 else "stop"}, "prompt_tokens": len(turn), "completion_tokens": 100, "cached_tokens": 0, "e2e_latency": 2.23543, }, } ) if idx < len(expect_turn_array) - 1: assert mock_rollout._function_call_parser.has_tool_call(turn) assert mock_rollout._function_call_parser.parse_non_stream(turn) mock_rollout._handle_engine_call.side_effect = futures mock_rollout._tp_rank = 0 loop = asyncio.get_event_loop() output_req_list = 
loop.run_until_complete( asyncio.gather(*[mock_rollout._async_rollout_a_request(req, True, False) for req in req_list]) ) # Verify conversation completed successfully with proper tool usage output_req = output_req_list[0] assert output_req.state == AsyncRolloutRequestStateEnum.COMPLETED assert "search" in output_req.metrics assert output_req.metrics["search"][0]["status"] == "success" assert mock_execute.await_count == 2 assert len(output_req.messages) == 6 # user + 3*assistant + 2*tool_call # Verify tool response messages contain expected content search_counter = 0 for msg in output_req.messages: if msg.role == "tool": assert msg.content == tool_return_array[search_counter].text search_counter += 1 assert search_counter == 2 @patch.object(SearchTool, "execute", new_callable=AsyncMock) def test_tool_call_batch_case(self, mock_execute, mock_rollout, search_data_proto, search_data): _, expect_turn_array, tool_return_array = search_data # Mock tool execution for large batch (100 requests * 2 calls each) mock_execute.side_effect = [ (tool_return_array[0], 0.0, {"status": "success"}), (tool_return_array[1], 0.0, {"status": "success"}), ] * 100 mock_rollout.config.multi_turn.max_assistant_turns = 10 mock_rollout._tool_map["search"].retrieval_service_url = "mock://dummy" base_req = mock_rollout._preprocess_prompt_to_async_rollout_requests(search_data_proto, n=1)[0] req_nums = 100 req_list = [] req_turns_map = {} req_turns_counter = {} for i in range(req_nums): tmp_req = deepcopy(base_req) tmp_req.batch_data_id = i tmp_req.request_id = i req_list.append(MagicMock(wraps=tmp_req, spec=AsyncRolloutRequest)) futures = [asyncio.Future() for _ in expect_turn_array] for idx, (fut, turn) in enumerate(zip(futures, expect_turn_array, strict=True)): fut.set_result( { "text": turn, "meta_info": { "id": "dummy", "finish_reason": {"type": "tool_calls" if idx < len(expect_turn_array) - 1 else "stop"}, "prompt_tokens": len(turn), "completion_tokens": 100, }, } ) req_turns_map[i] = futures req_turns_counter[i] = 0 async def hacked_handle_engine_call(self, _req: AsyncRolloutRequest, *_args, **_kwargs): fut = req_turns_map[_req.batch_data_id][req_turns_counter[_req.batch_data_id]] req_turns_counter[_req.batch_data_id] += 1 return await fut with patch.object(SGLangRollout, "_handle_engine_call", new=hacked_handle_engine_call): mock_rollout._tp_rank = 0 loop = asyncio.get_event_loop() output_req_list = loop.run_until_complete( asyncio.gather(*[mock_rollout._async_rollout_a_request(r, True, False) for r in req_list]) ) # Verify all requests completed successfully assert len(output_req_list) == req_nums for out_req in output_req_list: assert out_req.state == AsyncRolloutRequestStateEnum.COMPLETED assert "search" in out_req.metrics for metric in out_req.metrics["search"]: assert metric["status"] == "success" assert len(out_req.messages) == 6 # user + 3 assistant + 2 tool assert sum(1 for m in out_req.messages if m.role == "tool") == 2 assert mock_execute.await_count == 2 * req_nums ================================================ FILE: verl_distillation/tests/workers/rollout/test_sglang_async_rollout_sf_tools.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import asyncio
import os
import time
from copy import deepcopy
from functools import wraps
from unittest.mock import MagicMock, patch

import numpy as np
import pytest
import ray
from tensordict import TensorDict
from torch.testing._internal.common_distributed import MultiProcessTestCase
from transformers import AutoConfig, AutoTokenizer
from utils_sglang import get_rollout_config, prepare_inputs

from verl.protocol import DataProto
from verl.tools.sandbox_fusion_tools import TokenBucketWorker
from verl.tools.schemas import (
    OpenAIFunctionParametersSchema,
    OpenAIFunctionPropertySchema,
    OpenAIFunctionSchema,
    OpenAIFunctionToolSchema,
    ToolResponse,
)
from verl.utils.config import omega_conf_to_dataclass
from verl.workers.config import HFModelConfig, RolloutConfig
from verl.workers.rollout.schemas import AsyncRolloutRequest, AsyncRolloutRequestStateEnum, Message
from verl.workers.rollout.sglang_rollout.sglang_rollout import SGLangRollout

sandbox_url = ""


def get_sandbox_fusion_messages():
    user_prompt = {
        "role": "user",
        "content": """ Solve the following problem step by step. You now have the ability to selectively write executable Python code to enhance your reasoning process. \n\n**user question:**\nThere are 152 students at Dala High School. Assume the following: \n- 100 students take a Math class \n- 94 students take a Science class \n- 57 students take an English class \n- 73 students take a Math class and a Science class \n- 24 students take a Math class and an English class \n- 27 students take a Science class and an English class \n- 22 students take a Math class and a Science class and an English class\n \nHow many students take neither a Math class nor a Science class nor an English class?\n\nRemember to place the final answer in the last part using the format: \n\n\boxed{'The final answer goes here.'}\n """,
    }
    expect_turn_0_msg = {
        "role": "assistant",
        "content": """ Okay, so I need to find out how many students at Dala High School are not taking any of the three classes: Math, Science, or English. The total number of students is 152. Let me see... I remember this is a problem about sets and maybe using the principle of inclusion-exclusion. Let me recall how that works.\n\nFirst, the inclusion-exclusion principle for three sets says that the total number of students taking at least one of the classes is equal to the sum of the numbers in each individual class, minus the sum of the numbers in each pair of classes, plus the number in all three classes. Then, subtract that total from the overall number of students to get those not taking any of the three. \n\nLet me write that down step by step. Let M be the set of students taking Math, S for Science, and E for English. Then:\n\nTotal in at least one class = |M ∪ S ∪ E| = |M| + |S| + |E| - |M ∩ S| - |M ∩ E| - |S ∩ E| + |M ∩ S ∩ E|\n\nGiven the numbers:\n\n|M| = 100\n\n|S| = 94\n\n|E| = 57\n\n|M ∩ S| = 73\n\n|M ∩ E| = 24\n\n|S ∩ E| = 27\n\n|M ∩ S ∩ E| = 22\n\nSo plugging these into the formula:\n\nTotal = 100 + 94 + 57 - 73 - 24 - 27 + 22\n\nLet me compute that step by step using code to ensure accuracy.\n """,
        "tool_calls": [
            {
                "type": "function",
                "function": {
                    "name": "code_interpreter",
                    "arguments": {
                        "code": """M = 100\\nS = 94\\nE = 57\\nM_S = 73\\nM_E = 24\\nS_E = 27\\nM_S_E = 22\\n\\ntotal_in_any = M + S + E - M_S - M_E - S_E + M_S_E\\nstudents_neither = 152 - total_in_any\\nprint(students_neither)""",
                    },
                },
            }
        ],
    }
    tool_return_0_msg = {
        "role": "tool",
        "content": """3""",
    }
    expect_turn_1_msg = {
        "role": "assistant",
        "content": """ The code returns 3, indicating that 3 students take none of the classes. To verify, I'll break down each category to ensure the total matches:\n\n- Only Math and Science: 73 - 22 = 51\n- Only Math and English: 24 - 22 = 2\n- Only Science and English: 27 - 22 = 5\n- Only Math: 100 - 51 - 2 - 22 = 25\n- Only Science: 94 - 51 - 5 - 22 = 16\n- Only English: 57 - 2 - 5 - 22 = 28\n\nSumming all categories:\n """,
        "tool_calls": [
            {
                "type": "function",
                "function": {
                    "name": "code_interpreter",
                    "arguments": {
                        "code": """only_M_S = 73 - 22\\nonly_M_E = 24 - 22\\nonly_S_E = 27 - 22\\n\\nonly_M = 100 - only_M_S - only_M_E - 22\\nonly_S = 94 - only_M_S - only_S_E - 22\\nonly_E = 57 - only_M_E - only_S_E - 22\\n\\ntotal_verify = only_M + only_S + only_E + only_M_S + only_M_E + only_S_E + 22\\nprint(total_verify)""",
                    },
                },
            }
        ],
    }
    tool_return_1_msg = {
        "role": "tool",
        "content": """149""",
    }
    expect_turn_2_msg = {
        "role": "assistant",
        "content": """ The verification total is 149, so students not taking any classes are 152 - 149 = 3, confirming the initial result.\n\n\n\\boxed{3}\n """,
    }
    user_prompts = [user_prompt]
    expect_turn_array = [expect_turn_0_msg, expect_turn_1_msg, expect_turn_2_msg]
    tool_return_array = [tool_return_0_msg, tool_return_1_msg]
    return user_prompts, expect_turn_array, tool_return_array


def skip_if_valid_sandbox(url):
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            if url == "" or url is None:
                pytest.skip("No valid sandbox url provided")
            # run the wrapped test when a sandbox url is configured
            return func(*args, **kwargs)

        return wrapper

    return decorator


class TestRolloutWithTools:
    local_model_path = os.path.expanduser("~/models/Qwen/Qwen2.5-0.5B")

    @pytest.fixture
    def qwen_tokenizer(self):
        tokenizer = AutoTokenizer.from_pretrained(self.local_model_path, padding_side="left")
        tokenizer.pad_token = tokenizer.eos_token
        return tokenizer

    # we only need this for the tokenizer
    @pytest.fixture
    def qwen_model_config(self):
        config = AutoConfig.from_pretrained(self.local_model_path)
        return config

    @pytest.fixture
    def sandbox_fusion_data(self, qwen_tokenizer):
        user_prompt, expect_turn_array, tool_return_array = get_sandbox_fusion_messages()
        prompts = [[message] for message in user_prompt]
        preencode_turn_array = [
            qwen_tokenizer.apply_chat_template([turn], tokenize=False, add_generation_prompt=False)
            for turn in expect_turn_array
        ]
        preencode_tool_return_array = [
            ToolResponse(text=qwen_tokenizer.apply_chat_template([turn], tokenize=False, add_generation_prompt=True))
            for turn in tool_return_array
        ]
        return prompts, preencode_turn_array, preencode_tool_return_array

    @pytest.fixture
    def sandbox_fusion_rollout_config(self):
        max_prompt_length = 1024
        max_response_length = 1024
        dtype = "bfloat16"
        tensor_parallel_size = 1
        tool_path =
"./resource/tool_configs/sandbox_fusion_tool_config" rollout_config = get_rollout_config( max_response_length, max_prompt_length, dtype, tensor_parallel_size, tool_path ) return rollout_config @pytest.fixture def sandbox_data_proto(self, sandbox_fusion_data, qwen_tokenizer): preencode_prompts, _, _ = sandbox_fusion_data prompts = [ qwen_tokenizer.apply_chat_template(message, tokenize=False, add_generation_prompt=True) for message in preencode_prompts ] input_ids, attention_mask, position_ids = prepare_inputs(qwen_tokenizer, prompts, 1000) prompt_dict = TensorDict( { "input_ids": input_ids, "attention_mask": attention_mask, "position_ids": position_ids, }, batch_size=input_ids.shape[0], ) messages = np.asarray(preencode_prompts) tools_kwargs = np.array( [ { "code_interpreter": { "create_kwargs": {"ground_truth": "test-solution-str"}, }, } ], dtype=object, ) index = np.array([0], dtype=object) prompts = DataProto( batch=prompt_dict, non_tensor_batch={"raw_prompt": messages, "tools_kwargs": tools_kwargs, "index": index} ) return prompts @pytest.fixture def mock_rollout(self, sandbox_fusion_rollout_config, qwen_tokenizer, qwen_model_config): """Mock the rollout instance""" with patch.object(SGLangRollout, "_init_distributed_env", return_value=None), patch.object( SGLangRollout, "_init_inference_engine", return_value=None ), patch.object(SGLangRollout, "_init_sampling_params", return_value=None): rollout_config: RolloutConfig = omega_conf_to_dataclass(sandbox_fusion_rollout_config, dataclass_type=RolloutConfig) model_config = HFModelConfig(path=self.local_model_path) rollout = SGLangRollout( config=rollout_config, model_config=model_config, device_mesh=None, ) # set default sampling_params rollout.sampling_params = { "n": 1, "max_new_tokens": sandbox_fusion_rollout_config.response_length, "presence_penalty": 0.0, "frequency_penalty": 0.0, "repetition_penalty": 1.0, } return rollout def test_tools_registration(self, mock_rollout): """Test tool registration functionality""" assert len(mock_rollout._tool_schemas) == 1 assert "code_interpreter" in mock_rollout._tool_map.keys() from verl.tools.sandbox_fusion_tools import SandboxFusionTool assert isinstance(mock_rollout._tool_map["code_interpreter"], SandboxFusionTool) assert mock_rollout._tool_call_parser_type == "qwen25" def test_rollout_req_creation(self, mock_rollout, sandbox_data_proto): """Test request creation functionality""" req_list = mock_rollout._preprocess_prompt_to_async_rollout_requests(sandbox_data_proto, n=1) assert len(req_list) == 1 assert req_list[0].state == AsyncRolloutRequestStateEnum.PENDING assert len(req_list[0].tool_schemas) == 1 print(type(req_list[0].tool_schemas[0])) assert req_list[0].tool_schemas[0] == OpenAIFunctionToolSchema( type="function", function=OpenAIFunctionSchema( name="code_interpreter", description="A tool for executing code.", parameters=OpenAIFunctionParametersSchema( type="object", properties={ "code": OpenAIFunctionPropertySchema( type="string", description="The code to execute.", enum=None, ) }, required=["code"], ), strict=False, ), ) def test_over_size_case(self, mock_rollout, sandbox_data_proto, sandbox_fusion_data): """Test over-size response truncation case""" mock_rollout.config.multi_turn.max_assistant_turns = 1 req = mock_rollout._preprocess_prompt_to_async_rollout_requests(sandbox_data_proto, n=1)[0] req = MagicMock(wraps=req, spec=AsyncRolloutRequest) req.finalize = MagicMock() req_list = [req] _, expect_turn_array, tool_return_array = sandbox_fusion_data # here we mock a meta info with 
finish_reason "length" to indicate the response was truncated
        mock_rollout._handle_engine_call = MagicMock()
        future = asyncio.Future()
        future.set_result(
            {
                "text": expect_turn_array[0],
                "meta_info": {
                    "id": "d1188d81cba840359df5b352b344bc8e",
                    "finish_reason": {"type": "length", "length": 1024},
                    "prompt_tokens": 132,
                    "completion_tokens": 100,
                    "cached_tokens": 0,
                    "e2e_latency": 9.9304039478302,
                },
            }
        )
        mock_rollout._handle_engine_call.return_value = future
        mock_rollout._tp_rank = 0
        loop = asyncio.get_event_loop()
        output_req_list = loop.run_until_complete(
            asyncio.gather(
                *[mock_rollout._async_rollout_a_request(req, True, False) for req in req_list],
            )
        )
        assert len(output_req_list) == 1
        output_req = output_req_list[0]
        assert output_req.state == AsyncRolloutRequestStateEnum.COMPLETED
        assert output_req.reward_scores.get("code_interpreter") == []
        # we should only have two messages: one for the prompt, one for the response
        assert len(output_req.messages) == 2
        assert output_req.messages[1] == Message(
            role="assistant",
            content=expect_turn_array[0],
            tool_calls=None,
        )

    @skip_if_valid_sandbox(sandbox_url)
    def test_tool_call_basic_case(self, mock_rollout, sandbox_data_proto, sandbox_fusion_data):
        """Test basic tool call case"""
        mock_rollout.config.multi_turn.max_assistant_turns = 10
        mock_rollout._tool_map["code_interpreter"].sandbox_fusion_url = sandbox_url
        req = mock_rollout._preprocess_prompt_to_async_rollout_requests(sandbox_data_proto, n=1)[0]
        req = MagicMock(wraps=req, spec=AsyncRolloutRequest)
        req.finalize = MagicMock()
        req_list = [req]
        _, expect_turn_array, tool_return_array = sandbox_fusion_data
        # mock the engine returning the expected turns one by one: "tool_calls"
        # for every intermediate turn and "stop" for the final answer
        mock_rollout._handle_engine_call = MagicMock()
        futures = [asyncio.Future() for _ in expect_turn_array]
        for idx, (fut, turn) in enumerate(zip(futures, expect_turn_array)):
            fut.set_result(
                {
                    "text": turn,
                    "meta_info": {
                        "id": "d1188d81cba840359df5b352b344bc8e",
                        "finish_reason": {"type": "tool_calls" if idx < len(expect_turn_array) - 1 else "stop"},
                        "prompt_tokens": len(turn),
                        "completion_tokens": 100,
                        "cached_tokens": 0,
                        "e2e_latency": 9.9304039478302,
                    },
                }
            )
            if idx < len(expect_turn_array) - 1:
                assert mock_rollout._function_call_parser.has_tool_call(turn)
                assert mock_rollout._function_call_parser.parse_non_stream(turn)
        mock_rollout._handle_engine_call.side_effect = futures
        mock_rollout._tp_rank = 0
        loop = asyncio.get_event_loop()
        output_req_list = loop.run_until_complete(
            asyncio.gather(
                *[mock_rollout._async_rollout_a_request(req, True, False) for req in req_list],
            )
        )
        assert len(output_req_list) == 1
        output_req = output_req_list[0]
        assert output_req.state == AsyncRolloutRequestStateEnum.COMPLETED
        # here we verify whether the code sandbox executed correctly
        assert output_req.metrics == {"code_interpreter": ["3", "149"]}
        assert mock_rollout._handle_engine_call.call_count == 3
        assert len(output_req.messages) == 6  # user + 3*assistant + 2*tool_call
        code_counter = 0
        for msg in output_req.messages:
            if msg.role == "tool":
                # compare before incrementing so the two tool messages check
                # tool_return_array[0] and tool_return_array[1]
                assert msg.content == tool_return_array[code_counter].text
                code_counter += 1
        assert code_counter == 2

    @skip_if_valid_sandbox(sandbox_url)
    def test_tool_call_batch_case(self, mock_rollout, sandbox_data_proto, sandbox_fusion_data):
        """Test batch tool call case"""
        mock_rollout.config.multi_turn.max_assistant_turns = 10
        mock_rollout._tool_map["code_interpreter"].sandbox_fusion_url = sandbox_url
        req = mock_rollout._preprocess_prompt_to_async_rollout_requests(sandbox_data_proto, n=1)[0]
        req_nums = 100
        req_list = []
        req_turns_counter = {}
        # this map
should be a Map[id: List[Future]]
        req_turns_map = {}
        _, expect_turn_array, tool_return_array = sandbox_fusion_data
        for i in range(req_nums):
            _temp_req = deepcopy(req)
            _temp_req.batch_data_id = i
            _temp_req.request_id = i
            req_list.append(MagicMock(wraps=_temp_req, spec=AsyncRolloutRequest))
            futures = [asyncio.Future() for _ in expect_turn_array]
            for idx, (fut, turn) in enumerate(zip(futures, expect_turn_array)):
                fut.set_result(
                    {
                        "text": turn,
                        "meta_info": {
                            "id": "d1188d81cba840359df5b352b344bc8e",
                            "finish_reason": {"type": "tool_calls" if idx < len(expect_turn_array) - 1 else "stop"},
                            "prompt_tokens": len(turn),
                            "completion_tokens": 100,
                            "cached_tokens": 0,
                            "e2e_latency": 9.9304039478302,
                        },
                    }
                )
                if idx < len(expect_turn_array) - 1:
                    assert mock_rollout._function_call_parser.has_tool_call(turn)
                    assert mock_rollout._function_call_parser.parse_non_stream(turn)
            req_turns_map[_temp_req.batch_data_id] = futures
            req_turns_counter[_temp_req.batch_data_id] = 0

        async def hacked_handle_engine_call(
            self, _req: AsyncRolloutRequest, do_sample: bool, is_validate: bool, **kwargs
        ):
            result = req_turns_map[_req.batch_data_id][req_turns_counter[_req.batch_data_id]]
            req_turns_counter[_req.batch_data_id] += 1
            re = await result
            return re

        with patch.object(SGLangRollout, "_handle_engine_call", new=hacked_handle_engine_call):
            mock_rollout._tp_rank = 0
            loop = asyncio.get_event_loop()
            output_req_list = loop.run_until_complete(
                asyncio.gather(
                    *[mock_rollout._async_rollout_a_request(req, True, False) for req in req_list],
                )
            )
            assert len(output_req_list) == req_nums
            # FIGURE out how to count this
            # assert rollout._handle_engine_call.call_count == 3 * req_nums
            for output_req in output_req_list:
                assert output_req.state == AsyncRolloutRequestStateEnum.COMPLETED
                # here we verify whether the code sandbox executed correctly
                assert output_req.metrics == {"code_interpreter": ["3", "149"]}
                assert len(output_req.messages) == 6  # user + 3*assistant + 2*tool_call
                code_counter = 0
                for msg in output_req.messages:
                    if msg.role == "tool":
                        code_counter += 1
                assert code_counter == 2

    def test_sampling_params_functionality(self, mock_rollout):
        """Test sampling_params functionality"""
        # test basic copy functionality
        copied_params = mock_rollout.sampling_params.copy()
        assert copied_params == mock_rollout.sampling_params
        assert copied_params is not mock_rollout.sampling_params
        # test parameter update
        copied_params.update({"temperature": 0.8, "top_p": 0.9})
        assert copied_params["temperature"] == 0.8
        assert copied_params["top_p"] == 0.9
        # ensure original parameters are not modified
        assert "temperature" not in mock_rollout.sampling_params
        assert "top_p" not in mock_rollout.sampling_params


class RayMultiProcessTestCase(MultiProcessTestCase):
    def setUp(self):
        super().setUp()
        ray.init(ignore_reinit_error=True)
        print("init single-node cluster")
        self._spawn_processes()

    def tearDown(self):
        print("tear down single-node cluster")
        ray.shutdown()


@ray.remote
class TestActor:
    def __init__(self, rank, world_size):
        self._world_size = world_size
        self._rank = rank
        self.rank_list = []
        self.time_list = []

    def record_rank(self, rank):
        self.rank_list.append(rank)

    def get_rank(self):
        return self._rank

    def ping(self):
        return True

    def record_execution_time(self, time):
        self.time_list.append(time)

    def get_time(self, timeout):
        import time

        now = time.time()
        while time.time() - now < timeout:
            # wait for both start and end timestamps from every rank
            if len(self.time_list) == self._world_size * 2:
                self.time_list.sort()
                return self.time_list[-1] - self.time_list[0]
            else:
                time.sleep(1)
                continue
        return False

    def
verify_rank(self): import time now = time.time() while time.time() - now < 10: if len(self.rank_list) == self._world_size: print(self.rank_list) self.rank_list.sort() for i in range(self._world_size): if self.rank_list[i] != i: return False return True else: time.sleep(1) continue return False class TestRayGlobalActorCase(RayMultiProcessTestCase): @property def world_size(self) -> int: # for DP = 8 return 2 def test_basic_multi_process_init(self): ray.init("auto", namespace="test", ignore_reinit_error=True) handle = TestActor.remote(self.rank, self.world_size) re = ray.get(handle.get_rank.remote()) assert re == self.rank, f"rank not match: {re} != {self.rank}" # def test_global_actor(self): # ray.init("auto",namespace="test",ignore_reinit_error=True) # handle = TestActor.options(get_if_exists=True,name="test-actor").remote(self.rank,self.world_size) # handle.record_rank.remote(self.rank) # # since test actor's concurrency is 1, we need to wait for all processes to finish # time.sleep(5) # assert ray.get(handle.ping.remote()) == True # make sure actor handle is valid # if self.rank == 0: # assert ray.get(handle.verify_rank.remote()) == True # else: # # get_actor use weak_ref, so we need to make sure the actor is not garbage collected # time.sleep(10) class TestSingleNodeRateLimiterCase(RayMultiProcessTestCase): @property def world_size(self) -> int: return 1 def test_rate_limiter(self): ray.init("auto", namespace="test", ignore_reinit_error=True) from verl.tools.sandbox_fusion_tools import (PoolMode, init_execution_pool) # exec_worker = ExecutionWorker.options(max_concurrency=10).remote(enable_global_rate_limit=True, rate_limit=3) exec_worker = init_execution_pool( num_workers=10, enable_global_rate_limit=True, rate_limit=3, mode=PoolMode.ThreadMode ) center = TestActor.options(get_if_exists=True, name="test-actor").remote(self.rank, self.world_size) ray.get(exec_worker.ping.remote()) def fn(i): import time time.sleep(3) return i start = time.time() tasks = [exec_worker.execute.remote(fn, i) for i in range(6)] loop = asyncio.get_event_loop() results = loop.run_until_complete(asyncio.gather(*tasks)) end = time.time() duration = end - start center.record_execution_time.remote(start) center.record_execution_time.remote(end) print(f"Total time: {duration:.2f} seconds for rank: {self.rank}") assert results == list(range(6)) # we have 6 task with rate limit of 3, therefore we need at least 2 round: 3*2=6 seconds assert duration > 6 assert duration < 10 def test_rotten_execution(self): ray.init("auto", namespace="test", ignore_reinit_error=True) from verl.tools.sandbox_fusion_tools import (PoolMode, init_execution_pool) # exec_worker = ExecutionWorker.options(max_concurrency=10).remote(enable_global_rate_limit=True, rate_limit=6) exec_worker = init_execution_pool( num_workers=10, enable_global_rate_limit=True, rate_limit=6, mode=PoolMode.ThreadMode ) ray.get(exec_worker.ping.remote()) def fn(i): if i == 10: raise Exception("test") else: return i tasks = [exec_worker.execute.remote(fn, i) for i in range(20)] loop = asyncio.get_event_loop() results = loop.run_until_complete(asyncio.gather(*tasks)) expect_result = [None] + list(range(10)) + list(range(11, 20)) sorted_data = sorted(results, key=lambda x: (x is not None, x)) assert sorted_data == expect_result, f"results: {results}, expect_result: {expect_result}" rate_limiter = TokenBucketWorker.options(name="rate-limiter", get_if_exists=True).remote() rate = ray.get(rate_limiter.get_current_count.remote()) assert rate == 0, f"rate: {rate}" class 
TestMultiNodeRateLimiterCase(RayMultiProcessTestCase): @property def world_size(self) -> int: return 2 def test_rate_limiter(self): ray.init("auto", namespace="test", ignore_reinit_error=True) from verl.tools.sandbox_fusion_tools import (PoolMode, init_execution_pool) # exec_worker = ExecutionWorker.options(max_concurrency=10).remote(enable_global_rate_limit=True, rate_limit=6) exec_worker = init_execution_pool( num_workers=10, enable_global_rate_limit=True, rate_limit=6, mode=PoolMode.ThreadMode ) center = TestActor.options(get_if_exists=True, name="test-actor").remote(self.rank, self.world_size) ray.get(exec_worker.ping.remote()) def fn(i): import time time.sleep(2) return i start = time.time() tasks = [exec_worker.execute.remote(fn, i) for i in range(6)] loop = asyncio.get_event_loop() results = loop.run_until_complete(asyncio.gather(*tasks)) end = time.time() duration = end - start center.record_execution_time.remote(start) center.record_execution_time.remote(end) print(f"Total time: {duration:.2f} seconds for rank: {self.rank}") assert results == list(range(6)) time.sleep(5) if self.rank == 0: total_cost = ray.get(center.get_time.remote(10)) print(f"for total cost: {total_cost}") # # we have 6 task each node * 2node = 12 task, each task take 2 second. # with rate limit of 6, # therefore we need at least 2 round: 12/6*2=4 seconds assert total_cost > 4, total_cost else: time.sleep(10) ================================================ FILE: verl_distillation/tests/workers/rollout/test_sglang_async_rollout_w_interaction.py ================================================ # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
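
# This test builds a temporary YAML interaction config (a single gsm8k
# interaction), runs a TP=2 SGLangRollout over three prompts, and compares the
# decoded responses against plain HuggingFace generation via are_lists_similar.
# The OmegaConf.save call below writes roughly:
#
#     interaction:
#     - name: gsm8k
#       class_name: verl.interactions.gsm8k_interaction.Gsm8kInteraction
#       config: {}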
""" usage: torchrun --standalone --nnodes=1 \ --nproc_per_node=2 $(which pytest) \ -s test_sglang_async_rollout_w_interaction.py """ import numpy as np import torch from tensordict import TensorDict from utils_sglang import ( are_lists_similar, clean_torchelastic_env, generate_hf_output, get_rollout_config, initialize_global_process_group, load_tokenizer_and_model, prepare_inputs, ) from verl import DataProto from verl.utils.config import omega_conf_to_dataclass from verl.workers.config import HFModelConfig, RolloutConfig from verl.workers.rollout.sglang_rollout.sglang_rollout import SGLangRollout def test_async_sglang_rollout_w_interaction(): import os assert torch.cuda.device_count() >= 2 initialize_global_process_group() clean_torchelastic_env() max_prompt_length = 32 max_response_length = 16 dtype = "bfloat16" tensor_parallel_size = 2 local_model_path = os.path.expanduser("~/models/Qwen/Qwen2.5-0.5B") tokenizer, actor_model = load_tokenizer_and_model(local_model_path) preencode_prompts = [ [{"role": "user", "content": prompt, "tool_calls": None}] for prompt in [ "Who won the Champions League in 2019?", "The founder of Apple is", "What's the best way to learn python?", ] ] interaction_kwargs = [ {"name": "gsm8k", "query": "Who won the Champions League in 2019?", "ground_truth": "Real Madrid"}, {"name": "gsm8k", "query": "The founder of Apple is", "ground_truth": "Steve Jobs"}, {"name": "gsm8k", "query": "What's the best way to learn python?", "ground_truth": "Learn python from scratch"}, ] prompts = [ tokenizer.apply_chat_template(message, tokenize=False, add_generation_prompt=True) for message in preencode_prompts ] input_ids, attention_mask, position_ids = prepare_inputs(tokenizer, prompts, max_prompt_length) hf_response_tokens = generate_hf_output(actor_model, input_ids, attention_mask, tokenizer, max_response_length) # Create a temporary interaction config file for testing import tempfile from omegaconf import OmegaConf interaction_config = { "interaction": [ {"name": "gsm8k", "class_name": "verl.interactions.gsm8k_interaction.Gsm8kInteraction", "config": {}} ] } with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as f: OmegaConf.save(interaction_config, f.name) interaction_config_path = f.name rollout_config = get_rollout_config( max_response_length, max_prompt_length, dtype, tensor_parallel_size, None, interaction_config_path ) rollout_config: RolloutConfig = omega_conf_to_dataclass(rollout_config, dataclass_type=RolloutConfig) model_config = HFModelConfig(path=local_model_path) rollout = SGLangRollout( config=rollout_config, model_config=model_config, device_mesh=None, ) prompt_dict = TensorDict( { "input_ids": input_ids, "attention_mask": attention_mask, "position_ids": position_ids, }, batch_size=input_ids.shape[0], ) print(f"preprocessed {input_ids.shape=}") messages = np.asarray(preencode_prompts) prompts = DataProto( batch=prompt_dict, non_tensor_batch={"raw_prompt": messages, "interaction_kwargs": np.asarray(interaction_kwargs)}, ) prompts.meta_info.update( { "eos_token_id": tokenizer.eos_token_id, "pad_token_id": tokenizer.pad_token_id, } ) # log_gpu_memory_usage("Before generating sequences", logger=None) output = rollout.generate_sequences(prompts=prompts) print(f"generated {output.batch['responses'].shape=}") # log_gpu_memory_usage("After generating sequences", logger=None) sglang_output = output.to("cpu") sglang_response_tokens = tokenizer.batch_decode(sglang_output.batch["responses"]) print(f"hf response: {hf_response_tokens}") print(f"sglang 
response: {sglang_response_tokens}") assert are_lists_similar(hf_response_tokens, sglang_response_tokens) print("SGLang w interaction Test Passed!") # Clean up temporary config file import os os.unlink(interaction_config_path) torch.distributed.barrier() torch.distributed.destroy_process_group() if __name__ == "__main__": test_async_sglang_rollout_w_interaction() ================================================ FILE: verl_distillation/tests/workers/rollout/test_sglang_async_rollout_w_tools.py ================================================ # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ usage: torchrun --standalone --nnodes=1 \ --nproc_per_node=2 $(which pytest) \ -s test_sglang_async_rollout_w_tools.py """ import numpy as np import torch from tensordict import TensorDict from utils_sglang import ( are_lists_similar, clean_torchelastic_env, generate_hf_output, get_rollout_config, initialize_global_process_group, load_tokenizer_and_model, prepare_inputs, ) from verl import DataProto from verl.utils.config import omega_conf_to_dataclass from verl.workers.config import HFModelConfig, RolloutConfig from verl.workers.rollout.sglang_rollout.sglang_rollout import SGLangRollout def test_async_sglang_rollout_w_tool(): import os assert torch.cuda.device_count() >= 2 initialize_global_process_group() clean_torchelastic_env() max_prompt_length = 32 max_response_length = 16 dtype = "bfloat16" tensor_parallel_size = 2 local_model_path = os.path.expanduser("~/models/Qwen/Qwen2.5-0.5B") tokenizer, actor_model = load_tokenizer_and_model(local_model_path) preencode_prompts = [ [{"role": "user", "content": prompt, "tool_calls": None}] for prompt in [ "Who won the Champions League in 2019?", "The founder of Apple is", "What's the best way to learn python?", ] ] prompts = [ tokenizer.apply_chat_template(message, tokenize=False, add_generation_prompt=True) for message in preencode_prompts ] input_ids, attention_mask, position_ids = prepare_inputs(tokenizer, prompts, max_prompt_length) hf_response_tokens = generate_hf_output(actor_model, input_ids, attention_mask, tokenizer, max_response_length) rollout_config = get_rollout_config( max_response_length, max_prompt_length, dtype, tensor_parallel_size, "./resource/tool_configs/sandbox_fusion_tool_config", ) rollout_config: RolloutConfig = omega_conf_to_dataclass(rollout_config, dataclass_type=RolloutConfig) model_config = HFModelConfig(path=local_model_path) rollout = SGLangRollout( config=rollout_config, model_config=model_config, device_mesh=None, ) prompt_dict = TensorDict( { "input_ids": input_ids, "attention_mask": attention_mask, "position_ids": position_ids, }, batch_size=input_ids.shape[0], ) print(f"preprocessed {input_ids.shape=}") messages = np.asarray(preencode_prompts) prompts = DataProto( batch=prompt_dict, non_tensor_batch={ "raw_prompt": messages, "tools_kwargs": np.array([{}] * input_ids.shape[0], dtype=object), }, ) prompts.meta_info.update( { "eos_token_id": 
            tokenizer.eos_token_id,
            "pad_token_id": tokenizer.pad_token_id,
        }
    )
    # log_gpu_memory_usage("Before generating sequences", logger=None)
    output = rollout.generate_sequences(prompts=prompts)
    print(f"generated {output.batch['responses'].shape=}")
    # log_gpu_memory_usage("After generating sequences", logger=None)
    sglang_output = output.to("cpu")
    sglang_response_tokens = tokenizer.batch_decode(sglang_output.batch["responses"])
    print(f"hf response: {hf_response_tokens}")
    print(f"sglang response: {sglang_response_tokens}")
    assert are_lists_similar(hf_response_tokens, sglang_response_tokens)
    print("SGLang w tool Test Passed!")
    torch.distributed.barrier()
    torch.distributed.destroy_process_group()


if __name__ == "__main__":
    test_async_sglang_rollout_w_tool()


================================================
FILE: verl_distillation/tests/workers/rollout/test_sglang_async_rollout_w_tools_token_out.py
================================================
# Copyright 2023-2024 SGLang Team
# Copyright 2025 ModelBest Inc. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
usage: torchrun --standalone --nnodes=1 \
    --nproc_per_node=2 $(which pytest) \
    -s test_sglang_async_rollout_w_tools_token_out.py
"""

import numpy as np
import torch
from tensordict import TensorDict
from utils_sglang import (
    are_lists_similar,
    clean_torchelastic_env,
    generate_hf_output,
    get_rollout_config,
    initialize_global_process_group,
    load_tokenizer_and_model,
    prepare_inputs,
)

from verl import DataProto
from verl.utils.config import omega_conf_to_dataclass
from verl.workers.config import HFModelConfig, RolloutConfig
from verl.workers.rollout.sglang_rollout.sglang_rollout import SGLangRollout


def test_async_sglang_rollout_w_tool():
    import os

    assert torch.cuda.device_count() >= 2
    initialize_global_process_group()
    clean_torchelastic_env()

    max_prompt_length = 32
    max_response_length = 16
    dtype = "bfloat16"
    tensor_parallel_size = 2
    skip_tokenizer_init = True
    local_model_path = os.path.expanduser("~/models/Qwen/Qwen2.5-0.5B")
    tokenizer, actor_model = load_tokenizer_and_model(local_model_path)

    preencode_prompts = [
        [{"role": "user", "content": prompt, "tool_calls": None}]
        for prompt in [
            "Who won the Champions League in 2019?",
            "The founder of Apple is",
            "What's the best way to learn python?",
        ]
    ]
    prompts = [
        tokenizer.apply_chat_template(message, tokenize=False, add_generation_prompt=True)
        for message in preencode_prompts
    ]
    input_ids, attention_mask, position_ids = prepare_inputs(tokenizer, prompts, max_prompt_length)
    hf_response_tokens = generate_hf_output(actor_model, input_ids, attention_mask, tokenizer, max_response_length)

    rollout_config = get_rollout_config(
        max_response_length,
        max_prompt_length,
        dtype,
        tensor_parallel_size,
        tool_config_path="./resource/tool_configs/sandbox_fusion_tool_config",
        skip_tokenizer_init=skip_tokenizer_init,
    )
    rollout_config: RolloutConfig = omega_conf_to_dataclass(rollout_config, dataclass_type=RolloutConfig)
    model_config = HFModelConfig(path=local_model_path)
    rollout = SGLangRollout(
        config=rollout_config,
model_config=model_config, device_mesh=None, ) prompt_dict = TensorDict( { "input_ids": input_ids, "attention_mask": attention_mask, "position_ids": position_ids, }, batch_size=input_ids.shape[0], ) print(f"preprocessed {input_ids.shape=}") messages = np.asarray(preencode_prompts) prompts = DataProto( batch=prompt_dict, non_tensor_batch={ "raw_prompt": messages, "tools_kwargs": np.array([{}] * input_ids.shape[0], dtype=object), }, ) prompts.meta_info.update( { "eos_token_id": tokenizer.eos_token_id, "pad_token_id": tokenizer.pad_token_id, } ) # log_gpu_memory_usage("Before generating sequences", logger=None) output = rollout.generate_sequences(prompts=prompts) print(f"generated {output.batch['responses'].shape=}") # log_gpu_memory_usage("After generating sequences", logger=None) sglang_output = output.to("cpu") sglang_response_tokens = tokenizer.batch_decode(sglang_output.batch["responses"]) print(f"hf response: {hf_response_tokens}") print(f"sglang response: {sglang_response_tokens}") assert are_lists_similar(hf_response_tokens, sglang_response_tokens) print("SGLang w tool Test Passed!") torch.distributed.barrier() torch.distributed.destroy_process_group() if __name__ == "__main__": test_async_sglang_rollout_w_tool() ================================================ FILE: verl_distillation/tests/workers/rollout/test_sglang_multi_interaction.py ================================================ # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Test for multi-interaction support in SGLangRollout. 
usage: torchrun --standalone --nnodes=1 \ --nproc_per_node=2 $(which pytest) \ -s test_sglang_multi_interaction.py """ import os import tempfile from unittest.mock import MagicMock, patch import torch import torch.distributed as dist from omegaconf import DictConfig, OmegaConf from transformers import AutoTokenizer from verl.interactions.base import BaseInteraction from verl.utils.config import omega_conf_to_dataclass from verl.workers.config import HFModelConfig, RolloutConfig from verl.workers.rollout.sglang_rollout.sglang_rollout import SGLangRollout class MockInteraction(BaseInteraction): """Mock interaction for testing.""" def __init__(self, config): super().__init__(config) self.started_instances = set() async def start_interaction(self, instance_id=None, **kwargs): if instance_id is None: instance_id = "mock_instance" self.started_instances.add(instance_id) return instance_id async def generate_response(self, instance_id, messages, **kwargs): return False, f"Mock response from {self.name}", 1.0, {} def create_mock_config_with_multi_interactions(): """Create a mock configuration with multiple interactions.""" # Create temporary interaction config file interaction_config = { "interaction": [ { "name": "mock_agent1", "class_name": "tests.workers.rollout.test_sglang_multi_interaction.MockInteraction", "config": {"param1": "value1"}, }, { "name": "mock_agent2", "class_name": "tests.workers.rollout.test_sglang_multi_interaction.MockInteraction", "config": {"param2": "value2"}, }, ] } with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as f: OmegaConf.save(interaction_config, f.name) interaction_config_path = f.name # Create mock SGLangRollout config config = DictConfig( { "name": "sglang", "multi_turn": { "interaction_config_path": interaction_config_path, "tool_config_path": None, "enable": True, "max_assistant_turns": 5, "max_user_turns": 3, "use_inference_chat_template": True, "tokenization_sanity_check_mode": "off", }, "prompt_length": 32, "response_length": 16, "max_model_len": 512, "dtype": "bfloat16", "gpu_memory_utilization": 0.8, "load_format": "dummy", "enforce_eager": True, "free_cache_engine": False, "calculate_log_probs": False, "tensor_model_parallel_size": 1, "n": 1, "val_kwargs": {"top_k": 1, "top_p": 1.0, "temperature": 0.0}, } ) return config, interaction_config_path def setup_distributed(): """Initialize distributed environment if not already initialized.""" if not dist.is_initialized(): dist.init_process_group(backend="nccl" if torch.cuda.is_available() else "gloo") class TestSGLangMultiInteraction: local_model_path = os.path.expanduser("~/models/Qwen/Qwen2.5-0.5B") def test_initialize_multiple_interactions(self): """Test that SGLangRollout can initialize multiple interactions.""" setup_distributed() config, temp_config_path = create_mock_config_with_multi_interactions() try: # Mock SGLang engine and initialization methods like the reference test with ( patch.object(SGLangRollout, "_init_distributed_env", return_value=None), patch.object(SGLangRollout, "_init_inference_engine", return_value=None), patch.object(SGLangRollout, "_init_sampling_params", return_value=None), ): # Create a real tokenizer like the reference test tokenizer = AutoTokenizer.from_pretrained(self.local_model_path, padding_side="left") tokenizer.pad_token = tokenizer.eos_token # Mock model config mock_model_config = MagicMock() mock_model_config.max_position_embeddings = 2048 # since this is a mock, we can set any rope scaling config # to test the rope_scaling logic at the same 
time of this test mock_model_config.rope_scaling = { "factor": 4.0, "original_max_position_embeddings": 32768, "type": "yarn", } rollout_config: RolloutConfig = omega_conf_to_dataclass(config, dataclass_type=RolloutConfig) model_config = HFModelConfig(path=self.local_model_path) rollout = SGLangRollout( config=rollout_config, model_config=model_config, device_mesh=None, ) # Check that interactions were initialized assert len(rollout.interaction_map) == 2 assert "mock_agent1" in rollout.interaction_map assert "mock_agent2" in rollout.interaction_map # Use class name comparison instead of isinstance for multi-process compatibility assert rollout.interaction_map["mock_agent1"].__class__.__name__ == "MockInteraction" assert rollout.interaction_map["mock_agent2"].__class__.__name__ == "MockInteraction" # Also check that they are instances of BaseInteraction (which should work across processes) assert isinstance(rollout.interaction_map["mock_agent1"], BaseInteraction) assert isinstance(rollout.interaction_map["mock_agent2"], BaseInteraction) # Check that names were set correctly assert rollout.interaction_map["mock_agent1"].name == "mock_agent1" assert rollout.interaction_map["mock_agent2"].name == "mock_agent2" finally: os.unlink(temp_config_path) def test_interaction_selection_by_name(self): """Test that interactions are selected by name from interaction_kwargs.""" setup_distributed() config, temp_config_path = create_mock_config_with_multi_interactions() try: with ( patch.object(SGLangRollout, "_init_distributed_env", return_value=None), patch.object(SGLangRollout, "_init_inference_engine", return_value=None), patch.object(SGLangRollout, "_init_sampling_params", return_value=None), ): tokenizer = AutoTokenizer.from_pretrained(self.local_model_path, padding_side="left") tokenizer.pad_token = tokenizer.eos_token mock_model_config = MagicMock() mock_model_config.max_position_embeddings = 2048 mock_model_config.rope_scaling = { "factor": 4.0, "original_max_position_embeddings": 32768, "type": "yarn", } rollout_config: RolloutConfig = omega_conf_to_dataclass(config, dataclass_type=RolloutConfig) model_config = HFModelConfig(path=self.local_model_path) rollout = SGLangRollout( config=rollout_config, model_config=model_config, device_mesh=None, ) # Test interaction selection logic from verl.workers.rollout.schemas import AsyncRolloutRequest, AsyncRolloutRequestStateEnum, Message # Create a mock request with specific interaction name req = AsyncRolloutRequest( request_id="test_req", state=AsyncRolloutRequestStateEnum.INTERACTING, messages=[Message(role="user", content="test message")], interaction_kwargs={"name": "mock_agent2", "test_param": "value"}, input_ids=None, prompt_ids=None, response_ids=None, attention_mask=None, prompt_attention_mask=None, response_attention_mask=None, position_ids=None, prompt_position_ids=None, response_position_ids=None, loss_mask=None, prompt_loss_mask=None, response_loss_mask=None, reward_scores={}, max_prompt_len=32, max_response_len=16, max_model_len=512, use_inference_chat_template=True, tokenization_sanity_check_mode="disable", processing_class=tokenizer, ) # Test that the correct interaction is selected interaction_name = req.interaction_kwargs.get("name", "gsm8k") assert interaction_name == "mock_agent2" assert interaction_name in rollout.interaction_map selected_interaction = rollout.interaction_map[interaction_name] assert selected_interaction.name == "mock_agent2" finally: os.unlink(temp_config_path) def test_fallback_to_default_interaction(self): """Test 
fallback to default interaction when name is not specified.""" setup_distributed() # Create config with gsm8k interaction interaction_config = { "interaction": [ { "name": "gsm8k", "class_name": "tests.workers.rollout.test_sglang_multi_interaction.MockInteraction", "config": {}, } ] } with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as f: OmegaConf.save(interaction_config, f.name) interaction_config_path = f.name config = DictConfig( { "name": "sglang", "multi_turn": { "interaction_config_path": interaction_config_path, "tool_config_path": None, "enable": True, "max_assistant_turns": 5, "max_user_turns": 3, "use_inference_chat_template": True, "tokenization_sanity_check_mode": "disable", }, "prompt_length": 32, "response_length": 16, "max_model_len": 512, "dtype": "bfloat16", "gpu_memory_utilization": 0.8, "load_format": "dummy", "enforce_eager": True, "free_cache_engine": False, "calculate_log_probs": False, "tensor_model_parallel_size": 1, "n": 1, "val_kwargs": {"top_k": 1, "top_p": 1.0, "temperature": 0.0}, } ) try: with ( patch.object(SGLangRollout, "_init_distributed_env", return_value=None), patch.object(SGLangRollout, "_init_inference_engine", return_value=None), patch.object(SGLangRollout, "_init_sampling_params", return_value=None), ): tokenizer = AutoTokenizer.from_pretrained(self.local_model_path, padding_side="left") tokenizer.pad_token = tokenizer.eos_token mock_model_config = MagicMock() mock_model_config.max_position_embeddings = 2048 mock_model_config.rope_scaling = { "factor": 4.0, "original_max_position_embeddings": 32768, "type": "yarn", } rollout_config: RolloutConfig = omega_conf_to_dataclass(config, dataclass_type=RolloutConfig) model_config = HFModelConfig(path=self.local_model_path) rollout = SGLangRollout( config=rollout_config, model_config=model_config, device_mesh=None, ) # Test that default interaction name works interaction_kwargs_without_name = {"test_param": "value"} default_name = interaction_kwargs_without_name.get("name", "gsm8k") assert default_name == "gsm8k" assert default_name in rollout.interaction_map finally: os.unlink(interaction_config_path) def test_error_on_missing_interaction(self): """Test that error is raised when requested interaction is not found.""" setup_distributed() config, temp_config_path = create_mock_config_with_multi_interactions() try: with ( patch.object(SGLangRollout, "_init_distributed_env", return_value=None), patch.object(SGLangRollout, "_init_inference_engine", return_value=None), patch.object(SGLangRollout, "_init_sampling_params", return_value=None), ): tokenizer = AutoTokenizer.from_pretrained(self.local_model_path, padding_side="left") tokenizer.pad_token = tokenizer.eos_token mock_model_config = MagicMock() mock_model_config.max_position_embeddings = 2048 mock_model_config.rope_scaling = { "factor": 4.0, "original_max_position_embeddings": 32768, "type": "yarn", } rollout_config: RolloutConfig = omega_conf_to_dataclass(config, dataclass_type=RolloutConfig) model_config = HFModelConfig(path=self.local_model_path) rollout = SGLangRollout( config=rollout_config, model_config=model_config, device_mesh=None, ) # Test error when requesting non-existent interaction non_existent_name = "non_existent_interaction" assert non_existent_name not in rollout.interaction_map # This should raise ValueError in actual usage available_interactions = list(rollout.interaction_map.keys()) assert "mock_agent1" in available_interactions assert "mock_agent2" in available_interactions assert non_existent_name not in 
available_interactions finally: os.unlink(temp_config_path) def test_backward_compatibility_no_interaction_config(self): """Test backward compatibility when no interaction config is provided.""" setup_distributed() # Create config without interaction config config = DictConfig( { "name": "sglang", "multi_turn": { "interaction_config_path": None, "tool_config_path": None, "enable": True, "max_assistant_turns": 5, "max_user_turns": 3, "use_inference_chat_template": True, "tokenization_sanity_check_mode": "disable", }, "prompt_length": 32, "response_length": 16, "max_model_len": 512, "dtype": "bfloat16", "gpu_memory_utilization": 0.8, "load_format": "dummy", "enforce_eager": True, "free_cache_engine": False, "calculate_log_probs": False, "tensor_model_parallel_size": 1, "n": 1, "val_kwargs": {"top_k": 1, "top_p": 1.0, "temperature": 0.0}, } ) with ( patch.object(SGLangRollout, "_init_distributed_env", return_value=None), patch.object(SGLangRollout, "_init_inference_engine", return_value=None), patch.object(SGLangRollout, "_init_sampling_params", return_value=None), ): tokenizer = AutoTokenizer.from_pretrained(self.local_model_path, padding_side="left") tokenizer.pad_token = tokenizer.eos_token mock_model_config = MagicMock() mock_model_config.max_position_embeddings = 2048 mock_model_config.rope_scaling = { "factor": 4.0, "original_max_position_embeddings": 32768, "type": "yarn", } rollout_config: RolloutConfig = omega_conf_to_dataclass(config, dataclass_type=RolloutConfig) model_config = HFModelConfig(path=self.local_model_path) rollout = SGLangRollout( config=rollout_config, model_config=model_config, device_mesh=None, ) # Check that no interactions were initialized assert len(rollout.interaction_map) == 0 ================================================ FILE: verl_distillation/tests/workers/rollout/test_sglang_rollout_sharding_manager.py ================================================ # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
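# ---------------------------------------------------------------------------
# Added illustration -- a minimal sketch of the bucketing semantics the test
# below checks, NOT the actual verl implementation (see
# verl.workers.rollout.sglang_rollout.utils.get_named_tensor_buckets for
# that). Tensors are packed greedily: a group is flushed once adding the next
# tensor would exceed the byte budget, and an oversized tensor still gets a
# group of its own.
# ---------------------------------------------------------------------------
from collections.abc import Iterable, Iterator

import torch


def _bucket_named_tensors_sketch(
    named_tensors: Iterable[tuple[str, torch.Tensor]], bucket_bytes: float
) -> Iterator[list[tuple[str, torch.Tensor]]]:
    group: list[tuple[str, torch.Tensor]] = []
    group_bytes = 0
    for name, tensor in named_tensors:
        nbytes = tensor.numel() * tensor.element_size()
        if group and group_bytes + nbytes > bucket_bytes:
            yield group  # flush: the next tensor would overflow the bucket
            group, group_bytes = [], 0
        group.append((name, tensor))
        group_bytes += nbytes
    if group:
        yield group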
import pytest
import torch

from verl.workers.rollout.sglang_rollout.utils import get_named_tensor_buckets

_TENSOR_1MB = torch.zeros(512, 512)
_BYTES_1MB = 1 << 20


@pytest.mark.parametrize(
    "named_tensors, bucket_size_mb, gt_groups",
    [
        (
            [("a", _TENSOR_1MB), ("b", _TENSOR_1MB)],
            0.5 * _BYTES_1MB,
            [["a"], ["b"]],
        ),
        (
            [("a", _TENSOR_1MB), ("b", _TENSOR_1MB)],
            1 * _BYTES_1MB,
            [["a"], ["b"]],
        ),
        (
            [("a", _TENSOR_1MB), ("b", _TENSOR_1MB)],
            1.5 * _BYTES_1MB,
            [["a"], ["b"]],
        ),
        (
            [("a", _TENSOR_1MB), ("b", _TENSOR_1MB)],
            2 * _BYTES_1MB,
            [["a", "b"]],
        ),
    ],
)
def test_get_named_tensor_buckets(named_tensors, bucket_size_mb, gt_groups: list[list[str]]):
    named_tensors_iter = iter(named_tensors)
    groups = list(get_named_tensor_buckets(named_tensors_iter, bucket_size_mb))
    assert len(groups) == len(gt_groups)
    for group, gt_group in zip(groups, gt_groups, strict=True):
        assert len(group) == len(gt_group)
        for (name, _), gt_name in zip(group, gt_group, strict=True):
            assert name == gt_name


================================================
FILE: verl_distillation/tests/workers/rollout/test_sglang_spmd.py
================================================
# Copyright 2023-2024 SGLang Team
# Copyright 2025 ModelBest Inc. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
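# ---------------------------------------------------------------------------
# Added note (sketch, not part of the original file): the SPMD test below uses
# the common "rank 0 computes, everyone receives" pattern. sglang's
# broadcast_pyobj fills the role that torch.distributed.broadcast_object_list
# plays in this minimal equivalent, assuming the default process group:
#
#     import torch.distributed as dist
#
#     def _broadcast_from_rank0(obj):
#         buf = [obj if dist.get_rank() == 0 else None]
#         dist.broadcast_object_list(buf, src=0)
#         return buf[0]
# ---------------------------------------------------------------------------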
""" usage: torchrun --standalone --nnodes=1 \ --nproc_per_node=2 $(which pytest) \ -s test_sglang_async_spmd.py """ import asyncio import os import torch from sglang.srt.entrypoints.engine import Engine from sglang.srt.utils import broadcast_pyobj from torch.distributed.device_mesh import init_device_mesh from utils_sglang import ( are_lists_similar, clean_torchelastic_env, generate_hf_output, initialize_global_process_group, load_tokenizer_and_model, prepare_inputs, ) def _pre_process_inputs(pad_token_id, prompt_token_ids: torch.Tensor): non_pad_index = torch.nonzero(prompt_token_ids != pad_token_id, as_tuple=False)[0][0] token_ids = prompt_token_ids[non_pad_index:].tolist() return token_ids def test_sglang_spmd(): assert torch.cuda.device_count() >= 2 initialize_global_process_group(spmd=True) clean_torchelastic_env() max_prompt_length = 16 max_response_length = 16 local_model_path = os.path.expanduser("~/models/Qwen/Qwen2.5-0.5B") tokenizer, actor_model = load_tokenizer_and_model(local_model_path) preencode_prompts = ["Who won the Champions League in 2019?", "The founder of Apple is", "What's your name?"] input_ids, attention_mask, _ = prepare_inputs(tokenizer, preencode_prompts, max_prompt_length) hf_response_tokens = generate_hf_output(actor_model, input_ids, attention_mask, tokenizer, max_response_length) tensor_parallel_size = 2 inference_device_mesh_cpu = init_device_mesh( "cpu", mesh_shape=(1, tensor_parallel_size, 1), mesh_dim_names=["dp", "tp", "pp"] ) tp_rank = inference_device_mesh_cpu["tp"].get_local_rank() if tp_rank == 0: llm = Engine( model_path=local_model_path, dtype="bfloat16", mem_fraction_static=0.5, enable_memory_saver=True, tp_size=inference_device_mesh_cpu["tp"].size(), attention_backend="fa3", ) input_ids = input_ids.cuda() idx_list = [] pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id for i in range(input_ids.shape[0]): idx_list.append(_pre_process_inputs(pad_token_id, input_ids[i])) sampling_params = dict( n=1, temperature=0, top_p=1, top_k=-1, max_new_tokens=max_response_length, presence_penalty=0.0, frequency_penalty=0.0, repetition_penalty=1.0, skip_special_tokens=True, spaces_between_special_tokens=True, ignore_eos=False, ) loop = asyncio.get_event_loop() outputs = loop.run_until_complete(llm.async_generate(input_ids=idx_list, sampling_params=sampling_params)) else: outputs = None [outputs] = broadcast_pyobj( [outputs], rank=inference_device_mesh_cpu["tp"].get_local_rank(), src=inference_device_mesh_cpu["tp"].mesh[0].item(), dist_group=inference_device_mesh_cpu["tp"].get_group(), force_cpu_device=False, ) sglang_response_tokens = [output["text"] for output in outputs] print(f"sglang response: {sglang_response_tokens}") assert are_lists_similar(hf_response_tokens, sglang_response_tokens), "Strings differ more than 10%:\n" print("SPMD Test Passed!") torch.distributed.barrier() torch.distributed.destroy_process_group() ================================================ FILE: verl_distillation/tests/workers/rollout/utils_sglang.py ================================================ # Copyright 2023-2024 SGLang Team # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from datetime import timedelta import torch from omegaconf import OmegaConf from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig from verl.utils.model import compute_position_id_with_mask from verl.utils.torch_functional import pad_sequence_to_length # ====================== utils ====================== def levenshtein(s1, s2): m, n = len(s1), len(s2) dp = [[0] * (n + 1) for _ in range(m + 1)] for i in range(m + 1): dp[i][0] = i for j in range(n + 1): dp[0][j] = j for i in range(1, m + 1): for j in range(1, n + 1): cost = 0 if s1[i - 1] == s2[j - 1] else 1 dp[i][j] = min(dp[i - 1][j] + 1, dp[i][j - 1] + 1, dp[i - 1][j - 1] + cost) return dp[m][n] def are_lists_similar(a, b, threshold=10): if len(a) != len(b): print("The lists are of different lengths.") return False total_length = 0 total_diff = 0 for s1, s2 in zip(a, b, strict=True): max_len = max(len(s1), len(s2)) total_length += max_len total_diff += levenshtein(s1, s2) percentage_difference = (total_diff / total_length) * 100 print(f"Total difference: {percentage_difference:.2f}%") return percentage_difference <= threshold def initialize_global_process_group(timeout_second=36000, spmd=False): import torch.distributed if not torch.distributed.is_initialized(): # Check if already initialized print("Initializing process group...") torch.distributed.init_process_group(timeout=timedelta(seconds=timeout_second)) else: print("Process group already initialized.") local_rank = int(os.environ["LOCAL_RANK"]) rank = int(os.environ["RANK"]) world_size = int(os.environ["WORLD_SIZE"]) torch.cuda.set_device(local_rank) CUDA_VISIBLE_DEVICES = os.environ.get("CUDA_VISIBLE_DEVICES", "") if not CUDA_VISIBLE_DEVICES: if spmd: # CUDA_VISIBLE_DEVICES = ','.join(str(i) for i in range(tensor_parallel_size)) CUDA_VISIBLE_DEVICES = ",".join(str(i) for i in range(world_size)) else: CUDA_VISIBLE_DEVICES = str(local_rank) os.environ["CUDA_VISIBLE_DEVICES"] = CUDA_VISIBLE_DEVICES print(f"CUDA_VISIBLE_DEVICES is not set, set to {CUDA_VISIBLE_DEVICES}") return local_rank, rank, world_size def clean_torchelastic_env(): for k in ["TORCHELASTIC_USE_AGENT_STORE"]: if k in os.environ: del os.environ[k] def load_tokenizer_and_model(local_model_path, dtype="bfloat16"): tokenizer = AutoTokenizer.from_pretrained(local_model_path, padding_side="left") tokenizer.pad_token = tokenizer.eos_token model = AutoModelForCausalLM.from_pretrained(local_model_path, torch_dtype=getattr(torch, dtype), device_map="cuda") return tokenizer, model def prepare_inputs(tokenizer, prompts, max_prompt_length): pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id tokenized = tokenizer(prompts, return_tensors="pt", padding=True) input_ids = pad_sequence_to_length(tokenized["input_ids"], max_prompt_length, pad_token_id, left_pad=True) attention_mask = pad_sequence_to_length( tokenized["attention_mask"], max_prompt_length, pad_token_id=0, left_pad=True ) position_ids = compute_position_id_with_mask(attention_mask) position_ids = pad_sequence_to_length(position_ids, max_prompt_length, pad_token_id=0, left_pad=True) 
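    # note: inputs are left-padded to max_prompt_length so that responses can be
    # concatenated on the right; position ids are recomputed from the attention
    # mask (rather than taken as 0..n-1) so the pad positions do not shift the
    # positions of the real prompt tokens.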
return input_ids, attention_mask, position_ids def generate_hf_output(model, input_ids, attention_mask, tokenizer, max_response_length): generation_config = GenerationConfig(do_sample=False) output = model.generate( input_ids=input_ids.cuda(), attention_mask=attention_mask.cuda(), max_new_tokens=max_response_length, eos_token_id=tokenizer.eos_token_id, pad_token_id=tokenizer.pad_token_id, generation_config=generation_config, output_scores=False, return_dict_in_generate=True, use_cache=False, ) seq = output.sequences response = seq[:, input_ids.shape[1] :] return tokenizer.batch_decode(response) def get_rollout_config( max_response_length, max_prompt_length, dtype, tensor_parallel_size, tool_config_path=None, interaction_config_path=None, skip_tokenizer_init=False, ): sampling_params = dict( n=1, temperature=0, top_p=1, top_k=-1, ) rollout_config = OmegaConf.create( { "name": "sglang", "mode": "sync", "load_format": "auto", "enforce_eager": False, "free_cache_engine": True, "dtype": dtype, "gpu_memory_utilization": 0.5, "ignore_eos": False, "max_num_batched_tokens": 8192, "prompt_length": max_prompt_length, "response_length": max_response_length, "tensor_model_parallel_size": tensor_parallel_size, # set to 128MB only for testing "update_weights_bucket_megabytes": 128, # do not drop any samples in the test "over_sample_rate": 0.0, "multi_turn": { "max_assistant_turns": 4, "max_user_turns": 4, "enable": True, "tool_config_path": tool_config_path, "interaction_config_path": interaction_config_path, "use_inference_chat_template": False, "tokenization_sanity_check_mode": "strict", }, "calculate_log_probs": False, "max_model_len": None, "skip_tokenizer_init": skip_tokenizer_init, **sampling_params, } ) return rollout_config ================================================ FILE: verl_distillation/tests/workers/test_fsdp_attn_implementation.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Test for attn_implementation override configuration in FSDP workers. This test verifies that the fix for honoring attn_implementation override config works correctly in the ActorRolloutRefWorker._build_model_optimizer method. 
""" from unittest.mock import Mock, patch import pytest import torch from omegaconf import OmegaConf from transformers import AutoConfig, AutoModelForCausalLM # Only run these tests if we can import verl components try: from verl.workers.config import FSDPEngineConfig # noqa: F401 from verl.workers.fsdp_workers import ( ActorRolloutRefWorker, # noqa: F401 CriticWorker, # noqa: F401 ) VERL_AVAILABLE = True except ImportError: VERL_AVAILABLE = False @pytest.mark.skipif(not VERL_AVAILABLE, reason="VERL components not available") class TestFSDPAttnImplementation: """Test cases for attn_implementation override in FSDP workers.""" def test_attn_implementation_extraction_logic(self): """Test the core logic for extracting attn_implementation from override config.""" # Test case 1: Default behavior override_config = {} attn_impl = override_config.get("attn_implementation", "flash_attention_2") assert attn_impl == "flash_attention_2" # Test case 2: Override to eager override_config = {"attn_implementation": "eager"} attn_impl = override_config.get("attn_implementation", "flash_attention_2") assert attn_impl == "eager" # Test case 3: Override to sdpa override_config = {"attn_implementation": "sdpa"} attn_impl = override_config.get("attn_implementation", "flash_attention_2") assert attn_impl == "sdpa" # Test case 4: Other configs don't affect attn_implementation override_config = {"other_setting": "value", "dropout": 0.1} attn_impl = override_config.get("attn_implementation", "flash_attention_2") assert attn_impl == "flash_attention_2" @patch("transformers.AutoConfig.from_pretrained") @patch("transformers.AutoModelForCausalLM.from_pretrained") def test_attn_implementation_passed_to_autoconfig(self, mock_model_from_pretrained, mock_config_from_pretrained): """Test that attn_implementation is correctly passed to AutoConfig.from_pretrained.""" # Mock the AutoConfig return value mock_config = Mock() mock_config.tie_word_embeddings = False mock_config.architectures = ["LlamaForCausalLM"] mock_config_from_pretrained.return_value = mock_config # Mock the model return value mock_model = Mock() mock_model_from_pretrained.return_value = mock_model # Test data test_cases = [ ({}, "flash_attention_2"), # Default ({"attn_implementation": "eager"}, "eager"), # Override to eager ({"attn_implementation": "sdpa"}, "sdpa"), # Override to sdpa ] for override_config, expected_attn_impl in test_cases: # Reset mocks mock_config_from_pretrained.reset_mock() mock_model_from_pretrained.reset_mock() # Simulate the logic from FSDP workers attn_implementation = override_config.get("attn_implementation", "flash_attention_2") # This simulates what happens in _build_model_optimizer AutoConfig.from_pretrained("test_path", trust_remote_code=False, attn_implementation=attn_implementation) # Verify AutoConfig.from_pretrained was called with correct attn_implementation mock_config_from_pretrained.assert_called_once_with( "test_path", trust_remote_code=False, attn_implementation=expected_attn_impl ) @patch("transformers.AutoConfig.from_pretrained") @patch("transformers.AutoModelForCausalLM.from_pretrained") def test_attn_implementation_passed_to_model(self, mock_model_from_pretrained, mock_config_from_pretrained): """Test that attn_implementation is correctly passed to model.from_pretrained.""" # Mock the AutoConfig return value mock_config = Mock() mock_config.tie_word_embeddings = False mock_config.architectures = ["LlamaForCausalLM"] mock_config_from_pretrained.return_value = mock_config # Mock the model return value mock_model = 
Mock() mock_model_from_pretrained.return_value = mock_model # Test with override config override_config = {"attn_implementation": "eager"} attn_implementation = override_config.get("attn_implementation", "flash_attention_2") # This simulates what happens in _build_model_optimizer AutoModelForCausalLM.from_pretrained( pretrained_model_name_or_path="test_path", torch_dtype=torch.bfloat16, config=mock_config, trust_remote_code=False, attn_implementation=attn_implementation, ) # Verify AutoModelForCausalLM.from_pretrained was called with correct attn_implementation mock_model_from_pretrained.assert_called_once_with( pretrained_model_name_or_path="test_path", torch_dtype=torch.bfloat16, config=mock_config, trust_remote_code=False, attn_implementation="eager", ) def test_override_config_integration(self): """Test that override_config from Hydra configuration works correctly.""" # Simulate the OmegaConf configuration structure used in VERL config_dict = { "model": {"path": "/test/path", "override_config": {"attn_implementation": "eager", "dropout": 0.1}} } # Convert to OmegaConf structure omegaconf = OmegaConf.create(config_dict) # Simulate what happens in the FSDP worker override_model_config = OmegaConf.to_container(OmegaConf.create(omegaconf.model.get("override_config", {}))) # Test extraction attn_implementation = override_model_config.get("attn_implementation", "flash_attention_2") assert attn_implementation == "eager" # Test that other configs are preserved assert override_model_config.get("dropout") == 0.1 def test_hydra_plus_prefix_config(self): """Test that Hydra +prefix configurations work correctly.""" # This simulates the configuration when user specifies: # +actor_rollout_ref.model.override_config.attn_implementation=eager # The + prefix in Hydra adds new keys to the config config_dict = { "actor_rollout_ref": { "model": { "path": "/test/path", "override_config": { "attn_implementation": "eager" # This gets added via +prefix }, } } } omegaconf = OmegaConf.create(config_dict) # Extract override config as done in FSDP workers override_model_config = OmegaConf.to_container( OmegaConf.create(omegaconf.actor_rollout_ref.model.get("override_config", {})) ) # Verify extraction works attn_implementation = override_model_config.get("attn_implementation", "flash_attention_2") assert attn_implementation == "eager" def test_backward_compatibility(self): """Test that the fix maintains backward compatibility.""" # Test case 1: No override_config at all (old behavior) config_without_override = {} attn_implementation = config_without_override.get("attn_implementation", "flash_attention_2") assert attn_implementation == "flash_attention_2" # Test case 2: Empty override_config config_with_empty_override = {"override_config": {}} override_config = config_with_empty_override.get("override_config", {}) attn_implementation = override_config.get("attn_implementation", "flash_attention_2") assert attn_implementation == "flash_attention_2" # Test case 3: override_config with other settings but no attn_implementation config_with_other_overrides = {"override_config": {"dropout": 0.1, "hidden_size": 1024}} override_config = config_with_other_overrides.get("override_config", {}) attn_implementation = override_config.get("attn_implementation", "flash_attention_2") assert attn_implementation == "flash_attention_2" def test_critic_attn_implementation_extraction_logic(self): """Test the core logic for extracting attn_implementation from override config for CriticWorker.""" # Test case 1: Default behavior for critic 
override_config = {} attn_impl = override_config.get("attn_implementation", "flash_attention_2") assert attn_impl == "flash_attention_2" # Test case 2: Override to eager for critic override_config = {"attn_implementation": "eager"} attn_impl = override_config.get("attn_implementation", "flash_attention_2") assert attn_impl == "eager" # Test case 3: Override to sdpa for critic override_config = {"attn_implementation": "sdpa"} attn_impl = override_config.get("attn_implementation", "flash_attention_2") assert attn_impl == "sdpa" # Test case 4: Other configs don't affect attn_implementation for critic override_config = {"other_setting": "value", "dropout": 0.1} attn_impl = override_config.get("attn_implementation", "flash_attention_2") assert attn_impl == "flash_attention_2" @patch("transformers.AutoConfig.from_pretrained") def test_critic_attn_implementation_passed_to_autoconfig(self, mock_config_from_pretrained): """Test that attn_implementation is correctly passed to AutoConfig.from_pretrained in CriticWorker.""" # Mock the AutoConfig return value mock_config = Mock() mock_config.tie_word_embeddings = False mock_config.architectures = ["LlamaForCausalLM"] mock_config.num_labels = 1 mock_config_from_pretrained.return_value = mock_config # Test data for critic model test_cases = [ ({}, "flash_attention_2"), # Default ({"attn_implementation": "eager"}, "eager"), # Override to eager ({"attn_implementation": "sdpa"}, "sdpa"), # Override to sdpa ] for override_config, expected_attn_impl in test_cases: # Reset mocks mock_config_from_pretrained.reset_mock() # Simulate the logic from CriticWorker _build_critic_model_optimizer attn_implementation = override_config.get("attn_implementation", "flash_attention_2") # This simulates what should happen in CriticWorker._build_critic_model_optimizer # (This is where the fix needs to be applied in the actual implementation) AutoConfig.from_pretrained( "test_path", attn_implementation=attn_implementation, trust_remote_code=False, ) # Verify AutoConfig.from_pretrained was called with correct attn_implementation mock_config_from_pretrained.assert_called_once_with( "test_path", attn_implementation=expected_attn_impl, trust_remote_code=False, ) def test_critic_override_config_integration(self): """Test that override_config from Hydra configuration works correctly for CriticWorker.""" # Simulate the OmegaConf configuration structure used in VERL for critic config_dict = { "critic": { "model": {"path": "/test/path", "override_config": {"attn_implementation": "eager", "dropout": 0.1}} } } # Convert to OmegaConf structure omegaconf = OmegaConf.create(config_dict) # Simulate what happens in the CriticWorker override_model_config = OmegaConf.to_container( OmegaConf.create(omegaconf.critic.model.get("override_config", {})) ) # Test extraction for critic attn_implementation = override_model_config.get("attn_implementation", "flash_attention_2") assert attn_implementation == "eager" # Test that other configs are preserved for critic assert override_model_config.get("dropout") == 0.1 def test_critic_hydra_plus_prefix_config(self): """Test that Hydra +prefix configurations work correctly for CriticWorker.""" # This simulates the configuration when user specifies: # +critic.model.override_config.attn_implementation=eager # The + prefix in Hydra adds new keys to the config config_dict = { "critic": { "model": { "path": "/test/path", "override_config": { "attn_implementation": "eager" # This gets added via +prefix for critic }, } } } omegaconf = OmegaConf.create(config_dict) # 
Extract override config as done in CriticWorker override_model_config = OmegaConf.to_container( OmegaConf.create(omegaconf.critic.model.get("override_config", {})) ) # Verify extraction works for critic attn_implementation = override_model_config.get("attn_implementation", "flash_attention_2") assert attn_implementation == "eager" def test_both_actor_and_critic_configuration(self): """Test that both actor and critic can have different attn_implementation overrides simultaneously.""" # This simulates a complete training configuration with both actor and critic overrides config_dict = { "actor_rollout_ref": {"model": {"override_config": {"attn_implementation": "eager"}}}, "critic": {"model": {"override_config": {"attn_implementation": "sdpa"}}}, } omegaconf = OmegaConf.create(config_dict) # Extract actor override config actor_override_config = OmegaConf.to_container( OmegaConf.create(omegaconf.actor_rollout_ref.model.get("override_config", {})) ) actor_attn_implementation = actor_override_config.get("attn_implementation", "flash_attention_2") # Extract critic override config critic_override_config = OmegaConf.to_container( OmegaConf.create(omegaconf.critic.model.get("override_config", {})) ) critic_attn_implementation = critic_override_config.get("attn_implementation", "flash_attention_2") # Verify both can be configured independently assert actor_attn_implementation == "eager" assert critic_attn_implementation == "sdpa" def test_critic_backward_compatibility(self): """Test that the CriticWorker fix maintains backward compatibility.""" # Test case 1: No override_config at all for critic (old behavior) config_without_override = {} attn_implementation = config_without_override.get("attn_implementation", "flash_attention_2") assert attn_implementation == "flash_attention_2" # Test case 2: Empty override_config for critic config_with_empty_override = {"override_config": {}} override_config = config_with_empty_override.get("override_config", {}) attn_implementation = override_config.get("attn_implementation", "flash_attention_2") assert attn_implementation == "flash_attention_2" # Test case 3: override_config with other settings but no attn_implementation for critic config_with_other_overrides = {"override_config": {"dropout": 0.1, "num_labels": 1}} override_config = config_with_other_overrides.get("override_config", {}) attn_implementation = override_config.get("attn_implementation", "flash_attention_2") assert attn_implementation == "flash_attention_2" def test_attn_implementation_fix_integration(): """Integration test to verify the entire fix works as expected.""" # This test simulates the complete flow from configuration to model creation # Step 1: Simulate Hydra configuration with +prefix # user_config = "+actor_rollout_ref.model.override_config.attn_implementation=eager" # This would result in a config structure like: config_dict = {"actor_rollout_ref": {"model": {"override_config": {"attn_implementation": "eager"}}}} # Step 2: Extract override_model_config as done in FSDP workers omegaconf = OmegaConf.create(config_dict) override_model_config = OmegaConf.to_container( OmegaConf.create(omegaconf.actor_rollout_ref.model.get("override_config", {})) ) # Step 3: Apply the fix logic attn_implementation = override_model_config.get("attn_implementation", "flash_attention_2") # Step 4: Verify the fix works assert attn_implementation == "eager" # Step 5: Verify this would be passed to both AutoConfig and Model creation # (This would normally be done with mocks, but we can test the parameter 
preparation) config_params = {"attn_implementation": attn_implementation} model_params = {"attn_implementation": attn_implementation} assert config_params["attn_implementation"] == "eager" assert model_params["attn_implementation"] == "eager" def test_critic_attn_implementation_fix_integration(): """Integration test to verify the entire fix works as expected for CriticWorker.""" # This test simulates the complete flow from configuration to model creation for critic # Step 1: Simulate Hydra configuration with +prefix for critic # user_config = "+critic.model.override_config.attn_implementation=sdpa" # This would result in a config structure like: config_dict = {"critic": {"model": {"override_config": {"attn_implementation": "sdpa"}}}} # Step 2: Extract override_model_config as should be done in CriticWorker omegaconf = OmegaConf.create(config_dict) override_model_config = OmegaConf.to_container(OmegaConf.create(omegaconf.critic.model.get("override_config", {}))) # Step 3: Apply the fix logic (what needs to be implemented in CriticWorker) attn_implementation = override_model_config.get("attn_implementation", "flash_attention_2") # Step 4: Verify the fix works for critic assert attn_implementation == "sdpa" # Step 5: Verify this would be passed to AutoConfig creation for critic config_params = {"attn_implementation": attn_implementation} assert config_params["attn_implementation"] == "sdpa" def test_complete_training_configuration(): """Integration test for a complete training configuration with both actor and critic overrides.""" # This test simulates a realistic training configuration where both # actor and critic have different attention implementations config_dict = { "actor_rollout_ref": { "model": { "path": "/shared/models/llama-7b", "override_config": {"attn_implementation": "eager", "torch_dtype": "bfloat16"}, } }, "critic": { "model": { "path": "/shared/models/llama-7b", "override_config": {"attn_implementation": "sdpa", "num_labels": 1}, } }, } omegaconf = OmegaConf.create(config_dict) # Extract configurations as would be done in the workers actor_override_config = OmegaConf.to_container( OmegaConf.create(omegaconf.actor_rollout_ref.model.get("override_config", {})) ) critic_override_config = OmegaConf.to_container(OmegaConf.create(omegaconf.critic.model.get("override_config", {}))) # Apply the fix logic for both actor_attn_implementation = actor_override_config.get("attn_implementation", "flash_attention_2") critic_attn_implementation = critic_override_config.get("attn_implementation", "flash_attention_2") # Verify both configurations work independently assert actor_attn_implementation == "eager" assert critic_attn_implementation == "sdpa" # Verify other configs are preserved assert actor_override_config.get("torch_dtype") == "bfloat16" assert critic_override_config.get("num_labels") == 1 if __name__ == "__main__": # Run basic tests test_attn_implementation_fix_integration() test_critic_attn_implementation_fix_integration() test_complete_training_configuration() if VERL_AVAILABLE: # Run class-based tests test_class = TestFSDPAttnImplementation() test_class.test_attn_implementation_extraction_logic() test_class.test_override_config_integration() test_class.test_hydra_plus_prefix_config() test_class.test_backward_compatibility() # Run new critic tests test_class.test_critic_attn_implementation_extraction_logic() test_class.test_critic_override_config_integration() test_class.test_critic_hydra_plus_prefix_config() test_class.test_both_actor_and_critic_configuration() 
test_class.test_critic_backward_compatibility() print("✓ All FSDP attn_implementation tests passed!") print("✓ All CriticWorker attn_implementation tests passed!") else: print("⚠ VERL components not available, skipping VERL-specific tests") print("✓ Integration tests passed!") print("✓ Critic integration tests passed!") ================================================ FILE: verl_distillation/tests/workers/test_fsdp_workers.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from omegaconf import OmegaConf from verl.workers.fsdp_workers import ActorRolloutRefWorker def test_actor_rollout_ref_worker_actor_ref_model(): """Test specifying different reference/actor model""" os.environ["RANK"] = "0" os.environ["WORLD_SIZE"] = "1" os.environ["MASTER_ADDR"] = "127.0.0.1" os.environ["MASTER_PORT"] = "8888" config_str = """ model: path: Qwen/Qwen2.5-0.5B-Instruct actor: _target_: verl.workers.config.FSDPActorConfig strategy: fsdp fsdp_config: _target_: verl.workers.config.FSDPEngineConfig fsdp_size: -1 forward_prefetch: false profiler: tool: torch_memory save_path: ./mem_snapshots tool_config: torch_memory: _target_: verl.utils.profiler.config.TorchMemoryToolConfig trace_alloc_max_entries: 100000 stack_depth: 32 ref: model: path: Qwen/Qwen2.5-1.5B-Instruct fsdp_config: _target_: verl.workers.config.FSDPEngineConfig fsdp_size: -1 profiler: tool: torch_memory save_path: ./mem_snapshots tool_config: torch_memory: _target_: verl.utils.profiler.config.TorchMemoryToolConfig trace_alloc_max_entries: 100000 stack_depth: 32 log_prob_micro_batch_size: 1 ulysses_sequence_parallel_size: 1 entropy_from_logits_with_chunking: false """ dict_conf = OmegaConf.create(config_str) actor_rollout_ref_worker = ActorRolloutRefWorker(dict_conf, role="ref") actor_rollout_ref_worker.init_model() model_config = actor_rollout_ref_worker.ref_module_fsdp._fsdp_wrapped_module.config assert model_config.hidden_size == 1536 # set ref.model to null, fallback to default case where actor is the same as reference dict_conf["ref"]["model"] = None actor_rollout_ref_worker = ActorRolloutRefWorker(dict_conf, role="ref") actor_rollout_ref_worker.init_model() model_config = actor_rollout_ref_worker.ref_module_fsdp._fsdp_wrapped_module.config assert model_config.hidden_size == 896 ================================================ FILE: verl_distillation/verl/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. import importlib import logging import os from importlib.metadata import PackageNotFoundError from importlib.metadata import version as get_version from packaging.version import parse as parse_version from .protocol import DataProto from .utils.device import is_npu_available from .utils.import_utils import import_external_libs from .utils.logging_utils import set_basic_config version_folder = os.path.dirname(os.path.join(os.path.abspath(__file__))) with open(os.path.join(version_folder, "version/version")) as f: __version__ = f.read().strip() set_basic_config(level=logging.WARNING) __all__ = ["DataProto", "__version__"] modules = os.getenv("VERL_USE_EXTERNAL_MODULES", "") if modules: modules = modules.split(",") import_external_libs(modules) if os.getenv("VERL_USE_MODELSCOPE", "False").lower() == "true": if importlib.util.find_spec("modelscope") is None: raise ImportError("You are using the modelscope hub, please install modelscope by `pip install modelscope -U`") # Patch hub to download models from modelscope to speed up. from modelscope.utils.hf_util import patch_hub patch_hub() if is_npu_available: from .models.transformers import npu_patch as npu_patch package_name = "transformers" required_version_spec = "4.52.4" try: installed_version = get_version(package_name) installed = parse_version(installed_version) required = parse_version(required_version_spec) if installed < required: raise ValueError( f"{package_name} version >= {required_version_spec} is required on ASCEND NPU, current version is " f"{installed}." ) except PackageNotFoundError as e: raise ImportError( f"package {package_name} is not installed, please run pip install {package_name}=={required_version_spec}" ) from e # In verl, the driver process aggregates the computation results of workers via Ray. # Therefore, after a worker completes its computation job, it will package the output # using tensordict and transfer it to the CPU. Since the `to` operation of tensordict # is non-blocking, when transferring data from a device to the CPU, it is necessary to # ensure that a batch of data has been completely transferred before being used on the # host; otherwise, unexpected precision issues may arise. Tensordict has already noticed # this problem and fixed it. Ref: https://github.com/pytorch/tensordict/issues/725 # However, the relevant modifications only cover CUDA and MPS devices and do not take effect # for third-party devices such as NPUs. This patch fixes this issue, and the relevant # modifications can be removed once the fix is merged into tensordict. import tensordict if parse_version(tensordict.__version__) < parse_version("0.10.0"): from tensordict.base import TensorDictBase def _sync_all_patch(self): from torch._utils import _get_available_device_type, _get_device_module device_type = _get_available_device_type() if device_type is None: return device_module = _get_device_module(device_type) device_module.synchronize() TensorDictBase._sync_all = _sync_all_patch ================================================ FILE: verl_distillation/verl/base_config.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import collections from dataclasses import FrozenInstanceError, dataclass, fields from typing import Any # BaseConfig class inherits from collections.abc.Mapping, which means it can act like a dictionary @dataclass class BaseConfig(collections.abc.Mapping): """The BaseConfig provides a dict-like interface for a dataclass config. By default, all fields in the config are not mutable, unless specified in "_mutable_fields". The BaseConfig class implements the Mapping Abstract Base Class. This allows instances of this class to be used like dictionaries. """ _mutable_fields = set() _target_: str = "" def __setattr__(self, name: str, value): """Set the value of an attribute. Check if the attr is mutable before setting the value.""" # If the field already exists, it's considered frozen unless it's in _mutable_fields if name in self.__dict__ and name not in getattr(self, "_mutable_fields", set()): raise FrozenInstanceError(f"Field '{name}' is frozen and cannot be modified") super().__setattr__(name, value) def get(self, key: str, default: Any = None) -> Any: """Get the value associated with the given key. If the key does not exist, return the default value. Args: key (str): The attribute name to retrieve. default (Any, optional): The value to return if the attribute does not exist. Defaults to None. Returns: Any: The value of the attribute or the default value. """ try: return getattr(self, key) except AttributeError: return default def __getitem__(self, key: str): """Implement the [] operator for the class. Allows accessing attributes like dictionary items. Args: key (str): The attribute name to retrieve. Returns: Any: The value of the attribute. Raises: AttributeError: If the attribute does not exist. TypeError: If the key type is not a string """ return getattr(self, key) def __iter__(self): """Implement the iterator protocol. Allows iterating over the attribute names of the instance. Yields: str: The name of each field in the dataclass. """ for f in fields(self): yield f.name def __len__(self): """ Return the number of fields in the dataclass. Returns: int: The number of fields in the dataclass. """ return len(fields(self)) ================================================ FILE: verl_distillation/verl/experimental/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: verl_distillation/verl/experimental/agent_loop/__init__.py ================================================ # Copyright 2024 Bytedance Ltd.
and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .agent_loop import AgentLoopBase, AgentLoopManager, AgentLoopWorker, AsyncLLMServerManager from .single_turn_agent_loop import SingleTurnAgentLoop from .tool_agent_loop import ToolAgentLoop _ = [SingleTurnAgentLoop, ToolAgentLoop] __all__ = ["AgentLoopBase", "AgentLoopManager", "AsyncLLMServerManager", "AgentLoopWorker"] ================================================ FILE: verl_distillation/verl/experimental/agent_loop/agent_loop.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import asyncio import heapq import logging import os import random from abc import ABC, abstractmethod from typing import Any, Optional import hydra import numpy as np import ray import torch from cachetools import LRUCache from omegaconf import DictConfig, OmegaConf from pydantic import BaseModel, ConfigDict from tensordict import TensorDict from transformers import AutoProcessor, AutoTokenizer from verl.experimental.reward import RewardManagerWorker from verl.protocol import DataProto from verl.single_controller.ray.base import RayWorkerGroup from verl.utils import hf_processor, hf_tokenizer from verl.utils.fs import copy_to_local from verl.utils.model import compute_position_id_with_mask from verl.utils.rollout_trace import (RolloutTraceConfig, rollout_trace_attr, rollout_trace_op) from verl.utils.transferqueue_utils import tqbridge from verl.workers.rollout.replica import TokenOutput, get_rollout_replica_class logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) class AsyncLLMServerManager: """ A class to manage multiple OpenAI compatible LLM servers. This class provides - Load balance: least requests load balancing - Sticky session: send multi-turn chat completions to the same server for automatic prefix caching """ def __init__(self, config: DictConfig, server_handles: list[ray.actor.ActorHandle], max_cache_size: int = 10000): """Initialize the AsyncLLMServerManager. Args: config (DictConfig): YAML config. server_handles (List[ray.actor.ActorHandle]): OpenAI compatible LLM server actor handles. max_cache_size (int, optional): max cache size for request_id to server mapping. Defaults to 10000.
""" self.config = config self.server_handles = server_handles random.shuffle(self.server_handles) # Least requests load balancing self.weighted_servers = [[0, (hash(server), server)] for server in server_handles] heapq.heapify(self.weighted_servers) # LRU cache to map request_id to server self.request_id_to_server = LRUCache(maxsize=max_cache_size) def _choose_server(self, request_id: str) -> ray.actor.ActorHandle: # TODO: implement server-pressure-aware load balancing if request_id in self.request_id_to_server: return self.request_id_to_server[request_id] server = self.weighted_servers[0][1][1] self.weighted_servers[0][0] += 1 heapq.heapreplace(self.weighted_servers, self.weighted_servers[0]) self.request_id_to_server[request_id] = server return server @rollout_trace_op async def generate( self, request_id, *, prompt_ids: list[int], sampling_params: dict[str, Any], image_data: Optional[list[Any]] = None, ) -> TokenOutput: """Generate tokens from prompt ids. Args: request_id (str): request id for sticky session. prompt_ids (List[int]): List of prompt token ids. sampling_params (Dict[str, Any]): Sampling parameters for the chat completion. Returns: TokenOutput: token output """ server = self._choose_server(request_id) output = await server.generate.remote( request_id=request_id, prompt_ids=prompt_ids, sampling_params=sampling_params, image_data=image_data, ) return output class AgentLoopMetrics(BaseModel): """Agent loop performance metrics.""" generate_sequences: float = 0.0 tool_calls: float = 0.0 class AgentLoopOutput(BaseModel): """Agent loop output.""" prompt_ids: list[int] """Prompt token ids.""" response_ids: list[int] """Response token ids, including LLM generated tokens and tool response tokens.""" response_mask: list[int] """Response mask, 1 for LLM generated tokens, 0 for tool response tokens.""" distill_special_token_mask: Optional[list[int]] = None """Distill mask, 1 for special tokens, 0 for normal tokens (used by the ref model).""" response_logprobs: Optional[list[float]] = None """Log probabilities for the response tokens.""" multi_modal_data: Optional[dict[str, Any]] = None """Multi-modal data for multi-modal tools.""" reward_score: Optional[float] = None """Reward score for the trajectory.""" num_turns: int = 0 """Number of chat turns, including user, assistant, tool.""" metrics: AgentLoopMetrics """Auxiliary performance metrics""" extra_fields: dict[str, Any] = {} """Extra fields for dynamic addition.""" class _InternalAgentLoopOutput(AgentLoopOutput): """Internal agent loop output with padded sequences.""" model_config = ConfigDict(arbitrary_types_allowed=True) prompt_ids: torch.Tensor """Padded prompt token ids.""" response_ids: torch.Tensor """Padded response token ids.""" input_ids: torch.Tensor """Padded input ids (prompt_ids + response_ids).""" position_ids: torch.Tensor """Padded position ids.""" response_mask: torch.Tensor """Padded response mask.""" attention_mask: torch.Tensor """Padded attention mask.""" response_logprobs: Optional[torch.Tensor] = None """Padded log probabilities for the response tokens.""" multi_modal_inputs: Optional[dict[str, torch.Tensor]] = None """Multi-modal inputs for processors (e.g., pixel_values, image_grid_thw).""" distill_special_token_mask: Optional[torch.Tensor] = None """Distill mask, 1 for special tokens, 0 for normal tokens (used by the ref model).""" extra_fields: dict[str, Any] = {} """Extra fields for dynamic addition.""" # make hydra.utils.instantiate happy class _DummyConfig: def __init__(self, config: DictConfig) -> None: self.config = config
class AgentLoopBase(ABC): """An agent loop takes an input message, chats with an OpenAI compatible LLM server, and interacts with various environments.""" _class_initialized = False def __init__( self, trainer_config: _DummyConfig, server_manager: AsyncLLMServerManager, tokenizer: AutoTokenizer, processor: AutoProcessor, **kwargs, ): """Initialize agent loop, each sample will have its own loop instance. Args: trainer_config (_DummyConfig): trainer config. server_manager (AsyncLLMServerManager): OpenAI compatible LLM server manager. tokenizer (AutoTokenizer): Tokenizer for tokenizing messages. processor (AutoProcessor): Processor for processing messages. """ self.init_class(config=trainer_config.config, tokenizer=tokenizer, processor=processor, **kwargs) self.config = trainer_config.config self.server_manager = server_manager self.tokenizer = tokenizer self.processor = processor self.loop = asyncio.get_running_loop() @classmethod def init_class(cls, config: DictConfig, tokenizer: AutoTokenizer, processor: AutoProcessor, **kwargs): """This is used to do heavy initialization work that should be shared across all instances. It's only called once. Args: config (DictConfig): trainer config. tokenizer (AutoTokenizer): Tokenizer for tokenizing messages. processor (AutoProcessor): Processor for processing multi_modal data. **kwargs: extra kwargs from config file passed in by `hydra.utils.instantiate`. """ if cls._class_initialized: return cls._class_initialized = True @abstractmethod async def run(self, sampling_params: dict[str, Any], **kwargs) -> AgentLoopOutput: """Run agent loop to interact with LLM server and environment. Args: sampling_params (Dict[str, Any]): LLM sampling params. **kwargs: dataset fields from `verl.utils.dataset.RLHFDataset`. Returns: AgentLoopOutput: Agent loop output. """ raise NotImplementedError """Agent loop registry: key is agent_name, value is a dict of agent loop config used by hydra.utils.instantiate to initialize agent loop instance. https://hydra.cc/docs/advanced/instantiate_objects/overview/ """ _agent_loop_registry: dict[str, dict] = {} def register(agent_name: str): """Register agent loop class.""" def decorator(subclass: type[AgentLoopBase]) -> type[AgentLoopBase]: fqdn = f"{subclass.__module__}.{subclass.__qualname__}" _agent_loop_registry[agent_name] = {"_target_": fqdn} return subclass return decorator class AgentLoopWorkerBase: """Agent loop worker takes a batch of messages and runs each message in an agent loop.""" def __init__( self, config: DictConfig, server_handles: list[ray.actor.ActorHandle], reward_router_address: str = None, ): """Initialize agent loop worker. Args: config (DictConfig): YAML config. server_handles (List[ray.actor.ActorHandle]): OpenAI compatible LLM server actor handles.
""" self.config = config # for recipe to change if not hasattr(self, "server_manager"): self.server_manager = AsyncLLMServerManager(config, server_handles) self.reward_router_address = reward_router_address model_path = config.actor_rollout_ref.model.path self.model_name = "/".join(model_path.split("/")[-2:]) local_path = copy_to_local(config.actor_rollout_ref.model.path) self.tokenizer = hf_tokenizer(local_path, trust_remote_code=True) self.processor = hf_processor(local_path, trust_remote_code=True) agent_loop_config_path = config.actor_rollout_ref.rollout.agent.agent_loop_config_path if agent_loop_config_path: agent_loop_configs = OmegaConf.load(agent_loop_config_path) for agent_loop_config in agent_loop_configs: _agent_loop_registry[agent_loop_config.name] = agent_loop_config if self.config.actor_rollout_ref.model.get("custom_chat_template", None) is not None: if self.processor is not None: self.processor.chat_template = self.config.actor_rollout_ref.model.custom_chat_template self.tokenizer.chat_template = self.config.actor_rollout_ref.model.custom_chat_template self.reward_manager_worker = RewardManagerWorker.options( scheduling_strategy=ray.util.scheduling_strategies.NodeAffinitySchedulingStrategy( node_id=ray.get_runtime_context().get_node_id(), soft=False, ), ).remote(self.config, self.reward_router_address) trace_config = self.config.actor_rollout_ref.rollout.get("trace", {}) RolloutTraceConfig.init( self.config.trainer.project_name, self.config.trainer.experiment_name, trace_config.get("backend"), trace_config.get("token2text", False), ) @tqbridge() async def generate_sequences(self, batch: DataProto) -> DataProto: """Generate sequences from agent loop. Args: batch (DataProto): Input batch. Returns: DataProto: Output batch. - prompts: [bsz, prompt_length], prompt token ids from dataset. - responses: [bsz, response_length], output token ids include response tokens from LLM generation and observation tokens from tool_calls. - response_mask: [bsz, response_length], 1 for LLM generated tokens, 0 for observation/padding tokens. - input_ids: [bsz, prompt_length + response_length], whole sequence token ids, including prompt tokens and response tokens. - attention_mask: [bsz, prompt_length + response_length], 0 for padding tokens, 1 for other tokens. - position_ids: [bsz, prompt_length + response_length], incremental position ids. 
For multi-turn conversations: responses: |<- LLM generation ->|<- tool_calls ->|<- LLM generation ->|<- padding ->| response_mask: | 1, 1, 1, ..., 1, 1 | 0, 0, .., 0, 0 | 1, 1, 1, ..., 1, 1 | 0, 0, ..., 0| """ config = self.config.actor_rollout_ref.rollout sampling_params = dict( temperature=config.temperature, top_p=config.top_p, repetition_penalty=1.0, logprobs=config.calculate_log_probs, ) # override sampling params for validation if batch.meta_info.get("validate", False): sampling_params["top_p"] = config.val_kwargs.top_p sampling_params["temperature"] = config.val_kwargs.temperature # by default, we assume it's a single turn agent if "agent_name" not in batch.non_tensor_batch: default_agent_loop = config.agent.default_agent_loop batch.non_tensor_batch["agent_name"] = np.array([default_agent_loop] * len(batch), dtype=object) if "index" in batch.non_tensor_batch: index = batch.non_tensor_batch["index"] else: index = np.arange(len(batch)) trajectory_info = await get_trajectory_info( batch.meta_info.get("global_steps", -1), index.tolist(), batch.meta_info.get("validate", False) ) tasks = [] for i in range(len(batch)): kwargs = {k: v[i] for k, v in batch.non_tensor_batch.items()} tasks.append(asyncio.create_task(self._run_agent_loop(sampling_params, trajectory_info[i], **kwargs))) outputs = await asyncio.gather(*tasks) output = self._postprocess(outputs) return output async def _run_agent_loop( self, sampling_params: dict[str, Any], trajectory: dict[str, Any], *, agent_name: str, **kwargs, ) -> _InternalAgentLoopOutput: with rollout_trace_attr( step=trajectory["step"], sample_index=trajectory["sample_index"], rollout_n=trajectory["rollout_n"], validate=trajectory["validate"], name="agent_loop", ): assert agent_name in _agent_loop_registry, ( f"Agent loop {agent_name} not registered, registered agent loops: {_agent_loop_registry.keys()}" ) agent_loop_config = _agent_loop_registry[agent_name] agent_loop = hydra.utils.instantiate( config=agent_loop_config, trainer_config=_DummyConfig(config=self.config), server_manager=self.server_manager, tokenizer=self.tokenizer, processor=self.processor, ) output: AgentLoopOutput = await agent_loop.run(sampling_params, **kwargs) # Some AgentLoop may have already computed the reward score, e.g SWE-agent. 
# NOTE: consistent with batch version of generate_sequences in vllm_rollout_spmd.py # prompt_ids: left padded with zeros (e.g., [0,0,0,0,1,2,3,4]) # response_ids: right padded with zeros (e.g., [5,6,7,8,0,0,0,0]) # input_ids: concatenation of prompt + response # Mask: # For example, if the prompt is [1,2,3,4] and the response is [5,6,7,(tool start)8,9(tool end),10,11,12] # - prompt_attention_mask: 0s for padding, 1s for tokens # e.g., [0,0,0,0,1,1,1,1] # - response_attention_mask: 0s for padding, 1s for tokens # e.g., [1,1,1,1,1,1,1,1,1,1,1,0,0,0,0] # attention_mask: concatenation of prompt_attention_mask and response_attention_mask # e.g., [0,0,0,0,1,1,1,1(prompt),1,1,1,1,1,1,1,1,1,1,1,0,0,0,0(response)] # - response_mask: 1s for LLM generated tokens, 0 for tool response/padding tokens # e.g., [1,1,1,1,1,1,1,(tool start),0,0(tool end),1,1,0,0,0,0] # - position_ids: sequential positions for tokens, starting at 0 # e.g., [0,0,0,0,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,0,0,0,0] self.tokenizer.padding_side = "left" prompt_output = self.tokenizer.pad( {"input_ids": output.prompt_ids}, padding="max_length", max_length=self.config.actor_rollout_ref.rollout.prompt_length, return_tensors="pt", return_attention_mask=True, ) if prompt_output["input_ids"].dim() == 1: prompt_output["input_ids"] = prompt_output["input_ids"].unsqueeze(0) prompt_output["attention_mask"] = prompt_output["attention_mask"].unsqueeze(0) self.tokenizer.padding_side = "right" response_output = self.tokenizer.pad( {"input_ids": output.response_ids}, padding="max_length", max_length=self.config.actor_rollout_ref.rollout.response_length, return_tensors="pt", return_attention_mask=True, ) if response_output["input_ids"].dim() == 1: response_output["input_ids"] = response_output["input_ids"].unsqueeze(0) response_output["attention_mask"] = response_output["attention_mask"].unsqueeze(0) response_mask_output = self.tokenizer.pad( {"input_ids": output.response_mask}, padding="max_length", max_length=self.config.actor_rollout_ref.rollout.response_length, return_tensors="pt", return_attention_mask=False, ) if response_mask_output["input_ids"].dim() == 1: response_mask_output["input_ids"] = response_mask_output["input_ids"].unsqueeze(0) distill_special_token_mask = None if output.distill_special_token_mask is not None: distill_special_token_mask_output = self.tokenizer.pad( {"input_ids": output.distill_special_token_mask}, padding="max_length", max_length=self.config.actor_rollout_ref.rollout.response_length, return_tensors="pt", return_attention_mask=False, ) if distill_special_token_mask_output["input_ids"].dim() == 1: distill_special_token_mask_output["input_ids"] = distill_special_token_mask_output["input_ids"].unsqueeze(0) distill_special_token_mask = distill_special_token_mask_output["input_ids"] * response_output["attention_mask"] response_logprobs = None if output.response_logprobs is not None: pad_size = self.config.actor_rollout_ref.rollout.response_length - len(output.response_logprobs) response_logprobs = torch.tensor(output.response_logprobs + [0.0] * pad_size).unsqueeze(0) response_mask = response_mask_output["input_ids"] * response_output["attention_mask"] attention_mask = torch.cat([prompt_output["attention_mask"], response_output["attention_mask"]], dim=1) input_ids = torch.cat([prompt_output["input_ids"], response_output["input_ids"]], dim=1) # Handle multi-modal inputs and position_ids calculation # Only support Qwen2VLImageProcessor for multi-modal processing currently # TODO: support other multi-modal inputs 
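# For Qwen2-VL style processors, get_rope_index computes the 3D mrope position ids
# (temporal/height/width) over the whole sequence; below they are concatenated with
# plain 1D text position ids into a (1, 4, seq_len) tensor, while the text-only path
# falls back to 1D position ids derived from the attention mask.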
multi_modal_inputs = None if ( self.processor is not None and "Qwen2VLImageProcessor" in self.processor.image_processor.__class__.__name__ ): from verl.models.transformers.qwen2_vl import get_rope_index images = getattr(output, "multi_modal_data", {}).get("image", None) current_text = self.tokenizer.decode(input_ids.squeeze(0), skip_special_tokens=True) multi_modal_inputs = self.processor(text=[current_text], images=images, return_tensors="pt") multi_modal_inputs.pop("input_ids", None) multi_modal_inputs.pop("attention_mask", None) # We must use dict(multi_modal_inputs) to convert BatchFeature values to a new dict # because np.array() only keeps the keys for BatchFeature. multi_modal_inputs = dict(multi_modal_inputs) image_grid_thw = multi_modal_inputs.get("image_grid_thw") video_grid_thw = multi_modal_inputs.get("video_grid_thw") second_per_grid_ts = multi_modal_inputs.get("second_per_grid_ts") vision_position_ids = get_rope_index( self.processor, input_ids=input_ids.squeeze(0), image_grid_thw=image_grid_thw, video_grid_thw=video_grid_thw, second_per_grid_ts=second_per_grid_ts, attention_mask=attention_mask.squeeze(0), ).unsqueeze(0) # (1, 3, seq_len) valid_mask = attention_mask[0].bool() text_position_ids = torch.ones((1, len(input_ids[0])), dtype=torch.long) text_position_ids[0, valid_mask] = torch.arange(valid_mask.sum().item()) text_position_ids = text_position_ids.unsqueeze(0) position_ids = torch.cat((text_position_ids, vision_position_ids), dim=1) # (1, 4, seq_length) else: position_ids = compute_position_id_with_mask(attention_mask) # (1, seq_len) enable_async_reward = ( self.reward_router_address is not None and self.config.reward_model.enable_resource_pool ) or not self.config.reward_model.enable if output.reward_score is None and enable_async_reward and self.config.reward_model.get("compute_in_agent_loop", False): batch = TensorDict( { "prompts": prompt_output["input_ids"], # [1, prompt_length] "responses": response_output["input_ids"], # [1, response_length] "attention_mask": attention_mask, # [1, prompt_length + response_length] "input_ids": input_ids, # [1, prompt_length + response_length] "position_ids": position_ids, }, batch_size=1, ) non_tensor_batch = { **{k: np.array([v]) for k, v in kwargs.items()}, "__num_turns__": np.array([output.num_turns]), "tool_extra_fields": np.array([output.extra_fields], dtype=object), } data = DataProto( batch=batch, non_tensor_batch=non_tensor_batch, ) result = await self.reward_manager_worker.compute_score.remote(data) output.reward_score = result["reward_score"] output.extra_fields["reward_extra_info"] = result["reward_extra_info"] return _InternalAgentLoopOutput( prompt_ids=prompt_output["input_ids"], response_ids=response_output["input_ids"], input_ids=input_ids, position_ids=position_ids, response_mask=response_mask, attention_mask=attention_mask, response_logprobs=response_logprobs, distill_special_token_mask=distill_special_token_mask, multi_modal_inputs=multi_modal_inputs, multi_modal_data=output.multi_modal_data, reward_score=output.reward_score, num_turns=output.num_turns, metrics=output.metrics, extra_fields=output.extra_fields, ) def _postprocess(self, inputs: list[_InternalAgentLoopOutput]) -> DataProto: """Process the padded outputs from _run_agent_loop and combine them into a batch.""" # Convert lists back to tensors and stack them to create a batch. 
prompt_ids = torch.cat([input.prompt_ids for input in inputs], dim=0) response_ids = torch.cat([input.response_ids for input in inputs], dim=0) response_mask = torch.cat([input.response_mask for input in inputs], dim=0) attention_mask = torch.cat([input.attention_mask for input in inputs], dim=0) input_ids = torch.cat([input.input_ids for input in inputs], dim=0) position_ids = torch.cat([input.position_ids for input in inputs], dim=0) optional_outputs = {} if inputs[0].response_logprobs is not None: optional_outputs["rollout_log_probs"] = torch.cat([input.response_logprobs for input in inputs], dim=0) if inputs[0].distill_special_token_mask is not None: optional_outputs["distill_special_token_mask"] = torch.cat([input.distill_special_token_mask for input in inputs], dim=0) batch = TensorDict( { "prompts": prompt_ids, # [bsz, prompt_length] "responses": response_ids, # [bsz, response_length] "response_mask": response_mask, # [bsz, response_length] "input_ids": input_ids, # [bsz, prompt_length + response_length] "attention_mask": attention_mask, # [bsz, prompt_length + response_length] # position_ids: [bsz, 3, prompt_length + response_length] or [bsz, prompt_length + response_length] "position_ids": position_ids, **optional_outputs, }, batch_size=len(inputs), ) scores = [input.reward_score for input in inputs] if all(score is not None for score in scores): prompt_length = prompt_ids.size(1) response_length = attention_mask[:, prompt_length:].sum(dim=1) - 1 rm_scores = torch.zeros_like(response_mask, dtype=torch.float32) rm_scores[torch.arange(response_mask.size(0)), response_length] = torch.tensor(scores, dtype=torch.float32) batch["rm_scores"] = rm_scores non_tensor_batch = { "__num_turns__": np.array([input.num_turns for input in inputs], dtype=np.int32), } # add reward_extra_info to non_tensor_batch reward_extra_infos = [input.extra_fields.get("reward_extra_info", {}) for input in inputs] reward_extra_keys = list(reward_extra_infos[0].keys()) for key in reward_extra_keys: non_tensor_batch[key] = np.array([info[key] for info in reward_extra_infos]) # Add multi_modal_inputs to non_tensor_batch if any samples have them multi_modal_inputs_list = [input.multi_modal_inputs for input in inputs] if any(mmi is not None for mmi in multi_modal_inputs_list): non_tensor_batch["multi_modal_inputs"] = np.array(multi_modal_inputs_list, dtype=object) metrics = [input.metrics.model_dump() for input in inputs] # Collect extra fields from all inputs and convert them to np.ndarray extra_fields = {} all_keys = set(key for input_item in inputs for key in input_item.extra_fields) for key in all_keys: temp_arr = np.empty(len(inputs), dtype=object) temp_arr[:] = [input.extra_fields.get(key) for input in inputs] extra_fields[key] = temp_arr non_tensor_batch.update(extra_fields) return DataProto( batch=batch, non_tensor_batch=non_tensor_batch, meta_info={"metrics": metrics, "reward_extra_keys": reward_extra_keys}, ) def create_transferqueue_client(self, controller_infos, storage_infos, role): """Create a client for data system(transfer queue).""" from verl.single_controller.ray.base import get_random_string from verl.utils.transferqueue_utils import create_transferqueue_client client_name = get_random_string(length=6) create_transferqueue_client( client_id=f"{role}_worker_{client_name}", controller_infos=controller_infos, storage_infos=storage_infos, ) @ray.remote class AgentLoopWorker(AgentLoopWorkerBase): """Agent loop worker takes a batch of messages and run each message in an agent loop.""" def __init__( self, 
config: DictConfig, server_handles: list[ray.actor.ActorHandle], reward_router_address: str = None ): """Initialize agent loop worker. Args: config (DictConfig): YAML config. server_handles (List[ray.actor.ActorHandle]): OpenAI compatible LLM server actor handles. reward_router_address (str): reward router address. """ super().__init__(config, server_handles, reward_router_address) async def get_trajectory_info(step, index, validate): """Get trajectory info. Args: step (int): global steps in the trainer. index (list): from the dataset's extra_info.index column. validate (bool): whether this is a validation step. Returns: list: trajectory info for each sample. """ trajectory_info = [] rollout_n = 0 for i in range(len(index)): if i > 0 and index[i - 1] == index[i]: rollout_n += 1 else: rollout_n = 0 trajectory_info.append({"step": step, "sample_index": index[i], "rollout_n": rollout_n, "validate": validate}) return trajectory_info class AgentLoopManager: """Agent loop manager that manages a group of agent loop workers.""" def __init__(self, config: DictConfig, worker_group: RayWorkerGroup = None, rm_wg: RayWorkerGroup = None): """Initialize agent loop manager. Args: config (DictConfig): trainer config. worker_group (RayWorkerGroup): ActorRolloutRef worker group for hybrid mode; None for standalone mode. """ self.config = config self.worker_group = worker_group self.reward_model_manager = None self.reward_router_address = None if self.config.reward_model.enable and self.config.reward_model.enable_resource_pool: from verl.experimental.reward import RewardModelManager self.reward_model_manager = RewardModelManager(config.reward_model, rm_wg) self.reward_router_address = self.reward_model_manager.get_router_address() # for recipe to change if not hasattr(self, "rollout_replica_class"): self.rollout_replica_class = get_rollout_replica_class(self.config.actor_rollout_ref.rollout.name) if not hasattr(self, "agent_loop_workers_class"): self.agent_loop_workers_class = AgentLoopWorker self._initialize_llm_servers() self._init_agent_loop_workers() # Initially we're in sleep mode.
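# NOTE: with free_cache_engine enabled, rollout replicas stay asleep while idle and
# are only woken up around generate_sequences (see below), so GPU memory held by the
# inference engine can be released between generation rounds.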
if self.config.actor_rollout_ref.rollout.free_cache_engine: self.sleep() def _initialize_llm_servers(self): rollout_world_size = ( self.config.actor_rollout_ref.rollout.tensor_model_parallel_size * self.config.actor_rollout_ref.rollout.data_parallel_size * self.config.actor_rollout_ref.rollout.pipeline_model_parallel_size ) world_size = ( self.worker_group.world_size if self.worker_group else self.config.trainer.n_gpus_per_node * self.config.trainer.nnodes ) num_replicas = world_size // rollout_world_size rollout_config = self.config.actor_rollout_ref.rollout model_config = self.config.actor_rollout_ref.model self.rollout_replicas = [ self.rollout_replica_class( replica_rank=replica_rank, config=rollout_config, model_config=model_config, gpus_per_node=self.config.trainer.n_gpus_per_node, ) for replica_rank in range(num_replicas) ] if self.worker_group: self._run_all([server.init_hybrid(self.worker_group) for server in self.rollout_replicas]) else: self._run_all([server.init_standalone() for server in self.rollout_replicas]) self.server_handles = [server._server_handle for server in self.rollout_replicas] self.server_addresses = [server._server_address for server in self.rollout_replicas] def _init_agent_loop_workers(self): self.agent_loop_workers = [] num_workers = self.config.actor_rollout_ref.rollout.agent.num_workers node_ids = [node["NodeID"] for node in ray.nodes() if node["Alive"] and node["Resources"].get("CPU", 0) > 0] for i in range(num_workers): # Round-robin scheduling over all the alive nodes node_id = node_ids[i % len(node_ids)] self.agent_loop_workers.append( self.agent_loop_workers_class.options( name=f"agent_loop_worker_{i}", scheduling_strategy=ray.util.scheduling_strategies.NodeAffinitySchedulingStrategy( node_id=node_id, soft=True ), ).remote(self.config, self.server_handles, self.reward_router_address) ) def generate_sequences(self, prompts: DataProto) -> DataProto: """Split input batch and dispatch to agent loop workers. Args: prompts (DataProto): Input batch. Returns: DataProto: Output batch.
""" if self.config.actor_rollout_ref.rollout.free_cache_engine: self.wake_up() if self.reward_model_manager and self.config.reward_model.rollout.free_cache_engine: self.reward_model_manager.wake_up() chunks = prompts.chunk(len(self.agent_loop_workers)) outputs = ray.get( [ worker.generate_sequences.remote(chunk) for worker, chunk in zip(self.agent_loop_workers, chunks, strict=True) ] ) output = DataProto.concat(outputs) if self.config.actor_rollout_ref.rollout.free_cache_engine: self.sleep() if self.reward_model_manager and self.config.reward_model.rollout.free_cache_engine: self.reward_model_manager.sleep() # calculate performance metrics metrics = [output.meta_info.pop("metrics") for output in outputs] # List[List[Dict[str, str]]] timing = self._performance_metrics(metrics, output) output.meta_info = {"timing": timing, **outputs[0].meta_info} return output def _performance_metrics(self, metrics: list[list[dict[str, str]]], output: DataProto) -> dict[str, float]: timing = {} t_generate_sequences = np.array([metric["generate_sequences"] for chunk in metrics for metric in chunk]) t_tool_calls = np.array([metric["tool_calls"] for chunk in metrics for metric in chunk]) timing["agent_loop/generate_sequences/min"] = t_generate_sequences.min() timing["agent_loop/generate_sequences/max"] = t_generate_sequences.max() timing["agent_loop/generate_sequences/mean"] = t_generate_sequences.mean() timing["agent_loop/tool_calls/min"] = t_tool_calls.min() timing["agent_loop/tool_calls/max"] = t_tool_calls.max() timing["agent_loop/tool_calls/mean"] = t_tool_calls.mean() # batch sequence generation is bounded by the slowest sample slowest = np.argmax(t_generate_sequences + t_tool_calls) attention_mask = output.batch["attention_mask"][slowest] prompt_length = output.batch["prompts"].shape[1] timing["agent_loop/slowest/generate_sequences"] = t_generate_sequences[slowest] timing["agent_loop/slowest/tool_calls"] = t_tool_calls[slowest] timing["agent_loop/slowest/prompt_length"] = attention_mask[:prompt_length].sum().item() timing["agent_loop/slowest/response_length"] = attention_mask[prompt_length:].sum().item() # on-policy distill if "distill_special_token_mask" in output.batch: distill_special_token_mask = output.batch["distill_special_token_mask"] timing["agent_loop/on_policy_distill/extend_token_seq_ratio"] = \ (distill_special_token_mask.sum(dim=1) > 0).float().mean().item() return timing def wake_up(self): """Wake up all rollout replica instances.""" self._run_all([replica.wake_up() for replica in self.rollout_replicas]) def sleep(self): """Sleep all rollout replica instances.""" self._run_all([replica.sleep() for replica in self.rollout_replicas]) def _run_all(self, tasks: list[asyncio.Task]): async def run_all(): await asyncio.gather(*tasks) asyncio.run(run_all()) ================================================ FILE: verl_distillation/verl/experimental/agent_loop/single_turn_agent_loop.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # limitations under the License. import copy import logging import os from typing import Any from uuid import uuid4 from verl.experimental.agent_loop.agent_loop import AgentLoopBase, AgentLoopOutput, register from verl.utils.profiler import simple_timer logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) @register("single_turn_agent") class SingleTurnAgentLoop(AgentLoopBase): """Naive agent loop that only does single-turn chat completion.""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.prompt_length = self.config.actor_rollout_ref.rollout.prompt_length self.response_length = self.config.actor_rollout_ref.rollout.response_length self.apply_chat_template_kwargs = self.config.data.get("apply_chat_template_kwargs", {}) async def run(self, sampling_params: dict[str, Any], **kwargs) -> AgentLoopOutput: messages = list(kwargs["raw_prompt"]) image_data = copy.deepcopy((kwargs.get("multi_modal_data") or {}).get("image", None)) metrics = {} request_id = uuid4().hex # Use processor if available for multimodal support if self.processor is not None: raw_prompt = await self.loop.run_in_executor( None, lambda: self.processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=False, **self.apply_chat_template_kwargs, ), ) model_inputs = self.processor(text=[raw_prompt], images=image_data, return_tensors="pt") prompt_ids = model_inputs.pop("input_ids").squeeze(0).tolist() else: prompt_ids = await self.loop.run_in_executor( None, lambda: self.tokenizer.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, **self.apply_chat_template_kwargs ), ) with simple_timer("generate_sequences", metrics): output = await self.server_manager.generate( request_id=request_id, prompt_ids=prompt_ids, sampling_params=sampling_params, image_data=image_data ) response_mask = [1] * len(output.token_ids) output = AgentLoopOutput( prompt_ids=prompt_ids, response_ids=output.token_ids[: self.response_length], response_mask=response_mask[: self.response_length], response_logprobs=output.log_probs[: self.response_length] if output.log_probs else None, multi_modal_data={"image": image_data} if image_data is not None else {}, num_turns=2, metrics=metrics, ) return output ================================================ FILE: verl_distillation/verl/experimental/agent_loop/tool_agent_loop.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
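# Overview: the ToolAgentLoop below is driven by a small state machine over
# AgentState. The transitions implemented by the handlers are:
#   PENDING -> GENERATING
#   GENERATING -> PROCESSING_TOOLS | INTERACTING | TERMINATED
#   PROCESSING_TOOLS -> GENERATING | TERMINATED
#   INTERACTING -> GENERATING | TERMINATED
# A minimal, self-contained sketch of this driver pattern (the stub transition
# table is illustrative only; the real handlers are async and decide the next
# state from model output, tool calls, and turn/length limits):
def _state_machine_sketch() -> list[str]:
    """Drive a stub state machine until it terminates, like ToolAgentLoop.run."""
    transitions = {"pending": "generating", "generating": "processing_tools", "processing_tools": "terminated"}
    visited = []
    state = "pending"
    while state != "terminated":
        visited.append(state)
        state = transitions[state]  # a real handler computes this dynamically
    return visited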
import asyncio import copy import json import logging import os from enum import Enum from typing import Any, Optional from uuid import uuid4 from verl.experimental.agent_loop.agent_loop import (AgentLoopBase, AgentLoopOutput, register) from verl.experimental.agent_loop.tool_parser import FunctionCall, ToolParser from verl.experimental.agent_loop.utils import ( add_generation_prompt_for_gpt_oss, format_gpt_oss_tool_response_manually) from verl.interactions.base import BaseInteraction from verl.interactions.utils.interaction_registry import \ initialize_interactions_from_config from verl.tools.schemas import ToolResponse from verl.tools.utils.tool_registry import initialize_tools_from_config from verl.utils.profiler import simple_timer from verl.utils.rollout_trace import rollout_trace_op logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) class AgentState(Enum): PENDING = "pending" GENERATING = "generating" PROCESSING_TOOLS = "processing_tools" TERMINATED = "terminated" INTERACTING = "interacting" class AgentData: """Encapsulates all state variables for the agent loop.""" def __init__( self, messages: list[dict[str, Any]], image_data: Any, metrics: dict[str, Any], request_id: str, tools_kwargs: dict[str, Any], interaction: Optional[BaseInteraction] = None, interaction_kwargs: Optional[dict[str, Any]] = None, ): self.messages = messages self.image_data = image_data self.metrics = metrics self.request_id = request_id self.tools_kwargs = tools_kwargs self.interaction = interaction self.interaction_kwargs = interaction_kwargs or {} # State variables self.prompt_ids: list[int] = [] self.response_ids: list[int] = [] self.response_mask: list[int] = [] self.distill_special_token_mask: list[int] = [] self.response_logprobs: list[float] = [] self.turn_scores: list[float] = [] self.tool_rewards: list[float] = [] self.user_turns = 0 self.assistant_turns = 0 # Temporary state for tool calls self.tool_calls: list[FunctionCall] = [] @register("tool_agent") class ToolAgentLoop(AgentLoopBase): @classmethod def init_class(cls, config, tokenizer, processor, **kwargs): if cls._class_initialized: return cls._class_initialized = True print("Performing class-level ToolAgentLoop initialization") # Initialize tools from config file cls.tokenizer = tokenizer cls.processor = processor cls.max_user_turns = config.actor_rollout_ref.rollout.multi_turn.max_user_turns cls.max_assistant_turns = config.actor_rollout_ref.rollout.multi_turn.max_assistant_turns cls.max_parallel_calls = config.actor_rollout_ref.rollout.multi_turn.max_parallel_calls cls.max_tool_response_length = config.actor_rollout_ref.rollout.multi_turn.max_tool_response_length cls.tool_response_truncate_side = config.actor_rollout_ref.rollout.multi_turn.tool_response_truncate_side tool_config_path = config.actor_rollout_ref.rollout.multi_turn.tool_config_path tool_list = initialize_tools_from_config(tool_config_path) if tool_config_path else [] cls.tools = {tool.name: tool for tool in tool_list} cls.tool_schemas = [tool.tool_schema.model_dump(exclude_unset=True, exclude_none=True) for tool in tool_list] cls.tool_parser = ToolParser.get_tool_parser(config.actor_rollout_ref.rollout.multi_turn.format, cls.tokenizer) cls.tool_parser_name = config.actor_rollout_ref.rollout.multi_turn.format print(f"Initialized tools: {cls.tools}") cls.apply_chat_template_kwargs = config.data.get("apply_chat_template_kwargs", {}) cls.prompt_length = config.actor_rollout_ref.rollout.prompt_length cls.response_length = 
config.actor_rollout_ref.rollout.response_length cls.system_prompt = tokenizer.apply_chat_template( [{}], add_generation_prompt=False, tokenize=True, **cls.apply_chat_template_kwargs ) cls.extend_vocab_start_token = config.actor_rollout_ref.rollout.extend_vocab_start_token cls.mask_response_if_have_extend_token = config.actor_rollout_ref.rollout.mask_response_if_have_extend_token # Initialize interactions from config file cls.interaction_config_file = config.actor_rollout_ref.rollout.multi_turn.interaction_config_path if cls.interaction_config_file: cls.interaction_map: dict[str, BaseInteraction] = cls._initialize_interactions(cls.interaction_config_file) @rollout_trace_op async def run(self, sampling_params: dict[str, Any], **kwargs) -> AgentLoopOutput: messages = list(kwargs["raw_prompt"]) image_data = copy.deepcopy(kwargs.get("multi_modal_data", {}).get("image", None)) metrics = {} request_id = uuid4().hex tools_kwargs = kwargs.get("tools_kwargs", {}) # Initialize interaction if needed interaction = None interaction_kwargs = {} if self.interaction_config_file: interaction_kwargs = kwargs["extra_info"]["interaction_kwargs"] if "name" not in interaction_kwargs: raise ValueError("'name' key is required in interaction_kwargs") interaction_name = interaction_kwargs["name"] if interaction_name not in self.interaction_map: raise ValueError( f"Interaction '{interaction_name}' not found in interaction_map. Available interactions: " f"{list(self.interaction_map.keys())}" ) interaction = self.interaction_map[interaction_name] await interaction.start_interaction(request_id, **interaction_kwargs) # Create AgentData instance to encapsulate all state agent_data = AgentData( messages=messages, image_data=image_data, metrics=metrics, request_id=request_id, tools_kwargs=tools_kwargs, interaction=interaction, interaction_kwargs=interaction_kwargs, ) # State machine loop state = AgentState.PENDING while state != AgentState.TERMINATED: if state == AgentState.PENDING: state = await self._handle_pending_state(agent_data, sampling_params) elif state == AgentState.GENERATING: state = await self._handle_generating_state(agent_data, sampling_params) elif state == AgentState.PROCESSING_TOOLS: state = await self._handle_processing_tools_state(agent_data) elif state == AgentState.INTERACTING: state = await self._handle_interacting_state(agent_data) else: logger.error(f"Invalid state: {state}") state = AgentState.TERMINATED # Finalize output response_ids = agent_data.prompt_ids[-len(agent_data.response_mask) :] prompt_ids = agent_data.prompt_ids[: len(agent_data.prompt_ids) - len(agent_data.response_mask)] multi_modal_data = {"image": agent_data.image_data} if agent_data.image_data is not None else {} output = AgentLoopOutput( prompt_ids=prompt_ids, response_ids=response_ids[: self.response_length], response_mask=agent_data.response_mask[: self.response_length], multi_modal_data=multi_modal_data, response_logprobs=agent_data.response_logprobs[: self.response_length] if agent_data.response_logprobs else None, distill_special_token_mask=agent_data.distill_special_token_mask[: self.response_length] if agent_data.distill_special_token_mask else None, num_turns=agent_data.user_turns + agent_data.assistant_turns + 1, metrics=agent_data.metrics, extra_fields={}, ) output.extra_fields.update({"turn_scores": agent_data.turn_scores, "tool_rewards": agent_data.tool_rewards}) return output async def _handle_pending_state(self, agent_data: AgentData, sampling_params: dict[str, Any]) -> AgentState: """Handle the pending state: 
prepare the prompt and start generation.""" if self.processor is not None: raw_prompt = await self.loop.run_in_executor( None, lambda: self.processor.apply_chat_template( agent_data.messages, tools=self.tool_schemas, add_generation_prompt=True, tokenize=False, **self.apply_chat_template_kwargs, ), ) model_inputs = self.processor(text=[raw_prompt], images=agent_data.image_data, return_tensors="pt") agent_data.prompt_ids = model_inputs.pop("input_ids").squeeze(0).tolist() else: agent_data.prompt_ids = await self.loop.run_in_executor( None, lambda: self.tokenizer.apply_chat_template( agent_data.messages, tools=self.tool_schemas, add_generation_prompt=True, tokenize=True, **self.apply_chat_template_kwargs, ), ) return AgentState.GENERATING async def _handle_generating_state( self, agent_data: AgentData, sampling_params: dict[str, Any], ignore_termination: bool = False ) -> AgentState: """Handle the generating state: generate model response and check for tool calls.""" add_messages: list[dict[str, Any]] = [] with simple_timer("generate_sequences", agent_data.metrics): output = await self.server_manager.generate( request_id=agent_data.request_id, prompt_ids=agent_data.prompt_ids, sampling_params=sampling_params, image_data=agent_data.image_data, ) agent_data.assistant_turns += 1 agent_data.response_ids = output.token_ids agent_data.prompt_ids += agent_data.response_ids distill_special_token_mask = [] response_mask = [1] * len(agent_data.response_ids) if self.extend_vocab_start_token is not None: assert isinstance(self.extend_vocab_start_token, int) for idx, token in enumerate(agent_data.response_ids): if token >= self.extend_vocab_start_token: distill_special_token_mask.append(1) else: distill_special_token_mask.append(0) try: first_one_index = distill_special_token_mask.index(1) response_mask[first_one_index + 1:] = [0] * (len(response_mask) - first_one_index - 1) except ValueError: pass if self.mask_response_if_have_extend_token: if sum(distill_special_token_mask) > 0: response_mask = [0] * len(agent_data.response_ids) else: distill_special_token_mask = [0] * len(agent_data.response_ids) agent_data.response_mask += response_mask agent_data.distill_special_token_mask += distill_special_token_mask if output.log_probs: agent_data.response_logprobs += output.log_probs # Check termination conditions if not ignore_termination and len(agent_data.response_mask) >= self.response_length: return AgentState.TERMINATED if self.max_assistant_turns and agent_data.assistant_turns >= self.max_assistant_turns: return AgentState.TERMINATED if self.max_user_turns and agent_data.user_turns >= self.max_user_turns: return AgentState.TERMINATED # Extract tool calls _, agent_data.tool_calls = await self.tool_parser.extract_tool_calls(agent_data.response_ids) # Handle interaction if needed if self.interaction_config_file: assistant_message = await self.loop.run_in_executor( None, lambda: self.tokenizer.decode(agent_data.response_ids, skip_special_tokens=True) ) add_messages.append({"role": "assistant", "content": assistant_message}) agent_data.messages.extend(add_messages) # Determine next state if agent_data.tool_calls: return AgentState.PROCESSING_TOOLS elif self.interaction_config_file: return AgentState.INTERACTING else: return AgentState.TERMINATED async def _handle_processing_tools_state(self, agent_data: AgentData) -> AgentState: """Handle the processing tools state: execute tool calls and prepare tool responses.""" add_messages: list[dict[str, Any]] = [] new_images_this_turn: list[Any] = [] # Local variable 
instead of agent_data attribute tasks = [] tool_call_names = [] for tool_call in agent_data.tool_calls[: self.max_parallel_calls]: tasks.append(self._call_tool(tool_call, agent_data.tools_kwargs)) tool_call_names.append(tool_call.name) with simple_timer("tool_calls", agent_data.metrics): responses = await asyncio.gather(*tasks) # Process tool responses and update multi_modal_data # Removed: agent_data.new_images_this_turn = [] for tool_response, tool_reward, _ in responses: # Create message from tool response if tool_response.image or tool_response.video: # Multi-modal content with structured format if not getattr(self.processor, "image_processor", None): raise ValueError( "Multimedia data can only be processed by `processor`, but the processor is None. " "This error is often caused if you are using an LLM but your tool returns multimodal " "data. Please use a VLM as the base model." ) content = [] if tool_response.image: content.append({"type": "image"}) if tool_response.video: content.append({"type": "video"}) if tool_response.text: content.append({"type": "text", "text": tool_response.text}) message = {"role": "tool", "content": content} else: # Text-only content message = {"role": "tool", "content": tool_response.text or ""} add_messages.append(message) # Handle image data if tool_response.image: if agent_data.image_data is None: agent_data.image_data = [] elif not isinstance(agent_data.image_data, list): agent_data.image_data = [agent_data.image_data] # Add new image data if isinstance(tool_response.image, list): # Ensure all elements in the list are valid image objects for img in tool_response.image: if img is not None: # Add a check to ensure the image is not None agent_data.image_data.append(img) new_images_this_turn.append(img) # Using local variable else: # Ensure the image is not None if tool_response.image is not None: agent_data.image_data.append(tool_response.image) new_images_this_turn.append(tool_response.image) # Using local variable # Handle video data if tool_response.video: # Currently not supported, raise informative error logger.warning("Multimedia type 'video' is not currently supported. Only 'image' is supported.") raise NotImplementedError( "Multimedia type 'video' is not currently supported. Only 'image' is supported."
) if tool_reward is not None: agent_data.tool_rewards.append(tool_reward) agent_data.messages.extend(add_messages) # Update prompt with tool responses if self.processor is not None: raw_tool_response = await self.loop.run_in_executor( None, lambda: self.processor.apply_chat_template( add_messages, add_generation_prompt=True, tokenize=False, **self.apply_chat_template_kwargs, ), ) # Use only the new images from this turn for processing tool responses current_images = new_images_this_turn if new_images_this_turn else None # Using local variable model_inputs = self.processor(text=[raw_tool_response], images=current_images, return_tensors="pt") response_ids = model_inputs.pop("input_ids").squeeze(0).tolist() else: if self.tool_parser_name == "gpt-oss": logger.info("manually format tool responses for gpt-oss") # Format tool responses manually tool_response_texts = [] for i, tool_msg in enumerate(add_messages): actual_tool_name = tool_call_names[i] formatted = format_gpt_oss_tool_response_manually(tool_msg["content"], actual_tool_name) tool_response_texts.append(formatted) tool_response_text = add_generation_prompt_for_gpt_oss("".join(tool_response_texts)) response_ids = await self.loop.run_in_executor( None, lambda: self.tokenizer.encode(tool_response_text, add_special_tokens=False) ) else: response_ids = await self.loop.run_in_executor( None, lambda: self.tokenizer.apply_chat_template(add_messages, add_generation_prompt=True, tokenize=True), ) response_ids = response_ids[len(self.system_prompt) :] if len(agent_data.response_mask) + len(response_ids) >= self.response_length: return AgentState.TERMINATED # Update prompt_ids and response_mask agent_data.prompt_ids += response_ids agent_data.response_mask += [0] * len(response_ids) if agent_data.response_logprobs: agent_data.response_logprobs += [0.0] * len(response_ids) agent_data.user_turns += 1 return AgentState.GENERATING async def _handle_interacting_state(self, agent_data: AgentData) -> AgentState: """Handle the interacting state: get user input from interaction.""" ( should_terminate_sequence, interaction_responses, reward, metrics, ) = await agent_data.interaction.generate_response( agent_data.request_id, agent_data.messages, **agent_data.interaction_kwargs ) agent_data.user_turns += 1 add_messages: list[dict[str, Any]] = [{"role": "user", "content": interaction_responses}] agent_data.messages.extend(add_messages) if reward is not None: agent_data.turn_scores.append(reward) # Update prompt with user responses (similar to _handle_processing_tools_state) if self.processor is not None: raw_user_response = await self.loop.run_in_executor( None, lambda: self.processor.apply_chat_template( add_messages, add_generation_prompt=True, tokenize=False, **self.apply_chat_template_kwargs, ), ) model_inputs = self.processor(text=[raw_user_response], images=None, return_tensors="pt") response_ids = model_inputs.pop("input_ids").squeeze(0).tolist() else: response_ids = await self.loop.run_in_executor( None, lambda: self.tokenizer.apply_chat_template(add_messages, add_generation_prompt=True, tokenize=True), ) response_ids = response_ids[len(self.system_prompt) :] # Update prompt_ids and response_mask agent_data.prompt_ids += response_ids agent_data.response_mask += [0] * len(response_ids) if agent_data.response_logprobs: agent_data.response_logprobs += [0.0] * len(response_ids) # double check prompt # Check termination condition if should_terminate_sequence: return AgentState.TERMINATED else: return AgentState.GENERATING async def _call_tool( self, 
tool_call: FunctionCall, tools_kwargs: dict[str, Any] ) -> tuple[ToolResponse, float, dict]: """Call tool and return tool response.""" tool, instance_id = None, None try: # TODO: append malformed tool_call to the prompt: invalid function name or arguments tool_name = tool_call.name tool_args = json.loads(tool_call.arguments) tool = self.tools[tool_name] kwargs = tools_kwargs.get(tool_name, {}) instance_id, _ = await tool.create(create_kwargs=kwargs.get("create_kwargs", {})) tool_execution_response, tool_reward, res = await tool.execute(instance_id, tool_args) except Exception as e: logger.warning(f"Error when executing tool: {e}") return ( ToolResponse( text=f"Error when executing tool: {e}", ), 0.0, {}, ) finally: if tool and instance_id: await tool.release(instance_id) tool_response_text = tool_execution_response.text if tool_response_text and len(tool_response_text) > self.max_tool_response_length: if self.tool_response_truncate_side == "left": tool_response_text = tool_response_text[: self.max_tool_response_length] + "...(truncated)" elif self.tool_response_truncate_side == "right": tool_response_text = "(truncated)..." + tool_response_text[-self.max_tool_response_length :] else: length = self.max_tool_response_length // 2 tool_response_text = tool_response_text[:length] + "...(truncated)..." + tool_response_text[-length:] # Create ToolResponse from tool execution result tool_response_kwargs = {"text": tool_response_text} # Add multimedia data if present for attr_name in ["image", "video"]: if hasattr(tool_execution_response, attr_name): attr_value = getattr(tool_execution_response, attr_name) if attr_value is not None: tool_response_kwargs[attr_name] = attr_value return ToolResponse(**tool_response_kwargs), tool_reward, res @classmethod def _initialize_interactions(cls, interaction_config_file): """Initialize interactions from configuration. Returns: dict[str, BaseInteraction]: A dictionary mapping interaction names to interaction instances. """ if interaction_config_file is None: return {} interaction_map = initialize_interactions_from_config(interaction_config_file) logger.info(f"Initialize interactions from configuration: interaction_map: {list(interaction_map.keys())}") return interaction_map ================================================ FILE: verl_distillation/verl/experimental/agent_loop/tool_parser.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import asyncio import json import logging import os from abc import ABC, abstractmethod import regex from pydantic import BaseModel from verl.utils.rollout_trace import rollout_trace_op logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) class FunctionCall(BaseModel): arguments: str """ The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. 
    Validate the arguments in your code before calling your function.
    """

    name: str
    """The name of the function to call."""


class ToolParser(ABC):
    _registry: dict[str, type["ToolParser"]] = {}

    def __init__(self, tokenizer) -> None:
        self.tokenizer = tokenizer

    @abstractmethod
    async def extract_tool_calls(self, responses_ids: list[int]) -> tuple[str, list[FunctionCall]]:
        """Extract tool calls from the responses.

        Args:
            responses_ids (List[int]): The ids of the responses.

        Returns:
            Tuple[str, List[FunctionCall]]: Content and extracted tool calls.
        """
        raise NotImplementedError

    @classmethod
    def get_tool_parser(cls, name: str, tokenizer):
        if name not in cls._registry:
            raise ValueError(f"Unknown tool parser: {name}")
        return cls._registry[name](tokenizer)

    @classmethod
    def register(cls, name: str):
        def decorator(subclass: type[ToolParser]) -> type[ToolParser]:
            cls._registry[name] = subclass
            return subclass

        return decorator


@ToolParser.register("hermes")
class HermesToolParser(ToolParser):
    """Adapted from https://github.com/vllm-project/vllm/blob/v0.9.1/vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py"""

    def __init__(self, tokenizer) -> None:
        super().__init__(tokenizer)

        self.tool_call_start_token: str = "<tool_call>"
        self.tool_call_end_token: str = "</tool_call>"
        self.tool_call_regex = regex.compile(r"<tool_call>(.*?)</tool_call>", regex.DOTALL)

    @rollout_trace_op
    async def extract_tool_calls(self, responses_ids: list[int]) -> tuple[str, list[FunctionCall]]:
        loop = asyncio.get_running_loop()
        text = await loop.run_in_executor(None, self.tokenizer.decode, responses_ids)
        if self.tool_call_start_token not in text or self.tool_call_end_token not in text:
            return text, []

        matches = self.tool_call_regex.findall(text)
        function_calls = []
        for match in matches:
            try:
                function_call = json.loads(match)
                name, arguments = function_call["name"], function_call["arguments"]
                function_calls.append(FunctionCall(name=name, arguments=json.dumps(arguments, ensure_ascii=False)))
            except Exception as e:
                logger.error(f"Failed to decode tool call: {e}")

        # remaining text, excluding the tool call tokens
        content = self.tool_call_regex.sub("", text)

        return content, function_calls


@ToolParser.register("gpt-oss")
class GptOssToolParser(ToolParser):
    """Tool parser for the gpt-oss model.

    Adapted from https://github.com/sgl-project/sglang/blob/main/python/sglang/srt/function_call/gpt_oss_detector.py

    Args:
        tokenizer: The tokenizer to use.
    """

    def __init__(self, tokenizer) -> None:
        super().__init__(tokenizer)
        # Check https://cookbook.openai.com/articles/openai-harmony for more details.
        self.cot_pattern = regex.compile(
            r"<\|start\|>assistant<\|channel\|>analysis<\|message\|>.*?<\|end\|>", regex.DOTALL
        )
        # <|start|>assistant may be pre-appended in prompts, so we need to remove it.
        self.partial_cot_pattern = regex.compile(r"<\|channel\|>analysis<\|message\|>(.*?)<\|end\|>", regex.DOTALL)
        self.tool_call_pattern = regex.compile(
            r"<\|start\|>assistant<\|channel\|>[^<]* to=functions\.([^<]+) "
            r"<\|constrain\|>json<\|message\|>(.*?)<\|call\|>",
            regex.DOTALL,
        )

    @rollout_trace_op
    async def extract_tool_calls(self, responses_ids: list[int]) -> tuple[str, list[FunctionCall]]:
        loop = asyncio.get_running_loop()
        # We need to keep special tokens for the gpt-oss model for better tool call extraction.
        text = await loop.run_in_executor(
            None, lambda: self.tokenizer.decode(responses_ids, skip_special_tokens=False)
        )
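        # Illustrative example (hypothetical tool name and arguments) of the
        # harmony-format span that tool_call_pattern above is designed to match:
        #   <|start|>assistant<|channel|>commentary to=functions.get_weather <|constrain|>json<|message|>{"city": "Tokyo"}<|call|>
        # The first capture group yields the function name ("get_weather") and
        # the second yields the raw JSON arguments string.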
        # Need to remove padding tokens for better tool call extraction.
        text = text.replace(self.tokenizer.pad_token, "")
        # Need to remove the CoT, since it may contain tool-call-like tokens that are not valid tool calls.
        text = regex.sub(self.cot_pattern, "", text)
        text = regex.sub(self.partial_cot_pattern, "", text)

        # Check whether there are any tool calls in the text.
        matches = regex.findall(self.tool_call_pattern, text)
        if not matches:
            return text, []

        function_calls = []
        for match in matches:
            try:
                name, arguments = match[0], match[1]
                # Don't check whether `arguments` is valid JSON; leave that to the client.
                function_calls.append(FunctionCall(name=name, arguments=arguments))
            except Exception as e:
                logger.error(f"Failed to decode tool call: {e}")

        # remaining text, excluding the tool call tokens
        content = regex.sub(self.tool_call_pattern, "", text)

        return content, function_calls


================================================
FILE: verl_distillation/verl/experimental/agent_loop/utils.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


# tokenizer.apply_chat_template does not work properly for the gpt-oss model,
# because the chat template requires tool call messages in order to parse tool
# response messages, so we need to format the tool response manually.
def format_gpt_oss_tool_response_manually(tool_response: str, tool_call_name: str) -> str:
    """Format tool response for the gpt-oss model.

    Args:
        tool_response: Tool response string
        tool_call_name: Name of the tool that was called

    Returns:
        Formatted tool response string
    """
    return f"<|start|>functions.{tool_call_name} to=assistant<|channel|>commentary<|message|>{tool_response}<|end|>"


def add_generation_prompt_for_gpt_oss(message_content: str) -> str:
    """Add generation prompt for the gpt-oss model.

    Args:
        message_content: Message content string

    Returns:
        Message content string with generation prompt
    """
    return message_content + "<|start|>assistant"


================================================
FILE: verl_distillation/verl/experimental/dataset/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


================================================
FILE: verl_distillation/verl/experimental/dataset/sampler.py
================================================
# Copyright 2025 Amazon.com Inc and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from abc import abstractmethod from collections.abc import Sized from omegaconf import DictConfig from torch.utils.data import Sampler from verl import DataProto class AbstractSampler(Sampler[int]): """Abstract interface for custom samplers.""" @abstractmethod def __init__( self, data_source: Sized, data_config: DictConfig, ): pass class AbstractCurriculumSampler(AbstractSampler): """Experimental interface for curriculum learning samplers.""" @abstractmethod def update(self, batch: DataProto) -> None: pass ================================================ FILE: verl_distillation/verl/experimental/dynamic_dataset/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: verl_distillation/verl/experimental/dynamic_dataset/dynamicgen_dataset.py ================================================ # Copyright 2025 Amazon.com Inc and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Dataset class that enables dynamic data generation strategies between iterations of training. This class extends RLHFDataset and uses an AbstractDataGen instance to generate data. This is especially useful in settings where proposer model generates new tasks based on rollout data. """ import logging from abc import ABC, abstractmethod from typing import Optional import datasets from omegaconf import DictConfig from torch.utils.data import Dataset from transformers import PreTrainedTokenizer, ProcessorMixin from verl import DataProto from verl.utils.dataset import RLHFDataset from verl.utils.import_utils import load_extern_type logger = logging.getLogger(__name__) class AbstractDataGenerator(ABC): def __init__(self, config: DictConfig): self.config = config @abstractmethod def generate(self, dataset: Dataset) -> datasets.Dataset: """ Generate method must be implemented by subclasses. Args: dataset: The dataset to generate from. Returns: Processed data or result as implemented by the subclass. 
""" pass class MockDataGenerator(AbstractDataGenerator): """ A noop data gen class that only reappends the first datapoint. This class is useful as a placeholder and testing. """ def __init__(self, config: DictConfig = None): super().__init__(config) def generate(self, dataset: Dataset) -> datasets.Dataset: print("MockDataGenerator: No operation performed on the dataset.") return dataset.dataframe.select([0]) class DynamicGenDataset(RLHFDataset): """ A dataset class that uses a data generation strategy to process data. This class extends RLHFDataset and uses an AbstractDataGen instance to generate data. """ def __init__( self, data_files: str | list[str], tokenizer: PreTrainedTokenizer, config: DictConfig, processor: Optional[ProcessorMixin] = None, ): super().__init__(data_files, tokenizer, config, processor) self.datagen: AbstractDataGenerator = config.datagen assert "datagen" in config and config.datagen.get("path", None) is not None, ( f"datagen path is not set in config: {config}" ) # Dynamically load the custom datagen class datagen_cls = load_extern_type(config.datagen.path, config.datagen.name) # Verify that the custom datagen class inherits from AbstractDataGenerator abs_cls = AbstractDataGenerator if not issubclass(datagen_cls, abs_cls): raise TypeError( f"The custom datagen class '{config.datagen.name}' from '{config.datagen.path}'" + " must inherit from {abs_cls}" ) self.data_generator = datagen_cls(config.datagen) self.on_batch_end() def append_dataframe(self, new_dataframe: datasets.Dataset): new_dataframe = self.maybe_filter_out_long_prompts(new_dataframe) self.dataframe = datasets.concatenate_datasets([self.dataframe, new_dataframe]) logger.info(f"new dataset len: {len(self.dataframe)}") def on_batch_end(self, batch: DataProto) -> None: """ Generate data using the provided data generation strategy. Note: This method is intended to change the dataset after each training batch. """ new_data = self.data_generator.generate(self) self.append_dataframe(new_data) ================================================ FILE: verl_distillation/verl/experimental/reward/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .reward_manager import RewardManagerWorker from .reward_model import RewardModelManager __all__ = ["RewardModelManager", "RewardManagerWorker"] ================================================ FILE: verl_distillation/verl/experimental/reward/reward_loop/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .registry import get_reward_loop_manager_cls, register  # noqa: I001
from .dapo import DAPORewardLoopManager
from .naive import NaiveRewardLoopManager

__all__ = [
    "DAPORewardLoopManager",
    "NaiveRewardLoopManager",
    "register",
    "get_reward_loop_manager_cls",
]


================================================
FILE: verl_distillation/verl/experimental/reward/reward_loop/base.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import asyncio
import logging
import os
from abc import ABC, abstractmethod

from omegaconf import DictConfig
from transformers import AutoTokenizer

from verl import DataProto

logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))


class RewardLoopManagerBase(ABC):
    _class_initialized = False

    def __init__(self, config: DictConfig, tokenizer: AutoTokenizer):
        """Initialize the reward loop manager.

        Args:
            config (DictConfig): YAML config.
            tokenizer (AutoTokenizer): Tokenizer for tokenizing messages.
        """
        self.config = config
        self.tokenizer = tokenizer
        self.loop = asyncio.get_running_loop()
        self.init_class(config, tokenizer)

    @classmethod
    def init_class(cls, config: DictConfig, tokenizer: AutoTokenizer):
        """Initialize class state shared across all instances."""
        if cls._class_initialized:
            return
        cls._class_initialized = True

    @abstractmethod
    async def run_single(self, data: DataProto):
        raise NotImplementedError


================================================
FILE: verl_distillation/verl/experimental/reward/reward_loop/dapo.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
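The base class above is intentionally small: a subclass only needs to implement `run_single`. As a minimal sketch (not part of the repository; the `"constant"` name and class are hypothetical), a custom manager could be registered like this. Note that `RewardLoopManagerBase.__init__` calls `asyncio.get_running_loop()`, so instances must be created inside a running event loop:

```python
# Hypothetical sketch: a trivial reward loop manager for pipeline smoke tests.
from verl import DataProto
from verl.experimental.reward.reward_loop import register
from verl.experimental.reward.reward_loop.base import RewardLoopManagerBase


@register("constant")
class ConstantRewardLoopManager(RewardLoopManagerBase):
    """Scores every sample with a fixed reward, ignoring the response."""

    async def run_single(self, data: DataProto) -> dict:
        assert len(data) == 1, "Only support single data item"
        # Mirror the result contract used by the managers in this package.
        return {"reward_score": 1.0, "reward_extra_info": {"acc": 1.0}}
```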
import inspect from verl import DataProto from verl.experimental.reward.reward_loop import register from verl.experimental.reward.reward_loop.base import RewardLoopManagerBase from verl.utils.reward_score import default_compute_score @register("dapo") class DAPORewardLoopManager(RewardLoopManagerBase): """Reward loop for DAPO.""" def __init__(self, config, tokenizer, compute_score=None, reward_router_address=None, reward_model_tokenizer=None): super().__init__(config, tokenizer) self.compute_score = compute_score or default_compute_score self.is_async_reward_score = inspect.iscoroutinefunction(self.compute_score) # DAPO Reward Config overlong_buffer_cfg = config.reward_model.get("reward_kwargs", {}).get("overlong_buffer_cfg", None) self.overlong_buffer_cfg = overlong_buffer_cfg self.max_resp_len = config.reward_model.get("reward_kwargs", {}).get("max_resp_len", None) self.reward_router_address = reward_router_address self.reward_model_tokenizer = reward_model_tokenizer if self.overlong_buffer_cfg is not None: assert self.max_resp_len is not None, ( f"max_resp_len must be provided if {overlong_buffer_cfg=}, but got None" ) assert self.max_resp_len >= self.overlong_buffer_cfg.len, ( "max_resp_len must be larger than overlong_buffer.len" ) async def run_single(self, data: DataProto) -> dict: assert len(data) == 1, "Only support single data item" data_item = data[0] response_ids = data_item.batch["responses"] response_length = response_ids.shape[-1] valid_response_length = data_item.batch["attention_mask"][-response_length:].sum() valid_response_ids = response_ids[:valid_response_length] data_source = data_item.non_tensor_batch["data_source"] ground_truth = data_item.non_tensor_batch["reward_model"]["ground_truth"] extra_info = data_item.non_tensor_batch.get("extra_info", {}) response_str = await self.loop.run_in_executor( None, lambda: self.tokenizer.decode(valid_response_ids, skip_special_tokens=True) ) if self.is_async_reward_score: result = await self.compute_score( data_source=data_source, solution_str=response_str, ground_truth=ground_truth, extra_info=extra_info, reward_router_address=self.reward_router_address, reward_model_tokenizer=self.reward_model_tokenizer, ) else: result = await self.loop.run_in_executor( None, lambda: self.compute_score( data_source=data_source, solution_str=response_str, ground_truth=ground_truth, extra_info=extra_info, reward_router_address=self.reward_router_address, reward_model_tokenizer=self.reward_model_tokenizer, ), ) reward_extra_info = {} score: float if isinstance(result, dict): score = result["score"] for key, value in result.items(): reward_extra_info[key] = value else: score = result reward_extra_info["acc"] = score reward = score if self.overlong_buffer_cfg is not None and self.overlong_buffer_cfg.enable: overlong_buffer_len = self.overlong_buffer_cfg.len expected_len = self.max_resp_len - overlong_buffer_len exceed_len = valid_response_length - expected_len overlong_penalty_factor = self.overlong_buffer_cfg.penalty_factor overlong_reward = min(-exceed_len / overlong_buffer_len * overlong_penalty_factor, 0) reward += overlong_reward if self.overlong_buffer_cfg.log: reward_extra_info["overlong_reward"] = overlong_reward reward_extra_info["overlong"] = overlong_reward < 0 return {"reward_score": reward, "reward_extra_info": reward_extra_info} ================================================ FILE: verl_distillation/verl/experimental/reward/reward_loop/naive.py ================================================ # Copyright 2024 Bytedance Ltd. 
and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from verl import DataProto from verl.experimental.reward.reward_loop import register from verl.experimental.reward.reward_loop.base import RewardLoopManagerBase from verl.utils.reward_score import default_compute_score @register("naive") class NaiveRewardLoopManager(RewardLoopManagerBase): """The reward manager.""" def __init__(self, config, tokenizer, compute_score=None, reward_router_address=None, reward_model_tokenizer=None): super().__init__(config, tokenizer) self.compute_score = compute_score or default_compute_score self.is_async_reward_score = inspect.iscoroutinefunction(self.compute_score) self.reward_router_address = reward_router_address self.reward_model_tokenizer = reward_model_tokenizer async def run_single(self, data: DataProto) -> dict: assert len(data) == 1, "Only support single data item" data_item = data[0] response_ids = data_item.batch["responses"] response_length = response_ids.shape[-1] valid_response_length = data_item.batch["attention_mask"][-response_length:].sum() valid_response_ids = response_ids[:valid_response_length] data_source = data_item.non_tensor_batch["data_source"] ground_truth = data_item.non_tensor_batch["reward_model"]["ground_truth"] extra_info = data_item.non_tensor_batch.get("extra_info", {}) tool_extra_fields = data_item.non_tensor_batch.get("tool_extra_fields", None) if tool_extra_fields is not None: extra_info.update(tool_extra_fields.items()) response_str = await self.loop.run_in_executor( None, lambda: self.tokenizer.decode(valid_response_ids, skip_special_tokens=True) ) if self.is_async_reward_score: result = await self.compute_score( data_source=data_source, solution_str=response_str, ground_truth=ground_truth, extra_info=extra_info, reward_router_address=self.reward_router_address, reward_model_tokenizer=self.reward_model_tokenizer, ) else: result = await self.loop.run_in_executor( None, lambda: self.compute_score( data_source=data_source, solution_str=response_str, ground_truth=ground_truth, extra_info=extra_info, reward_router_address=self.reward_router_address, reward_model_tokenizer=self.reward_model_tokenizer, ), ) reward_extra_info = {} score: float if isinstance(result, dict): score = result["score"] for key, value in result.items(): reward_extra_info[key] = value else: score = result reward_extra_info["acc"] = score reward = score return {"reward_score": reward, "reward_extra_info": reward_extra_info} ================================================ FILE: verl_distillation/verl/experimental/reward/reward_loop/registry.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Callable from verl.experimental.reward.reward_loop.base import RewardLoopManagerBase __all__ = ["register", "get_reward_loop_manager_cls"] REWARD_LOOP_MANAGER_REGISTRY: dict[str, type[RewardLoopManagerBase]] = {} def register(name: str) -> Callable[[type[RewardLoopManagerBase]], type[RewardLoopManagerBase]]: """Decorator to register a reward loop manager class with a given name. Args: name: `(str)` The name of the reward loop manager. """ def decorator(cls: type[RewardLoopManagerBase]) -> type[RewardLoopManagerBase]: if name in REWARD_LOOP_MANAGER_REGISTRY and REWARD_LOOP_MANAGER_REGISTRY[name] != cls: raise ValueError( f"reward loop manager {name} has already been registered: {REWARD_LOOP_MANAGER_REGISTRY[name]} vs {cls}" ) REWARD_LOOP_MANAGER_REGISTRY[name] = cls return cls return decorator def get_reward_loop_manager_cls(name: str) -> type[RewardLoopManagerBase]: """Get the reward loop manager class with a given name. Args: name: `(str)` The name of the reward loop manager. Returns: `(type)`: The reward loop manager class. """ if name not in REWARD_LOOP_MANAGER_REGISTRY: raise ValueError(f"Unknown reward loop manager: {name}") return REWARD_LOOP_MANAGER_REGISTRY[name] ================================================ FILE: verl_distillation/verl/experimental/reward/reward_manager.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
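For orientation, the registry above is a plain name-to-class map; resolving a configured manager is a one-liner. A minimal sketch, using the `"naive"` name registered earlier in this package:

```python
from verl.experimental.reward.reward_loop import get_reward_loop_manager_cls

# Resolve the manager class named at `reward_model.reward_manager` in the config.
manager_cls = get_reward_loop_manager_cls("naive")  # -> NaiveRewardLoopManager
# An unknown name raises ValueError("Unknown reward loop manager: ...").
```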
import logging import os import ray from omegaconf import DictConfig from verl.experimental.reward.reward_loop import get_reward_loop_manager_cls from verl.protocol import DataProto from verl.trainer.ppo.reward import get_custom_reward_fn from verl.utils import hf_tokenizer from verl.utils.fs import copy_to_local logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) @ray.remote class RewardManagerWorker: def __init__(self, config: DictConfig, reward_router_address: str = None): self.config = config self.reward_router_address = reward_router_address self._init_reward_fn() def _init_reward_fn(self): input_tokenizer_local_path = copy_to_local(self.config.actor_rollout_ref.model.path) self.input_tokenizer = hf_tokenizer(input_tokenizer_local_path, trust_remote_code=True) self.reward_model_tokenizer = None if self.config.reward_model.enable: reward_model_tokenizer_local_path = copy_to_local(self.config.reward_model.model.path) self.reward_model_tokenizer = hf_tokenizer(reward_model_tokenizer_local_path, trust_remote_code=True) self.reward_fn = get_custom_reward_fn(self.config) reward_loop_manager_cls = get_reward_loop_manager_cls(self.config.reward_model.reward_manager) self.reward_loop = reward_loop_manager_cls( self.config, self.input_tokenizer, self.reward_fn, self.reward_router_address, self.reward_model_tokenizer ) async def compute_score(self, data: DataProto) -> DataProto: return await self.reward_loop.run_single(data) ================================================ FILE: verl_distillation/verl/experimental/reward/reward_model.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import asyncio import json import logging import os import aiohttp from openai.types.chat import ChatCompletion from verl import DataProto from verl.single_controller.ray.base import RayWorkerGroup from verl.workers.config import HFModelConfig, RewardModelConfig from verl.workers.rollout.replica import get_rollout_replica_class logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) class RewardModelManager: """Reward model manager.""" def __init__(self, config: RewardModelConfig, worker_group: RayWorkerGroup = None): """ Initialize the reward model manager. Args: config (RewardModelConfig): Reward model configuration. worker_group (RayWorkerGroup, optional): Worker group. Defaults to None. 
""" self.config = config self.worker_group = worker_group self._initialize_llm_servers() self._initialize_router() if self.config.rollout.free_cache_engine: self.sleep() def _initialize_llm_servers(self): rollout_world_size = self.config.rollout.tensor_model_parallel_size world_size = ( self.worker_group.world_size if self.worker_group # colocate mode else self.config.n_gpus_per_node * self.config.nnodes # standalone mode ) num_replicas = world_size // rollout_world_size rollout_replica_class = get_rollout_replica_class(self.config.rollout.name) rollout_config = self.config.rollout model_config = HFModelConfig( path=self.config.model.path, external_lib=self.config.model.external_lib, trust_remote_code=self.config.model.trust_remote_code, ) self.tokenizer = model_config.get_processor() self.rollout_replicas = [ rollout_replica_class( replica_rank=replica_rank, config=rollout_config, model_config=model_config, gpus_per_node=self.config.n_gpus_per_node, is_reward_model=True, ) for replica_rank in range(num_replicas) ] if self.worker_group: self._run_all([server.init_colocated(self.worker_group) for server in self.rollout_replicas]) else: self._run_all([server.init_standalone() for server in self.rollout_replicas]) self.server_handles = [server._server_handle for server in self.rollout_replicas] self.server_addresses = [server._server_address for server in self.rollout_replicas] def _initialize_router(self): worker_urls = [f"http://{server_address}" for server_address in self.server_addresses] if self.config.rollout.name == "sglang": from .router.sglang_router import launch_router_process else: from .router.naive_router import launch_router_process self.router_address, _ = launch_router_process(worker_urls=worker_urls) def get_router_address(self): return self.router_address def wake_up(self): """Wake up all rollout replica instances.""" self._run_all([replica.wake_up() for replica in self.rollout_replicas]) def sleep(self): """Sleep all rollout replica instances.""" self._run_all([replica.sleep() for replica in self.rollout_replicas]) def _run_all(self, tasks: list[asyncio.Task]): async def run_all(): return await asyncio.gather(*tasks) return asyncio.run(run_all()) async def chat_complete(self, chat_complete_request: dict): url = f"http://{self.router_address}/v1/chat/completions" try: timeout = aiohttp.ClientTimeout(total=None) session = aiohttp.ClientSession(timeout=timeout) async with session.post(url, json=chat_complete_request) as resp: output = await resp.text() output = json.loads(output) return ChatCompletion(**output) except Exception as e: raise e finally: await session.close() def generate_sequences(self, prompts: DataProto, sampling_params: dict): chat_complete_requests = [ { "model": self.config.model.path, "messages": list(messages), **sampling_params, } for messages in prompts.non_tensor_batch.get("raw_prompt") ] tasks = [self.chat_complete(chat_complete_request) for chat_complete_request in chat_complete_requests] results = self._run_all(tasks) return results ================================================ FILE: verl_distillation/verl/experimental/reward/router/naive_router.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import asyncio import logging import multiprocessing import os import time from typing import Any import aiohttp import ray import uvicorn from fastapi import FastAPI, Request from fastapi.responses import JSONResponse from verl.workers.rollout.utils import get_free_port, is_valid_ipv6_address logger = logging.getLogger(__name__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) async def _read_async_response(resp: aiohttp.ClientResponse) -> dict[str, Any]: if resp.status == 204 or (resp.content_length == 0): return {} try: return await resp.json(content_type=None) except Exception: try: text = await resp.text() except Exception: return {} return { "content_type": (resp.headers.get("Content-Type") or ""), "text": text, } def launch_router_process( worker_urls: list[str], ): router_ip = ray.util.get_node_ip_address().strip("[]") router_port, _ = get_free_port(router_ip) router_address = ( f"[{router_ip}]:{router_port}" if is_valid_ipv6_address(router_ip) else f"{router_ip}:{router_port}" ) router_process = multiprocessing.Process( target=run_router, args=( router_ip, router_port, worker_urls, ), ) router_process.daemon = True router_process.start() time.sleep(3) assert router_process.is_alive() logger.info(f"Router is running on {router_address}") return router_address, router_process def run_router(router_ip: str, router_port: int, worker_urls: list[str]): router = NaiveRouter(worker_urls=worker_urls, verbose=False) uvicorn.run(router.app, host=router_ip, port=router_port, log_level="warning") class NaiveRouter: def __init__( self, worker_urls: list[str], max_connections: int = 1024, timeout: int = 60, max_attempts: int = 3, retry_delay: float = 2.0, verbose: bool = False, ) -> None: """A minimal async load-balancing router.""" self.verbose = verbose self.app = FastAPI() self.worker_urls = worker_urls self.request_counts = {url: 0 for url in worker_urls} self.max_connections = max_connections self.timeout = timeout self.max_attempts = max_attempts self.retry_delay = retry_delay self.app = FastAPI() # Register startup / shutdown hooks self.app.on_event("startup")(self._on_startup) self.app.on_event("shutdown")(self._on_shutdown) # Catch-all proxy route self.app.api_route("/{endpoint:path}", methods=["GET", "POST"])(self._make_async_request) # Placeholder for aiohttp client self.client = None async def _on_startup(self): """Initialize aiohttp client safely inside the event loop""" connector = aiohttp.TCPConnector( limit=self.max_connections, limit_per_host=self.max_connections // 4, ttl_dns_cache=300, use_dns_cache=True, ) timeout = aiohttp.ClientTimeout(total=None) self.client = aiohttp.ClientSession(connector=connector, timeout=timeout) if self.verbose: logger.info(f"[router] aiohttp client initialized with max_connections={self.max_connections}") async def _on_shutdown(self): """Gracefully close aiohttp client""" if self.client and not self.client.closed: await self.client.close() if self.verbose: logger.info("[router] aiohttp client closed") async def _make_async_request(self, request: Request, endpoint: str): """Proxy single request to a worker URL.""" if not self.worker_urls: 
return JSONResponse(status_code=503, content={"error": "No available workers"}) worker_url = self._select_worker() target_url = f"{worker_url}/{endpoint}" if self.verbose: logger.debug(f"[router] Forwarding request → {target_url}") # Copy request data body = await request.body() headers = dict(request.headers) for attempt in range(self.max_attempts): # Send request to worker try: async with self.client.request(request.method, target_url, data=body, headers=headers) as response: response.raise_for_status() output = await _read_async_response(response) self._release_worker(worker_url) return output except asyncio.TimeoutError: logger.warning(f"Async request to {endpoint} timed out (attempt {attempt + 1})") except aiohttp.ClientConnectorError: logger.warning(f"Connection error for {endpoint} (attempt {attempt + 1})") except aiohttp.ClientResponseError as e: logger.error(f"HTTP error for {endpoint}: {e}") raise except Exception as e: logger.error(f"Unexpected error for {endpoint}: {e}") if attempt == self.max_attempts - 1: raise if attempt < self.max_attempts - 1: await asyncio.sleep(self.retry_delay * (2**attempt)) raise RuntimeError(f"Failed to complete async request to {endpoint} after {self.max_attempts} attempts") def _select_worker(self) -> str: """Select the least-loaded worker (simple round-robin by request count).""" url = min(self.request_counts, key=self.request_counts.get) self.request_counts[url] += 1 return url def _release_worker(self, url: str) -> None: """Mark worker as free after request completes.""" self.request_counts[url] = max(0, self.request_counts[url] - 1) ================================================ FILE: verl_distillation/verl/experimental/reward/router/sglang_router.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
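A brief usage sketch for the naive router defined above (the worker URL is illustrative, not from the repository):

```python
# Hypothetical usage of launch_router_process from naive_router.py.
from verl.experimental.reward.router.naive_router import launch_router_process

# Starts the FastAPI proxy in a daemon process and returns its address
# plus the process handle.
address, process = launch_router_process(worker_urls=["http://10.0.0.1:8000"])
# Requests sent to http://{address}/<endpoint> are forwarded to the worker
# with the fewest in-flight requests, with retries and exponential backoff.
```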
import logging import multiprocessing import os import time import ray import requests from sglang_router.launch_server import RouterArgs, launch_router from verl.workers.rollout.utils import get_free_port, is_valid_ipv6_address logger = logging.getLogger(__name__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) def launch_router_process( worker_urls: list[str], request_timeout: int = 180, max_wait_time: int = 300, timeout: int = 30, ) -> str: router_ip = ray.util.get_node_ip_address().strip("[]") router_port, _ = get_free_port(router_ip) router_address = ( f"[{router_ip}]:{router_port}" if is_valid_ipv6_address(router_ip) else f"{router_ip}:{router_port}" ) router_args = RouterArgs( host=router_ip, port=router_port, worker_urls=worker_urls, balance_abs_threshold=0, log_level="warn", request_timeout_secs=request_timeout, ) router_process = multiprocessing.Process(target=launch_router, args=(router_args,)) router_process.daemon = True router_process.start() time.sleep(3) assert router_process.is_alive() # health check start_time = time.time() url = f"http://{router_address}/health" with requests.Session() as session: while time.time() - start_time < max_wait_time: try: response = session.get(url, timeout=timeout) if response.status_code == 200: break except requests.RequestException as e: logger.debug(f"Health check failed: {e}") time.sleep(2) else: router_process.terminate() raise RuntimeError(f"Router health check failed after {max_wait_time} seconds.") logger.info(f"Router is running on {router_address}") return router_address, router_process ================================================ FILE: verl_distillation/verl/interactions/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: verl_distillation/verl/interactions/base.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from typing import Any, Optional
from uuid import uuid4


class BaseInteraction:
    def __init__(self, config: dict[str, Any]):
        self.config = config
        self.name: str = config.get("name", "interaction_agent")  # general default role name for the agent

    async def start_interaction(self, instance_id: Optional[str] = None, **kwargs) -> str:
        """Create an interaction instance.

        Args:
            instance_id: The instance id of the interaction.

        Returns:
            The instance id of the interaction.
        """
        if instance_id is None:
            return str(uuid4())
        else:
            return instance_id

    async def generate_response(
        self, instance_id: str, messages: list[dict[str, Any]], **kwargs
    ) -> tuple[bool, str, float, dict[str, Any]]:
        """Generates a response for the current turn of interaction.

        Returns a tuple containing:
        - should_terminate_sequence (bool): True if the interaction sequence should end.
        - response_content (str): The textual content of the response.
        - current_turn_score (float): The score for this specific turn/response.
        - additional_data (dict): Any extra information or metadata.
        """
        should_terminate_sequence: bool = False  # if True, end rollout
        response_content: str = "Your current result seems acceptable."
        current_turn_score: float = 0.8
        additional_data: dict[str, Any] = {}
        return should_terminate_sequence, response_content, current_turn_score, additional_data

    async def calculate_score(self) -> float:
        """Calculates a score for the interaction, potentially considering aspects like
        partial exposure and in-context task switching. Should be invoked at the turn level.
        """
        # ...implement the logic to calculate the turn-level score...
        score = 0.0
        return score

    async def finalize_interaction(self) -> None:
        """Finalizes the interaction session and releases any associated state or resources."""
        # ...implement the logic to release state...
        pass


================================================
FILE: verl_distillation/verl/interactions/gsm8k_interaction.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023-2024 SGLang Team
# Copyright 2025 ModelBest Inc. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import os
from typing import Any, Optional
from uuid import uuid4

from verl.utils.reward_score import gsm8k

from .base import BaseInteraction

logger = logging.getLogger(__name__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))


class Gsm8kInteraction(BaseInteraction):
    """A demo interaction for calculating the reward of gsm8k.

    - `start_interaction`: start an interaction instance for a trajectory.
    - `generate_response`: generate the response of the assistant.
    - `calculate_score`: calculate the score of the interaction.
    - `finalize_interaction`: finalize the interaction instance.
""" def __init__(self, config: dict): super().__init__(config) self._instance_dict = {} async def start_interaction( self, instance_id: Optional[str] = None, ground_truth: Optional[str] = None, **kwargs ) -> str: if instance_id is None: instance_id = str(uuid4()) self._instance_dict[instance_id] = { "response": "", "ground_truth": ground_truth, "reward": 0.0, } return instance_id async def generate_response( self, instance_id: str, messages: list[dict[str, Any]], **kwargs ) -> tuple[bool, str, float, dict]: content = "" for i in range(len(messages) - 1, -1, -1): item = messages[i] if item.get("role") == "assistant": content = item.get("content") break self._instance_dict[instance_id]["response"] = content reward = await self.calculate_score(instance_id) if reward == 1.0: response = "Your response is correct!" should_terminate_sequence = True else: response = "Your response is incorrect! You need to reflect on your answer and try again." should_terminate_sequence = False return should_terminate_sequence, response, reward, {} async def calculate_score(self, instance_id: str, **kwargs) -> float: return gsm8k.compute_score( self._instance_dict[instance_id]["response"], self._instance_dict[instance_id]["ground_truth"], method="strict", format_score=0.0, score=1.0, ) async def finalize_interaction(self, instance_id: str, **kwargs) -> None: del self._instance_dict[instance_id] ================================================ FILE: verl_distillation/verl/interactions/utils/__init__.py ================================================ # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: verl_distillation/verl/interactions/utils/interaction_registry.py ================================================ # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import importlib.util import logging import os import sys from omegaconf import OmegaConf logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) def get_interaction_class(cls_name): """Dynamically import and return the interaction class.""" module_name, class_name = cls_name.rsplit(".", 1) if module_name not in sys.modules: spec = importlib.util.find_spec(module_name) module = importlib.util.module_from_spec(spec) sys.modules[module_name] = module spec.loader.exec_module(module) else: module = sys.modules[module_name] interaction_cls = getattr(module, class_name) return interaction_cls def initialize_interactions_from_config(interaction_config_file): """Initialize interactions from configuration file. Args: interaction_config_file: Path to the interaction configuration file. Returns: dict: A dictionary mapping interaction names to BaseInteraction instances. """ interaction_config = OmegaConf.load(interaction_config_file) interaction_map = {} for interaction_item in interaction_config.interaction: cls_name = interaction_item.class_name interaction_cls = get_interaction_class(cls_name) # Extract config and name config = OmegaConf.to_container(interaction_item.config, resolve=True) # Get the interaction name - either from config or derive from class name name = interaction_item.get("name", None) if name is None: # If no name is specified, use the class name as default class_simple_name = cls_name.split(".")[-1] # Remove "Interaction" suffix if present, otherwise use full class name if class_simple_name.endswith("Interaction"): name = class_simple_name[:-11].lower() # Remove "Interaction" (11 chars) else: name = class_simple_name.lower() # Check for duplicate names if name in interaction_map: raise ValueError(f"Duplicate interaction name '{name}' found. Each interaction must have a unique name.") # Inject the name into the config config["name"] = name # Create the interaction instance interaction = interaction_cls(config=config) interaction_map[name] = interaction logger.info(f"Initialized interaction '{name}' with class '{cls_name}'") return interaction_map ================================================ FILE: verl_distillation/verl/interactions/weather_interaction.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os from typing import Any, Optional from uuid import uuid4 from .base import BaseInteraction logger = logging.getLogger(__name__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) class WeatherInteraction(BaseInteraction): """A demo interaction for handling weather-related queries. - `start_interaction`: start a interaction instance for a trajectory. - `generate_response`: generate the response of the assistant. - `calculate_score`: calculate the score of the interaction. - `finalize_interaction`: finalize the interaction instance. 
""" def __init__(self, config: dict): super().__init__(config) self._instance_dict = {} async def start_interaction( self, instance_id: Optional[str] = None, ground_truth: Optional[str] = None, **kwargs ) -> str: if instance_id is None: instance_id = str(uuid4()) self._instance_dict[instance_id] = { "response": "", "ground_truth": ground_truth, "reward": 0.0, } return instance_id async def generate_response( self, instance_id: str, messages: list[dict[str, Any]], **kwargs ) -> tuple[bool, str, float, dict]: content = "no tool call" for i in range(len(messages) - 1, -1, -1): item = messages[i] if item.get("role") == "tool": content = item.get("content") break self._instance_dict[instance_id]["response"] = content reward = await self.calculate_score(instance_id) if reward == 1.0: response = "Thank you for your weather query!" should_terminate_sequence = True else: response = "Please use the weather tool to get the weather information." should_terminate_sequence = True return should_terminate_sequence, response, reward, {} async def calculate_score(self, instance_id: str, **kwargs) -> float: # For weather interaction, we can implement a more complex scoring logic # For now, we'll just return a default score of 1.0 if self._instance_dict[instance_id]["response"] == "no tool call": return 0.0 return 1.0 async def finalize_interaction(self, instance_id: str, **kwargs) -> None: del self._instance_dict[instance_id] ================================================ FILE: verl_distillation/verl/model_merger/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: verl_distillation/verl/model_merger/__main__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This module is used to merge huggingface model and test verl checkpoints from FSDP and Megatron backends. 
To merge FSDP checkpoints:
```sh
python -m verl.model_merger merge \
    --backend fsdp \
    --local_dir checkpoints/verl_fsdp_gsm8k_examples/qwen2_5_0b5_fsdp_saveload/global_step_1/actor \
    --target_dir /path/to/merged_hf_model
```

To merge Megatron checkpoints:
```sh
python -m verl.model_merger merge \
    --backend megatron \
    --tie-word-embedding \
    --local_dir checkpoints/verl_megatron_gsm8k_examples/qwen2_5_0b5_megatron_saveload/global_step_1/actor \
    --target_dir /path/to/merged_hf_model
```

Or use a distributed merge for large models such as DeepSeek-V3 671B:
```sh
torchrun --nproc_per_node 1 --nnodes 8 --node_rank ${RANK} -m verl.model_merger merge \
    --backend megatron \
    --local_dir ./checkpoints/global_step_1/actor \
    --target_dir /path/to/merged_hf_model
```

For more details, please refer to the documentation:
https://verl.readthedocs.io/en/latest/advance/checkpoint.html#convert-fsdp-and-megatron-checkpoints-to-huggingface-format-model
"""

from .base_model_merger import generate_config_from_args, parse_args


def main():
    args = parse_args()
    config = generate_config_from_args(args)
    print(f"config: {config}")

    if config.backend == "fsdp":
        from .fsdp_model_merger import FSDPModelMerger

        merger = FSDPModelMerger(config)
    elif config.backend == "megatron":
        from .megatron_model_merger import MegatronModelMerger

        merger = MegatronModelMerger(config)
    else:
        raise NotImplementedError(f"Unknown backend: {config.backend}")

    merger.merge_and_save()
    merger.cleanup()


if __name__ == "__main__":
    main()


================================================
FILE: verl_distillation/verl/model_merger/base_model_merger.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
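# ----------------------------------------------------------------------------
# Editor's note: a minimal programmatic sketch of what the CLI above drives.
# Paths are placeholders; `generate_config_from_args` normally derives
# `hf_model_config_path` as `<local_dir>/huggingface`, so it is set explicitly
# when constructing the config by hand:
#
#     from verl.model_merger.base_model_merger import ModelMergerConfig
#     from verl.model_merger.fsdp_model_merger import FSDPModelMerger
#
#     config = ModelMergerConfig(
#         operation="merge",
#         backend="fsdp",
#         local_dir="checkpoints/global_step_1/actor",
#         hf_model_config_path="checkpoints/global_step_1/actor/huggingface",
#         target_dir="/path/to/merged_hf_model",
#     )
#     merger = FSDPModelMerger(config)
#     merger.merge_and_save()
#     merger.cleanup()
# ----------------------------------------------------------------------------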
import argparse import os from abc import ABC, abstractmethod from dataclasses import dataclass, field from typing import Optional import torch from accelerate import init_empty_weights from transformers import ( AutoConfig, AutoModelForCausalLM, AutoModelForTokenClassification, AutoModelForVision2Seq, GenerationConfig, ) from verl.utils import hf_processor, hf_tokenizer def parse_args(): parser = argparse.ArgumentParser(description="verl model merger") subparsers = parser.add_subparsers(dest="operation", required=True, help="Specify 'merge' or 'test' operation.") base_op_parser = argparse.ArgumentParser(add_help=False) base_op_parser.add_argument( "--backend", type=str, required=True, choices=["fsdp", "megatron"], help="The backend of the model" ) base_op_parser.add_argument("--local_dir", type=str, default=None, help="Path to the saved model checkpoints.") base_op_parser.add_argument( "--tie-word-embedding", action="store_true", help="Whether to tie word embedding weights (currently only Megatron supported)", ) base_op_parser.add_argument("--trust-remote-code", action="store_true", help="Whether to trust remote code") base_op_parser.add_argument( "--is-value-model", action="store_true", help="Whether the model is a value model (currently only Megatron supported)", ) base_op_parser.add_argument( "--use_cpu_initialization", action="store_true", help="Whether to use CPU initialization for the model. This is useful for large models that cannot " "fit into GPU memory during initialization.", ) merge_parser = subparsers.add_parser("merge", parents=[base_op_parser], help="Merge model checkpoints and save.") merge_parser.add_argument( "--target_dir", default="tmp", type=str, help="Directory to save the merged huggingface model" ) merge_parser.add_argument( "--hf_upload_path", default=None, type=str, help="Hugging Face repository ID to upload the model" ) merge_parser.add_argument( "--private", action="store_true", help="Whether to upload the model to a private Hugging Face repository" ) test_parser = subparsers.add_parser( "test", parents=[base_op_parser], help="Test merged model against a reference Hugging Face model" ) test_parser.add_argument( "--test_hf_dir", type=str, required=True, help="Path to the reference Hugging Face model directory for testing" ) args = parser.parse_args() return args @dataclass class ModelMergerConfig: """Configuration for model merger operations. Args: operation (str): Operation type - 'merge' or 'test'. backend (str): Backend type for the model ('fsdp' or 'megatron'). target_dir (Optional[str]): Directory to save the merged huggingface model. Defaults to "tmp". hf_upload_path (Optional[str]): Hugging Face repository ID to upload the model. Defaults to None. private (bool): Whether to upload the model to a private Hugging Face repository. Defaults to False. test_hf_dir (Optional[str]): Path to the reference Hugging Face model directory for testing. Defaults to None. tie_word_embedding (bool): Whether to tie word embedding weights (currently only Megatron supported). Defaults to False. trust_remote_code (bool): Whether to trust remote code. Defaults to False. is_value_model (bool): Whether the model is a value model (currently only Megatron supported). Defaults to False. local_dir (Optional[str]): Path to the saved model checkpoints. Defaults to None. hf_model_config_path (Optional[str]): Path to HuggingFace model configuration files. Defaults to None. hf_upload (bool): Whether to upload to HuggingFace (computed automatically). Not for initialization. 
use_cpu_initialization (bool): Whether to use CPU initialization for large models. Defaults to False. """ operation: str # 'merge' or 'test' backend: str target_dir: Optional[str] = "tmp" hf_upload_path: Optional[str] = None private: bool = False test_hf_dir: Optional[str] = None tie_word_embedding: bool = False trust_remote_code: bool = False is_value_model: bool = False local_dir: Optional[str] = None hf_model_config_path: Optional[str] = None hf_upload: bool = field(init=False) use_cpu_initialization: bool = False def __post_init__(self): self.hf_upload = self.operation == "merge" and bool(self.hf_upload_path) if self.operation == "test": self.target_dir = None self.hf_upload_path = None self.private = False def generate_config_from_args(args: argparse.Namespace) -> ModelMergerConfig: common_config_args = { "operation": args.operation, "backend": args.backend, "tie_word_embedding": args.tie_word_embedding, "trust_remote_code": args.trust_remote_code, "is_value_model": args.is_value_model, "local_dir": args.local_dir, "hf_model_config_path": os.path.join(args.local_dir, "huggingface"), "use_cpu_initialization": args.use_cpu_initialization, } if args.operation == "merge": config = ModelMergerConfig( **common_config_args, target_dir=args.target_dir, hf_upload_path=args.hf_upload_path, private=args.private, test_hf_dir=None, ) os.makedirs(config.target_dir, exist_ok=True) elif args.operation == "test": config = ModelMergerConfig( **common_config_args, test_hf_dir=args.test_hf_dir, # the following args are not used by test operation target_dir=None, hf_upload_path=None, private=False, ) else: raise NotImplementedError(f"Unknown operation: {args.operation}") return config class BaseModelMerger(ABC): """ Abstract base class for merging distributed model checkpoints into HuggingFace format. This class provides common functionality for converting model checkpoints from different distributed training backends (FSDP, Megatron) into standard HuggingFace format that can be easily loaded and used for inference or further training. The merger supports two main operations: - merge: Convert and save checkpoints to HuggingFace format - test: Validate merged checkpoints against a reference model Args: config (ModelMergerConfig): Configuration object containing paths, backend type, and operation parameters. Attributes: config (ModelMergerConfig): The configuration object passed during initialization. hf_model_config_path (str): Path to the HuggingFace model configuration files. model_config (PretrainedConfig): Loaded HuggingFace model configuration. 
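    Example (editor's sketch): a concrete merger only needs to implement the two
    abstract hooks, typically funneling the reconstructed state dict through
    ``save_hf_model_and_tokenizer``:

    ```python
    class MyBackendMerger(BaseModelMerger):  # hypothetical subclass
        def merge_and_save(self):
            state_dict = ...  # backend-specific shard loading and concatenation
            self.save_hf_model_and_tokenizer(state_dict)

        def cleanup(self):
            pass  # release resources (e.g. destroy process groups) if any
    ```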
""" def __init__(self, config: ModelMergerConfig): self.config = config self.hf_model_config_path = config.hf_model_config_path self.model_config = AutoConfig.from_pretrained( self.hf_model_config_path, trust_remote_code=self.config.trust_remote_code ) def get_transformers_auto_model_class(self): has_remote_code = hasattr(self.model_config, "auto_map") and any( self.model_config.architectures[0] in val for val in self.model_config.auto_map.values() ) if has_remote_code: auto_class = next( k for k, v in self.model_config.auto_map.items() if self.model_config.architectures[0] in v ) match auto_class: case "AutoModelForCausalLM": return AutoModelForCausalLM case "AutoModelForTokenClassification": return AutoModelForTokenClassification case "AutoModelForVision2Seq": return AutoModelForVision2Seq case _: raise NotImplementedError(f"Unknown auto class {auto_class}") else: if "ForTokenClassification" in self.model_config.architectures[0]: return AutoModelForTokenClassification elif "ForCausalLM" in self.model_config.architectures[0]: return AutoModelForCausalLM elif "ForConditionalGeneration" in self.model_config.architectures[0]: return AutoModelForVision2Seq raise NotImplementedError(f"Unknown architecture {self.model_config.architectures}") def patch_model_generation_config(self, model): """ The generation_config created from model config may be different to the pretrained model, this may lead to error when generating: https://github.com/volcengine/verl/issues/1246 This function patch the generation_config created from model config to the pretrained model. """ if model.can_generate(): try: model.generation_config = GenerationConfig.from_pretrained(self.hf_model_config_path) except OSError: print( f"Warning: Generation config file not found in {self.hf_model_config_path}, using a " f"generation config created from the model config." ) return model def save_lora_adapter(self, state_dict: dict[str, torch.Tensor]): """ Save lora adapter to safetensors. Returns: lora_path: str, the path to the lora adapter. None if no lora adapter found. Note: This function change the 'state_dict' in place. """ lora_params_names = [name for name in state_dict.keys() if "lora_" in name] if len(lora_params_names) == 0: return None import json from typing import OrderedDict import peft from safetensors.torch import save_file lora_params = OrderedDict() target_modules = set() lora_key = None for name in lora_params_names: lora_key = name.replace(".default.weight", ".weight") target_modules.add(lora_key.split(".")[-3]) lora_params[lora_key] = state_dict.pop(name) lora_rank = min(lora_params[lora_key].shape[0], lora_params[lora_key].shape[1]) peft_dict = { "r": lora_rank, "lora_alpha": 0, # lora_alpha is not set. An error should be raised to inform the user to set it manually. 
"target_modules": list(target_modules), } peft_config = peft.LoraConfig(**peft_dict).to_dict() peft_config["task_type"] = peft_config["task_type"].value if peft_config["task_type"] else None peft_config["peft_type"] = peft_config["peft_type"].value if peft_config["peft_type"] else None peft_config["target_modules"] = list(peft_config["target_modules"]) lora_path = os.path.join(self.config.target_dir, "lora_adapter") os.makedirs(lora_path, exist_ok=True) with open(os.path.join(lora_path, "adapter_config.json"), "w", encoding="utf-8") as f: json.dump(peft_config, f, ensure_ascii=False, indent=4) save_file(lora_params, os.path.join(lora_path, "adapter_model.safetensors")) for name in list(state_dict.keys()): key = ( name.replace("base_model.model.", "") .replace(".base_layer.weight", ".weight") .replace(".base_layer.bias", ".bias") ) state_dict[key] = state_dict.pop(name) return lora_path def save_hf_model_and_tokenizer(self, state_dict: dict[str, torch.Tensor]): auto_model_class = self.get_transformers_auto_model_class() with init_empty_weights(): model = auto_model_class.from_config( self.model_config, torch_dtype=torch.bfloat16, trust_remote_code=self.config.trust_remote_code ) model.to_empty(device="cpu") model = self.patch_model_generation_config(model) lora_path = self.save_lora_adapter(state_dict) if lora_path: print(f"Saving lora adapter to {lora_path}") print(f"Saving model to {self.config.target_dir}") model.save_pretrained(self.config.target_dir, state_dict=state_dict) del state_dict del model processor = hf_processor(self.hf_model_config_path, trust_remote_code=self.config.trust_remote_code) tokenizer = hf_tokenizer(self.hf_model_config_path, trust_remote_code=self.config.trust_remote_code) if processor is not None: print(f"Saving processor to {self.config.target_dir}") processor.save_pretrained(self.config.target_dir) if tokenizer is not None: print(f"Saving tokenizer to {self.config.target_dir}") tokenizer.save_pretrained(self.config.target_dir) def upload_to_huggingface(self): import requests from huggingface_hub import HfApi from huggingface_hub.utils import HfHubHTTPError, RepositoryNotFoundError api = HfApi() try: # Attempt to create repository api.create_repo(repo_id=self.config.hf_upload_path, private=self.config.private, exist_ok=True) except HfHubHTTPError as e: # Handle authentication/API errors if e.response.status_code == 401: raise PermissionError( "Hugging Face authentication failed. Verify your token is valid and has write permissions." ) from e elif e.response.status_code == 404: raise RepositoryNotFoundError(f"Repository path not found: {self.config.hf_upload_path}") from e else: raise ConnectionError(f"Failed to create repository ({e.response.status_code}): {e}") from e except requests.exceptions.ConnectionError as e: raise ConnectionError("Network connection failed. Check your internet connection.") from e try: # Attempt folder upload api.upload_folder(folder_path=self.config.target_dir, repo_id=self.config.hf_upload_path, repo_type="model") except HfHubHTTPError as e: if e.response.status_code == 401: raise PermissionError("Authentication failed during upload. Token may have expired.") from e else: raise RuntimeError(f"Upload failed ({e.response.status_code}): {e}") from e except requests.exceptions.ConnectionError as e: raise ConnectionError("Network interruption during upload. 
Try again with a stable connection.") from e
        except OSError as e:
            raise FileNotFoundError(f"Local folder error: {self.config.target_dir} - {str(e)}") from e
        except Exception as e:
            raise RuntimeError(f"Unexpected error during upload: {str(e)}") from e

    @abstractmethod
    def merge_and_save(self):
        raise NotImplementedError("Subclasses should implement this method")

    @abstractmethod
    def cleanup(self):
        raise NotImplementedError("Subclasses should implement this method to clean up resources if needed")


================================================
FILE: verl_distillation/verl/model_merger/fsdp_model_merger.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path

import numpy as np
import torch
from torch.distributed._tensor import Placement, Shard

try:  # for torch 2.5+
    from torch.distributed.tensor import DTensor
except ImportError:
    from torch.distributed._tensor import DTensor

from tqdm import tqdm

from .base_model_merger import BaseModelMerger


class FSDPModelMerger(BaseModelMerger):
    """
    Model merger for FSDP (Fully Sharded Data Parallel) checkpoints.

    This class handles the conversion of FSDP distributed checkpoints into HuggingFace format.
    FSDP shards model parameters across multiple processes, and this merger reconstructs the
    full model by loading and concatenating the sharded parameters from all ranks.

    The merger supports various FSDP configurations including:
    - Pure FSDP (single dimension sharding)
    - FSDP + DDP (data parallel + fully sharded data parallel)
    - DTensor-based sharding with custom device meshes

    Key features:
    - Automatic detection of world size from checkpoint filenames
    - Support for DTensor and non-DTensor checkpoints
    - Parallel loading of checkpoint shards for efficiency
    - Validation against reference HuggingFace models

    Example:
        To merge FSDP checkpoints:
        ```python
        config = ModelMergerConfig(
            operation="merge",
            backend="fsdp",
            local_dir="path/to/fsdp/checkpoints",
            target_dir="path/to/output"
        )
        merger = FSDPModelMerger(config)
        merger.merge_and_save()
        ```
    """

    def _get_world_size(self) -> int:
        """Extract the world size from the FSDP JSON config file.
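        For example (editor's illustration; only the key read by this method is shown),
        an ``fsdp_config.json`` containing ``{"world_size": 8}`` yields 8.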
Returns: int: world size """ config_path = Path(self.config.local_dir) / "fsdp_config.json" if not config_path.exists(): raise FileNotFoundError(f"Config file {config_path} does not exist.") with open(config_path) as f: config = json.load(f) # Extract world size from the config world_size = config.get("world_size", None) if world_size is None: raise ValueError("World size not found in the config file.") return world_size def _load_rank_zero_state_dict(self, world_size: int) -> dict: return torch.load( Path(self.config.local_dir) / f"model_world_size_{world_size}_rank_0.pt", map_location="cpu", weights_only=False, ) def _extract_device_mesh_info(self, state_dict: dict, world_size: int) -> tuple[np.ndarray, tuple[str, ...]]: """ Retrieves sharding information (device_mesh, mesh_dim_names) from a DTensor in the state_dict. If no DTensor is found, infers a simple FSDP mesh based on world_size. """ pivot_key = sorted(list(state_dict.keys()))[0] weight = state_dict[pivot_key] if isinstance(weight, DTensor): # get sharding info device_mesh = weight.device_mesh mesh = device_mesh.mesh mesh_dim_names = device_mesh.mesh_dim_names else: # for non-DTensor mesh = np.array([world_size], dtype=np.int64) mesh_dim_names = ("fsdp",) return mesh, mesh_dim_names def _calculate_shard_configuration( self, mesh: np.ndarray, mesh_dim_names: tuple[str, ...] ) -> tuple[int, tuple[int, ...]]: """Calculates the total number of shards and the shape of the device mesh.""" assert mesh_dim_names in (("fsdp",), ("ddp", "fsdp")), f"Unsupported mesh_dim_names {mesh_dim_names}" if "tp" in mesh_dim_names: # TODO: "tp" is not supported yet due to the above assert total_shards = mesh.shape[-1] * mesh.shape[-2] mesh_shape = (mesh.shape[-2], mesh.shape[-1]) else: total_shards = mesh.shape[-1] mesh_shape = (mesh.shape[-1],) return total_shards, mesh_shape def _merge_by_placement(self, tensors: list[torch.Tensor], placement: Placement) -> torch.Tensor: """Merges a list of tensors based on their DTensor placement""" if placement.is_replicate(): return tensors[0] elif placement.is_partial(): raise NotImplementedError("Partial placement is not supported yet") elif placement.is_shard(): return torch.cat(tensors, dim=placement.dim).contiguous() raise NotImplementedError(f"Unsupported placement: {placement}") def _load_and_merge_state_dicts( self, world_size: int, total_shards: int, mesh_shape: tuple[int, ...], mesh_dim_names: tuple[str, ...] 
) -> dict[str, torch.Tensor]: model_state_dict_lst = [None] * total_shards def process_one_shard(rank: int, model_state_dict_lst: list): model_path = Path(self.config.local_dir) / f"model_world_size_{world_size}_rank_{rank}.pt" state_dict = torch.load(model_path, map_location="cpu", weights_only=False) model_state_dict_lst[rank] = state_dict return state_dict with ThreadPoolExecutor(max_workers=min(32, os.cpu_count())) as executor: futures = [executor.submit(process_one_shard, rank, model_state_dict_lst) for rank in range(total_shards)] for future in tqdm(futures, desc=f"Loading {total_shards} FSDP shards", total=total_shards): future.result() # Merge state dicts from all shards state_dict = {} param_placements: dict[str, list] = {} for key in set(model_state_dict_lst[0].keys()): state_dict[key] = [] for model_state_shard in model_state_dict_lst: # add tensor shard in order of rank to state_dict[key] tensor = model_state_shard.pop(key) if isinstance(tensor, DTensor): state_dict[key].append(tensor._local_tensor.bfloat16()) placements = tuple(tensor.placements) # replicated placement at dp dimension can be discarded if mesh_dim_names[0] in ("dp", "ddp"): placements = placements[1:] if key not in param_placements: param_placements[key] = placements else: assert param_placements[key] == placements else: state_dict[key].append(tensor.bfloat16()) del model_state_dict_lst # Merge tensors for key in sorted(state_dict): if not isinstance(state_dict[key], list): print(f"No need to merge key {key}") continue if key in param_placements: # merge shards placements: tuple[Shard] = param_placements[key] if len(mesh_shape) == 1: # 1-D list, FSDP without TP assert len(placements) == 1 shards = state_dict[key] state_dict[key] = self._merge_by_placement(shards, placements[0]) else: # 2-D list, FSDP + TP raise NotImplementedError("FSDP + TP is not supported yet") else: state_dict[key] = torch.cat(state_dict[key], dim=0) return state_dict def merge_and_save(self): world_size = self._get_world_size() rank_zero_state_dict = self._load_rank_zero_state_dict(world_size) mesh, mesh_dim_names = self._extract_device_mesh_info(rank_zero_state_dict, world_size) print(f"Got device mesh {mesh}, mesh_dim_names {mesh_dim_names}") total_shards, mesh_shape = self._calculate_shard_configuration(mesh, mesh_dim_names) print(f"Processing model shards with {total_shards} {mesh_shape} in total") merged_state_dict = self._load_and_merge_state_dicts(world_size, total_shards, mesh_shape, mesh_dim_names) if self.config.operation == "test": if not self.config.test_hf_dir: raise ValueError("test_hf_dir must be provided for test operation") self._validate_state_dict(merged_state_dict) elif self.config.operation == "merge": self.save_hf_model_and_tokenizer(merged_state_dict) if self.config.hf_upload: self.upload_to_huggingface() else: raise ValueError(f"Unknown operation: {self.config.operation}") def _validate_state_dict(self, state_dict: dict[str, torch.Tensor]): auto_model_class = self.get_transformers_auto_model_class() hf_model = auto_model_class.from_pretrained(self.config.test_hf_dir, torch_dtype=torch.bfloat16) hf_state_dict = hf_model.state_dict() del hf_model hf_model_keys = set(hf_state_dict.keys()) collected_keys = set(state_dict.keys()) missing_keys = hf_model_keys - collected_keys assert len(missing_keys) == 0, f"Missing keys in collected state dict: {list(sorted(missing_keys))}" extra_keys = collected_keys - hf_model_keys assert len(extra_keys) == 0, f"Extra keys in collected state dict: {list(sorted(extra_keys))}" for key in 
hf_model_keys: hf_shape = hf_state_dict[key].shape collected_shape = state_dict[key].shape assert hf_shape == collected_shape, ( f"Shape mismatch for key '{key}': original {hf_shape} vs collected {collected_shape}" ) hf_dtype = hf_state_dict[key].dtype collected_dtype = state_dict[key].dtype assert hf_dtype == collected_dtype, ( f"Dtype mismatch for key '{key}': original {hf_dtype} vs collected {collected_dtype}" ) torch.testing.assert_close(hf_state_dict[key], state_dict[key], atol=1e-6, rtol=1e-6) print("FSDP checks passed: The merged state_dict matches the hf model saved by FSDPCheckpointManager.") def cleanup(self): """Cleanup temporary files if needed.""" # FSDP merger does not create temporary files, so no cleanup is needed. pass ================================================ FILE: verl_distillation/verl/model_merger/megatron_model_merger.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import warnings from contextlib import contextmanager from pathlib import Path from typing import Any, Callable, ContextManager import numpy as np import torch import torch.distributed as dist try: # NPU patch import mindspeed.megatron_adaptor # noqa: F401 except ImportError: pass from accelerate import init_empty_weights from megatron.core import mpu from megatron.core.models.gpt.gpt_model import ModelType from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed from safetensors.torch import load_file from transformers import ( AutoConfig, PretrainedConfig, ) from verl.models.mcore import hf_to_mcore_config from verl.utils.device import get_device_name, get_nccl_backend, get_torch_device from verl.utils.distributed import set_numa_affinity from verl.utils.megatron.dist_checkpointing import load_dist_checkpointing from verl.utils.megatron_utils import get_model from verl.utils.tokenizer import hf_processor, hf_tokenizer from .base_model_merger import BaseModelMerger, ModelMergerConfig @contextmanager def noop_context() -> Any: yield def get_dynamic_pipeline_shards(layer_num: int, pp_size: int) -> list[int]: """Calculate the pipeline sharding configuration for Megatron-LM. Args: layer_num: Total number of layers in the model. pp_size: Number of pipeline parallel ranks. Returns: layer number of each pp rank. Make the sharding of the pipeline as uniform as possible. 
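    Example (editor's worked illustration): for ``layer_num=10, pp_size=4`` the feasible
    candidates are ``[2, 3, 3, 2]`` (diff 1) and ``[1, 4, 4, 1]`` (diff 3), so the most
    uniform split is returned:

    ```python
    >>> get_dynamic_pipeline_shards(10, 4)
    [2, 3, 3, 2]
    ```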
""" if layer_num < pp_size: raise ValueError(f"layer_num {layer_num} must be greater than pp_size {pp_size}.") if pp_size < 1: raise ValueError(f"pp_size must be at least 1, got {pp_size}.") if pp_size == 1: return [layer_num] if pp_size == 2: return [ layer_num // 2, layer_num - layer_num // 2, ] middle_size = pp_size - 2 shards_strategy = [] for middle_layer_num in range(layer_num): first_last_layer_num = layer_num - middle_layer_num * middle_size first_layer_num = first_last_layer_num // 2 last_layer_num = first_last_layer_num - first_last_layer_num // 2 if 0 < first_layer_num <= middle_layer_num and 0 < last_layer_num <= middle_layer_num: shards_strategy.append( ( [first_layer_num] + [middle_layer_num] * middle_size + [last_layer_num], abs(first_layer_num - middle_layer_num), ) ) # sort by diff of layer_num, to make it as uniform as possible res = sorted(shards_strategy, key=lambda x: x[1])[0][0] assert sum(res) == layer_num, f"sum(res)={sum(res)} != layer_num={layer_num}, pp_size={pp_size}" return res class MegatronModelMerger(BaseModelMerger): """ Model merger for Megatron-LM distributed checkpoints. This class handles the conversion of Megatron-LM distributed checkpoints into HuggingFace format. Megatron-LM uses tensor parallelism, pipeline parallelism, and data parallelism to distribute large language models across multiple GPUs. This merger reconstructs the full model by loading distributed checkpoints and applying the necessary transformations. Key features: - Support for tensor parallel, pipeline parallel, and data parallel configurations - Automatic parameter name mapping from Megatron to HuggingFace conventions - Handling of QKV and gate-up tensor splitting/merging - Support for tied word embeddings and value models - Integration with Megatron's distributed checkpointing system The merger handles various model architectures and configurations: - Standard transformer models (GPT-style) - Models with tied word embeddings - Value models for reinforcement learning - Multi-layer attention (MLA) architectures - Mixture of Experts (MoE) models Args: config (ModelMergerConfig): Configuration object with Megatron-specific settings including tie_word_embedding and is_value_model flags. 
Example: To merge Megatron checkpoints: ```python config = ModelMergerConfig( operation="merge", backend="megatron", local_dir="path/to/megatron/checkpoints", target_dir="path/to/output", tie_word_embedding=True ) merger = MegatronModelMerger(config) merger.merge_and_save() ``` """ def __init__(self, config: ModelMergerConfig): super().__init__(config) # Currently we use only 1 rank to merge the dist_ckpt, we will move to multi-process save shortly afterwards if "WORLD_SIZE" not in os.environ: os.environ["RANK"] = "0" os.environ["LOCAL_RANK"] = "0" os.environ["WORLD_SIZE"] = "1" os.environ["MASTER_ADDR"] = "localhost" os.environ["MASTER_PORT"] = "12355" set_numa_affinity() torch.distributed.init_process_group(get_nccl_backend()) self.rank = torch.distributed.get_rank() self.world_size = torch.distributed.get_world_size() local_rank = os.environ.get("LOCAL_RANK", 0) get_torch_device().set_device(f"{get_device_name()}:{local_rank}") mpu.initialize_model_parallel( tensor_model_parallel_size=1, pipeline_model_parallel_size=self.world_size, virtual_pipeline_model_parallel_size=None, context_parallel_size=1, expert_model_parallel_size=1, ) model_parallel_cuda_manual_seed(0) self.hf_config = AutoConfig.from_pretrained( self.config.hf_model_config_path, trust_remote_code=self.config.trust_remote_code ) print(self.hf_config, flush=True) self.params_mapping = { # megatron core gpt model name, huggingface model name # NOTICE: It's a little bit tricky, when 2 keys have the same prefix, we need to make sure the # longer key within the containing relationship is processed first. "embedding.word_embeddings": "model.embed_tokens", # input layer norm for dpskv3 "input_layernorm.weight": "input_layernorm.weight", "input_layernorm.bias": "input_layernorm.bias", # attn "self_attention.linear_qkv.layer_norm_weight": "input_layernorm.weight", "self_attention.linear_qkv.layer_norm_bias": "input_layernorm.bias", "self_attention.linear_qkv": "self_attn.qkv_proj", "self_attention.q_layernorm": "self_attn.q_norm", "self_attention.k_layernorm": "self_attn.k_norm", "self_attention.linear_proj": "self_attn.o_proj", # mla "self_attention.linear_q_proj": "self_attn.q_proj", "self_attention.linear_q_down_proj": "self_attn.q_a_proj", "self_attention.linear_q_up_proj.layer_norm_weight": "self_attn.q_a_layernorm.weight", "self_attention.linear_q_up_proj": "self_attn.q_b_proj", "self_attention.linear_kv_down_proj": "self_attn.kv_a_proj_with_mqa", "self_attention.linear_kv_up_proj.layer_norm_weight": "self_attn.kv_a_layernorm.weight", "self_attention.linear_kv_up_proj": "self_attn.kv_b_proj", # mlp "pre_mlp_layernorm": "post_attention_layernorm", "mlp.linear_fc1.layer_norm_weight": "post_attention_layernorm.weight", "mlp.linear_fc1.layer_norm_bias": "post_attention_layernorm.bias", "mlp.linear_fc1": "mlp.gate_up_proj", "mlp.linear_fc2": "mlp.down_proj", # moe "mlp.router.expert_bias": "mlp.gate.e_score_correction_bias", "mlp.router": "mlp.gate", "mlp.shared_experts.linear_fc1": "mlp.shared_experts.gate_up_proj", "mlp.shared_experts.linear_fc2": "mlp.shared_experts.down_proj", "linear_fc1": "gate_up_proj", "linear_fc2": "down_proj", # output "final_layernorm": "norm", "output_layer": "lm_head", } if "Qwen2MoeForCausalLM" in self.hf_config.architectures: self.params_mapping["mlp.shared_experts.linear_fc1"] = "mlp.shared_expert.gate_up_proj" self.params_mapping["mlp.shared_experts.linear_fc2"] = "mlp.shared_expert.down_proj" self.params_mapping["mlp.shared_experts.gate_weight"] = "mlp.shared_expert_gate.weight" def 
_load_state_dicts(self, model_ckpt_path: str) -> list[dict[str, Any]]:
        """Use Megatron dist_checkpointing to load the model state dicts from the checkpoint directory.

        Args:
            model_ckpt_path (str): Path to the model checkpoint directory.

        Returns:
            A list of model state dicts, one per virtual pipeline (vpp) stage.
        """
        # compute the pipeline sharding from the hf config
        self.pipeline_shards = get_dynamic_pipeline_shards(self.hf_config.num_hidden_layers, self.world_size)
        print(f"Pipeline shards: {self.pipeline_shards}, total layers: {sum(self.pipeline_shards)}")

        tf_config = hf_to_mcore_config(
            self.hf_config,
            torch.bfloat16,
            num_layers_in_first_pipeline_stage=self.pipeline_shards[0] if len(self.pipeline_shards) > 1 else None,
            num_layers_in_last_pipeline_stage=self.pipeline_shards[-1] if len(self.pipeline_shards) > 2 else None,
        )
        tf_config.use_cpu_initialization = self.config.use_cpu_initialization
        tie_word_embeddings = getattr(self.hf_config, "tie_word_embeddings", False)

        # init megatron model
        def megatron_model_provider(pre_process, post_process):
            from verl.models.mcore import init_mcore_model

            parallel_model = init_mcore_model(
                tf_config,
                self.hf_config,
                pre_process,
                post_process,
                share_embeddings_and_output_weights=tie_word_embeddings,
                value=False,
            )
            return parallel_model

        context: Callable[..., ContextManager] = (
            init_empty_weights if self.config.use_cpu_initialization else noop_context
        )
        with context():
            whole_model = get_model(
                model_provider_func=megatron_model_provider,
                model_type=ModelType.encoder_or_decoder,
                wrap_with_ddp=False,
                transformer_config=tf_config,
            )

        if self.config.use_cpu_initialization:
            # convert the meta device to an empty tensor so it can use the `copy_` function
            whole_model[0].module = whole_model[0].module.to_empty(device="cpu")

        # load state dicts
        sharded_state_dict = {}
        for vpp_rank, model in enumerate(whole_model):
            key = f"model{vpp_rank}" if len(whole_model) > 1 else "model"
            mpu.set_virtual_pipeline_model_parallel_rank(vpp_rank)
            sharded_state_dict[key] = model.sharded_state_dict()
        model_state_dict = load_dist_checkpointing(sharded_state_dict, model_ckpt_path)

        model_state_dict_list = []
        for vpp_rank, model in enumerate(whole_model):
            key = f"model{vpp_rank}" if len(whole_model) > 1 else "model"
            mpu.set_virtual_pipeline_model_parallel_rank(vpp_rank)
            model_state_dict_list.append(model_state_dict[key])

        return model_state_dict_list

    def _check_megatron_state_key(self, key: str) -> None:
        """
        Check that `key` is a valid Megatron state key.

        The model merger currently only supports keys that start with
        "decoder/embedding/output_layer" in TransformerLayer; keys must not start with "model.".
        """
        if key.startswith("model."):
            raise ValueError(
                f"Invalid key {key} in Megatron state_dict. Expected keys to start with "
                f"'decoder/embedding/output_layer' in TransformerLayer."
            )

        skip_checking_keys = ["embedding.word_embeddings", "output_layer"]
        for skip_key in skip_checking_keys:
            if skip_key in key:
                print(f"skip checking key {key}")
                return

        # Exclude extra state keys
        if not key.startswith("decoder"):
            raise ValueError(
                f"Invalid key {key} in Megatron state_dict. Expected keys to start with 'decoder' in TransformerLayer."
            )

    def _split_tensors(
        self, key: str, tensor: torch.Tensor, config: PretrainedConfig, is_value_model: bool = False
    ) -> list[torch.Tensor]:
        """
        Splits a tensor into multiple tensors based on its name. This is used to handle qkv and gate_up tensors.
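        Worked example (editor's note) for the qkv branch: with ``num_attention_heads=8``,
        ``num_key_value_heads=2`` and a head dim of 4, the fused ``linear_qkv`` weight has
        ``(8/2 + 2) * 2 * 4 = 48`` rows. It is chunked into 2 query groups of 24 rows each,
        every group is split into q/k/v rows of ``[16, 4, 4]``, and the pieces are
        re-concatenated into q ``[32, hidden]``, k ``[8, hidden]``, v ``[8, hidden]``.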
""" if "linear_fc1.weight" in key: # if the tensor is gate and proj gate_lst = [] up_lst = [] gate, up = tensor.chunk(2) gate_lst.append(gate) up_lst.append(up) gate = torch.cat(gate_lst, dim=0) up = torch.cat(up_lst, dim=0) return [gate, up] elif "self_attention.linear_qkv." in key and "layer_norm" not in key: # if the tensor is qkv, for each param on tp, split into q, k, v # concat q, k, v separately. q_lst, k_lst, v_lst = [], [], [] assert config.num_attention_heads % config.num_key_value_heads == 0 num_q_per_kv = config.num_attention_heads // config.num_key_value_heads assert tensor.shape[0] % (num_q_per_kv + 2) == 0, ( f"Tensor shape {tensor.shape} is not divisible by {num_q_per_kv + 2}" ) kv_size = tensor.shape[0] // (num_q_per_kv + 2) split_size = [kv_size * num_q_per_kv, kv_size, kv_size] num_query_groups_per_partition = config.num_key_value_heads for chunk in tensor.chunk(num_query_groups_per_partition): split_size = [ kv_size * num_q_per_kv // num_query_groups_per_partition, kv_size // num_query_groups_per_partition, kv_size // num_query_groups_per_partition, ] q, k, v = chunk.split(split_size) q_lst.append(q) k_lst.append(k) v_lst.append(v) return [torch.cat(q_lst, dim=0), torch.cat(k_lst, dim=0), torch.cat(v_lst, dim=0)] else: return [tensor] def _merge_state_dicts(self, model_state_dict_list: list[dict[str, Any]]) -> dict[str, torch.Tensor]: state_dict = {} layers_cum = 0 if self.world_size > 1: pipeline_cumsum = np.cumsum(self.pipeline_shards) layers_cum = 0 if self.rank == 0 else pipeline_cumsum[self.rank - 1] print(f"{layers_cum=}") for model_state_dict in model_state_dict_list: layers_handled = 0 keys = model_state_dict.keys() for key in keys: if "extra_state" in key: continue if self.config.tie_word_embedding and ("output_layer" in key): print("skip lm_head and reward_head loading because of tie_word_embeddings") continue self._check_megatron_state_key(key) hf_name = self._replace_name(key, self.params_mapping) assert hf_name is not None, f"Failed to convert layer name [{key}] from megatron to huggingface." if "model.layers." in hf_name: local_layer_no = int(hf_name.split(".")[2]) layers_handled = max(local_layer_no, layers_handled) global_layer_no = local_layer_no + layers_cum new_key_list = hf_name.split(".") new_key_list[2] = str(global_layer_no) hf_name = ".".join(new_key_list) else: warnings.warn(f"hf_name {hf_name} will not be fixed with layer number", stacklevel=2) if "mlp.experts." 
in hf_name and ".weight" in hf_name: name_prefix, expert_id = hf_name.split(".weight") for proj in ["gate_up", "down"]: if f"{proj}_proj" in hf_name: hf_name = hf_name.replace( f"mlp.experts.{proj}_proj.weight{expert_id}", f"mlp.experts.{expert_id}.{proj}_proj.weight", ) tensor = model_state_dict[key] split_tensor = self._split_tensors( key, tensor, self.hf_config, is_value_model=self.config.is_value_model ) if len(split_tensor) == 1: state_dict[hf_name] = split_tensor[0] elif len(split_tensor) == 3: # split qkv for n, d in zip(["q", "k", "v"], split_tensor, strict=True): state_dict[hf_name.replace("qkv", n)] = d elif len(split_tensor) == 2: # split gate up state_dict[hf_name.replace("gate_up", "gate")] = split_tensor[0] state_dict[hf_name.replace("gate_up", "up")] = split_tensor[1] shape_info = ( split_tensor.shape if isinstance(split_tensor, torch.Tensor) else [t.shape for t in split_tensor] ) print(f"converted {key} to {hf_name} with shape {shape_info}") layers_cum += layers_handled + 1 # zero based return state_dict def save_hf_model_and_tokenizer(self, merged_state_dict): if self.world_size == 1: return super().save_hf_model_and_tokenizer(merged_state_dict) from safetensors.torch import save_file layer_num = self.hf_config.num_hidden_layers # FIXME: make configurable saves_per_layer = 1 if layer_num < 30 else 2 saves_total = saves_per_layer * layer_num saves_indexes = {} # calculate the layer start index and key chunks layer_this_rank = self.pipeline_shards[self.rank] pipeline_cumsum = np.cumsum(self.pipeline_shards) layer_start = 0 if self.rank == 0 else pipeline_cumsum[self.rank - 1] keys = list(merged_state_dict.keys()) keys_chunk = np.array_split(np.array(keys), layer_this_rank * saves_per_layer) numel = 0 assert len(keys_chunk) == layer_this_rank * saves_per_layer, ( f"Expected {len(keys_chunk)} chunks, but got {layer_this_rank * saves_per_layer} for rank {self.rank}." 
) # save to model shards manually target_dir = Path(self.config.target_dir) for i, keys in enumerate(keys_chunk): sd_to_save = {k: merged_state_dict[k] for k in keys} numel += sum([sd_to_save[i].numel() for i in sd_to_save]) save_idx = layer_start * saves_per_layer + i save_path = target_dir / f"model-{save_idx + 1:05d}-of-{saves_total:05d}.safetensors" save_file(sd_to_save, save_path) for k in keys: saves_indexes[k] = str(save_path.name) tensor = torch.tensor([numel]).to(get_device_name()) dist.all_reduce(tensor, op=dist.ReduceOp.SUM) numel = tensor.cpu().item() all_save_indexes = [{} for _ in range(self.world_size)] dist.all_gather_object(all_save_indexes, saves_indexes) saves_indexes = {k: v for i in all_save_indexes for k, v in i.items()} if self.rank == 0: with open(target_dir / "model.safetensors.index.json", "w") as f: json.dump( { "metadata": { "total_size": numel, }, "weight_map": saves_indexes, }, f, indent=4, ) print(f"model saved to {target_dir} with {numel=}") self.model_config.save_pretrained(self.config.target_dir) processor = hf_processor(self.hf_model_config_path, trust_remote_code=self.config.trust_remote_code) tokenizer = hf_tokenizer(self.hf_model_config_path, trust_remote_code=self.config.trust_remote_code) if processor is not None: print(f"Saving processor to {self.config.target_dir}") processor.save_pretrained(self.config.target_dir) if tokenizer is not None: print(f"Saving tokenizer to {self.config.target_dir}") tokenizer.save_pretrained(self.config.target_dir) def merge_and_save(self): from verl.utils.megatron_utils import get_dist_checkpoint_path model_ckpt_path = get_dist_checkpoint_path(self.config.local_dir) model_state_dict = self._load_state_dicts(model_ckpt_path) merged_state_dict = self._merge_state_dicts(model_state_dict) del model_state_dict if self.config.operation == "test": if not self.config.test_hf_dir: raise ValueError("test_hf_dir must be provided for test operation") self._validate_state_dict(merged_state_dict) elif self.config.operation == "merge": self.save_hf_model_and_tokenizer(merged_state_dict) if self.config.hf_upload: self.upload_to_huggingface() else: raise ValueError(f"Unknown operation: {self.config.operation}") def _validate_state_dict(self, state_dict: dict[str, torch.Tensor]): """ Compares the merged Megatron state_dict against a reference safetensors model. Applies necessary name mappings from Megatron to Hugging Face conventions using _replace_name. 
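        Editor's illustration of how this path is reached from the CLI (placeholder paths):

        ```sh
        python -m verl.model_merger test \
            --backend megatron \
            --local_dir checkpoints/global_step_1/actor \
            --test_hf_dir /path/to/reference_hf_model
        ```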
""" ref_state_dict = load_file(Path(self.config.test_hf_dir) / "model.safetensors") for name, loaded_weight in state_dict.items(): # name = self._replace_name(original_name, self.params_mapping) if not name or name.endswith(".bias") and name not in ref_state_dict: continue if "rotary_emb.inv_freq" in name: continue if "lm_head.weight" in name: if self.config.is_value_model or self.config.tie_word_embedding: continue if name not in ref_state_dict: raise RuntimeError(f"key: {name} not exist in state_dict") param = ref_state_dict[name] assert loaded_weight.dtype == param.dtype torch.testing.assert_close(loaded_weight.to("cpu"), param, atol=1e-2, rtol=5e-2) def _replace_name(self, megatron_name: str, name_mapping: dict[str, str]) -> str: for m_name, v_name in name_mapping.items(): if m_name not in megatron_name: continue megatron_name = megatron_name.replace("decoder", "model") param_name = megatron_name.replace(m_name, v_name) return param_name return None # Return None if no mapping found def cleanup(self): torch.distributed.destroy_process_group() ================================================ FILE: verl_distillation/verl/models/README.md ================================================ # Models Common modelzoo such as huggingface/transformers stuggles when using Pytorch native model parallelism. Following the design principle of vLLM, we keep a simple, parallelizable, highly-optimized with packed inputs in verl. ## Adding a New Huggingface Model ### Step 1: Copy the model file from HF to verl - Add a new file under verl/models/hf - Copy ONLY the model file from huggingface/transformers/models to verl/models/hf ### Step 2: Modify the model file to use packed inputs - Remove all the code related to inference (kv cache) - Modify the inputs to include only - input_ids (total_nnz,) - cu_seqlens (total_nnz + 1,) - max_seqlen_in_batch: int - Note that this requires using flash attention with causal mask. ### Step 2.5: Add tests - Add a test to compare this version and the huggingface version - Following the infrastructure and add tests to tests/models/hf ### Step 3: Add a function to apply tensor parallelism - Please follow - https://pytorch.org/docs/stable/distributed.tensor.parallel.html - https://pytorch.org/tutorials/intermediate/TP_tutorial.html - General comments - Tensor Parallelism in native Pytorch is NOT auto-parallelism. The way it works is to specify how model parameters and input/output reshards using configs. These configs are then registered as hooks to perform input/output resharding before/after model forward. ### Step 4: Add a function to apply data parallelism - Please use FSDP2 APIs - See demo here https://github.com/pytorch/torchtitan/blob/main/torchtitan/parallelisms/parallelize_llama.py#L413 ### Step 5: Add a function to apply pipeline parallelism - Comes in Pytorch 2.4 - Currently only in alpha in nightly version - Check torchtitan for more details ================================================ FILE: verl_distillation/verl/models/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: verl_distillation/verl/models/llama/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: verl_distillation/verl/models/llama/megatron/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .modeling_llama_megatron import ( ParallelLlamaForCausalLM, # rmpad with megatron ParallelLlamaForCausalLMRmPad, # rmpad with megatron and pipeline parallelism ParallelLlamaForCausalLMRmPadPP, ParallelLlamaForValueRmPad, ParallelLlamaForValueRmPadPP, # original model with megatron ParallelLlamaModel, ) __all__ = [ "ParallelLlamaForCausalLM", "ParallelLlamaForCausalLMRmPad", "ParallelLlamaForCausalLMRmPadPP", "ParallelLlamaForValueRmPad", "ParallelLlamaForValueRmPadPP", "ParallelLlamaModel", ] ================================================ FILE: verl_distillation/verl/models/llama/megatron/checkpoint_utils/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: verl_distillation/verl/models/llama/megatron/checkpoint_utils/llama_loader.py ================================================ # Copyright 2024 Bytedance Ltd. 
and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import time import torch import torch.distributed as dist from verl.utils.device import get_device_id, get_torch_device def _megatron_calc_layer_map(config): """Calculate the mapping of global layer_idx to local layer_idx Returns: layer_map (Dict: int -> tuple(int, int, int)): mapping from the global layer index to a tuple of (pp_rank, virtual_pp_rank, layer_idx inside model) """ from megatron.core import mpu print(f"get megatron data parallel size: {mpu.get_data_parallel_world_size()}") pp_size = mpu.get_pipeline_model_parallel_world_size() virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1 layer_map = dict() num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers for pp_rank_idx in range(pp_size): for virtual_pp_rank_idx in range(virtual_pp_size): layer_offset = ( virtual_pp_rank_idx * (config.num_hidden_layers // virtual_pp_size) + pp_rank_idx * num_layers_per_model ) for layer_idx in range(num_layers_per_model): layer_map[layer_offset + layer_idx] = ( pp_rank_idx, virtual_pp_rank_idx, layer_idx, ) return layer_map def load_state_dict_to_megatron_llama( state_dict, wrapped_models, config, params_dtype, is_value_model=False, tie_word_embeddings=False ): """Load merged state_dict to sharded Megatron module in training.""" from megatron.core import DistributedDataParallel as LocalDDP from megatron.core import mpu from megatron.core.transformer.module import Float16Module from torch.nn.parallel import DistributedDataParallel as torchDDP from verl.utils.logger import print_rank_0 from verl.utils.megatron_utils import unwrap_model start_time = time.time() def _get_gpt_model(model): return model def fetch_params(module): for param in module.parameters(): torch.distributed.fetch( param.data, src=mpu.get_data_parallel_src_rank(), group=mpu.get_data_parallel_group() ) dp_rank = mpu.get_data_parallel_rank() pp_rank = mpu.get_pipeline_model_parallel_rank() pp_size = mpu.get_pipeline_model_parallel_world_size() virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1 mp_group = mpu.get_model_parallel_group() if torch.distributed.get_rank() == 0: assert mp_group.rank() == 0, f"mp_rank:[{mp_group.rank}] != 0 on rank #0" assert pp_rank == 0, f"pp_rank:[{pp_rank}] != 0 on rank #0" assert dp_rank == 0, f"dp_rank:[{dp_rank}] != 0 on rank #0" if not isinstance(wrapped_models, list | tuple): wrapped_models = list(wrapped_models) assert len(wrapped_models) == virtual_pp_size num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers, ( f"num_layers_per_model: {num_layers_per_model} * pp_size: {pp_size} * virtual_pp_size " f"{virtual_pp_size} != config.num_hidden_layers: {config.num_hidden_layers}" ) models = [None] * len(wrapped_models) for i, wrapped_model in enumerate(wrapped_models): models[i] = 
unwrap_model(wrapped_model, (torchDDP, LocalDDP, Float16Module)) gpt_model_module = _get_gpt_model(models[i]) assert len(gpt_model_module.model.layers) == num_layers_per_model def _fetch_tensor(tensor, name) -> torch.Tensor: """fetch tensor""" nonlocal state_dict if tensor is not None: tensor.data.copy_(state_dict[name]) def _fetch_tp_shard_tensor_vocab(tensor, name, chunk_dim=0, mutate_func=None) -> torch.Tensor: """fetch tensor in tp shards""" nonlocal state_dict tp_rank = mpu.get_tensor_model_parallel_rank() tp_size = mpu.get_tensor_model_parallel_world_size() if name in state_dict: full_weight = state_dict[name] if mutate_func is not None: full_weight = mutate_func(full_weight) tensor_chunk = torch.chunk(full_weight, tp_size, dim=chunk_dim) if tensor is not None: tensor.data.copy_(tensor_chunk[tp_rank]) else: print(f"tp_shard tensor:[{name}] not in state_dict, skip loading") def _fetch_tp_shard_tensor(tensor, name, chunk_dim=0, mutate_func=None) -> torch.Tensor: """fetch tensor in tp shards""" nonlocal state_dict tp_rank = mpu.get_tensor_model_parallel_rank() tp_size = mpu.get_tensor_model_parallel_world_size() if name in state_dict: full_weight = state_dict[name] if mutate_func is not None: full_weight = mutate_func(full_weight) tensor_chunk = torch.chunk(full_weight, tp_size, dim=chunk_dim) if tensor is not None: tensor.data.copy_(tensor_chunk[tp_rank]) else: print(f"tp_shard tensor:[{name}] not in state_dict, skip loading") def _fetch_tp_shard_tensor_gate_up(tensor, gate_name, up_name) -> torch.Tensor: """fetch gate_up tensor in tp shards""" nonlocal state_dict nonlocal mp_group tp_rank = mpu.get_tensor_model_parallel_rank() tp_size = mpu.get_tensor_model_parallel_world_size() if gate_name in state_dict and up_name in state_dict: gate_weight = state_dict[gate_name] up_weight = state_dict[up_name] new_gate_up_weight = torch.empty( config.intermediate_size * 2, config.hidden_size, dtype=params_dtype, device=get_device_id() ) for i in range(tp_size): intermediate_size_tp = config.intermediate_size // tp_size gate_weight_tp = gate_weight[i * intermediate_size_tp : (i + 1) * intermediate_size_tp] up_weight_tp = up_weight[i * intermediate_size_tp : (i + 1) * intermediate_size_tp] new_gate_up_weight[intermediate_size_tp * 2 * i : intermediate_size_tp * 2 * (i + 1)].copy_( torch.cat([gate_weight_tp, up_weight_tp], dim=0) ) tensor_chunk = torch.chunk(new_gate_up_weight, tp_size, dim=0) if tensor is not None: tensor.data.copy_(tensor_chunk[tp_rank]) else: print(f"tp_shard tensor:[{gate_name}, {up_name}] not in state_dict, skip loading") def _fetch_tp_shard_tensor_qkv(tensor, q_name, k_name, v_name) -> torch.Tensor: """fetch tensor in tp shards across mp_group""" nonlocal state_dict nonlocal mp_group tp_rank = mpu.get_tensor_model_parallel_rank() tp_size = mpu.get_tensor_model_parallel_world_size() assert q_name in state_dict and k_name in state_dict and v_name in state_dict full_weight_q = state_dict[q_name] full_weight_k = state_dict[k_name] full_weight_v = state_dict[v_name] hidden_size_per_head = config.hidden_size // config.num_attention_heads if config.num_key_value_heads >= tp_size: q_size_tp = config.hidden_size // tp_size kv_size_tp = hidden_size_per_head * config.num_key_value_heads // tp_size total_size = q_size_tp + 2 * kv_size_tp new_weight_qkv = torch.empty( total_size * tp_size, config.hidden_size, dtype=params_dtype, device=get_device_id() ) for i in range(tp_size): q_part = full_weight_q[i * q_size_tp : (i + 1) * q_size_tp] k_part = full_weight_k[i * kv_size_tp : (i + 1) * 
kv_size_tp] v_part = full_weight_v[i * kv_size_tp : (i + 1) * kv_size_tp] new_weight_qkv[i * total_size : (i + 1) * total_size].copy_(torch.cat([q_part, k_part, v_part], dim=0)) else: q_size_tp = config.hidden_size // tp_size kv_size_tp = hidden_size_per_head total_size = q_size_tp + 2 * kv_size_tp new_weight_qkv = torch.empty( total_size * tp_size, config.hidden_size, dtype=params_dtype, device=get_device_id() ) for i in range(tp_size): q_part = full_weight_q[i * q_size_tp : (i + 1) * q_size_tp] start_idx = i * config.num_key_value_heads // tp_size * hidden_size_per_head end_idx = (i * config.num_key_value_heads // tp_size + 1) * hidden_size_per_head k_part = full_weight_k[start_idx:end_idx] v_part = full_weight_v[start_idx:end_idx] new_weight_qkv[i * total_size : (i + 1) * total_size].copy_(torch.cat([q_part, k_part, v_part], dim=0)) tensor_chunk = torch.chunk(new_weight_qkv, tp_size, dim=0) if tensor is not None: tensor.data.copy_(tensor_chunk[tp_rank]) # Embeddings # ------------------- print_rank_0("loading embeddings...") gpt_model_module = _get_gpt_model(models[0]) embed_tokens_weight = None if pp_rank == 0: embed_tokens_weight = gpt_model_module.model.embed_tokens.weight _fetch_tp_shard_tensor_vocab(embed_tokens_weight, "model.embed_tokens.weight") # Transformer layers # ------------------- layer_map = _megatron_calc_layer_map(config) pp_rank = mpu.get_pipeline_model_parallel_rank() pp_size = mpu.get_pipeline_model_parallel_world_size() num_layer_per_pp = config.num_hidden_layers // pp_size vpp_size = mpu.get_virtual_pipeline_model_parallel_world_size() layer_list = [] if vpp_size is not None: for vpp_rank in range(vpp_size): num_layer_vpp_chunk = num_layer_per_pp // vpp_size num_layer_this_model = num_layer_vpp_chunk offset = vpp_rank * (config.num_hidden_layers // mpu.get_virtual_pipeline_model_parallel_world_size()) + ( mpu.get_pipeline_model_parallel_rank() * num_layer_vpp_chunk ) layer_list.extend(list(range(offset, offset + num_layer_this_model))) else: num_layer_this_model = num_layer_per_pp offset = pp_rank * num_layer_per_pp layer_list.extend(list(range(offset, offset + num_layer_this_model))) for layer in layer_list: print_rank_0(f"loading layer #{layer}...") layer_name = f"model.layers.{layer}" dst_pp_rank, dst_virtual_pp_rank, dst_layer_idx = layer_map[layer] gpt_model_module = _get_gpt_model(models[dst_virtual_pp_rank]) sync_layer = gpt_model_module.model.layers[dst_layer_idx] _fetch_tensor( sync_layer.input_layernorm.weight if dst_pp_rank == pp_rank else None, f"{layer_name}.input_layernorm.weight", ) _fetch_tp_shard_tensor_qkv( sync_layer.self_attn.qkv_proj.weight if dst_pp_rank == pp_rank else None, f"{layer_name}.self_attn.q_proj.weight", f"{layer_name}.self_attn.k_proj.weight", f"{layer_name}.self_attn.v_proj.weight", ) _fetch_tp_shard_tensor( sync_layer.self_attn.o_proj.weight if dst_pp_rank == pp_rank else None, f"{layer_name}.self_attn.o_proj.weight", chunk_dim=1, ) _fetch_tensor( sync_layer.post_attention_layernorm.weight if dst_pp_rank == pp_rank else None, f"{layer_name}.post_attention_layernorm.weight", ) _fetch_tp_shard_tensor_gate_up( sync_layer.mlp.gate_up_proj.weight if dst_pp_rank == pp_rank else None, f"{layer_name}.mlp.gate_proj.weight", f"{layer_name}.mlp.up_proj.weight", ) _fetch_tp_shard_tensor( sync_layer.mlp.down_proj.weight if dst_pp_rank == pp_rank else None, f"{layer_name}.mlp.down_proj.weight", chunk_dim=1, ) # Final Layernorm # ------------------- print_rank_0("loading final layernorm...") gpt_model_module = _get_gpt_model(models[-1]) 
_fetch_tensor( getattr(gpt_model_module.model.norm, "weight", None), "model.norm.weight", ) print_rank_0("loading lm_head...") if pp_rank + 1 == pp_size: lm_head_weight = gpt_model_module.lm_head.weight if is_value_model: if "lm_head.weight" in state_dict and state_dict["lm_head.weight"].shape[0] == 1: _fetch_tensor(lm_head_weight, "lm_head.weight") print_rank_0("load lm_head weight") elif "reward_head.weight" in state_dict and state_dict["reward_head.weight"].shape[0] == 1: _fetch_tensor(lm_head_weight, "reward_head.weight") print_rank_0("load lm_head from value_head weight") else: _fetch_tensor(None, "lm_head.weight") print_rank_0("fail to match lm_head in value_model") else: _fetch_tp_shard_tensor(lm_head_weight, "lm_head.weight") dist.barrier() get_torch_device().empty_cache() print_rank_0(f"loading megatron ckpt done, time elapsed {time.time() - start_time}s") ================================================ FILE: verl_distillation/verl/models/llama/megatron/checkpoint_utils/llama_loader_depracated.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import time import torch import torch.distributed as dist from verl.utils.device import get_device_id, get_torch_device def _megatron_calc_layer_map(config): """Calculate the mapping of global layer_idx to local layer_idx Returns: layer_map (Dict: int -> tuple(int, int, int)): mapping from the global layer index to a tuple of (pp_rank, virtual_pp_rank, layer_idx inside model) """ from megatron.core import mpu print(f"get megatron data parallel size: {mpu.get_data_parallel_world_size()}") pp_size = mpu.get_pipeline_model_parallel_world_size() virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1 layer_map = dict() num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers for pp_rank_idx in range(pp_size): for virtual_pp_rank_idx in range(virtual_pp_size): layer_offset = ( virtual_pp_rank_idx * (config.num_hidden_layers // virtual_pp_size) + pp_rank_idx * num_layers_per_model ) for layer_idx in range(num_layers_per_model): layer_map[layer_offset + layer_idx] = ( pp_rank_idx, virtual_pp_rank_idx, layer_idx, ) return layer_map def load_state_dict_to_megatron_llama( state_dict, wrapped_models, config, params_dtype, is_value_model=False, tie_word_embeddings=False ): """Load merged state_dict to sharded Megatron module in training.""" from megatron.core import DistributedDataParallel as LocalDDP from megatron.core import mpu from megatron.core.transformer.module import Float16Module from torch.nn.parallel import DistributedDataParallel as torchDDP from verl.utils.logger import print_rank_0 from verl.utils.megatron_utils import unwrap_model start_time = time.time() def _get_gpt_model(model): return model def broadcast_params(module): for param in module.parameters(): torch.distributed.broadcast( param.data, 
src=mpu.get_data_parallel_src_rank(), group=mpu.get_data_parallel_group() ) dp_rank = mpu.get_data_parallel_rank() pp_rank = mpu.get_pipeline_model_parallel_rank() pp_size = mpu.get_pipeline_model_parallel_world_size() virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1 mp_group = mpu.get_model_parallel_group() if torch.distributed.get_rank() == 0: assert mp_group.rank() == 0, f"mp_rank:[{mp_group.rank}] != 0 on rank #0" assert pp_rank == 0, f"pp_rank:[{pp_rank}] != 0 on rank #0" assert dp_rank == 0, f"dp_rank:[{dp_rank}] != 0 on rank #0" if not isinstance(wrapped_models, list | tuple): wrapped_models = list(wrapped_models) assert len(wrapped_models) == virtual_pp_size num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers, ( f"num_layers_per_model: {num_layers_per_model} * pp_size: {pp_size} * virtual_pp_size " f"{virtual_pp_size} != config.num_hidden_layers: {config.num_hidden_layers}" ) models = [None] * len(wrapped_models) for i, wrapped_model in enumerate(wrapped_models): models[i] = unwrap_model(wrapped_model, (torchDDP, LocalDDP, Float16Module)) gpt_model_module = _get_gpt_model(models[i]) assert len(gpt_model_module.model.layers) == num_layers_per_model def _broadcast_tensor(tensor, name) -> torch.Tensor: """broadcast tensor from rank0 across mp_group""" nonlocal state_dict nonlocal mp_group if torch.distributed.get_rank() == 0: if name in state_dict: weight = state_dict[name] tensor_shape = weight.shape else: tensor_shape = None else: weight = None tensor_shape = None obj_list = [tensor_shape] dist.broadcast_object_list(obj_list, src=0, group=mp_group) tensor_shape = obj_list[0] if tensor_shape is None: # all or none ranks in the mp_group should reach here print_rank_0(f"tensor:[{name}] not in state_dict, skip load") return if tensor is None: tensor = torch.empty( tensor_shape, dtype=params_dtype, device=get_device_id(), requires_grad=False, ) if torch.distributed.get_rank() == 0: tensor.data.copy_(weight) dist.broadcast(tensor, src=0, group=mp_group) def _broadcast_tp_shard_tensor_vocab(tensor, name, chunk_dim=0, mutate_func=None) -> torch.Tensor: """broadcast tensor in tp shards across mp_group""" nonlocal state_dict nonlocal mp_group tp_rank = mpu.get_tensor_model_parallel_rank() tp_size = mpu.get_tensor_model_parallel_world_size() if torch.distributed.get_rank() == 0: if name in state_dict: full_weight = state_dict[name] if mutate_func is not None: full_weight = mutate_func(full_weight) tensor_chunk = torch.chunk(full_weight, tp_size, dim=chunk_dim) chunk_shape = tensor_chunk[0].shape else: chunk_shape = None else: chunk_shape = None obj_list = [chunk_shape] dist.broadcast_object_list(obj_list, src=0, group=mp_group) chunk_shape = obj_list[0] if chunk_shape is None: # all or none ranks in the mp_group should reach here print_rank_0(f"tp_shard tensor:[{name}] not in state_dict, skip loading") return if tensor is None: sync_tensor = torch.empty( chunk_shape, dtype=params_dtype, device=get_device_id(), requires_grad=False, ) else: assert tensor.shape == chunk_shape, ( f"rank #{torch.distributed.get_rank()} tensor {name} shape {tensor.shape} != {chunk_shape}" ) sync_tensor = torch.empty_like(tensor, device=get_device_id(), requires_grad=False) for i in range(tp_size): if torch.distributed.get_rank() == 0: sync_tensor.data.copy_(tensor_chunk[i]) dist.broadcast(sync_tensor, src=0, group=mp_group) if (i == tp_rank) and (tensor is not None): 
tensor.data.copy_(sync_tensor) def _broadcast_tp_shard_tensor(tensor, name, chunk_dim=0, mutate_func=None) -> torch.Tensor: """broadcast tensor in tp shards across mp_group""" nonlocal state_dict nonlocal mp_group tp_rank = mpu.get_tensor_model_parallel_rank() tp_size = mpu.get_tensor_model_parallel_world_size() if torch.distributed.get_rank() == 0: if name in state_dict: full_weight = state_dict[name] if mutate_func is not None: full_weight = mutate_func(full_weight) tensor_chunk = torch.chunk(full_weight, tp_size, dim=chunk_dim) chunk_shape = tensor_chunk[0].shape else: chunk_shape = None else: chunk_shape = None obj_list = [chunk_shape] dist.broadcast_object_list(obj_list, src=0, group=mp_group) chunk_shape = obj_list[0] if chunk_shape is None: # all or none ranks in the mp_group should reach here print_rank_0(f"tp_shard tensor:[{name}] not in state_dict, skip loading") return if tensor is None: sync_tensor = torch.empty( chunk_shape, dtype=params_dtype, device=get_device_id(), requires_grad=False, ) else: assert tensor.shape == chunk_shape, ( f"rank #{torch.distributed.get_rank()} tensor {name} shape {tensor.shape} != {chunk_shape}" ) sync_tensor = torch.empty_like(tensor, device=get_device_id(), requires_grad=False) for i in range(tp_size): if torch.distributed.get_rank() == 0: sync_tensor.data.copy_(tensor_chunk[i]) dist.broadcast(sync_tensor, src=0, group=mp_group) if (i == tp_rank) and (tensor is not None): tensor.data.copy_(sync_tensor) def _broadcast_tp_shard_tensor_gate_up(tensor, gate_name, up_name) -> torch.Tensor: """broadcast tensor in tp shards across mp_group""" nonlocal state_dict nonlocal mp_group tp_rank = mpu.get_tensor_model_parallel_rank() tp_size = mpu.get_tensor_model_parallel_world_size() if torch.distributed.get_rank() == 0: gate_weight = state_dict[gate_name] up_weight = state_dict[up_name] new_gate_up_weight = torch.empty( config.intermediate_size * 2, config.hidden_size, dtype=params_dtype, device=get_device_id() ) for i in range(tp_size): intermediate_size_tp = config.intermediate_size // tp_size gate_weight_tp = gate_weight[i * intermediate_size_tp : (i + 1) * intermediate_size_tp] up_weight_tp = up_weight[i * intermediate_size_tp : (i + 1) * intermediate_size_tp] new_gate_up_weight[intermediate_size_tp * 2 * i : intermediate_size_tp * 2 * (i + 1)].copy_( torch.cat([gate_weight_tp, up_weight_tp], dim=0) ) tensor_chunk = torch.chunk(new_gate_up_weight, tp_size, dim=0) chunk_shape = tensor_chunk[0].shape else: chunk_shape = None obj_list = [chunk_shape] dist.broadcast_object_list(obj_list, src=0, group=mp_group) chunk_shape = obj_list[0] if chunk_shape is None: # all or none ranks in the mp_group should reach here print_rank_0(f"tp_shard tensor:[{gate_name, up_name}] not in state_dict, skip loading") return if tensor is None: sync_tensor = torch.empty( chunk_shape, dtype=params_dtype, device=get_device_id(), requires_grad=False, ) else: assert tensor.shape == chunk_shape, ( f"rank #{torch.distributed.get_rank() == 0:} tensor {gate_name, up_name} shape " f"{tensor.shape} != {chunk_shape}" ) sync_tensor = torch.empty_like(tensor, device=get_device_id(), requires_grad=False) for i in range(tp_size): if torch.distributed.get_rank() == 0: sync_tensor.data.copy_(tensor_chunk[i]) dist.broadcast(sync_tensor, src=0, group=mp_group) if (i == tp_rank) and (tensor is not None): tensor.data.copy_(sync_tensor) def _broadcast_tp_shard_tensor_qkv(tensor, q_name, k_name, v_name) -> torch.Tensor: """broadcast tensor in tp shards across mp_group""" nonlocal state_dict nonlocal 
mp_group tp_rank = mpu.get_tensor_model_parallel_rank() tp_size = mpu.get_tensor_model_parallel_world_size() if torch.distributed.get_rank() == 0: assert q_name in state_dict and k_name in state_dict and v_name in state_dict full_weight_q = state_dict[q_name] full_weight_k = state_dict[k_name] full_weight_v = state_dict[v_name] hidden_size_per_head = config.hidden_size // config.num_attention_heads if config.num_key_value_heads >= tp_size: q_size_tp = config.hidden_size // tp_size kv_size_tp = hidden_size_per_head * config.num_key_value_heads // tp_size total_size = q_size_tp + 2 * kv_size_tp new_weight_qkv = torch.empty( total_size * tp_size, config.hidden_size, dtype=params_dtype, device=get_device_id() ) for i in range(tp_size): q_part = full_weight_q[i * q_size_tp : (i + 1) * q_size_tp] k_part = full_weight_k[i * kv_size_tp : (i + 1) * kv_size_tp] v_part = full_weight_v[i * kv_size_tp : (i + 1) * kv_size_tp] new_weight_qkv[i * total_size : (i + 1) * total_size].copy_( torch.cat([q_part, k_part, v_part], dim=0) ) else: q_size_tp = config.hidden_size // tp_size kv_size_tp = hidden_size_per_head total_size = q_size_tp + 2 * kv_size_tp new_weight_qkv = torch.empty( total_size * tp_size, config.hidden_size, dtype=params_dtype, device=get_device_id() ) for i in range(tp_size): q_part = full_weight_q[i * q_size_tp : (i + 1) * q_size_tp] start_idx = i * config.num_key_value_heads // tp_size * hidden_size_per_head end_idx = (i * config.num_key_value_heads // tp_size + 1) * hidden_size_per_head k_part = full_weight_k[start_idx:end_idx] v_part = full_weight_v[start_idx:end_idx] new_weight_qkv[i * total_size : (i + 1) * total_size].copy_( torch.cat([q_part, k_part, v_part], dim=0) ) tensor_chunk = torch.chunk(new_weight_qkv, tp_size, dim=0) chunk_shape = tensor_chunk[0].shape else: chunk_shape = None obj_list = [chunk_shape] dist.broadcast_object_list(obj_list, src=0, group=mp_group) chunk_shape = obj_list[0] if chunk_shape is None: # all or none ranks in the mp_group should reach here print_rank_0(f"tp_shard tensor:[{q_name, k_name, v_name}] not in state_dict, skip loading") return if tensor is None: sync_tensor = torch.empty( chunk_shape, dtype=params_dtype, device=get_device_id(), requires_grad=False, ) else: assert tensor.shape == chunk_shape, ( f"rank #{torch.distributed.get_rank()} tensor {q_name} shape {tensor.shape} != {chunk_shape}" ) sync_tensor = torch.empty_like(tensor, device=get_device_id(), requires_grad=False) for i in range(tp_size): if torch.distributed.get_rank() == 0: sync_tensor.data.copy_(tensor_chunk[i]) dist.broadcast(sync_tensor, src=0, group=mp_group) if (i == tp_rank) and (tensor is not None): tensor.data.copy_(sync_tensor) if dp_rank == 0: # Embeddings # ------------------- print_rank_0("loading embeddings...") gpt_model_module = _get_gpt_model(models[0]) embed_tokens_weight = None if pp_rank == 0: embed_tokens_weight = gpt_model_module.model.embed_tokens.weight _broadcast_tp_shard_tensor_vocab(embed_tokens_weight, "model.embed_tokens.weight") # Transformer layers # ------------------- layer_map = _megatron_calc_layer_map(config) for layer in range(config.num_hidden_layers): print_rank_0(f"loading layer #{layer}...") layer_name = f"model.layers.{layer}" dst_pp_rank, dst_virtual_pp_rank, dst_layer_idx = layer_map[layer] gpt_model_module = _get_gpt_model(models[dst_virtual_pp_rank]) sync_layer = gpt_model_module.model.layers[dst_layer_idx] _broadcast_tensor( sync_layer.input_layernorm.weight if dst_pp_rank == pp_rank else None, f"{layer_name}.input_layernorm.weight", ) 
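            # Megatron fuses q/k/v into a single qkv_proj weight whose rows are laid
            # out as per-TP-rank [q_i; k_i; v_i] blocks, so the helper below re-packs
            # the three separate HF weights before broadcasting each rank its shard.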
_broadcast_tp_shard_tensor_qkv( sync_layer.self_attn.qkv_proj.weight if dst_pp_rank == pp_rank else None, f"{layer_name}.self_attn.q_proj.weight", f"{layer_name}.self_attn.k_proj.weight", f"{layer_name}.self_attn.v_proj.weight", ) _broadcast_tp_shard_tensor( sync_layer.self_attn.o_proj.weight if dst_pp_rank == pp_rank else None, f"{layer_name}.self_attn.o_proj.weight", chunk_dim=1, ) _broadcast_tensor( sync_layer.post_attention_layernorm.weight if dst_pp_rank == pp_rank else None, f"{layer_name}.post_attention_layernorm.weight", ) _broadcast_tp_shard_tensor_gate_up( sync_layer.mlp.gate_up_proj.weight if dst_pp_rank == pp_rank else None, f"{layer_name}.mlp.gate_proj.weight", f"{layer_name}.mlp.up_proj.weight", ) _broadcast_tp_shard_tensor( sync_layer.mlp.down_proj.weight if dst_pp_rank == pp_rank else None, f"{layer_name}.mlp.down_proj.weight", chunk_dim=1, ) # Final Layernorm # ------------------- print_rank_0("loading final layernorm...") gpt_model_module = _get_gpt_model(models[-1]) _broadcast_tensor( getattr(gpt_model_module.model.norm, "weight", None), "model.norm.weight", ) print_rank_0("loading lm_head...") lm_head_weight = None if pp_rank + 1 == pp_size: lm_head_weight = gpt_model_module.lm_head.weight if is_value_model: if "lm_head.weight" in state_dict and state_dict["lm_head.weight"].shape[0] == 1: _broadcast_tensor(lm_head_weight, "lm_head.weight") print_rank_0("load lm_head weight") elif "reward_head.weight" in state_dict and state_dict["reward_head.weight"].shape[0] == 1: _broadcast_tensor(lm_head_weight, "reward_head.weight") print_rank_0("load lm_head from value_head weight") else: _broadcast_tensor(None, "lm_head.weight") print_rank_0("fail to match lm_head in value_model") else: _broadcast_tp_shard_tensor(lm_head_weight, "lm_head.weight") dist.barrier() # Broadcast weights inside data parallel groups for wrapped_model in wrapped_models: broadcast_params(wrapped_model) get_torch_device().empty_cache() print_rank_0(f"loading megatron ckpt done, time elapsed {time.time() - start_time}s") ================================================ FILE: verl_distillation/verl/models/llama/megatron/checkpoint_utils/llama_saver.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
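As an aside, the fused QKV layout that both the loader above and the saver below manipulate can be round-tripped in plain torch. A minimal sketch, assuming the `num_key_value_heads >= tp_size` case and made-up sizes (nothing below exists in the repo):

import torch

hidden_size, num_heads, num_kv_heads, tp_size = 16, 4, 2, 2
head_dim = hidden_size // num_heads
q = torch.randn(hidden_size, hidden_size)               # q_proj.weight
k = torch.randn(num_kv_heads * head_dim, hidden_size)   # k_proj.weight
v = torch.randn(num_kv_heads * head_dim, hidden_size)   # v_proj.weight

# Pack: rank i's slice of the fused weight is [q_i; k_i; v_i] stacked on dim 0.
q_tp, k_tp, v_tp = q.chunk(tp_size), k.chunk(tp_size), v.chunk(tp_size)
fused = torch.cat([torch.cat([q_tp[i], k_tp[i], v_tp[i]]) for i in range(tp_size)])

# Unpack: invert the per-rank interleaving to recover the three HF weights.
q_size = hidden_size // tp_size
kv_size = num_kv_heads * head_dim // tp_size
parts = fused.split(q_size + 2 * kv_size)
q2 = torch.cat([p[:q_size] for p in parts])
k2 = torch.cat([p[q_size : q_size + kv_size] for p in parts])
v2 = torch.cat([p[q_size + kv_size :] for p in parts])
assert torch.equal(q, q2) and torch.equal(k, k2) and torch.equal(v, v2)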
import time import torch import torch.distributed as dist from megatron.core import mpu from megatron.core.distributed import DistributedDataParallel as LocalDDP from megatron.core.transformer.module import Float16Module from torch.nn.parallel import DistributedDataParallel as torchDDP from verl.utils.device import get_device_id, get_torch_device from verl.utils.logger import print_rank_0 from verl.utils.megatron_utils import unwrap_model def _megatron_calc_global_rank(tp_rank: int = 0, dp_rank: int = 0, pp_rank: int = 0): """given TP,DP,PP rank to get the global rank.""" tp_size = mpu.get_tensor_model_parallel_world_size() dp_size = mpu.get_data_parallel_world_size() pp_size = mpu.get_pipeline_model_parallel_world_size() assert tp_size * dp_size * pp_size == torch.distributed.get_world_size(), ( f"{tp_size} x {dp_size} x {pp_size} != {torch.distributed.get_world_size()}" ) # We only support TP-DP-PP grouping, for correctness when resharding return (pp_rank * dp_size + dp_rank) * tp_size + tp_rank def _megatron_calc_layer_map(config): """Calculate the mapping of global layer_idx to local layer_idx Returns: layer_map (Dict: int -> tuple(int, int, int)): mapping from the global layer index to a tuple of (pp_rank, virtual_pp_rank, layer_idx inside model) """ from megatron.core import mpu pp_size = mpu.get_pipeline_model_parallel_world_size() virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1 layer_map = dict() num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers for pp_rank_idx in range(pp_size): for virtual_pp_rank_idx in range(virtual_pp_size): layer_offset = ( virtual_pp_rank_idx * (config.num_hidden_layers // virtual_pp_size) + pp_rank_idx * num_layers_per_model ) for layer_idx in range(num_layers_per_model): layer_map[layer_offset + layer_idx] = ( pp_rank_idx, virtual_pp_rank_idx, layer_idx, ) return layer_map def merge_megatron_ckpt_llama(wrapped_models, config, dtype, is_value_model=False, tie_word_embeddings=False): """Merge sharded parameters of a Megatron module into a merged checkpoint. Args: wrapped_models (list of megatron.core.distributed.DistributedDataParallel): The local DDP wrapped megatron modules. config (str or None): HF config for model dtype: model params type is_value_model: if model is value model tie_word_embeddings: tie_word_embeddings, not used in llama, only to keep same interface with qwen2 Returns: state_dict (dict): The merged state_dict in rank 0, and an empty dictionary in other ranks. 
""" start_time = time.time() def _get_gpt_model(model): return model dp_rank = mpu.get_data_parallel_rank() pp_size = mpu.get_pipeline_model_parallel_world_size() pp_rank = mpu.get_pipeline_model_parallel_rank() virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1 mp_group = mpu.get_model_parallel_group() if dist.get_rank() == 0: assert mp_group.rank() == 0, f"mp_rank:[{mp_group.rank}] != 0 on rank #0" assert pp_rank == 0, f"pp_rank:[{pp_rank}] != 0 on rank #0" assert dp_rank == 0, f"dp_rank:[{dp_rank}] != 0 on rank #0" if not isinstance(wrapped_models, list | tuple): wrapped_models = list(wrapped_models) assert len(wrapped_models) == virtual_pp_size num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers models = [None] * len(wrapped_models) for i, wrapped_model in enumerate(wrapped_models): models[i] = unwrap_model(wrapped_model, (torchDDP, LocalDDP, Float16Module)) assert len(models[i].model.layers) == num_layers_per_model, ( "len model layers {} not equal to num_layers_per_model {}".format( len(models[i].model.layers), num_layers_per_model ) ) state_dict = dict() def _get_cpu_tensor(tensor: torch.Tensor): if tensor is None: return None if tensor.device == torch.device("cpu"): return tensor.detach().clone() return tensor.detach().cpu() def _broadcast_tensor(tensor, name, src_pp_rank) -> torch.Tensor: """broadcast tensor across mp_group""" nonlocal state_dict nonlocal mp_group src_rank = _megatron_calc_global_rank(tp_rank=0, dp_rank=0, pp_rank=src_pp_rank) if torch.distributed.get_rank() == src_rank: if tensor is None: weight = None tensor_shape = None else: weight = tensor tensor_shape = weight.shape else: weight = None tensor_shape = None obj_list = [tensor_shape] dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group) tensor_shape = obj_list[0] if tensor_shape is None: # all or none ranks in the mp_group should reach here print_rank_0(f"tensor:[{name}] not exist, skip collect") return if weight is None: weight = torch.empty( tensor_shape, dtype=dtype, device=get_device_id(), requires_grad=False, ) dist.broadcast(weight, src=src_rank, group=mp_group) if torch.distributed.get_rank() == 0: state_dict[name] = _get_cpu_tensor(weight) def _broadcast_tp_shard_tensor(tensor, name, src_pp_rank, concat_dim=0, mutate_func=None) -> torch.Tensor: """broadcast tensor in tp shards across mp_group""" nonlocal state_dict nonlocal mp_group tp_size = mpu.get_tensor_model_parallel_world_size() src_rank = _megatron_calc_global_rank(tp_rank=0, dp_rank=0, pp_rank=src_pp_rank) chunk_shape = tensor.shape if torch.distributed.get_rank() == src_rank else None obj_list = [chunk_shape] dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group) chunk_shape = obj_list[0] if chunk_shape is None: # all or none ranks in the mp_group should reach here print_rank_0(f"tp_shard tensor:[{name}] not exist, skip collecting") return buffer_tensor = torch.empty( chunk_shape, dtype=dtype, device=get_device_id(), requires_grad=False, ) chunk_tensors = [None] * tp_size for i in range(tp_size): cur_src_rank = _megatron_calc_global_rank(tp_rank=i, dp_rank=0, pp_rank=src_pp_rank) sync_tensor = tensor if torch.distributed.get_rank() == cur_src_rank else buffer_tensor dist.broadcast(sync_tensor, src=cur_src_rank, group=mp_group) if torch.distributed.get_rank() == 0: chunk_tensors[i] = _get_cpu_tensor(sync_tensor) if torch.distributed.get_rank() == 0: full_tensor = 
torch.concat(chunk_tensors, dim=concat_dim) if mutate_func is not None: full_tensor = mutate_func(full_tensor) state_dict[name] = full_tensor def _broadcast_tp_shard_tensor_gate_up(tensor, gate_name, up_name, src_pp_rank) -> torch.Tensor: """broadcast tensor in tp shards across mp_group""" nonlocal state_dict nonlocal mp_group tp_size = mpu.get_tensor_model_parallel_world_size() src_rank = _megatron_calc_global_rank(tp_rank=0, dp_rank=0, pp_rank=src_pp_rank) chunk_shape = tensor.shape if torch.distributed.get_rank() == src_rank else None obj_list = [chunk_shape] dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group) chunk_shape = obj_list[0] if chunk_shape is None: # all or none ranks in the mp_group should reach here print_rank_0(f"tp_shard tensor:[{gate_name, up_name}] not exist, skip collecting") return buffer_tensor = torch.empty( chunk_shape, dtype=dtype, device=get_device_id(), requires_grad=False, ) chunk_tensors = [None] * tp_size for i in range(tp_size): cur_src_rank = _megatron_calc_global_rank(tp_rank=i, dp_rank=0, pp_rank=src_pp_rank) sync_tensor = tensor if torch.distributed.get_rank() == cur_src_rank else buffer_tensor dist.broadcast(sync_tensor, src=cur_src_rank, group=mp_group) if torch.distributed.get_rank() == 0: chunk_tensors[i] = _get_cpu_tensor(sync_tensor) if torch.distributed.get_rank() == 0: full_tensor = torch.concat(chunk_tensors, dim=0) intermediate_size_tp = config.intermediate_size // tp_size gate_weight_list = [] up_weight_list = [] for i in range(tp_size): gate_up_weight_tp = full_tensor[intermediate_size_tp * 2 * i : intermediate_size_tp * 2 * (i + 1)] gate_weight_tp = gate_up_weight_tp[:intermediate_size_tp] up_weight_tp = gate_up_weight_tp[intermediate_size_tp:] gate_weight_list.append(gate_weight_tp) up_weight_list.append(up_weight_tp) state_dict[gate_name] = torch.cat(gate_weight_list, dim=0) state_dict[up_name] = torch.cat(up_weight_list, dim=0) def _broadcast_tp_shard_tensor_qkv(tensor, q_name, k_name, v_name, src_pp_rank): """broadcast tensor in tp shards across mp_group""" nonlocal state_dict nonlocal mp_group tp_size = mpu.get_tensor_model_parallel_world_size() src_rank = _megatron_calc_global_rank(tp_rank=0, dp_rank=0, pp_rank=src_pp_rank) chunk_shape = tensor.shape if torch.distributed.get_rank() == src_rank else None obj_list = [chunk_shape] dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group) chunk_shape = obj_list[0] if chunk_shape is None: # all or none ranks in the mp_group should reach here print_rank_0(f"tp_shard tensor:[{q_name}] not exist, skip collecting") return buffer_tensor = torch.empty( chunk_shape, dtype=dtype, device=get_device_id(), requires_grad=False, ) chunk_tensors = [None] * tp_size for i in range(tp_size): cur_src_rank = _megatron_calc_global_rank(tp_rank=i, dp_rank=0, pp_rank=src_pp_rank) sync_tensor = tensor if torch.distributed.get_rank() == cur_src_rank else buffer_tensor dist.broadcast(sync_tensor, src=cur_src_rank, group=mp_group) if torch.distributed.get_rank() == 0: chunk_tensors[i] = _get_cpu_tensor(sync_tensor) if torch.distributed.get_rank() == 0: full_tensor = torch.concat(chunk_tensors, dim=0) q_weight_list = [] k_weight_list = [] v_weight_list = [] hidden_size_per_head = config.hidden_size // config.num_attention_heads if config.num_key_value_heads >= tp_size: q_size_tp = config.hidden_size // tp_size kv_size_tp = hidden_size_per_head * config.num_key_value_heads // tp_size total_size = q_size_tp + 2 * kv_size_tp for i in range(tp_size): qkv_part = full_tensor[i * total_size : (i + 1) 
* total_size] q_part = qkv_part[:q_size_tp] k_part = qkv_part[q_size_tp : q_size_tp + kv_size_tp] v_part = qkv_part[q_size_tp + kv_size_tp : total_size] q_weight_list.append(q_part) k_weight_list.append(k_part) v_weight_list.append(v_part) else: q_size_tp = config.hidden_size // tp_size kv_size_tp = hidden_size_per_head total_size = q_size_tp + 2 * kv_size_tp for i in range(tp_size): qkv_part = full_tensor[i * total_size : (i + 1) * total_size] q_part = qkv_part[:q_size_tp] k_part = qkv_part[q_size_tp : q_size_tp + kv_size_tp] v_part = qkv_part[q_size_tp + kv_size_tp : total_size] q_weight_list.append(q_part) if i * config.num_key_value_heads % tp_size == 0: k_weight_list.append(k_part) v_weight_list.append(v_part) state_dict[q_name] = torch.cat(q_weight_list, dim=0) state_dict[k_name] = torch.cat(k_weight_list, dim=0) state_dict[v_name] = torch.cat(v_weight_list, dim=0) # empty cache before collecting weights get_torch_device().empty_cache() # Embeddings # ------------------- if dp_rank == 0: # Embeddings # ------------------- print_rank_0("collecting embeddings...") gpt_model_module = _get_gpt_model(models[0]) _broadcast_tp_shard_tensor( gpt_model_module.model.embed_tokens.weight if pp_rank == 0 else None, "model.embed_tokens.weight", src_pp_rank=0, ) # Transformer layers # ------------------- layer_map = _megatron_calc_layer_map(config) for layer in range(config.num_hidden_layers): print_rank_0(f"collecting layer #{layer}...") layer_name = f"model.layers.{layer}" src_pp_rank, src_virtual_pp_rank, src_layer_idx = layer_map[layer] gpt_model_module = _get_gpt_model(models[src_virtual_pp_rank]) sync_layer = gpt_model_module.model.layers[src_layer_idx] _broadcast_tensor( sync_layer.input_layernorm.weight, f"{layer_name}.input_layernorm.weight", src_pp_rank=src_pp_rank, ) _broadcast_tp_shard_tensor_qkv( sync_layer.self_attn.qkv_proj.weight, f"{layer_name}.self_attn.q_proj.weight", f"{layer_name}.self_attn.k_proj.weight", f"{layer_name}.self_attn.v_proj.weight", src_pp_rank=src_pp_rank, ) _broadcast_tp_shard_tensor( sync_layer.self_attn.o_proj.weight, f"{layer_name}.self_attn.o_proj.weight", concat_dim=1, src_pp_rank=src_pp_rank, ) _broadcast_tensor( sync_layer.post_attention_layernorm.weight, f"{layer_name}.post_attention_layernorm.weight", src_pp_rank=src_pp_rank, ) _broadcast_tp_shard_tensor_gate_up( sync_layer.mlp.gate_up_proj.weight, f"{layer_name}.mlp.gate_proj.weight", f"{layer_name}.mlp.up_proj.weight", src_pp_rank=src_pp_rank, ) _broadcast_tp_shard_tensor( sync_layer.mlp.down_proj.weight, f"{layer_name}.mlp.down_proj.weight", concat_dim=1, src_pp_rank=src_pp_rank, ) # Final Layernorm # ------------------- print_rank_0("collecting final layernorm...") gpt_model_module = _get_gpt_model(models[-1]) _broadcast_tensor( getattr(gpt_model_module.model.norm, "weight", None), "model.norm.weight", src_pp_rank=pp_size - 1, ) print_rank_0("collecting lm_head...") if is_value_model: if pp_rank == pp_size - 1: print(f"gpt_model_module.lm_head.weight: {gpt_model_module.lm_head.weight.shape}") _broadcast_tensor( gpt_model_module.lm_head.weight if pp_rank == pp_size - 1 else None, "lm_head.weight", src_pp_rank=pp_size - 1, ) _broadcast_tensor( gpt_model_module.reward_head.weight if pp_rank == pp_size - 1 and getattr(gpt_model_module, "reward_weight", None) is not None else None, "reward_head.weight", src_pp_rank=pp_size - 1, ) else: _broadcast_tp_shard_tensor( getattr(gpt_model_module.lm_head, "weight", None) if pp_rank == pp_size - 1 else None, "lm_head.weight", src_pp_rank=pp_size - 1, ) 
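    # Only global rank 0 accumulates tensors into state_dict above; every other
    # rank returns an empty dict, as the function docstring promises.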
    dist.barrier()
    get_torch_device().empty_cache()

    if torch.distributed.get_rank() == 0:
        if dtype not in [torch.float16, torch.bfloat16, torch.float32]:
            print(f"Unknown/unsupported dtype to save: {dtype}")
            exit(1)
        for k, v in state_dict.items():
            if dtype != v.dtype:
                state_dict[k] = v.to(dtype)

    print_rank_0(f"merge megatron ckpt done, time elapsed {time.time() - start_time}s")
    return state_dict


================================================
FILE: verl_distillation/verl/models/llama/megatron/layers/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .parallel_attention import ParallelLlamaAttention
from .parallel_decoder import ParallelLlamaDecoderLayer, ParallelLlamaDecoderLayerRmPad
from .parallel_linear import (
    LinearForLastLayer,
    MergedColumnParallelLinear,
    QKVParallelLinear,
)
from .parallel_mlp import ParallelLlamaMLP
from .parallel_rmsnorm import ParallelLlamaRMSNorm

__all__ = [
    "LinearForLastLayer",
    "MergedColumnParallelLinear",
    "QKVParallelLinear",
    "ParallelLlamaAttention",
    "ParallelLlamaDecoderLayer",
    "ParallelLlamaDecoderLayerRmPad",
    "ParallelLlamaMLP",
    "ParallelLlamaRMSNorm",
]


================================================
FILE: verl_distillation/verl/models/llama/megatron/layers/parallel_attention.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
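A quick sanity check on the rotary embedding defined in this file: applying cos/sin with rotate_half is a per-pair 2-D rotation, so it preserves vector norms. A minimal standalone sketch (hypothetical dim/seq values, duplicating rotate_half locally):

import torch

def rotate_half(x):
    x1, x2 = x[..., : x.shape[-1] // 2], x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)

dim, seq = 8, 4
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
freqs = torch.einsum("i,j->ij", torch.arange(seq).float(), inv_freq)
emb = torch.cat((freqs, freqs), dim=-1)  # same layout as cos_cached/sin_cached
q = torch.randn(seq, dim)
q_rot = q * emb.cos() + rotate_half(q) * emb.sin()
assert torch.allclose(q.norm(dim=-1), q_rot.norm(dim=-1), atol=1e-5)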
import math from typing import Optional import torch import torch.nn.functional as F from einops import rearrange from flash_attn.layers.rotary import apply_rotary_emb from megatron.core import ModelParallelConfig, tensor_parallel from megatron.core import parallel_state as mpu from torch import nn from transformers import LlamaConfig from transformers.utils import is_flash_attn_2_available from verl.models.llama.megatron.layers.parallel_linear import QKVParallelLinear from verl.utils.megatron import tensor_parallel as tp_utils class LlamaRotaryEmbedding(nn.Module): def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): super().__init__() self.dim = dim self.max_position_embeddings = max_position_embeddings self.base = base inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim)) self.register_buffer("inv_freq", inv_freq, persistent=False) # Build here to make `torch.jit.trace` work. self._set_cos_sin_cache( seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype() ) def _set_cos_sin_cache(self, seq_len, device, dtype): self.max_seq_len_cached = seq_len t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) freqs = torch.einsum("i,j->ij", t, self.inv_freq) # Different from paper, but it uses a different permutation in order to obtain the same calculation emb = torch.cat((freqs, freqs), dim=-1) self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) def forward(self, x, seq_len=None): # x: [bs, num_attention_heads, seq_len, head_size] if seq_len > self.max_seq_len_cached: self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype) return ( self.cos_cached[:seq_len].to(dtype=x.dtype), self.sin_cached[:seq_len].to(dtype=x.dtype), ) class LlamaLinearScalingRotaryEmbedding(LlamaRotaryEmbedding): """LlamaRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev""" def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0): self.scaling_factor = scaling_factor super().__init__(dim, max_position_embeddings, base, device) def _set_cos_sin_cache(self, seq_len, device, dtype): self.max_seq_len_cached = seq_len t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) t = t / self.scaling_factor freqs = torch.einsum("i,j->ij", t, self.inv_freq) # Different from paper, but it uses a different permutation in order to obtain the same calculation emb = torch.cat((freqs, freqs), dim=-1) self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) class LlamaDynamicNTKScalingRotaryEmbedding(LlamaRotaryEmbedding): """LlamaRotaryEmbedding extended with Dynamic NTK scaling. 
Credits to the Reddit users /u/bloc97 and /u/emozilla""" def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0): self.scaling_factor = scaling_factor super().__init__(dim, max_position_embeddings, base, device) def _set_cos_sin_cache(self, seq_len, device, dtype): self.max_seq_len_cached = seq_len if seq_len > self.max_position_embeddings: base = self.base * ( (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1) ) ** (self.dim / (self.dim - 2)) inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim)) self.register_buffer("inv_freq", inv_freq, persistent=False) t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) freqs = torch.einsum("i,j->ij", t, self.inv_freq) # Different from paper, but it uses a different permutation in order to obtain the same calculation emb = torch.cat((freqs, freqs), dim=-1) self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) class LlamaLlama3ScalingRotaryEmbedding(LlamaRotaryEmbedding): def __init__(self, dim, config, max_position_embeddings=2048, base=10000, device=None): super().__init__(dim, max_position_embeddings, base, device) self.factor = config.rope_scaling["factor"] # `8` in the original implementation self.high_freq_factor = config.rope_scaling["high_freq_factor"] # `1` in the original implementation self.low_freq_factor = config.rope_scaling["low_freq_factor"] # `4` in the original implementation self.old_context_len = config.rope_scaling[ "original_max_position_embeddings" ] # `8192` in the original implementation low_freq_wavelen = self.old_context_len / self.low_freq_factor high_freq_wavelen = self.old_context_len / self.high_freq_factor wavelen = 2 * math.pi / self.inv_freq # wavelen < high_freq_wavelen: do nothing; wavelen > low_freq_wavelen: divide by factor inv_freq_llama = torch.where(wavelen > low_freq_wavelen, self.inv_freq / self.factor, self.inv_freq) # otherwise: interpolate between the two, using a smooth factor smooth_factor = (self.old_context_len / wavelen - self.low_freq_factor) / ( self.high_freq_factor - self.low_freq_factor ) smoothed_inv_freq = (1 - smooth_factor) * inv_freq_llama / self.factor + smooth_factor * inv_freq_llama is_medium_freq = ~(wavelen < high_freq_wavelen) * ~(wavelen > low_freq_wavelen) inv_freq = torch.where(is_medium_freq, smoothed_inv_freq, inv_freq_llama) self.register_buffer("inv_freq", inv_freq, persistent=False) # Build here to make `torch.jit.trace` work. self._set_cos_sin_cache( seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype() ) def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1) def apply_rotary_pos_emb(q, k, cos, sin, position_ids): cos = cos[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim] sin = sin[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim] q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: """ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). 
The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) """ batch, num_key_value_heads, slen, head_dim = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) class ParallelLlamaAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: LlamaConfig, megatron_config: ModelParallelConfig): super().__init__() self.config = config self.megatron_config = megatron_config self.hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.hidden_size // self.num_heads self.num_key_value_heads = config.num_key_value_heads self.num_key_value_groups = self.num_heads // self.num_key_value_heads self.max_position_embeddings = config.max_position_embeddings self.rope_theta = config.rope_theta # assign values after tp tp_size = mpu.get_tensor_model_parallel_world_size() assert self.num_heads % tp_size == 0, ( f"num_head must be divisible by tp_size. Got num_head={self.num_heads}, tp_size={tp_size}" ) assert self.num_key_value_heads % tp_size == 0, ( f"num_key_value_heads must be divisible by tp_size. Got num_key_value_heads=" f"{self.num_key_value_heads}, tp_size={tp_size}" ) self.num_heads_per_tp = self.num_heads // tp_size self.num_key_value_heads_per_tp = self.num_key_value_heads // tp_size self.hidden_size_per_tp = self.hidden_size // tp_size if (self.head_dim * self.num_heads) != self.hidden_size: raise ValueError( f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size} and " f"`num_heads`: {self.num_heads})." 
) column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear() row_kwargs = tp_utils.get_default_kwargs_for_row_parallel_linear() if megatron_config is not None: assert column_kwargs.get("config", False), "must have ModelParallelConfig" assert row_kwargs.get("config", False), "must have ModelParallelConfig" tp_utils.update_kwargs_with_config(column_kwargs, megatron_config) tp_utils.update_kwargs_with_config(row_kwargs, megatron_config) # [self.q_size, self.k_size, self.v_size] self.qkv_proj = QKVParallelLinear( input_size=self.hidden_size, num_heads=self.num_heads, num_key_value_heads=self.num_key_value_heads, head_dim=self.head_dim, bias=config.attention_bias, gather_output=False, skip_bias_add=False, **column_kwargs, ) self.q_size = self.num_heads_per_tp * self.head_dim self.k_size = self.num_key_value_heads_per_tp * self.head_dim self.v_size = self.num_key_value_heads_per_tp * self.head_dim self.o_proj = tensor_parallel.RowParallelLinear( input_size=self.num_heads * self.head_dim, output_size=self.hidden_size, bias=config.attention_bias, input_is_parallel=True, skip_bias_add=False, **row_kwargs, ) self._init_rope() def _init_rope(self): if self.config.rope_scaling is None: self.rotary_emb = LlamaRotaryEmbedding( self.head_dim, max_position_embeddings=self.max_position_embeddings, base=self.rope_theta, ) else: rope_type_key = "type" if "type" in self.config.rope_scaling else "rope_type" scaling_type = self.config.rope_scaling[rope_type_key] scaling_factor = self.config.rope_scaling["factor"] if scaling_type == "linear": self.rotary_emb = LlamaLinearScalingRotaryEmbedding( self.head_dim, max_position_embeddings=self.max_position_embeddings, scaling_factor=scaling_factor, base=self.rope_theta, ) elif scaling_type == "dynamic": self.rotary_emb = LlamaDynamicNTKScalingRotaryEmbedding( self.head_dim, max_position_embeddings=self.max_position_embeddings, scaling_factor=scaling_factor, base=self.rope_theta, ) elif scaling_type == "llama3": self.rotary_emb = LlamaLlama3ScalingRotaryEmbedding( self.head_dim, self.config, max_position_embeddings=self.max_position_embeddings, base=self.rope_theta, ) else: raise ValueError(f"Unknown RoPE scaling type {scaling_type}") def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: bsz, q_len, _ = hidden_states.size() qkv = self.qkv_proj(hidden_states)[0] query_states, key_states, value_states = qkv.split([self.q_size, self.k_size, self.v_size], dim=-1) query_states = query_states.view(bsz, q_len, self.num_heads_per_tp, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads_per_tp, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads_per_tp, self.head_dim).transpose(1, 2) kv_seq_len = key_states.shape[-2] cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) if attn_weights.size() != (bsz, self.num_heads_per_tp, 
        q_len, kv_seq_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz, self.num_heads_per_tp, q_len, kv_seq_len)}, "
                f"but is {attn_weights.size()}"
            )

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights + attention_mask

        # upcast attention to fp32
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
        attn_output = torch.matmul(attn_weights, value_states)

        if attn_output.size() != (bsz, self.num_heads_per_tp, q_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads_per_tp, q_len, self.head_dim)}, "
                f"but is {attn_output.size()}"
            )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size_per_tp)

        attn_output = self.o_proj(attn_output)[0]
        return attn_output


"""
Remove padding Attention
- Using Flash-attn 2
- Compatible with sequence parallel
"""

if is_flash_attn_2_available():
    from flash_attn import flash_attn_varlen_func
    from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input  # noqa: F401


def apply_rotary_pos_emb_rmpad(q, k, cos, sin, position_ids, indices, sequence_length):
    batch_size = position_ids.shape[0]
    q = pad_input(q, indices, batch_size, sequence_length)  # (batch_size, seqlen, num_head, head_dim)
    k = pad_input(k, indices, batch_size, sequence_length)
    cos = cos[position_ids].unsqueeze(2)  # [bs, seq_len, 1, dim]
    sin = sin[position_ids].unsqueeze(2)  # [bs, seq_len, 1, dim]
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)

    q_embed = index_first_axis(rearrange(q_embed, "b s ... -> (b s) ..."), indices)
    k_embed = index_first_axis(rearrange(k_embed, "b s ... -> (b s) ..."), indices)

    return q_embed, k_embed


# use flash-attn rotary embeddings with rmpad
# cos/sin should be: (seq_length, rotary_dim / 2)
def apply_rotary_pos_emb_rmpad_flash(q, k, cos, sin, cu_seqlens, max_seqlen):
    q_embed = apply_rotary_emb(
        q, cos, sin, interleaved=False, inplace=False, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen
    )
    k_embed = apply_rotary_emb(
        k, cos, sin, interleaved=False, inplace=False, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen
    )
    return q_embed, k_embed


class ParallelLlamaAttentionRmPad(ParallelLlamaAttention):
    def forward(
        self,
        hidden_states: torch.Tensor,
        position_ids: Optional[torch.LongTensor] = None,
        sequence_length: int = None,
        indices: torch.Tensor = None,
        cu_seqlens: torch.Tensor = None,
        max_seqlen_in_batch: int = None,
    ):
        total_nnz, _, _ = hidden_states.size()  # This is the total_nnz padded after sequence parallel

        if self.megatron_config.sequence_parallel:
            total_nnz = total_nnz * mpu.get_tensor_model_parallel_world_size()

        qkv = self.qkv_proj(hidden_states)[0]
        query_states, key_states, value_states = qkv.split(
            [self.q_size, self.k_size, self.v_size], dim=-1
        )  # (total_nnz, 1, hidden_size)

        if self.megatron_config.sequence_parallel:
            sequence_parallel_pad = total_nnz - cu_seqlens[-1]
            total_nnz = cu_seqlens[-1]  # total_nnz before sp padding
            query_states = query_states[:total_nnz]
            key_states = key_states[:total_nnz]
            value_states = value_states[:total_nnz]

        # Flash attention requires the input to have the shape
        # batch_size x seq_length x head_dim x hidden_dim
        # therefore we just need to keep the original shape
        query_states = query_states.view(total_nnz, self.num_heads_per_tp, self.head_dim)
        key_states = key_states.view(total_nnz, self.num_key_value_heads_per_tp, self.head_dim)
        value_states = value_states.view(total_nnz, self.num_key_value_heads_per_tp, self.head_dim)

        cos, sin = self.rotary_emb(value_states, seq_len=sequence_length)
        cos, sin = cos[:, : cos.shape[1] // 2], sin[:, : sin.shape[1] // 2]  # flash attn only needs half
        query_states, key_states = apply_rotary_pos_emb_rmpad_flash(
            query_states, key_states, cos, sin, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen_in_batch
        )
        # query_states, key_states = apply_rotary_pos_emb_rmpad(query_states, key_states, cos, sin,
        #                                                       position_ids, indices,

        # TODO: llama does not have dropout in the config??
        # It is recommended to use dropout with FA according to the docs
        # when training.
        dropout_rate = 0.0  # if not self.training else self.attn_dropout

        # In PEFT, usually we cast the layer norms in float32 for training stability reasons,
        # therefore the input hidden states get silently cast to float32. Hence, we need to
        # cast them back to float16 just to be sure everything works as expected.
        # This might slow down training & inference, so it is recommended not to cast the
        # LayerNorms to fp32. (LlamaRMSNorm handles it correctly)
        input_dtype = query_states.dtype
        if input_dtype == torch.float32:
            query_states = query_states.to(torch.float16)
            key_states = key_states.to(torch.float16)
            value_states = value_states.to(torch.float16)

        attn_output_unpad = flash_attn_varlen_func(
            query_states,
            key_states,
            value_states,
            cu_seqlens_q=cu_seqlens,
            cu_seqlens_k=cu_seqlens,
            max_seqlen_q=max_seqlen_in_batch,
            max_seqlen_k=max_seqlen_in_batch,
            dropout_p=dropout_rate,
            softmax_scale=None,
            causal=True,
        )

        attn_output_unpad = attn_output_unpad.to(input_dtype)
        attn_output_unpad = attn_output_unpad.reshape(total_nnz, 1, self.hidden_size_per_tp).contiguous()

        # sequence parallel reduce_scatter is performed inside RowColumnParallel if enabled
        # Here we need to repad
        if self.megatron_config.sequence_parallel:
            attn_output_unpad = F.pad(attn_output_unpad, pad=(0, 0, 0, 0, 0, sequence_parallel_pad))

        attn_output_unpad = self.o_proj(attn_output_unpad)[0]
        return attn_output_unpad


================================================
FILE: verl_distillation/verl/models/llama/megatron/layers/parallel_decoder.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
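The decoder layer defined below is a standard pre-norm residual block: normalize, run the sublayer, add the residual, twice. A minimal sketch of that control flow with stand-in sublayers (everything here is hypothetical, not part of the repo):

import torch
from torch import nn

class TinyPreNormBlock(nn.Module):
    def __init__(self, d):
        super().__init__()
        self.n1, self.n2 = nn.LayerNorm(d), nn.LayerNorm(d)
        self.attn = nn.Linear(d, d)  # stand-in for self-attention
        self.mlp = nn.Linear(d, d)   # stand-in for the gated MLP
    def forward(self, x):
        x = x + self.attn(self.n1(x))  # norm -> sublayer -> residual add
        x = x + self.mlp(self.n2(x))
        return x

y = TinyPreNormBlock(16)(torch.randn(2, 5, 16))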
from typing import Optional

import torch
from megatron.core import ModelParallelConfig
from torch import nn
from transformers import LlamaConfig

from verl.utils.megatron_utils import TransformerConfig, convert_config

from .parallel_attention import ParallelLlamaAttention, ParallelLlamaAttentionRmPad
from .parallel_mlp import ParallelLlamaMLP
from .parallel_rmsnorm import ParallelLlamaRMSNorm


class ParallelLlamaDecoderLayer(nn.Module):
    def __init__(self, config: LlamaConfig, megatron_config: ModelParallelConfig, layer_idx: int):
        super().__init__()
        self.config: TransformerConfig = convert_config(config, megatron_config)
        self.layer_idx = layer_idx
        self.hidden_size = config.hidden_size
        self.self_attn = ParallelLlamaAttention(config=config, megatron_config=megatron_config)

        self.mlp = ParallelLlamaMLP(config, megatron_config=megatron_config)
        self.input_layernorm = ParallelLlamaRMSNorm(config, megatron_config)
        self.post_attention_layernorm = ParallelLlamaRMSNorm(config, megatron_config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
    ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up
                decoding (see `past_key_values`).
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
        """
        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Note: sequence parallel is hidden inside ColumnParallelLinear
        # reduce scatter is hidden inside RowParallelLinear

        # Self Attention
        hidden_states = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
        )

        # TODO: add sequence parallel operator reduce_scatter here
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)

        # TODO: add sequence parallel operator all_gather here
        hidden_states = self.mlp(hidden_states)

        # TODO: add sequence parallel operator reduce_scatter here
        hidden_states = residual + hidden_states

        outputs = hidden_states
        return outputs


class ParallelLlamaDecoderLayerRmPad(nn.Module):
    def __init__(self, config: LlamaConfig, megatron_config: ModelParallelConfig, layer_idx: int):
        super().__init__()
        self.config: TransformerConfig = convert_config(config, megatron_config)
        self.layer_idx = layer_idx
        self.hidden_size = config.hidden_size
        self.self_attn = ParallelLlamaAttentionRmPad(config=config, megatron_config=megatron_config)

        self.mlp = ParallelLlamaMLP(config, megatron_config=megatron_config)
        self.input_layernorm = ParallelLlamaRMSNorm(config, megatron_config)
        self.post_attention_layernorm = ParallelLlamaRMSNorm(config, megatron_config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_ids: Optional[torch.LongTensor] = None,
        sequence_length: int = None,
        indices: torch.Tensor = None,
        cu_seqlens: int = None,
        max_seqlen_in_batch: int = None,
    ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
        residual = hidden_states  # (total_nnz // sp, 1, hidden_size)

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        # (total_nnz // sp, 1, hidden_size) -> all-gather (total_nnz, 1, hidden_size)
        # -> col + row -> reduce-scatter -> (total_nnz // sp, 1, hidden_size)
        hidden_states = self.self_attn(
            hidden_states=hidden_states,
            position_ids=position_ids,
            sequence_length=sequence_length,
            indices=indices,
            cu_seqlens=cu_seqlens,
            max_seqlen_in_batch=max_seqlen_in_batch,
        )

        hidden_states = residual + hidden_states

        # Fully Connected
        # shape changes same as attn
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = hidden_states
        return outputs


================================================
FILE: verl_distillation/verl/models/llama/megatron/layers/parallel_linear.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
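MergedColumnParallelLinear, defined below, stores the gate and up projections fused along dim 0, and the checkpoint utilities earlier in this dump pack and unpack that layout per TP rank. A self-contained sketch of the round trip with toy sizes (all names below are hypothetical):

import torch

intermediate_size, hidden_size, tp_size = 8, 4, 2
gate = torch.randn(intermediate_size, hidden_size)
up = torch.randn(intermediate_size, hidden_size)
step = intermediate_size // tp_size

# Pack: rank i's slice is [gate_i; up_i] stacked along dim 0.
fused = torch.cat(
    [torch.cat([gate[i * step : (i + 1) * step], up[i * step : (i + 1) * step]]) for i in range(tp_size)]
)

# Unpack: each 2*step block splits back into a gate shard and an up shard.
gates, ups = [], []
for i in range(tp_size):
    block = fused[2 * step * i : 2 * step * (i + 1)]
    gates.append(block[:step])
    ups.append(block[step:])
assert torch.equal(torch.cat(gates), gate) and torch.equal(torch.cat(ups), up)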
# Adapted from https://github.com/vllm-project/vllm/blob/main/vllm/model_executor/layers/linear.py

import torch
from megatron.core import tensor_parallel


class QKVParallelLinear(tensor_parallel.ColumnParallelLinear):
    def __init__(
        self,
        input_size,
        num_heads,
        num_key_value_heads,
        head_dim,
        *,
        bias=True,
        gather_output=True,
        skip_bias_add=False,
        **kwargs,
    ):
        # Keep input parameters, and already restrict the head numbers
        self.input_size = input_size
        self.q_output_size = num_heads * head_dim
        self.kv_output_size = num_key_value_heads * head_dim
        self.head_dim = head_dim
        self.gather_output = gather_output
        self.skip_bias_add = skip_bias_add

        input_size = self.input_size
        output_size = (num_heads + 2 * num_key_value_heads) * self.head_dim

        super().__init__(
            input_size=input_size,
            output_size=output_size,
            bias=bias,
            gather_output=gather_output,
            skip_bias_add=skip_bias_add,
            **kwargs,
        )


class MergedColumnParallelLinear(tensor_parallel.ColumnParallelLinear):
    def __init__(
        self,
        input_size,
        gate_ouput_size,
        up_output_size,
        *,
        bias=True,
        gather_output=True,
        skip_bias_add=False,
        **kwargs,
    ):
        # Keep input parameters, and already restrict the head numbers
        self.input_size = input_size
        self.output_size = gate_ouput_size + up_output_size
        self.gather_output = gather_output
        self.skip_bias_add = skip_bias_add

        super().__init__(
            input_size=self.input_size,
            output_size=self.output_size,
            bias=bias,
            gather_output=gather_output,
            skip_bias_add=skip_bias_add,
            **kwargs,
        )


class LinearForLastLayer(torch.nn.Linear):
    def __init__(
        self,
        input_size,
        output_size,
        *,
        config,
        bias=True,
    ):
        super().__init__(in_features=input_size, out_features=output_size, bias=bias)
        self.sequence_parallel = config.sequence_parallel
        if self.sequence_parallel:
            self.weight.sequence_parallel = True

    def forward(
        self,
        input_,
        weight=None,
        runtime_gather_output=None,
    ):
        logits = super().forward(input_)
        logits = logits.float()
        if self.sequence_parallel:
            logits = tensor_parallel.gather_from_sequence_parallel_region(logits, tensor_parallel_output_grad=False)
        return logits, None


================================================
FILE: verl_distillation/verl/models/llama/megatron/layers/parallel_mlp.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
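ParallelLlamaMLP below computes down_proj(act_fn(gate) * up), i.e. a SiLU-gated (SwiGLU-style) MLP. The same math in plain torch, with hypothetical weights standing in for the parallel linears:

import torch
import torch.nn.functional as F

hidden_size, intermediate_size = 4, 8
x = torch.randn(2, hidden_size)
w_gate = torch.randn(intermediate_size, hidden_size)
w_up = torch.randn(intermediate_size, hidden_size)
w_down = torch.randn(hidden_size, intermediate_size)

# down_proj(act_fn(gate) * up) with SiLU as act_fn
out = F.linear(F.silu(F.linear(x, w_gate)) * F.linear(x, w_up), w_down)
assert out.shape == (2, hidden_size)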
from megatron.core import ModelParallelConfig, tensor_parallel
from megatron.core import parallel_state as mpu
from torch import nn
from transformers.activations import ACT2FN

from verl.models.llama.megatron.layers.parallel_linear import MergedColumnParallelLinear
from verl.utils.megatron import tensor_parallel as tp_utils


class ParallelLlamaMLP(nn.Module):
    def __init__(self, config, megatron_config: ModelParallelConfig = None) -> None:
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size

        # The weight is only [hidden_size, intermediate_size // model_parallel_world_size]
        column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear()
        row_kwargs = tp_utils.get_default_kwargs_for_row_parallel_linear()

        if megatron_config is not None:
            assert column_kwargs.get("config", False), "must have ModelParallelConfig"
            assert row_kwargs.get("config", False), "must have ModelParallelConfig"
            tp_utils.update_kwargs_with_config(row_kwargs, megatron_config)
            tp_utils.update_kwargs_with_config(column_kwargs, megatron_config)

        tp_size = mpu.get_tensor_model_parallel_world_size()

        self.gate_up_proj = MergedColumnParallelLinear(
            input_size=self.hidden_size,
            gate_output_size=self.intermediate_size,
            up_output_size=self.intermediate_size,
            bias=False,
            gather_output=False,
            skip_bias_add=False,
            **column_kwargs,
        )
        self.gate_size = self.intermediate_size // tp_size

        self.down_proj = tensor_parallel.RowParallelLinear(
            input_size=self.intermediate_size,
            output_size=self.hidden_size,
            bias=False,
            input_is_parallel=True,
            skip_bias_add=False,
            **row_kwargs,
        )

        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        gate_up = self.gate_up_proj(x)[0]
        gate, up = gate_up.split(self.gate_size, dim=-1)
        return self.down_proj(self.act_fn(gate) * up)[0]


================================================
FILE: verl_distillation/verl/models/llama/megatron/layers/parallel_rmsnorm.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
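# Reference (unfused) RMSNorm, a sketch of what apex's fused_rms_norm_affine
# computes in ParallelLlamaRMSNorm below: y = x / sqrt(mean(x^2) + eps) * weight,
# with statistics taken in fp32. Useful for checking numerics when apex is
# unavailable; the eps default is illustrative.
import torch


def rms_norm_reference(x: torch.Tensor, weight: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
    variance = x.float().pow(2).mean(dim=-1, keepdim=True)  # per-token mean square
    return (x.float() * torch.rsqrt(variance + eps)).to(x.dtype) * weight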
import numbers

import torch
from apex.normalization.fused_layer_norm import fused_rms_norm_affine
from megatron.core import ModelParallelConfig
from torch import nn
from transformers import LlamaConfig

from verl.utils.megatron import sequence_parallel as sp_utils


class ParallelLlamaRMSNorm(nn.Module):
    def __init__(self, config: LlamaConfig, megatron_config: ModelParallelConfig):
        """
        LlamaRMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        # config.hidden_size is expected to be a plain integer
        assert isinstance(config.hidden_size, numbers.Integral), "hidden_size must be an integer"
        normalized_shape = (config.hidden_size,)
        self.normalized_shape = torch.Size(normalized_shape)
        self.weight = nn.Parameter(torch.ones(self.normalized_shape))
        self.variance_epsilon = config.rms_norm_eps

        if megatron_config.sequence_parallel:
            sp_utils.mark_parameter_as_sequence_parallel(self.weight)

    def forward(self, hidden_states):
        return fused_rms_norm_affine(
            input=hidden_states,
            weight=self.weight,
            normalized_shape=self.normalized_shape,
            eps=self.variance_epsilon,
            memory_efficient=True,
        )


================================================
FILE: verl_distillation/verl/models/llama/megatron/modeling_llama_megatron.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch LLaMA model with Megatron-style acceleration."""

from typing import Optional

import torch
import torch.utils.checkpoint
from megatron.core import ModelParallelConfig, mpu, tensor_parallel
from torch import nn
from transformers.modeling_outputs import BaseModelOutputWithPast
from transformers.models.llama.configuration_llama import LlamaConfig
from transformers.models.llama.modeling_llama import CausalLMOutputWithPast

from verl.utils.megatron import sequence_parallel as sp_utils
from verl.utils.megatron import tensor_parallel as tp_utils
from verl.utils.megatron_utils import TransformerConfig, convert_config

from .layers import ParallelLlamaDecoderLayer, ParallelLlamaDecoderLayerRmPad, ParallelLlamaRMSNorm

"""
TODO:
1. Add weight initialization. Here we need to be careful on TP weight init.
2. Add sequence parallel
3. Load checkpoint from Meta's pretrained LLaMA checkpoint
"""


# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device):
    """
    Make causal mask used for bi-directional self-attention.
""" bsz, tgt_len = input_ids_shape mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device) mask_cond = torch.arange(mask.size(-1), device=device) mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) mask = mask.to(dtype) return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len) # Copied from transformers.models.bart.modeling_bart._expand_mask def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. """ bsz, src_len = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) class ParallelLlamaModel(nn.Module): """ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`LlamaDecoderLayer`] Args: config: LlamaConfig """ def __init__(self, config: LlamaConfig, megatron_config: ModelParallelConfig): super().__init__() self.config: TransformerConfig = convert_config(config, megatron_config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size embedding_kwargs = tp_utils.get_default_kwargs_for_parallel_embedding() if megatron_config is not None: assert embedding_kwargs.get("config", False), "must have ModelParallelConfig" tp_utils.update_kwargs_with_config(embedding_kwargs, self.megatron_config) self.embed_tokens = tensor_parallel.VocabParallelEmbedding( num_embeddings=config.vocab_size, embedding_dim=config.hidden_size, **embedding_kwargs ) self.layers = nn.ModuleList( [ParallelLlamaDecoderLayer(config, megatron_config) for _ in range(config.num_hidden_layers)] ) self.norm = ParallelLlamaRMSNorm(config, megatron_config) # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds): # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] combined_attention_mask = None if input_shape[-1] > 1: combined_attention_mask = _make_causal_mask( input_shape, inputs_embeds.dtype, device=inputs_embeds.device, ) if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to( inputs_embeds.device ) combined_attention_mask = ( expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask ) return combined_attention_mask def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, ) -> tuple | BaseModelOutputWithPast: """ Args: input_ids: input ids. shape (batch_size, seq_length) attention_mask: attention_mask. shape (batch_size, seq_length) position_ids: position ids. 
                shape (batch_size, seq_length)

        Returns:

        """
        batch_size, seq_length = input_ids.shape
        inputs_embeds = self.embed_tokens(input_ids)
        # embed positions
        attention_mask = self._prepare_decoder_attention_mask(attention_mask, (batch_size, seq_length), inputs_embeds)

        hidden_states = inputs_embeds

        for idx, decoder_layer in enumerate(self.layers):
            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=attention_mask,
                position_ids=position_ids,
            )

            hidden_states = layer_outputs

        hidden_states = self.norm(hidden_states)

        return hidden_states


class ParallelLlamaForCausalLM(nn.Module):
    def __init__(self, config: LlamaConfig, megatron_config: ModelParallelConfig):
        super().__init__()
        self.config: TransformerConfig = convert_config(config, megatron_config)
        self.model = ParallelLlamaModel(config, megatron_config=megatron_config)
        self.vocab_size = config.vocab_size
        self.megatron_config = megatron_config
        column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear()
        if megatron_config is not None:
            assert column_kwargs.get("config", False), "must have ModelParallelConfig"
            tp_utils.update_kwargs_with_config(column_kwargs, self.megatron_config)

        self.lm_head = tensor_parallel.ColumnParallelLinear(
            input_size=config.hidden_size,
            output_size=config.vocab_size,
            bias=False,
            gather_output=False,
            skip_bias_add=False,
            **column_kwargs,
        )

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
    ) -> tuple | CausalLMOutputWithPast:
        r"""
        Args:
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in
                `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100`
                are ignored (masked), the loss is only computed for the tokens with labels in
                `[0, ..., config.vocab_size]`.

        Returns:

        """
        # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
        )

        hidden_states = outputs
        logits = self.lm_head(hidden_states)[0]

        logits = tensor_parallel.gather_from_tensor_model_parallel_region(logits)

        logits = logits.float()
        return CausalLMOutputWithPast(
            loss=None,
            logits=logits,
            past_key_values=None,
            hidden_states=None,
            attentions=None,
        )


from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input  # noqa: F401, E402


class ParallelLlamaModelRmPad(nn.Module):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers.
    Each layer is a [`LlamaDecoderLayer`]

    Args:
        config: LlamaConfig
    """

    def __init__(self, config: LlamaConfig, megatron_config: ModelParallelConfig):
        super().__init__()
        self.config: TransformerConfig = convert_config(config, megatron_config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size
        embedding_kwargs = tp_utils.get_default_kwargs_for_parallel_embedding()
        self.megatron_config = megatron_config
        if megatron_config is not None:
            assert embedding_kwargs.get("config", False), "must have ModelParallelConfig"
            tp_utils.update_kwargs_with_config(embedding_kwargs, self.megatron_config)
        self.embed_tokens = tensor_parallel.VocabParallelEmbedding(
            num_embeddings=config.vocab_size, embedding_dim=config.hidden_size, **embedding_kwargs
        )

        self.layers = nn.ModuleList(
            [ParallelLlamaDecoderLayerRmPad(config, megatron_config) for _ in range(config.num_hidden_layers)]
        )

        self.norm = ParallelLlamaRMSNorm(config, megatron_config)

    def forward(
        self,
        input_ids: torch.Tensor,
        position_ids: Optional[torch.LongTensor] = None,
        sequence_length: int = None,
        indices: torch.Tensor = None,
        cu_seqlens: torch.Tensor = None,
        max_seqlen_in_batch: int = None,
    ) -> tuple | BaseModelOutputWithPast:
        """
        Args:
            input_ids: input ids. shape (1, total_nnz)
            position_ids: position ids. shape (batch_size, seq_length)

        Returns:

        """
        inputs_embeds = self.embed_tokens(input_ids)  # (1, total_nnz) -> (1, total_nnz, hidden_size)

        # (1, total_nnz, hidden_size) -> (total_nnz, 1, hidden_size) -> (total_nnz // sp, 1, hidden_size)
        inputs_embeds = inputs_embeds.transpose(0, 1)
        if self.megatron_config.sequence_parallel:
            inputs_embeds = tensor_parallel.scatter_to_sequence_parallel_region(inputs_embeds)

        hidden_states = inputs_embeds
        for idx, decoder_layer in enumerate(self.layers):
            layer_outputs = decoder_layer(
                hidden_states,
                position_ids=position_ids,
                sequence_length=sequence_length,
                indices=indices,
                cu_seqlens=cu_seqlens,
                max_seqlen_in_batch=max_seqlen_in_batch,
            )

            hidden_states = layer_outputs

        hidden_states = self.norm(hidden_states)

        return hidden_states


class ParallelLlamaForCausalLMRmPad(nn.Module):
    def __init__(self, config: LlamaConfig, megatron_config: ModelParallelConfig):
        super().__init__()
        self.config: TransformerConfig = convert_config(config, megatron_config)
        self.megatron_config = megatron_config
        self.model = ParallelLlamaModelRmPad(config, megatron_config=megatron_config)
        self.vocab_size = config.vocab_size
        self._init_head(config)

    def _init_head(self, config):
        column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear()
        if self.megatron_config is not None:
            assert column_kwargs.get("config", False), "must have ModelParallelConfig"
            tp_utils.update_kwargs_with_config(column_kwargs, self.megatron_config)
        self.lm_head = tensor_parallel.ColumnParallelLinear(
            input_size=config.hidden_size,
            output_size=config.vocab_size,
            bias=False,
            gather_output=False,
            skip_bias_add=False,
            **column_kwargs,
        )

    def _forward_head(self, hidden_states):
        # all_gather from sequence parallel region is performed inside lm_head
        logits = self.lm_head(hidden_states)[0]
        logits = logits.float()  # (total_nnz_padded, 1, vocab_size // tp)
        logits = tensor_parallel.gather_from_tensor_model_parallel_region(logits)  # (total_nnz_padded, 1, vocab_size)
        return logits

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
    ) -> tuple | CausalLMOutputWithPast:
        r"""
        Args:
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in
                `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100`
                are ignored (masked), the loss is only computed for the tokens with labels in
                `[0, ..., config.vocab_size]`.

        Returns:

        """
        batch_size, sequence_length = input_ids.shape

        # remove padding here
        input_ids, indices, cu_seqlens, max_seqlen_in_batch, *_ = unpad_input(
            input_ids.unsqueeze(dim=-1), attention_mask
        )  # (total_nnz, 1)

        # pad input_ids to a multiple of tp for all tp ranks
        # TODO: for better performance, the sp padding should be removed at each layer;
        # the performance gap has not been measured
        if self.megatron_config.sequence_parallel:
            input_ids = sp_utils.pad_to_sequence_parallel(input_ids)

        input_ids = input_ids.transpose(0, 1)  # (1, total_nnz+pad)

        outputs = self.model(
            input_ids=input_ids,
            position_ids=position_ids,
            sequence_length=sequence_length,
            indices=indices,
            cu_seqlens=cu_seqlens,
            max_seqlen_in_batch=max_seqlen_in_batch,
        )

        hidden_states = outputs
        logits = self._forward_head(hidden_states)

        # remove padding from sequence parallel
        if self.megatron_config.sequence_parallel:
            total_nnz = cu_seqlens[-1]
            logits = logits[:total_nnz]  # (total_nnz_padded)

        logits = torch.squeeze(logits, dim=1)  # remove the artificial batch dimension

        # add removed padding back
        logits = pad_input(
            logits, indices, batch_size, seqlen=sequence_length
        )  # (batch_size, sequence_length, vocab_size)

        return CausalLMOutputWithPast(
            loss=None,
            logits=logits,
            past_key_values=None,
            hidden_states=None,
            attentions=None,
        )


class ParallelLlamaForValueRmPad(ParallelLlamaForCausalLMRmPad):
    def _init_head(self, config):
        # note: column_kwargs are unused here; the value head is a plain (non-parallel) linear
        column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear()
        if self.megatron_config is not None:
            assert column_kwargs.get("config", False), "must have ModelParallelConfig"
            tp_utils.update_kwargs_with_config(column_kwargs, self.megatron_config)
        self.lm_head = nn.Linear(in_features=config.hidden_size, out_features=1, bias=False)
        # the value head weight behaves like a sequence-parallel parameter
        sp_utils.mark_parameter_as_sequence_parallel(self.lm_head.weight)

    def _forward_head(self, hidden_states):
        logits = self.lm_head(hidden_states)  # (total_nnz_padded // tp, 1, 1)
        logits = logits.float()
        if self.megatron_config.sequence_parallel:
            logits = tensor_parallel.gather_from_sequence_parallel_region(logits, tensor_parallel_output_grad=False)
        return logits

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
    ) -> tuple | CausalLMOutputWithPast:
        output = super().forward(input_ids, attention_mask, position_ids)
        output.logits = torch.squeeze(output.logits, dim=-1)
        return output


"""
Support pipeline parallelism
"""


class ParallelLlamaModelRmPadPP(nn.Module):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`LlamaDecoderLayer`]
    This model definition supports pipeline parallelism. To support pp and vpp,
    - This model only contains the layers in this pp stage and vpp chunk
    - When calling get_model in Megatron, this rank will instantiate all the vpp chunks in this pp.
    Args:
        config: LlamaConfig
    """

    def __init__(self, config: LlamaConfig, megatron_config: ModelParallelConfig, pre_process, post_process):
        super().__init__()
        self.config: TransformerConfig = convert_config(config, megatron_config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size
        self.pre_process = pre_process
        self.post_process = post_process
        self.megatron_config = megatron_config
        embedding_kwargs = tp_utils.get_default_kwargs_for_parallel_embedding()
        if megatron_config is not None:
            assert embedding_kwargs.get("config", False), "must have ModelParallelConfig"
            tp_utils.update_kwargs_with_config(embedding_kwargs, self.megatron_config)
        if pre_process:
            self.embed_tokens = tensor_parallel.VocabParallelEmbedding(
                num_embeddings=config.vocab_size, embedding_dim=config.hidden_size, **embedding_kwargs
            )
        else:
            self.embed_tokens = None

        pp_rank = mpu.get_pipeline_model_parallel_rank()
        pp_size = megatron_config.pipeline_model_parallel_size
        self.num_layer_per_pp = config.num_hidden_layers // pp_size
        vpp_size = megatron_config.virtual_pipeline_model_parallel_size
        vpp_rank = mpu.get_virtual_pipeline_model_parallel_rank()

        if vpp_size is not None:
            self.num_layer_vpp_chunk = self.num_layer_per_pp // vpp_size
            self.num_layer_this_model = self.num_layer_vpp_chunk
            offset = vpp_rank * (config.num_hidden_layers // vpp_size) + (pp_rank * self.num_layer_vpp_chunk)
        else:
            self.num_layer_this_model = self.num_layer_per_pp
            offset = pp_rank * self.num_layer_per_pp

        self.layers = nn.ModuleList()
        for i in range(self.num_layer_this_model):
            layer = ParallelLlamaDecoderLayerRmPad(config, megatron_config, layer_idx=offset + i)
            self.layers.add_module(f"{i}", layer)

        if post_process:
            self.norm = ParallelLlamaRMSNorm(config, megatron_config)
        else:
            self.norm = None

    def set_input_tensor(self, input_tensor):
        """Set input tensor to be used instead of forward()'s input.

        When doing pipeline parallelism the input from the previous stage comes from communication, not from the
        input, so the model's forward_step_func won't have it. This function is thus used by internal code to bypass
        the input provided by the forward_step_func
        """
        self.input_tensor = input_tensor

    def forward(
        self,
        input_ids: torch.Tensor,
        position_ids: Optional[torch.LongTensor] = None,
        sequence_length: int = None,
        indices: torch.Tensor = None,
        cu_seqlens: torch.Tensor = None,
        max_seqlen_in_batch: int = None,
    ) -> tuple | BaseModelOutputWithPast:
        """
        Args:
            input_ids: input ids. shape (1, total_nnz)
            position_ids: position ids. shape (batch_size, seq_length)

        Returns:

        """
        if self.pre_process:
            inputs_embeds = self.embed_tokens(input_ids)  # (1, total_nnz) -> (1, total_nnz, hidden_size)

            # vocab parallel embedding will not do the sequence parallel reduce-scatter in open source megatron,
            # so we need to handle it here:
            # (1, total_nnz, hidden_size) -> (total_nnz, 1, hidden_size) -> (total_nnz // sp, 1, hidden_size)
            inputs_embeds = inputs_embeds.transpose(0, 1)
            if self.megatron_config.sequence_parallel:
                inputs_embeds = tensor_parallel.scatter_to_sequence_parallel_region(inputs_embeds)

            hidden_states = inputs_embeds
        else:
            # self.input_tensor should be passed by Megatron
            hidden_states = self.input_tensor

        for idx, decoder_layer in enumerate(self.layers):
            layer_outputs = decoder_layer(
                hidden_states,
                position_ids=position_ids,
                sequence_length=sequence_length,
                indices=indices,
                cu_seqlens=cu_seqlens,
                max_seqlen_in_batch=max_seqlen_in_batch,
            )

            hidden_states = layer_outputs

        if self.post_process:
            hidden_states = self.norm(hidden_states)

        return hidden_states


class ParallelLlamaForCausalLMRmPadPP(nn.Module):
    def __init__(
        self,
        config: LlamaConfig,
        megatron_config: ModelParallelConfig,
        pre_process,
        post_process,
        share_embeddings_and_output_weights=False,
    ):
        super().__init__()
        self.config: TransformerConfig = convert_config(config, megatron_config)
        self.megatron_config = megatron_config
        self.model = ParallelLlamaModelRmPadPP(
            config, megatron_config=megatron_config, pre_process=pre_process, post_process=post_process
        )
        assert share_embeddings_and_output_weights is False, (
            "the Llama model does not support sharing embedding and output weights"
        )
        self.share_embeddings_and_output_weights = share_embeddings_and_output_weights
        self.vocab_size = config.vocab_size
        self.pre_process = pre_process
        self.post_process = post_process
        if post_process:
            self._init_head(config)

    def set_input_tensor(self, input_tensor):
        """Set input tensor to be used instead of forward()'s input.

        When doing pipeline parallelism the input from the previous stage comes from communication, not from the
        input, so the model's forward_step_func won't have it. This function is thus used by internal code to bypass
        the input provided by the forward_step_func
        """
        assert len(input_tensor) == 1
        self.model.set_input_tensor(input_tensor[0])

    def _init_head(self, config):
        column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear()
        if self.megatron_config is not None:
            assert column_kwargs.get("config", False), "must have ModelParallelConfig"
            tp_utils.update_kwargs_with_config(column_kwargs, self.megatron_config)
        self.lm_head = tensor_parallel.ColumnParallelLinear(
            input_size=config.hidden_size,
            output_size=config.vocab_size,
            bias=False,
            gather_output=False,
            skip_bias_add=False,
            **column_kwargs,
        )

    def _forward_head(self, hidden_states):
        # all_gather from sequence parallel region is performed inside lm_head
        # (total_nnz // sp, 1, hidden_size) -> (total_nnz_padded, 1, vocab_size // tp)
        logits = self.lm_head(hidden_states)[0]
        logits = logits.float()  # (total_nnz_padded, 1, vocab_size // tp)
        return logits

    def forward(
        self,
        # original input
        *,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
    ) -> tuple | CausalLMOutputWithPast:
        r"""
        Args:
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in
                `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100`
                are ignored (masked), the loss is only computed for the tokens with labels in
                `[0, ..., config.vocab_size]`.

        Returns:

        """
        # Note that input_ids, attention_mask and position_ids should be passed to every pp stage.
        # In the first pp stage input_ids is used; later pp stages consume hidden_states inside self.model
        batch_size, sequence_length = input_ids.shape
        # remove padding here
        input_ids_rmpad, indices, cu_seqlens, max_seqlen_in_batch, *_ = unpad_input(
            input_ids.unsqueeze(dim=-1), attention_mask
        )  # (total_nnz, 1)

        # pad input_ids to a multiple of tp for all tp ranks
        # TODO: for better performance, the sp padding should be removed at each layer;
        # the performance gap has not been measured
        if self.megatron_config.sequence_parallel:
            input_ids_rmpad = sp_utils.pad_to_sequence_parallel(input_ids_rmpad)

        input_ids_rmpad = input_ids_rmpad.transpose(0, 1)  # (1, total_nnz+pad)

        outputs = self.model(
            input_ids=input_ids_rmpad,
            position_ids=position_ids,
            sequence_length=sequence_length,
            indices=indices,
            cu_seqlens=cu_seqlens,
            max_seqlen_in_batch=max_seqlen_in_batch,
        )

        if self.post_process:
            hidden_states = outputs
            logits = self._forward_head(hidden_states)
            logits = torch.squeeze(logits, dim=1)  # remove the artificial batch dimension

            # remove padding from sequence parallel
            if self.megatron_config.sequence_parallel:
                total_nnz = cu_seqlens[-1]
                logits = logits[:total_nnz]  # (total_nnz_padded)

            # add removed padding back. If input is already rmpad, we let the caller pad_input
            logits = pad_input(
                logits, indices, batch_size, seqlen=sequence_length
            )  # (batch_size, sequence_length, vocab_size)

            return CausalLMOutputWithPast(
                loss=None,
                logits=logits,
                past_key_values=None,
                hidden_states=None,
                attentions=None,
            )
        else:
            return outputs


class ParallelLlamaForValueRmPadPP(ParallelLlamaForCausalLMRmPadPP):
    def _init_head(self, config):
        # note: column_kwargs are unused here; the value head is a plain (non-parallel) linear
        column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear()
        if self.megatron_config is not None:
            assert column_kwargs.get("config", False), "must have ModelParallelConfig"
            tp_utils.update_kwargs_with_config(column_kwargs, self.megatron_config)
        self.lm_head = nn.Linear(in_features=config.hidden_size, out_features=1, bias=False)
        # the value head weight behaves like a sequence-parallel parameter
        sp_utils.mark_parameter_as_sequence_parallel(self.lm_head.weight)

    def _forward_head(self, hidden_states):
        logits = self.lm_head(hidden_states)  # (total_nnz_padded // tp, 1, 1)
        logits = logits.float()
        if self.megatron_config.sequence_parallel:
            logits = tensor_parallel.gather_from_sequence_parallel_region(logits, tensor_parallel_output_grad=False)
        return logits

    def forward(
        self,
        *,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
    ) -> tuple | CausalLMOutputWithPast:
        output = super().forward(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids)
        if self.post_process:
            output.logits = torch.squeeze(output.logits, dim=-1)
            return output
        else:
            return output


================================================
FILE: verl_distillation/verl/models/mcore/__init__.py
================================================
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
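# Worked example of the pp/vpp layer placement used by ParallelLlamaModelRmPadPP
# above, with hypothetical sizes (32 layers, pp=4, vpp=2). Each vpp chunk on a
# pp rank owns a contiguous block of layers, starting at
#   vpp_rank * (num_layers // vpp_size) + pp_rank * num_layer_vpp_chunk.
num_hidden_layers, pp_size, vpp_size = 32, 4, 2
num_layer_per_pp = num_hidden_layers // pp_size  # 8
num_layer_vpp_chunk = num_layer_per_pp // vpp_size  # 4
placement = {
    (pp_rank, vpp_rank): list(
        range(
            vpp_rank * (num_hidden_layers // vpp_size) + pp_rank * num_layer_vpp_chunk,
            vpp_rank * (num_hidden_layers // vpp_size) + (pp_rank + 1) * num_layer_vpp_chunk,
        )
    )
    for pp_rank in range(pp_size)
    for vpp_rank in range(vpp_size)
}
assert placement[(0, 0)] == [0, 1, 2, 3] and placement[(0, 1)] == [16, 17, 18, 19]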
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .registry import ( get_mcore_forward_fn, get_mcore_forward_fused_fn, get_mcore_forward_no_padding_fn, get_mcore_weight_converter, hf_to_mcore_config, init_mcore_model, ) __all__ = [ "hf_to_mcore_config", "init_mcore_model", "get_mcore_forward_fn", "get_mcore_weight_converter", "get_mcore_forward_fused_fn", "get_mcore_forward_no_padding_fn", ] ================================================ FILE: verl_distillation/verl/models/mcore/config_converter.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # convert huggingface config to mcore transformer config import warnings from typing import TypeVar import torch import torch.nn.functional as F from megatron.core import parallel_state as mpu from megatron.core.transformer import MLATransformerConfig, TransformerConfig from transformers import PretrainedConfig T = TypeVar("T", bound=TransformerConfig) def _get_base_transformer_config( hf_config: PretrainedConfig, dtype: torch.dtype, **override_transformer_config_kwargs ) -> dict: """ Create a base TransformerConfig with common parameters across different model architectures. TODO: (ycl) use dataclass or converter config? 
Args: hf_config: HuggingFace model configuration dtype: Data type for the model override_transformer_config_kwargs: Additional parameters to override defaults Returns: TransformerConfig with common parameters """ # Common parallel state parameters overlap_p2p_comm = ( mpu.get_virtual_pipeline_model_parallel_world_size() is not None and mpu.get_virtual_pipeline_model_parallel_world_size() > 1 ) batch_p2p_comm = False # Base configuration with common parameters base_config = { # Model architecture parameters "num_layers": hf_config.num_hidden_layers, "hidden_size": hf_config.hidden_size, "num_attention_heads": hf_config.num_attention_heads, "num_query_groups": hf_config.num_key_value_heads, "ffn_hidden_size": hf_config.intermediate_size, "attention_dropout": hf_config.attention_dropout, "hidden_dropout": getattr(hf_config, "hidden_dropout", 0.0), "kv_channels": getattr(hf_config, "head_dim", None), "layernorm_epsilon": hf_config.rms_norm_eps, "add_bias_linear": True, # Activation and normalization "activation_func": F.silu, "normalization": "RMSNorm", "gated_linear_unit": True, # Data types "pipeline_dtype": dtype, "params_dtype": dtype, "bf16": dtype is torch.bfloat16, # Parallel configuration "tensor_model_parallel_size": mpu.get_tensor_model_parallel_world_size(), "pipeline_model_parallel_size": mpu.get_pipeline_model_parallel_world_size(), "expert_model_parallel_size": mpu.get_expert_model_parallel_world_size(), "expert_tensor_parallel_size": mpu.get_expert_tensor_parallel_world_size(), "virtual_pipeline_model_parallel_size": mpu.get_virtual_pipeline_model_parallel_world_size(), "context_parallel_size": mpu.get_context_parallel_world_size(), "overlap_p2p_comm": overlap_p2p_comm, "batch_p2p_comm": batch_p2p_comm, "sequence_parallel": mpu.get_tensor_model_parallel_world_size() > 1, # Common settings "variable_seq_lengths": True, "masked_softmax_fusion": True, "moe_token_dispatcher_type": "alltoall", } # Update with any provided overrides # override_transformer_config_kwargs as kwargs shall never be none base_config.update(override_transformer_config_kwargs) return base_config def _get_mla_transformer_config( hf_config: PretrainedConfig, mla_rope_config: dict, dtype: torch.dtype, **override_transformer_config_kwargs ) -> dict: """ Create a MLATransformerConfig with common parameters across different model architectures. This is specifically for MLA models like DeepseekV3. 
Args: hf_config: HuggingFace model configuration mla_rope_config: MLA specific RoPE configuration dtype: Data type for the model override_transformer_config_kwargs: Additional parameters to override defaults Returns: MLATransformerConfig with common parameters """ base_config = _get_base_transformer_config(hf_config=hf_config, dtype=dtype, **override_transformer_config_kwargs) mla_config = { # MLA specific parameters "q_lora_rank": hf_config.q_lora_rank, "kv_lora_rank": hf_config.kv_lora_rank, "qk_head_dim": hf_config.qk_nope_head_dim, "qk_pos_emb_head_dim": hf_config.qk_rope_head_dim, "v_head_dim": hf_config.v_head_dim, "rotary_base": hf_config.rope_theta, "rotary_scaling_factor": mla_rope_config["factor"], "rope_type": mla_rope_config["type"], "max_position_embeddings": mla_rope_config["original_max_position_embeddings"], "beta_fast": mla_rope_config["beta_fast"], "beta_slow": mla_rope_config["beta_slow"], "mscale": mla_rope_config["mscale"], "mscale_all_dim": mla_rope_config["mscale_all_dim"], } base_config.update(mla_config) return base_config def check_and_construct_configs(original_config: dict, cls: type[T]) -> T: """ Check and disable incompatible configurations for older Megatron version. Args: original_config (dict): The original model configuration. Returns: dict: The updated model configuration with incompatible settings disabled. """ removed_keys = [] for key in original_config.keys(): if not hasattr(cls, key): removed_keys.append(key) if removed_keys: warnings.warn( f"The following keys are not supported in the current Megatron version and will be removed: {removed_keys}", stacklevel=2, ) for key in removed_keys: original_config.pop(key) original_config = mapping_string_to_attn_backend(original_config) if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0: print(f"Overridden {cls.__name__} init config: {original_config}") return cls(**original_config) def hf_to_mcore_config_dense( hf_config: PretrainedConfig, dtype: torch.dtype, **override_transformer_config_kwargs ) -> TransformerConfig: # for LlamaForCausalLM or Qwen2ForCausalLM qkv_bias = True if "Qwen2" in hf_config.architectures[0] else getattr(hf_config, "attention_bias", False) qk_layernorm = True if "Qwen3" in hf_config.architectures[0] else False args: dict = _get_base_transformer_config( hf_config=hf_config, dtype=dtype, use_cpu_initialization=False, add_bias_linear=False, add_qkv_bias=qkv_bias, qk_layernorm=qk_layernorm, ) # override_transformer_config_kwargs as kwargs shall never be none args.update(override_transformer_config_kwargs) return check_and_construct_configs(args, TransformerConfig) def hf_to_mcore_config_qwen2moe( hf_config: PretrainedConfig, dtype: torch.dtype, **override_transformer_config_kwargs ) -> TransformerConfig: args: dict = _get_base_transformer_config( hf_config=hf_config, dtype=dtype, use_cpu_initialization=False, add_bias_linear=False, layernorm_epsilon=hf_config.rms_norm_eps, # MoE specific moe_ffn_hidden_size=hf_config.moe_intermediate_size, moe_router_bias_update_rate=0.001, moe_router_topk=hf_config.num_experts_per_tok, num_moe_experts=hf_config.num_experts, moe_shared_expert_intermediate_size=hf_config.shared_expert_intermediate_size, moe_aux_loss_coeff=hf_config.router_aux_loss_coef, # moe_aux_loss_coeff=0.0, moe_router_load_balancing_type="none", # turn off aux_loss as it hurts perf in RL moe_shared_expert_overlap=True, moe_grouped_gemm=True, moe_router_score_function="softmax", # Other optimizations persist_layer_norm=True, bias_activation_fusion=True, 
bias_dropout_fusion=True, # Qwen specific moe_router_pre_softmax=True, add_qkv_bias=True, ) # override_transformer_config_kwargs as kwargs shall never be none args.update(override_transformer_config_kwargs) return check_and_construct_configs(args, TransformerConfig) def hf_to_mcore_config_mixtral( hf_config: PretrainedConfig, dtype: torch.dtype, **override_transformer_config_kwargs ) -> TransformerConfig: args: dict = _get_base_transformer_config( hf_config=hf_config, dtype=dtype, use_cpu_initialization=False, add_bias_linear=False, layernorm_epsilon=hf_config.rms_norm_eps, # MoE specific num_moe_experts=hf_config.num_local_experts, moe_aux_loss_coeff=hf_config.router_aux_loss_coef, moe_router_topk=hf_config.num_experts_per_tok, moe_router_pre_softmax=True, moe_router_load_balancing_type="none", # turn off aux_loss as it hurts perf in RL moe_router_score_function="softmax", moe_shared_expert_intermediate_size=None, # mixtral has no shared expert moe_shared_expert_overlap=False, # mixtral has no shared expert moe_ffn_hidden_size=hf_config.intermediate_size, moe_router_bias_update_rate=0.001, # moe_permute_fusion=True, # need TE 2.1+ moe_grouped_gemm=True, # Other optimizations persist_layer_norm=True, apply_rope_fusion=True, bias_activation_fusion=True, bias_dropout_fusion=True, ) # override_transformer_config_kwargs as kwargs shall never be none args.update(override_transformer_config_kwargs) return check_and_construct_configs(args, TransformerConfig) def hf_to_mcore_config_qwen3moe( hf_config: PretrainedConfig, dtype: torch.dtype, **override_transformer_config_kwargs ) -> TransformerConfig: args: dict = _get_base_transformer_config( hf_config=hf_config, dtype=dtype, use_cpu_initialization=False, add_bias_linear=False, layernorm_epsilon=hf_config.rms_norm_eps, # MoE specific moe_ffn_hidden_size=hf_config.moe_intermediate_size, moe_router_bias_update_rate=0.001, moe_router_topk=hf_config.num_experts_per_tok, num_moe_experts=hf_config.num_experts, moe_aux_loss_coeff=hf_config.router_aux_loss_coef, # moe_aux_loss_coeff=0.0, moe_router_load_balancing_type="none", # turn off aux_loss as it hurts perf in RL moe_grouped_gemm=True, moe_router_score_function="softmax", # Other optimizations persist_layer_norm=True, bias_activation_fusion=True, bias_dropout_fusion=True, # Qwen specific moe_router_pre_softmax=False, qk_layernorm=True, ) # override_transformer_config_kwargs as kwargs shall never be none args.update(override_transformer_config_kwargs) return check_and_construct_configs(args, TransformerConfig) def hf_to_mcore_config_dpskv3( hf_config: PretrainedConfig, dtype: torch.dtype, **override_transformer_config_kwargs ) -> MLATransformerConfig: # DeepseekV3ForCausalLM from megatron.core.transformer.enums import AttnBackend from .patch_v012 import apply_patch apply_patch() mla_rope_config = { "beta_fast": 32, "beta_slow": 1, "factor": 1, "mscale": 1.0, "mscale_all_dim": 1.0, "original_max_position_embeddings": 4096, "type": "rope", } if "rope_scaling" in hf_config and hf_config.rope_scaling is not None: mla_rope_config.update(hf_config.rope_scaling) moe_layer_freq = [1] * hf_config.num_hidden_layers for i in range(min(hf_config.first_k_dense_replace, hf_config.num_hidden_layers)): moe_layer_freq[i] = 0 # disable MTP and quantization for now if "num_nextn_predict_layers" in hf_config: assert hf_config.num_nextn_predict_layers == 0, ( "MTP is not supported for now, please modify the config.json to set num_nextn_predict_layers to 0" ) assert "quantization_config" not in hf_config or not 
hf_config.quantization_config, ( "quantization is not supported for now, please modify the config.json to remove quantization_config" ) args: dict = _get_mla_transformer_config( hf_config=hf_config, mla_rope_config=mla_rope_config, dtype=dtype, # Additional parameters use_cpu_initialization=False, add_bias_linear=False, attention_backend=AttnBackend.fused, qk_layernorm=True, # Standard MoE parameters moe_ffn_hidden_size=hf_config.moe_intermediate_size, moe_token_dispatcher_type="alltoall", moe_router_bias_update_rate=0.001, moe_router_enable_expert_bias=True, moe_router_topk=hf_config.num_experts_per_tok, num_moe_experts=hf_config.n_routed_experts, moe_shared_expert_intermediate_size=hf_config.moe_intermediate_size * hf_config.n_shared_experts, moe_aux_loss_coeff=getattr(hf_config, "aux_loss_alpha", 0.001), moe_router_load_balancing_type="seq_aux_loss", moe_shared_expert_overlap=True, # moe_permute_fusion=True, # need TE 2.1+ moe_grouped_gemm=True, moe_router_score_function="sigmoid", moe_router_pre_softmax=True, moe_router_topk_scaling_factor=hf_config.routed_scaling_factor, moe_layer_freq=moe_layer_freq, # mcore 0.12 moe moe_router_dtype="fp64", disable_bf16_reduced_precision_matmul=True, # Other optimizations # deallocate_pipeline_outputs=True, # gradient_accumulation_fusion=True, persist_layer_norm=True, bias_activation_fusion=True, bias_dropout_fusion=True, ) # override_transformer_config_kwargs as kwargs shall never be none args.update(override_transformer_config_kwargs) transformer_config = check_and_construct_configs(args, MLATransformerConfig) # MTP if "num_nextn_predict_layers" in hf_config: transformer_config.mtp_num_layers = hf_config.num_nextn_predict_layers transformer_config.mtp_loss_scaling_factor = 0.1 return transformer_config def hf_to_mcore_config_qwen2_5_vl( hf_config: PretrainedConfig, dtype: torch.dtype, **override_transformer_config_kwargs ) -> TransformerConfig: # Qwen2_5_VLForConditionalGeneration args = _get_base_transformer_config( hf_config=hf_config, dtype=dtype, add_bias_linear=False, # qwen specific add_qkv_bias=True, mrope_section=hf_config.rope_scaling["mrope_section"], ) # override_transformer_config_kwargs as kwargs shall never be none args.update(override_transformer_config_kwargs) args = mapping_string_to_attn_backend(args) return TransformerConfig(**args) def hf_to_mcore_config_llama4( hf_config: PretrainedConfig, dtype: torch.dtype, **override_transformer_config_kwargs ) -> TransformerConfig: # Llama4ForConditionalGeneration raise NotImplementedError("Llama4ForConditionalGeneration is not supported yet") def mapping_string_to_attn_backend(args: dict) -> dict: if "attention_backend" in args and isinstance(args["attention_backend"], str): from megatron.core.transformer.enums import AttnBackend args["attention_backend"] = AttnBackend[args["attention_backend"]] return args ================================================ FILE: verl_distillation/verl/models/mcore/loader.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
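# Minimal sketch of the compatibility filter implemented by
# check_and_construct_configs in config_converter.py above: kwargs the installed
# config class does not define are dropped (with a warning) before the config is
# constructed, so newer converter options degrade gracefully on older Megatron.
# DemoConfig is hypothetical; the real code targets Megatron's TransformerConfig.
import warnings
from dataclasses import dataclass


@dataclass
class DemoConfig:
    num_layers: int = 1
    hidden_size: int = 8


def construct_filtered(args: dict, cls):
    removed = [key for key in args if not hasattr(cls, key)]
    if removed:
        warnings.warn(f"dropping keys unsupported by {cls.__name__}: {removed}", stacklevel=2)
    for key in removed:
        args.pop(key)
    return cls(**args)


_demo = construct_filtered({"num_layers": 4, "not_a_field": 0}, DemoConfig)
assert _demo.num_layers == 4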
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import time import torch import torch.distributed as dist from verl.utils.device import get_device_id, get_torch_device from .saver import _megatron_calc_global_rank def _megatron_calc_layer_map(config): """Calculate the mapping of global layer_idx to local layer_idx Returns: layer_map (Dict: int -> tuple(int, int, int)): mapping from the global layer index to a tuple of (pp_rank, virtual_pp_rank, layer_idx inside model) """ from megatron.core import mpu pp_size = mpu.get_pipeline_model_parallel_world_size() virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1 layer_map = dict() num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers for pp_rank_idx in range(pp_size): for virtual_pp_rank_idx in range(virtual_pp_size): layer_offset = ( virtual_pp_rank_idx * (config.num_hidden_layers // virtual_pp_size) + pp_rank_idx * num_layers_per_model ) for layer_idx in range(num_layers_per_model): layer_map[layer_offset + layer_idx] = ( pp_rank_idx, virtual_pp_rank_idx, layer_idx, ) return layer_map def load_state_dict_to_megatron_gptmodel(state_dict, wrapped_models, config, params_dtype, is_value_model=False): """Load merged state_dict to sharded Megatron module in training.""" from megatron.core import DistributedDataParallel as LocalDDP from megatron.core import mpu from megatron.core.transformer.module import Float16Module from torch.nn.parallel import DistributedDataParallel as torchDDP from verl.utils.logger import print_rank_0 from verl.utils.megatron_utils import unwrap_model start_time = time.time() def _get_gpt_model(model): return model def broadcast_params(module): for param in module.parameters(): torch.distributed.broadcast( param.data, src=mpu.get_data_parallel_src_rank(), group=mpu.get_data_parallel_group() ) dp_rank = mpu.get_data_parallel_rank() pp_rank = mpu.get_pipeline_model_parallel_rank() cp_rank = mpu.get_context_parallel_rank() src_rank = _megatron_calc_global_rank(tp_rank=0, dp_rank=0, pp_rank=0, cp_rank=cp_rank) pp_size = mpu.get_pipeline_model_parallel_world_size() virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1 mp_group = mpu.get_model_parallel_group() if torch.distributed.get_rank() == src_rank: assert mp_group.rank() == 0, f"mp_rank:[{mp_group.rank}] != 0 on rank #0" assert pp_rank == 0, f"pp_rank:[{pp_rank}] != 0 on rank #0" assert dp_rank == 0, f"dp_rank:[{dp_rank}] != 0 on rank #0" if not isinstance(wrapped_models, list | tuple): wrapped_models = list(wrapped_models) assert len(wrapped_models) == virtual_pp_size num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers models = [None] * len(wrapped_models) for i, wrapped_model in enumerate(wrapped_models): models[i] = unwrap_model(wrapped_model, (torchDDP, LocalDDP, Float16Module)) gpt_model_module = _get_gpt_model(models[i]) assert len(gpt_model_module.decoder.layers) == num_layers_per_model def _broadcast_tensor(tensor, name) -> torch.Tensor: """broadcast tensor 
from rank0 across mp_group""" nonlocal state_dict nonlocal mp_group if torch.distributed.get_rank() == src_rank: if name in state_dict: weight = state_dict[name] tensor_shape = weight.shape else: tensor_shape = None else: weight = None tensor_shape = None obj_list = [tensor_shape] dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group) tensor_shape = obj_list[0] if tensor_shape is None: # all or none ranks in the mp_group should reach here print_rank_0(f"tensor:[{name}] not in state_dict, skip load") return if tensor is None: tensor = torch.empty( tensor_shape, dtype=params_dtype, device=get_device_id(), requires_grad=False, ) if torch.distributed.get_rank() == src_rank: tensor.data.copy_(weight) dist.broadcast(tensor, src=src_rank, group=mp_group) def _broadcast_tp_shard_tensor_vocab(tensor, name, chunk_dim=0, mutate_func=None) -> torch.Tensor: """broadcast tensor in tp shards across mp_group""" nonlocal state_dict nonlocal mp_group tp_rank = mpu.get_tensor_model_parallel_rank() tp_size = mpu.get_tensor_model_parallel_world_size() if torch.distributed.get_rank() == src_rank: if name in state_dict: full_weight = state_dict[name] if mutate_func is not None: full_weight = mutate_func(full_weight) tensor_chunk = torch.chunk(full_weight, tp_size, dim=chunk_dim) chunk_shape = tensor_chunk[0].shape else: chunk_shape = None else: chunk_shape = None obj_list = [chunk_shape] dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group) chunk_shape = obj_list[0] if chunk_shape is None: # all or none ranks in the mp_group should reach here print_rank_0(f"tp_shard tensor:[{name}] not in state_dict, skip loading") return if tensor is None: sync_tensor = torch.empty( chunk_shape, dtype=params_dtype, device=get_device_id(), requires_grad=False, ) else: assert tensor.shape == chunk_shape, ( f"rank #{torch.distributed.get_rank()} tensor {name} shape {tensor.shape} != {chunk_shape}" ) sync_tensor = torch.empty_like(tensor, device=get_device_id(), requires_grad=False) for i in range(tp_size): if torch.distributed.get_rank() == src_rank: sync_tensor.data.copy_(tensor_chunk[i]) dist.broadcast(sync_tensor, src=src_rank, group=mp_group) if (i == tp_rank) and (tensor is not None): tensor.data.copy_(sync_tensor) def _broadcast_tp_shard_tensor(tensor, name, chunk_dim=0, mutate_func=None) -> torch.Tensor: """broadcast tensor in tp shards across mp_group""" nonlocal state_dict nonlocal mp_group tp_rank = mpu.get_tensor_model_parallel_rank() tp_size = mpu.get_tensor_model_parallel_world_size() if torch.distributed.get_rank() == src_rank: if name in state_dict: full_weight = state_dict[name] if mutate_func is not None: full_weight = mutate_func(full_weight) tensor_chunk = torch.chunk(full_weight, tp_size, dim=chunk_dim) chunk_shape = tensor_chunk[0].shape else: chunk_shape = None else: chunk_shape = None obj_list = [chunk_shape] dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group) chunk_shape = obj_list[0] if chunk_shape is None: # all or none ranks in the mp_group should reach here print_rank_0(f"tp_shard tensor:[{name}] not in state_dict, skip loading") return if tensor is None: sync_tensor = torch.empty( chunk_shape, dtype=params_dtype, device=get_device_id(), requires_grad=False, ) else: assert tensor.shape == chunk_shape, ( f"rank #{torch.distributed.get_rank()} tensor {name} shape {tensor.shape} != {chunk_shape}" ) sync_tensor = torch.empty_like(tensor, device=get_device_id(), requires_grad=False) for i in range(tp_size): if torch.distributed.get_rank() == src_rank: 
                sync_tensor.data.copy_(tensor_chunk[i])
            dist.broadcast(sync_tensor, src=src_rank, group=mp_group)
            if (i == tp_rank) and (tensor is not None):
                tensor.data.copy_(sync_tensor)

    def _broadcast_tp_shard_tensor_gate_up(tensor, gate_name, up_name) -> torch.Tensor:
        """Broadcast tensor in tp shards across mp_group."""
        nonlocal state_dict
        nonlocal mp_group
        tp_rank = mpu.get_tensor_model_parallel_rank()
        tp_size = mpu.get_tensor_model_parallel_world_size()

        if torch.distributed.get_rank() == src_rank:
            gate_weight = state_dict[gate_name]
            up_weight = state_dict[up_name]
            new_gate_up_weight = torch.empty(
                config.intermediate_size * 2, config.hidden_size, dtype=params_dtype, device=get_device_id()
            )
            # Each TP shard of the fused fc1 stores its gate slice followed by its up slice: [gate_i; up_i].
            for i in range(tp_size):
                intermediate_size_tp = config.intermediate_size // tp_size
                gate_weight_tp = gate_weight[i * intermediate_size_tp : (i + 1) * intermediate_size_tp]
                up_weight_tp = up_weight[i * intermediate_size_tp : (i + 1) * intermediate_size_tp]
                new_gate_up_weight[intermediate_size_tp * 2 * i : intermediate_size_tp * 2 * (i + 1)].copy_(
                    torch.cat([gate_weight_tp, up_weight_tp], dim=0)
                )

            tensor_chunk = torch.chunk(new_gate_up_weight, tp_size, dim=0)
            chunk_shape = tensor_chunk[0].shape
        else:
            chunk_shape = None

        obj_list = [chunk_shape]
        dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group)
        chunk_shape = obj_list[0]
        if chunk_shape is None:
            # all or none ranks in the mp_group should reach here
            print_rank_0(f"tp_shard tensor:[{gate_name, up_name}] not in state_dict, skip loading")
            return

        if tensor is None:
            sync_tensor = torch.empty(
                chunk_shape,
                dtype=params_dtype,
                device=get_device_id(),
                requires_grad=False,
            )
        else:
            assert tensor.shape == chunk_shape, (
                f"rank #{torch.distributed.get_rank()} tensor {gate_name, up_name} shape "
                f"{tensor.shape} != {chunk_shape}"
            )
            sync_tensor = torch.empty_like(tensor, device=get_device_id(), requires_grad=False)

        for i in range(tp_size):
            if torch.distributed.get_rank() == src_rank:
                sync_tensor.data.copy_(tensor_chunk[i])
            dist.broadcast(sync_tensor, src=src_rank, group=mp_group)
            if (i == tp_rank) and (tensor is not None):
                tensor.data.copy_(sync_tensor)

    def _broadcast_tp_shard_tensor_qkv(tensor, q_name, k_name, v_name, bias=False) -> torch.Tensor:
        """Broadcast tensor in tp shards across mp_group."""
        nonlocal state_dict
        nonlocal mp_group
        tp_rank = mpu.get_tensor_model_parallel_rank()
        tp_size = mpu.get_tensor_model_parallel_world_size()

        if torch.distributed.get_rank() == src_rank:
            assert q_name in state_dict and k_name in state_dict and v_name in state_dict
            full_weight_q = state_dict[q_name]
            full_weight_k = state_dict[k_name]
            full_weight_v = state_dict[v_name]

            hidden_size_per_head = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)

            if config.num_key_value_heads >= tp_size:
                q_size_tp = hidden_size_per_head * config.num_attention_heads // tp_size
                kv_size_tp = hidden_size_per_head * config.num_key_value_heads // tp_size
                total_size = q_size_tp + 2 * kv_size_tp
                sizes = [total_size * tp_size]
                if not bias:
                    sizes.append(config.hidden_size)
                new_weight_qkv = torch.empty(*sizes, dtype=params_dtype, device=get_device_id())
                # Within each TP shard, weights are interleaved per query group as [q_heads; k_head; v_head].
                for i in range(tp_size):
                    q_part = full_weight_q[i * q_size_tp : (i + 1) * q_size_tp]
                    k_part = full_weight_k[i * kv_size_tp : (i + 1) * kv_size_tp]
                    v_part = full_weight_v[i * kv_size_tp : (i + 1) * kv_size_tp]
                    num_query_groups_per_partition = models[0].config.num_query_groups // tp_size
                    new_weight_qkv_this_tp = new_weight_qkv[i * total_size : (i + 1) * total_size]
                    q_part_per_head = torch.chunk(q_part, num_query_groups_per_partition, dim=0)
                    k_part_per_head = torch.chunk(k_part, num_query_groups_per_partition, dim=0)
                    v_part_per_head = torch.chunk(v_part, num_query_groups_per_partition, dim=0)
                    total_size_per_head = total_size // num_query_groups_per_partition
                    for j in range(num_query_groups_per_partition):
                        new_weight_qkv_this_tp[j * total_size_per_head : (j + 1) * total_size_per_head].copy_(
                            torch.cat([q_part_per_head[j], k_part_per_head[j], v_part_per_head[j]], dim=0)
                        )
            else:
                q_size_tp = hidden_size_per_head * config.num_attention_heads // tp_size
                kv_size_tp = hidden_size_per_head
                total_size = q_size_tp + 2 * kv_size_tp
                sizes = [total_size * tp_size]
                if not bias:
                    sizes.append(config.hidden_size)
                new_weight_qkv = torch.empty(*sizes, dtype=params_dtype, device=get_device_id())
                for i in range(tp_size):
                    q_part = full_weight_q[i * q_size_tp : (i + 1) * q_size_tp]
                    start_idx = i * config.num_key_value_heads // tp_size * hidden_size_per_head
                    end_idx = (i * config.num_key_value_heads // tp_size + 1) * hidden_size_per_head
                    k_part = full_weight_k[start_idx:end_idx]
                    v_part = full_weight_v[start_idx:end_idx]
                    new_weight_qkv_this_tp = new_weight_qkv[i * total_size : (i + 1) * total_size]
                    q_part_per_head = torch.chunk(q_part, config.num_attention_heads, dim=0)
                    k_part_per_head = torch.chunk(k_part, config.num_attention_heads, dim=0)
                    v_part_per_head = torch.chunk(v_part, config.num_attention_heads, dim=0)
                    total_size_per_head = total_size // config.num_attention_heads
                    for j in range(config.num_attention_heads):
                        new_weight_qkv_this_tp[j * total_size_per_head : (j + 1) * total_size_per_head].copy_(
                            torch.cat([q_part_per_head[j], k_part_per_head[j], v_part_per_head[j]], dim=0)
                        )

            tensor_chunk = torch.chunk(new_weight_qkv, tp_size, dim=0)
            chunk_shape = tensor_chunk[0].shape
        else:
            chunk_shape = None

        obj_list = [chunk_shape]
        dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group)
        chunk_shape = obj_list[0]
        if chunk_shape is None:
            # all or none ranks in the mp_group should reach here
            print_rank_0(f"tp_shard tensor:[{q_name, k_name, v_name}] not in state_dict, skip loading")
            return

        if tensor is None:
            sync_tensor = torch.empty(
                chunk_shape,
                dtype=params_dtype,
                device=get_device_id(),
                requires_grad=False,
            )
        else:
            assert tensor.shape == chunk_shape, (
                f"rank #{torch.distributed.get_rank()} tensor {q_name} shape {tensor.shape} != {chunk_shape}"
            )
            sync_tensor = torch.empty_like(tensor, device=get_device_id(), requires_grad=False)

        for i in range(tp_size):
            if torch.distributed.get_rank() == src_rank:
                sync_tensor.data.copy_(tensor_chunk[i])
            dist.broadcast(sync_tensor, src=src_rank, group=mp_group)
            if (i == tp_rank) and (tensor is not None):
                tensor.data.copy_(sync_tensor)

    if dp_rank == 0:
        # Embeddings
        # -------------------
        print_rank_0("loading embeddings...")
        gpt_model_module = _get_gpt_model(models[0])
        embed_tokens_weight = None
        if pp_rank == 0:
            embed_tokens_weight = gpt_model_module.embedding.word_embeddings.weight
        _broadcast_tp_shard_tensor_vocab(embed_tokens_weight, "model.embed_tokens.weight")

        # Transformer layers
        # -------------------
        layer_map = _megatron_calc_layer_map(config)
        for layer in range(config.num_hidden_layers):
            layer_name = f"model.layers.{layer}"
            print_rank_0(f"loading layer #{layer}, with layer_name model.layers.{layer}...")
            dst_pp_rank, dst_virtual_pp_rank, dst_layer_idx = layer_map[layer]
            gpt_model_module = _get_gpt_model(models[dst_virtual_pp_rank])
            sync_layer = gpt_model_module.decoder.layers[dst_layer_idx]

            _broadcast_tensor(
                sync_layer.self_attention.linear_qkv.layer_norm_weight if dst_pp_rank == pp_rank else None,
                f"{layer_name}.input_layernorm.weight",
            )
            if f"{layer_name}.self_attn.q_norm.weight" in state_dict:
                _broadcast_tensor(
                    sync_layer.self_attention.q_layernorm.weight if dst_pp_rank == pp_rank else None,
                    f"{layer_name}.self_attn.q_norm.weight",
                )
                _broadcast_tensor(
                    sync_layer.self_attention.k_layernorm.weight if dst_pp_rank == pp_rank else None,
                    f"{layer_name}.self_attn.k_norm.weight",
                )
            _broadcast_tp_shard_tensor_qkv(
                sync_layer.self_attention.linear_qkv.weight if dst_pp_rank == pp_rank else None,
                f"{layer_name}.self_attn.q_proj.weight",
                f"{layer_name}.self_attn.k_proj.weight",
                f"{layer_name}.self_attn.v_proj.weight",
            )
            if f"{layer_name}.self_attn.q_proj.bias" in state_dict:
                _broadcast_tp_shard_tensor_qkv(
                    sync_layer.self_attention.linear_qkv.bias if dst_pp_rank == pp_rank else None,
                    f"{layer_name}.self_attn.q_proj.bias",
                    f"{layer_name}.self_attn.k_proj.bias",
                    f"{layer_name}.self_attn.v_proj.bias",
                    bias=True,
                )
            _broadcast_tp_shard_tensor(
                sync_layer.self_attention.linear_proj.weight if dst_pp_rank == pp_rank else None,
                f"{layer_name}.self_attn.o_proj.weight",
                chunk_dim=1,
            )
            _broadcast_tensor(
                sync_layer.mlp.linear_fc1.layer_norm_weight if dst_pp_rank == pp_rank else None,
                f"{layer_name}.post_attention_layernorm.weight",
            )
            _broadcast_tp_shard_tensor_gate_up(
                sync_layer.mlp.linear_fc1.weight if dst_pp_rank == pp_rank else None,
                f"{layer_name}.mlp.gate_proj.weight",
                f"{layer_name}.mlp.up_proj.weight",
            )
            _broadcast_tp_shard_tensor(
                sync_layer.mlp.linear_fc2.weight if dst_pp_rank == pp_rank else None,
                f"{layer_name}.mlp.down_proj.weight",
                chunk_dim=1,
            )

        # Final Layernorm
        # -------------------
        print_rank_0("loading final layernorm...")
        gpt_model_module = _get_gpt_model(models[-1])
        _broadcast_tensor(
            getattr(gpt_model_module.decoder.final_layernorm, "weight", None),
            "model.norm.weight",
        )

        print_rank_0("loading lm_head...")
        lm_head_weight = None
        if pp_rank + 1 == pp_size:
            lm_head_weight = gpt_model_module.output_layer.weight

        if is_value_model:
            # if torch.distributed.get_rank() == src_rank:
            if "lm_head.weight" in state_dict and state_dict["lm_head.weight"].shape[0] == 1:
                _broadcast_tensor(lm_head_weight, "lm_head.weight")
            elif "reward_head.weight" in state_dict and state_dict["reward_head.weight"].shape[0] == 1:
                _broadcast_tensor(lm_head_weight, "reward_head.weight")
                print_rank_0("load lm_head from value_head weight")
            elif "score.weight" in state_dict and state_dict["score.weight"].shape[0] == 1:
                _broadcast_tensor(lm_head_weight, "score.weight")
                print_rank_0("load lm_head from score weight")
            else:
                _broadcast_tensor(None, "lm_head.weight")
                print_rank_0("fail to match lm_head in value_model")
            # else:
            #     _broadcast_tensor(lm_head_weight, "lm_head.weight")
        else:
            _broadcast_tp_shard_tensor(lm_head_weight, "lm_head.weight")

    dist.barrier()
    # Broadcast weights inside data parallel groups
    for wrapped_model in wrapped_models:
        broadcast_params(wrapped_model)

    get_torch_device().empty_cache()
    print_rank_0(f"loading megatron ckpt done, time elapsed {time.time() - start_time}s")


================================================
FILE: verl_distillation/verl/models/mcore/mbridge.py
================================================
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. try: from mbridge import AutoBridge from mbridge.utils.post_creation_callbacks import freeze_moe_router, make_value_model except ImportError: print("mbridge package not found. Please install mbridge with `pip install verl[mcore]` or `pip install mbridge`") raise __all__ = ["AutoBridge", "make_value_model", "freeze_moe_router"] ================================================ FILE: verl_distillation/verl/models/mcore/model_forward.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from verl.utils.megatron_utils import unwrap_model from .util import ( postprocess_packed_seqs, postprocess_packed_seqs_no_padding, preprocess_packed_seqs, preprocess_packed_seqs_no_padding, ) def model_forward_gen(vision_model: bool = False): def model_forward( model, input_ids, attention_mask, position_ids, multi_modal_inputs: dict, logits_processor=None, logits_processor_args: dict = None, value_model=False, ): """Forward pass for models with sequence packing.""" pre_process = ( unwrap_model(model).pre_process if not vision_model else False ) # vision model does not need pre_process, because we pack the input_ids to thd in the forward function post_process = unwrap_model(model).post_process model_kwargs = {} if "pixel_values" in multi_modal_inputs: model_kwargs["pixel_values"] = multi_modal_inputs["pixel_values"].to(input_ids.device) if "image_grid_thw" in multi_modal_inputs: model_kwargs["image_grid_thw"] = multi_modal_inputs["image_grid_thw"].to(input_ids.device) if "pixel_values_videos" in multi_modal_inputs: model_kwargs["pixel_values_videos"] = multi_modal_inputs["pixel_values_videos"].to(input_ids.device) if "video_grid_thw" in multi_modal_inputs: model_kwargs["video_grid_thw"] = multi_modal_inputs["video_grid_thw"].to(input_ids.device) batch_size, seq_len = attention_mask.shape[:2] input_ids_rmpad, packed_seq_params = preprocess_packed_seqs(input_ids, attention_mask, pre_process=pre_process) input_ids_rmpad = input_ids_rmpad.contiguous() input_args = dict( input_ids=input_ids_rmpad, attention_mask=None, position_ids=position_ids if not vision_model else None, # vision models will calculate position_ids packed_seq_params=packed_seq_params, **model_kwargs, ) if vision_model: # workaround for supporting sequence packing with context parallelism # cp split with sequence packing will make model lose vision token information, so we need to keep # the original input_ids and pack them after vision 
embedding is calculated, # cooporate with mbridge input_args["input_ids"] = input_ids input_args["attention_mask"] = attention_mask output_orig = model(**input_args) if post_process and logits_processor is not None: args = { k: preprocess_packed_seqs(v, attention_mask, pre_process=True)[0] for k, v in logits_processor_args.items() } output_dict = logits_processor(output_orig, **args) output = { k: postprocess_packed_seqs( v, packed_seq_params, attention_mask, batch_size, seq_len, post_process=post_process ) for k, v in output_dict.items() } else: output = postprocess_packed_seqs( output_orig, packed_seq_params, attention_mask, batch_size, seq_len, post_process=post_process ) if value_model and post_process: output = output[..., 0] return output return model_forward def gptmodel_forward_no_padding( model, input_ids, multi_modal_inputs: dict, logits_processor=None, logits_processor_args: dict = None, value_model=False, ): """Default forward pass for GPT models with optional sequence packing.""" pre_process = unwrap_model(model).pre_process post_process = unwrap_model(model).post_process model_kwargs = {} if "pixel_values" in multi_modal_inputs: model_kwargs["pixel_values"] = multi_modal_inputs["pixel_values"].to(input_ids.device) if "image_grid_thw" in multi_modal_inputs: model_kwargs["image_grid_thw"] = multi_modal_inputs["image_grid_thw"].to(input_ids.device) batch_size = input_ids.shape[0] input_ids_rmpad, packed_seq_params = preprocess_packed_seqs_no_padding(input_ids, pre_process=pre_process) input_ids_rmpad = input_ids_rmpad.contiguous() output_orig = model( input_ids=input_ids_rmpad, attention_mask=None, position_ids=None, packed_seq_params=packed_seq_params, **model_kwargs, ) if post_process and logits_processor is not None: args = {k: preprocess_packed_seqs_no_padding(v, pre_process=True)[0] for k, v in logits_processor_args.items()} output_dict = logits_processor(output_orig, **args) output = { k: postprocess_packed_seqs_no_padding( v, packed_seq_params, input_ids, batch_size, post_process=post_process ) for k, v in output_dict.items() } else: output = postprocess_packed_seqs_no_padding( output_orig, packed_seq_params, input_ids, batch_size, post_process=post_process ) if value_model and post_process: # output = output[..., 0] # while using nested tensor, the advanced indexing operation above will result in an error at backward, i.e. # ValueError: NestedTensor _nested_select_backward_default(grad_output: t, self: jt_all, dim: any, index: any) # so we use `squeeze` to remove the last dimension output = output.squeeze(-1) return output ================================================ FILE: verl_distillation/verl/models/mcore/model_forward_1f1b_overlap.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
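Both the forward helpers above and the 1F1B-overlap variant below revolve around the same round trip: preprocess_packed_seqs drops padded positions and records per-sample boundaries in PackedSeqParams, the model runs on one packed [1, total_tokens] sequence, and postprocess_packed_seqs scatters results back to the padded [batch, seq] layout. A minimal self-contained sketch of that round trip (hypothetical helper names; the real helpers in verl/models/mcore/util.py additionally handle pipeline pre_process flags, padding multiples, and context-parallel splitting):

import torch

def unpad(x: torch.Tensor, attention_mask: torch.Tensor):
    # Keep only valid tokens, packed into a single [1, total_tokens] sequence,
    # and record the cumulative sequence lengths the attention kernel needs.
    seqlens = attention_mask.sum(dim=1, dtype=torch.int32)
    cu_seqlens = torch.zeros(seqlens.numel() + 1, dtype=torch.int32)
    cu_seqlens[1:] = torch.cumsum(seqlens, dim=0)
    return x[attention_mask.bool()].unsqueeze(0), cu_seqlens

def repad(packed: torch.Tensor, attention_mask: torch.Tensor):
    # Scatter packed per-token outputs back to the padded [batch, seq, ...] layout.
    batch, seq = attention_mask.shape
    out = packed.new_zeros((batch, seq) + packed.shape[2:])
    out[attention_mask.bool()] = packed[0]
    return out

mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]])
ids = torch.tensor([[5, 6, 7, 0], [8, 9, 0, 0]])
packed_ids, cu_seqlens = unpad(ids, mask)  # [[5, 6, 7, 8, 9]], cu_seqlens [0, 3, 5]
restored = repad(packed_ids, mask)         # equals ids, with pad positions zeroed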
from typing import Callable, Optional import torch from megatron.core.models.common.model_chunk_schedule_plan import TransformerModelChunkSchedulePlan from megatron.core.models.gpt.gpt_model import GPTModel from megatron.core.utils import make_viewless_tensor from torch import Tensor from verl.models.mcore.util import preprocess_packed_seqs from verl.utils.kernel.linear_cross_entropy import linear_cross_entropy from verl.utils.megatron_utils import unwrap_model from verl.utils.model import CausalLMOutputForPPO from .util import postprocess_packed_seqs, postprocess_packed_seqs_for_dict_output def gptmodel_forward_1f1b_overlap( model: GPTModel, input_ids: Tensor, position_ids: Tensor, attention_mask: Tensor, labels: Tensor = None, labels_mask: Tensor = None, multi_modal_inputs: Optional[dict] = None, logits_processor: Optional[Callable] = None, logits_processor_args: Optional[dict] = None, temperature: float = 1.0, ) -> TransformerModelChunkSchedulePlan: pre_process: bool = unwrap_model(model).pre_process post_process: bool = unwrap_model(model).post_process assert logits_processor is None, "only support fused kernel" batch_size, seq_len = attention_mask.shape[:2] input_ids_rmpad, packed_seq_params = preprocess_packed_seqs(input_ids, attention_mask, pre_process=pre_process) input_ids_rmpad = input_ids_rmpad.contiguous() schedule_plan = model.build_schedule_plan( input_ids=input_ids_rmpad, attention_mask=attention_mask, labels=labels, position_ids=position_ids, packed_seq_params=packed_seq_params, ) if post_process: attention_mask_out = attention_mask def _postprocess( self, hidden_states, input_ids, position_ids, labels, rotary_pos_emb, rotary_pos_cos, rotary_pos_sin, mtp_in_postprocess=None, loss_mask=None, decoder_input=None, attention_mask=None, inference_params=None, packed_seq_params=None, sequence_len_offset=None, runtime_gather_output=None, extra_block_kwargs=None, inference_context=None, ): """patched from https://github.com/NVIDIA/Megatron-LM/blob/core_r0.14.0/megatron/core/models/gpt/gpt_model.py#L412""" """Postprocesses decoder hidden states to generate logits or compute loss. Applies Multi-Token Prediction if enabled, generates output logits through the output layer, and computes language model loss when labels are provided. 
""" from megatron.core import parallel_state from megatron.core.tensor_parallel import gather_from_sequence_parallel_region in_inference_mode = inference_context is not None and not self.training if in_inference_mode: assert runtime_gather_output, "Inference must always gather TP logits" # logits and loss output_weight = None if self.share_embeddings_and_output_weights: output_weight = self.shared_embedding_or_output_weight() if mtp_in_postprocess: hidden_states = self.mtp( input_ids=input_ids, position_ids=position_ids, hidden_states=hidden_states, attention_mask=attention_mask, inference_params=inference_params, rotary_pos_emb=rotary_pos_emb, rotary_pos_cos=rotary_pos_cos, rotary_pos_sin=rotary_pos_sin, packed_seq_params=packed_seq_params, sequence_len_offset=sequence_len_offset, embedding=self.embedding, **(extra_block_kwargs or {}), ) if not self.post_process: return hidden_states if self.mtp_process: from megatron.core.transformer.multi_token_prediction import ( MTPLossAutoScaler, MTPLossLoggingHelper, roll_tensor, ) mtp_labels = labels.clone() hidden_states_list = torch.chunk(hidden_states, 1 + self.config.mtp_num_layers, dim=0) hidden_states = hidden_states_list[0] if loss_mask is None: # if loss_mask is not provided, use all ones as loss_mask loss_mask = torch.ones_like(mtp_labels) for mtp_layer_number in range(self.config.mtp_num_layers): # output mtp_logits, _ = self.output_layer( hidden_states_list[mtp_layer_number + 1], weight=output_weight, runtime_gather_output=runtime_gather_output, ) # Calc loss for the current Multi-Token Prediction (MTP) layers. mtp_labels, _ = roll_tensor(mtp_labels, shifts=-1, dims=-1, cp_group=self.cp_group) loss_mask, num_tokens = roll_tensor(loss_mask, shifts=-1, dims=-1, cp_group=self.cp_group) mtp_loss = self.compute_language_model_loss(mtp_labels, mtp_logits) mtp_loss = loss_mask * mtp_loss if self.training: # TODO(shifangx): remove the use of parallel_state here # after moving loss logging to loss_func in pretrain_gpt.py MTPLossLoggingHelper.save_loss_to_tracker( torch.sum(mtp_loss) / num_tokens, mtp_layer_number, self.config.mtp_num_layers, avg_group=parallel_state.get_data_parallel_group(with_context_parallel=True), ) mtp_loss_scale = self.config.mtp_loss_scaling_factor / self.config.mtp_num_layers if self.config.calculate_per_token_loss: hidden_states = MTPLossAutoScaler.apply(hidden_states, mtp_loss_scale * mtp_loss) else: hidden_states = MTPLossAutoScaler.apply(hidden_states, mtp_loss_scale * mtp_loss / num_tokens) if logits_processor is not None: logits, _ = self.output_layer( hidden_states, weight=output_weight, runtime_gather_output=runtime_gather_output ) output_orig = logits.transpose(0, 1).contiguous() args = { k: preprocess_packed_seqs(v, attention_mask_out, pre_process=True)[0] for k, v in logits_processor_args.items() } output_dict = logits_processor(output_orig, **args) output = { k: postprocess_packed_seqs( v, packed_seq_params, attention_mask_out, batch_size, seq_len, post_process=post_process ) for k, v in output_dict.items() } else: # fused kernel labels_rmpad, _ = preprocess_packed_seqs(labels, attention_mask, pre_process=True) labels_mask_rmpad, _ = preprocess_packed_seqs(labels_mask, attention_mask, pre_process=True) labels_rmpad = labels_rmpad.contiguous() labels_mask_rmpad = labels_mask_rmpad.contiguous() output = CausalLMOutputForPPO( loss=None, logits=None, past_key_values=None, hidden_states=hidden_states, attentions=None, ) if self.config.sequence_parallel: hidden_states = 
gather_from_sequence_parallel_region(hidden_states) logprobs, entropy = linear_cross_entropy( hidden_states, self.output_layer.weight, labels_rmpad, temperature, "none", parallel_state.get_tensor_model_parallel_group(), ) output.entropy = entropy output.log_probs = logprobs output = postprocess_packed_seqs_for_dict_output( labels_mask_rmpad, output, packed_seq_params, attention_mask, batch_size, seq_len, post_process=post_process, ) output_ = [output["log_probs"]] # TODO NOW 1f1b overlap only support one tensor output # if "entropy" in output: # output_.append(output["entropy"]) output_ = tuple(output_) return output_ def _custom_post_process_node_forward_impl(self, hidden_states): if self.gpt_model.decoder.final_layernorm and not self.gpt_model.mtp_process: hidden_states = self.gpt_model.decoder.final_layernorm(hidden_states) # TENorm produces a "viewed" tensor. This will result in schedule.py's # deallocate_output_tensor() throwing an error, so a viewless tensor is # created to prevent this. hidden_states = make_viewless_tensor(inp=hidden_states, requires_grad=True, keep_graph=True) # Run GPTModel._postprocess output = self.gpt_model._postprocess( hidden_states=hidden_states, input_ids=self.chunk_state.input_ids, position_ids=self.chunk_state.position_ids, labels=self.chunk_state.labels, decoder_input=self.chunk_state.decoder_input, rotary_pos_emb=self.chunk_state.rotary_pos_emb, rotary_pos_cos=self.chunk_state.rotary_pos_cos, rotary_pos_sin=self.chunk_state.rotary_pos_sin, mtp_in_postprocess=False, loss_mask=self.chunk_state.loss_mask, attention_mask=self.chunk_state.attention_mask, packed_seq_params=self.chunk_state.packed_seq_params, sequence_len_offset=self.chunk_state.sequence_len_offset, runtime_gather_output=self.chunk_state.runtime_gather_output, extra_block_kwargs=self.chunk_state.extra_block_kwargs, ) return output schedule_plan.post_process.forward_impl = _custom_post_process_node_forward_impl.__get__( schedule_plan.post_process, schedule_plan.post_process.__class__ ) unwrap_model(model)._postprocess = _postprocess.__get__(unwrap_model(model), unwrap_model(model).__class__) return schedule_plan ================================================ FILE: verl_distillation/verl/models/mcore/model_forward_fused.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
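The module below patches GPTModel so that the final projection and the log-prob/entropy computation run through verl's fused linear_cross_entropy kernel, avoiding materializing the full [tokens, vocab] logits tensor at once. An unfused reference for the two quantities the kernel returns (a sketch assuming a single TP rank and no memory tiling; the real kernel in verl/utils/kernel/linear_cross_entropy.py is chunked and TP-aware):

import torch
import torch.nn.functional as F

def reference_linear_cross_entropy(hidden, weight, labels, temperature=1.0):
    # hidden: [tokens, hidden], weight: [vocab, hidden], labels: [tokens]
    logits = (hidden @ weight.t()) / temperature
    logp = F.log_softmax(logits.float(), dim=-1)
    token_logprobs = logp.gather(-1, labels.unsqueeze(-1)).squeeze(-1)
    entropy = -(logp.exp() * logp).sum(dim=-1)
    return token_logprobs, entropy

h = torch.randn(6, 16)
w = torch.randn(100, 16)
y = torch.randint(0, 100, (6,))
logprobs, entropy = reference_linear_cross_entropy(h, w, y)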
from collections import OrderedDict
from typing import Optional

import megatron.core as mcore
import torch
from megatron.core import parallel_state
from megatron.core.config_logger import has_config_logger_enabled, log_config_to_disk
from megatron.core.inference.contexts import BaseInferenceContext
from megatron.core.models.gpt.gpt_model import GPTModel
from megatron.core.packed_seq_params import PackedSeqParams
from megatron.core.tensor_parallel.mappings import gather_from_sequence_parallel_region
from megatron.core.utils import deprecate_inference_params
from torch import Tensor

from verl.models.mcore.util import preprocess_packed_seqs
from verl.utils.kernel.linear_cross_entropy import linear_cross_entropy
from verl.utils.megatron_utils import unwrap_model
from verl.utils.model import CausalLMOutputForPPO

from .util import postprocess_packed_seqs_for_dict_output


def _get_patching_model(model: torch.nn.Module):
    model = unwrap_model(model)
    if isinstance(model, GPTModel):
        return model
    if not (hasattr(model, "language_model") and isinstance(model.language_model, GPTModel)):
        print(f"Model {model.__class__.__name__} is not supported for fused forward")
        return None
    return model.language_model


def patch_fused_forward(model: torch.nn.Module):
    assert mcore.__version__ >= "0.13.0", "Fused forward patching requires mcore >= 0.13.0"
    model = _get_patching_model(model)
    if model is not None:
        model.forward_backup = model.forward
        model.forward = _fused_GPTModel_forward.__get__(model, model.__class__)


def unpatch_fused_forward(model: torch.nn.Module):
    model = _get_patching_model(model)
    if model is not None:
        model.forward = model.forward_backup


def fused_forward_model_gen(vision_model: bool = False):
    def fused_forward_model(
        model,
        input_ids: Tensor,
        position_ids: Tensor,
        attention_mask: Tensor,
        labels: Tensor,
        labels_mask: Tensor,
        temperature: float,
        multi_modal_inputs: dict,
    ):
        pre_process: bool = (
            unwrap_model(model).pre_process if not vision_model else False
        )  # vision model does not need pre_process, because we pack the input_ids to thd in the forward function
        post_process: bool = unwrap_model(model).post_process
        model_kwargs = {}
        if "pixel_values" in multi_modal_inputs:
            model_kwargs["pixel_values"] = multi_modal_inputs["pixel_values"].to(input_ids.device)
        if "image_grid_thw" in multi_modal_inputs:
            model_kwargs["image_grid_thw"] = multi_modal_inputs["image_grid_thw"].to(input_ids.device)
        if "pixel_values_videos" in multi_modal_inputs:
            model_kwargs["pixel_values_videos"] = multi_modal_inputs["pixel_values_videos"].to(input_ids.device)
        if "video_grid_thw" in multi_modal_inputs:
            model_kwargs["video_grid_thw"] = multi_modal_inputs["video_grid_thw"].to(input_ids.device)

        batch_size, seq_len = attention_mask.shape[:2]
        input_ids_rmpad, packed_seq_params = preprocess_packed_seqs(input_ids, attention_mask, pre_process=pre_process)
        input_ids_rmpad = input_ids_rmpad.contiguous()

        labels_rmpad, _ = preprocess_packed_seqs(labels, attention_mask, pre_process=True)
        labels_mask_rmpad, _ = preprocess_packed_seqs(labels_mask, attention_mask, pre_process=True)
        labels_rmpad = labels_rmpad.contiguous()
        labels_mask_rmpad = labels_mask_rmpad.contiguous()

        input_args = dict(
            input_ids=input_ids_rmpad,
            attention_mask=None,
            position_ids=position_ids if not vision_model else None,  # vision models will calculate position_ids
            packed_seq_params=packed_seq_params,
            labels=labels_rmpad,
            temperature=temperature,
            **model_kwargs,
        )
        if vision_model:
            # workaround for supporting sequence packing with context parallelism:
            # cp split with sequence packing will make the model lose vision token information, so we keep
            # the original input_ids and pack them after the vision embedding is calculated,
            # in cooperation with mbridge
            input_args["input_ids"] = input_ids
            input_args["attention_mask"] = attention_mask

        output_orig: CausalLMOutputForPPO = model(**input_args)

        if post_process:
            # output_orig is of type CausalLMOutputForPPO
            output = postprocess_packed_seqs_for_dict_output(
                labels_mask_rmpad,
                output_orig,
                packed_seq_params,
                attention_mask,
                batch_size,
                seq_len,
                post_process=post_process,
            )
        else:
            output = output_orig
        return output

    return fused_forward_model


def _fused_GPTModel_forward(
    model,
    input_ids: Tensor,
    position_ids: Tensor,
    attention_mask: Tensor,
    decoder_input: Tensor = None,
    labels: Tensor = None,
    inference_context: BaseInferenceContext = None,
    packed_seq_params: PackedSeqParams = None,
    extra_block_kwargs: dict = None,
    runtime_gather_output: Optional[bool] = None,
    *,
    inference_params: Optional[BaseInferenceContext] = None,
    loss_mask: Optional[Tensor] = None,
    temperature: float = 1.0,
    **kwargs,
) -> CausalLMOutputForPPO:
    """
    Patch self._postprocess in forward for GPT models to enable fused kernel support.
    https://github.com/NVIDIA/Megatron-LM/blob/core_v0.13.0/megatron/core/models/gpt/gpt_model.py
    TODO: Currently we still need to patch `forward` because we need to pass `temperature`
    explicitly to `self._postprocess` when calling; maybe there is a better way to handle this.
    """
    inference_context = deprecate_inference_params(inference_context, inference_params)

    preproc_output = model._preprocess(
        input_ids=input_ids,
        position_ids=position_ids,
        decoder_input=decoder_input,
        inference_context=inference_context,
        packed_seq_params=packed_seq_params,
    )
    (decoder_input, rotary_pos_emb, rotary_pos_cos, rotary_pos_sin, sequence_len_offset) = preproc_output[:5]

    # Run decoder.
    hidden_states = model.decoder(
        hidden_states=decoder_input,
        attention_mask=attention_mask,
        inference_context=inference_context,
        rotary_pos_emb=rotary_pos_emb,
        rotary_pos_cos=rotary_pos_cos,
        rotary_pos_sin=rotary_pos_sin,
        packed_seq_params=packed_seq_params,
        sequence_len_offset=sequence_len_offset,
        **(extra_block_kwargs or {}),
        **kwargs,
    )

    if not model.post_process:
        return hidden_states

    output = CausalLMOutputForPPO(
        loss=None,
        logits=None,
        past_key_values=None,
        hidden_states=hidden_states,
        attentions=None,
    )

    if model.config.sequence_parallel:
        hidden_states = gather_from_sequence_parallel_region(hidden_states)

    logprobs, entropy = linear_cross_entropy(
        hidden_states,
        model.output_layer.weight,
        labels,
        temperature,
        "none",
        parallel_state.get_tensor_model_parallel_group(),
    )

    if has_config_logger_enabled(model.config):
        payload = OrderedDict(
            {
                "input_ids": input_ids,
                "position_ids": position_ids,
                "attention_mask": attention_mask,
                "decoder_input": decoder_input,
                "logprobs": logprobs,
                "entropy": entropy,
            }
        )
        log_config_to_disk(model.config, payload, prefix="input_and_logits")

    output.entropy = entropy
    output.log_probs = logprobs
    return output


================================================
FILE: verl_distillation/verl/models/mcore/model_initializer.py
================================================
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # use mcore transformer config to initialize the model import inspect from abc import ABC, abstractmethod from megatron.core.models.gpt.gpt_layer_specs import get_gpt_decoder_block_spec, get_gpt_mtp_block_spec from megatron.core.models.gpt.gpt_model import GPTModel from .config_converter import PretrainedConfig, TransformerConfig class BaseModelInitializer(ABC): """Base class for model initializers.""" def __init__(self, tfconfig: TransformerConfig, hf_config: PretrainedConfig): self.tfconfig = tfconfig self.hf_config = hf_config self.has_vp_stage = inspect.signature(get_gpt_decoder_block_spec).parameters.get("vp_stage", None) is not None @abstractmethod def get_transformer_layer_spec(self, vp_stage=None): """Get the transformer layer specification. https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/core/models/gpt/gpt_layer_specs.py""" pass def get_rope_scaling_args(self) -> dict: """Get rope scaling args.""" rope_scaling_args = {} if "rope_scaling" in self.hf_config: if self.hf_config.rope_scaling is not None: # assert self.hf_config.rope_scaling["type"] == "linear", "only linear scaling is supported for now" rope_scaling_args["seq_len_interpolation_factor"] = self.hf_config.rope_scaling["factor"] return rope_scaling_args def initialize( self, pre_process: bool = True, post_process: bool = True, share_embeddings_and_output_weights: bool = False, value: bool = False, **extra_kwargs, ) -> GPTModel: """Initialize a GPT model with the given configuration. https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/core/models/gpt/gpt_model.py Args: pre_process (bool): include embedding layer. post_process (bool): including an output layer. share_embeddings_and_output_weights (bool): input embeddings and output logit weights are shared. value (bool): add an extra linear layer for classification or regression. 
Returns: GPTModel: An initialized GPT model instance """ vp_stage = extra_kwargs.get("vp_stage", None) transformer_layer_spec = self.get_transformer_layer_spec(vp_stage=vp_stage) rope_scaling_args = self.get_rope_scaling_args() mtp_block_spec = extra_kwargs.get("mtp_block_spec", None) model = GPTModel( config=self.tfconfig, transformer_layer_spec=transformer_layer_spec, vocab_size=self.hf_config.vocab_size, max_sequence_length=self.hf_config.max_position_embeddings, pre_process=pre_process, post_process=post_process, share_embeddings_and_output_weights=share_embeddings_and_output_weights, position_embedding_type="rope", rotary_base=self.hf_config.rope_theta, **rope_scaling_args, mtp_block_spec=mtp_block_spec, **({} if not self.has_vp_stage else {"vp_stage": vp_stage}), ) if post_process and value: from verl.models.llama.megatron.layers.parallel_linear import LinearForLastLayer model.output_layer = LinearForLastLayer( input_size=self.tfconfig.hidden_size, output_size=1, config=self.tfconfig ) return model class DenseModel(BaseModelInitializer): """Initializer for dense models like Llama and Qwen2.""" def get_transformer_layer_spec(self, vp_stage=None): assert self.tfconfig.normalization == "RMSNorm", "only RMSNorm is supported for now" extra_kwargs = {} if not self.has_vp_stage else {"vp_stage": vp_stage} return get_gpt_decoder_block_spec(self.tfconfig, use_transformer_engine=True, **extra_kwargs) class Qwen2MoEModel(BaseModelInitializer): """Initializer for Qwen2 MoE models.""" def get_transformer_layer_spec(self, vp_stage=None): assert self.tfconfig.normalization == "RMSNorm", "only RMSNorm is supported for now" extra_kwargs = {} if not self.has_vp_stage else {"vp_stage": vp_stage} transformer_layer_spec = get_gpt_decoder_block_spec(self.tfconfig, use_transformer_engine=True, **extra_kwargs) # Patch layer spec for shared experts for i in range(len(transformer_layer_spec.layer_specs)): transformer_layer_spec.layer_specs[i].submodules.mlp.submodules.shared_experts.params["gate"] = True return transformer_layer_spec def initialize(self, **kwargs): # Qwen default freeze_moe_router: true model = super().initialize(**kwargs) freeze_moe_router = kwargs.get("freeze_moe_router", True) if freeze_moe_router: for layer in model.decoder.layers: layer.mlp.router.weight.requires_grad = False return model class MixtralModel(BaseModelInitializer): """Initializer for Mixtral models.""" def get_transformer_layer_spec(self, vp_stage=None): assert self.tfconfig.normalization == "RMSNorm", "only RMSNorm is supported for now" extra_kwargs = {} if not self.has_vp_stage else {"vp_stage": vp_stage} transformer_layer_spec = get_gpt_decoder_block_spec(self.tfconfig, use_transformer_engine=True, **extra_kwargs) return transformer_layer_spec def initialize(self, **kwargs): model = super().initialize(**kwargs) freeze_moe_router = kwargs.get("freeze_moe_router", False) if freeze_moe_router: for layer in model.decoder.layers: layer.mlp.router.weight.requires_grad = False return model class Qwen3MoEModel(BaseModelInitializer): """Initializer for Qwen3 MoE models.""" def get_transformer_layer_spec(self, vp_stage=None): assert self.tfconfig.normalization == "RMSNorm", "only RMSNorm is supported for now" extra_kwargs = {} if not self.has_vp_stage else {"vp_stage": vp_stage} transformer_layer_spec = get_gpt_decoder_block_spec(self.tfconfig, use_transformer_engine=True, **extra_kwargs) return transformer_layer_spec def initialize(self, **kwargs): # Qwen default freeze_moe_router: true model = super().initialize(**kwargs) 
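        # Router freezing (defaulted to True in the Qwen MoE initializers here) keeps the
        # token-to-expert assignment fixed during fine-tuning; this is commonly done to
        # avoid destabilizing expert load balancing while the experts themselves adapt.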
freeze_moe_router = kwargs.get("freeze_moe_router", True) if freeze_moe_router: for layer in model.decoder.layers: layer.mlp.router.weight.requires_grad = False return model class DeepseekV3Model(BaseModelInitializer): """Initializer for DeepseekV3 models.""" def get_transformer_layer_spec(self, vp_stage=None): extra_kwargs = {} if not self.has_vp_stage else {"vp_stage": vp_stage} transformer_layer_spec = get_gpt_decoder_block_spec(self.tfconfig, use_transformer_engine=True, **extra_kwargs) return transformer_layer_spec def get_rope_scaling_args(self) -> dict: """Get rope scaling args.""" rope_scaling_args = {} return rope_scaling_args def initialize( self, **kwargs, ): vp_stage = kwargs.get("vp_stage", None) freeze_moe_router = kwargs.get("freeze_moe_router", True) if freeze_moe_router: self.tfconfig.moe_router_load_balancing_type = "none" # MTP if self.tfconfig.mtp_num_layers is not None and self.tfconfig.mtp_num_layers > 0: transformer_layer_spec = self.get_transformer_layer_spec(vp_stage=vp_stage) mtp_block_spec = get_gpt_mtp_block_spec( self.tfconfig, transformer_layer_spec, use_transformer_engine=True, vp_stage=vp_stage ) kwargs["mtp_block_spec"] = mtp_block_spec model = super().initialize(**kwargs) if freeze_moe_router: for layer in model.decoder.layers: if hasattr(layer.mlp, "router"): layer.mlp.router.weight.requires_grad = False return model class Qwen25VLModel(BaseModelInitializer): """Initializer for Qwen2.5 VL models.""" def get_transformer_layer_spec(self, vp_stage=None): extra_kwargs = {} if not self.has_vp_stage else {"vp_stage": vp_stage} transformer_layer_spec = get_gpt_decoder_block_spec(self.tfconfig, use_transformer_engine=True, **extra_kwargs) return transformer_layer_spec def initialize( self, pre_process=None, post_process=None, share_embeddings_and_output_weights=False, value=False, **extra_kwargs, ): tfconfig = self.tfconfig hf_config = self.hf_config # Qwen2_5_VLForConditionalGeneration from copy import deepcopy transformer_layer_spec = self.get_transformer_layer_spec() from megatron.core.extensions.transformer_engine import TEColumnParallelLinear, TERowParallelLinear from megatron.core.models.gpt.moe_module_specs import MLPSubmodules from megatron.core.models.vision.vit_layer_specs import get_vit_layer_with_transformer_engine_spec from .qwen2_5_vl import Qwen2_5VLModel, get_vision_model_config, get_vision_projection_config vision_transformer_config = get_vision_model_config(deepcopy(tfconfig)) vision_transformer_config.pipeline_model_parallel_size = 1 vision_transformer_config.first_pipeline_num_layers = None vision_projection_config = get_vision_projection_config( deepcopy(tfconfig), vision_transformer_config.hidden_size, spatial_merge_size=hf_config.vision_config.spatial_merge_size, ) vision_projection_layer_spec = MLPSubmodules( linear_fc1=TEColumnParallelLinear, linear_fc2=TERowParallelLinear, ) vision_transformer_layer_spec = get_vit_layer_with_transformer_engine_spec() qwen25_vl_model = Qwen2_5VLModel( language_transformer_config=tfconfig, language_transformer_layer_spec=transformer_layer_spec, language_vocab_size=hf_config.vocab_size, language_max_sequence_length=hf_config.max_position_embeddings, vision_transformer_config=vision_transformer_config, vision_transformer_layer_spec=vision_transformer_layer_spec, vision_projection_config=vision_projection_config, vision_projection_layer_spec=vision_projection_layer_spec, vision_projection_type="mlp", language_rotary_base=hf_config.rope_theta, pre_process=pre_process, post_process=post_process, 
add_decoder=True, add_encoder=True, parallel_output=True, language_share_embeddings_and_output_weights=share_embeddings_and_output_weights, ) if post_process and value: from verl.models.llama.megatron.layers.parallel_linear import LinearForLastLayer qwen25_vl_model.language_model.output_layer = LinearForLastLayer( input_size=tfconfig.hidden_size, output_size=1, config=tfconfig ) return qwen25_vl_model ================================================ FILE: verl_distillation/verl/models/mcore/patch_v012.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # there is some bug in mcore 0.12, so we need to patch it # 1. `get_query_key_value_tensors` in `multi_latent_attention.py` works wrong when packed_seq_params is not None def apply_patch(): import torch from megatron.core import parallel_state, tensor_parallel from megatron.core.transformer.multi_latent_attention import ( MLASelfAttention, apply_rotary_pos_emb, deprecate_inference_params, gather_from_sequence_parallel_region, gather_from_tensor_model_parallel_region, scatter_to_sequence_parallel_region, ) def patch_get_query_key_value_tensors( self, hidden_states, key_value_states=None, position_ids=None, packed_seq_params=None, inference_context=None, *, inference_params=None, ): """ Derives `query`, `key` and `value` tensors from `hidden_states`. """ # s = sequence length, b = batch size, h = hidden size, n = num attention heads # Attention heads [s, b, n*h] assert hidden_states.ndim == 3, f"hidden_states should be 3D, [s, b, n*h], got {hidden_states.ndim}D" inference_context = deprecate_inference_params(inference_context, inference_params) # ========================================= # Prepare RoPE and seqlen related params # ========================================= rotary_seq_len = self.rotary_pos_emb.get_rotary_seq_len( inference_context, None, hidden_states, self.config, packed_seq_params ) # rotary_pos_emb:[s, b, 1, 64] mscale = 1.0 if self.config.rope_type == "rope": packed_seq = packed_seq_params is not None and packed_seq_params.qkv_format == "thd" rotary_pos_emb = self.rotary_pos_emb(rotary_seq_len, packed_seq=packed_seq) else: rotary_pos_emb, mscale = self.rotary_pos_emb(rotary_seq_len) # ========================================= # QKV down projection and layernorm # ========================================= if self.config.q_lora_rank is not None: # if linear_q_down_proj is ColumnParallelLinear: # q_compressed: [s, b, q_lora_rank / TP] # elif linear_q_down_proj is Linear: # q_compressed: [s / TP, b, q_lora_rank] q_compressed, _ = self.linear_q_down_proj(hidden_states) # When output is sharded (ColumnParallelLinear), two things are needed to be # identical to a normal Linear. # 1. Manually gather output to restore output dim q_lora_rank; # 2. Scatter sequence back to s / TP if sequence-parallel since it was # gathered by ColumnParallelLinear. 
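            # Illustrative shapes (hypothetical numbers, not from any config here): with TP=2 and
            # q_lora_rank=1536, a ColumnParallelLinear down-projection yields q_compressed shards of
            # [s, b, 768], so the size check below detects the sharding and gathers back to [s, b, 1536].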
if q_compressed.size(-1) != self.config.q_lora_rank: q_compressed = gather_from_tensor_model_parallel_region(q_compressed) if self.config.sequence_parallel: q_compressed = scatter_to_sequence_parallel_region(q_compressed) q_compressed = self.q_layernorm(q_compressed) else: q_compressed = hidden_states # if linear_kv_down_proj is ColumnParallelLinear: # kv_combined: [s, b, (kv_lora_rank + qk_pos_emb_head_dim) / TP] # elif linear_kv_down_proj is Linear: # kv_combined: [s / TP, b, (kv_lora_rank + qk_pos_emb_head_dim)] kv_combined, _ = self.linear_kv_down_proj(hidden_states) if kv_combined.size(-1) != self.config.kv_lora_rank + self.config.qk_pos_emb_head_dim: # kv_combined: [s, b, (kv_lora_rank + qk_pos_emb_head_dim)] kv_combined = gather_from_tensor_model_parallel_region(kv_combined) # kv_compressed:[s, b, kv_lora_rank], k_pos_emb: [s, b, qk_pos_emb_head_dim] kv_compressed, k_pos_emb = torch.split( kv_combined, [self.config.kv_lora_rank, self.config.qk_pos_emb_head_dim], dim=-1 ) if self.config.sequence_parallel: # kv_compressed:[s / TP, b, kv_lora_rank] kv_compressed = scatter_to_sequence_parallel_region(kv_compressed) else: # kv_compressed:[s / TP, b, kv_lora_rank], k_pos_emb: [s / TP, b, qk_pos_emb_head_dim] kv_compressed, k_pos_emb = torch.split( kv_combined, [self.config.kv_lora_rank, self.config.qk_pos_emb_head_dim], dim=-1 ) if parallel_state.get_tensor_model_parallel_world_size() > 1: # k_pos_emb: [s, b, qk_pos_emb_head_dim] k_pos_emb = gather_from_sequence_parallel_region(k_pos_emb) kv_compressed = self.kv_layernorm(kv_compressed) # ========================================= # QKV up projection and RoPE apply # ========================================= def qkv_up_proj_and_rope_apply(q_compressed, kv_compressed, k_pos_emb, rotary_pos_emb): if self.config.q_lora_rank is not None: q, _ = self.linear_q_up_proj(q_compressed) else: # hidden_states:[s, b, 2048], q: [s, b, n * 192] q, _ = self.linear_q_proj(q_compressed) q_len, bsz, _ = q.size() # q: [s, b, n, 192] q = q.view(q_len, bsz, self.num_attention_heads_per_partition, self.q_head_dim) # kv: [s, b, 2048] kv, _ = self.linear_kv_up_proj(kv_compressed) # kv: [s, b, n, 256] kv = kv.view( q_len, bsz, self.num_attention_heads_per_partition, self.config.qk_head_dim + self.config.v_head_dim, ) if inference_context is not None: # add offset to the sequence start for inference sequence_start = inference_context.sequence_len_offset sequence_end = sequence_start + q_len rotary_pos_emb = rotary_pos_emb[sequence_start:sequence_end] else: # Shorten rotary_pos_emb to the sequence length when inference_params # is not provided. This makes sure we can run forward directly with # any sequence length. During training, the sequence length is always # the full rotary_pos_emb length. 
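                # e.g. q_len == 3 keeps rotary rows [0, 1, 2]; the inference branch above
                # offsets the slice by sequence_len_offset instead.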
rotary_pos_emb = rotary_pos_emb[0:q_len] # [s, b, 64] -> [s, b, 1, 64] k_pos_emb = torch.unsqueeze(k_pos_emb, 2) # q: [s, b, n, 128], q_pos_emb: [s, b, n, 64] q_no_pe, q_pos_emb = torch.split(q, [self.config.qk_head_dim, self.config.qk_pos_emb_head_dim], dim=-1) # k_no_pe: [s, b, n, 128], value: [s, b, n, 128] k_no_pe, value = torch.split(kv, [self.config.qk_head_dim, self.config.v_head_dim], dim=-1) if packed_seq_params is not None: cu_seqlens_q = packed_seq_params.cu_seqlens_q cu_seqlens_kv = packed_seq_params.cu_seqlens_kv q_pos_emb = q_pos_emb.squeeze(1) k_pos_emb = k_pos_emb.squeeze(1) q_no_pe = q_no_pe.squeeze(1) k_no_pe = k_no_pe.squeeze(1) value = value.squeeze(1) else: cu_seqlens_q = cu_seqlens_kv = None # q_pos_emb: [s, b, n, 64], k_pos_emb:[s, b, 1, 64] q_pos_emb = apply_rotary_pos_emb( q_pos_emb, rotary_pos_emb, config=self.config, cu_seqlens=cu_seqlens_q, mscale=mscale, ) k_pos_emb = apply_rotary_pos_emb( k_pos_emb, rotary_pos_emb, config=self.config, cu_seqlens=cu_seqlens_kv, mscale=mscale, ) # query: [s, b, n, 192] query = torch.cat([q_no_pe, q_pos_emb], dim=-1) if packed_seq_params is not None: k_pos_emb = k_pos_emb.expand(-1, self.num_attention_heads_per_partition, -1) key = torch.cat([k_no_pe, k_pos_emb], dim=-1) else: # key: [s, b, n, 192] k_pos_emb = k_pos_emb.expand(-1, -1, self.num_attention_heads_per_partition, -1) key = torch.cat([k_no_pe, k_pos_emb], dim=-1) query = query.contiguous() key = key.contiguous() value = value.contiguous() return query, key, value if self.recompute_up_proj: self.qkv_up_checkpoint = tensor_parallel.CheckpointWithoutOutput() query, key, value = self.qkv_up_checkpoint.checkpoint( qkv_up_proj_and_rope_apply, q_compressed, kv_compressed, k_pos_emb, rotary_pos_emb ) else: query, key, value = qkv_up_proj_and_rope_apply(q_compressed, kv_compressed, k_pos_emb, rotary_pos_emb) return query, key, value MLASelfAttention.get_query_key_value_tensors = patch_get_query_key_value_tensors ================================================ FILE: verl_distillation/verl/models/mcore/qwen2_5_vl/__init__.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. # Copyright (c) 2024 Alibaba PAI Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .model import Qwen2_5VLModel from .vision_config import get_vision_model_config, get_vision_projection_config __all__ = ["Qwen2_5VLModel", "get_vision_model_config", "get_vision_projection_config"] ================================================ FILE: verl_distillation/verl/models/mcore/qwen2_5_vl/attention.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. # Copyright (c) 2024 Alibaba PAI Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from megatron.core.transformer.attention import * from .rope_utils import apply_rotary_pos_emb_absolute class Qwen2_5VLSelfAttention(SelfAttention): """ Overrides the SelfAttention class, the difference is that qwen2_5_vl uses apply_rotary_pos_emb_absolute instead of apply_rotary_pos_emb """ def forward( self, hidden_states: Tensor, attention_mask: Tensor, key_value_states: Optional[Tensor] = None, inference_context: Optional[BaseInferenceContext] = None, rotary_pos_emb: Optional[Union[Tensor, Tuple[Tensor, Tensor]]] = None, rotary_pos_cos: Optional[Tensor] = None, rotary_pos_sin: Optional[Tensor] = None, attention_bias: Optional[Tensor] = None, packed_seq_params: Optional[PackedSeqParams] = None, sequence_len_offset: Optional[int] = None, *, inference_params: Optional[BaseInferenceContext] = None, ) -> Tuple[Tensor, Tensor]: """ Perform a forward pass through the attention module. Args: hidden_states (Tensor): Hidden states. attention_mask (Tensor): Attention mask. key_value_states (Optional[Tensor]): Key/value states (for cross attention). inference_context (Optional[BaseInferenceContext]): Inference context that manages KV cache. rotary_pos_emb (Optional[Union[Tensor, Tuple[Tensor, Tensor]]]): Rotary embedding tensor(s). rotary_pos_cos (Optional[Tensor]): Rotary embedding cosine. rotary_pos_sin (Optional[Tensor]): Rotary embedding sine. attention_bias (Optional[Tensor]): Attention bias. packed_seq_params (Optional[PackedSeqparams]): Parameters used for THD format. sequence_len_offset (Optional[int]): Sequence length offset used for inference CUDA graphs. Return: (Tuple[Tensor, Tensor]) Attention output and bias. """ inference_context = deprecate_inference_params(inference_context, inference_params) if inference_context and inference_context.is_dynamic_batching(): assert flash_decode_and_prefill_kernel is not None, ( "Internal use only: install package `nvidia_chunked_flash_attn`." ) # hidden_states: [sq, b, h] if self.config.flash_decode and not self.training and inference_context is not None: rotary_pos_emb = None else: assert rotary_pos_cos is None and rotary_pos_sin is None # For self attention we just duplicate the rotary_pos_emb if it isn't already if rotary_pos_emb is not None and not isinstance(rotary_pos_emb, tuple): rotary_pos_emb = (rotary_pos_emb,) * 2 # ===================== # Query, Key, and Value # ===================== # Get the query, key and value tensors based on the type of attention - # self or cross attn. query, key, value = self.get_query_key_value_tensors(hidden_states, key_value_states) # =================================================== # Adjust key, value, and rotary_pos_emb for inference # =================================================== # This branch only runs in the decode phase of flash decoding and returns after the linear # projection. This conditional is not used in the prefill phase or non-flash-decoding cases. 
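        # All five conditions below must hold at once: flash decode enabled in the config,
        # an inference context present and in decode-only mode, eval (not training) mode,
        # and precomputed rotary cos/sin; otherwise execution falls through to the generic
        # attention path further down.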
if ( self.config.flash_decode and inference_context is not None and inference_context.is_decode_only() and not self.training and rotary_pos_cos is not None ): assert self.layer_number in inference_context.key_value_memory_dict assert inference_context.sequence_len_offset is not None inference_key_memory, inference_value_memory = inference_context.key_value_memory_dict[self.layer_number] output = self.flash_decode( sequence_len_offset=sequence_len_offset, query_layer=query, key_layer=key, value_layer=value, inference_key_memory=inference_key_memory, inference_value_memory=inference_value_memory, rotary_cos=rotary_pos_cos, rotary_sin=rotary_pos_sin, ) out = output.transpose(0, 1).contiguous() context_layer = out.view(out.size(0), out.size(1), -1) output, bias = self.linear_proj(context_layer) return output, bias # Use latest mcore 0.13 API and forward-compatible with previous versions. outputs = self._adjust_key_value_for_inference( inference_context, query, key, value, rotary_pos_emb, rotary_pos_cos, rotary_pos_sin, sequence_len_offset, ) query, key, value, rotary_pos_emb, attn_mask_type = outputs[:5] if packed_seq_params is not None: query = query.squeeze(1) key = key.squeeze(1) value = value.squeeze(1) # ================================================ # relative positional embedding (rotary embedding) # ================================================ if rotary_pos_emb is not None and not self.config.flash_decode: q_pos_emb, k_pos_emb = rotary_pos_emb if packed_seq_params is not None: if packed_seq_params.cu_seqlens_q_padded is not None: cu_seqlens_q = packed_seq_params.cu_seqlens_q_padded else: cu_seqlens_q = packed_seq_params.cu_seqlens_q if packed_seq_params.cu_seqlens_kv_padded is not None: cu_seqlens_kv = packed_seq_params.cu_seqlens_kv_padded else: cu_seqlens_kv = packed_seq_params.cu_seqlens_kv else: cu_seqlens_q = cu_seqlens_kv = None if q_pos_emb is not None: # TODO VIJAY: simplify if inference_context is None or inference_context.is_static_batching(): query = apply_rotary_pos_emb_absolute(query, q_pos_emb, config=self.config, cu_seqlens=cu_seqlens_q) else: query = inference_context.apply_rotary_emb_query(query, q_pos_emb, self.config, cu_seqlens_q) if k_pos_emb is not None: key = apply_rotary_pos_emb_absolute(key, k_pos_emb, config=self.config, cu_seqlens=cu_seqlens_kv) # TODO, can apply positional embedding to value_layer so it has # absolute positional embedding. # otherwise, only relative positional embedding takes effect # value_layer = apply_rotary_pos_emb(value_layer, k_pos_emb) # ================================== # core attention computation # ================================== if self.checkpoint_core_attention and self.training: core_attn_out = self._checkpointed_attention_forward( query, key, value, attention_mask, attn_mask_type=attn_mask_type, attention_bias=attention_bias, packed_seq_params=packed_seq_params, ) else: if inference_context is None or inference_context.is_static_batching(): # Static batching attention kernel. core_attn_out = self.core_attention( query, key, value, attention_mask, attn_mask_type=attn_mask_type, attention_bias=attention_bias, packed_seq_params=packed_seq_params, ) else: # Dynamic batching attention kernel. 
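                # Dynamic batching: per-request query/KV lengths come from the inference
                # context and feed the chunked flash kernel below.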
q, k, v = (query, key, value) cu_query_lengths, max_seqlen_q = inference_context.cu_query_lengths() cu_kv_lengths, max_seqlen_k = inference_context.cu_kv_lengths() core_attn_out = self.flash_decode_and_prefill( q, k, v, max_seqlen_q, max_seqlen_k, cu_query_lengths, cu_kv_lengths ) core_attn_out = core_attn_out.squeeze(0).unsqueeze(1) core_attn_out = rearrange(core_attn_out, "s b h d -> s b (h d)") if packed_seq_params is not None and packed_seq_params.qkv_format == "thd": # reshape to same output shape as unpacked case # (t, np, hn) -> (t, b=1, h=np*hn) # t is the pack size = sum (sq_i) # note that batch is a dummy dimension in the packed case core_attn_out = core_attn_out.reshape(core_attn_out.size(0), 1, -1) # ================= # Output. [sq, b, h] # ================= output, bias = self.linear_proj(core_attn_out) return output, bias ================================================ FILE: verl_distillation/verl/models/mcore/qwen2_5_vl/model.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. # Copyright (c) 2024 Alibaba PAI Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import torch from megatron.core import InferenceParams, mpu, tensor_parallel from megatron.core.models.gpt.gpt_model import GPTModel # from .transformer_config import Qwen2VLTransformerConfig from megatron.core.packed_seq_params import PackedSeqParams from megatron.core.transformer import MegatronModule from megatron.core.transformer.spec_utils import ModuleSpec from megatron.core.transformer.transformer_config import TransformerConfig from verl.models.mcore.util import preprocess_packed_seqs from .attention import Qwen2_5VLSelfAttention from .vision_model import Qwen2_5VisionModel # Note: This is under development and may be missing features. class Qwen2_5VLModel(MegatronModule): """Qwen2.5VL multi-modal model. Args: language_transformer_config (TransformerConfig): Transformer config for the language model. language_transformer_layer_spec (ModuleSpec): Specifies module to use for transformer layers of the language model. language_vocab_size (int): Language model vocabulary size. language_max_sequence_length (int): Language model maximum sequence length. This is used for positional embedding. vision_transformer_config (TransformerConfig): Transformer config for the vision model. vision_transformer_layer_spec (ModuleSpec): Specifies module to use for transformer layers of the vision model. vision_projection_config (TransformerConfig): Config for the projection from vision model outputs to language model inputs. vision_projection_layer_spec (ModuleSpec): Specifies the module to use for the vision projection. vision_projection_type (str): Type of the vision projection to use. Default is a 2-layer MLP. parallel_output (bool): Do not gather the outputs, keep them split across tensor parallel ranks. This is typically True for training and False for inference. 
language_rotary_percent (float): Percent of rotary dimension to use for rotary position embeddings in the language model. Defaults to 1.0. pre_process (bool): Include the embedding layer in the gpt decoder (used with pipeline parallelism). Defaults to True. post_process (bool): Include an output layer and a layernorm in the gpt decoder (used with pipeline parallelism). Defaults to True. add_encoder (bool): Construct the encoder module (used with pipeline parallelism). Defaults to True. When we use pipelining, the encoder will live on only a subset of the pipeline stages (specifically, only the first stage). add_decoder (bool): Construct the decoder module (used with pipeline parallelism). Defaults to True. When we use pipelining, the decoder will live on only a subset of the pipeline stages (specifically, every stage after the first one). img_h (int): The height of each image that the ViT will see. img_w (int): The width of each image that the ViT will see. patch_dim (int): The size of each patch side. img_embedding_idx (int): Index in the language_embeddings tensor where image_embeddings should be inserted. Defaults to 0. """ def __init__( self, language_transformer_config: TransformerConfig, language_transformer_layer_spec: ModuleSpec, language_vocab_size: int, language_max_sequence_length: int, vision_transformer_config: TransformerConfig, vision_transformer_layer_spec: ModuleSpec, vision_projection_config: TransformerConfig, vision_projection_layer_spec: ModuleSpec, vision_projection_type: str = "mlp", parallel_output: bool = True, language_rotary_percent: float = 1.0, pre_process: bool = True, post_process: bool = True, add_encoder: bool = True, add_decoder: bool = True, language_rotary_base: int = 10000, fp16_lm_cross_entropy: bool = False, language_share_embeddings_and_output_weights: bool = False, image_token_id: int = 151655, video_token_id: int = 151656, ) -> None: super().__init__(config=language_transformer_config) # patch self_attention to use qwen2_5_vl attention vision_transformer_layer_spec.submodules.self_attention.module = Qwen2_5VLSelfAttention for layer_spec in language_transformer_layer_spec.layer_specs: layer_spec.submodules.self_attention.module = Qwen2_5VLSelfAttention logging.getLogger(__name__).warning("Qwen2VL model is under development and may be missing features.") self.pre_process = pre_process self.post_process = post_process self.add_encoder = add_encoder self.add_decoder = add_decoder self.encoder_hidden_state = None self.vision_model = None self.vision_projection = None self.language_model = None self.image_token_id = image_token_id self.video_token_id = video_token_id self.square_merge_size = vision_projection_config.ffn_hidden_size // vision_transformer_config.hidden_size # This attribute is needed to check if an all-reduce is required # on the word embeddings inside `finalize_model_grads._allreduce_word_embedding_grads`. 
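        # Initialized to False as a placeholder; once the language GPTModel is constructed
        # below, this flag is re-read from it so `finalize_model_grads` sees the real value.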
self.share_embeddings_and_output_weights = False if self.pre_process: self.vision_model = Qwen2_5VisionModel( vision_transformer_config, vision_transformer_layer_spec, vision_projection_config, vision_projection_layer_spec, projection_type=vision_projection_type, pre_process=True, post_process=True, ) self.language_model = GPTModel( config=language_transformer_config, transformer_layer_spec=language_transformer_layer_spec, vocab_size=language_vocab_size, max_sequence_length=language_max_sequence_length, parallel_output=parallel_output, position_embedding_type="mrope", rotary_percent=language_rotary_percent, pre_process=self.pre_process, post_process=self.post_process, rotary_base=language_rotary_base, fp16_lm_cross_entropy=fp16_lm_cross_entropy, share_embeddings_and_output_weights=language_share_embeddings_and_output_weights, scatter_embedding_sequence_parallel=False, ) assert mpu.get_context_parallel_world_size() <= 1, "please use mbridge for qwen2_5_vl with context parallelism" self.share_embeddings_and_output_weights = self.language_model.share_embeddings_and_output_weights def shared_embedding_or_output_weight(self): """This is a convenience method to surface the language model's word embeddings, which is necessary for `finalize_model_grads._allreduce_word_embedding_grads`.""" if self.add_decoder: return self.language_model.shared_embedding_or_output_weight() return None def set_input_tensor(self, input_tensor) -> None: # This is usually handled in schedules.py but some inference code still # gives us non-lists or None if not isinstance(input_tensor, list): input_tensor = [input_tensor] assert len(input_tensor) == 1, "input_tensor should only be length 1 for Qwen2VL" if self.pre_process: self.encoder_hidden_state = input_tensor[0] else: self.language_model.set_input_tensor(input_tensor[0]) def freeze(self, freeze_language_model: bool, freeze_vision_model: bool, freeze_vision_projection: bool): """Freeze model modules. Make specific modules non-trainable by setting requires_grad to False for the module's parameters. Args: freeze_language_model (bool): Freeze the language model module. freeze_vision_model (bool): Freeze the vision model module. freeze_vision_projection (bool): Freeze the vision projection module. """ modules = [] if freeze_language_model and self.language_model is not None: modules.append(self.language_model) if freeze_vision_model and self.vision_model is not None: modules.append(self.vision_model) if freeze_vision_projection and self.vision_projection is not None: modules.append(self.vision_projection) for module in modules: for param in module.parameters(): param.requires_grad = False def forward( self, input_ids: torch.Tensor, position_ids: torch.Tensor, attention_mask: torch.Tensor = None, labels: torch.Tensor = None, inference_params: InferenceParams = None, packed_seq_params: PackedSeqParams = None, extra_block_kwargs: dict = None, pixel_values: torch.Tensor = None, pixel_values_videos: torch.Tensor = None, image_grid_thw: torch.Tensor = None, video_grid_thw: torch.Tensor = None, **kwargs, ) -> torch.Tensor: """Forward function of the Qwen2VL model. 
        ### There is a workaround here for supporting sequence packing with context parallelism:
        # CP split with sequence packing will make the model lose vision token information, so we need to keep
        # the original input_ids and pack them after the vision embedding is calculated,
        # cooperating with verl's models/mcore/model_forward.py.
        # We pack the combined_embeddings to thd here; we check whether packed_seq_params is None to determine
        # whether we need to pack the combined_embeddings to thd.
        # This function needs the position_ids and attention_mask in BSHD format, whether packed_seq is used or not.

        Args:
            pixel_values (torch.Tensor): input image of shape [total_thw_size, n_features].
            input_ids (torch.Tensor): input text ids [batch, text_seq_len].
            position_ids (torch.Tensor): input text position ids [batch, text_seq_len].
            attention_mask (torch.Tensor): attention mask for the language model
                [batch, 1, combined_seq_len, combined_seq_len].
            labels (torch.Tensor): Optional target text labels [batch, combined_seq_len].
            inference_params (InferenceParams): Inference-time parameters including KV cache.
            video_start_index (computed internally):
                0 -- all vision embeddings are video;
                len(video_seq) -- all vision embeddings are images;
                anything in between -- a mixture of the two.
            *_input_mask: should not be None in the first PP stage.

        Returns:
            output (torch.Tensor): Loss of shape [b, s] if labels are provided,
                otherwise logits of shape [b, s, vocab_size].
        """
        video_start_index = 0
        vision_grid_thw = None
        vision_data = None
        if image_grid_thw is not None:
            image_mask = input_ids == self.image_token_id
            vision_grid_thw = image_grid_thw
            vision_data = pixel_values
            video_start_index = image_mask.sum().item()
        if video_grid_thw is not None:
            video_mask = input_ids == self.video_token_id
            if vision_grid_thw is not None:
                vision_grid_thw = torch.cat([vision_grid_thw, video_grid_thw], dim=0)
                vision_data = torch.cat([vision_data, pixel_values_videos], dim=0)
            else:
                vision_grid_thw = video_grid_thw
                vision_data = pixel_values_videos
        use_inference_kv_cache = (
            inference_params is not None and "image_tokens_count" in inference_params.key_value_memory_dict
        )
        if use_inference_kv_cache:
            raise NotImplementedError()

        if self.pre_process:
            vision_embeds = None
            if vision_grid_thw is not None and vision_grid_thw.shape[0] > 0:
                vision_embeds = self.vision_model(
                    vision_data=vision_data,  # If None, the vision model should use intermediate outputs (EPP > 1)
                    grid_thw=vision_grid_thw,  # should be provided in each EPP stage
                )

            # If running inference, the language model KV cache will be updated for image token positions.
            # Here we store the image tokens sequence length, which can be used as an offset to the KV cache later.
            if inference_params is not None:
                raise NotImplementedError()
                # inference_params.key_value_memory_dict["image_tokens_count"] = (
                #     vision_embeddings.shape[0]
                # )

            # If running inference, we can skip image token computation if they were computed already earlier
            # for this sample.
            if use_inference_kv_cache:
                language_embeddings: torch.Tensor = self.language_model.embedding(
                    input_ids=input_ids,
                    position_ids=None,  # NOTE: disable
                )  # [text_seq_len, b, h_language]
                # NOTE: why not cat here? Are the combined embeddings useless?
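                # A possible answer to the NOTE above (a reading of the surrounding control
                # flow, not confirmed by the authors): on this KV-cache path the vision
                # tokens were already embedded and written into the cache on the first
                # forward pass (see the commented-out "image_tokens_count" bookkeeping),
                # so only the freshly embedded text tokens are needed here and there is
                # nothing to concatenate. The path is currently unreachable anyway, since
                # use_inference_kv_cache raises NotImplementedError earlier in this method.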
combined_embeddings = language_embeddings elif vision_embeds is not None: if video_start_index == 0: image_embeds = None video_embeds = vision_embeds elif video_start_index == vision_embeds.shape[0]: image_embeds = vision_embeds video_embeds = None elif 0 < video_start_index < vision_embeds.shape[0]: image_embeds = vision_embeds[:video_start_index] video_embeds = vision_embeds[video_start_index:] else: raise ValueError( f"Expect video token start index in range [0, {vision_embeds.shape[0]}], but got " f"{video_start_index}" ) combined_embeddings = self.language_model.embedding( input_ids=input_ids, position_ids=None, # NOTE: disable ) # [text_seq_len, b, h_language] if image_embeds is not None or video_embeds is not None: combined_embeddings = combined_embeddings.transpose(0, 1).contiguous() if image_embeds is not None: image_mask = (input_ids == self.image_token_id).contiguous() if image_mask.sum() > 0: combined_embeddings = combined_embeddings.clone() combined_embeddings[image_mask] = image_embeds.to( dtype=combined_embeddings.dtype, device=combined_embeddings.device ) if video_embeds is not None: video_mask = (input_ids == self.video_token_id).contiguous() if video_mask.sum() > 0: combined_embeddings = combined_embeddings.clone() combined_embeddings[video_mask] = video_embeds.to( dtype=combined_embeddings.dtype, device=combined_embeddings.device ) combined_embeddings = combined_embeddings.transpose(0, 1).contiguous() else: combined_embeddings = self.language_model.embedding( input_ids=input_ids, position_ids=None, # NOTE: disable ) # [text_seq_len, b, h_language] if packed_seq_params is not None: combined_embeddings = ( preprocess_packed_seqs( combined_embeddings.transpose(0, 1).contiguous(), attention_mask, pre_process=True )[0] .transpose(0, 1) .contiguous() ) if self.config.sequence_parallel: combined_embeddings = tensor_parallel.scatter_to_sequence_parallel_region(combined_embeddings) combined_embeddings = combined_embeddings.contiguous() else: combined_embeddings = None from .rope_utils import get_rope_index # BSHD position_ids, _ = get_rope_index( input_ids, image_grid_thw=image_grid_thw, video_grid_thw=video_grid_thw, attention_mask=attention_mask, ) # THD if packed_seq_params is not None: position_ids = ( preprocess_packed_seqs(position_ids.permute(1, 2, 0), attention_mask, pre_process=True)[0] .permute(2, 0, 1) .contiguous() ) attention_mask = None output = self.language_model( input_ids=None, position_ids=position_ids, # None in encoder attention_mask=attention_mask, # None in encoder decoder_input=combined_embeddings, # only not None in the first decoder PP stage labels=labels, # only not None in the last decoder PP stage # inference_params=inference_params, # currently always None packed_seq_params=packed_seq_params, # currently always None **(extra_block_kwargs or {}), **kwargs, ) return output ================================================ FILE: verl_distillation/verl/models/mcore/qwen2_5_vl/rope_utils.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. # Copyright (c) 2024 Alibaba PAI Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import annotations

import logging
from typing import Optional

import torch
from megatron.core.models.common.embeddings.rope_utils import *
from megatron.core.models.common.embeddings.rope_utils import _apply_rotary_pos_emb_bshd
from torch import Tensor

logger = logging.getLogger(__name__)


# Slightly modified from Qwen2VLForConditionalGeneration.get_rope_index
def get_rope_index(
    input_ids: Optional[torch.LongTensor] = None,
    image_grid_thw: Optional[torch.LongTensor] = None,
    video_grid_thw: Optional[torch.LongTensor] = None,
    second_per_grid_ts: Optional[torch.Tensor] = None,
    attention_mask: Optional[torch.Tensor] = None,
):
    """
    Calculate the 3D rope index based on image and video's temporal, height and width in LLM.

    Explanation:
        Each embedding sequence contains vision embedding and text embedding or just contains text embedding.

        For a pure text embedding sequence, the rotary position embedding has no difference from modern LLMs.
        Examples:
            input_ids: [T T T T T], here T is for text.
            temporal position_ids: [0, 1, 2, 3, 4]
            height position_ids: [0, 1, 2, 3, 4]
            width position_ids: [0, 1, 2, 3, 4]

        For a vision and text embedding sequence, we calculate 3D rotary position embedding for the vision part
        and 1D rotary position embedding for the text part.
        Examples:
            Temporal (Time): 3 patches, representing different segments of the video in time.
            Height: 2 patches, dividing each frame vertically.
            Width: 2 patches, dividing each frame horizontally.
            We also have some important parameters:
            fps (Frames Per Second): The video's frame rate, set to 1. This means one frame is processed each second.
            tokens_per_second: This is a crucial parameter. It dictates how many "time-steps" or "temporal tokens"
                are conceptually packed into a one-second interval of the video. In this case, we have 25 tokens per
                second. So each second of the video will be represented with 25 separate time points. It essentially
                defines the temporal granularity.
            temporal_patch_size: The number of frames that compose one temporal patch. Here, it's 2 frames.
            interval: The step size for the temporal position IDs, calculated as
                tokens_per_second * temporal_patch_size / fps. In this case, 25 * 2 / 1 = 50. This means that each
                temporal patch will have a difference of 50 in the temporal position IDs.
            input_ids: [V V V V V V V V V V V V T T T T T], here V is for vision.
            vision temporal position_ids: [0, 0, 0, 0, 50, 50, 50, 50, 100, 100, 100, 100]
            vision height position_ids: [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1]
            vision width position_ids: [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
            text temporal position_ids: [101, 102, 103, 104, 105]
            text height position_ids: [101, 102, 103, 104, 105]
            text width position_ids: [101, 102, 103, 104, 105]
            Here we calculate the text start position_ids as the max vision position_ids plus 1.

    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
            provide it.
        image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
            The temporal, height and width of feature shape of each image in LLM.
video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*): The temporal, height and width of feature shape of each video in LLM. second_per_grid_ts (`torch.Tensor` of shape `(num_videos)`, *optional*): The time interval (in seconds) for each grid along the temporal dimension in the 3D position IDs. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. Returns: position_ids (`torch.LongTensor` of shape `(3, batch_size, sequence_length)`) mrope_position_deltas (`torch.Tensor` of shape `(batch_size)`) """ spatial_merge_size = 2 tokens_per_second = 2 image_token_id = 151655 video_token_id = 151656 vision_start_token_id = 151652 mrope_position_deltas = [] if input_ids is not None and (image_grid_thw is not None or video_grid_thw is not None): total_input_ids = input_ids if attention_mask is None: attention_mask = torch.ones_like(total_input_ids) position_ids = torch.ones( 3, input_ids.shape[0], input_ids.shape[1], dtype=input_ids.dtype, device=input_ids.device, ) image_index, video_index = 0, 0 attention_mask = attention_mask.to(total_input_ids.device) for i, input_ids in enumerate(total_input_ids): input_ids = input_ids[attention_mask[i] == 1] image_nums, video_nums = 0, 0 vision_start_indices = torch.argwhere(input_ids == vision_start_token_id).squeeze(1) vision_tokens = input_ids[vision_start_indices + 1] image_nums = (vision_tokens == image_token_id).sum() video_nums = (vision_tokens == video_token_id).sum() input_tokens = input_ids.tolist() llm_pos_ids_list: list = [] st = 0 remain_images, remain_videos = image_nums, video_nums for _ in range(image_nums + video_nums): if image_token_id in input_tokens and remain_images > 0: ed_image = input_tokens.index(image_token_id, st) else: ed_image = len(input_tokens) + 1 if video_token_id in input_tokens and remain_videos > 0: ed_video = input_tokens.index(video_token_id, st) else: ed_video = len(input_tokens) + 1 if ed_image < ed_video: t, h, w = ( image_grid_thw[image_index][0], image_grid_thw[image_index][1], image_grid_thw[image_index][2], ) second_per_grid_t = 0 image_index += 1 remain_images -= 1 ed = ed_image else: t, h, w = ( video_grid_thw[video_index][0], video_grid_thw[video_index][1], video_grid_thw[video_index][2], ) if second_per_grid_ts is not None: second_per_grid_t = second_per_grid_ts[video_index] else: second_per_grid_t = 1.0 video_index += 1 remain_videos -= 1 ed = ed_video llm_grid_t, llm_grid_h, llm_grid_w = ( t.item(), h.item() // spatial_merge_size, w.item() // spatial_merge_size, ) text_len = ed - st st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx) range_tensor = torch.arange(llm_grid_t).view(-1, 1) expanded_range = range_tensor.expand(-1, llm_grid_h * llm_grid_w) time_tensor = expanded_range * second_per_grid_t * tokens_per_second time_tensor_long = time_tensor.long() t_index = time_tensor_long.flatten() h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(llm_grid_t, -1, llm_grid_w).flatten() w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(llm_grid_t, llm_grid_h, -1).flatten() llm_pos_ids_list.append(torch.stack([t_index, h_index, w_index]) + text_len + st_idx) st = ed + llm_grid_t * llm_grid_h * llm_grid_w if st < len(input_tokens): st_idx = llm_pos_ids_list[-1].max() + 1 if 
len(llm_pos_ids_list) > 0 else 0 text_len = len(input_tokens) - st llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx) llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1) position_ids[..., i, attention_mask[i] == 1] = llm_positions.to(position_ids.device) mrope_position_deltas.append(llm_positions.max() + 1 - len(total_input_ids[i])) mrope_position_deltas = torch.tensor(mrope_position_deltas, device=input_ids.device).unsqueeze(1) return position_ids, mrope_position_deltas else: if attention_mask is not None: position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) position_ids = position_ids.unsqueeze(0).expand(3, -1, -1).to(attention_mask.device) max_position_ids = position_ids.max(0, keepdim=False)[0].max(-1, keepdim=True)[0] mrope_position_deltas = max_position_ids + 1 - attention_mask.shape[-1] else: position_ids = ( torch.arange(input_ids.shape[1], device=input_ids.device) .view(1, 1, -1) .expand(3, input_ids.shape[0], -1) ) mrope_position_deltas = torch.zeros( [input_ids.shape[0], 1], device=input_ids.device, dtype=input_ids.dtype, ) return position_ids, mrope_position_deltas def apply_rotary_pos_emb_thd_absolute( t: Tensor, cu_seqlens: Tensor, freqs: Tensor, rotary_interleaved: bool = False ) -> Tensor: """A baseline implementation of applying RoPE for `thd` format. Args: t (Tensor): Input tensor T is of shape [t, h, d] cu_seqlens(Tensor): Cumulative sum of sequence lengths in a batch for `t`, with shape [b + 1] and dtype torch.int32. freqs (Tensor): Rotary Positional embedding tensor freq is of shape [max_s, 1, 1, d] Returns: Tensor: Shape [t, h, d]. The input tensor after applying RoPE. """ return _apply_rotary_pos_emb_bshd(t[:, None], freqs, rotary_interleaved=rotary_interleaved).squeeze(1) def apply_rotary_pos_emb_absolute( t: Tensor, freqs: Tensor, config: TransformerConfig, cu_seqlens: Optional[Tensor] = None, ): """ Reroute to the appropriate apply_rotary_pos_emb function depending on bshd (conventional) / thd (packed seq) format In Qwen2-VL, the shape of freqs is (seq_length, bs, 1, 2 * dim) instead of [max_seqlen, 1, 1, 2 * dim] """ if config.apply_rope_fusion: if cu_seqlens is None: # NOTE: TE backends do not support mRoPE in bshd format when bs > 1 if freqs.shape[1] > 1: return _apply_rotary_pos_emb_bshd(t, freqs, rotary_interleaved=config.rotary_interleaved) else: return fused_apply_rotary_pos_emb(t, freqs) else: # NOTE: as expected, thd format can use bshd return fused_apply_rotary_pos_emb(t[:, None], freqs).squeeze(1) else: if cu_seqlens is None: return _apply_rotary_pos_emb_bshd(t, freqs, rotary_interleaved=config.rotary_interleaved) else: return apply_rotary_pos_emb_thd_absolute(t, cu_seqlens, freqs, rotary_interleaved=config.rotary_interleaved) ================================================ FILE: verl_distillation/verl/models/mcore/qwen2_5_vl/vision_config.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. # Copyright (c) 2024 Alibaba PAI Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from megatron.core import parallel_state from megatron.core.transformer import TransformerConfig def get_vision_model_config(config: TransformerConfig) -> TransformerConfig: # Given a Transformer Config from decoder, build vision encoder config # diff: out_hidden_size & intermediate_size # mlp: hidden_size -> intermediate_size -> embed_dim, silu # NOTE: here we provide a workaround to solve the wrong layer amount when VPP of decoder is on if config.num_layers in [28, 36]: config.ffn_hidden_size = 3420 else: config.ffn_hidden_size = 3456 if parallel_state.get_virtual_pipeline_model_parallel_world_size() is not None: config.num_layers = 32 * parallel_state.get_virtual_pipeline_model_parallel_world_size() # depth else: config.num_layers = 32 # depth config.num_attention_heads = 16 # num_heads config.add_bias_linear = True # all nn.Linear has bias (MLP, attn) config.add_qkv_bias = True # qkv_proj in attn has bias config.hidden_size = 1280 # hidden_size config.hidden_dropout = 0.0 config.attention_dropout = 0.0 # config.gated_linear_unit = False # no gated # config.activation_func = quick_gelu # hidden_act config.kv_channels = config.hidden_size // config.num_attention_heads config.num_query_groups = config.num_attention_heads # no GQA config.layernorm_zero_centered_gamma = False # False config.apply_query_key_layer_scaling = False # factor=math.sqrt(head_dim) config.bias_activation_fusion = False # no swiglu, set false config.bias_dropout_fusion = False # no dropout, set false config.attention_softmax_in_fp32 = True # use True # config.normalization = 'LayerNorm' # use RMSNorm config.seq_length = 1 config.tp_comm_overlap = False config.sequence_parallel = False config.temporal_patch_size = 2 config.patch_size = 14 config.in_channels = 3 config.spatial_merge_size = 2 config.fullatt_block_indexes = [7, 15, 23, 31] config._qwen2_5_vl_window_size = 112 return config def get_vision_projection_config( config: TransformerConfig, embed_dim: int, spatial_merge_size: int ) -> TransformerConfig: # merger: # context_dim = hidden_size * merge_size**2 # out_hidden_size = hidden_size # context_dim -> context_dim -> out_hidden_size # MLP: # input_size -> ffn_hidden_size -> hidden_size # spec: LN -> Linear(bias=True) -> GELU -> Linear(bias=True) config.gated_linear_unit = False config.bias_activation_fusion = False config.add_bias_linear = True config.ffn_hidden_size = embed_dim * (spatial_merge_size**2) config.activation_func = torch.nn.functional.gelu config.tp_comm_overlap = False config.sequence_parallel = False return config ================================================ FILE: verl_distillation/verl/models/mcore/qwen2_5_vl/vision_model.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. # Copyright (c) 2024 Alibaba PAI Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional import torch from megatron.core import InferenceParams from megatron.core.models.common.vision_module.vision_module import VisionModule from megatron.core.models.vision.multimodal_projector import MultimodalProjector from megatron.core.packed_seq_params import PackedSeqParams from megatron.core.transformer.enums import ModelType from megatron.core.transformer.spec_utils import ModuleSpec from megatron.core.transformer.transformer_config import TransformerConfig from torch import nn from torch.nn import functional as F from .vision_transformer_block import Qwen2_5VisionTransformerBlock as TransformerBlock # copied from https://github.com/huggingface/transformers/blob/main/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py class PatchEmbed(nn.Module): def __init__( self, patch_size: int = 14, temporal_patch_size: int = 2, in_channels: int = 3, embed_dim: int = 1152, ) -> None: super().__init__() self.patch_size = patch_size self.temporal_patch_size = temporal_patch_size self.in_channels = in_channels self.embed_dim = embed_dim kernel_size = [temporal_patch_size, patch_size, patch_size] self.proj = nn.Conv3d(in_channels, embed_dim, kernel_size=kernel_size, stride=kernel_size, bias=False) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: target_dtype = self.proj.weight.dtype hidden_states = hidden_states.view( -1, self.in_channels, self.temporal_patch_size, self.patch_size, self.patch_size ) hidden_states = self.proj(hidden_states.to(dtype=target_dtype)).view(-1, self.embed_dim) return hidden_states # copied from https://github.com/huggingface/transformers/blob/main/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py class VisionRotaryEmbedding(nn.Module): def __init__(self, dim: int, theta: float = 10000.0) -> None: super().__init__() inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2, dtype=torch.float) / dim)) self.register_buffer("inv_freq", inv_freq, persistent=False) def forward(self, seqlen: int) -> torch.Tensor: seq = torch.arange(seqlen, device=self.inv_freq.device, dtype=self.inv_freq.dtype) freqs = torch.outer(seq, self.inv_freq) return freqs.float() class Qwen2_5VisionModel(VisionModule): """Qwen2.5 ViT vision model. Args: transformer_config (TransformerConfig): Transformer config. transformer_layer_spec (ModuleSpec): Specifies module to use for transformer layers. ln_pre_impl (ModuleSpec or type): Specifies the layer norm type to use for ln_pre. add_class_token (bool, optional): Include a class token. Defaults to True. class_token_len (int): Class token length. Defaults to 1 but 8 may be faster. patch_dim (int): Image patch size. img_h (int): Input image height. img_w (int): Input image width. 
""" def __init__( self, transformer_config: TransformerConfig, transformer_layer_spec: ModuleSpec, projection_config: TransformerConfig, projection_layer_spec: ModuleSpec, projection_type: str = "mlp", pre_process: bool = True, post_process: bool = False, ) -> None: super().__init__(config=transformer_config) self.spatial_merge_size = transformer_config.spatial_merge_size embed_dim = transformer_config.hidden_size num_heads = transformer_config.num_attention_heads temporal_patch_size = transformer_config.temporal_patch_size patch_size = transformer_config.patch_size in_channels = transformer_config.in_channels self.patch_size = transformer_config.patch_size self.fullatt_block_indexes = transformer_config.fullatt_block_indexes self.window_size = transformer_config._qwen2_5_vl_window_size self.spatial_merge_unit = self.spatial_merge_size * self.spatial_merge_size self.max_sequence_length = transformer_config.seq_length self.patch_embed = PatchEmbed( patch_size=patch_size, temporal_patch_size=temporal_patch_size, in_channels=in_channels, embed_dim=embed_dim, ) head_dim = embed_dim // num_heads self.rotary_pos_emb = VisionRotaryEmbedding(head_dim // 2) self.model_type = ModelType.encoder_or_decoder self.pre_process = pre_process self.post_process = post_process # Transformer layers. # TODO: Follow-up changes will make pre and post_process configurable. They are needed for supporting # pipeline parallelism. # NOTE: a final layer norm and/or linear layer present in some implementations are omitted here. self.decoder = TransformerBlock( config=transformer_config, spec=transformer_layer_spec, pre_process=self.pre_process, post_process=self.post_process, post_layer_norm=True, ) self.merge_hidden_size = projection_config.ffn_hidden_size self.square_merge_size = self.merge_hidden_size // embed_dim if self.post_process: self.projection = MultimodalProjector( projection_config, projection_layer_spec, projection_type, projection_config.ffn_hidden_size ) else: self.projection = None self.input_tensor = None def set_input_tensor(self, input_tensor: torch.Tensor) -> None: """Sets input tensor to the model. Args: input_tensor (Tensor): Sets the input tensor for the model. 
""" if self.pre_process: # always True self.input_tensor = input_tensor else: raise NotImplementedError() def rot_pos_emb(self, grid_thw): pos_ids = [] for t, h, w in grid_thw: hpos_ids = torch.arange(h).unsqueeze(1).expand(-1, w) hpos_ids = hpos_ids.reshape( h // self.spatial_merge_size, self.spatial_merge_size, w // self.spatial_merge_size, self.spatial_merge_size, ) hpos_ids = hpos_ids.permute(0, 2, 1, 3) hpos_ids = hpos_ids.flatten() wpos_ids = torch.arange(w).unsqueeze(0).expand(h, -1) wpos_ids = wpos_ids.reshape( h // self.spatial_merge_size, self.spatial_merge_size, w // self.spatial_merge_size, self.spatial_merge_size, ) wpos_ids = wpos_ids.permute(0, 2, 1, 3) wpos_ids = wpos_ids.flatten() pos_ids.append(torch.stack([hpos_ids, wpos_ids], dim=-1).repeat(t, 1)) pos_ids = torch.cat(pos_ids, dim=0).to(grid_thw.device) max_grid_size = grid_thw[:, 1:].max() rotary_pos_emb_full = self.rotary_pos_emb(max_grid_size).to(grid_thw.device) rotary_pos_emb = rotary_pos_emb_full[pos_ids].flatten(1) return rotary_pos_emb def get_window_index(self, grid_thw): window_index: list = [] cu_window_seqlens: list = [0] window_index_id = 0 vit_merger_window_size = self.window_size // self.spatial_merge_size // self.patch_size for grid_t, grid_h, grid_w in grid_thw: llm_grid_h, llm_grid_w = ( grid_h // self.spatial_merge_size, grid_w // self.spatial_merge_size, ) index = torch.arange(grid_t * llm_grid_h * llm_grid_w).reshape(grid_t, llm_grid_h, llm_grid_w) pad_h = vit_merger_window_size - llm_grid_h % vit_merger_window_size pad_w = vit_merger_window_size - llm_grid_w % vit_merger_window_size num_windows_h = (llm_grid_h + pad_h) // vit_merger_window_size num_windows_w = (llm_grid_w + pad_w) // vit_merger_window_size index_padded = F.pad(index, (0, pad_w, 0, pad_h), "constant", -100) index_padded = index_padded.reshape( grid_t, num_windows_h, vit_merger_window_size, num_windows_w, vit_merger_window_size, ) index_padded = index_padded.permute(0, 1, 3, 2, 4).reshape( grid_t, num_windows_h * num_windows_w, vit_merger_window_size, vit_merger_window_size, ) seqlens = (index_padded != -100).sum([2, 3]).reshape(-1) index_padded = index_padded.reshape(-1) index_new = index_padded[index_padded != -100] window_index.append(index_new + window_index_id) cu_seqlens_tmp = seqlens.cumsum(0) * self.spatial_merge_unit + cu_window_seqlens[-1] cu_window_seqlens.extend(cu_seqlens_tmp.tolist()) window_index_id += (grid_t * llm_grid_h * llm_grid_w).item() window_index = torch.cat(window_index, dim=0) return window_index, cu_window_seqlens def forward( self, vision_data: Optional[torch.Tensor], grid_thw: torch.Tensor, inference_params: Optional[InferenceParams] = None, extra_block_kwargs: dict = None, ) -> torch.Tensor: """Forward function of the Qwen2 Vision Model. This function passes the input tensors through the embedding layer and then the transformer. Args: x (torch.Tensor): input image/video data of shape [n_tokens, n_dims] grid_thw (torch.Tensor): the size tensor indicates grid size of each image/frame packed_seq_params (PackedSeqParams): parameters to build attention mask in the backend Returns: x (torch.Tensor): output after final transformer block of shape [b, s, h]. 
""" assert grid_thw is not None assert self.input_tensor is None assert inference_params is None # Rotary positional embeddings (embedding is None for PP intermediate devices) vision_data = self.patch_embed(vision_data) window_index, cu_window_seqlens = self.get_window_index(grid_thw) cu_window_seqlens = torch.tensor( cu_window_seqlens, device=vision_data.device, dtype=torch.int32, ) cu_window_seqlens = torch.unique_consecutive(cu_window_seqlens) seq_len, _ = vision_data.size() vision_data = vision_data.reshape(seq_len // self.spatial_merge_unit, self.spatial_merge_unit, -1) vision_data = vision_data[window_index, :, :] vision_data = vision_data.reshape(seq_len, 1, -1) rotary_pos_emb = self.rot_pos_emb(grid_thw) rotary_pos_emb = rotary_pos_emb.reshape(seq_len // self.spatial_merge_unit, self.spatial_merge_unit, -1) rotary_pos_emb = rotary_pos_emb[window_index, :, :] rotary_pos_emb = rotary_pos_emb.reshape(seq_len, 1, 1, -1).repeat(1, 1, 1, 2) hidden_states = self.decoder( hidden_states=vision_data, attention_mask=None, inference_params=inference_params, rotary_pos_emb=rotary_pos_emb, packed_seq_params=self.build_packed_seq_params(None, cu_window_seqlens), packed_seq_params_full=self.build_packed_seq_params(grid_thw), fullatt_block_indexes=self.fullatt_block_indexes, **(extra_block_kwargs or {}), ) hidden_states = self.projection(hidden_states.view(-1, self.merge_hidden_size)) reverse_indices = torch.argsort(window_index) return hidden_states[reverse_indices, :] def build_packed_seq_params( self, grid_thw: Optional[torch.Tensor], cu_seqlens: Optional[torch.Tensor] = None, ) -> PackedSeqParams: # NOTE: each frame is a sequence (rather than each grid) if grid_thw is not None: seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]) cu_seqlens = seqlens.cumsum(dim=0) cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0).int() else: seqlens = cu_seqlens[1:] - cu_seqlens[:-1] max_seqlen_q = seqlens.max() return PackedSeqParams( cu_seqlens_q=cu_seqlens, cu_seqlens_kv=cu_seqlens, qkv_format="thd", max_seqlen_q=max_seqlen_q, max_seqlen_kv=max_seqlen_q, ) ================================================ FILE: verl_distillation/verl/models/mcore/qwen2_5_vl/vision_transformer_block.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. # Copyright (c) 2024 Alibaba PAI Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from megatron.core.transformer.transformer_block import *


class Qwen2_5VisionTransformerBlock(TransformerBlock):
    def _checkpointed_forward(
        self,
        hidden_states: Tensor,
        attention_mask: Tensor,
        context: Tensor,
        context_mask: Tensor,
        rotary_pos_emb: Tensor,
        attention_bias: Tensor,
        packed_seq_params: PackedSeqParams,
        packed_seq_params_full: PackedSeqParams,
        fullatt_block_indexes,
    ):
        """Forward method with activation checkpointing."""

        def custom(start: int, end: int):
            def custom_forward(hidden_states, attention_mask, context, context_mask, rotary_pos_emb):
                for index in range(start, end):
                    if index in fullatt_block_indexes:
                        packed_seq_params_now = packed_seq_params_full
                    else:
                        packed_seq_params_now = packed_seq_params
                    layer = self._get_layer(index)
                    hidden_states, context = layer(
                        hidden_states=hidden_states,
                        attention_mask=attention_mask,
                        context=context,
                        context_mask=context_mask,
                        rotary_pos_emb=rotary_pos_emb,
                        attention_bias=attention_bias,
                        inference_context=None,
                        packed_seq_params=packed_seq_params_now,
                    )
                return hidden_states, context

            return custom_forward

        def checkpoint_handler(forward_func):
            """Determines whether to use `te_checkpoint` or `tensor_parallel.checkpoint`."""
            if self.config.fp8:
                return te_checkpoint(
                    forward_func,
                    self.config.distribute_saved_activations,
                    tensor_parallel.random.get_cuda_rng_tracker,
                    parallel_state.get_tensor_model_parallel_group(),
                    hidden_states,
                    attention_mask,
                    context,
                    context_mask,
                    rotary_pos_emb,
                )
            else:
                return tensor_parallel.checkpoint(
                    forward_func,
                    self.config.distribute_saved_activations,
                    hidden_states,
                    attention_mask,
                    context,
                    context_mask,
                    rotary_pos_emb,
                )

        if self.config.recompute_method == "uniform":
            # Uniformly divide the total number of Transformer layers and checkpoint
            # the input activation of each divided chunk.
            # A method to further reduce memory usage by reducing the number of checkpoints.
            layer_idx = 0
            while layer_idx < self.num_layers_per_pipeline_rank:
                hidden_states, context = checkpoint_handler(
                    custom(layer_idx, layer_idx + self.config.recompute_num_layers)
                )
                layer_idx += self.config.recompute_num_layers
        elif self.config.recompute_method == "block":
            # Checkpoint the input activation of only a set number of individual
            # Transformer layers and skip the rest.
            # A method to fully use the device memory by removing redundant re-computation.
            recompute_skip_num_layers = 0
            for layer_idx in range(self.num_layers_per_pipeline_rank):
                # Skip recomputation when input grad computation is not needed.
                # Need to have at least one input tensor with gradient computation
                # for the re-entrant autograd engine.
                if self.config.fp8 and not hidden_states.requires_grad:
                    recompute_skip_num_layers += 1
                if (
                    layer_idx >= recompute_skip_num_layers
                    and layer_idx < self.config.recompute_num_layers + recompute_skip_num_layers
                ):
                    hidden_states, context = checkpoint_handler(custom(layer_idx, layer_idx + 1))
                else:
                    hidden_states, context = custom(layer_idx, layer_idx + 1)(
                        hidden_states, attention_mask, context, context_mask, rotary_pos_emb
                    )
        else:
            raise ValueError("Invalid activation recompute method.")

        return hidden_states

    def forward(
        self,
        hidden_states: Union[Tensor, WrappedTensor],
        attention_mask: Optional[Tensor],
        context: Optional[Tensor] = None,
        context_mask: Optional[Tensor] = None,
        rotary_pos_emb: Optional[Tensor] = None,
        rotary_pos_cos: Optional[Tensor] = None,
        rotary_pos_sin: Optional[Tensor] = None,
        attention_bias: Optional[Tensor] = None,
        inference_context: Optional[BaseInferenceContext] = None,
        packed_seq_params: Optional[PackedSeqParams] = None,
        sequence_len_offset: Optional[Tensor] = None,
        packed_seq_params_full: PackedSeqParams = None,
        fullatt_block_indexes=None,
        *,
        inference_params: Optional[BaseInferenceContext] = None,
    ):
        """
        Perform the forward pass through the transformer block.

        This method handles the core computation of the transformer, including
        self-attention, optional cross-attention, and feed-forward operations.

        Args:
            hidden_states (Union[Tensor, WrappedTensor]): Input tensor of shape [s, b, h]
                where s is the sequence length, b is the batch size, and h is the hidden size.
                Can be passed as a WrappedTensor during inference to avoid an obsolete
                reference in the calling function.
            attention_mask (Tensor): Boolean tensor of shape [1, 1, s, s] for masking self-attention.
            context (Tensor, optional): Context tensor for cross-attention.
            context_mask (Tensor, optional): Mask for cross-attention context.
            rotary_pos_emb (Tensor, optional): Rotary positional embeddings.
            attention_bias (Tensor): Bias tensor for Q * K.T of shape broadcastable to
                [b, num_head, sq, skv], e.g. [1, 1, sq, skv]. Used as an alternative to
                applying an attention mask for TE cuDNN attention.
            inference_context (BaseInferenceContext, optional): Parameters for inference-time
                optimizations.
            packed_seq_params (PackedSeqParams, optional): Parameters for packed sequence
                processing (window-local attention).
            packed_seq_params_full (PackedSeqParams, optional): Packed-sequence parameters
                covering whole images/frames, used by the full-attention layers.
            fullatt_block_indexes (optional): Indexes of the layers that use full attention
                instead of window attention.

        Returns:
            Union[Tensor, Tuple[Tensor, Tensor]]: The output hidden states tensor of shape
            [s, b, h], and optionally the updated context tensor if cross-attention is used.
        """
        inference_context = deprecate_inference_params(inference_context, inference_params)

        # Delete the obsolete reference to the initial input tensor if necessary
        if isinstance(hidden_states, WrappedTensor):
            hidden_states = hidden_states.unwrap()

        if not self.pre_process:
            # See set_input_tensor()
            hidden_states = self.input_tensor

        # Update the inference parameters with the current batch size in case it is variable
        if inference_context and not self.training:
            inference_context.current_batch_size = hidden_states.size(1)

        # Viewless tensor.
        # - We only need to create a viewless tensor in the case of micro batch
        #   size (mbs) == 1, since in this case, 'hidden_states.transpose()'
        #   above creates a view tensor, and '.contiguous()' is a pass-through.
        #   For mbs >= 2, '.contiguous()' creates a new tensor, eliminating
        #   the need to make it viewless.
        #
        #   However, we don't explicitly check mbs == 1 here because
        #   make_viewless_tensor() has negligible overhead when its input
        #   is already viewless.
        #
        # - For the 'else' case above, calling make_viewless_tensor() here is
        #   likely redundant, since p2p_communication.py (the likely originator)
        #   already creates viewless tensors. That said, make_viewless_tensor()
        #   is called here to be future-proof and corner-case-proof.
        hidden_states = make_viewless_tensor(inp=hidden_states, requires_grad=True, keep_graph=True)

        if self.config.sequence_parallel:
            rng_context = tensor_parallel.get_cuda_rng_tracker().fork()
        else:
            rng_context = nullcontext()

        # If fp8_recipe is delayed, wrap the entire pass with get_fp8_context();
        # otherwise do nothing extra at the outer level. If we are using other fp8
        # recipes, the context manager's enter & exit are free, so we can wrap
        # fp8_context within the for loop over layers and control at a fine
        # granularity which layers run in fp8 or bf16.
        use_outer_fp8_context = self.config.fp8 and self.config.fp8_recipe == Fp8Recipe.delayed
        use_inner_fp8_context = self.config.fp8 and self.config.fp8_recipe != Fp8Recipe.delayed
        outer_fp8_context = get_fp8_context(self.config) if use_outer_fp8_context else nullcontext()

        with rng_context, outer_fp8_context:
            # Forward pass.
            if self.config.recompute_granularity == "full" and self.training:
                hidden_states = self._checkpointed_forward(
                    hidden_states=hidden_states,
                    attention_mask=attention_mask,
                    context=context,
                    context_mask=context_mask,
                    rotary_pos_emb=rotary_pos_emb,
                    attention_bias=attention_bias,
                    packed_seq_params=packed_seq_params,
                    packed_seq_params_full=packed_seq_params_full,
                    fullatt_block_indexes=fullatt_block_indexes,
                )
            else:
                for l_no, layer in enumerate(self.layers):
                    inner_fp8_context = (
                        get_fp8_context(self.config, layer.layer_number - 1)
                        if use_inner_fp8_context
                        else nullcontext()
                    )
                    if l_no in fullatt_block_indexes:
                        packed_seq_params_now = packed_seq_params_full
                    else:
                        packed_seq_params_now = packed_seq_params
                    with self.offload_context, inner_fp8_context:
                        hidden_states, context = layer(
                            hidden_states=hidden_states,
                            attention_mask=attention_mask,
                            context=context,
                            context_mask=context_mask,
                            rotary_pos_emb=rotary_pos_emb,
                            rotary_pos_cos=rotary_pos_cos,
                            rotary_pos_sin=rotary_pos_sin,
                            attention_bias=attention_bias,
                            inference_context=inference_context,
                            packed_seq_params=packed_seq_params_now,
                            sequence_len_offset=sequence_len_offset,
                        )

                    if (
                        torch.is_grad_enabled()
                        and self.config.cpu_offloading
                        and self.group_prefetch_offload_commit_async is not None
                    ):
                        hidden_states = self.group_prefetch_offload_commit_async(hidden_states)

        # Final layer norm.
        if self.final_layernorm is not None:
            hidden_states = self.final_layernorm(hidden_states)
            # TENorm produces a "viewed" tensor. This will result in schedule.py's
            # deallocate_output_tensor() throwing an error, so a viewless tensor is
            # created to prevent this.
            hidden_states = make_viewless_tensor(inp=hidden_states, requires_grad=True, keep_graph=True)

        return hidden_states



================================================
FILE: verl_distillation/verl/models/mcore/readme.md
================================================
# verl Megatron-Core Models

Now we use [mbridge](https://github.com/iseekyan/mbridge) to support megatron models, and we will migrate to [megatron-bridge](https://github.com/NVIDIA-NeMo/Megatron-Bridge) in the future.

With mbridge, we can use almost all the Megatron-Core features to support new models with little effort, and no offline weights conversion is needed: all weight conversion is done online. We can directly save the mcore model to huggingface format during training.
Also, we can easily upgrade to the latest mcore version. In most cases the upgrade is seamless, except when the mcore API changes and we need to update the verl code accordingly.

## How to support new models (new)

1. make sure the model is supported by vLLM
2. support the model in [mbridge](https://github.com/iseekyan/mbridge); see its currently supported models for examples.
   - we will migrate to [megatron-bridge](https://github.com/NVIDIA-NeMo/Megatron-Bridge) in the future.
3. register the model forward function in verl; see the example in `verl/verl/models/mcore/registry.py`.

# Below are deprecated

Earlier versions of verl used `Megatron-LM` 0.4 and workaround huggingface model classes. To better use the latest features and speedups of modern Megatron, we are migrating to `Megatron-Core` (mcore) and using the recommended `GPTModel` class for all language models. With mcore `GPTModel`, we can use the latest features like `context parallel`, `expert parallel`, `dist_checkpointing`, etc., and we can update mcore with little effort in the future for new features.

The migration has been successful with the help of the mcore team and the community. What we have done is:

1. update the `Megatron` version to `0.14.0`
2. migrate `LlamaForCausalLM` and `Qwen2ForCausalLM` to mcore `GPTModel`
3. support sequence packing / the thd format
4. support `tensor parallel`, `pipeline parallel`, `sequence parallel`, `virtual pipeline parallel`, and `context parallel`
5. support the mcore `dist_checkpointing` feature and a basic offline weights conversion script from huggingface to the mcore `dist_checkpointing` format

We are working on the following features:

- support `Qwen2MoeForCausalLM`
- support `MixtralForCausalLM`
- support `DeepseekV3ForCausalLM`
- support `expert parallel`

Features we invite the community to contribute:

- better scripts for offline weights conversion from huggingface to the mcore `dist_checkpointing` format
  - conversion of large models with multiple GPUs
  - conversion of large models with a single GPU
- refactor `megatron_checkpoint_manager.py` to use the `dist_checkpointing` format
- support llama4
- support qwen2.5-vl

To track the progress of verl mcore integration, please refer to the [mcore integration issue](https://github.com/volcengine/verl/issues/1033).

## How things work now

To engage the community in contributing, here are the key steps in our mcore integration process and the features under development. The huggingface `transformers` library is the de facto standard model zoo, while mcore is good at computation efficiency. The main challenge is conversion between the two.

Main steps (step 1 is illustrated by the sketch after this list):

1. modelling the huggingface model with mcore `GPTModel`
   - a. convert the huggingface config to an mcore `TransformerConfig`
   - b. init the mcore `GPTModel` with the converted config
   - c. load the huggingface model weights into the `GPTModel`
2. online weight conversion from mcore to huggingface (because the rollout engine `vLLM` uses the huggingface format)
   - a. bridge the gap between the mcore and huggingface weight formats and name mapping
   - b. reshard the mcore weights online for the rollout engine; this part is very complicated because multiple parallel strategies are composed between mcore and the rollout engine
3. support the mcore features in verl
   - a. support `tensor parallel`, `pipeline parallel`, `sequence parallel`, `virtual pipeline parallel`, `context parallel`
   - b. support recompute and other mcore speed-up features
4. checkpointing
   - a. support recovering the verl training
   - b. support exporting the mcore checkpoint to huggingface format, for downstream inference
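The sketch below illustrates steps 1a and 1b using the helpers defined in `verl/verl/models/mcore/registry.py`. It is a minimal sketch, not a definitive recipe: the checkpoint name is illustrative (any architecture listed in `SupportedModel` works), and it assumes Megatron parallel state has already been initialized.

```python
import torch
from transformers import AutoConfig

from verl.models.mcore.registry import (
    get_mcore_forward_fn,
    hf_to_mcore_config,
    init_mcore_model,
)

# 1a. Convert the huggingface config to an mcore TransformerConfig.
hf_config = AutoConfig.from_pretrained("Qwen/Qwen3-8B")  # illustrative checkpoint
tfconfig = hf_to_mcore_config(hf_config, torch.bfloat16)

# 1b. Init the mcore GPTModel from the converted config.
model = init_mcore_model(tfconfig, hf_config, pre_process=True, post_process=True)

# The forward function registered for this architecture is what the trainer calls.
forward_fn = get_mcore_forward_fn(hf_config)
```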
### Modelling the huggingface model with mcore `GPTModel`

The first step is to convert the huggingface config to an mcore `TransformerConfig` and init the mcore `GPTModel` with the converted config. See the code in `verl/models/mcore/config_converter.py` and `verl/verl/models/mcore/models/model_initializer.py`. The corresponding model forward code is in `verl/verl/models/mcore/models/model_forward.py`.

There are two ways of loading the huggingface model weights into the `GPTModel`:

1. Runtime loading
   - every rank loads the entire huggingface model weights and then shards and converts them to mcore weights.
   - speed is slow and memory consumption is high.
   - this way is deprecated and will not support new models.
2. Offline loading
   - use an offline script to convert the huggingface model weights to mcore weights and save them in the mcore `dist_checkpointing` format.
   - online loading and sharding are done automatically by the mcore `dist_checkpointing` format. The speed is fast and memory consumption is low.
   - the offline script is in `verl/scripts/converter_hf_to_mcore.py`.

### Online weight conversion from mcore to huggingface

See the function `convert_megatron_model_to_transformers_model` in `verl/utils/megatron_utils.py` for the details. It should be refactored for extensibility and better performance.

### Support the mcore features in verl

Most of the features of `GPTModel` are supported out of the box in verl by changing the `TransformerConfig`, except those concerning parallel strategies, such as `expert parallel`. Features concerning parallel strategies should be supported with changes to the online weights conversion (especially the resharding part) and verl work dispatching.

### Checkpointing

The existing checkpointing code is in `verl/utils/checkpoint/megatron_checkpoint_manager.py`, and the script to convert a checkpoint to huggingface format is in `verl/scripts/model_merger`. The existing checkpoint format simply saves every rank's weights and optimizer states. It should be refactored to use the `dist_checkpointing` format.

## How to support new models

1. make sure the model is supported by vLLM
2. model the huggingface model with mcore `GPTModel` (the [Pai-Megatron-Patch](https://github.com/alibaba/Pai-Megatron-Patch/tree/main) is a good reference)
   - a. convert the huggingface config to an mcore `TransformerConfig`
   - b. init the mcore `GPTModel` with the converted config
   - c. load the huggingface model weights into the `GPTModel`
   - d. for VLMs the interface might be different; it is ok to add a new model class with `GPTModel` as its module
3. offline weights conversion from huggingface to the mcore `dist_checkpointing` format
4. support online weights conversion from mcore to huggingface
   - it is recommended to initialize a vLLM model with the converted mcore weights, and then test whether the generated sequence is correct.

## How to scale up to larger models like deepseek-v3 or other 100B+ models

The greatest challenge in scaling up to larger models is memory consumption. The necessary features under development for scaling up are:

1. Training engine
   - expert parallel
2. Rollout engine
   - pipeline parallel
   - expert parallel
   - more efficient and general weight resharding and loading
3. Offline weights conversion
   - support weights larger than a single GPU's memory



================================================
FILE: verl_distillation/verl/models/mcore/registry.py
================================================
# Copyright 2025 Bytedance Ltd.
and/or its affiliates # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Registry module for model architecture components. """ from enum import Enum from typing import Callable import torch import torch.nn as nn from .config_converter import ( PretrainedConfig, TransformerConfig, hf_to_mcore_config_dense, hf_to_mcore_config_dpskv3, hf_to_mcore_config_llama4, hf_to_mcore_config_mixtral, hf_to_mcore_config_qwen2_5_vl, hf_to_mcore_config_qwen2moe, hf_to_mcore_config_qwen3moe, ) from .model_forward import gptmodel_forward_no_padding, model_forward_gen from .model_forward_fused import fused_forward_model_gen from .model_initializer import ( BaseModelInitializer, DeepseekV3Model, DenseModel, MixtralModel, Qwen2MoEModel, Qwen3MoEModel, Qwen25VLModel, ) from .weight_converter import ( McoreToHFWeightConverterDense, McoreToHFWeightConverterDpskv3, McoreToHFWeightConverterMixtral, McoreToHFWeightConverterQwen2_5_VL, McoreToHFWeightConverterQwen2Moe, McoreToHFWeightConverterQwen3Moe, ) class SupportedModel(Enum): LLAMA = "LlamaForCausalLM" # tested QWEN2 = "Qwen2ForCausalLM" # tested QWEN2_MOE = "Qwen2MoeForCausalLM" # pending DEEPSEEK_V3 = "DeepseekV3ForCausalLM" # not tested MIXTRAL = "MixtralForCausalLM" # tested QWEN2_5_VL = "Qwen2_5_VLForConditionalGeneration" # not supported LLAMA4 = "Llama4ForConditionalGeneration" # not tested QWEN3 = "Qwen3ForCausalLM" # tested QWEN3_MOE = "Qwen3MoeForCausalLM" # tested GLM4_MOE = "Glm4MoeForCausalLM" QWEN3_TOKEN_CLASSIFICATION = "Qwen3ForTokenClassification" QWEN3_MOE_VL = "Qwen3VLMoeForConditionalGeneration" QWEN3_VL = "Qwen3VLForConditionalGeneration" # Registry for model configuration converters MODEL_CONFIG_CONVERTER_REGISTRY: dict[SupportedModel, Callable[[PretrainedConfig, torch.dtype], TransformerConfig]] = { SupportedModel.LLAMA: hf_to_mcore_config_dense, SupportedModel.QWEN2: hf_to_mcore_config_dense, SupportedModel.QWEN2_MOE: hf_to_mcore_config_qwen2moe, SupportedModel.DEEPSEEK_V3: hf_to_mcore_config_dpskv3, SupportedModel.MIXTRAL: hf_to_mcore_config_mixtral, SupportedModel.QWEN2_5_VL: hf_to_mcore_config_qwen2_5_vl, SupportedModel.LLAMA4: hf_to_mcore_config_llama4, SupportedModel.QWEN3: hf_to_mcore_config_dense, SupportedModel.QWEN3_MOE: hf_to_mcore_config_qwen3moe, SupportedModel.QWEN3_TOKEN_CLASSIFICATION: hf_to_mcore_config_dense, } # Registry for model initializers MODEL_INITIALIZER_REGISTRY: dict[SupportedModel, type[BaseModelInitializer]] = { SupportedModel.LLAMA: DenseModel, SupportedModel.QWEN2: DenseModel, SupportedModel.QWEN2_MOE: Qwen2MoEModel, SupportedModel.MIXTRAL: MixtralModel, SupportedModel.DEEPSEEK_V3: DeepseekV3Model, SupportedModel.QWEN2_5_VL: Qwen25VLModel, SupportedModel.LLAMA4: DenseModel, SupportedModel.QWEN3: DenseModel, SupportedModel.QWEN3_MOE: Qwen3MoEModel, SupportedModel.QWEN3_TOKEN_CLASSIFICATION: DenseModel, } # Registry for model forward functions MODEL_FORWARD_REGISTRY: dict[SupportedModel, Callable] = { SupportedModel.LLAMA: model_forward_gen(), 
    SupportedModel.QWEN2: model_forward_gen(),
    SupportedModel.QWEN2_MOE: model_forward_gen(),
    SupportedModel.MIXTRAL: model_forward_gen(),
    SupportedModel.DEEPSEEK_V3: model_forward_gen(),
    SupportedModel.LLAMA4: model_forward_gen(),
    SupportedModel.QWEN3: model_forward_gen(),
    SupportedModel.QWEN3_MOE: model_forward_gen(),
    SupportedModel.QWEN2_5_VL: model_forward_gen(True),
    SupportedModel.QWEN3_MOE_VL: model_forward_gen(True),
    SupportedModel.QWEN3_VL: model_forward_gen(True),
    SupportedModel.GLM4_MOE: model_forward_gen(),
    SupportedModel.QWEN3_TOKEN_CLASSIFICATION: model_forward_gen(),
}

# Registry for no-padding model forward functions
MODEL_FORWARD_NOPAD_REGISTRY: dict[SupportedModel, Callable] = {
    SupportedModel.LLAMA: gptmodel_forward_no_padding,
    SupportedModel.QWEN2: gptmodel_forward_no_padding,
    SupportedModel.QWEN2_MOE: gptmodel_forward_no_padding,
    SupportedModel.MIXTRAL: gptmodel_forward_no_padding,
    SupportedModel.DEEPSEEK_V3: gptmodel_forward_no_padding,
    SupportedModel.QWEN2_5_VL: gptmodel_forward_no_padding,
    SupportedModel.QWEN3_MOE_VL: gptmodel_forward_no_padding,
    SupportedModel.QWEN3_VL: gptmodel_forward_no_padding,
    SupportedModel.LLAMA4: gptmodel_forward_no_padding,
    SupportedModel.QWEN3: gptmodel_forward_no_padding,
    SupportedModel.QWEN3_MOE: gptmodel_forward_no_padding,
    SupportedModel.GLM4_MOE: gptmodel_forward_no_padding,
    SupportedModel.QWEN3_TOKEN_CLASSIFICATION: gptmodel_forward_no_padding,
}

# Registry for fused model forward functions
MODEL_FORWARD_FUSED_REGISTRY: dict[SupportedModel, Callable] = {
    SupportedModel.LLAMA: fused_forward_model_gen(),
    SupportedModel.QWEN2: fused_forward_model_gen(),
    SupportedModel.QWEN2_MOE: fused_forward_model_gen(),
    SupportedModel.MIXTRAL: fused_forward_model_gen(),
    SupportedModel.DEEPSEEK_V3: fused_forward_model_gen(),
    SupportedModel.QWEN2_5_VL: fused_forward_model_gen(True),
    SupportedModel.QWEN3_MOE_VL: fused_forward_model_gen(True),
    SupportedModel.QWEN3_VL: fused_forward_model_gen(True),
    SupportedModel.LLAMA4: fused_forward_model_gen(),
    SupportedModel.QWEN3: fused_forward_model_gen(),
    SupportedModel.QWEN3_MOE: fused_forward_model_gen(),
    SupportedModel.GLM4_MOE: fused_forward_model_gen(),
}

# Registry for model weight converters
MODEL_WEIGHT_CONVERTER_REGISTRY: dict[SupportedModel, type] = {
    SupportedModel.LLAMA: McoreToHFWeightConverterDense,
    SupportedModel.QWEN2: McoreToHFWeightConverterDense,
    SupportedModel.QWEN2_MOE: McoreToHFWeightConverterQwen2Moe,
    SupportedModel.MIXTRAL: McoreToHFWeightConverterMixtral,
    SupportedModel.DEEPSEEK_V3: McoreToHFWeightConverterDpskv3,
    SupportedModel.QWEN3: McoreToHFWeightConverterDense,
    SupportedModel.QWEN3_MOE: McoreToHFWeightConverterQwen3Moe,
    SupportedModel.QWEN2_5_VL: McoreToHFWeightConverterQwen2_5_VL,
    SupportedModel.QWEN3_TOKEN_CLASSIFICATION: McoreToHFWeightConverterDense,
}


def get_supported_model(model_type: str) -> SupportedModel:
    try:
        return SupportedModel(model_type)
    except ValueError as err:
        supported_models = [e.value for e in SupportedModel]
        raise NotImplementedError(
            f"Model Type: {model_type} not supported. Supported models: {supported_models}"
        ) from err


def hf_to_mcore_config(
    hf_config: PretrainedConfig, dtype: torch.dtype, **override_transformer_config_kwargs
) -> TransformerConfig:
    """Convert huggingface PretrainedConfig to mcore TransformerConfig.

    Args:
        hf_config: The huggingface PretrainedConfig.
        dtype: The dtype of the model.
**override_transformer_config_kwargs: The kwargs to override the transformer config. Returns: The mcore TransformerConfig. """ assert len(hf_config.architectures) == 1, "Only one architecture is supported for now" model = get_supported_model(hf_config.architectures[0]) return MODEL_CONFIG_CONVERTER_REGISTRY[model](hf_config, dtype, **override_transformer_config_kwargs) def init_mcore_model( tfconfig: TransformerConfig, hf_config: PretrainedConfig, pre_process: bool = True, post_process: bool = None, *, share_embeddings_and_output_weights: bool = False, value: bool = False, **extra_kwargs, # may be used for vlm and moe ) -> nn.Module: """ Initialize a Mcore model. Args: tfconfig: The transformer config. hf_config: The HuggingFace config. pre_process: Optional pre-processing function. post_process: Optional post-processing function. share_embeddings_and_output_weights: Whether to share embeddings and output weights. value: Whether to use value. **extra_kwargs: Additional keyword arguments. Returns: The initialized model. """ assert len(hf_config.architectures) == 1, "Only one architecture is supported for now" model = get_supported_model(hf_config.architectures[0]) initializer_cls = MODEL_INITIALIZER_REGISTRY[model] initializer = initializer_cls(tfconfig, hf_config) return initializer.initialize( pre_process=pre_process, post_process=post_process, share_embeddings_and_output_weights=share_embeddings_and_output_weights, value=value, **extra_kwargs, ) def get_mcore_forward_fn(hf_config: PretrainedConfig) -> Callable: """ Get the forward function for given model architecture. """ assert len(hf_config.architectures) == 1, "Only one architecture is supported for now" model = get_supported_model(hf_config.architectures[0]) return MODEL_FORWARD_REGISTRY[model] def get_mcore_forward_no_padding_fn(hf_config: PretrainedConfig) -> Callable: """ Get the forward function for given model architecture. """ assert len(hf_config.architectures) == 1, "Only one architecture is supported for now" model = get_supported_model(hf_config.architectures[0]) return MODEL_FORWARD_NOPAD_REGISTRY[model] def get_mcore_forward_fused_fn(hf_config: PretrainedConfig) -> Callable: """ Get the forward function for given model architecture. """ assert len(hf_config.architectures) == 1, "Only one architecture is supported for now" model = get_supported_model(hf_config.architectures[0]) return MODEL_FORWARD_FUSED_REGISTRY[model] def get_mcore_weight_converter(hf_config: PretrainedConfig, dtype: torch.dtype) -> Callable: """ Get the weight converter for given model architecture. """ assert len(hf_config.architectures) == 1, "Only one architecture is supported for now" model = get_supported_model(hf_config.architectures[0]) tfconfig = hf_to_mcore_config(hf_config, dtype) return MODEL_WEIGHT_CONVERTER_REGISTRY[model](hf_config, tfconfig) ================================================ FILE: verl_distillation/verl/models/mcore/saver.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
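Taken together, the registry helpers above give the typical call sequence for standing up an mcore model from a HuggingFace checkpoint config. A hedged usage sketch: `Qwen/Qwen3-8B` is an illustrative checkpoint name, and the calls assume an already-initialized Megatron parallel state (they are not runnable outside that environment):

```python
# Hedged usage sketch for the registry helpers above; assumes Megatron
# parallel state is initialized and the HF config is locally available.
import torch
from transformers import AutoConfig

hf_config = AutoConfig.from_pretrained("Qwen/Qwen3-8B")  # illustrative checkpoint

# 1) HF config -> mcore TransformerConfig (dispatches on architectures[0])
tf_config = hf_to_mcore_config(hf_config, torch.bfloat16)

# 2) Build the (pipeline-stage local) mcore module
model = init_mcore_model(tf_config, hf_config, pre_process=True, post_process=True)

# 3) Fetch the matching forward function and Mcore->HF weight converter
forward_fn = get_mcore_forward_fn(hf_config)
converter = get_mcore_weight_converter(hf_config, torch.bfloat16)
```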
# See the License for the specific language governing permissions and # limitations under the License. import time import torch import torch.distributed as dist from megatron.core import mpu from megatron.core.distributed import DistributedDataParallel as LocalDDP from megatron.core.transformer.module import Float16Module from torch.nn.parallel import DistributedDataParallel as torchDDP from verl.utils.device import get_device_id, get_torch_device from verl.utils.logger import print_rank_0 from verl.utils.megatron_utils import unwrap_model def _megatron_calc_global_rank( tp_rank: int = 0, dp_rank: int = 0, pp_rank: int = 0, cp_rank: int = 0, ep_rank: int = 0 ): """Calculate global rank with support for CP/EP parallelism""" # Get parallel sizes for each dimension tp_size = mpu.get_tensor_model_parallel_world_size() dp_size = mpu.get_data_parallel_world_size() pp_size = mpu.get_pipeline_model_parallel_world_size() cp_size = mpu.get_context_parallel_world_size() # ep_size = mpu.get_expert_model_parallel_world_size() # Verify total GPU count matches (must be consistent with parallel_state.py) total_size = tp_size * dp_size * pp_size * cp_size assert total_size == torch.distributed.get_world_size(), ( f"{tp_size}x{dp_size}x{pp_size}x{cp_size} != {torch.distributed.get_world_size()}" ) # Core calculation logic (corresponds to RankGenerator order parameter) # Assumes default order is "tp-cp-ep-dp-pp" return ((pp_rank * dp_size + dp_rank) * cp_size + cp_rank) * tp_size + tp_rank def _megatron_calc_layer_map(config): """Calculate the mapping of global layer_idx to local layer_idx Returns: layer_map (Dict: int -> tuple(int, int, int)): mapping from the global layer index to a tuple of (pp_rank, virtual_pp_rank, layer_idx inside model) """ from megatron.core import mpu pp_size = mpu.get_pipeline_model_parallel_world_size() virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1 layer_map = dict() num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers for pp_rank_idx in range(pp_size): for virtual_pp_rank_idx in range(virtual_pp_size): layer_offset = ( virtual_pp_rank_idx * (config.num_hidden_layers // virtual_pp_size) + pp_rank_idx * num_layers_per_model ) for layer_idx in range(num_layers_per_model): layer_map[layer_offset + layer_idx] = ( pp_rank_idx, virtual_pp_rank_idx, layer_idx, ) return layer_map def merge_megatron_ckpt_gptmodel(wrapped_models, config, dtype, is_value_model=False, tie_word_embeddings=False): """Merge sharded parameters of a Megatron module into a merged checkpoint. Args: wrapped_models (list of megatron.core.distributed.DistributedDataParallel): The local DDP wrapped megatron modules. config (str or None): HF config for model dtype: model params type is_value_model: if model is value model tie_word_embeddings: tie_word_embeddings Returns: state_dict (dict): The merged state_dict in rank 0, and an empty dictionary in other ranks. 
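The rank arithmetic in `_megatron_calc_global_rank` above can be sanity-checked without Megatron. The sketch below re-implements the same formula with illustrative sizes (tp = dp = cp = 2, so a 16-rank world) and verifies that enumerating the dimensions in the assumed "tp-cp-ep-dp-pp" order covers every rank exactly once:

```python
# Pure-Python check of the global-rank formula above (no Megatron needed).
# Sizes are illustrative; tp varies fastest and pp slowest, matching the
# assumed RankGenerator order "tp-cp-ep-dp-pp".
def calc_global_rank(tp_rank, dp_rank, pp_rank, cp_rank,
                     tp_size=2, dp_size=2, cp_size=2):
    return ((pp_rank * dp_size + dp_rank) * cp_size + cp_rank) * tp_size + tp_rank


if __name__ == "__main__":
    ranks = [
        calc_global_rank(tp, dp, pp, cp)
        for pp in range(2) for dp in range(2) for cp in range(2) for tp in range(2)
    ]
    assert sorted(ranks) == list(range(16))  # bijection onto the 16-rank world
    print(calc_global_rank(tp_rank=0, dp_rank=0, pp_rank=1, cp_rank=1))  # 10
```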
""" start_time = time.time() def _get_gpt_model(model): return model dp_rank = mpu.get_data_parallel_rank() pp_size = mpu.get_pipeline_model_parallel_world_size() pp_rank = mpu.get_pipeline_model_parallel_rank() cp_rank = mpu.get_context_parallel_rank() virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1 mp_group = mpu.get_model_parallel_group() if dist.get_rank() == 0: assert mp_group.rank() == 0, f"mp_rank:[{mp_group.rank}] != 0 on rank #0" assert pp_rank == 0, f"pp_rank:[{pp_rank}] != 0 on rank #0" assert dp_rank == 0, f"dp_rank:[{dp_rank}] != 0 on rank #0" if not isinstance(wrapped_models, list | tuple): wrapped_models = list(wrapped_models) assert len(wrapped_models) == virtual_pp_size num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers models = [None] * len(wrapped_models) for i, wrapped_model in enumerate(wrapped_models): models[i] = unwrap_model(wrapped_model, (torchDDP, LocalDDP, Float16Module)) assert len(models[i].decoder.layers) == num_layers_per_model, ( "len model layers {} not equal to num_layers_per_model {}".format( len(models[i].decoder.layers), num_layers_per_model ) ) state_dict = dict() def _get_cpu_tensor(tensor: torch.Tensor): if tensor is None: return None if tensor.device == torch.device("cpu"): return tensor.detach().clone() return tensor.detach().cpu() def _broadcast_tensor(tensor, name, src_pp_rank) -> torch.Tensor: """broadcast tensor across mp_group""" nonlocal state_dict nonlocal mp_group src_rank = _megatron_calc_global_rank(tp_rank=0, dp_rank=0, pp_rank=src_pp_rank, cp_rank=cp_rank) if torch.distributed.get_rank() == src_rank: if tensor is None: weight = None tensor_shape = None else: weight = tensor tensor_shape = weight.shape else: weight = None tensor_shape = None obj_list = [tensor_shape] dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group) tensor_shape = obj_list[0] if tensor_shape is None: # all or none ranks in the mp_group should reach here print_rank_0(f"tensor:[{name}] not exist, skip collect") return if weight is None: weight = torch.empty( tensor_shape, dtype=dtype, device=get_device_id(), requires_grad=False, ) dist.broadcast(weight, src=src_rank, group=mp_group) if torch.distributed.get_rank() == 0: state_dict[name] = _get_cpu_tensor(weight) def _broadcast_tp_shard_tensor(tensor, name, src_pp_rank, concat_dim=0, mutate_func=None) -> torch.Tensor: """broadcast tensor in tp shards across mp_group""" nonlocal state_dict nonlocal mp_group # tp_rank = mpu.get_tensor_model_parallel_rank() tp_size = mpu.get_tensor_model_parallel_world_size() src_rank = _megatron_calc_global_rank(tp_rank=0, dp_rank=0, pp_rank=src_pp_rank, cp_rank=cp_rank) chunk_shape = tensor.shape if torch.distributed.get_rank() == src_rank else None obj_list = [chunk_shape] dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group) chunk_shape = obj_list[0] if chunk_shape is None: # all or none ranks in the mp_group should reach here print_rank_0(f"tp_shard tensor:[{name}] not exist, skip collecting") return buffer_tensor = torch.empty( chunk_shape, dtype=dtype, device=get_device_id(), requires_grad=False, ) chunk_tensors = [None] * tp_size for i in range(tp_size): cur_src_rank = _megatron_calc_global_rank(tp_rank=i, dp_rank=0, pp_rank=src_pp_rank, cp_rank=cp_rank) sync_tensor = tensor if torch.distributed.get_rank() == cur_src_rank else buffer_tensor dist.broadcast(sync_tensor, src=cur_src_rank, group=mp_group) if 
torch.distributed.get_rank() == 0: chunk_tensors[i] = _get_cpu_tensor(sync_tensor) if torch.distributed.get_rank() == 0: full_tensor = torch.concat(chunk_tensors, dim=concat_dim) if mutate_func is not None: full_tensor = mutate_func(full_tensor) state_dict[name] = full_tensor def _broadcast_tp_shard_tensor_gate_up(tensor, gate_name, up_name, src_pp_rank) -> torch.Tensor: """broadcast tensor in tp shards across mp_group""" nonlocal state_dict nonlocal mp_group # tp_rank = mpu.get_tensor_model_parallel_rank() tp_size = mpu.get_tensor_model_parallel_world_size() src_rank = _megatron_calc_global_rank(tp_rank=0, dp_rank=0, pp_rank=src_pp_rank, cp_rank=cp_rank) chunk_shape = tensor.shape if torch.distributed.get_rank() == src_rank else None obj_list = [chunk_shape] dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group) chunk_shape = obj_list[0] if chunk_shape is None: # all or none ranks in the mp_group should reach here print_rank_0(f"tp_shard tensor:[{gate_name, up_name}] not exist, skip collecting") return buffer_tensor = torch.empty( chunk_shape, dtype=dtype, device=get_device_id(), requires_grad=False, ) chunk_tensors = [None] * tp_size for i in range(tp_size): cur_src_rank = _megatron_calc_global_rank(tp_rank=i, dp_rank=0, pp_rank=src_pp_rank, cp_rank=cp_rank) sync_tensor = tensor if torch.distributed.get_rank() == cur_src_rank else buffer_tensor dist.broadcast(sync_tensor, src=cur_src_rank, group=mp_group) if torch.distributed.get_rank() == 0: chunk_tensors[i] = _get_cpu_tensor(sync_tensor) if torch.distributed.get_rank() == 0: full_tensor = torch.concat(chunk_tensors, dim=0) intermediate_size_tp = config.intermediate_size // tp_size gate_weight_list = [] up_weight_list = [] for i in range(tp_size): gate_up_weight_tp = full_tensor[intermediate_size_tp * 2 * i : intermediate_size_tp * 2 * (i + 1)] gate_weight_tp = gate_up_weight_tp[:intermediate_size_tp] up_weight_tp = gate_up_weight_tp[intermediate_size_tp:] gate_weight_list.append(gate_weight_tp) up_weight_list.append(up_weight_tp) state_dict[gate_name] = torch.cat(gate_weight_list, dim=0) state_dict[up_name] = torch.cat(up_weight_list, dim=0) def _broadcast_tp_shard_tensor_qkv(tensor, q_name, k_name, v_name, src_pp_rank): """broadcast tensor in tp shards across mp_group""" nonlocal state_dict nonlocal mp_group # tp_rank = mpu.get_tensor_model_parallel_rank() tp_size = mpu.get_tensor_model_parallel_world_size() src_rank = _megatron_calc_global_rank(tp_rank=0, dp_rank=0, pp_rank=src_pp_rank, cp_rank=cp_rank) chunk_shape = tensor.shape if torch.distributed.get_rank() == src_rank else None obj_list = [chunk_shape] dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group) chunk_shape = obj_list[0] if chunk_shape is None: # all or none ranks in the mp_group should reach here print_rank_0(f"tp_shard tensor:[{q_name}] not exist, skip collecting") return buffer_tensor = torch.empty( chunk_shape, dtype=dtype, device=get_device_id(), requires_grad=False, ) chunk_tensors = [None] * tp_size for i in range(tp_size): cur_src_rank = _megatron_calc_global_rank(tp_rank=i, dp_rank=0, pp_rank=src_pp_rank, cp_rank=cp_rank) sync_tensor = tensor if torch.distributed.get_rank() == cur_src_rank else buffer_tensor dist.broadcast(sync_tensor, src=cur_src_rank, group=mp_group) if torch.distributed.get_rank() == 0: chunk_tensors[i] = _get_cpu_tensor(sync_tensor) if torch.distributed.get_rank() == 0: full_tensor = torch.concat(chunk_tensors, dim=0) q_weight_list = [] k_weight_list = [] v_weight_list = [] hidden_size_per_head = getattr(config, 
"head_dim", config.hidden_size // config.num_attention_heads) if config.num_key_value_heads >= tp_size: q_size_tp = hidden_size_per_head * config.num_attention_heads // tp_size kv_size_tp = hidden_size_per_head * config.num_key_value_heads // tp_size total_size = q_size_tp + 2 * kv_size_tp for i in range(tp_size): num_query_groups_per_partition = wrapped_models[0].config.num_query_groups // tp_size qkv_part = full_tensor[i * total_size : (i + 1) * total_size] q_size_chunk = q_size_tp // num_query_groups_per_partition kv_size_chunk = kv_size_tp // num_query_groups_per_partition for qkv_part_chunk in qkv_part.chunk(num_query_groups_per_partition): q_part = qkv_part_chunk[:q_size_chunk] k_part = qkv_part_chunk[q_size_chunk : q_size_chunk + kv_size_chunk] v_part = qkv_part_chunk[q_size_chunk + kv_size_chunk :] q_weight_list.append(q_part) k_weight_list.append(k_part) v_weight_list.append(v_part) else: q_size_tp = hidden_size_per_head * config.num_attention_heads // tp_size kv_size_tp = hidden_size_per_head total_size = q_size_tp + 2 * kv_size_tp for i in range(tp_size): num_query_groups_per_partition = wrapped_models[0].config.num_query_groups // tp_size qkv_part = full_tensor[i * total_size : (i + 1) * total_size] q_size_chunk = q_size_tp // num_query_groups_per_partition kv_size_chunk = kv_size_tp // num_query_groups_per_partition for qkv_part_chunk in qkv_part.chunk(num_query_groups_per_partition): q_part = qkv_part_chunk[:q_size_chunk] k_part = qkv_part_chunk[q_size_chunk : q_size_chunk + kv_size_chunk] v_part = qkv_part_chunk[q_size_chunk + kv_size_chunk :] q_weight_list.append(q_part) if i * config.num_key_value_heads % tp_size == 0: k_weight_list.append(k_part) v_weight_list.append(v_part) state_dict[q_name] = torch.cat(q_weight_list, dim=0) state_dict[k_name] = torch.cat(k_weight_list, dim=0) state_dict[v_name] = torch.cat(v_weight_list, dim=0) # empty cache before collecting weights get_torch_device().empty_cache() # Embeddings # ------------------- if dp_rank == 0 and cp_rank == 0: # models are identical across cp ranks # Embeddings # ------------------- print_rank_0("collecting embeddings...") gpt_model_module = _get_gpt_model(models[0]) _broadcast_tp_shard_tensor( gpt_model_module.embedding.word_embeddings.weight if pp_rank == 0 else None, "model.embed_tokens.weight", src_pp_rank=0, ) # Transformer layers # ------------------- layer_map = _megatron_calc_layer_map(config) for layer in range(config.num_hidden_layers): print_rank_0(f"collecting layer #{layer}...") layer_name = f"model.layers.{layer}" src_pp_rank, src_virtual_pp_rank, src_layer_idx = layer_map[layer] gpt_model_module = _get_gpt_model(models[src_virtual_pp_rank]) sync_layer = gpt_model_module.decoder.layers[src_layer_idx] _broadcast_tensor( sync_layer.self_attention.linear_qkv.layer_norm_weight, f"{layer_name}.input_layernorm.weight", src_pp_rank=src_pp_rank, ) if gpt_model_module.config.qk_layernorm: _broadcast_tensor( sync_layer.self_attention.q_layernorm.weight, f"{layer_name}.self_attn.q_norm.weight", src_pp_rank=src_pp_rank, ) _broadcast_tensor( sync_layer.self_attention.k_layernorm.weight, f"{layer_name}.self_attn.k_norm.weight", src_pp_rank=src_pp_rank, ) _broadcast_tp_shard_tensor_qkv( sync_layer.self_attention.linear_qkv.weight, f"{layer_name}.self_attn.q_proj.weight", f"{layer_name}.self_attn.k_proj.weight", f"{layer_name}.self_attn.v_proj.weight", src_pp_rank=src_pp_rank, ) if gpt_model_module.config.add_qkv_bias: _broadcast_tp_shard_tensor_qkv( sync_layer.self_attention.linear_qkv.bias, 
f"{layer_name}.self_attn.q_proj.bias", f"{layer_name}.self_attn.k_proj.bias", f"{layer_name}.self_attn.v_proj.bias", src_pp_rank=src_pp_rank, ) _broadcast_tp_shard_tensor( sync_layer.self_attention.linear_proj.weight, f"{layer_name}.self_attn.o_proj.weight", concat_dim=1, src_pp_rank=src_pp_rank, ) _broadcast_tensor( sync_layer.mlp.linear_fc1.layer_norm_weight, f"{layer_name}.post_attention_layernorm.weight", src_pp_rank=src_pp_rank, ) _broadcast_tp_shard_tensor_gate_up( sync_layer.mlp.linear_fc1.weight, f"{layer_name}.mlp.gate_proj.weight", f"{layer_name}.mlp.up_proj.weight", src_pp_rank=src_pp_rank, ) _broadcast_tp_shard_tensor( sync_layer.mlp.linear_fc2.weight, f"{layer_name}.mlp.down_proj.weight", concat_dim=1, src_pp_rank=src_pp_rank, ) # Final Layernorm # ------------------- print_rank_0("collecting final layernorm...") gpt_model_module = _get_gpt_model(models[-1]) _broadcast_tensor( getattr(gpt_model_module.decoder.final_layernorm, "weight", None), "model.norm.weight", src_pp_rank=pp_size - 1, ) if tie_word_embeddings: print_rank_0("tie word embedding skip load lm_head...") else: print_rank_0("collecting lm_head...") if is_value_model: lm_head_weight = None if pp_rank == pp_size - 1: lm_head_weight = getattr(gpt_model_module.output_layer, "weight", None) _broadcast_tensor(lm_head_weight, "lm_head.weight", src_pp_rank=pp_size - 1) else: _broadcast_tp_shard_tensor( getattr(gpt_model_module.output_layer, "weight", None) if pp_rank == pp_size - 1 else None, "lm_head.weight", src_pp_rank=pp_size - 1, ) dist.barrier() get_torch_device().empty_cache() if torch.distributed.get_rank() == 0: for k, v in state_dict.items(): if dtype != v.dtype: state_dict[k] = v.to(dtype) print_rank_0(f"merge megatron ckpt done, time elapsed {time.time() - start_time}s") return state_dict def merge_megatron_ckpt_gptmodel_qwen_moe( wrapped_models, config, dtype, is_value_model=False, tie_word_embeddings=False ): raise NotImplementedError("merge_megatron_ckpt_gptmodel_qwen_moe is not implemented") def merge_megatron_ckpt_gptmodel_qwen2_5_vl( wrapped_models, config, dtype, is_value_model=False, tie_word_embeddings=False ): raise NotImplementedError("merge_megatron_ckpt_gptmodel_qwen2_5_vl is not implemented") def merge_megatron_ckpt_gptmodel_dpskv3(wrapped_models, config, dtype, is_value_model=False, tie_word_embeddings=False): raise NotImplementedError("merge_megatron_ckpt_gptmodel_dpskv3 is not implemented") def merge_megatron_ckpt_gptmodel_mixtral( wrapped_models, config, dtype, is_value_model=False, tie_word_embeddings=False ): raise NotImplementedError("merge_megatron_ckpt_gptmodel_mixtral is not implemented") ================================================ FILE: verl_distillation/verl/models/mcore/util.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import torch from megatron.core import parallel_state as mpu from megatron.core.packed_seq_params import PackedSeqParams from verl.utils.model import CausalLMOutputForPPO def preprocess_packed_seqs( input_ids: torch.Tensor, attention_mask: torch.Tensor, pre_process: bool = True ) -> tuple[torch.Tensor, PackedSeqParams]: """ Preprocess packed sequences CP splits sequence into CP*2 chunks, and each GPU gets 2 chunks (GPU0 gets first and last chunks, GPU1 gets second and second last chunks, and so on), this is for load balancing with causal masking. See https://github.com/NVIDIA/TransformerEngine/issues/1368 """ batch_size = input_ids.shape[0] seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) tp_size = mpu.get_tensor_model_parallel_world_size() cp_size = mpu.get_context_parallel_world_size() cp_rank = mpu.get_context_parallel_rank() align_size = tp_size * cp_size * 2 if cp_size > 1 else tp_size pad_size = (align_size - seqlens_in_batch % align_size) % align_size seqlens_in_batch_padded = seqlens_in_batch + pad_size cu_seqlens = torch.zeros(batch_size + 1, dtype=torch.int32, device=input_ids.device) cu_seqlens[1:] = torch.cumsum(seqlens_in_batch, dim=0) cu_seqlens_padded = torch.zeros(batch_size + 1, dtype=torch.int32, device=input_ids.device) cu_seqlens_padded[1:] = torch.cumsum(seqlens_in_batch_padded, dim=0) # ---------------------------------------------------------------------------- # Move the index information needed in the subsequent loop to the CPU at once, # to avoid frequent .item() calls in the loop that cause D2H synchronization # ---------------------------------------------------------------------------- seqlens_in_batch_cpu: list[int] = seqlens_in_batch.tolist() # original valid lengths seqlens_in_batch_padded_cpu: list[int] = seqlens_in_batch_padded.tolist() # lengths after padding cu_seqlens_padded_cpu: list[int] = cu_seqlens_padded.tolist() # start positions (after padding) # Pure Python int calculation to avoid further synchronization max_seqlen_in_batch = max(seqlens_in_batch_padded_cpu) shape = list(input_ids.shape[1:]) shape[0] = sum(seqlens_in_batch_padded_cpu) // cp_size if pre_process: input_ids_rmpad = torch.zeros(shape, dtype=input_ids.dtype, device=input_ids.device) for i in range(batch_size): # Use Python int, so no GPU→CPU sync in the loop if cp_size <= 1: seqlen = seqlens_in_batch_cpu[i] start_idx = cu_seqlens_padded_cpu[i] input_ids_rmpad[start_idx : start_idx + seqlen] = input_ids[i, attention_mask[i]] continue seqlen_padded_i = seqlens_in_batch_padded_cpu[i] seqlen = seqlen_padded_i // cp_size half_seqlen = seqlen // 2 start_idx = cu_seqlens_padded_cpu[i] // cp_size # split to 2 chunks d = input_ids[i, attention_mask[i]] input_ids_rmpad[start_idx : start_idx + half_seqlen] = d[ half_seqlen * cp_rank : half_seqlen * (cp_rank + 1) ] remain_start = seqlen_padded_i - half_seqlen * (cp_rank + 1) remain_end = seqlen_padded_i - half_seqlen * cp_rank remain_end = min(remain_end, d.shape[0]) remain_len = remain_end - remain_start if remain_len > 0: input_ids_rmpad[start_idx + half_seqlen : start_idx + half_seqlen + remain_len] = d[ remain_start:remain_end ] packed_seq_params = PackedSeqParams( qkv_format="thd", cu_seqlens_q=cu_seqlens_padded, max_seqlen_q=max_seqlen_in_batch, cu_seqlens_kv=cu_seqlens_padded, max_seqlen_kv=max_seqlen_in_batch, cu_seqlens_q_padded=cu_seqlens_padded, cu_seqlens_kv_padded=cu_seqlens_padded, ) if pre_process: return input_ids_rmpad.unsqueeze(0), packed_seq_params else: return input_ids, packed_seq_params def 
postprocess_packed_seqs( output: torch.Tensor, packed_seq_params: PackedSeqParams, attention_mask: torch.Tensor, batch_size: int, seq_len: int, post_process: bool = True, ) -> torch.Tensor: """ Postprocess packed sequences """ if not post_process: return output # ------------------------------------------------------------------------- # Move the lengths and offsets needed for subsequent Python-level indexing to the CPU in advance, # to avoid a large number of .item() calls in the loop # ------------------------------------------------------------------------- cu_padded_cpu: list[int] = packed_seq_params.cu_seqlens_q_padded.tolist() seq_lens_cpu: list[int] = attention_mask.sum(dim=1, dtype=torch.int32).cpu().tolist() shape = [batch_size, seq_len] + list(output.shape[2:]) # 1,packed, dim -> batch_size, seq_len, dim output_new = torch.zeros(shape, dtype=output.dtype, device=output.device) cp_size = mpu.get_context_parallel_world_size() # all gather output across context parallel group if cp_size > 1: # output shape: [1, packed_len, hidden_dim] # need to gather across cp group and concatenate in sequence dimension output_list = [torch.empty_like(output) for _ in range(cp_size)] torch.distributed.all_gather(output_list, output.detach(), group=mpu.get_context_parallel_group()) output_list[mpu.get_context_parallel_rank()] = output else: output_list = [output] for i in range(batch_size): if cp_size <= 1: s = seq_lens_cpu[i] start_idx = cu_padded_cpu[i] output_new[i, attention_mask[i]] = output[0][start_idx : start_idx + s] continue s_len_padded_chunk = (cu_padded_cpu[i + 1] - cu_padded_cpu[i]) // cp_size half_seqlen = s_len_padded_chunk // 2 s_len = seq_lens_cpu[i] s_len_padded = s_len_padded_chunk * cp_size tmp = torch.empty(s_len_padded, *output.shape[2:], device=output.device) for j in range(cp_size): o = output_list[j][0] # split to 2 chunks packed_start_idx = cu_padded_cpu[i] // cp_size o0, o1 = ( o[packed_start_idx : packed_start_idx + half_seqlen], o[packed_start_idx + half_seqlen : packed_start_idx + s_len_padded_chunk], ) tmp[j * half_seqlen : (j + 1) * half_seqlen] = o0 tmp[s_len_padded - (j + 1) * half_seqlen : s_len_padded - j * half_seqlen] = o1 output_new[i, attention_mask[i]] = tmp[:s_len] return output_new def preprocess_packed_seqs_no_padding( input_ids: torch.Tensor, pre_process: bool = True ) -> tuple[torch.Tensor, PackedSeqParams]: """ Preprocess packed sequences CP splits sequence into CP*2 chunks, and each GPU gets 2 chunks (GPU0 gets first and last chunks, GPU1 gets second and second last chunks, and so on), this is for load balancing with causal masking. 
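The alignment bookkeeping at the top of `preprocess_packed_seqs` is easiest to see with concrete numbers. The standalone sketch below reproduces just the padding and `cu_seqlens` arithmetic; `tp_size` and `cp_size` are stand-in values rather than values read from Megatron parallel state:

```python
# Toy illustration (pure torch) of the alignment / cu_seqlens bookkeeping
# in preprocess_packed_seqs above. tp_size/cp_size are stand-in values.
import torch

tp_size, cp_size = 2, 2
align_size = tp_size * cp_size * 2 if cp_size > 1 else tp_size   # 8

seqlens = torch.tensor([5, 13, 8], dtype=torch.int32)
pad = (align_size - seqlens % align_size) % align_size           # [3, 3, 0]
seqlens_padded = seqlens + pad                                   # [8, 16, 8]

cu_seqlens_padded = torch.zeros(len(seqlens) + 1, dtype=torch.int32)
cu_seqlens_padded[1:] = torch.cumsum(seqlens_padded, dim=0)      # [0, 8, 24, 32]

# Each CP rank holds 1/cp_size of every padded sequence, so the packed
# buffer on one rank is sum(seqlens_padded) // cp_size tokens long.
print(cu_seqlens_padded.tolist(), int(seqlens_padded.sum()) // cp_size)  # ..., 16
```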
See https://github.com/NVIDIA/TransformerEngine/issues/1368 """ batch_size = input_ids.shape[0] tp_size = mpu.get_tensor_model_parallel_world_size() cp_size = mpu.get_context_parallel_world_size() cp_rank = mpu.get_context_parallel_rank() align_size = tp_size * cp_size * 2 if cp_size > 1 else tp_size seqlens_in_batch = input_ids.offsets().diff() pad_size = (align_size - seqlens_in_batch % align_size) % align_size seqlens_in_batch_padded = seqlens_in_batch + pad_size cu_seqlens = torch.zeros(batch_size + 1, dtype=torch.int32, device=input_ids.device) cu_seqlens[1:] = torch.cumsum(seqlens_in_batch, dim=0) cu_seqlens_padded = torch.zeros(batch_size + 1, dtype=torch.int32, device=input_ids.device) cu_seqlens_padded[1:] = torch.cumsum(seqlens_in_batch_padded, dim=0) # ---------------------------------------------------------------------------- # Move the index information needed in the subsequent loop to the CPU at once, # to avoid frequent .item() calls in the loop that cause D2H synchronization # ---------------------------------------------------------------------------- seqlens_in_batch_cpu: list[int] = seqlens_in_batch.tolist() # original valid lengths seqlens_in_batch_padded_cpu: list[int] = seqlens_in_batch_padded.tolist() # lengths after padding cu_seqlens_padded_cpu: list[int] = cu_seqlens_padded.tolist() # start positions (after padding) # Pure Python int calculation to avoid further synchronization max_seqlen_in_batch = max(seqlens_in_batch_padded_cpu) shape = list(input_ids.shape[1:]) shape[0] = sum(seqlens_in_batch_padded_cpu) // cp_size if pre_process: input_ids_rmpad = torch.zeros(shape, dtype=input_ids.dtype, device=input_ids.device) for i in range(batch_size): # Use Python int, so no GPU→CPU sync in the loop if cp_size <= 1: seqlen = seqlens_in_batch_cpu[i] start_idx = cu_seqlens_padded_cpu[i] input_ids_rmpad[start_idx : start_idx + seqlen] = input_ids[i] continue seqlen_padded_i = seqlens_in_batch_padded_cpu[i] seqlen = seqlen_padded_i // cp_size half_seqlen = seqlen // 2 start_idx = cu_seqlens_padded_cpu[i] // cp_size # split to 2 chunks d = input_ids[i] input_ids_rmpad[start_idx : start_idx + half_seqlen] = d[ half_seqlen * cp_rank : half_seqlen * (cp_rank + 1) ] remain_start = seqlen_padded_i - half_seqlen * (cp_rank + 1) remain_end = seqlen_padded_i - half_seqlen * cp_rank remain_end = min(remain_end, d.shape[0]) remain_len = remain_end - remain_start if remain_len > 0: input_ids_rmpad[start_idx + half_seqlen : start_idx + half_seqlen + remain_len] = d[ remain_start:remain_end ] packed_seq_params = PackedSeqParams( qkv_format="thd", cu_seqlens_q=cu_seqlens_padded, max_seqlen_q=max_seqlen_in_batch, cu_seqlens_kv=cu_seqlens_padded, max_seqlen_kv=max_seqlen_in_batch, cu_seqlens_q_padded=cu_seqlens_padded, cu_seqlens_kv_padded=cu_seqlens_padded, ) if pre_process: return input_ids_rmpad.unsqueeze(0), packed_seq_params else: return input_ids, packed_seq_params def postprocess_packed_seqs_no_padding( output: torch.Tensor, packed_seq_params: PackedSeqParams, input_ids: torch.Tensor, batch_size: int, post_process: bool = True, ) -> torch.Tensor: """ Postprocess packed sequences """ if not post_process: return output # ------------------------------------------------------------------------- # Move the lengths and offsets needed for subsequent Python-level indexing to the CPU in advance, # to avoid a large number of .item() calls in the loop # ------------------------------------------------------------------------- cu_padded_cpu: list[int] = 
packed_seq_params.cu_seqlens_q_padded.tolist() # The reason why we use input_ids.offsets() instead of packed_seq_params.cu_seqlens_q.diff() # is that the latter one is the padded length, while the former one is the original length. cu_seqlens = input_ids.offsets() seq_lens_cpu: list[int] = cu_seqlens.diff().tolist() output_new = [] cp_size = mpu.get_context_parallel_world_size() # all gather output across context parallel group if cp_size > 1: # output shape: [1, packed_len, hidden_dim] # need to gather across cp group and concatenate in sequence dimension output_list = [torch.empty_like(output) for _ in range(cp_size)] torch.distributed.all_gather(output_list, output.detach(), group=mpu.get_context_parallel_group()) output_list[mpu.get_context_parallel_rank()] = output else: output_list = [output] for i in range(batch_size): if cp_size <= 1: s = seq_lens_cpu[i] start_idx = cu_padded_cpu[i] output_new.append(output[0][start_idx : start_idx + s]) continue s_len_padded_chunk = (cu_padded_cpu[i + 1] - cu_padded_cpu[i]) // cp_size half_seqlen = s_len_padded_chunk // 2 s_len = seq_lens_cpu[i] s_len_padded = s_len_padded_chunk * cp_size tmp = torch.empty(s_len_padded, *output.shape[2:], device=output.device) for j in range(cp_size): o = output_list[j][0] # split to 2 chunks packed_start_idx = cu_padded_cpu[i] // cp_size o0, o1 = ( o[packed_start_idx : packed_start_idx + half_seqlen], o[packed_start_idx + half_seqlen : packed_start_idx + s_len_padded_chunk], ) tmp[j * half_seqlen : (j + 1) * half_seqlen] = o0 tmp[s_len_padded - (j + 1) * half_seqlen : s_len_padded - j * half_seqlen] = o1 output_new.append(tmp[:s_len]) output_new_tensor = torch.nested.as_nested_tensor(output_new, layout=torch.jagged) return output_new_tensor def remove_left_padding( input_ids: torch.Tensor, attention_mask: torch.Tensor, position_ids: torch.Tensor, sequence_parallel: bool = False, pre_process: bool = True, ): """ Remove left padding from input_ids, attention_mask and position_ids return new_input_ids, new_attention_mask, new_position_ids """ assert attention_mask.ndim == 2 assert position_ids.ndim == 2 cp_size = mpu.get_context_parallel_world_size() assert cp_size == 1, "Context parallel size without seq_pack is not supported" batch_size = input_ids.shape[0] shape = list(input_ids.shape) # batch_size, seq_len,... 
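The "two chunks per CP rank" layout used by the pre/postprocess pairs above is easiest to follow with a toy sequence: rank r keeps chunk r and chunk (2*cp_size-1-r), which balances causal-attention work across ranks. The sketch below builds that layout and then reassembles the original order the same way `postprocess_packed_seqs` does:

```python
# Pure-Python sketch of the CP "2 chunks per rank" layout used above.
cp_size = 2
seq = list(range(16))                     # one padded sequence
half = len(seq) // (cp_size * 2)          # 4 tokens per half-chunk

per_rank = []
for r in range(cp_size):
    front = seq[half * r: half * (r + 1)]                       # chunk r
    back = seq[len(seq) - half * (r + 1): len(seq) - half * r]  # chunk 2*cp-1-r
    per_rank.append(front + back)
# per_rank[0] == [0..3, 12..15]; per_rank[1] == [4..7, 8..11]

# Reassemble the original order the way postprocess_packed_seqs does.
restored = [None] * len(seq)
for r, chunk in enumerate(per_rank):
    restored[half * r: half * (r + 1)] = chunk[:half]
    restored[len(seq) - half * (r + 1): len(seq) - half * r] = chunk[half:]
assert restored == seq
```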
seq_lens = attention_mask.sum(dim=1) seq_len = seq_lens.max().item() if sequence_parallel: sp_world_size = mpu.get_tensor_model_parallel_world_size() pad_size = (sp_world_size - seq_len % sp_world_size) % sp_world_size seq_len = seq_len + pad_size shape[1] = seq_len if pre_process: new_input_ids = torch.zeros(dtype=input_ids.dtype, device=input_ids.device, size=shape) new_attention_mask = torch.zeros( dtype=attention_mask.dtype, device=attention_mask.device, size=(batch_size, seq_len) ) new_position_ids = torch.zeros(dtype=position_ids.dtype, device=position_ids.device, size=(batch_size, seq_len)) for i in range(batch_size): if pre_process: new_input_ids[i, : seq_lens[i]] = input_ids[i, attention_mask[i]] new_attention_mask[i, : seq_lens[i]] = attention_mask[i, attention_mask[i]] new_position_ids[i, : seq_lens[i]] = position_ids[i, attention_mask[i]] if pre_process: return new_input_ids, new_attention_mask, new_position_ids else: return input_ids, new_attention_mask, new_position_ids def recover_left_padding( result, attention_mask: torch.Tensor, original_attention_mask: torch.Tensor, origin_seqlen: int, post_process: bool = True, ): """ Recover left padding from result and return the recovered tensor. """ if not post_process: return result shape = list(result.shape) batch_size = shape[0] shape[1] = origin_seqlen new_result = torch.zeros(dtype=result.dtype, device=result.device, size=shape) for i in range(batch_size): new_result[i, original_attention_mask[i]] = result[i, attention_mask[i]] return new_result def postprocess_packed_seqs_for_dict_output( labels_mask: torch.Tensor, output: CausalLMOutputForPPO, packed_seq_params: PackedSeqParams, attention_mask: torch.Tensor, batch_size: int, seq_len: int, post_process: bool = True, ) -> dict[str, torch.Tensor]: """Post-process fused-kernel outputs returned as a dictionary. For fused kernels, the output is a CausalLMOutputForPPO whose packed tensors (e.g. 'log_probs', 'entropy') must be unpacked back to [batch_size, seq_len]. This function post-processes each tensor via postprocess_packed_seqs. Args: labels_mask (torch.Tensor): mask of valid label positions; log-probs outside it are zeroed. output (CausalLMOutputForPPO): fused forward output with packed log_probs and entropy. packed_seq_params (PackedSeqParams): packing metadata produced during preprocessing. attention_mask (torch.Tensor): original attention mask of shape [batch_size, seq_len]. batch_size (int): original batch size. seq_len (int): original (unpacked) sequence length. post_process (bool, optional): passed through to postprocess_packed_seqs. Defaults to True. Returns: dict[str, torch.Tensor]: 'entropy' and 'log_probs' restored to [batch_size, seq_len]. """ ret = {} output.entropy = output.entropy.view(1, -1) output.log_probs = output.log_probs.view(1, -1) output.log_probs = output.log_probs.masked_fill(~labels_mask, 0.0) ret["entropy"] = postprocess_packed_seqs( output.entropy, packed_seq_params, attention_mask, batch_size, seq_len, post_process=post_process ) ret["log_probs"] = postprocess_packed_seqs( output.log_probs, packed_seq_params, attention_mask, batch_size, seq_len, post_process=post_process ) return ret ================================================ FILE: verl_distillation/verl/models/mcore/weight_converter.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # limitations under the License. # Online conversion of mcore weights to plain HuggingFace weights, without any fusion. # This includes format conversion and name mapping, but not resharding. import torch from megatron.core.transformer import TransformerConfig from transformers import PretrainedConfig class McoreToHFWeightConverterBase: def __init__(self, hf_config: PretrainedConfig, mcore_config: TransformerConfig): self.hf_config = hf_config self.mcore_config = mcore_config def convert_param(self, name: str, params_one_group: list[torch.Tensor]) -> tuple[list[str], list[torch.Tensor]]: raise NotImplementedError class McoreToHFWeightConverterDense(McoreToHFWeightConverterBase): def _convert_attention_param(self, name: str, params: list[torch.Tensor]) -> tuple[list[str], list[torch.Tensor]]: # 'decoder.layers.0.self_attention.linear_proj.weight' # 'decoder.layers.0.self_attention.linear_qkv.layer_norm_weight' # 'decoder.layers.0.self_attention.linear_qkv.weight' # 'decoder.layers.0.self_attention.linear_qkv.bias' layer_number = name.split(".")[2] convert_names = [] if "self_attention.linear_qkv.bias" in name or "self_attention.linear_qkv.weight" in name: param_type = name.split(".")[-1] assert param_type == "bias" or param_type == "weight" convert_names.append(f"model.layers.{layer_number}.self_attn.q_proj.{param_type}") convert_names.append(f"model.layers.{layer_number}.self_attn.k_proj.{param_type}") convert_names.append(f"model.layers.{layer_number}.self_attn.v_proj.{param_type}") assert len(params) == 3 elif "self_attention.linear_proj.weight" in name: convert_names.append(f"model.layers.{layer_number}.self_attn.o_proj.weight") assert len(params) == 1 elif "self_attention.linear_qkv.layer_norm_weight" in name: convert_names.append(f"model.layers.{layer_number}.input_layernorm.weight") assert len(params) == 1 elif "self_attention.q_layernorm.weight" in name: convert_names.append(f"model.layers.{layer_number}.self_attn.q_norm.weight") assert len(params) == 1 elif "self_attention.k_layernorm.weight" in name: convert_names.append(f"model.layers.{layer_number}.self_attn.k_norm.weight") assert len(params) == 1 else: raise NotImplementedError(f"Unsupported parameter name: {name}") return convert_names, params def _convert_mlp_param(self, name: str, params: list[torch.Tensor]) -> tuple[list[str], list[torch.Tensor]]: # 'decoder.layers.0.mlp.linear_fc1.layer_norm_weight' # 'decoder.layers.0.mlp.linear_fc1.weight' # 'decoder.layers.0.mlp.linear_fc2.weight' layer_number = name.split(".")[2] convert_names = [] if "mlp.linear_fc1.weight" in name: # split gate_proj and up_proj convert_names.append(f"model.layers.{layer_number}.mlp.gate_proj.weight") convert_names.append(f"model.layers.{layer_number}.mlp.up_proj.weight") assert len(params) == 2 elif "mlp.linear_fc1.layer_norm_weight" in name: convert_names.append(f"model.layers.{layer_number}.post_attention_layernorm.weight") assert len(params) == 1 elif "mlp.linear_fc2.weight" in name: convert_names.append(f"model.layers.{layer_number}.mlp.down_proj.weight") assert len(params) == 1 else: raise NotImplementedError(f"Unsupported parameter name: {name}") return convert_names, params def convert_param(self, name: str, params_one_group: list[torch.Tensor]) -> tuple[list[str], list[torch.Tensor]]: direct_name_mapping = { "embedding.word_embeddings.weight": "model.embed_tokens.weight", "decoder.final_layernorm.weight": "model.norm.weight", "output_layer.weight": "lm_head.weight", } if name in direct_name_mapping: return
[direct_name_mapping[name]], [params_one_group[0]] if "self_attention" in name: return self._convert_attention_param(name, params_one_group) elif "mlp" in name: return self._convert_mlp_param(name, params_one_group) else: raise NotImplementedError(f"Unsupported parameter name: {name}") class McoreToHFWeightConverterQwen2Moe(McoreToHFWeightConverterDense): def _convert_mlp_param(self, name: str, params: list[torch.Tensor]) -> tuple[list[str], list[torch.Tensor]]: # 'decoder.layers.0.pre_mlp_layernorm.weight', # 'decoder.layers.0.mlp.router.weight', # 'decoder.layers.0.mlp.shared_experts.gate_weight', # 'decoder.layers.0.mlp.shared_experts.linear_fc1.weight', # 'decoder.layers.0.mlp.shared_experts.linear_fc2.weight' # moe1 # 'decoder.layers.0.mlp.experts.linear_fc1.weight0', # 'decoder.layers.0.mlp.experts.linear_fc1.weight1', # 'decoder.layers.0.mlp.experts.linear_fc1.weight2', # 'decoder.layers.0.mlp.experts.linear_fc1.weight3', # moe2 # 'decoder.layers.0.mlp.experts.linear_fc2.weight0', # 'decoder.layers.0.mlp.experts.linear_fc2.weight1', layer_number = name.split(".")[2] convert_names = [] if "pre_mlp_layernorm" in name: convert_names.append(f"model.layers.{layer_number}.post_attention_layernorm.weight") assert len(params) == 1 elif "mlp.router.weight" in name: convert_names.append(f"model.layers.{layer_number}.mlp.gate.weight") assert len(params) == 1 elif "shared_experts.gate_weight" in name: convert_names.append(f"model.layers.{layer_number}.mlp.shared_expert_gate.weight") assert len(params) == 1 elif "shared_experts.linear_fc1.weight" in name: # split gate_proj and up_proj convert_names.append(f"model.layers.{layer_number}.mlp.shared_expert.gate_proj.weight") convert_names.append(f"model.layers.{layer_number}.mlp.shared_expert.up_proj.weight") assert len(params) == 2 elif "shared_experts.linear_fc2.weight" in name: convert_names.append(f"model.layers.{layer_number}.mlp.shared_expert.down_proj.weight") assert len(params) == 1 elif "mlp.experts.linear_fc1" in name: # split gate_proj and up_proj expert_id = name.split("weight")[-1] convert_names.append(f"model.layers.{layer_number}.mlp.experts.{expert_id}.gate_proj.weight") convert_names.append(f"model.layers.{layer_number}.mlp.experts.{expert_id}.up_proj.weight") assert len(params) == 2 elif "mlp.experts.linear_fc2" in name: expert_id = name.split("weight")[-1] convert_names.append(f"model.layers.{layer_number}.mlp.experts.{expert_id}.down_proj.weight") assert len(params) == 1 else: raise NotImplementedError(f"Unsupported parameter name: {name}") return convert_names, params class McoreToHFWeightConverterQwen2_5_VL(McoreToHFWeightConverterDense): def convert_param(self, name: str, params_one_group: list[torch.Tensor]) -> tuple[list[str], list[torch.Tensor]]: direct_name_mapping = { "language_model.embedding.word_embeddings.weight": "model.embed_tokens.weight", "language_model.decoder.final_layernorm.weight": "model.norm.weight", "language_model.output_layer.weight": "lm_head.weight", "vision_model.patch_embed.proj.weight": "visual.patch_embed.proj.weight", "vision_model.decoder.final_layernorm.weight": "visual.merger.ln_q.weight", "vision_model.projection.encoder.linear_fc1.weight": "visual.merger.mlp.0.weight", "vision_model.projection.encoder.linear_fc1.bias": "visual.merger.mlp.0.bias", "vision_model.projection.encoder.linear_fc2.weight": "visual.merger.mlp.2.weight", "vision_model.projection.encoder.linear_fc2.bias": "visual.merger.mlp.2.bias", } if name in direct_name_mapping: return [direct_name_mapping[name]], 
[params_one_group[0]] if "self_attention" in name: return self._convert_attention_param(name, params_one_group) elif "mlp" in name: return self._convert_mlp_param(name, params_one_group) else: raise NotImplementedError(f"Unsupported parameter name: {name}") def _convert_attention_param(self, name: str, params: list[torch.Tensor]) -> tuple[list[str], list[torch.Tensor]]: model_type, _, _, layer_number = name.split(".")[:4] convert_names = [] if model_type == "language_model": name_map_after_layer = { "self_attention.linear_qkv.bias": [ "self_attn.q_proj.bias", "self_attn.k_proj.bias", "self_attn.v_proj.bias", ], "self_attention.linear_qkv.weight": [ "self_attn.q_proj.weight", "self_attn.k_proj.weight", "self_attn.v_proj.weight", ], "self_attention.linear_proj.weight": "self_attn.o_proj.weight", "self_attention.linear_qkv.layer_norm_weight": "input_layernorm.weight", } name_after_layer = ".".join(name.split(".")[-3:]) mapped_name = name_map_after_layer.get(name_after_layer) if isinstance(mapped_name, list): assert len(params) == len(mapped_name) for one in mapped_name: convert_names.append(f"model.layers.{layer_number}.{one}") else: assert len(params) == 1 convert_names.append(f"model.layers.{layer_number}.{mapped_name}") elif model_type == "vision_model": name_map_after_layer = { "self_attention.linear_proj.weight": "attn.proj.weight", "self_attention.linear_proj.bias": "attn.proj.bias", "self_attention.linear_qkv.layer_norm_weight": "norm1.weight", } name_after_layer = ".".join(name.split(".")[-3:]) mapped_name = name_map_after_layer.get(name_after_layer, None) if mapped_name is None: assert "linear_qkv" in name_after_layer assert len(params) == 3 new_param = torch.cat(params, dim=0) params = [new_param] if "bias" in name_after_layer: convert_names.append(f"visual.blocks.{layer_number}.attn.qkv.bias") else: convert_names.append(f"visual.blocks.{layer_number}.attn.qkv.weight") else: assert len(params) == 1 convert_names.append(f"visual.blocks.{layer_number}.{mapped_name}") else: raise NotImplementedError(f"Unsupported model type: {model_type}") return convert_names, params def _convert_mlp_param(self, name: str, params: list[torch.Tensor]) -> tuple[list[str], list[torch.Tensor]]: model_type, _, _, layer_number = name.split(".")[:4] convert_names = [] if model_type == "language_model": name_map_after_layer = { "mlp.linear_fc1.weight": ["mlp.gate_proj.weight", "mlp.up_proj.weight"], "mlp.linear_fc1.bias": ["mlp.gate_proj.bias", "mlp.up_proj.bias"], "mlp.linear_fc2.weight": "mlp.down_proj.weight", "mlp.linear_fc2.bias": "mlp.down_proj.bias", "mlp.linear_fc1.layer_norm_weight": "post_attention_layernorm.weight", } name_after_layer = ".".join(name.split(".")[-3:]) mapped_name = name_map_after_layer.get(name_after_layer) if isinstance(mapped_name, list): assert len(params) == len(mapped_name) for one in mapped_name: convert_names.append(f"model.layers.{layer_number}.{one}") else: assert len(params) == 1 convert_names.append(f"model.layers.{layer_number}.{mapped_name}") elif model_type == "vision_model": name_map_after_layer = { "mlp.linear_fc1.weight": ["mlp.gate_proj.weight", "mlp.up_proj.weight"], "mlp.linear_fc1.bias": ["mlp.gate_proj.bias", "mlp.up_proj.bias"], "mlp.linear_fc2.weight": "mlp.down_proj.weight", "mlp.linear_fc2.bias": "mlp.down_proj.bias", "mlp.linear_fc1.layer_norm_weight": "norm2.weight", } name_after_layer = ".".join(name.split(".")[-3:]) mapped_name = name_map_after_layer.get(name_after_layer) if isinstance(mapped_name, list): assert len(params) == len(mapped_name) for one 
in mapped_name: convert_names.append(f"visual.blocks.{layer_number}.{one}") else: assert len(params) == 1 convert_names.append(f"visual.blocks.{layer_number}.{mapped_name}") else: raise NotImplementedError(f"Unsupported model type: {model_type}") return convert_names, params class McoreToHFWeightConverterDpskv3(McoreToHFWeightConverterBase): def _convert_attention_param(self, name: str, params: list[torch.Tensor]) -> tuple[list[str], list[torch.Tensor]]: # mcore # 'decoder.layers.0.input_layernorm.weight' # 'decoder.layers.0.self_attention.linear_proj.weight' # 'decoder.layers.0.self_attention.linear_q_proj.weight' # 'decoder.layers.0.self_attention.linear_kv_down_proj.weight' # 'decoder.layers.0.self_attention.linear_kv_up_proj.layer_norm_weight' # 'decoder.layers.0.self_attention.linear_kv_up_proj.weight' # 'decoder.layers.0.self_attention.linear_q_down_proj.weight' # 'decoder.layers.0.self_attention.linear_q_up_proj.weight' # 'decoder.layers.0.self_attention.linear_q_up_proj.layer_norm_weight' # hf # 'model.layers.0.input_layernorm.weight' # 'model.layers.0.self_attn.o_proj.weight' # 'model.layers.0.self_attn.q_proj.weight' # 'model.layers.0.self_attn.kv_a_proj_with_mqa.weight' # 'model.layers.0.self_attn.kv_a_layernorm.weight' # 'model.layers.0.self_attn.kv_b_proj.weight' # 'model.layers.0.self_attn.q_a_proj.weight' # 'model.layers.0.self_attn.q_b_proj.weight' # 'model.layers.0.self_attn.q_a_layernorm.weight' name_map_after_layer = { "input_layernorm.weight": "input_layernorm.weight", "self_attention.linear_proj.weight": "self_attn.o_proj.weight", "self_attention.linear_q_proj.weight": "self_attn.q_proj.weight", "self_attention.linear_kv_down_proj.weight": "self_attn.kv_a_proj_with_mqa.weight", "self_attention.linear_kv_up_proj.layer_norm_weight": "self_attn.kv_a_layernorm.weight", "self_attention.linear_kv_up_proj.weight": "self_attn.kv_b_proj.weight", "self_attention.linear_q_down_proj.weight": "self_attn.q_a_proj.weight", "self_attention.linear_q_up_proj.weight": "self_attn.q_b_proj.weight", "self_attention.linear_q_up_proj.layer_norm_weight": "self_attn.q_a_layernorm.weight", } assert len(params) == 1 convert_names = [] layer_number = name.split(".")[2] name_after_layer = name.split(f".{layer_number}.")[1] convert_names.append(f"model.layers.{layer_number}.{name_map_after_layer[name_after_layer]}") return convert_names, params def _convert_mlp_param(self, name: str, params: list[torch.Tensor]) -> tuple[list[str], list[torch.Tensor]]: # mcore dense # 'decoder.layers.0.mlp.linear_fc1.layer_norm_weight' # 'decoder.layers.0.mlp.linear_fc2.weight' # 'decoder.layers.0.mlp.linear_fc1.weight' # --- # 'decoder.layers.1.mlp.shared_experts.linear_fc1.weight' # --- # 'decoder.layers.1.mlp.shared_experts.linear_fc2.weight' # hf dense # 'model.layers.0.post_attention_layernorm.weight' # 'model.layers.0.mlp.down_proj.weight' # 'model.layers.0.mlp.gate_proj.weight' # 'model.layers.0.mlp.up_proj.weight' # 'model.layers.1.mlp.shared_experts.gate_proj.weight' # 'model.layers.1.mlp.shared_experts.up_proj.weight' # 'model.layers.1.mlp.shared_experts.down_proj.weight' # mcore moe # 'decoder.layers.1.pre_mlp_layernorm.weight' # 'decoder.layers.1.mlp.router.weight' # 'decoder.layers.1.mlp.router.expert_bias' # 'decoder.layers.1.mlp.experts.linear_fc1.weight0' # --- # 'decoder.layers.1.mlp.experts.linear_fc2.weight0' # hf moe # 'model.layers.1.post_attention_layernorm.weight' # 'model.layers.1.mlp.gate.weight' # 'model.layers.1.mlp.gate.e_score_correction_bias' # 
'model.layers.1.mlp.experts.0.gate_proj.weight' # 'model.layers.1.mlp.experts.0.up_proj.weight' # 'model.layers.1.mlp.experts.0.down_proj.weight' name_map_after_layer = { "mlp.linear_fc1.layer_norm_weight": "post_attention_layernorm.weight", "mlp.linear_fc2.weight": "mlp.down_proj.weight", "mlp.shared_experts.linear_fc2.weight": "mlp.shared_experts.down_proj.weight", "mlp.linear_fc1.weight": ["mlp.gate_proj.weight", "mlp.up_proj.weight"], "mlp.shared_experts.linear_fc1.weight": [ "mlp.shared_experts.gate_proj.weight", "mlp.shared_experts.up_proj.weight", ], "pre_mlp_layernorm.weight": "post_attention_layernorm.weight", "mlp.router.weight": "mlp.gate.weight", "mlp.router.expert_bias": "mlp.gate.e_score_correction_bias", } convert_names = [] layer_number = name.split(".")[2] name_after_layer = name.split(f".{layer_number}.")[1] if name_after_layer in name_map_after_layer: mapped_name = name_map_after_layer[name_after_layer] if isinstance(mapped_name, list): assert len(params) == len(mapped_name) for one in mapped_name: convert_names.append(f"model.layers.{layer_number}.{one}") else: assert len(params) == 1 convert_names.append(f"model.layers.{layer_number}.{mapped_name}") else: if "mlp.experts.linear_fc1.weight" in name: expert_id = name.split("weight")[-1] convert_names.append(f"model.layers.{layer_number}.mlp.experts.{expert_id}.gate_proj.weight") convert_names.append(f"model.layers.{layer_number}.mlp.experts.{expert_id}.up_proj.weight") assert len(params) == 2 elif "mlp.experts.linear_fc2.weight" in name: expert_id = name.split("weight")[-1] convert_names.append(f"model.layers.{layer_number}.mlp.experts.{expert_id}.down_proj.weight") assert len(params) == 1 else: raise NotImplementedError(f"Unsupported parameter name: {name}") return convert_names, params def _convert_mtp_param(self, name: str, params: list[torch.Tensor]) -> tuple[list[str], list[torch.Tensor]]: assert self.mcore_config.mtp_num_layers == 1, "only support one mtp layer for now" assert self.mcore_config.num_layers == 61, "only support 61 layers for now" direct_name_mapping = { "mtp.layers.0.enorm.weight": "model.layers.61.enorm.weight", "mtp.layers.0.hnorm.weight": "model.layers.61.hnorm.weight", "mtp.layers.0.eh_proj.weight": "model.layers.61.eh_proj.weight", "mtp.layers.0.final_layernorm.weight": "model.layers.61.shared_head.norm.weight", } if name in direct_name_mapping: return [direct_name_mapping[name]], [params[0]] assert "mtp.layers.0.transformer_layer" in name, "only support transformer layer for now" # use proxy name to convert proxy_name = name.replace("mtp.layers.0.transformer_layer", "decoder.layers.61") if "self_attention" in proxy_name or "input_layernorm.weight" in proxy_name: convert_names, params = self._convert_attention_param(proxy_name, params) elif "mlp" in proxy_name: convert_names, params = self._convert_mlp_param(proxy_name, params) else: raise NotImplementedError(f"Unsupported parameter name: {name}") return convert_names, params def convert_param(self, name: str, params_one_group: list[torch.Tensor]) -> tuple[list[str], list[torch.Tensor]]: direct_name_mapping = { "embedding.word_embeddings.weight": "model.embed_tokens.weight", "decoder.final_layernorm.weight": "model.norm.weight", "output_layer.weight": "lm_head.weight", } if name in direct_name_mapping: return [direct_name_mapping[name]], [params_one_group[0]] if "mtp" in name: return self._convert_mtp_param(name, params_one_group) elif "self_attention" in name or "input_layernorm.weight" in name: return self._convert_attention_param(name, 
params_one_group) elif "mlp" in name: return self._convert_mlp_param(name, params_one_group) else: raise NotImplementedError(f"Unsupported parameter name: {name}") class McoreToHFWeightConverterMixtral(McoreToHFWeightConverterDense): def _convert_mlp_param(self, name: str, params: list[torch.Tensor]) -> tuple[list[str], list[torch.Tensor]]: # decoder.layers.0.mlp.router.weight # decoder.layers.0.mlp.experts.linear_fc1.weight0 - weight7 # decoder.layers.0.mlp.experts.linear_fc2.weight0 - weight7 layer_number = name.split(".")[2] convert_names = [] if "pre_mlp_layernorm" in name: convert_names.append(f"model.layers.{layer_number}.post_attention_layernorm.weight") elif "mlp.router.weight" in name: convert_names.append(f"model.layers.{layer_number}.block_sparse_moe.gate.weight") elif "mlp.experts.linear_fc1.weight" in name: expert_id = name.split("weight")[-1] convert_names.append(f"model.layers.{layer_number}.block_sparse_moe.experts.{expert_id}.w1.weight") convert_names.append(f"model.layers.{layer_number}.block_sparse_moe.experts.{expert_id}.w3.weight") elif "mlp.experts.linear_fc2.weight" in name: expert_id = name.split("weight")[-1] convert_names.append(f"model.layers.{layer_number}.block_sparse_moe.experts.{expert_id}.w2.weight") else: raise NotImplementedError(f"Unsupported parameter name: {name}") return convert_names, params class McoreToHFWeightConverterQwen3Moe(McoreToHFWeightConverterDense): def _convert_mlp_param(self, name: str, params: list[torch.Tensor]) -> tuple[list[str], list[torch.Tensor]]: # qwen3 moe no share expert # 'decoder.layers.0.pre_mlp_layernorm.weight', # 'decoder.layers.0.mlp.router.weight', # moe1 # 'decoder.layers.0.mlp.experts.linear_fc1.weight0', # 'decoder.layers.0.mlp.experts.linear_fc1.weight1', # 'decoder.layers.0.mlp.experts.linear_fc1.weight2', # 'decoder.layers.0.mlp.experts.linear_fc1.weight3', # moe2 # 'decoder.layers.0.mlp.experts.linear_fc2.weight0', # 'decoder.layers.0.mlp.experts.linear_fc2.weight1', layer_number = name.split(".")[2] convert_names = [] if "pre_mlp_layernorm" in name: convert_names.append(f"model.layers.{layer_number}.post_attention_layernorm.weight") assert len(params) == 1 elif "mlp.router.weight" in name: convert_names.append(f"model.layers.{layer_number}.mlp.gate.weight") assert len(params) == 1 elif "mlp.experts.linear_fc1" in name: # split gate_proj and up_proj expert_id = name.split("weight")[-1] convert_names.append(f"model.layers.{layer_number}.mlp.experts.{expert_id}.gate_proj.weight") convert_names.append(f"model.layers.{layer_number}.mlp.experts.{expert_id}.up_proj.weight") assert len(params) == 2 elif "mlp.experts.linear_fc2" in name: expert_id = name.split("weight")[-1] convert_names.append(f"model.layers.{layer_number}.mlp.experts.{expert_id}.down_proj.weight") assert len(params) == 1 else: raise NotImplementedError(f"Unsupported parameter name: {name}") return convert_names, params ================================================ FILE: verl_distillation/verl/models/qwen2/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
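All the MoE converters above share one convention for per-expert weights: mcore names them `...experts.linear_fc1.weight<expert_id>` / `...experts.linear_fc2.weight<expert_id>`, and the expert id is recovered with `name.split("weight")[-1]`. A quick standalone check of that mapping (toy name string, not read from a checkpoint):

```python
# Standalone check of the expert-id parsing convention used by the MoE
# weight converters above.
name = "decoder.layers.0.mlp.experts.linear_fc1.weight3"
layer_number = name.split(".")[2]        # '0'
expert_id = name.split("weight")[-1]     # '3'
targets = [
    f"model.layers.{layer_number}.mlp.experts.{expert_id}.gate_proj.weight",
    f"model.layers.{layer_number}.mlp.experts.{expert_id}.up_proj.weight",
]
print(targets)
```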
# See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: verl_distillation/verl/models/qwen2/megatron/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .modeling_qwen2_megatron import ( ParallelQwen2ForCausalLM, # rmpad with megatron ParallelQwen2ForCausalLMRmPad, # rmpad with megatron and pipeline parallelism ParallelQwen2ForCausalLMRmPadPP, ParallelQwen2ForValueRmPad, ParallelQwen2ForValueRmPadPP, # original model with megatron ParallelQwen2Model, ) __all__ = [ "ParallelQwen2ForCausalLM", "ParallelQwen2ForCausalLMRmPad", "ParallelQwen2ForCausalLMRmPadPP", "ParallelQwen2ForValueRmPad", "ParallelQwen2ForValueRmPadPP", "ParallelQwen2Model", ] ================================================ FILE: verl_distillation/verl/models/qwen2/megatron/checkpoint_utils/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: verl_distillation/verl/models/qwen2/megatron/checkpoint_utils/qwen2_loader.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
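
# NOTE: unlike qwen2_loader_depracated.py further below, this loader assumes
# every rank already holds the full merged HF state_dict, so each rank slices
# out its own tensor-parallel shard locally instead of broadcasting from rank 0.
# Illustrative sketch of that slicing, with made-up sizes (tp_size=2, tp_rank=1):
#
#     full_weight = torch.arange(8.0).reshape(4, 2)  # merged weight [rows=4, hidden=2]
#     shard = torch.chunk(full_weight, 2, dim=0)[1]  # rows 2..3 belong to tp_rank 1
#     param.data.copy_(shard)                        # what the _fetch_* helpers do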
import time

import torch
import torch.distributed as dist

from verl.utils.device import get_device_id, get_torch_device


def _megatron_calc_layer_map(config):
    """Calculate the mapping of global layer_idx to local layer_idx

    Returns:
        layer_map (Dict: int -> tuple(int, int, int)):
            mapping from the global layer index to a tuple of
            (pp_rank, virtual_pp_rank, layer_idx inside model)
    """
    from megatron.core import mpu

    pp_size = mpu.get_pipeline_model_parallel_world_size()
    virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1

    layer_map = dict()
    num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size
    assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers

    for pp_rank_idx in range(pp_size):
        for virtual_pp_rank_idx in range(virtual_pp_size):
            layer_offset = (
                virtual_pp_rank_idx * (config.num_hidden_layers // virtual_pp_size)
                + pp_rank_idx * num_layers_per_model
            )
            for layer_idx in range(num_layers_per_model):
                layer_map[layer_offset + layer_idx] = (
                    pp_rank_idx,
                    virtual_pp_rank_idx,
                    layer_idx,
                )
    return layer_map


def load_state_dict_to_megatron_qwen2(
    state_dict, wrapped_models, config, params_dtype, is_value_model=False, tie_word_embeddings=False
):
    """Load merged state_dict to sharded Megatron module in training."""
    from megatron.core import DistributedDataParallel as LocalDDP
    from megatron.core import mpu
    from megatron.core.transformer.module import Float16Module
    from torch.nn.parallel import DistributedDataParallel as torchDDP

    from verl.utils.logger import print_rank_0
    from verl.utils.megatron_utils import unwrap_model

    start_time = time.time()

    def _get_gpt_model(model):
        return model

    def fetch_params(module):
        # keep all data-parallel replicas in sync by broadcasting each param
        # from the DP source rank (torch.distributed has no "fetch" collective)
        for param in module.parameters():
            torch.distributed.broadcast(
                param.data, src=mpu.get_data_parallel_src_rank(), group=mpu.get_data_parallel_group()
            )

    dp_rank = mpu.get_data_parallel_rank()
    pp_rank = mpu.get_pipeline_model_parallel_rank()
    pp_size = mpu.get_pipeline_model_parallel_world_size()
    virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1
    mp_group = mpu.get_model_parallel_group()

    if torch.distributed.get_rank() == 0:
        assert mp_group.rank() == 0, f"mp_rank:[{mp_group.rank}] != 0 on rank #0"
        assert pp_rank == 0, f"pp_rank:[{pp_rank}] != 0 on rank #0"
        assert dp_rank == 0, f"dp_rank:[{dp_rank}] != 0 on rank #0"

    if not isinstance(wrapped_models, list | tuple):
        wrapped_models = list(wrapped_models)

    assert len(wrapped_models) == virtual_pp_size
    num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size
    assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers, (
        f"num_layers_per_model: {num_layers_per_model} * pp_size: {pp_size} * virtual_pp_size: "
        f"{virtual_pp_size} != config.num_hidden_layers: {config.num_hidden_layers}"
    )

    models = [None] * len(wrapped_models)

    for i, wrapped_model in enumerate(wrapped_models):
        models[i] = unwrap_model(wrapped_model, (torchDDP, LocalDDP, Float16Module))
        gpt_model_module = _get_gpt_model(models[i])
        assert len(gpt_model_module.model.layers) == num_layers_per_model

    def _fetch_tensor(tensor, name) -> torch.Tensor:
        """fetch tensor"""
        nonlocal state_dict
        if tensor is not None:
            tensor = tensor.data.copy_(state_dict[name], non_blocking=True)

    def _fetch_tp_shard_tensor_vocab(tensor, name, chunk_dim=0, mutate_func=None) -> torch.Tensor:
        """fetch tensor in tp shards"""
        nonlocal state_dict
        tp_rank = mpu.get_tensor_model_parallel_rank()
        tp_size = mpu.get_tensor_model_parallel_world_size()
        if name in state_dict:
            full_weight = state_dict[name]
            if mutate_func is not None:
                full_weight = mutate_func(full_weight)
            tensor_chunk = torch.chunk(full_weight, tp_size, dim=chunk_dim)
            if tensor is not None:
                tensor = tensor.data.copy_(tensor_chunk[tp_rank], non_blocking=True)
        else:
            print(f"tp_shard tensor:[{name}] not in state_dict, skip loading")

    def _fetch_tp_shard_tensor(tensor, name, chunk_dim=0, mutate_func=None) -> torch.Tensor:
        """fetch tensor in tp shards"""
        nonlocal state_dict
        tp_rank = mpu.get_tensor_model_parallel_rank()
        tp_size = mpu.get_tensor_model_parallel_world_size()
        if name in state_dict:
            full_weight = state_dict[name]
            if mutate_func is not None:
                full_weight = mutate_func(full_weight)
            tensor_chunk = torch.chunk(full_weight, tp_size, dim=chunk_dim)
            if tensor is not None:
                tensor = tensor.data.copy_(tensor_chunk[tp_rank], non_blocking=True)
        else:
            print(f"tp_shard tensor:[{name}] not in state_dict, skip loading")

    def _fetch_tp_shard_tensor_gate_up(tensor, gate_name, up_name) -> torch.Tensor:
        """fetch gate_up tensor in tp shards"""
        nonlocal state_dict
        nonlocal mp_group
        tp_rank = mpu.get_tensor_model_parallel_rank()
        tp_size = mpu.get_tensor_model_parallel_world_size()
        if gate_name in state_dict and up_name in state_dict:
            gate_weight = state_dict[gate_name]
            up_weight = state_dict[up_name]
            # interleave the gate and up shards so each TP rank receives its
            # own [gate_tp; up_tp] block of the fused gate_up projection
            new_gate_up_weight = torch.empty(
                config.intermediate_size * 2, config.hidden_size, dtype=params_dtype, device=get_device_id()
            )
            for i in range(tp_size):
                intermediate_size_tp = config.intermediate_size // tp_size
                gate_weight_tp = gate_weight[i * intermediate_size_tp : (i + 1) * intermediate_size_tp]
                up_weight_tp = up_weight[i * intermediate_size_tp : (i + 1) * intermediate_size_tp]
                new_gate_up_weight[intermediate_size_tp * 2 * i : intermediate_size_tp * 2 * (i + 1)].copy_(
                    torch.cat([gate_weight_tp, up_weight_tp], dim=0)
                )

            tensor_chunk = torch.chunk(new_gate_up_weight, tp_size, dim=0)
            if tensor is not None:
                tensor = tensor.data.copy_(tensor_chunk[tp_rank], non_blocking=True)
        else:
            print(f"tp_shard tensor:[{gate_name}, {up_name}] not in state_dict, skip loading")

    def _fetch_tp_shard_tensor_qkv(tensor, q_name, k_name, v_name, bias=False) -> torch.Tensor:
        """fetch tensor in tp shards across mp_group"""
        nonlocal state_dict
        nonlocal mp_group
        tp_rank = mpu.get_tensor_model_parallel_rank()
        tp_size = mpu.get_tensor_model_parallel_world_size()
        assert q_name in state_dict and k_name in state_dict and v_name in state_dict
        full_weight_q = state_dict[q_name]
        full_weight_k = state_dict[k_name]
        full_weight_v = state_dict[v_name]

        hidden_size_per_head = config.hidden_size // config.num_attention_heads

        if config.num_key_value_heads >= tp_size:
            q_size_tp = config.hidden_size // tp_size
            kv_size_tp = hidden_size_per_head * config.num_key_value_heads // tp_size
            total_size = q_size_tp + 2 * kv_size_tp
            if not bias:
                new_weight_qkv = torch.empty(
                    total_size * tp_size, config.hidden_size, dtype=params_dtype, device=get_device_id()
                )
            else:
                new_weight_qkv = torch.empty(total_size * tp_size, dtype=params_dtype, device=get_device_id())
            for i in range(tp_size):
                q_part = full_weight_q[i * q_size_tp : (i + 1) * q_size_tp]
                k_part = full_weight_k[i * kv_size_tp : (i + 1) * kv_size_tp]
                v_part = full_weight_v[i * kv_size_tp : (i + 1) * kv_size_tp]
                new_weight_qkv[i * total_size : (i + 1) * total_size].copy_(torch.cat([q_part, k_part, v_part], dim=0))
        else:
            q_size_tp = config.hidden_size // tp_size
            kv_size_tp = hidden_size_per_head
            total_size = q_size_tp + 2 * kv_size_tp
            if not bias:
                new_weight_qkv = torch.empty(
                    total_size * tp_size, config.hidden_size, dtype=params_dtype, device=get_device_id()
                )
            else:
                new_weight_qkv = torch.empty(total_size * tp_size, dtype=params_dtype, device=get_device_id())
            for i in range(tp_size):
                q_part = full_weight_q[i * q_size_tp : (i + 1) * q_size_tp]
                start_idx = i * config.num_key_value_heads // tp_size * hidden_size_per_head
                end_idx = (i * config.num_key_value_heads // tp_size + 1) * hidden_size_per_head
                k_part = full_weight_k[start_idx:end_idx]
                v_part = full_weight_v[start_idx:end_idx]
                new_weight_qkv[i * total_size : (i + 1) * total_size].copy_(torch.cat([q_part, k_part, v_part], dim=0))

        tensor_chunk = torch.chunk(new_weight_qkv, tp_size, dim=0)
        if tensor is not None:
            tensor = tensor.data.copy_(tensor_chunk[tp_rank], non_blocking=True)

    # Embeddings
    # -------------------
    print_rank_0("loading embeddings...")
    gpt_model_module = _get_gpt_model(models[0])
    if pp_rank == 0:
        embed_tokens_weight = gpt_model_module.model.embed_tokens.weight
        _fetch_tp_shard_tensor_vocab(embed_tokens_weight, "model.embed_tokens.weight")

    # Transformer layers
    # -------------------
    layer_map = _megatron_calc_layer_map(config)
    pp_rank = mpu.get_pipeline_model_parallel_rank()
    pp_size = mpu.get_pipeline_model_parallel_world_size()
    num_layer_per_pp = config.num_hidden_layers // pp_size
    vpp_size = mpu.get_virtual_pipeline_model_parallel_world_size()

    layer_list = []
    if vpp_size is not None:
        for vpp_rank in range(vpp_size):
            num_layer_vpp_chunk = num_layer_per_pp // vpp_size
            num_layer_this_model = num_layer_vpp_chunk
            offset = vpp_rank * (
                config.num_hidden_layers // mpu.get_virtual_pipeline_model_parallel_world_size()
            ) + (mpu.get_pipeline_model_parallel_rank() * num_layer_vpp_chunk)
            layer_list.extend(list(range(offset, offset + num_layer_this_model)))
    else:
        num_layer_this_model = num_layer_per_pp
        offset = pp_rank * num_layer_per_pp
        layer_list.extend(list(range(offset, offset + num_layer_this_model)))

    for layer in layer_list:
        print(f"{torch.distributed.get_rank()} loading layer #{layer}...")
        layer_name = f"model.layers.{layer}"
        dst_pp_rank, dst_virtual_pp_rank, dst_layer_idx = layer_map[layer]
        print(
            f"{torch.distributed.get_rank()} offset: {offset}, num_layer_this_model: {num_layer_this_model}, "
            f"layer_name: {layer_name}, layer_map[layer]: {layer_map[layer]}"
        )

        gpt_model_module = _get_gpt_model(models[dst_virtual_pp_rank])
        sync_layer = gpt_model_module.model.layers[dst_layer_idx]

        _fetch_tensor(
            sync_layer.input_layernorm.weight if dst_pp_rank == pp_rank else None,
            f"{layer_name}.input_layernorm.weight",
        )

        _fetch_tp_shard_tensor_qkv(
            sync_layer.self_attn.qkv_proj.weight if dst_pp_rank == pp_rank else None,
            f"{layer_name}.self_attn.q_proj.weight",
            f"{layer_name}.self_attn.k_proj.weight",
            f"{layer_name}.self_attn.v_proj.weight",
        )

        _fetch_tp_shard_tensor_qkv(
            sync_layer.self_attn.qkv_proj.bias if dst_pp_rank == pp_rank else None,
            f"{layer_name}.self_attn.q_proj.bias",
            f"{layer_name}.self_attn.k_proj.bias",
            f"{layer_name}.self_attn.v_proj.bias",
            bias=True,
        )

        _fetch_tp_shard_tensor(
            sync_layer.self_attn.o_proj.weight if dst_pp_rank == pp_rank else None,
            f"{layer_name}.self_attn.o_proj.weight",
            chunk_dim=1,
        )

        _fetch_tensor(
            sync_layer.post_attention_layernorm.weight if dst_pp_rank == pp_rank else None,
            f"{layer_name}.post_attention_layernorm.weight",
        )

        _fetch_tp_shard_tensor_gate_up(
            sync_layer.mlp.gate_up_proj.weight if dst_pp_rank == pp_rank else None,
            f"{layer_name}.mlp.gate_proj.weight",
            f"{layer_name}.mlp.up_proj.weight",
        )

        _fetch_tp_shard_tensor(
            sync_layer.mlp.down_proj.weight if dst_pp_rank == pp_rank else None,
            f"{layer_name}.mlp.down_proj.weight",
            chunk_dim=1,
        )

    # Final Layernorm
    # -------------------
    print_rank_0("loading final layernorm...")
    gpt_model_module = _get_gpt_model(models[-1])
    _fetch_tensor(
        getattr(gpt_model_module.model.norm, "weight", None),
        "model.norm.weight",
    )

    if tie_word_embeddings:
        print_rank_0("tie_word_embeddings skip load lm_head")
    else:
        print_rank_0("loading lm_head...")
        if pp_rank + 1 == pp_size:
            lm_head_weight = gpt_model_module.lm_head.weight

            if is_value_model:
                if "lm_head.weight" in state_dict and state_dict["lm_head.weight"].shape[0] == 1:
                    _fetch_tensor(lm_head_weight, "lm_head.weight")
                    print_rank_0("load lm_head from value_head weight")
                elif "reward_head.weight" in state_dict and state_dict["reward_head.weight"].shape[0] == 1:
                    _fetch_tensor(lm_head_weight, "reward_head.weight")
                    print_rank_0("load lm_head from value_head weight")
                else:
                    _fetch_tensor(None, "lm_head.weight")
                    print_rank_0("fail to match lm_head in value_model")
            else:
                _fetch_tp_shard_tensor(lm_head_weight, "lm_head.weight")

    dist.barrier()
    get_torch_device().empty_cache()
    print_rank_0(f"loading megatron ckpt done, time elapsed {time.time() - start_time}s")


================================================
FILE: verl_distillation/verl/models/qwen2/megatron/checkpoint_utils/qwen2_loader_depracated.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
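
# NOTE: this deprecated variant assumes only global rank 0 holds the merged
# state_dict. For every tensor it first broadcasts the (possibly None) chunk
# shape via dist.broadcast_object_list so all ranks agree on whether to skip,
# then rank 0 broadcasts each TP chunk in turn and a rank keeps only the chunk
# matching its own tp_rank; finally broadcast_params replicates the loaded
# weights across the data-parallel group.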
import time import torch import torch.distributed as dist from verl.utils.device import get_device_id, get_torch_device def _megatron_calc_layer_map(config): """Calculate the mapping of global layer_idx to local layer_idx Returns: layer_map (Dict: int -> tuple(int, int, int)): mapping from the global layer index to a tuple of (pp_rank, virtual_pp_rank, layer_idx inside model) """ from megatron.core import mpu pp_size = mpu.get_pipeline_model_parallel_world_size() virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1 layer_map = dict() num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers for pp_rank_idx in range(pp_size): for virtual_pp_rank_idx in range(virtual_pp_size): layer_offset = ( virtual_pp_rank_idx * (config.num_hidden_layers // virtual_pp_size) + pp_rank_idx * num_layers_per_model ) for layer_idx in range(num_layers_per_model): layer_map[layer_offset + layer_idx] = ( pp_rank_idx, virtual_pp_rank_idx, layer_idx, ) return layer_map def load_state_dict_to_megatron_qwen2( state_dict, wrapped_models, config, params_dtype, is_value_model=False, tie_word_embeddings=False ): """Load merged state_dict to sharded Megatron module in training.""" from megatron.core import DistributedDataParallel as LocalDDP from megatron.core import mpu from megatron.core.transformer.module import Float16Module from torch.nn.parallel import DistributedDataParallel as torchDDP from verl.utils.logger import print_rank_0 from verl.utils.megatron_utils import unwrap_model start_time = time.time() def _get_gpt_model(model): return model def broadcast_params(module): for param in module.parameters(): torch.distributed.broadcast( param.data, src=mpu.get_data_parallel_src_rank(), group=mpu.get_data_parallel_group() ) dp_rank = mpu.get_data_parallel_rank() pp_rank = mpu.get_pipeline_model_parallel_rank() pp_size = mpu.get_pipeline_model_parallel_world_size() virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1 mp_group = mpu.get_model_parallel_group() if torch.distributed.get_rank() == 0: assert mp_group.rank() == 0, f"mp_rank:[{mp_group.rank}] != 0 on rank #0" assert pp_rank == 0, f"pp_rank:[{pp_rank}] != 0 on rank #0" assert dp_rank == 0, f"dp_rank:[{dp_rank}] != 0 on rank #0" if not isinstance(wrapped_models, list | tuple): wrapped_models = list(wrapped_models) assert len(wrapped_models) == virtual_pp_size num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers, ( f"num_layers_per_model: {num_layers_per_model} * pp_size: {pp_size} * virtual_pp_size: " f"{virtual_pp_size} != config.num_hidden_layers: {config.num_hidden_layers}" ) models = [None] * len(wrapped_models) for i, wrapped_model in enumerate(wrapped_models): models[i] = unwrap_model(wrapped_model, (torchDDP, LocalDDP, Float16Module)) gpt_model_module = _get_gpt_model(models[i]) assert len(gpt_model_module.model.layers) == num_layers_per_model def _broadcast_tensor(tensor, name) -> torch.Tensor: """broadcast tensor from rank0 across mp_group""" nonlocal state_dict nonlocal mp_group if torch.distributed.get_rank() == 0: if name in state_dict: weight = state_dict[name] tensor_shape = weight.shape else: tensor_shape = None else: weight = None tensor_shape = None obj_list = [tensor_shape] dist.broadcast_object_list(obj_list, src=0, group=mp_group) tensor_shape = obj_list[0] if tensor_shape is None: # all 
or none ranks in the mp_group should reach here print_rank_0(f"tensor:[{name}] not in state_dict, skip load") return if tensor is None: tensor = torch.empty( tensor_shape, dtype=params_dtype, device=get_device_id(), requires_grad=False, ) if torch.distributed.get_rank() == 0: tensor.data.copy_(weight) dist.broadcast(tensor, src=0, group=mp_group) def _broadcast_tp_shard_tensor_vocab(tensor, name, chunk_dim=0, mutate_func=None) -> torch.Tensor: """broadcast tensor in tp shards across mp_group""" nonlocal state_dict nonlocal mp_group tp_rank = mpu.get_tensor_model_parallel_rank() tp_size = mpu.get_tensor_model_parallel_world_size() if torch.distributed.get_rank() == 0: if name in state_dict: full_weight = state_dict[name] if mutate_func is not None: full_weight = mutate_func(full_weight) tensor_chunk = torch.chunk(full_weight, tp_size, dim=chunk_dim) chunk_shape = tensor_chunk[0].shape else: chunk_shape = None else: chunk_shape = None obj_list = [chunk_shape] dist.broadcast_object_list(obj_list, src=0, group=mp_group) chunk_shape = obj_list[0] if chunk_shape is None: # all or none ranks in the mp_group should reach here print_rank_0(f"tp_shard tensor:[{name}] not in state_dict, skip loading") return if tensor is None: sync_tensor = torch.empty( chunk_shape, dtype=params_dtype, device=get_device_id(), requires_grad=False, ) else: assert tensor.shape == chunk_shape, ( f"rank #{torch.distributed.get_rank()} tensor {name} shape {tensor.shape} != {chunk_shape}" ) sync_tensor = torch.empty_like(tensor, device=get_device_id(), requires_grad=False) for i in range(tp_size): if torch.distributed.get_rank() == 0: sync_tensor.data.copy_(tensor_chunk[i]) dist.broadcast(sync_tensor, src=0, group=mp_group) if (i == tp_rank) and (tensor is not None): tensor.data.copy_(sync_tensor) def _broadcast_tp_shard_tensor(tensor, name, chunk_dim=0, mutate_func=None) -> torch.Tensor: """broadcast tensor in tp shards across mp_group""" nonlocal state_dict nonlocal mp_group tp_rank = mpu.get_tensor_model_parallel_rank() tp_size = mpu.get_tensor_model_parallel_world_size() if torch.distributed.get_rank() == 0: if name in state_dict: full_weight = state_dict[name] if mutate_func is not None: full_weight = mutate_func(full_weight) tensor_chunk = torch.chunk(full_weight, tp_size, dim=chunk_dim) chunk_shape = tensor_chunk[0].shape else: chunk_shape = None else: chunk_shape = None obj_list = [chunk_shape] dist.broadcast_object_list(obj_list, src=0, group=mp_group) chunk_shape = obj_list[0] if chunk_shape is None: # all or none ranks in the mp_group should reach here print_rank_0(f"tp_shard tensor:[{name}] not in state_dict, skip loading") return if tensor is None: sync_tensor = torch.empty( chunk_shape, dtype=params_dtype, device=get_device_id(), requires_grad=False, ) else: assert tensor.shape == chunk_shape, ( f"rank #{torch.distributed.get_rank()} tensor {name} shape {tensor.shape} != {chunk_shape}" ) sync_tensor = torch.empty_like(tensor, device=get_device_id(), requires_grad=False) for i in range(tp_size): if torch.distributed.get_rank() == 0: sync_tensor.data.copy_(tensor_chunk[i]) dist.broadcast(sync_tensor, src=0, group=mp_group) if (i == tp_rank) and (tensor is not None): tensor.data.copy_(sync_tensor) def _broadcast_tp_shard_tensor_gate_up(tensor, gate_name, up_name) -> torch.Tensor: """broadcast tensor in tp shards across mp_group""" nonlocal state_dict nonlocal mp_group tp_rank = mpu.get_tensor_model_parallel_rank() tp_size = mpu.get_tensor_model_parallel_world_size() if torch.distributed.get_rank() == 0: 
gate_weight = state_dict[gate_name] up_weight = state_dict[up_name] new_gate_up_weight = torch.empty( config.intermediate_size * 2, config.hidden_size, dtype=params_dtype, device=get_device_id() ) for i in range(tp_size): intermediate_size_tp = config.intermediate_size // tp_size gate_weight_tp = gate_weight[i * intermediate_size_tp : (i + 1) * intermediate_size_tp] up_weight_tp = up_weight[i * intermediate_size_tp : (i + 1) * intermediate_size_tp] new_gate_up_weight[intermediate_size_tp * 2 * i : intermediate_size_tp * 2 * (i + 1)].copy_( torch.cat([gate_weight_tp, up_weight_tp], dim=0) ) tensor_chunk = torch.chunk(new_gate_up_weight, tp_size, dim=0) chunk_shape = tensor_chunk[0].shape else: chunk_shape = None obj_list = [chunk_shape] dist.broadcast_object_list(obj_list, src=0, group=mp_group) chunk_shape = obj_list[0] if chunk_shape is None: # all or none ranks in the mp_group should reach here print_rank_0(f"tp_shard tensor:[{gate_name, up_name}] not in state_dict, skip loading") return if tensor is None: sync_tensor = torch.empty( chunk_shape, dtype=params_dtype, device=get_device_id(), requires_grad=False, ) else: assert tensor.shape == chunk_shape, ( f"rank #{torch.distributed.get_rank() == 0:} tensor {gate_name, up_name} shape " f"{tensor.shape} != {chunk_shape}" ) sync_tensor = torch.empty_like(tensor, device=get_device_id(), requires_grad=False) for i in range(tp_size): if torch.distributed.get_rank() == 0: sync_tensor.data.copy_(tensor_chunk[i]) dist.broadcast(sync_tensor, src=0, group=mp_group) if (i == tp_rank) and (tensor is not None): tensor.data.copy_(sync_tensor) def _broadcast_tp_shard_tensor_qkv(tensor, q_name, k_name, v_name, bias=False) -> torch.Tensor: """broadcast tensor in tp shards across mp_group""" nonlocal state_dict nonlocal mp_group tp_rank = mpu.get_tensor_model_parallel_rank() tp_size = mpu.get_tensor_model_parallel_world_size() if torch.distributed.get_rank() == 0: assert q_name in state_dict and k_name in state_dict and v_name in state_dict full_weight_q = state_dict[q_name] full_weight_k = state_dict[k_name] full_weight_v = state_dict[v_name] hidden_size_per_head = config.hidden_size // config.num_attention_heads if config.num_key_value_heads >= tp_size: q_size_tp = config.hidden_size // tp_size kv_size_tp = hidden_size_per_head * config.num_key_value_heads // tp_size total_size = q_size_tp + 2 * kv_size_tp if not bias: new_weight_qkv = torch.empty( total_size * tp_size, config.hidden_size, dtype=params_dtype, device=get_device_id() ) else: new_weight_qkv = torch.empty(total_size * tp_size, dtype=params_dtype, device=get_device_id()) for i in range(tp_size): q_part = full_weight_q[i * q_size_tp : (i + 1) * q_size_tp] k_part = full_weight_k[i * kv_size_tp : (i + 1) * kv_size_tp] v_part = full_weight_v[i * kv_size_tp : (i + 1) * kv_size_tp] new_weight_qkv[i * total_size : (i + 1) * total_size].copy_( torch.cat([q_part, k_part, v_part], dim=0) ) else: q_size_tp = config.hidden_size // tp_size kv_size_tp = hidden_size_per_head total_size = q_size_tp + 2 * kv_size_tp if not bias: new_weight_qkv = torch.empty( total_size * tp_size, config.hidden_size, dtype=params_dtype, device=get_device_id() ) else: new_weight_qkv = torch.empty(total_size * tp_size, dtype=params_dtype, device=get_device_id()) for i in range(tp_size): q_part = full_weight_q[i * q_size_tp : (i + 1) * q_size_tp] start_idx = i * config.num_key_value_heads // tp_size * hidden_size_per_head end_idx = (i * config.num_key_value_heads // tp_size + 1) * hidden_size_per_head k_part = 
full_weight_k[start_idx:end_idx] v_part = full_weight_v[start_idx:end_idx] new_weight_qkv[i * total_size : (i + 1) * total_size].copy_( torch.cat([q_part, k_part, v_part], dim=0) ) tensor_chunk = torch.chunk(new_weight_qkv, tp_size, dim=0) chunk_shape = tensor_chunk[0].shape else: chunk_shape = None obj_list = [chunk_shape] dist.broadcast_object_list(obj_list, src=0, group=mp_group) chunk_shape = obj_list[0] if chunk_shape is None: # all or none ranks in the mp_group should reach here print_rank_0(f"tp_shard tensor:[{q_name, k_name, v_name}] not in state_dict, skip loading") return if tensor is None: sync_tensor = torch.empty( chunk_shape, dtype=params_dtype, device=get_device_id(), requires_grad=False, ) else: assert tensor.shape == chunk_shape, ( f"rank #{torch.distributed.get_rank()} tensor {q_name} shape {tensor.shape} != {chunk_shape}" ) sync_tensor = torch.empty_like(tensor, device=get_device_id(), requires_grad=False) for i in range(tp_size): if torch.distributed.get_rank() == 0: sync_tensor.data.copy_(tensor_chunk[i]) dist.broadcast(sync_tensor, src=0, group=mp_group) if (i == tp_rank) and (tensor is not None): tensor.data.copy_(sync_tensor) if dp_rank == 0: # Embeddings # ------------------- print_rank_0("loading embeddings...") gpt_model_module = _get_gpt_model(models[0]) embed_tokens_weight = None if pp_rank == 0: embed_tokens_weight = gpt_model_module.model.embed_tokens.weight _broadcast_tp_shard_tensor_vocab(embed_tokens_weight, "model.embed_tokens.weight") # Transformer layers # ------------------- layer_map = _megatron_calc_layer_map(config) for layer in range(config.num_hidden_layers): print_rank_0(f"loading layer #{layer}...") layer_name = f"model.layers.{layer}" dst_pp_rank, dst_virtual_pp_rank, dst_layer_idx = layer_map[layer] gpt_model_module = _get_gpt_model(models[dst_virtual_pp_rank]) sync_layer = gpt_model_module.model.layers[dst_layer_idx] _broadcast_tensor( sync_layer.input_layernorm.weight if dst_pp_rank == pp_rank else None, f"{layer_name}.input_layernorm.weight", ) _broadcast_tp_shard_tensor_qkv( sync_layer.self_attn.qkv_proj.weight if dst_pp_rank == pp_rank else None, f"{layer_name}.self_attn.q_proj.weight", f"{layer_name}.self_attn.k_proj.weight", f"{layer_name}.self_attn.v_proj.weight", ) _broadcast_tp_shard_tensor_qkv( sync_layer.self_attn.qkv_proj.bias if dst_pp_rank == pp_rank else None, f"{layer_name}.self_attn.q_proj.bias", f"{layer_name}.self_attn.k_proj.bias", f"{layer_name}.self_attn.v_proj.bias", bias=True, ) _broadcast_tp_shard_tensor( sync_layer.self_attn.o_proj.weight if dst_pp_rank == pp_rank else None, f"{layer_name}.self_attn.o_proj.weight", chunk_dim=1, ) _broadcast_tensor( sync_layer.post_attention_layernorm.weight if dst_pp_rank == pp_rank else None, f"{layer_name}.post_attention_layernorm.weight", ) _broadcast_tp_shard_tensor_gate_up( sync_layer.mlp.gate_up_proj.weight if dst_pp_rank == pp_rank else None, f"{layer_name}.mlp.gate_proj.weight", f"{layer_name}.mlp.up_proj.weight", ) _broadcast_tp_shard_tensor( sync_layer.mlp.down_proj.weight if dst_pp_rank == pp_rank else None, f"{layer_name}.mlp.down_proj.weight", chunk_dim=1, ) # Final Layernorm # ------------------- print_rank_0("loading final layernorm...") gpt_model_module = _get_gpt_model(models[-1]) _broadcast_tensor( getattr(gpt_model_module.model.norm, "weight", None), "model.norm.weight", ) if tie_word_embeddings: print_rank_0("tie_word_embeddings skip load lm_head") else: print_rank_0("loading lm_head...") lm_head_weight = None if pp_rank + 1 == pp_size: lm_head_weight = 
gpt_model_module.lm_head.weight if is_value_model: if "lm_head.weight" in state_dict and state_dict["lm_head.weight"].shape[0] == 1: _broadcast_tensor(lm_head_weight, "lm_head.weight") print_rank_0("load lm_head from value_head weight") elif "reward_head.weight" in state_dict and state_dict["reward_head.weight"].shape[0] == 1: _broadcast_tensor(lm_head_weight, "reward_head.weight") print_rank_0("load lm_head from value_head weight") else: _broadcast_tensor(None, "lm_head.weight") print_rank_0("fail to match lm_head in value_model") else: _broadcast_tp_shard_tensor(lm_head_weight, "lm_head.weight") dist.barrier() # Broadcast weights inside data parallel groups for wrapped_model in wrapped_models: broadcast_params(wrapped_model) get_torch_device().empty_cache() print_rank_0(f"loading megatron ckpt done, time elapsed {time.time() - start_time}s") ================================================ FILE: verl_distillation/verl/models/qwen2/megatron/checkpoint_utils/qwen2_saver.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import time import torch import torch.distributed as dist from megatron.core import mpu from megatron.core.distributed import DistributedDataParallel as LocalDDP from megatron.core.transformer.module import Float16Module from torch.nn.parallel import DistributedDataParallel as torchDDP from verl.utils.device import get_device_id, get_torch_device from verl.utils.logger import print_rank_0 from verl.utils.megatron_utils import unwrap_model def _megatron_calc_global_rank(tp_rank: int = 0, dp_rank: int = 0, pp_rank: int = 0): """given TP,DP,PP rank to get the global rank.""" tp_size = mpu.get_tensor_model_parallel_world_size() dp_size = mpu.get_data_parallel_world_size() pp_size = mpu.get_pipeline_model_parallel_world_size() assert tp_size * dp_size * pp_size == torch.distributed.get_world_size(), ( f"{tp_size} x {dp_size} x {pp_size} != {torch.distributed.get_world_size()}" ) # We only support TP-DP-PP grouping, for correctness when resharding return (pp_rank * dp_size + dp_rank) * tp_size + tp_rank def _megatron_calc_layer_map(config): """Calculate the mapping of global layer_idx to local layer_idx Returns: layer_map (Dict: int -> tuple(int, int, int)): mapping from the global layer index to a tuple of (pp_rank, virtual_pp_rank, layer_idx inside model) """ from megatron.core import mpu pp_size = mpu.get_pipeline_model_parallel_world_size() virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1 layer_map = dict() num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers for pp_rank_idx in range(pp_size): for virtual_pp_rank_idx in range(virtual_pp_size): layer_offset = ( virtual_pp_rank_idx * (config.num_hidden_layers // virtual_pp_size) + pp_rank_idx * num_layers_per_model ) for layer_idx in range(num_layers_per_model): layer_map[layer_offset + layer_idx] = ( 
pp_rank_idx, virtual_pp_rank_idx, layer_idx, ) return layer_map def merge_megatron_ckpt_qwen2(wrapped_models, config, dtype, is_value_model=False, tie_word_embeddings=False): """Merge sharded parameters of a Megatron module into a merged checkpoint. Args: wrapped_models (list of megatron.core.distributed.DistributedDataParallel): The local DDP wrapped megatron modules. config (str or None): HF config for model dtype: model params type is_value_model: if model is value model tie_word_embeddings: tie_word_embeddings Returns: state_dict (dict): The merged state_dict in rank 0, and an empty dictionary in other ranks. """ start_time = time.time() def _get_gpt_model(model): return model dp_rank = mpu.get_data_parallel_rank() pp_size = mpu.get_pipeline_model_parallel_world_size() pp_rank = mpu.get_pipeline_model_parallel_rank() virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1 mp_group = mpu.get_model_parallel_group() if dist.get_rank() == 0: assert mp_group.rank() == 0, f"mp_rank:[{mp_group.rank}] != 0 on rank #0" assert pp_rank == 0, f"pp_rank:[{pp_rank}] != 0 on rank #0" assert dp_rank == 0, f"dp_rank:[{dp_rank}] != 0 on rank #0" if not isinstance(wrapped_models, list | tuple): wrapped_models = list(wrapped_models) assert len(wrapped_models) == virtual_pp_size num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers models = [None] * len(wrapped_models) for i, wrapped_model in enumerate(wrapped_models): models[i] = unwrap_model(wrapped_model, (torchDDP, LocalDDP, Float16Module)) assert len(models[i].model.layers) == num_layers_per_model, ( "len model layers {} not equal to num_layers_per_model {}".format( len(models[i].model.layers), num_layers_per_model ) ) state_dict = dict() def _get_cpu_tensor(tensor: torch.Tensor): if tensor is None: return None if tensor.device == torch.device("cpu"): return tensor.detach().clone() return tensor.detach().cpu() def _broadcast_tensor(tensor, name, src_pp_rank) -> torch.Tensor: """broadcast tensor across mp_group""" nonlocal state_dict nonlocal mp_group src_rank = _megatron_calc_global_rank(tp_rank=0, dp_rank=0, pp_rank=src_pp_rank) if torch.distributed.get_rank() == src_rank: if tensor is None: weight = None tensor_shape = None else: weight = tensor tensor_shape = weight.shape else: weight = None tensor_shape = None obj_list = [tensor_shape] dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group) tensor_shape = obj_list[0] if tensor_shape is None: # all or none ranks in the mp_group should reach here print_rank_0(f"tensor:[{name}] not exist, skip collect") return if weight is None: weight = torch.empty( tensor_shape, dtype=dtype, device=get_device_id(), requires_grad=False, ) dist.broadcast(weight, src=src_rank, group=mp_group) if torch.distributed.get_rank() == 0: state_dict[name] = _get_cpu_tensor(weight) def _broadcast_tp_shard_tensor(tensor, name, src_pp_rank, concat_dim=0, mutate_func=None) -> torch.Tensor: """broadcast tensor in tp shards across mp_group""" nonlocal state_dict nonlocal mp_group tp_size = mpu.get_tensor_model_parallel_world_size() src_rank = _megatron_calc_global_rank(tp_rank=0, dp_rank=0, pp_rank=src_pp_rank) chunk_shape = tensor.shape if torch.distributed.get_rank() == src_rank else None obj_list = [chunk_shape] dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group) chunk_shape = obj_list[0] if chunk_shape is None: # all or none ranks in the mp_group should reach here 
print_rank_0(f"tp_shard tensor:[{name}] not exist, skip collecting") return buffer_tensor = torch.empty( chunk_shape, dtype=dtype, device=get_device_id(), requires_grad=False, ) chunk_tensors = [None] * tp_size for i in range(tp_size): cur_src_rank = _megatron_calc_global_rank(tp_rank=i, dp_rank=0, pp_rank=src_pp_rank) sync_tensor = tensor if torch.distributed.get_rank() == cur_src_rank else buffer_tensor dist.broadcast(sync_tensor, src=cur_src_rank, group=mp_group) if torch.distributed.get_rank() == 0: chunk_tensors[i] = _get_cpu_tensor(sync_tensor) if torch.distributed.get_rank() == 0: full_tensor = torch.concat(chunk_tensors, dim=concat_dim) if mutate_func is not None: full_tensor = mutate_func(full_tensor) state_dict[name] = full_tensor def _broadcast_tp_shard_tensor_gate_up(tensor, gate_name, up_name, src_pp_rank) -> torch.Tensor: """broadcast tensor in tp shards across mp_group""" nonlocal state_dict nonlocal mp_group tp_size = mpu.get_tensor_model_parallel_world_size() src_rank = _megatron_calc_global_rank(tp_rank=0, dp_rank=0, pp_rank=src_pp_rank) chunk_shape = tensor.shape if torch.distributed.get_rank() == src_rank else None obj_list = [chunk_shape] dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group) chunk_shape = obj_list[0] if chunk_shape is None: # all or none ranks in the mp_group should reach here print_rank_0(f"tp_shard tensor:[{gate_name, up_name}] not exist, skip collecting") return buffer_tensor = torch.empty( chunk_shape, dtype=dtype, device=get_device_id(), requires_grad=False, ) chunk_tensors = [None] * tp_size for i in range(tp_size): cur_src_rank = _megatron_calc_global_rank(tp_rank=i, dp_rank=0, pp_rank=src_pp_rank) sync_tensor = tensor if torch.distributed.get_rank() == cur_src_rank else buffer_tensor dist.broadcast(sync_tensor, src=cur_src_rank, group=mp_group) if torch.distributed.get_rank() == 0: chunk_tensors[i] = _get_cpu_tensor(sync_tensor) if torch.distributed.get_rank() == 0: full_tensor = torch.concat(chunk_tensors, dim=0) intermediate_size_tp = config.intermediate_size // tp_size gate_weight_list = [] up_weight_list = [] for i in range(tp_size): gate_up_weight_tp = full_tensor[intermediate_size_tp * 2 * i : intermediate_size_tp * 2 * (i + 1)] gate_weight_tp = gate_up_weight_tp[:intermediate_size_tp] up_weight_tp = gate_up_weight_tp[intermediate_size_tp:] gate_weight_list.append(gate_weight_tp) up_weight_list.append(up_weight_tp) state_dict[gate_name] = torch.cat(gate_weight_list, dim=0) state_dict[up_name] = torch.cat(up_weight_list, dim=0) def _broadcast_tp_shard_tensor_qkv(tensor, q_name, k_name, v_name, src_pp_rank): """broadcast tensor in tp shards across mp_group""" nonlocal state_dict nonlocal mp_group tp_size = mpu.get_tensor_model_parallel_world_size() src_rank = _megatron_calc_global_rank(tp_rank=0, dp_rank=0, pp_rank=src_pp_rank) chunk_shape = tensor.shape if torch.distributed.get_rank() == src_rank else None obj_list = [chunk_shape] dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group) chunk_shape = obj_list[0] if chunk_shape is None: # all or none ranks in the mp_group should reach here print_rank_0(f"tp_shard tensor:[{q_name}] not exist, skip collecting") return buffer_tensor = torch.empty( chunk_shape, dtype=dtype, device=get_device_id(), requires_grad=False, ) chunk_tensors = [None] * tp_size for i in range(tp_size): cur_src_rank = _megatron_calc_global_rank(tp_rank=i, dp_rank=0, pp_rank=src_pp_rank) sync_tensor = tensor if torch.distributed.get_rank() == cur_src_rank else buffer_tensor 
dist.broadcast(sync_tensor, src=cur_src_rank, group=mp_group) if torch.distributed.get_rank() == 0: chunk_tensors[i] = _get_cpu_tensor(sync_tensor) if torch.distributed.get_rank() == 0: full_tensor = torch.concat(chunk_tensors, dim=0) q_weight_list = [] k_weight_list = [] v_weight_list = [] hidden_size_per_head = config.hidden_size // config.num_attention_heads if config.num_key_value_heads >= tp_size: q_size_tp = config.hidden_size // tp_size kv_size_tp = hidden_size_per_head * config.num_key_value_heads // tp_size total_size = q_size_tp + 2 * kv_size_tp for i in range(tp_size): qkv_part = full_tensor[i * total_size : (i + 1) * total_size] q_part = qkv_part[:q_size_tp] k_part = qkv_part[q_size_tp : q_size_tp + kv_size_tp] v_part = qkv_part[q_size_tp + kv_size_tp : total_size] q_weight_list.append(q_part) k_weight_list.append(k_part) v_weight_list.append(v_part) else: q_size_tp = config.hidden_size // tp_size kv_size_tp = hidden_size_per_head total_size = q_size_tp + 2 * kv_size_tp for i in range(tp_size): qkv_part = full_tensor[i * total_size : (i + 1) * total_size] q_part = qkv_part[:q_size_tp] k_part = qkv_part[q_size_tp : q_size_tp + kv_size_tp] v_part = qkv_part[q_size_tp + kv_size_tp : total_size] q_weight_list.append(q_part) if i * config.num_key_value_heads % tp_size == 0: k_weight_list.append(k_part) v_weight_list.append(v_part) state_dict[q_name] = torch.cat(q_weight_list, dim=0) state_dict[k_name] = torch.cat(k_weight_list, dim=0) state_dict[v_name] = torch.cat(v_weight_list, dim=0) # empty cache before collecting weights get_torch_device().empty_cache() # Embeddings # ------------------- if dp_rank == 0: # Embeddings # ------------------- print_rank_0("collecting embeddings...") gpt_model_module = _get_gpt_model(models[0]) _broadcast_tp_shard_tensor( gpt_model_module.model.embed_tokens.weight if pp_rank == 0 else None, "model.embed_tokens.weight", src_pp_rank=0, ) # Transformer layers # ------------------- layer_map = _megatron_calc_layer_map(config) for layer in range(config.num_hidden_layers): print_rank_0(f"collecting layer #{layer}...") layer_name = f"model.layers.{layer}" src_pp_rank, src_virtual_pp_rank, src_layer_idx = layer_map[layer] gpt_model_module = _get_gpt_model(models[src_virtual_pp_rank]) sync_layer = gpt_model_module.model.layers[src_layer_idx] _broadcast_tensor( sync_layer.input_layernorm.weight, f"{layer_name}.input_layernorm.weight", src_pp_rank=src_pp_rank, ) _broadcast_tp_shard_tensor_qkv( sync_layer.self_attn.qkv_proj.weight, f"{layer_name}.self_attn.q_proj.weight", f"{layer_name}.self_attn.k_proj.weight", f"{layer_name}.self_attn.v_proj.weight", src_pp_rank=src_pp_rank, ) _broadcast_tp_shard_tensor_qkv( sync_layer.self_attn.qkv_proj.bias, f"{layer_name}.self_attn.q_proj.bias", f"{layer_name}.self_attn.k_proj.bias", f"{layer_name}.self_attn.v_proj.bias", src_pp_rank=src_pp_rank, ) _broadcast_tp_shard_tensor( sync_layer.self_attn.o_proj.weight, f"{layer_name}.self_attn.o_proj.weight", concat_dim=1, src_pp_rank=src_pp_rank, ) _broadcast_tensor( sync_layer.post_attention_layernorm.weight, f"{layer_name}.post_attention_layernorm.weight", src_pp_rank=src_pp_rank, ) _broadcast_tp_shard_tensor_gate_up( sync_layer.mlp.gate_up_proj.weight, f"{layer_name}.mlp.gate_proj.weight", f"{layer_name}.mlp.up_proj.weight", src_pp_rank=src_pp_rank, ) _broadcast_tp_shard_tensor( sync_layer.mlp.down_proj.weight, f"{layer_name}.mlp.down_proj.weight", concat_dim=1, src_pp_rank=src_pp_rank, ) # Final Layernorm # ------------------- print_rank_0("collecting final layernorm...") 
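        # the final norm and the lm_head live on the last pipeline stage, so
        # they are collected with src_pp_rank = pp_size - 1 below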
        gpt_model_module = _get_gpt_model(models[-1])
        _broadcast_tensor(
            getattr(gpt_model_module.model.norm, "weight", None),
            "model.norm.weight",
            src_pp_rank=pp_size - 1,
        )

        if tie_word_embeddings:
            print_rank_0("tie word embedding skip load lm_head...")
        else:
            print_rank_0("collecting lm_head...")

            if is_value_model:
                _broadcast_tensor(
                    gpt_model_module.lm_head.weight if pp_rank == pp_size - 1 else None,
                    "lm_head.weight",
                    src_pp_rank=pp_size - 1,
                )
                _broadcast_tensor(
                    gpt_model_module.reward_head.weight
                    if pp_rank == pp_size - 1 and getattr(gpt_model_module, "reward_weight", None) is not None
                    else None,
                    "reward_head.weight",
                    src_pp_rank=pp_size - 1,
                )
            else:
                _broadcast_tp_shard_tensor(
                    getattr(gpt_model_module.lm_head, "weight", None) if pp_rank == pp_size - 1 else None,
                    "lm_head.weight",
                    src_pp_rank=pp_size - 1,
                )

    dist.barrier()
    get_torch_device().empty_cache()

    if torch.distributed.get_rank() == 0:
        for k, v in state_dict.items():
            if dtype != v.dtype:
                state_dict[k] = v.to(dtype)

    print_rank_0(f"merge megatron ckpt done, time elapsed {time.time() - start_time}s")
    return state_dict


================================================
FILE: verl_distillation/verl/models/qwen2/megatron/layers/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .parallel_attention import ParallelQwen2Attention
from .parallel_decoder import ParallelQwen2DecoderLayer, ParallelQwen2DecoderLayerRmPad
from .parallel_mlp import ParallelQwen2MLP
from .parallel_rmsnorm import ParallelQwen2RMSNorm

__all__ = [
    "ParallelQwen2Attention",
    "ParallelQwen2DecoderLayer",
    "ParallelQwen2DecoderLayerRmPad",
    "ParallelQwen2MLP",
    "ParallelQwen2RMSNorm",
]


================================================
FILE: verl_distillation/verl/models/qwen2/megatron/layers/parallel_attention.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
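
# NOTE: this file implements tensor-parallel attention with a fused QKV
# projection (QKVParallelLinear, column-parallel) and a row-parallel output
# projection, plus rotary position embeddings. RoPE rotates pairs of halves
# of the head dim: q_embed = q * cos + rotate_half(q) * sin.
# Illustrative sketch of rotate_half with a made-up head_dim=4 vector:
#
#     x = torch.tensor([1., 2., 3., 4.])
#     rotate_half(x)  # -> tensor([-3., -4., 1., 2.])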
import math from typing import Optional import torch.nn.functional as F from einops import rearrange from transformers.utils import is_flash_attn_2_available if is_flash_attn_2_available(): from flash_attn import flash_attn_varlen_func from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa: F401 import torch from flash_attn.layers.rotary import apply_rotary_emb from megatron.core import ModelParallelConfig, tensor_parallel from megatron.core import parallel_state as mpu from torch import nn from transformers import Qwen2Config from verl.models.qwen2.megatron.layers.parallel_linear import QKVParallelLinear from verl.utils.megatron import tensor_parallel as tp_utils class Qwen2RotaryEmbedding(nn.Module): def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): super().__init__() self.dim = dim self.max_position_embeddings = max_position_embeddings self.base = base inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim)) self.register_buffer("inv_freq", inv_freq, persistent=False) # Build here to make `torch.jit.trace` work. self._set_cos_sin_cache( seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype() ) def _set_cos_sin_cache(self, seq_len, device, dtype): self.max_seq_len_cached = seq_len t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) freqs = torch.einsum("i,j->ij", t, self.inv_freq) # Different from paper, but it uses a different permutation in order to obtain the same calculation emb = torch.cat((freqs, freqs), dim=-1) self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) def forward(self, x, seq_len=None): # x: [bs, num_attention_heads, seq_len, head_size] if seq_len > self.max_seq_len_cached: self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype) return ( self.cos_cached[:seq_len].to(dtype=x.dtype), self.sin_cached[:seq_len].to(dtype=x.dtype), ) class Qwen2LinearScalingRotaryEmbedding(Qwen2RotaryEmbedding): """Qwen2RotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev""" def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0): self.scaling_factor = scaling_factor super().__init__(dim, max_position_embeddings, base, device) def _set_cos_sin_cache(self, seq_len, device, dtype): self.max_seq_len_cached = seq_len t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) t = t / self.scaling_factor freqs = torch.einsum("i,j->ij", t, self.inv_freq) # Different from paper, but it uses a different permutation in order to obtain the same calculation emb = torch.cat((freqs, freqs), dim=-1) self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) class Qwen2DynamicNTKScalingRotaryEmbedding(Qwen2RotaryEmbedding): """Qwen2RotaryEmbedding extended with Dynamic NTK scaling. 
Credits to the Reddit users /u/bloc97 and /u/emozilla""" def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0): self.scaling_factor = scaling_factor super().__init__(dim, max_position_embeddings, base, device) def _set_cos_sin_cache(self, seq_len, device, dtype): self.max_seq_len_cached = seq_len if seq_len > self.max_position_embeddings: base = self.base * ( (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1) ) ** (self.dim / (self.dim - 2)) inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim)) self.register_buffer("inv_freq", inv_freq, persistent=False) t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) freqs = torch.einsum("i,j->ij", t, self.inv_freq) # Different from paper, but it uses a different permutation in order to obtain the same calculation emb = torch.cat((freqs, freqs), dim=-1) self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1) def apply_rotary_pos_emb(q, k, cos, sin, position_ids): cos = cos[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim] sin = sin[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim] q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: """ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) """ batch, num_key_value_heads, slen, head_dim = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) class ParallelQwen2Attention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: Qwen2Config, megatron_config: ModelParallelConfig): super().__init__() self.config = config self.megatron_config = megatron_config self.hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.hidden_size // self.num_heads self.num_key_value_heads = config.num_key_value_heads self.num_key_value_groups = self.num_heads // self.num_key_value_heads self.max_position_embeddings = config.max_position_embeddings self.rope_theta = config.rope_theta # assign values after tp tp_size = mpu.get_tensor_model_parallel_world_size() assert self.num_heads % tp_size == 0, ( f"num_head must be divisible by tp_size. Got num_head={self.num_heads}, tp_size={tp_size}" ) assert self.num_key_value_heads % tp_size == 0, ( f"num_key_value_heads must be divisible by tp_size. Got num_key_value_heads=" f"{self.num_key_value_heads}, tp_size={tp_size}" ) self.num_heads_per_tp = self.num_heads // tp_size self.num_key_value_heads_per_tp = self.num_key_value_heads // tp_size self.hidden_size_per_tp = self.hidden_size // tp_size if (self.head_dim * self.num_heads) != self.hidden_size: raise ValueError( f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size} and " f"`num_heads`: {self.num_heads})." 
) column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear() row_kwargs = tp_utils.get_default_kwargs_for_row_parallel_linear() if megatron_config is not None: assert column_kwargs.get("config", False), "must have ModelParallelConfig" assert row_kwargs.get("config", False), "must have ModelParallelConfig" tp_utils.update_kwargs_with_config(column_kwargs, megatron_config) tp_utils.update_kwargs_with_config(row_kwargs, megatron_config) # [self.q_size, self.k_size, self.v_size] self.qkv_proj = QKVParallelLinear( input_size=self.hidden_size, num_heads=self.num_heads, num_key_value_heads=self.num_key_value_heads, head_dim=self.head_dim, # bias=config.attention_bias, bias=True, gather_output=False, skip_bias_add=False, **column_kwargs, ) self.q_size = self.num_heads_per_tp * self.head_dim self.k_size = self.num_key_value_heads_per_tp * self.head_dim self.v_size = self.num_key_value_heads_per_tp * self.head_dim self.o_proj = tensor_parallel.RowParallelLinear( input_size=self.num_heads * self.head_dim, output_size=self.hidden_size, # bias=config.attention_bias, bias=False, input_is_parallel=True, skip_bias_add=False, **row_kwargs, ) self._init_rope() def _init_rope(self): self.rotary_emb = Qwen2RotaryEmbedding( self.head_dim, max_position_embeddings=self.max_position_embeddings, base=self.rope_theta, ) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: bsz, q_len, _ = hidden_states.size() qkv = self.qkv_proj(hidden_states)[0] query_states, key_states, value_states = qkv.split([self.q_size, self.k_size, self.v_size], dim=-1) query_states = query_states.view(bsz, q_len, self.num_heads_per_tp, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads_per_tp, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads_per_tp, self.head_dim).transpose(1, 2) kv_seq_len = key_states.shape[-2] cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) if attn_weights.size() != (bsz, self.num_heads_per_tp, q_len, kv_seq_len): raise ValueError( f"Attention weights should be of size {(bsz, self.num_heads_per_tp, q_len, kv_seq_len)}, " f"but is {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights + attention_mask # upcast attention to fp32 attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (bsz, self.num_heads_per_tp, q_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads_per_tp, q_len, self.head_dim)}, " f"but is {attn_output.size()}" ) attn_output = attn_output.transpose(1, 2).contiguous() 
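        # collapse the per-rank heads back into this rank's hidden slice:
        # (bsz, q_len, num_heads_per_tp, head_dim) -> (bsz, q_len, hidden_size_per_tp),
        # since num_heads_per_tp * head_dim == hidden_size_per_tp; the row-parallel
        # o_proj then reduces the partial outputs across tensor-parallel ranks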
        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size_per_tp)
        attn_output = self.o_proj(attn_output)[0]
        return attn_output


"""
Remove padding Attention
- Using Flash-attn 2
- Compatible with sequence parallel
"""


def apply_rotary_pos_emb_rmpad(q, k, cos, sin, position_ids, indices, sequence_length):
    batch_size = position_ids.shape[0]
    q = pad_input(q, indices, batch_size, sequence_length)  # (batch_size, seqlen, num_head, head_dim)
    k = pad_input(k, indices, batch_size, sequence_length)
    cos = cos[position_ids].unsqueeze(2)  # [bs, seq_len, 1, dim]
    sin = sin[position_ids].unsqueeze(2)  # [bs, seq_len, 1, dim]
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)

    q_embed = index_first_axis(rearrange(q_embed, "b s ... -> (b s) ..."), indices)
    k_embed = index_first_axis(rearrange(k_embed, "b s ... -> (b s) ..."), indices)

    return q_embed, k_embed


# use flash-attn rotary embeddings with rmpad
# cos/sin should be: (seq_length, rotary_dim / 2)
def apply_rotary_pos_emb_rmpad_flash(q, k, cos, sin, cu_seqlens, max_seqlen):
    q_embed = apply_rotary_emb(
        q, cos, sin, interleaved=False, inplace=False, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen
    )
    k_embed = apply_rotary_emb(
        k, cos, sin, interleaved=False, inplace=False, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen
    )
    return q_embed, k_embed


class ParallelQwen2AttentionRmPad(ParallelQwen2Attention):
    def forward(
        self,
        hidden_states: torch.Tensor,
        position_ids: Optional[torch.LongTensor] = None,
        sequence_length: int = None,
        indices: torch.Tensor = None,
        cu_seqlens: torch.Tensor = None,
        max_seqlen_in_batch: int = None,
    ):
        total_nnz, _, _ = hidden_states.size()  # This is the total_nnz padded after sequence parallel

        if self.megatron_config.sequence_parallel:
            total_nnz = total_nnz * mpu.get_tensor_model_parallel_world_size()

        qkv = self.qkv_proj(hidden_states)[0]
        query_states, key_states, value_states = qkv.split(
            [self.q_size, self.k_size, self.v_size], dim=-1
        )  # (total_nnz, 1, hidden_size)

        if self.megatron_config.sequence_parallel:
            sequence_parallel_pad = total_nnz - cu_seqlens[-1]
            total_nnz = cu_seqlens[-1]  # total_nnz before sp padding
            query_states = query_states[:total_nnz]
            key_states = key_states[:total_nnz]
            value_states = value_states[:total_nnz]

        # Flash attention (varlen) requires the input to have the shape
        # (total_nnz, num_heads, head_dim), therefore we just need to keep
        # the original unpadded layout
        query_states = query_states.view(total_nnz, self.num_heads_per_tp, self.head_dim)
        key_states = key_states.view(total_nnz, self.num_key_value_heads_per_tp, self.head_dim)
        value_states = value_states.view(total_nnz, self.num_key_value_heads_per_tp, self.head_dim)

        cos, sin = self.rotary_emb(value_states, seq_len=sequence_length)
        cos, sin = cos[:, : cos.shape[1] // 2], sin[:, : sin.shape[1] // 2]  # flash attn only needs half
        query_states, key_states = apply_rotary_pos_emb_rmpad_flash(
            query_states, key_states, cos, sin, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen_in_batch
        )
        # query_states, key_states = apply_rotary_pos_emb_rmpad(query_states, key_states, cos, sin,
        #                                                       position_ids, indices,

        # It is recommended to use dropout with FA according to the docs
        # when training.
        dropout_rate = 0.0  # if not self.training else self.attn_dropout

        # In PEFT, the layer norms are usually cast to float32 for training stability,
        # so the input hidden states may have been silently cast to float32. Hence, we
        # cast them back to float16 just to be sure everything works as expected.
        # This might slow down training & inference, so it is recommended not to cast
        # the LayerNorms to fp32. (Qwen2RMSNorm handles it correctly)
        input_dtype = query_states.dtype
        if input_dtype == torch.float32:
            query_states = query_states.to(torch.float16)
            key_states = key_states.to(torch.float16)
            value_states = value_states.to(torch.float16)

        attn_output_unpad = flash_attn_varlen_func(
            query_states,
            key_states,
            value_states,
            cu_seqlens_q=cu_seqlens,
            cu_seqlens_k=cu_seqlens,
            max_seqlen_q=max_seqlen_in_batch,
            max_seqlen_k=max_seqlen_in_batch,
            dropout_p=dropout_rate,
            softmax_scale=None,
            causal=True,
        )

        attn_output_unpad = attn_output_unpad.to(input_dtype)
        attn_output_unpad = attn_output_unpad.reshape(total_nnz, 1, self.hidden_size_per_tp).contiguous()

        # sequence parallel reduce_scatter is performed inside RowColumnParallel if enabled
        # Here we need to repad
        if self.megatron_config.sequence_parallel:
            attn_output_unpad = F.pad(attn_output_unpad, pad=(0, 0, 0, 0, 0, sequence_parallel_pad))

        attn_output_unpad = self.o_proj(attn_output_unpad)[0]
        return attn_output_unpad


================================================
FILE: verl_distillation/verl/models/qwen2/megatron/layers/parallel_decoder.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
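# A minimal, self-contained sanity check (a sketch, standard PyTorch only) for the GQA
# head-expansion trick used by repeat_kv in parallel_attention.py above: its
# expand + reshape is equivalent to torch.repeat_interleave(x, repeats=n_rep, dim=1).

import torch


def _repeat_kv_sketch(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    # (batch, num_key_value_heads, seqlen, head_dim) -> (batch, num_key_value_heads * n_rep, seqlen, head_dim)
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


if __name__ == "__main__":
    x = torch.randn(2, 4, 8, 16)  # (batch, kv_heads, seqlen, head_dim)
    assert torch.equal(_repeat_kv_sketch(x, 3), torch.repeat_interleave(x, repeats=3, dim=1))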
from typing import Optional import torch from megatron.core import ModelParallelConfig from torch import nn from transformers import Qwen2Config from verl.utils.megatron_utils import TransformerConfig, convert_config from .parallel_attention import ParallelQwen2Attention, ParallelQwen2AttentionRmPad from .parallel_mlp import ParallelQwen2MLP from .parallel_rmsnorm import ParallelQwen2RMSNorm class ParallelQwen2DecoderLayer(nn.Module): def __init__(self, config: Qwen2Config, megatron_config: ModelParallelConfig, layer_idx: int): super().__init__() self.config: TransformerConfig = convert_config(config, megatron_config) self.layer_idx = layer_idx self.hidden_size = config.hidden_size self.self_attn = ParallelQwen2Attention(config=config, megatron_config=megatron_config) self.mlp = ParallelQwen2MLP(config, megatron_config=megatron_config) self.input_layernorm = ParallelQwen2RMSNorm(config, megatron_config) self.post_attention_layernorm = ParallelQwen2RMSNorm(config, megatron_config) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). 
past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states """ residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # Note: sequence parallel is hidden inside ColumnParallelLinear # reduce scatter is hidden inside RowParallelLinear # Self Attention hidden_states = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, ) # TODO: add sequence parallel operator reduce_scatter here hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) # TODO: add sequence parallel operator all_gather here hidden_states = self.mlp(hidden_states) # TODO: add sequence parallel operator reduce_scatter here hidden_states = residual + hidden_states outputs = hidden_states return outputs class ParallelQwen2DecoderLayerRmPad(nn.Module): def __init__(self, config: Qwen2Config, megatron_config: ModelParallelConfig, layer_idx: int): super().__init__() self.config: TransformerConfig = convert_config(config, megatron_config) self.hidden_size = config.hidden_size self.layer_idx = layer_idx self.self_attn = ParallelQwen2AttentionRmPad(config=config, megatron_config=megatron_config) self.mlp = ParallelQwen2MLP(config, megatron_config=megatron_config) self.input_layernorm = ParallelQwen2RMSNorm(config, megatron_config) self.post_attention_layernorm = ParallelQwen2RMSNorm(config, megatron_config) def forward( self, hidden_states: torch.Tensor, position_ids: Optional[torch.LongTensor] = None, sequence_length: int = None, indices: torch.Tensor = None, cu_seqlens: int = None, max_seqlen_in_batch: int = None, ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: residual = hidden_states # (total_nnz // sp, 1, hidden_size) hidden_states = self.input_layernorm(hidden_states) # Self Attention # (total_nnz // sp, 1, hidden_size) -> all-gather (total_nnz, 1, hidden_size) # -> col + row -> reduce-scatter -> (total_nnz // sp, 1, hidden_size) hidden_states = self.self_attn( hidden_states=hidden_states, position_ids=position_ids, sequence_length=sequence_length, indices=indices, cu_seqlens=cu_seqlens, max_seqlen_in_batch=max_seqlen_in_batch, ) hidden_states = residual + hidden_states # Fully Connected # shape changes same as attn residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = hidden_states return outputs ================================================ FILE: verl_distillation/verl/models/qwen2/megatron/layers/parallel_linear.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2023 The vLLM team. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
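# A small numeric sketch of how the fused QKV projection below is dimensioned and split
# per tensor-parallel rank. The sizes are illustrative assumptions; they only require
# that num_heads and num_key_value_heads be divisible by tp_size.

if __name__ == "__main__":
    num_heads, num_key_value_heads, head_dim, tp_size = 32, 8, 128, 4
    fused_output_size = (num_heads + 2 * num_key_value_heads) * head_dim  # total column-parallel output
    q_size = (num_heads // tp_size) * head_dim
    k_size = v_size = (num_key_value_heads // tp_size) * head_dim
    # each rank's 1/tp_size shard is exactly its local q/k/v slices, matching the
    # qkv.split([q_size, k_size, v_size], dim=-1) in ParallelQwen2Attention.forward
    assert fused_output_size // tp_size == q_size + k_size + v_size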
# Adapted from https://github.com/vllm-project/vllm/blob/main/vllm/model_executor/layers/linear.py

from megatron.core import tensor_parallel


class QKVParallelLinear(tensor_parallel.ColumnParallelLinear):
    def __init__(
        self,
        input_size,
        num_heads,
        num_key_value_heads,
        head_dim,
        *,
        bias=True,
        gather_output=True,
        skip_bias_add=False,
        **kwargs,
    ):
        # Keep the input parameters; the head counts already determine the output size
        self.input_size = input_size
        self.q_output_size = num_heads * head_dim
        self.kv_output_size = num_key_value_heads * head_dim
        self.head_dim = head_dim
        self.gather_output = gather_output
        self.skip_bias_add = skip_bias_add

        input_size = self.input_size
        output_size = (num_heads + 2 * num_key_value_heads) * self.head_dim

        super().__init__(
            input_size=input_size,
            output_size=output_size,
            bias=bias,
            gather_output=gather_output,
            skip_bias_add=skip_bias_add,
            **kwargs,
        )


class MergedColumnParallelLinear(tensor_parallel.ColumnParallelLinear):
    def __init__(
        self,
        input_size,
        gate_output_size,
        up_output_size,
        *,
        bias=True,
        gather_output=True,
        skip_bias_add=False,
        **kwargs,
    ):
        # Keep the input parameters; the gate and up projections are fused into one output
        self.input_size = input_size
        self.output_size = gate_output_size + up_output_size
        self.gather_output = gather_output
        self.skip_bias_add = skip_bias_add

        super().__init__(
            input_size=self.input_size,
            output_size=self.output_size,
            bias=bias,
            gather_output=gather_output,
            skip_bias_add=skip_bias_add,
            **kwargs,
        )


================================================
FILE: verl_distillation/verl/models/qwen2/megatron/layers/parallel_mlp.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
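# A minimal single-device sketch of the fused SwiGLU computation that ParallelQwen2MLP
# below performs on each tensor-parallel rank; plain nn.Linear stands in for the
# Megatron parallel layers, and the sizes are illustrative assumptions.

if __name__ == "__main__":
    import torch
    import torch.nn.functional as F
    from torch import nn

    hidden_size, intermediate_size = 64, 256
    gate_up_proj = nn.Linear(hidden_size, 2 * intermediate_size, bias=False)  # fused gate + up
    down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)

    x = torch.randn(3, hidden_size)
    gate, up = gate_up_proj(x).split(intermediate_size, dim=-1)
    y = down_proj(F.silu(gate) * up)  # Qwen2 configs use "silu" as hidden_act
    assert y.shape == (3, hidden_size)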
from megatron.core import ModelParallelConfig, tensor_parallel
from megatron.core import parallel_state as mpu
from torch import nn
from transformers.activations import ACT2FN

from verl.models.qwen2.megatron.layers.parallel_linear import MergedColumnParallelLinear
from verl.utils.megatron import tensor_parallel as tp_utils


class ParallelQwen2MLP(nn.Module):
    def __init__(self, config, megatron_config: ModelParallelConfig = None) -> None:
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size

        # The weight is only [hidden_size, intermediate_size // model_parallel_world_size]
        column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear()
        row_kwargs = tp_utils.get_default_kwargs_for_row_parallel_linear()

        if megatron_config is not None:
            assert column_kwargs.get("config", False), "must have ModelParallelConfig"
            assert row_kwargs.get("config", False), "must have ModelParallelConfig"
            tp_utils.update_kwargs_with_config(row_kwargs, megatron_config)
            tp_utils.update_kwargs_with_config(column_kwargs, megatron_config)

        tp_size = mpu.get_tensor_model_parallel_world_size()

        self.gate_up_proj = MergedColumnParallelLinear(
            input_size=self.hidden_size,
            gate_output_size=self.intermediate_size,
            up_output_size=self.intermediate_size,
            bias=False,
            gather_output=False,
            skip_bias_add=False,
            **column_kwargs,
        )
        self.gate_size = self.intermediate_size // tp_size

        self.down_proj = tensor_parallel.RowParallelLinear(
            input_size=self.intermediate_size,
            output_size=self.hidden_size,
            bias=False,
            input_is_parallel=True,
            skip_bias_add=False,
            **row_kwargs,
        )

        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        gate_up = self.gate_up_proj(x)[0]
        gate, up = gate_up.split(self.gate_size, dim=-1)
        return self.down_proj(self.act_fn(gate) * up)[0]


================================================
FILE: verl_distillation/verl/models/qwen2/megatron/layers/parallel_rmsnorm.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
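# For intuition: the fused kernel used below (apex's fused_rms_norm_affine) computes
# RMSNorm, x / sqrt(mean(x^2) + eps) * weight, with no mean subtraction (unlike
# LayerNorm). A minimal eager-mode reference sketch, upcasting to fp32 and casting back:

import torch


def _rms_norm_reference(x: torch.Tensor, weight: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
    variance = x.float().pow(2).mean(-1, keepdim=True)
    return weight * (x.float() * torch.rsqrt(variance + eps)).to(x.dtype)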
import numbers import torch from apex.normalization.fused_layer_norm import fused_rms_norm_affine from megatron.core import ModelParallelConfig from torch import nn from transformers import Qwen2Config from verl.utils.megatron import sequence_parallel as sp_utils class ParallelQwen2RMSNorm(nn.Module): def __init__(self, config: Qwen2Config, megatron_config: ModelParallelConfig): """ Qwen2RMSNorm is equivalent to T5LayerNorm """ super().__init__() if isinstance(config.hidden_size, numbers.Integral): normalized_shape = (config.hidden_size,) self.normalized_shape = torch.Size(normalized_shape) self.weight = nn.Parameter(torch.ones(self.normalized_shape)) self.variance_epsilon = config.rms_norm_eps if megatron_config.sequence_parallel: sp_utils.mark_parameter_as_sequence_parallel(self.weight) def forward(self, hidden_states): return fused_rms_norm_affine( input=hidden_states, weight=self.weight, normalized_shape=self.normalized_shape, eps=self.variance_epsilon, memory_efficient=True, ) ================================================ FILE: verl_distillation/verl/models/qwen2/megatron/modeling_qwen2_megatron.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. # # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX # and OPT implementations in this library. It has been modified from its # original forms to accommodate minor architectural differences compared # to GPT-NeoX and OPT used by the Meta AI team that trained the model. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch Qwen2 model.""" from typing import Optional import torch import torch.utils.checkpoint from megatron.core import ModelParallelConfig, mpu, parallel_state, tensor_parallel from torch import nn from transformers.modeling_outputs import BaseModelOutputWithPast from transformers.models.qwen2.configuration_qwen2 import Qwen2Config from transformers.models.qwen2.modeling_qwen2 import CausalLMOutputWithPast from verl.utils.device import get_device_name from verl.utils.megatron import sequence_parallel as sp_utils from verl.utils.megatron import tensor_parallel as tp_utils from verl.utils.megatron_utils import TransformerConfig, convert_config from .layers import ParallelQwen2DecoderLayer, ParallelQwen2DecoderLayerRmPad, ParallelQwen2RMSNorm """ TODO: 1. Add weight initialization. Here we need to be careful on TP weight init. 2. Add sequence parallel 3. Load checkpoint from Qwen2 pretrained checkpoint """ # Copied from transformers.models.bart.modeling_bart._make_causal_mask def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device): """ Make causal mask used for bi-directional self-attention. 
""" bsz, tgt_len = input_ids_shape mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device) mask_cond = torch.arange(mask.size(-1), device=device) mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) mask = mask.to(dtype) return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len) # Copied from transformers.models.bart.modeling_bart._expand_mask def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. """ bsz, src_len = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) class ParallelQwen2Model(nn.Module): """ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Qwen2DecoderLayer`] Args: config: Qwen2Config """ def __init__(self, config: Qwen2Config, megatron_config: ModelParallelConfig): super().__init__() self.config: TransformerConfig = convert_config(config, megatron_config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size embedding_kwargs = tp_utils.get_default_kwargs_for_parallel_embedding() if megatron_config is not None: assert embedding_kwargs.get("config", False), "must have ModelParallelConfig" tp_utils.update_kwargs_with_config(embedding_kwargs, megatron_config) self.embed_tokens = tensor_parallel.VocabParallelEmbedding( num_embeddings=config.vocab_size, embedding_dim=config.hidden_size, **embedding_kwargs ) self.layers = nn.ModuleList( [ParallelQwen2DecoderLayer(config, megatron_config) for _ in range(config.num_hidden_layers)] ) self.norm = ParallelQwen2RMSNorm(config, megatron_config) # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds): # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] combined_attention_mask = None if input_shape[-1] > 1: combined_attention_mask = _make_causal_mask( input_shape, inputs_embeds.dtype, device=inputs_embeds.device, ) if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to( inputs_embeds.device ) combined_attention_mask = ( expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask ) return combined_attention_mask def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, ) -> tuple | BaseModelOutputWithPast: """ Args: input_ids: input ids. shape (batch_size, seq_length) attention_mask: attention_mask. shape (batch_size, seq_length) position_ids: position ids. 
                shape (batch_size, seq_length)

        Returns:

        """
        batch_size, seq_length = input_ids.shape
        inputs_embeds = self.embed_tokens(input_ids)

        # embed positions
        attention_mask = self._prepare_decoder_attention_mask(attention_mask, (batch_size, seq_length), inputs_embeds)

        hidden_states = inputs_embeds

        for idx, decoder_layer in enumerate(self.layers):
            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=attention_mask,
                position_ids=position_ids,
            )

            hidden_states = layer_outputs

        hidden_states = self.norm(hidden_states)

        return hidden_states


class ParallelQwen2ForCausalLM(nn.Module):
    def __init__(self, config: Qwen2Config, megatron_config: ModelParallelConfig):
        super().__init__()
        self.config: TransformerConfig = convert_config(config, megatron_config)
        # keep a reference so the kwargs update below does not hit a missing attribute
        self.megatron_config = megatron_config
        self.model = ParallelQwen2Model(config, megatron_config=megatron_config)
        self.vocab_size = config.vocab_size

        column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear()
        if megatron_config is not None:
            assert column_kwargs.get("config", False), "must have ModelParallelConfig"
            tp_utils.update_kwargs_with_config(column_kwargs, self.megatron_config)

        self.lm_head = tensor_parallel.ColumnParallelLinear(
            input_size=config.hidden_size,
            output_size=config.vocab_size,
            bias=False,
            gather_output=False,
            skip_bias_add=False,
            **column_kwargs,
        )

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
    ) -> tuple | CausalLMOutputWithPast:
        r"""
        Args:
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in
                `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100`
                are ignored (masked), the loss is only computed for the tokens with labels in
                `[0, ..., config.vocab_size]`.

        Returns:
        ```"""
        # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
        )

        hidden_states = outputs
        logits = self.lm_head(hidden_states)[0]

        logits = tensor_parallel.gather_from_tensor_model_parallel_region(logits)

        logits = logits.float()
        return CausalLMOutputWithPast(
            loss=None,
            logits=logits,
            past_key_values=None,
            hidden_states=None,
            attentions=None,
        )


from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input  # noqa: F401, E402


class ParallelQwen2ModelRmPad(nn.Module):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers.
Each layer is a [`Qwen2DecoderLayer`] Args: config: Qwen2Config """ def __init__(self, config: Qwen2Config, megatron_config: ModelParallelConfig): super().__init__() self.config: TransformerConfig = convert_config(config, megatron_config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size embedding_kwargs = tp_utils.get_default_kwargs_for_parallel_embedding() self.megatron_config = megatron_config if megatron_config is not None: assert embedding_kwargs.get("config", False), "must have ModelParallelConfig" tp_utils.update_kwargs_with_config(embedding_kwargs, self.megatron_config) self.embed_tokens = tensor_parallel.VocabParallelEmbedding( num_embeddings=config.vocab_size, embedding_dim=config.hidden_size, **embedding_kwargs ) self.layers = nn.ModuleList( [ParallelQwen2DecoderLayerRmPad(config, megatron_config) for _ in range(config.num_hidden_layers)] ) self.norm = ParallelQwen2RMSNorm(config, megatron_config) def forward( self, input_ids: torch.Tensor, position_ids: Optional[torch.LongTensor] = None, sequence_length: int = None, indices: torch.Tensor = None, cu_seqlens: int = None, max_seqlen_in_batch: int = None, ) -> tuple | BaseModelOutputWithPast: """ Args: input_ids: input ids. shape (1, totol_nnz) position_ids: position ids. shape (batch_size, seq_length) Returns: """ inputs_embeds = self.embed_tokens(input_ids) # (1, total_nnz) -> (1, total_nnz, hidden_size) # (1, total_nnz, hidden_size) -> (total_nnz, 1, hidden_size) -> (total_nnz // sp, 1, hidden_size) inputs_embeds = inputs_embeds.transpose(0, 1) if self.megatron_config.sequence_parallel: inputs_embeds = tensor_parallel.scatter_to_sequence_parallel_region(inputs_embeds) hidden_states = inputs_embeds for idx, decoder_layer in enumerate(self.layers): layer_outputs = decoder_layer( hidden_states, position_ids=position_ids, sequence_length=sequence_length, indices=indices, cu_seqlens=cu_seqlens, max_seqlen_in_batch=max_seqlen_in_batch, ) hidden_states = layer_outputs hidden_states = self.norm(hidden_states) return hidden_states class ParallelQwen2ForCausalLMRmPad(nn.Module): def __init__(self, config: Qwen2Config, megatron_config: ModelParallelConfig): super().__init__() self.config: TransformerConfig = convert_config(config, megatron_config) self.megatron_config = megatron_config self.model = ParallelQwen2ModelRmPad(config, megatron_config=megatron_config) self.vocab_size = config.vocab_size self._init_head(config) def _init_head(self, config: Qwen2Config): column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear() if self.megatron_config is not None: assert column_kwargs.get("config", False), "must have ModelParallelConfig" tp_utils.update_kwargs_with_config(column_kwargs, self.megatron_config) self.lm_head = tensor_parallel.ColumnParallelLinear( input_size=config.hidden_size, output_size=config.vocab_size, bias=False, gather_output=False, skip_bias_add=False, **column_kwargs, ) def _forward_head(self, hidden_states): # all_gather from sequence parallel region is performed inside lm_head logits = self.lm_head(hidden_states)[0] logits = logits.float() # (total_nnz_padded, 1, vocab_size // tp) logits = tensor_parallel.gather_from_tensor_model_parallel_region(logits) # (total_nnz_padded, 1, vocab_size) return logits def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, ) -> tuple | CausalLMOutputWithPast: r""" Args: labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): 
Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Returns: ```""" batch_size, sequence_length = input_ids.shape # remove padding here input_ids, indices, cu_seqlens, max_seqlen_in_batch, *_ = unpad_input( input_ids.unsqueeze(dim=-1), attention_mask ) # (total_nnz, 1) # pad input_ids to multiple of tp for all tp ranks # TODO: for better performance, the sp padding should be removed at each layer. Not sure the performance gap if self.megatron_config.sequence_parallel: input_ids = sp_utils.pad_to_sequence_parallel(input_ids) input_ids = input_ids.transpose(0, 1) # (1, total_nnz+pad) outputs = self.model( input_ids=input_ids, position_ids=position_ids, sequence_length=sequence_length, indices=indices, cu_seqlens=cu_seqlens, max_seqlen_in_batch=max_seqlen_in_batch, ) hidden_states = outputs logits = self._forward_head(hidden_states) # remove padding from sequence parallel if self.megatron_config.sequence_parallel: totol_nnz = cu_seqlens[-1] logits = logits[:totol_nnz] # (total_nnz_padded) logits = torch.squeeze(logits, dim=1) # remove the artificial batch dimension # add removed padding back logits = pad_input( logits, indices, batch_size, seqlen=sequence_length ) # (batch_size, sequence_length, vocab_size) return CausalLMOutputWithPast( loss=None, logits=logits, past_key_values=None, hidden_states=None, attentions=None, ) class ParallelQwen2ForValueRmPad(ParallelQwen2ForCausalLMRmPad): def _init_head(self, config): column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear() if self.megatron_config is not None: assert column_kwargs.get("config", False), "must have ModelParallelConfig" tp_utils.update_kwargs_with_config(column_kwargs, self.megatron_config) self.lm_head = nn.Linear(in_features=config.hidden_size, out_features=1, bias=False) # lm_head is effectively the same as sequence parallel sp_utils.mark_parameter_as_sequence_parallel(self.lm_head.weight) def _forward_head(self, hidden_states): logits = self.lm_head(hidden_states) # (total_nnz_padded // tp, 1, 1) logits = logits.float() if self.megatron_config.sequence_parallel: logits = tensor_parallel.gather_from_sequence_parallel_region(logits, tensor_parallel_output_grad=False) return logits def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, ) -> tuple | CausalLMOutputWithPast: output = super().forward(input_ids, attention_mask, position_ids) output.logits = torch.squeeze(output.logits, dim=-1) return output """ Support pipeline parallelism """ class ParallelQwen2ModelRmPadPP(nn.Module): """ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Qwen2DecoderLayer`] This model definition supports pipeline parallelism. To support pp and vpp, - This model only contains layer in this pp stage and vpp chunk - When calling get_model in Megatron, this rank will instantiate all the vpp chunks in this pp. 
Args: config: Qwen2Config """ def __init__(self, config: Qwen2Config, megatron_config: ModelParallelConfig, pre_process, post_process): super().__init__() self.config: TransformerConfig = convert_config(config, megatron_config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.pre_process = pre_process self.post_process = post_process self.megatron_config = megatron_config embedding_kwargs = tp_utils.get_default_kwargs_for_parallel_embedding() if megatron_config is not None: assert embedding_kwargs.get("config", False), "must have ModelParallelConfig" tp_utils.update_kwargs_with_config(embedding_kwargs, self.megatron_config) if pre_process: self.embed_tokens = tensor_parallel.VocabParallelEmbedding( num_embeddings=config.vocab_size, embedding_dim=config.hidden_size, **embedding_kwargs ) else: self.embed_tokens = None pp_rank = mpu.get_pipeline_model_parallel_rank() pp_size = megatron_config.pipeline_model_parallel_size self.num_layer_per_pp = config.num_hidden_layers // pp_size vpp_size = megatron_config.virtual_pipeline_model_parallel_size vpp_rank = mpu.get_virtual_pipeline_model_parallel_rank() if vpp_size is not None: self.num_layer_vpp_chunk = self.num_layer_per_pp // vpp_size self.num_layer_this_model = self.num_layer_vpp_chunk offset = vpp_rank * (config.num_hidden_layers // vpp_size) + (pp_rank * self.num_layer_vpp_chunk) else: self.num_layer_this_model = self.num_layer_per_pp offset = pp_rank * self.num_layer_per_pp self.layers = nn.ModuleList() for i in range(self.num_layer_this_model): layer = ParallelQwen2DecoderLayerRmPad(config, megatron_config, layer_idx=i + offset) self.layers.add_module(f"{i}", layer) if post_process: self.norm = ParallelQwen2RMSNorm(config, megatron_config) else: self.norm = None def set_input_tensor(self, input_tensor): """Set input tensor to be used instead of forward()'s input. When doing pipeline parallelism the input from the previous stage comes from communication, not from the input, so the model's forward_step_func won't have it. This function is thus used by internal code to bypass the input provided by the forward_step_func""" self.input_tensor = input_tensor def forward( self, input_ids: torch.Tensor, position_ids: Optional[torch.LongTensor] = None, sequence_length: int = None, indices: torch.Tensor = None, cu_seqlens: int = None, max_seqlen_in_batch: int = None, ) -> tuple | BaseModelOutputWithPast: """ Args: input_ids: input ids. shape (1, totol_nnz) position_ids: position ids. 
shape (batch_size, seq_length) Returns: """ if self.pre_process: inputs_embeds = self.embed_tokens(input_ids) # (1, total_nnz) -> (1, total_nnz, hidden_size) # vocab parallel embedding will not do sequence parallel reduce-scatter in open source megatron # so need to deal with it by handle here: # (1, total_nnz, hidden_size) -> (total_nnz, 1, hidden_size) -> (total_nnz // sp, 1, hidden_size) inputs_embeds = inputs_embeds.transpose(0, 1) if self.megatron_config.sequence_parallel: inputs_embeds = tensor_parallel.scatter_to_sequence_parallel_region(inputs_embeds) hidden_states = inputs_embeds else: # self.hidden_states should be passed by Megatron hidden_states = self.input_tensor for idx, decoder_layer in enumerate(self.layers): layer_outputs = decoder_layer( hidden_states, position_ids=position_ids, sequence_length=sequence_length, indices=indices, cu_seqlens=cu_seqlens, max_seqlen_in_batch=max_seqlen_in_batch, ) hidden_states = layer_outputs if self.post_process: hidden_states = self.norm(hidden_states) return hidden_states class ParallelQwen2ForCausalLMRmPadPP(nn.Module): def __init__( self, config: Qwen2Config, megatron_config: ModelParallelConfig, pre_process, post_process, share_embeddings_and_output_weights, ): super().__init__() self.config: TransformerConfig = convert_config(config, megatron_config) self.megatron_config = megatron_config self.model = ParallelQwen2ModelRmPadPP( config, megatron_config=megatron_config, pre_process=pre_process, post_process=post_process ) self.share_embeddings_and_output_weights = share_embeddings_and_output_weights self.vocab_size = config.vocab_size self.pre_process = pre_process self.post_process = post_process if post_process: self._init_head(config) if pre_process or post_process: self.setup_embeddings_and_output_layer() def set_input_tensor(self, input_tensor): """Set input tensor to be used instead of forward()'s input. When doing pipeline parallelism the input from the previous stage comes from communication, not from the input, so the model's forward_step_func won't have it. This function is thus used by internal code to bypass the input provided by the forward_step_func""" assert len(input_tensor) == 1 self.model.set_input_tensor(input_tensor[0]) def _init_head(self, config): column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear() if self.megatron_config is not None: assert column_kwargs.get("config", False), "must have ModelParallelConfig" tp_utils.update_kwargs_with_config(column_kwargs, self.megatron_config) self.lm_head = tensor_parallel.ColumnParallelLinear( input_size=config.hidden_size, output_size=config.vocab_size, bias=False, gather_output=False, skip_bias_add=False, skip_weight_param_allocation=self.pre_process and self.share_embeddings_and_output_weights, **column_kwargs, ) def setup_embeddings_and_output_layer(self) -> None: """Sets up embedding layer in first stage and output layer in last stage. This function initalizes word embeddings in the final stage when we are using pipeline parallelism and sharing word embeddings, and sets up param attributes on the embedding and output layers. """ # Set `is_embedding_or_output_parameter` attribute. 
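        # (Downstream Megatron code uses this flag to identify embedding/output weights,
        # e.g. when handling weight tying across pipeline stages.)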
if self.pre_process: self.model.embed_tokens.weight.is_embedding_or_output_parameter = True if self.post_process and self.lm_head.weight is not None: self.lm_head.weight.is_embedding_or_output_parameter = True if not self.share_embeddings_and_output_weights: return if parallel_state.get_pipeline_model_parallel_world_size() == 1: # Zero out wgrad if sharing embeddings between two layers on same # pipeline stage to make sure grad accumulation into main_grad is # correct and does not include garbage values (e.g., from torch.empty). self.shared_embedding_or_output_weight().zero_out_wgrad = True return if parallel_state.is_pipeline_first_stage() and self.pre_process and not self.post_process: self.shared_embedding_or_output_weight().shared_embedding = True if self.post_process and not self.pre_process: assert not parallel_state.is_pipeline_first_stage() # set word_embeddings weights to 0 here, then copy first # stage's weights using all_reduce below. self.lm_head.weight.data.fill_(0) self.lm_head.weight.shared = True self.lm_head.weight.shared_embedding = True if torch.distributed.is_initialized() and parallel_state.is_rank_in_embedding_group(): weight = self.shared_embedding_or_output_weight() weight.data = weight.data.to(get_device_name()) torch.distributed.all_reduce(weight.data, group=parallel_state.get_embedding_group()) def shared_embedding_or_output_weight(self) -> torch.Tensor: if self.pre_process: return self.model.embed_tokens.weight elif self.post_process: return self.lm_head.weight return None def _forward_head(self, hidden_states): # all_gather from sequence parallel region is performed inside lm_head # print(f'logits shape before forward_head: {hidden_states.shape}, vocab_size = ' # f'{self.config.vocab_size}') # [4, 32, 4096] output_weight = None if self.share_embeddings_and_output_weights: output_weight = self.shared_embedding_or_output_weight() logits = self.lm_head(hidden_states, weight=output_weight)[0] # print(f'logits shape after forward_head: {logits.shape}') # [8, 32, 8] logits = logits.float() # (total_nnz_padded, 1, vocab_size // tp) return logits def forward( self, # original input *, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, ) -> tuple | CausalLMOutputWithPast: r""" Args: labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Returns: ```""" # Note that input_ids, attention_mask and position_ids should be passed to every pp layer. # In the first pp, input_ids will be used, in other pp layers hidden_states will be used inside self.model batch_size, sequence_length = input_ids.shape # remove padding here input_ids_rmpad, indices, cu_seqlens, max_seqlen_in_batch, *_ = unpad_input( input_ids.unsqueeze(dim=-1), attention_mask ) # (total_nnz, 1) # pad input_ids to multiple of tp for all tp ranks # TODO: for better performance, the sp padding should be removed at each layer. 
Not sure the performance gap if self.megatron_config.sequence_parallel: input_ids_rmpad = sp_utils.pad_to_sequence_parallel(input_ids_rmpad) input_ids_rmpad = input_ids_rmpad.transpose(0, 1) # (1, total_nnz+pad) outputs = self.model( input_ids=input_ids_rmpad, position_ids=position_ids, sequence_length=sequence_length, indices=indices, cu_seqlens=cu_seqlens, max_seqlen_in_batch=max_seqlen_in_batch, ) if self.post_process: hidden_states = outputs logits = self._forward_head(hidden_states) logits = torch.squeeze(logits, dim=1) # remove the artificial batch dimension # torch.Size([8, 32, 16]) # remove padding from sequence parallel if self.megatron_config.sequence_parallel: totol_nnz = cu_seqlens[-1] logits = logits[:totol_nnz] # (total_nnz_padded) # add removed padding back. If input is already rmpad, we let the caller pad_input logits = pad_input( logits, indices, batch_size, seqlen=sequence_length ) # (batch_size, sequence_length, vocab_size) return CausalLMOutputWithPast( loss=None, logits=logits, past_key_values=None, hidden_states=None, attentions=None, ) else: return outputs class ParallelQwen2ForValueRmPadPP(ParallelQwen2ForCausalLMRmPadPP): def _init_head(self, config): column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear() if self.megatron_config is not None: assert column_kwargs.get("config", False), "must have ModelParallelConfig" tp_utils.update_kwargs_with_config(column_kwargs, self.megatron_config) self.lm_head = nn.Linear(in_features=config.hidden_size, out_features=1, bias=False) # lm_head is effectively the same as sequence parallel sp_utils.mark_parameter_as_sequence_parallel(self.lm_head.weight) def _forward_head(self, hidden_states): logits = self.lm_head(hidden_states) # (total_nnz_padded // tp, 1, 1) logits = logits.float() if self.megatron_config.sequence_parallel: logits = tensor_parallel.gather_from_sequence_parallel_region(logits, tensor_parallel_output_grad=False) return logits def forward( self, *, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, ) -> tuple | CausalLMOutputWithPast: output = super().forward(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids) if self.post_process: output.logits = torch.squeeze(output.logits, dim=-1) return output else: return output ================================================ FILE: verl_distillation/verl/models/registry.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib from typing import Optional import torch.nn as nn # Supported models in Megatron-LM # Architecture -> (module, class). 
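# Each entry maps a HuggingFace architecture name to a module stem and a tuple of
# (actor/causal-LM PP class, critic/value PP class, non-pipeline causal-LM class).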
_MODELS = { "LlamaForCausalLM": ( "llama", ("ParallelLlamaForCausalLMRmPadPP", "ParallelLlamaForValueRmPadPP", "ParallelLlamaForCausalLMRmPad"), ), "Qwen2ForCausalLM": ( "qwen2", ("ParallelQwen2ForCausalLMRmPadPP", "ParallelQwen2ForValueRmPadPP", "ParallelQwen2ForCausalLMRmPad"), ), "MistralForCausalLM": ( "mistral", ("ParallelMistralForCausalLMRmPadPP", "ParallelMistralForValueRmPadPP", "ParallelMistralForCausalLMRmPad"), ), "ApertusForCausalLM": ( "apertus", ("ParallelApertusForCausalLMRmPadPP", "ParallelApertusForValueRmPadPP", "ParallelApertusForCausalLMRmPad"), ), } # return model class class ModelRegistry: @staticmethod def load_model_cls(model_arch: str, value=False) -> Optional[type[nn.Module]]: if model_arch not in _MODELS: return None megatron = "megatron" module_name, model_cls_name = _MODELS[model_arch] if not value: # actor/ref model_cls_name = model_cls_name[0] elif value: # critic/rm model_cls_name = model_cls_name[1] module = importlib.import_module(f"verl.models.{module_name}.{megatron}.modeling_{module_name}_megatron") return getattr(module, model_cls_name, None) @staticmethod def get_supported_archs() -> list[str]: return list(_MODELS.keys()) ================================================ FILE: verl_distillation/verl/models/transformers/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: verl_distillation/verl/models/transformers/apertus.py ================================================ # Copyright 2025 The SwissAI Initiative # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
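# Usage sketch for ModelRegistry (defined in verl/models/registry.py above); this assumes
# a working Megatron environment where the corresponding modeling modules are importable:
#
#     from verl.models.registry import ModelRegistry
#
#     actor_cls = ModelRegistry.load_model_cls("Qwen2ForCausalLM", value=False)
#     # -> ParallelQwen2ForCausalLMRmPadPP (actor/ref)
#     critic_cls = ModelRegistry.load_model_cls("Qwen2ForCausalLM", value=True)
#     # -> ParallelQwen2ForValueRmPadPP (critic/rm)
#     print(ModelRegistry.get_supported_archs())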
import sys from typing import Callable, Optional import torch if sys.version_info >= (3, 11): pass else: pass from transformers.cache_utils import Cache from transformers.models.apertus.modeling_apertus import apply_rotary_pos_emb from transformers.utils import logging # Import compatibility wrapper for flash_attn_supports_top_left_mask from verl.utils.ulysses import ( gather_heads_scatter_seq, gather_seq_scatter_heads, get_ulysses_sequence_parallel_world_size, validate_ulysses_config, ) logger = logging.get_logger(__name__) def apertus_attn_forward( self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_value: Optional[Cache] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs, ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: """ Adapted from transformers 4.49.0 to support Ulysses sequence parallelism for transformers >= 4.48.0. Key differences from Llama attention: - QK normalization applied after Q/K projections NOTE: This function has been tested only on transformers versions between 4.48.0 and 4.50.0. """ from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS from transformers.models.apertus.modeling_apertus import eager_attention_forward bsz, q_len, _ = hidden_states.shape query_states = self.q_proj(hidden_states).view(bsz, q_len, -1, self.head_dim).transpose(1, 2) key_states = self.k_proj(hidden_states).view(bsz, q_len, -1, self.head_dim).transpose(1, 2) value_states = self.v_proj(hidden_states).view(bsz, q_len, -1, self.head_dim).transpose(1, 2) query_states = self.q_norm(query_states) key_states = self.k_norm(key_states) ########## AlltoAll for Ulysses ########## ulysses_sp_size = get_ulysses_sequence_parallel_world_size() if ulysses_sp_size > 1: validate_ulysses_config(self.config.num_attention_heads, ulysses_sp_size) query_states = gather_seq_scatter_heads(query_states, seq_dim=2, head_dim=1) key_states = gather_seq_scatter_heads(key_states, seq_dim=2, head_dim=1) value_states = gather_seq_scatter_heads(value_states, seq_dim=2, head_dim=1) full_q_len = query_states.size(2) cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_value is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False): logger.warning_once( "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. " "Falling back to eager attention. This warning can be removed using the argument " '`attn_implementation="eager"` when loading the model.' 
) else: attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs, ) attn_output = attn_output.reshape(bsz, full_q_len, -1, self.head_dim).contiguous() ########## AlltoAll for Ulysses ########## if ulysses_sp_size > 1: attn_output = gather_heads_scatter_seq(attn_output, seq_dim=1, head_dim=2) attn_output = attn_output.reshape(bsz, q_len, -1).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights ================================================ FILE: verl_distillation/verl/models/transformers/dense_common.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from typing import Optional, Union import torch from transformers.cache_utils import Cache from transformers.modeling_outputs import CausalLMOutputWithPast @dataclass class CausalLMOutputForPPO(CausalLMOutputWithPast): log_probs: Optional[torch.FloatTensor] = None entropy: Optional[torch.FloatTensor] = None def forward_base_model( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, ) -> CausalLMOutputWithPast: r""" Copy paste LLaMa's forward https://github.com/linkedin/Liger-Kernel/blob/main/src/liger_kernel/transformers/model/llama.py This function should be generic enough for all pure text models. 
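    It only runs the base `self.model` and returns its outputs; the PPO-specific
    wrappers below consume the resulting hidden states.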
```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, ) return outputs def forward_with_torch_backend( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union["Cache", list[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: int | torch.Tensor = 0, temperature: float = 1.0, **loss_kwargs, ) -> tuple | CausalLMOutputForPPO: from verl.utils.experimental.torch_functional import FusedLinearForPPO outputs = forward_base_model( self, input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, cache_position=cache_position, ) hidden_states = outputs[0] if not return_dict: raise NotImplementedError("forward_with_torch_backend has to return_dict") # Loss calculations if labels is not None: rolled_labels = torch.roll(labels, shifts=-1, dims=-1) elif input_ids is not None: rolled_labels = torch.roll(input_ids, shifts=-1, dims=-1) else: raise RuntimeError("To use forward_with_torch_backend, either labels or input_ids must be provided.") fused_linear_for_ppo = FusedLinearForPPO() log_probs, entropy = fused_linear_for_ppo.forward( hidden_states=hidden_states, vocab_weights=self.lm_head.weight, input_ids=rolled_labels, temperature=temperature, ) return CausalLMOutputForPPO( log_probs=log_probs, entropy=entropy, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def forward_with_triton_backend( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union["Cache", list[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: int | torch.Tensor = 0, temperature: float = 1.0, **loss_kwargs, ) -> tuple | CausalLMOutputForPPO: from verl.utils.kernel.linear_cross_entropy import linear_cross_entropy outputs = forward_base_model( self, input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, 
cache_position=cache_position, ) hidden_states = outputs[0] if not return_dict: raise NotImplementedError("forward_with_triton_backend has to return_dict") # Loss calculations if labels is not None: rolled_labels = torch.roll(labels, shifts=-1, dims=-1) elif input_ids is not None: rolled_labels = torch.roll(input_ids, shifts=-1, dims=-1) else: raise RuntimeError("To use forward_with_triton_backend, either labels or input_ids must be provided.") log_probs, entropy = linear_cross_entropy( hidden_states, self.lm_head.weight, rolled_labels, temperature, "none", ) return CausalLMOutputForPPO( log_probs=log_probs, entropy=entropy, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) ================================================ FILE: verl_distillation/verl/models/transformers/glm4v.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import itertools import logging import os from dataclasses import dataclass from typing import Optional import torch import torch.distributed as dist from transformers.modeling_flash_attention_utils import _flash_attention_forward, fa_peft_integration_check from transformers.models.glm4v.modeling_glm4v import ( Glm4vCausalLMOutputWithPast, Glm4vForConditionalGeneration, Glm4vTextAttention, ) from transformers.utils import is_flash_attn_2_available, is_flash_attn_greater_or_equal_2_10 from verl.utils.device import is_npu_available from verl.utils.ulysses import ( gather_heads_scatter_seq, gather_seq_scatter_heads, get_ulysses_sequence_parallel_group, get_ulysses_sequence_parallel_world_size, validate_ulysses_config, ) logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) if is_flash_attn_2_available(): from flash_attn import flash_attn_func, flash_attn_varlen_func _flash_supports_window_size = "window_size" in inspect.signature(flash_attn_func).parameters _flash_supports_deterministic = "deterministic" in inspect.signature(flash_attn_func).parameters _flash_use_top_left_mask = not is_flash_attn_greater_or_equal_2_10() if is_npu_available: from transformers.integrations.npu_flash_attention import npu_flash_attn_func as flash_attn_func from transformers.integrations.npu_flash_attention import npu_flash_attn_varlen_func as flash_attn_varlen_func from transformers.modeling_flash_attention_utils import flash_attn_supports_top_left_mask _flash_supports_window_size = "window_size" in inspect.signature(flash_attn_func).parameters _flash_supports_deterministic = "deterministic" in inspect.signature(flash_attn_func).parameters _flash_use_top_left_mask = flash_attn_supports_top_left_mask() _flash_deterministic_enabled = os.getenv("FLASH_ATTENTION_DETERMINISTIC", "0") == "1" def get_rope_index( processor, input_ids: torch.Tensor, image_grid_thw: Optional[torch.LongTensor] = None, video_grid_thw: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, ) -> 
torch.Tensor: """ Gets the position ids for GLM4V in padding-free format. The batch dim has been removed and the input_ids should be a 1D tensor representing a single example. """ spatial_merge_size = processor.image_processor.merge_size image_token_id = processor.tokenizer.convert_tokens_to_ids("<|image|>") video_start_token_id = processor.tokenizer.convert_tokens_to_ids("<|begin_of_video|>") video_end_token_id = processor.tokenizer.convert_tokens_to_ids("<|end_of_video|>") if input_ids is not None and (image_grid_thw is not None or video_grid_thw is not None): if attention_mask is None: attention_mask = torch.ones_like(input_ids) position_ids = torch.ones(3, input_ids.size(0), dtype=input_ids.dtype, device=input_ids.device) # (3, seqlen) image_index, video_index = 0, 0 video_group_index = 0 input_ids_filtered = input_ids[attention_mask == 1] input_tokens = input_ids_filtered.tolist() input_token_type = [] video_check_flg = False for token in input_tokens: if token == video_start_token_id: video_check_flg = True elif token == video_end_token_id: video_check_flg = False if token == image_token_id and not video_check_flg: input_token_type.append("image") elif token == image_token_id and video_check_flg: input_token_type.append("video") else: input_token_type.append("text") input_type_group = [] for key, group in itertools.groupby(enumerate(input_token_type), lambda x: x[1]): group = list(group) start_index = group[0][0] end_index = group[-1][0] + 1 input_type_group.append((key, start_index, end_index)) llm_pos_ids_list = [] video_frame_num = 1 for modality_type, start_idx, end_idx in input_type_group: st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 if modality_type == "image": t, h, w = ( image_grid_thw[image_index][0], image_grid_thw[image_index][1], image_grid_thw[image_index][2], ) llm_grid_t, llm_grid_h, llm_grid_w = ( t.item(), h.item() // spatial_merge_size, w.item() // spatial_merge_size, ) t_index = torch.arange(llm_grid_t).view(-1, 1).expand(-1, llm_grid_h * llm_grid_w).flatten() h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(llm_grid_t, -1, llm_grid_w).flatten() w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(llm_grid_t, llm_grid_h, -1).flatten() llm_pos_ids_list.append(torch.stack([t_index, h_index, w_index]) + st_idx) image_index += 1 video_frame_num = 1 elif modality_type == "video": t, h, w = ( video_frame_num, video_grid_thw[video_index][1], video_grid_thw[video_index][2], ) llm_grid_t, llm_grid_h, llm_grid_w = ( t, h.item() // spatial_merge_size, w.item() // spatial_merge_size, ) for t_idx in range(llm_grid_t): t_index = torch.tensor(t_idx).view(-1, 1).expand(-1, llm_grid_h * llm_grid_w).flatten() h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(1, -1, llm_grid_w).flatten() w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(1, llm_grid_h, -1).flatten() llm_pos_ids_list.append(torch.stack([t_index, h_index, w_index]) + st_idx) video_group_index += 1 if video_group_index >= video_grid_thw[video_index][0]: video_index += 1 video_group_index = 0 video_frame_num += 1 else: text_len = end_idx - start_idx llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx) video_frame_num = 1 llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1) position_ids[..., attention_mask == 1] = llm_positions.to(position_ids.device) else: if attention_mask is not None: position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) position_ids = 
position_ids.unsqueeze(0).expand(3, -1).to(input_ids.device)
        else:
            position_ids = torch.arange(input_ids.shape[0], device=input_ids.device).view(1, -1).expand(3, -1)

    return position_ids


def prepare_fa2_from_position_ids(
    query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, position_ids: torch.Tensor
):
    assert position_ids.ndim == 2  # (batch_size, seq_length)
    query = query.contiguous().view(-1, query.size(-2), query.size(-1))
    key = key.contiguous().view(-1, key.size(-2), key.size(-1))
    value = value.contiguous().view(-1, value.size(-2), value.size(-1))
    position_ids = position_ids.view(-1)
    cu_seqlens = torch.cat(
        (
            (position_ids == 0).nonzero().view(-1).to(torch.int32),
            torch.tensor(position_ids.size(), device=position_ids.device, dtype=torch.int32),
        )
    )
    max_length = cu_seqlens.diff().max()  # use cu_seqlens to infer max_length for qwen2vl mrope
    return (query, key, value, (cu_seqlens, cu_seqlens), (max_length, max_length))


def _custom_flash_attention_forward(
    query_states: torch.Tensor,
    key_states: torch.Tensor,
    value_states: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    query_length: int,
    is_causal: bool = True,
    position_ids: Optional[torch.Tensor] = None,
    use_top_left_mask: bool = False,
    deterministic: Optional[bool] = None,
    **kwargs,
):
    """
    Patches flash attention forward to handle 3D position ids in mrope. (3, batch_size, seq_length)
    """
    # Assuming 4D tensors, key_states.shape[1] is the key/value sequence length (source length).
    flash_kwargs = {}
    if _flash_supports_deterministic:
        flash_kwargs["deterministic"] = deterministic if deterministic is not None else _flash_deterministic_enabled
    if kwargs.get("softcap") is not None:
        flash_kwargs["softcap"] = kwargs.pop("softcap")

    query_states, key_states, value_states = fa_peft_integration_check(
        query_states, key_states, value_states, target_dtype=torch.bfloat16
    )

    # Resolve the sequence-parallel size up front so the post-attention gather below
    # is well-defined even when no position_ids are passed in.
    sp_size = get_ulysses_sequence_parallel_world_size()
    if position_ids is not None:
        assert position_ids.ndim == 2  # (batch_size, seq_length / sp_size)
        if sp_size > 1:
            # qkv: (batch_size, seq_length / sp_size, num_head, head_size)
            validate_ulysses_config(query_states.size(2), sp_size)
            query_states = gather_seq_scatter_heads(query_states, seq_dim=1, head_dim=2)
            key_states = gather_seq_scatter_heads(key_states, seq_dim=1, head_dim=2)
            value_states = gather_seq_scatter_heads(value_states, seq_dim=1, head_dim=2)
            position_ids_lst = [torch.empty_like(position_ids) for _ in range(sp_size)]
            dist.all_gather(position_ids_lst, position_ids, group=get_ulysses_sequence_parallel_group())
            position_ids = torch.cat(position_ids_lst, dim=-1)  # (batch_size, seq_length)

    if position_ids is not None and query_length != 1 and not (torch.diff(position_ids, dim=-1) >= 0).all():
        batch_size = query_states.size(0)
        q, k, v, (cu_seqlens_q, cu_seqlens_k), (max_seqlen_q, max_seqlen_k) = prepare_fa2_from_position_ids(
            query_states, key_states, value_states, position_ids
        )
        attn_output = flash_attn_varlen_func(
            q=q,
            k=k,
            v=v,
            cu_seqlens_q=cu_seqlens_q,
            cu_seqlens_k=cu_seqlens_k,
            max_seqlen_q=max_seqlen_q,
            max_seqlen_k=max_seqlen_k,
            dropout_p=kwargs.pop("dropout", 0.0),
            softmax_scale=kwargs.pop("softmax_scale", None),
            causal=is_causal,
            **flash_kwargs,
        )
        attn_output = attn_output.view(batch_size, -1, attn_output.size(-2), attn_output.size(-1))
    else:
        attn_output = _flash_attention_forward(
            query_states,
            key_states,
            value_states,
            attention_mask,
            query_length,
            is_causal=is_causal,
            use_top_left_mask=use_top_left_mask,
            deterministic=deterministic,
            **kwargs,
        )  # do not pass position_ids to old 
flash_attention_forward if sp_size > 1: # (batch_size, seq_length, num_head, head_size) attn_output = gather_heads_scatter_seq(attn_output, head_dim=2, seq_dim=1) return attn_output def glm4v_attn_forward( self: "Glm4vTextAttention", hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, # will become mandatory in v4.46 **kwargs, ) -> tuple[torch.Tensor, None, None]: from transformers.models.glm4v.modeling_glm4v import apply_multimodal_rotary_pos_emb, repeat_kv bsz, q_len, _ = hidden_states.size() # q_len = seq_length / sp_size query_states = self.q_proj(hidden_states) # (batch_size, seq_length / sp_size, num_heads * head_size) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) # Because the input can be padded, the absolute sequence length depends on the max position id. cos, sin = position_embeddings query_states, key_states = apply_multimodal_rotary_pos_emb( query_states, key_states, cos, sin, self.rope_scaling["mrope_section"] ) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) dropout_rate = 0.0 if not self.training else self.attention_dropout # This is before the transpose q_len = query_states.shape[2] # FA2 uses non-transposed inputs query_states = query_states.transpose(1, 2) key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) attn_output = _custom_flash_attention_forward( query_states, key_states, value_states, attention_mask, query_length=q_len, is_causal=getattr(self, "is_causal", True), dropout=dropout_rate, use_top_left_mask=_flash_use_top_left_mask, position_ids=position_ids, # important: pass position ids ) # (batch_size, seq_length / sp_size, num_head, head_size) attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous() attn_output = self.o_proj(attn_output) return attn_output, None def _get_input_embeds( model: "Glm4vForConditionalGeneration", input_ids: torch.LongTensor, attention_mask: Optional[torch.Tensor] = None, pixel_values: Optional[torch.FloatTensor] = None, pixel_values_videos: Optional[torch.FloatTensor] = None, image_grid_thw: Optional[torch.LongTensor] = None, video_grid_thw: Optional[torch.LongTensor] = None, ): inputs_embeds = model.get_input_embeddings()(input_ids) if pixel_values is not None: pixel_values = pixel_values.type(model.visual.dtype) image_embeds = model.visual(pixel_values, grid_thw=image_grid_thw) n_image_tokens = (input_ids == model.config.image_token_id).sum().item() n_image_features = image_embeds.shape[0] if n_image_tokens != n_image_features: raise ValueError( f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {n_image_features}" ) mask = input_ids == model.config.image_token_id mask_unsqueezed = mask.unsqueeze(-1) mask_expanded = mask_unsqueezed.expand_as(inputs_embeds) image_mask = mask_expanded.to(inputs_embeds.device) image_embeds = image_embeds.to(inputs_embeds.device, inputs_embeds.dtype) inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds) if pixel_values_videos is not None: pixel_values_videos = 
pixel_values_videos.type(model.visual.dtype) video_embeds = model.visual(pixel_values_videos, grid_thw=video_grid_thw) n_video_tokens = (input_ids == model.config.video_token_id).sum().item() n_video_features = video_embeds.shape[0] if n_video_tokens != n_video_features: raise ValueError( f"Video features and video tokens do not match: tokens: {n_video_tokens}, features {n_video_features}" ) mask = input_ids == model.config.video_token_id mask_unsqueezed = mask.unsqueeze(-1) mask_expanded = mask_unsqueezed.expand_as(inputs_embeds) video_mask = mask_expanded.to(inputs_embeds.device) video_embeds = video_embeds.to(inputs_embeds.device, inputs_embeds.dtype) inputs_embeds = inputs_embeds.masked_scatter(video_mask, video_embeds) if pixel_values is None and pixel_values_videos is None: # handle mixed text-image data pixel_values = torch.zeros((16, 1176), dtype=inputs_embeds.dtype, device=inputs_embeds.device) image_grid_thw = torch.tensor([[1, 4, 4]], dtype=torch.long, device=inputs_embeds.device) image_embeds = model.visual(pixel_values, grid_thw=image_grid_thw) inputs_embeds += 0.0 * image_embeds.mean() if attention_mask is not None: attention_mask = attention_mask.to(inputs_embeds.device) return inputs_embeds, attention_mask def process_position_ids(position_ids: torch.Tensor) -> torch.Tensor: if position_ids.ndim != 3 or position_ids.size(0) != 4: # we concat the text position ids with the 3D vision position ids by default # see https://github.com/huggingface/transformers/pull/39447 raise ValueError("position_ids should be a 3D tensor of shape (4, batch_size, seq_length).") return position_ids @dataclass class Glm4vCausalLMOutputForPPO(Glm4vCausalLMOutputWithPast): log_probs: Optional[torch.FloatTensor] = None entropy: Optional[torch.FloatTensor] = None def glm4v_base_forward( self: "Glm4vForConditionalGeneration", input_ids: torch.LongTensor, attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, pixel_values_videos: Optional[torch.FloatTensor] = None, image_grid_thw: Optional[torch.LongTensor] = None, video_grid_thw: Optional[torch.LongTensor] = None, **kwargs, ): kwargs["inputs_embeds"], kwargs["attention_mask"] = _get_input_embeds( self, input_ids, attention_mask, pixel_values, pixel_values_videos, image_grid_thw, video_grid_thw ) # avoid lora module having multiple keyword arguments return self.language_model( input_ids=None, **kwargs, ) def glm4v_forward( self: "Glm4vForConditionalGeneration", input_ids: torch.LongTensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, pixel_values_videos: Optional[torch.FloatTensor] = None, image_grid_thw: Optional[torch.LongTensor] = None, video_grid_thw: Optional[torch.LongTensor] = None, **kwargs, ): return self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=process_position_ids(position_ids), pixel_values=pixel_values, pixel_values_videos=pixel_values_videos, image_grid_thw=image_grid_thw, video_grid_thw=video_grid_thw, **kwargs, ) def forward_with_normal_backend( self: Glm4vForConditionalGeneration, input_ids: torch.LongTensor = None, labels: Optional[torch.LongTensor] = None, temperature: float = 1.0, **kwargs, ) -> "Glm4vCausalLMOutputWithPast": outputs = glm4v_forward(self, input_ids, **kwargs) hidden_states = outputs[0] logits = self.lm_head(hidden_states) return Glm4vCausalLMOutputWithPast( logits=logits, 
hidden_states=outputs.hidden_states, ) def forward_with_torch_backend( self: Glm4vForConditionalGeneration, input_ids: torch.LongTensor = None, labels: Optional[torch.LongTensor] = None, temperature: float = 1.0, **kwargs, ) -> tuple | Glm4vCausalLMOutputForPPO: from verl.utils.experimental.torch_functional import FusedLinearForPPO outputs = glm4v_forward(self, input_ids, **kwargs) hidden_states = outputs[0] # Loss calculations if labels is not None: rolled_labels = torch.roll(labels, shifts=-1, dims=-1) elif input_ids is not None: rolled_labels = torch.roll(input_ids, shifts=-1, dims=-1) else: raise RuntimeError("To use forward_with_torch_backend, either labels or input_ids must be provided.") fused_linear_for_ppo = FusedLinearForPPO() log_probs, entropy = fused_linear_for_ppo.forward( hidden_states=hidden_states, vocab_weights=self.lm_head.weight, input_ids=rolled_labels, temperature=temperature, ) return Glm4vCausalLMOutputForPPO( log_probs=log_probs, entropy=entropy, hidden_states=outputs.hidden_states, ) def forward_with_triton_backend( self: Glm4vForConditionalGeneration, input_ids: torch.LongTensor = None, labels: Optional[torch.LongTensor] = None, temperature: float = 1.0, **kwargs, ) -> tuple | Glm4vCausalLMOutputForPPO: from verl.utils.kernel.linear_cross_entropy import linear_cross_entropy outputs = glm4v_forward(self, input_ids, **kwargs) hidden_states = outputs[0] # Loss calculations if labels is not None: rolled_labels = torch.roll(labels, shifts=-1, dims=-1) elif input_ids is not None: rolled_labels = torch.roll(input_ids, shifts=-1, dims=-1) else: raise RuntimeError("To use forward_with_triton_backend, either labels or input_ids must be provided.") log_probs, entropy = linear_cross_entropy( hidden_states, self.lm_head.weight, rolled_labels, temperature, "none", ) return Glm4vCausalLMOutputForPPO( log_probs=log_probs, entropy=entropy, hidden_states=outputs.hidden_states, ) ================================================ FILE: verl_distillation/verl/models/transformers/kimi_vl.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
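
# This module's apply_rotary_pos_emb differs from the stock Llama helper in one
# step: q and k are reordered from the interleaved pair layout (x0, y0, x1, y1, ...)
# into the half-split layout (x0, x1, ..., y0, y1, ...) before the usual rotation.
# A minimal sketch of that reordering (illustrative only, assuming nothing beyond
# torch itself):
#
#     import torch
#
#     def interleaved_to_half(x: torch.Tensor) -> torch.Tensor:
#         b, h, s, d = x.shape
#         return x.view(b, h, s, d // 2, 2).transpose(4, 3).reshape(b, h, s, d)
#
#     x = torch.arange(8).view(1, 1, 1, 8)  # pairs (0,1) (2,3) (4,5) (6,7)
#     interleaved_to_half(x)                # tensor([[[[0, 2, 4, 6, 1, 3, 5, 7]]]])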
from typing import Optional import torch import torch.nn.functional as F from transformers.cache_utils import Cache from transformers.modeling_flash_attention_utils import _flash_attention_forward from verl.models.transformers.monkey_patch import is_transformers_version_in_range # Import compatibility wrapper for flash_attn_supports_top_left_mask from verl.utils.transformers_compat import flash_attn_supports_top_left_mask from verl.utils.ulysses import ( gather_heads_scatter_seq, gather_seq_scatter_heads, get_ulysses_sequence_parallel_world_size, validate_ulysses_config, ) # Copied from transformers.models.llama.modeling_llama.rotate_half def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1) # Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1): """Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`): The position indices of the tokens corresponding to the query and key tensors. For example, this can be used to pass offsetted position ids when working with a KV-cache. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. """ cos = cos[position_ids].unsqueeze(unsqueeze_dim) sin = sin[position_ids].unsqueeze(unsqueeze_dim) b, h, s, d = q.shape q = q.view(b, h, s, d // 2, 2).transpose(4, 3).reshape(b, h, s, d) b, h, s, d = k.shape k = k.view(b, h, s, d // 2, 2).transpose(4, 3).reshape(b, h, s, d) q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed # Copied from transformers.models.llama.modeling_llama.repeat_kv def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: """ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). 
The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) """ batch, num_key_value_heads, slen, head_dim = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) def _ulysses_flash_attn_forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, output_attentions: bool = False, use_cache: bool = False, **kwargs, ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: bsz, q_len, _ = hidden_states.size() if self.q_lora_rank is None: q = self.q_proj(hidden_states) else: q = self.q_b_proj(self.q_a_layernorm(self.q_a_proj(hidden_states))) q = q.view(bsz, q_len, self.num_heads, self.q_head_dim).transpose(1, 2) # Flash attention requires the input to have the shape # batch_size x seq_length x head_dim x hidden_dim # therefore we just need to keep the original shape compressed_kv = self.kv_a_proj_with_mqa(hidden_states) compressed_kv, k_pe = torch.split(compressed_kv, [self.kv_lora_rank, self.qk_rope_head_dim], dim=-1) k_pe = k_pe.view(bsz, q_len, 1, self.qk_rope_head_dim).transpose(1, 2) kv = ( self.kv_b_proj(self.kv_a_layernorm(compressed_kv)) .view(bsz, q_len, self.num_heads, self.qk_nope_head_dim + self.v_head_dim) .transpose(1, 2) ) k_nope, value_states = torch.split(kv, [self.qk_nope_head_dim, self.v_head_dim], dim=-1) # patch ulysses_sp_size = get_ulysses_sequence_parallel_world_size() if ulysses_sp_size > 1: validate_ulysses_config(self.num_heads, ulysses_sp_size) num_key_value_groups = self.config.num_attention_heads // self.config.num_key_value_heads k_pe = repeat_kv(k_pe, ulysses_sp_size) # to keep heads=1 after a2a k_nope = repeat_kv(k_nope, num_key_value_groups) value_states = repeat_kv(value_states, num_key_value_groups) q = gather_seq_scatter_heads(q, seq_dim=2, head_dim=1) k_pe = gather_seq_scatter_heads(k_pe, seq_dim=2, head_dim=1) k_nope = gather_seq_scatter_heads(k_nope, seq_dim=2, head_dim=1) value_states = gather_seq_scatter_heads(value_states, seq_dim=2, head_dim=1) # (batch_size, num_head / sp_size, seq_length, head_size) full_q_len = q.size(2) # full_q_len = seq_length else: full_q_len = q_len q_nope, q_pe = torch.split(q, [self.qk_nope_head_dim, self.qk_rope_head_dim], dim=-1) cos, sin = self.rotary_emb(value_states, seq_len=full_q_len) q_pe, k_pe = apply_rotary_pos_emb(q_pe, k_pe, cos, sin, position_ids) query_states = k_pe.new_empty(bsz, self.num_heads // ulysses_sp_size, full_q_len, self.q_head_dim) query_states[:, :, :, : self.qk_nope_head_dim] = q_nope query_states[:, :, :, self.qk_nope_head_dim :] = q_pe key_states = k_pe.new_empty(bsz, self.num_heads // ulysses_sp_size, full_q_len, self.q_head_dim) key_states[:, :, :, : self.qk_nope_head_dim] = k_nope key_states[:, :, :, self.qk_nope_head_dim :] = k_pe if self.q_head_dim != self.v_head_dim: value_states = F.pad(value_states, [0, self.q_head_dim - self.v_head_dim]) # TODO: These transpose are quite inefficient but Flash Attention requires the layout # [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache # to be able to avoid many of these transpose/reshape/view. 
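    # The flash-attention kernels consume (batch_size, seq_length, num_heads, head_dim),
    # whereas everything above works in (batch_size, num_heads, seq_length, head_dim).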
query_states = query_states.transpose(1, 2) key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) dropout_rate = self.attention_dropout if self.training else 0.0 attn_output = _flash_attention_forward( query_states, key_states, value_states, attention_mask, full_q_len, dropout=dropout_rate, sliding_window=None, is_causal=self.is_causal, use_top_left_mask=flash_attn_supports_top_left_mask(), position_ids=position_ids, # important: pass position ids softmax_scale=self.softmax_scale, ) if ulysses_sp_size > 1: attn_output = gather_heads_scatter_seq(attn_output, head_dim=2, seq_dim=1) if self.q_head_dim != self.v_head_dim: attn_output = attn_output[:, :, :, : self.v_head_dim] attn_output = attn_output.reshape(bsz, q_len, self.num_heads * self.v_head_dim).contiguous() attn_output = self.o_proj(attn_output) if is_transformers_version_in_range(min_version="4.53.0"): return attn_output, None else: return attn_output, None, None ================================================ FILE: verl_distillation/verl/models/transformers/llama.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys from typing import Callable, Optional import torch if sys.version_info >= (3, 11): pass else: pass from transformers.cache_utils import Cache from transformers.modeling_flash_attention_utils import _flash_attention_forward from transformers.models.llama.modeling_llama import apply_rotary_pos_emb from transformers.utils import logging # Import compatibility wrapper for flash_attn_supports_top_left_mask from verl.utils.transformers_compat import flash_attn_supports_top_left_mask from verl.utils.ulysses import ( gather_heads_scatter_seq, gather_seq_scatter_heads, get_ulysses_sequence_parallel_world_size, validate_ulysses_config, ) logger = logging.get_logger(__name__) def llama_flash_attn_forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, output_attentions: bool = False, use_cache: bool = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, # will become mandatory in v4.46 **kwargs, ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: """ Adapted from transformers 4.47.1 to support Ulysses sequence parallelism. NOTE: This function is used for transformers versions in the range [4.45.0, 4.47.1]. 
""" output_attentions = False bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) # Flash attention requires the input to have the shape # batch_size x seq_length x head_dim x hidden_dim # therefore we just need to keep the original shape query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) # trade off: repeat first and then all to all # key_states = repeat_kv(key_states, self.num_key_value_groups) # value_states = repeat_kv(value_states, self.num_key_value_groups) ########## AlltoAll for Ulysses ########## ulysses_sp_size = get_ulysses_sequence_parallel_world_size() if ulysses_sp_size > 1: validate_ulysses_config(self.num_heads, ulysses_sp_size) # (bsz, n_head, seq_len/n, head_dim) -> (bsz, n_head/n, seq_len, head_dim) query_states = gather_seq_scatter_heads(query_states, seq_dim=2, head_dim=1) key_states = gather_seq_scatter_heads(key_states, seq_dim=2, head_dim=1) value_states = gather_seq_scatter_heads(value_states, seq_dim=2, head_dim=1) full_q_len = query_states.size(2) # full seq length if position_embeddings is None: logger.warning_once( "The attention layers in this model are transitioning from computing the RoPE embeddings internally " "through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed " "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.46 `position_ids` will be " "removed and `position_embeddings` will be mandatory." ) cos, sin = self.rotary_emb(value_states, position_ids) else: cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_value is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) # TODO: These transpose are quite inefficient but Flash Attention requires the layout # [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache # to be able to avoid many of these transpose/reshape/view. query_states = query_states.transpose(1, 2) key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) dropout_rate = self.attention_dropout if self.training else 0.0 # In PEFT, usually we cast the layer norms in float32 for training stability reasons # therefore the input hidden states gets silently casted in float32. Hence, we need # cast them back in the correct dtype just to be sure everything works as expected. # This might slowdown training & inference so it is recommended to not cast the LayerNorms # in fp32. 
(LlamaRMSNorm handles it correctly) input_dtype = query_states.dtype if input_dtype == torch.float32: if torch.is_autocast_enabled(): target_dtype = torch.get_autocast_gpu_dtype() # Handle the case where the model is quantized elif hasattr(self.config, "_pre_quantization_dtype"): target_dtype = self.config._pre_quantization_dtype else: target_dtype = self.q_proj.weight.dtype logger.warning_once( f"The input hidden states seems to be silently casted in float32, this might be related to " f"the fact you have upcasted embedding or layer norm layers in float32. We will cast back the " f"input in {target_dtype}." ) query_states = query_states.to(target_dtype) key_states = key_states.to(target_dtype) value_states = value_states.to(target_dtype) attn_output = _flash_attention_forward( query_states, key_states, value_states, attention_mask, full_q_len, position_ids=position_ids, dropout=dropout_rate, sliding_window=getattr(self, "sliding_window", None), use_top_left_mask=flash_attn_supports_top_left_mask(), is_causal=self.is_causal, **kwargs, ) attn_output = attn_output.reshape(bsz, full_q_len, -1, self.head_dim).contiguous() ########## AlltoAll for Ulysses ########## if ulysses_sp_size > 1: attn_output = gather_heads_scatter_seq(attn_output, seq_dim=1, head_dim=2) attn_output = attn_output.reshape(bsz, q_len, -1).contiguous() attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights, past_key_value def llama_attn_forward( self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_value: Optional[Cache] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs, ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: """ Adapted from transformers 4.49.0 to support Ulysses sequence parallelism for transformers >= 4.48.0. NOTE: This function has been tested only on transformers versions between 4.48.0 and 4.50.0. 
""" from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS from transformers.models.llama.modeling_llama import eager_attention_forward bsz, q_len, _ = hidden_states.shape query_states = self.q_proj(hidden_states).view(bsz, q_len, -1, self.head_dim).transpose(1, 2) key_states = self.k_proj(hidden_states).view(bsz, q_len, -1, self.head_dim).transpose(1, 2) value_states = self.v_proj(hidden_states).view(bsz, q_len, -1, self.head_dim).transpose(1, 2) ########## AlltoAll for Ulysses ########## ulysses_sp_size = get_ulysses_sequence_parallel_world_size() if ulysses_sp_size > 1: validate_ulysses_config(self.config.num_attention_heads, ulysses_sp_size) query_states = gather_seq_scatter_heads(query_states, seq_dim=2, head_dim=1) key_states = gather_seq_scatter_heads(key_states, seq_dim=2, head_dim=1) value_states = gather_seq_scatter_heads(value_states, seq_dim=2, head_dim=1) full_q_len = query_states.size(2) cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_value is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False): logger.warning_once( "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. " "Falling back to eager attention. This warning can be removed using the argument " '`attn_implementation="eager"` when loading the model.' ) else: attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs, ) attn_output = attn_output.reshape(bsz, full_q_len, -1, self.head_dim).contiguous() ########## AlltoAll for Ulysses ########## if ulysses_sp_size > 1: attn_output = gather_heads_scatter_seq(attn_output, seq_dim=1, head_dim=2) attn_output = attn_output.reshape(bsz, q_len, -1).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights ================================================ FILE: verl_distillation/verl/models/transformers/monkey_patch.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" Apply monkey-patch function to models """ import sys from types import SimpleNamespace from typing import Optional import torch from transformers.modeling_flash_attention_utils import _flash_attention_forward from transformers.modeling_utils import PreTrainedModel from verl.utils.import_utils import is_trl_available from verl.utils.transformers_compat import is_transformers_version_in_range from verl.utils.ulysses import ( gather_heads_scatter_seq, gather_seq_scatter_heads, get_ulysses_sequence_parallel_group, get_ulysses_sequence_parallel_world_size, slice_input_tensor, ) def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: """ This is the equivalent of torch.repeat_interleave(x, dim=2, repeats=n_rep). The hidden states go from (batch, seqlen, num_key_value_heads, head_dim) to (batch, seqlen, num_attention_heads, head_dim) """ batch, slen, num_key_value_heads, head_dim = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, :, None, :].expand(batch, slen, num_key_value_heads, n_rep, head_dim) return hidden_states.reshape(batch, slen, num_key_value_heads * n_rep, head_dim) def _ulysses_flash_attention_forward( query_states: torch.Tensor, key_states: torch.Tensor, value_states: torch.Tensor, attention_mask: Optional[torch.Tensor], query_length: int, *args, position_ids: Optional[torch.Tensor] = None, **kwargs, ): """Insert all-to-all before and after flash attention. DeepSpeed-Ulysses: https://arxiv.org/pdf/2309.14509 For transformers>=4.55, the flash attention api has changed, we need to pass the query_length after doing ulysses all2all. See https://github.com/huggingface/transformers/issues/40399 Args: query_states (torch.Tensor): (batch_size, seqlen/sp_size, nheads, head_dim) key_states (torch.Tensor): (batch_size, seqlen/sp_size, nheads_k, head_dim) value_states (torch.Tensor): (batch_size, seqlen/sp_size, nheads_k, head_dim) position_ids (torch.Tensor, optional): (batch_size, seqlen/sp_size) Returns: torch.Tensor: (batch_size, seqlen/sp_size, nheads, head_dim) """ ulysses_sp_size = get_ulysses_sequence_parallel_world_size() ########## AlltoAll for Ulysses ########## if ulysses_sp_size > 1: assert position_ids is not None, "position_ids is required for Ulysses sequence parallelism" # NOTE: repeat kv heads to be divided by sequence parallel. Instead of repeating nheads_q//nheads_k, # we choose to repeat sp_size//nheads_k, since flash_attention supports MQA/GQA. # For example: # - nheads_k=4, sp=8, repeats=2 # - nheads_k=8, sp=8, repeats=1 # - nheads_k=16, sp=8, repeats=1 repeats = max(ulysses_sp_size // key_states.size(2), 1) key_states = repeat_kv(key_states, repeats) value_states = repeat_kv(value_states, repeats) # (bsz, seq_len/n, n_head, head_dim) -> (bsz, seq_len, n_head/n, head_dim) query_states = gather_seq_scatter_heads(query_states, seq_dim=1, head_dim=2) key_states = gather_seq_scatter_heads(key_states, seq_dim=1, head_dim=2) value_states = gather_seq_scatter_heads(value_states, seq_dim=1, head_dim=2) # TODO: all_gather position_ids because `prepare_fa2_from_position_ids` needs it, we can eliminate # this all_gather by passing cu_seq_lens_q, cu_seq_lens_k, max_length_k, max_length_q explicitly. 
# https://github.com/huggingface/transformers/pull/33932 # (bsz, seq_len/n) -> (bsz, seq_len) position_ids_list = [torch.empty_like(position_ids) for _ in range(ulysses_sp_size)] torch.distributed.all_gather(position_ids_list, position_ids, group=get_ulysses_sequence_parallel_group()) position_ids = torch.concat(position_ids_list, dim=-1) # (bsz, seq_len, n_head/n, head_dim) query_length = query_states.size(1) attn_output = _flash_attention_forward( query_states, key_states, value_states, attention_mask, query_length, *args, position_ids=position_ids, **kwargs ) ########## AlltoAll for Ulysses ########## if ulysses_sp_size > 1: # (bsz, seq_len, n_head/n, head_dim) -> (bsz, seq_len/n, n_head, head_dim) attn_output = gather_heads_scatter_seq(attn_output, seq_dim=1, head_dim=2) return attn_output def patch_vlm_for_ulysses_input_slicing(model_class: type): """ Applies a monkey patch to the forward method of a given model class to enable Ulysses sequence parallelism input slicing. """ def _create_ulysses_wrapped_decoder_forward(original_forward): def ulysses_wrapped_decoder_forward(self, *args, **kwargs): inputs_embeds = kwargs.get("inputs_embeds") position_ids = kwargs.get("position_ids") visual_pos_masks = kwargs.get("visual_pos_masks") deepstack_visual_embeds = kwargs.get("deepstack_visual_embeds") call_kwargs = kwargs.copy() current_ulysses_sp_size = get_ulysses_sequence_parallel_world_size() slice_now = ( inputs_embeds is not None and current_ulysses_sp_size > 1 and getattr(self, "_needs_initial_slice", True) ) if slice_now: call_kwargs["inputs_embeds"] = slice_input_tensor(inputs_embeds, dim=1, padding=False) call_kwargs["position_ids"] = slice_input_tensor(position_ids, dim=-1, padding=False) # Also slice visual_pos_masks and deepstack_visual_embeds for Qwen3 VL models if visual_pos_masks is not None: original_visual_mask = visual_pos_masks sliced_visual_mask = slice_input_tensor(visual_pos_masks, dim=1, padding=False) call_kwargs["visual_pos_masks"] = sliced_visual_mask if deepstack_visual_embeds is not None: sliced_embeds = [] num_visual_before = original_visual_mask.sum().item() num_visual_in_shard = sliced_visual_mask.sum().item() if num_visual_in_shard > 0 and num_visual_before > 0: # Calculate which visual embeddings belong to this shard # We need to find the offset of visual tokens in this shard from verl.utils.ulysses import get_ulysses_sequence_parallel_rank rank = get_ulysses_sequence_parallel_rank() seq_len = original_visual_mask.shape[1] local_seq_len = seq_len // current_ulysses_sp_size start_idx = rank * local_seq_len end_idx = start_idx + local_seq_len # Get total visual tokens before and up to the end of the shard's sequence slice # This correctly handles batches by summing across all samples visual_start = original_visual_mask[:, :start_idx].sum().item() if start_idx > 0 else 0 visual_end = original_visual_mask[:, :end_idx].sum().item() # Slice each tensor in deepstack_visual_embeds for embed in deepstack_visual_embeds: sliced_embeds.append(embed[visual_start:visual_end]) else: # No visual tokens in this shard, create empty tensors to maintain gradient flow for embed in deepstack_visual_embeds: sliced_embeds.append(embed[:0]) call_kwargs["deepstack_visual_embeds"] = sliced_embeds self._needs_initial_slice = False try: return original_forward(self, *args, **call_kwargs) finally: if slice_now: self._needs_initial_slice = True return ulysses_wrapped_decoder_forward original_forward = model_class.forward wrapped_forward = 
_create_ulysses_wrapped_decoder_forward(original_forward) model_class.forward = wrapped_forward print(f"Monkey patch {model_class.__name__}.forward for Ulysses SP input slicing.") def patch_forward_with_backends( model: PreTrainedModel, use_fused_kernels: bool = False, fused_kernels_backend: str = None, ): """ Choose the forward function based on the model and backend. Args: model (PreTrainedModel): The model to apply the monkey patch. use_fused_kernels (bool): Whether to use fused kernels. fused_kernels_backend (str): The backend to use for fused kernels. """ if not use_fused_kernels or fused_kernels_backend not in ["triton", "torch"]: print( f"Skipping monkey patch for {model.__class__.__name__} as use_fused_kernels is " f"{use_fused_kernels} or fused_kernels_backend is {fused_kernels_backend}" ) return forward_with_torch_backend_function = model.__class__.forward forward_with_triton_backend_function = model.__class__.forward if model.config.model_type in ["qwen2_5_vl", "qwen2_vl"]: from verl.models.transformers.qwen2_vl import forward_with_torch_backend, forward_with_triton_backend forward_with_torch_backend_function = forward_with_torch_backend forward_with_triton_backend_function = forward_with_triton_backend elif model.config.model_type in ["qwen3_vl", "qwen3_vl_moe"]: from verl.models.transformers.qwen3_vl import forward_with_torch_backend, forward_with_triton_backend forward_with_torch_backend_function = forward_with_torch_backend forward_with_triton_backend_function = forward_with_triton_backend elif model.config.model_type == "glm4v": from verl.models.transformers.glm4v import forward_with_torch_backend, forward_with_triton_backend forward_with_torch_backend_function = forward_with_torch_backend forward_with_triton_backend_function = forward_with_triton_backend else: from verl.models.transformers.dense_common import forward_with_torch_backend, forward_with_triton_backend forward_with_torch_backend_function = forward_with_torch_backend forward_with_triton_backend_function = forward_with_triton_backend if fused_kernels_backend == "triton": model.__class__.forward = forward_with_triton_backend_function print(f"Using Triton backend for fused kernels in {model.__class__.__name__}") elif fused_kernels_backend == "torch": model.__class__.forward = forward_with_torch_backend_function print(f"Using Torch backend for fused kernels in {model.__class__.__name__}") else: raise ValueError(f"Unsupported fused_kernels_backend: {fused_kernels_backend}. Choose 'triton' or 'torch'.") def apply_monkey_patch( model: PreTrainedModel, ulysses_sp_size: int = 1, use_remove_padding: bool = True, use_fused_kernels: bool = False, fused_kernels_backend: str = None, ): """ Apply monkey patch to the models for ulysses sequence parallel and fused kernel. In the end of this function forward function of the model is patched for fused kernel. If the model is not supported with fused kernel, please return after patch. 
""" """Replace _flash_attention_forward to _ulysses_flash_attention_forward""" module = sys.modules[model.__module__] try: num_attention_heads, num_key_value_heads = model.config.num_attention_heads, model.config.num_key_value_heads except AttributeError: num_attention_heads, num_key_value_heads = ( model.config.text_config.num_attention_heads, model.config.text_config.num_key_value_heads, ) assert num_attention_heads % ulysses_sp_size == 0, ( f"num_attention_heads {num_attention_heads} must be divisible by ulysses_sp_size {ulysses_sp_size}" ) assert num_key_value_heads % ulysses_sp_size == 0 or ulysses_sp_size % num_key_value_heads == 0, ( f"num_key_value_heads {num_key_value_heads} must be divisible by ulysses_sp_size " f"{ulysses_sp_size}or vise versa. Upon ulysses_sp_size % num_key_value_heads == 0," f"kv heads are repeated to ensure correctness." ) if is_trl_available(): from trl import AutoModelForCausalLMWithValueHead # type: ignore def state_dict(self, *args, **kwargs): return torch.nn.Module.state_dict(self, *args, **kwargs) AutoModelForCausalLMWithValueHead.state_dict = state_dict print("Monkey patch state_dict in AutoModelForCausalLMWithValueHead. ") # TODO: VLM models only, unify monkey patch to LLM models. if model.config.model_type in ["qwen2_5_vl", "qwen2_vl"]: # Step 1: patch model to support image-text mixed data if is_transformers_version_in_range(min_version="4.52.0"): from transformers.models.qwen2_5_vl.modeling_qwen2_5_vl import ( Qwen2_5_VLForConditionalGeneration, Qwen2_5_VLModel, Qwen2_5_VLTextModel, ) from transformers.models.qwen2_vl.modeling_qwen2_vl import ( Qwen2VLForConditionalGeneration, Qwen2VLModel, Qwen2VLTextModel, ) else: from transformers.models.qwen2_5_vl.modeling_qwen2_5_vl import Qwen2_5_VLForConditionalGeneration from transformers.models.qwen2_5_vl.modeling_qwen2_5_vl import Qwen2_5_VLModel as Qwen2_5_VLTextModel from transformers.models.qwen2_vl.modeling_qwen2_vl import Qwen2VLForConditionalGeneration from transformers.models.qwen2_vl.modeling_qwen2_vl import Qwen2VLModel as Qwen2VLTextModel Qwen2_5_VLModel = SimpleNamespace(forward=None) Qwen2VLModel = SimpleNamespace(forward=None) from verl.models.transformers.qwen2_vl import forward_with_normal_backend, qwen2_vl_base_forward Qwen2_5_VLModel.forward = qwen2_vl_base_forward Qwen2VLModel.forward = qwen2_vl_base_forward Qwen2_5_VLForConditionalGeneration.forward = forward_with_normal_backend Qwen2VLForConditionalGeneration.forward = forward_with_normal_backend print(f"Monkey patch {model.__class__.__name__} model forward") # Step 2: patch attention to support ulysses parallelism if is_transformers_version_in_range(min_version="4.54.0"): from transformers.models.qwen2_5_vl.modeling_qwen2_5_vl import Qwen2_5_VLAttention from transformers.models.qwen2_vl.modeling_qwen2_vl import Qwen2VLAttention elif is_transformers_version_in_range(min_version="4.53.0"): raise RuntimeError("Transformers 4.53.* is bugged. 
Use transformers 4.54.0 or later.") else: from transformers.models.qwen2_5_vl.modeling_qwen2_5_vl import ( Qwen2_5_VLFlashAttention2 as Qwen2_5_VLAttention, ) from transformers.models.qwen2_vl.modeling_qwen2_vl import Qwen2VLFlashAttention2 as Qwen2VLAttention if use_remove_padding or ulysses_sp_size > 1: from verl.models.transformers.qwen2_vl import qwen2_vl_attn_forward Qwen2_5_VLAttention.forward = qwen2_vl_attn_forward Qwen2VLAttention.forward = qwen2_vl_attn_forward print(f"Monkey patch {model.__class__.__name__} attention layer") # Step 3: patch input for multimodal sequence parallelism if ulysses_sp_size > 1: patch_vlm_for_ulysses_input_slicing(Qwen2_5_VLTextModel) patch_vlm_for_ulysses_input_slicing(Qwen2VLTextModel) elif model.config.model_type in ["qwen3_vl", "qwen3_vl_moe"]: # Step 1: patch model to support image-text mixed data from transformers.models.qwen3_vl.modeling_qwen3_vl import ( Qwen3VLForConditionalGeneration, Qwen3VLModel, Qwen3VLTextModel, ) from transformers.models.qwen3_vl_moe.modeling_qwen3_vl_moe import ( Qwen3VLMoeForConditionalGeneration, Qwen3VLMoeModel, Qwen3VLMoeTextModel, ) from verl.models.transformers.qwen3_vl import forward_with_normal_backend, qwen3_vl_base_forward Qwen3VLModel.forward = qwen3_vl_base_forward Qwen3VLMoeModel.forward = qwen3_vl_base_forward Qwen3VLForConditionalGeneration.forward = forward_with_normal_backend Qwen3VLMoeForConditionalGeneration.forward = forward_with_normal_backend print(f"Monkey patch {model.__class__.__name__} model forward") # Step 2: patch input for multimodal sequence parallelism if ulysses_sp_size > 1: patch_vlm_for_ulysses_input_slicing(Qwen3VLTextModel) patch_vlm_for_ulysses_input_slicing(Qwen3VLMoeTextModel) elif model.config.model_type == "glm4v": # Step 1: patch model to support image-text mixed data from transformers.models.glm4v.modeling_glm4v import ( Glm4vForConditionalGeneration, Glm4vModel, Glm4vTextAttention, Glm4vTextModel, ) from verl.models.transformers.glm4v import forward_with_normal_backend, glm4v_base_forward Glm4vModel.forward = glm4v_base_forward Glm4vForConditionalGeneration.forward = forward_with_normal_backend print(f"Monkey patch {model.__class__.__name__} model forward") # Step 2: patch attention to support ulysses parallelism if use_remove_padding or ulysses_sp_size > 1: from verl.models.transformers.glm4v import glm4v_attn_forward Glm4vTextAttention.forward = glm4v_attn_forward print(f"Monkey patch {model.__class__.__name__} attention layer") # Step 3: patch input for multimodal sequence parallelism if ulysses_sp_size > 1: patch_vlm_for_ulysses_input_slicing(Glm4vTextModel) elif model.config.model_type == "kimi_vl": if use_remove_padding or ulysses_sp_size > 1: # TODO: Changes need to be made when transformers are adapted. 
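            # KimiVL's modeling code lives outside transformers (loaded as remote code),
            # so the attention class is resolved from the model's own module
            # (`module = sys.modules[model.__module__]` above) rather than imported.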
from verl.models.transformers.kimi_vl import _ulysses_flash_attn_forward module.DeepseekV3FlashAttention2.forward = _ulysses_flash_attn_forward print("Monkey patch FlashAttention2.forward in KimiVL") if ulysses_sp_size > 1: patch_vlm_for_ulysses_input_slicing(module.DeepseekV3ForCausalLM) if use_fused_kernels: print("Not support fused kernels for KimiVL") return if use_remove_padding or ulysses_sp_size > 1: if hasattr(module, "_flash_attention_forward"): # transformers <= 4.47.1 or legacy models module._flash_attention_forward = _ulysses_flash_attention_forward print(f"Monkey patch _flash_attention_forward in {model.__module__}") else: from transformers.integrations import flash_attention flash_attention._flash_attention_forward = _ulysses_flash_attention_forward print(f"Monkey patch _flash_attention_forward in {flash_attention.__name__}") patch_forward_with_backends(model, use_fused_kernels=use_fused_kernels, fused_kernels_backend=fused_kernels_backend) ================================================ FILE: verl_distillation/verl/models/transformers/npu_patch.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Copyright 2025 The Qwen Team and The HuggingFace Inc. team # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
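
# The patches below swap HF reference implementations for fused torch_npu kernels.
# As a hedged reference (the torch_npu ops are the source of truth), npu_rms_norm
# and npu_swiglu are expected to match these eager equivalents:
#
#     import torch
#
#     def rms_norm_ref(x: torch.Tensor, weight: torch.Tensor, eps: float) -> torch.Tensor:
#         variance = x.float().pow(2).mean(-1, keepdim=True)
#         return weight * (x.float() * torch.rsqrt(variance + eps)).type_as(x)
#
#     def swiglu_ref(gate_up: torch.Tensor) -> torch.Tensor:
#         gate, up = gate_up.chunk(2, dim=-1)
#         return torch.nn.functional.silu(gate) * up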
from importlib.metadata import version as get_version from typing import Optional import torch import torch.nn.functional as F import torch_npu from torch_npu import npu_rotary_mul as apply_rotary_emb from transformers.modeling_utils import PretrainedConfig, PreTrainedModel from transformers.models.qwen2_5_vl import modeling_qwen2_5_vl from transformers.models.qwen3 import modeling_qwen3 from transformers.models.qwen3_moe import modeling_qwen3_moe from transformers.utils import logging logger = logging.get_logger(__name__) # This patch takes effect when using apply_rotary_pos_emb_flashatt on qwen2_5_vl and will be removed in # subsequent versions # https://github.com/huggingface/transformers/pull/38491 def apply_rotary_pos_emb_flashatt_qwen2_5_vl_npu( q: torch.Tensor, k: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor ) -> tuple[torch.Tensor, torch.Tensor]: cos = cos.chunk(2, dim=-1)[0].contiguous() sin = sin.chunk(2, dim=-1)[0].contiguous() cos = cos.repeat(1, 2) sin = sin.repeat(1, 2) q_embed = apply_rotary_emb( q.float(), cos.unsqueeze(0).unsqueeze(2).float(), sin.unsqueeze(0).unsqueeze(2).float() ).type_as(q) k_embed = apply_rotary_emb( k.float(), cos.unsqueeze(0).unsqueeze(2).float(), sin.unsqueeze(0).unsqueeze(2).float() ).type_as(k) return q_embed, k_embed # This api can improve performance on ASCEND NPU def rms_norm_forward(self, x): return torch_npu.npu_rms_norm(x, self.weight, epsilon=self.variance_epsilon)[0] def silu_forward(self, hidden_state): """NPU optimized silu""" gate_up = torch.cat((self.gate_proj(hidden_state), self.up_proj(hidden_state)), dim=-1) return self.down_proj(torch_npu.npu_swiglu(gate_up, dim=-1)) def apply_rotary_pos_emb_qwen3_npu(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): cos = cos.unsqueeze(unsqueeze_dim) sin = sin.unsqueeze(unsqueeze_dim) q_embed = torch_npu.npu_rotary_mul(q, cos, sin) k_embed = torch_npu.npu_rotary_mul(k, cos, sin) return q_embed.to(q.dtype), k_embed.to(k.dtype) class GmmFunction(torch.autograd.Function): @staticmethod def forward(ctx, x, weight, group_list, split_size): ctx.save_for_backward(x, weight) ctx.group_list = group_list ctx.split_size = split_size outputs = torch_npu.npu_grouped_matmul([x], [weight], group_list=group_list, group_type=0, split_item=2) return outputs[0] @staticmethod def backward(ctx, grad_outputs): x, weight = ctx.saved_tensors group_list = ctx.group_list wt = weight.permute(0, 2, 1) xt = x.permute(1, 0) dx = torch_npu.npu_grouped_matmul([grad_outputs], [wt], group_list=group_list, group_type=0, split_item=2) dw = torch.zeros_like(weight) split_size = ctx.split_size xt_list = torch.split(xt, split_size, dim=1) grad_outputs_list = torch.split(grad_outputs, split_size, dim=0) with torch.npu.amp.autocast(enabled=False): dw = torch.stack([torch.matmul(xt_list[i], grad_outputs_list[i]) for i in range(len(xt_list))]) return dx[0], dw, None, None def moe_block_forward(self, hidden_states: torch.Tensor) -> torch.Tensor: """ """ batch_size, sequence_length, hidden_dim = hidden_states.shape hidden_states = hidden_states.view(-1, hidden_dim) # router_logits: (batch * sequence_length, n_experts) router_logits = self.gate(hidden_states) routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float) routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1) if self.norm_topk_prob: # only diff with mixtral sparse moe block! 
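        # Renormalize so the selected top-k gating probabilities sum to 1 per token.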
routing_weights /= routing_weights.sum(dim=-1, keepdim=True) # we cast back to the input dtype routing_weights = routing_weights.to(hidden_states.dtype) final_hidden_states = torch.zeros( (batch_size * sequence_length, hidden_dim), dtype=hidden_states.dtype, device=hidden_states.device ) # One hot encode the selected experts to create an expert mask # this will be used to easily index which expert is going to be sollicitated expert_mask = torch.nn.functional.one_hot(selected_experts, num_classes=self.num_experts).permute(2, 1, 0) # Loop over all available experts in the model and perform the computation on each expert # Concat all weights input_dtype = hidden_states.dtype up_weight_list = [e.up_proj.weight.t().to(input_dtype) for e in self.experts] gate_weight_list = [e.gate_proj.weight.t().to(input_dtype) for e in self.experts] down_weight_list = [e.down_proj.weight.t().to(input_dtype) for e in self.experts] w1 = torch.stack(up_weight_list) w2 = torch.stack(gate_weight_list) w3 = torch.stack(down_weight_list) # Copied from mindspeed moe_utils.py:permute routing_map = selected_experts flatten_indices = routing_map.view(-1) sorted_indices = torch.sort(flatten_indices.float(), stable=True)[1] permuted_tokens = hidden_states.index_select(0, sorted_indices // self.top_k) tokens_per_experts = torch.sum(expert_mask, dim=(1, 2)) group_list = torch.cumsum(tokens_per_experts, dim=0) cpu_group_list = group_list.to("cpu", non_blocking=False) cpu_group_list = [0] + cpu_group_list.tolist() split_size = [cpu_group_list[i + 1] - cpu_group_list[i] for i in range(len(cpu_group_list) - 1)] up_res = GmmFunction.apply(permuted_tokens, w1, group_list, split_size) gate_res = GmmFunction.apply(permuted_tokens, w2, group_list, split_size) act_res = torch_npu.npu_swiglu(torch.cat([gate_res, up_res], dim=-1)) down_res = GmmFunction.apply(act_res, w3, group_list, split_size) probs = routing_weights num_unpermuted_tokens = probs.numel() topk = self.top_k permuted_tokens = down_res unpermuted_tokens = torch.zeros( [num_unpermuted_tokens, permuted_tokens.shape[-1]], dtype=permuted_tokens.dtype, device=permuted_tokens.device, ) unpermuted_tokens.index_copy_(0, sorted_indices, permuted_tokens) unpermuted_tokens = unpermuted_tokens.reshape(-1, topk, permuted_tokens.size(-1)) unpermuted_tokens = unpermuted_tokens * probs.unsqueeze(-1) unpermuted_tokens = unpermuted_tokens.sum(dim=1).to(hidden_states.dtype) final_hidden_states = unpermuted_tokens return final_hidden_states, router_logits @classmethod def _check_and_enable_flash_attn_2( cls, config, torch_dtype: Optional[torch.dtype] = None, device_map: Optional[str | dict[str, int]] = None, check_device_map: bool = True, hard_check_only: bool = False, ) -> PretrainedConfig: """ Checks the availability of Flash Attention 2 and compatibility with the current model. If all checks pass and `hard_check_only` is False, the method will set the config attribute `attn_implementation` to "flash_attention_2" so that the model can initialize the correct attention module. """ if not cls._supports_flash_attn_2: raise ValueError( f"{cls.__name__} does not support Flash Attention 2.0 yet. 
Please request to add support where the" f" model is hosted, on its model hub page: https://huggingface.co/{config._name_or_path}/discussions/new" " or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new" ) if not hard_check_only: config._attn_implementation = "flash_attention_2" logger.info("Detect using FlashAttention2 on Ascend NPU.") return config modeling_qwen2_5_vl.Qwen2RMSNorm.forward = rms_norm_forward modeling_qwen2_5_vl.Qwen2_5_VLMLP.forward = silu_forward modeling_qwen2_5_vl.apply_rotary_pos_emb_flashatt = apply_rotary_pos_emb_flashatt_qwen2_5_vl_npu modeling_qwen3_moe.Qwen3MoeRMSNorm.forward = rms_norm_forward modeling_qwen3_moe.Qwen3MoeSparseMoeBlock.forward = moe_block_forward modeling_qwen3_moe.apply_rotary_pos_emb = apply_rotary_pos_emb_qwen3_npu modeling_qwen3.Qwen3RMSNorm.forward = rms_norm_forward modeling_qwen3.Qwen3MLP.forward = silu_forward if get_version("transformers") == "4.52.4": PreTrainedModel._check_and_enable_flash_attn_2 = _check_and_enable_flash_attn_2 ================================================ FILE: verl_distillation/verl/models/transformers/qwen2.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Callable, Optional import torch from transformers.cache_utils import Cache from transformers.modeling_flash_attention_utils import _flash_attention_forward from transformers.models.llama.modeling_llama import apply_rotary_pos_emb, repeat_kv from transformers.utils import logging # Import compatibility wrapper for flash_attn_supports_top_left_mask from verl.utils.transformers_compat import flash_attn_supports_top_left_mask from verl.utils.ulysses import ( gather_heads_scatter_seq, gather_seq_scatter_heads, get_ulysses_sequence_parallel_world_size, validate_ulysses_config, ) logger = logging.get_logger(__name__) def qwen2_flash_attn_forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, output_attentions: bool = False, use_cache: bool = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, # will become mandatory in v4.46 ): """ Adapted from transformers 4.47.1 to support Ulysses sequence parallelism. NOTE: This function is only tested on transformers versions between 4.45.0 and 4.47.1. 
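# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): a device-agnostic
# round-trip check of the token permute/unpermute scheme used in
# moe_block_forward above. With an identity "expert" in place of the grouped
# matmuls, unpermute plus probability weighting must reproduce
# hidden * probs.sum(-1, keepdim=True).
# ---------------------------------------------------------------------------
def _moe_permute_roundtrip_example():
    tokens, top_k, dim = 8, 2, 4
    hidden = torch.randn(tokens, dim)
    selected_experts = torch.randint(0, 4, (tokens, top_k))
    probs = torch.rand(tokens, top_k)
    flat = selected_experts.view(-1)
    sorted_indices = torch.sort(flat.float(), stable=True)[1]
    permuted = hidden.index_select(0, sorted_indices // top_k)
    # (a real MoE would run per-expert FFNs on the contiguous groups here)
    unpermuted = torch.zeros(flat.numel(), dim, dtype=hidden.dtype)
    unpermuted.index_copy_(0, sorted_indices, permuted)
    out = (unpermuted.view(tokens, top_k, dim) * probs.unsqueeze(-1)).sum(dim=1)
    assert torch.allclose(out, hidden * probs.sum(-1, keepdim=True))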
""" bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) ########## AlltoAll for Ulysses ########## ulysses_sp_size = get_ulysses_sequence_parallel_world_size() if ulysses_sp_size > 1: validate_ulysses_config(self.num_heads, ulysses_sp_size) # (bsz, n_head, seq_len/n, head_dim) -> (bsz, n_head/n, seq_len, head_dim) query_states = gather_seq_scatter_heads(query_states, seq_dim=2, head_dim=1) key_states = gather_seq_scatter_heads(key_states, seq_dim=2, head_dim=1) value_states = gather_seq_scatter_heads(value_states, seq_dim=2, head_dim=1) full_q_len = query_states.size(2) # full seq length if position_embeddings is None: logger.warning_once( "The attention layers in this model are transitioning from computing the RoPE embeddings internally " "through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed " "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.46 `position_ids` will be " "removed and `position_embeddings` will be mandatory." ) cos, sin = self.rotary_emb(value_states, position_ids) else: cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_value is not None: cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} # Specific to RoPE models key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) # repeat k/v heads if n_kv_heads < n_heads key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) dropout_rate = 0.0 if not self.training else self.attention_dropout # In PEFT, usually we cast the layer norms in float32 for training stability reasons # therefore the input hidden states gets silently casted in float32. Hence, we need # cast them back in float16 just to be sure everything works as expected. input_dtype = query_states.dtype if input_dtype == torch.float32: if torch.is_autocast_enabled(): target_dtype = torch.get_autocast_gpu_dtype() # Handle the case where the model is quantized elif hasattr(self.config, "_pre_quantization_dtype"): target_dtype = self.config._pre_quantization_dtype else: target_dtype = self.q_proj.weight.dtype logger.warning_once( f"The input hidden states seems to be silently casted in float32, this might be related to " f"the fact you have upcasted embedding or layer norm layers in float32. We will cast back the " f"input in {target_dtype}." 
) query_states = query_states.to(target_dtype) key_states = key_states.to(target_dtype) value_states = value_states.to(target_dtype) # Reashape to the expected shape for Flash Attention query_states = query_states.transpose(1, 2) key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) if ( self.config.use_sliding_window and getattr(self.config, "sliding_window", None) is not None and self.layer_idx >= self.config.max_window_layers ): sliding_window = self.config.sliding_window else: sliding_window = None attn_output = _flash_attention_forward( query_states, key_states, value_states, attention_mask, full_q_len, position_ids=position_ids, dropout=dropout_rate, sliding_window=sliding_window, is_causal=self.is_causal, use_top_left_mask=flash_attn_supports_top_left_mask(), ) # use full_q_len to reshape attn_output = attn_output.reshape(bsz, full_q_len, -1, self.head_dim).contiguous() ########## AlltoAll for Ulysses ########## if ulysses_sp_size > 1: attn_output = gather_heads_scatter_seq(attn_output, seq_dim=1, head_dim=2) attn_output = attn_output.reshape(bsz, q_len, -1).contiguous() attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights, past_key_value def qwen2_attn_forward( self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_value: Optional[Cache] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs, ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: """ Adapted from transformers 4.49.0 to support Ulysses sequence parallelism for transformers >= 4.48.0. NOTE: This function has been tested only on transformers versions between 4.48.0 and 4.50.0. 
""" from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS bsz, q_len, _ = hidden_states.shape hidden_shape = (bsz, q_len, -1, self.head_dim) query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2) key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2) value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2) ########## AlltoAll for Ulysses ########## ulysses_sp_size = get_ulysses_sequence_parallel_world_size() if ulysses_sp_size > 1: validate_ulysses_config(self.config.num_attention_heads, ulysses_sp_size) # (bsz, n_head, seq_len/n, head_dim) -> (bsz, n_head/n, seq_len, head_dim) query_states = gather_seq_scatter_heads(query_states, seq_dim=2, head_dim=1) key_states = gather_seq_scatter_heads(key_states, seq_dim=2, head_dim=1) value_states = gather_seq_scatter_heads(value_states, seq_dim=2, head_dim=1) full_q_len = query_states.size(2) cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_value is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) sliding_window = None if ( self.config.use_sliding_window and getattr(self.config, "sliding_window", None) is not None and self.layer_idx >= self.config.max_window_layers ): sliding_window = self.config.sliding_window from transformers.models.qwen2.modeling_qwen2 import eager_attention_forward attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False): logger.warning_once( "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. " "Falling back to eager attention. This warning can be removed using the argument " '`attn_implementation="eager"` when loading the model.' ) else: attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, sliding_window=sliding_window, # main diff with Llama **kwargs, ) attn_output = attn_output.reshape(bsz, full_q_len, -1, self.head_dim).contiguous() ########## AlltoAll for Ulysses ########## if ulysses_sp_size > 1: # (bsz, seq_len, n_head/n, head_dim) -> (bsz, seq_len/n, n_head, head_dim) attn_output = gather_heads_scatter_seq(attn_output, seq_dim=1, head_dim=2) attn_output = attn_output.reshape(bsz, q_len, -1).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights ================================================ FILE: verl_distillation/verl/models/transformers/qwen2_vl.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. import inspect import logging import os from dataclasses import dataclass from typing import Optional import torch import torch.distributed as dist from transformers.modeling_flash_attention_utils import _flash_attention_forward, fa_peft_integration_check from transformers.models.qwen2_vl.modeling_qwen2_vl import ( Qwen2VLAttention, Qwen2VLCausalLMOutputWithPast, Qwen2VLForConditionalGeneration, ) from transformers.utils import is_flash_attn_2_available, is_flash_attn_greater_or_equal_2_10 from verl.utils.device import is_npu_available from verl.utils.transformers_compat import is_transformers_version_in_range from verl.utils.ulysses import ( gather_heads_scatter_seq, gather_seq_scatter_heads, get_ulysses_sequence_parallel_group, get_ulysses_sequence_parallel_world_size, validate_ulysses_config, ) logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) if is_flash_attn_2_available(): from flash_attn import flash_attn_func, flash_attn_varlen_func _flash_supports_window_size = "window_size" in inspect.signature(flash_attn_func).parameters _flash_supports_deterministic = "deterministic" in inspect.signature(flash_attn_func).parameters _flash_use_top_left_mask = not is_flash_attn_greater_or_equal_2_10() if is_npu_available: from transformers.integrations.npu_flash_attention import npu_flash_attn_func as flash_attn_func from transformers.integrations.npu_flash_attention import npu_flash_attn_varlen_func as flash_attn_varlen_func from transformers.modeling_flash_attention_utils import flash_attn_supports_top_left_mask _flash_supports_window_size = "window_size" in inspect.signature(flash_attn_func).parameters _flash_supports_deterministic = "deterministic" in inspect.signature(flash_attn_func).parameters _flash_use_top_left_mask = flash_attn_supports_top_left_mask() _flash_deterministic_enabled = os.getenv("FLASH_ATTENTION_DETERMINISTIC", "0") == "1" def get_rope_index( processor, input_ids: torch.Tensor, image_grid_thw: Optional[torch.Tensor] = None, video_grid_thw: Optional[torch.Tensor] = None, second_per_grid_ts: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, ) -> torch.Tensor: """ Gets the position ids for Qwen2-VL, it should be generated before sharding the sequence. The batch dim has been removed and the input_ids should be a 1D tensor representing a single example. 
https://github.com/huggingface/transformers/blob/v4.52.4/src/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py#L1405 """ spatial_merge_size = processor.image_processor.merge_size tokens_per_second = 2 image_token_id = processor.tokenizer.convert_tokens_to_ids("<|image_pad|>") video_token_id = processor.tokenizer.convert_tokens_to_ids("<|video_pad|>") vision_start_token_id = processor.tokenizer.convert_tokens_to_ids("<|vision_start|>") if input_ids is not None and (image_grid_thw is not None or video_grid_thw is not None): if attention_mask is None: attention_mask = torch.ones_like(input_ids) position_ids = torch.ones(3, input_ids.size(0), dtype=input_ids.dtype, device=input_ids.device) # (3, seqlen) image_index, video_index = 0, 0 input_ids = input_ids[attention_mask == 1] image_nums, video_nums = 0, 0 vision_start_indices = torch.argwhere(input_ids == vision_start_token_id) vision_tokens = input_ids[vision_start_indices + 1] image_nums = (vision_tokens == image_token_id).sum() video_nums = (vision_tokens == video_token_id).sum() input_tokens = input_ids.tolist() llm_pos_ids_list: list = [] st = 0 remain_images, remain_videos = image_nums, video_nums for _ in range(image_nums + video_nums): if image_token_id in input_tokens and remain_images > 0: ed_image = input_tokens.index(image_token_id, st) else: ed_image = len(input_tokens) + 1 if video_token_id in input_tokens and remain_videos > 0: ed_video = input_tokens.index(video_token_id, st) else: ed_video = len(input_tokens) + 1 if ed_image < ed_video: t, h, w = ( image_grid_thw[image_index][0], image_grid_thw[image_index][1], image_grid_thw[image_index][2], ) second_per_grid_t = 0 image_index += 1 remain_images -= 1 ed = ed_image else: t, h, w = ( video_grid_thw[video_index][0], video_grid_thw[video_index][1], video_grid_thw[video_index][2], ) second_per_grid_t = second_per_grid_ts[video_index] if second_per_grid_ts is not None else 1.0 video_index += 1 remain_videos -= 1 ed = ed_video llm_grid_t, llm_grid_h, llm_grid_w = ( t.item(), h.item() // spatial_merge_size, w.item() // spatial_merge_size, ) text_len = ed - st st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx) t_index = torch.arange(llm_grid_t).view(-1, 1).expand(-1, llm_grid_h * llm_grid_w) t_index = (t_index * second_per_grid_t * tokens_per_second).long().flatten() h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(llm_grid_t, -1, llm_grid_w).flatten() w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(llm_grid_t, llm_grid_h, -1).flatten() llm_pos_ids_list.append(torch.stack([t_index, h_index, w_index]) + text_len + st_idx) st = ed + llm_grid_t * llm_grid_h * llm_grid_w if st < len(input_tokens): st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 text_len = len(input_tokens) - st llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx) llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1) position_ids[..., attention_mask == 1] = llm_positions.to(position_ids.device) else: if attention_mask is not None: position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) position_ids = position_ids.unsqueeze(0).expand(3, -1).to(input_ids.device) else: position_ids = torch.arange(input_ids.shape[1], device=input_ids.device).view(1, -1).expand(3, -1) return position_ids def prepare_fa2_from_position_ids( query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, 
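# ---------------------------------------------------------------------------
# Worked example (illustrative, not part of the original source) of the
# text-only fallback branch in get_rope_index above: with no image/video
# grids, position ids are a cumsum over the attention mask, broadcast to the
# three mrope channels (t, h and w are identical for text).
# ---------------------------------------------------------------------------
def _mrope_text_fallback_example():
    attention_mask = torch.tensor([1, 1, 1, 0])
    position_ids = attention_mask.long().cumsum(-1) - 1  # tensor([0, 1, 2, 2])
    position_ids.masked_fill_(attention_mask == 0, 1)    # tensor([0, 1, 2, 1])
    return position_ids.unsqueeze(0).expand(3, -1)       # shape (3, 4)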
position_ids: torch.Tensor ): assert position_ids.ndim == 2 # (batch_size, seq_length) query = query.contiguous().view(-1, query.size(-2), query.size(-1)) key = key.contiguous().view(-1, key.size(-2), key.size(-1)) value = value.contiguous().view(-1, value.size(-2), value.size(-1)) position_ids = position_ids.view(-1) cu_seqlens = torch.cat( ( (position_ids == 0).nonzero().view(-1).to(torch.int32), torch.tensor(position_ids.size(), device=position_ids.device, dtype=torch.int32), ) ) max_length = cu_seqlens.diff().max() # use cu_seqlens to infer max_length for qwen2vl mrope return (query, key, value, (cu_seqlens, cu_seqlens), (max_length, max_length)) def _custom_flash_attention_forward( query_states: torch.Tensor, key_states: torch.Tensor, value_states: torch.Tensor, attention_mask: Optional[torch.Tensor], query_length: int, is_causal: bool = True, position_ids: Optional[torch.Tensor] = None, sliding_window: Optional[int] = None, use_top_left_mask: bool = False, deterministic: Optional[bool] = None, **kwargs, ): """ Patches flash attention forward to handle 3D position ids in mrope. (3, batch_size, seq_length) """ # Assuming 4D tensors, key_states.shape[1] is the key/value sequence length (source length). use_sliding_windows = ( _flash_supports_window_size and sliding_window is not None and key_states.shape[1] > sliding_window ) flash_kwargs = {"window_size": (sliding_window, sliding_window)} if use_sliding_windows else {} if _flash_supports_deterministic: flash_kwargs["deterministic"] = deterministic if deterministic is not None else _flash_deterministic_enabled if kwargs.get("softcap") is not None: flash_kwargs["softcap"] = kwargs.pop("softcap") query_states, key_states, value_states = fa_peft_integration_check( query_states, key_states, value_states, target_dtype=torch.bfloat16 ) if position_ids is not None: assert position_ids.ndim == 2 # (batch_size, seq_length / sp_size) sp_size = get_ulysses_sequence_parallel_world_size() if sp_size > 1: # qkv: (batch_size, seq_length / sp_size, num_head, head_size) validate_ulysses_config(query_states.size(2), sp_size) query_states = gather_seq_scatter_heads(query_states, seq_dim=1, head_dim=2) key_states = gather_seq_scatter_heads(key_states, seq_dim=1, head_dim=2) value_states = gather_seq_scatter_heads(value_states, seq_dim=1, head_dim=2) position_ids_lst = [torch.empty_like(position_ids) for _ in range(sp_size)] position_ids = dist.all_gather(position_ids_lst, position_ids, group=get_ulysses_sequence_parallel_group()) position_ids = torch.cat(position_ids_lst, dim=-1) # (batch_size, seq_length) if position_ids is not None and query_length != 1 and not (torch.diff(position_ids, dim=-1) >= 0).all(): batch_size = query_states.size(0) q, k, v, (cu_seqlens_q, cu_seqlens_k), (max_seqlen_q, max_seqlen_k) = prepare_fa2_from_position_ids( query_states, key_states, value_states, position_ids ) attn_output = flash_attn_varlen_func( q=q, k=k, v=v, cu_seqlens_q=cu_seqlens_q, cu_seqlens_k=cu_seqlens_k, max_seqlen_q=max_seqlen_q, max_seqlen_k=max_seqlen_k, dropout_p=kwargs.pop("dropout", 0.0), softmax_scale=kwargs.pop("softmax_scale", None), causal=is_causal, **flash_kwargs, ) attn_output = attn_output.view(batch_size, -1, attn_output.size(-2), attn_output.size(-1)) else: attn_output = _flash_attention_forward( query_states, key_states, value_states, attention_mask, query_length, is_causal=is_causal, sliding_window=sliding_window, use_top_left_mask=use_top_left_mask, deterministic=deterministic, **kwargs, ) # do not pass position_ids to old 
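# ---------------------------------------------------------------------------
# Worked example (illustrative, not part of the original source) of the
# cu_seqlens trick in prepare_fa2_from_position_ids above: packed sequences
# restart their positions at 0, so (position_ids == 0) marks each sequence
# start, and the final offset is the total token count.
# ---------------------------------------------------------------------------
def _cu_seqlens_example():
    position_ids = torch.tensor([0, 1, 2, 0, 1, 0, 1, 2, 3])  # three packed sequences
    starts = (position_ids == 0).nonzero().view(-1).to(torch.int32)
    cu_seqlens = torch.cat((starts, torch.tensor([position_ids.numel()], dtype=torch.int32)))
    assert cu_seqlens.tolist() == [0, 3, 5, 9]
    assert cu_seqlens.diff().max().item() == 4  # inferred max_seqlen
    return cu_seqlens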
flash_attention_forward if sp_size > 1: # (batch_size, seq_length, num_head, head_size) attn_output = gather_heads_scatter_seq(attn_output, head_dim=2, seq_dim=1) return attn_output def qwen2_vl_attn_forward( self: "Qwen2VLAttention", hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, # will become mandatory in v4.46 **kwargs, ) -> tuple[torch.Tensor, None, None]: from transformers.models.qwen2_vl.modeling_qwen2_vl import apply_multimodal_rotary_pos_emb, repeat_kv bsz, q_len, _ = hidden_states.size() # q_len = seq_length / sp_size query_states = self.q_proj(hidden_states) # (batch_size, seq_length / sp_size, num_heads * head_size) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) # Because the input can be padded, the absolute sequence length depends on the max position id. cos, sin = position_embeddings query_states, key_states = apply_multimodal_rotary_pos_emb( query_states, key_states, cos, sin, self.rope_scaling["mrope_section"] ) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) dropout_rate = 0.0 if not self.training else self.attention_dropout sliding_window = None if ( self.config.use_sliding_window and getattr(self.config, "sliding_window", None) is not None and self.layer_idx >= self.config.max_window_layers ): sliding_window = self.config.sliding_window # This is before the transpose q_len = query_states.shape[2] # FA2 uses non-transposed inputs query_states = query_states.transpose(1, 2) key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) if position_ids.ndim == 3: position_ids = position_ids[0] attn_output = _custom_flash_attention_forward( query_states, key_states, value_states, attention_mask, query_length=q_len, is_causal=getattr(self, "is_causal", True), dropout=dropout_rate, sliding_window=sliding_window, use_top_left_mask=_flash_use_top_left_mask, position_ids=position_ids, # important: pass position ids ) # (batch_size, seq_length / sp_size, num_head, head_size) attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous() attn_output = self.o_proj(attn_output) if is_transformers_version_in_range(min_version="4.54.0"): return attn_output, None else: return attn_output, None, None def _get_input_embeds( model: "Qwen2VLForConditionalGeneration", input_ids: torch.LongTensor, attention_mask: Optional[torch.Tensor] = None, pixel_values: Optional[torch.FloatTensor] = None, pixel_values_videos: Optional[torch.FloatTensor] = None, image_grid_thw: Optional[torch.LongTensor] = None, video_grid_thw: Optional[torch.LongTensor] = None, ): inputs_embeds = model.get_input_embeddings()(input_ids) if pixel_values is not None: pixel_values = pixel_values.type(model.visual.dtype) image_embeds = model.visual(pixel_values, grid_thw=image_grid_thw) n_image_tokens = (input_ids == model.config.image_token_id).sum().item() n_image_features = image_embeds.shape[0] if n_image_tokens != n_image_features: raise ValueError( f"Image features and image tokens do not match: tokens: {n_image_tokens}, features 
{n_image_features}" ) mask = input_ids == model.config.image_token_id mask_unsqueezed = mask.unsqueeze(-1) mask_expanded = mask_unsqueezed.expand_as(inputs_embeds) image_mask = mask_expanded.to(inputs_embeds.device) image_embeds = image_embeds.to(inputs_embeds.device, inputs_embeds.dtype) inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds) if pixel_values_videos is not None: pixel_values_videos = pixel_values_videos.type(model.visual.dtype) video_embeds = model.visual(pixel_values_videos, grid_thw=video_grid_thw) n_video_tokens = (input_ids == model.config.video_token_id).sum().item() n_video_features = video_embeds.shape[0] if n_video_tokens != n_video_features: raise ValueError( f"Video features and video tokens do not match: tokens: {n_video_tokens}, features {n_video_features}" ) mask = input_ids == model.config.video_token_id mask_unsqueezed = mask.unsqueeze(-1) mask_expanded = mask_unsqueezed.expand_as(inputs_embeds) video_mask = mask_expanded.to(inputs_embeds.device) video_embeds = video_embeds.to(inputs_embeds.device, inputs_embeds.dtype) inputs_embeds = inputs_embeds.masked_scatter(video_mask, video_embeds) if pixel_values is None and pixel_values_videos is None: # handle mixed text-image data config = model.config.vision_config patch_dim = config.in_channels * config.temporal_patch_size * config.patch_size**2 pixel_values = torch.zeros((16, patch_dim), dtype=inputs_embeds.dtype, device=inputs_embeds.device) image_grid_thw = torch.tensor([[1, 4, 4]], dtype=torch.long, device=inputs_embeds.device) image_embeds = model.visual(pixel_values, grid_thw=image_grid_thw) inputs_embeds += 0.0 * image_embeds.mean() if attention_mask is not None: attention_mask = attention_mask.to(inputs_embeds.device) return inputs_embeds, attention_mask def process_position_ids(position_ids: torch.Tensor) -> torch.Tensor: if position_ids.ndim != 3 or position_ids.size(0) != 4: # we concat the text position ids with the 3D vision position ids by default # see https://github.com/huggingface/transformers/pull/39447 raise ValueError("position_ids should be a 3D tensor of shape (4, batch_size, seq_length).") if is_transformers_version_in_range(max_version="4.53.3"): # transformers < 4.54.0 only accepts vision position ids, so we discard the text position ids here position_ids = position_ids[1:] return position_ids @dataclass class Qwen2VLCausalLMOutputForPPO(Qwen2VLCausalLMOutputWithPast): log_probs: Optional[torch.FloatTensor] = None entropy: Optional[torch.FloatTensor] = None def qwen2_vl_base_forward( self: "Qwen2VLForConditionalGeneration", input_ids: torch.LongTensor, attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, pixel_values_videos: Optional[torch.FloatTensor] = None, image_grid_thw: Optional[torch.LongTensor] = None, video_grid_thw: Optional[torch.LongTensor] = None, **kwargs, ): kwargs["inputs_embeds"], kwargs["attention_mask"] = _get_input_embeds( self, input_ids, attention_mask, pixel_values, pixel_values_videos, image_grid_thw, video_grid_thw ) # avoid lora module having multiple keyword arguments return self.language_model(input_ids=None, **kwargs) def qwen2_vl_forward( self: "Qwen2VLForConditionalGeneration", input_ids: torch.LongTensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, pixel_values_videos: Optional[torch.FloatTensor] = None, image_grid_thw: Optional[torch.LongTensor] = None, 
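# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source) of the zero-weighted
# dummy visual forward in _get_input_embeds above: on text-only batches, a
# dummy pass through the vision tower is added with weight 0.0, keeping the
# tower's parameters in the autograd graph (so FSDP/DDP gradient hooks fire)
# without changing the embeddings numerically.
# ---------------------------------------------------------------------------
def _dummy_tower_example():
    import torch.nn as nn

    tower = nn.Linear(4, 4)  # stand-in for the visual tower
    text_embeds = torch.randn(2, 3, 4, requires_grad=True)
    dummy = tower(torch.zeros(1, 4))
    out = text_embeds + 0.0 * dummy.mean()
    out.sum().backward()
    assert tower.weight.grad is not None  # tower participated in backward
    assert torch.equal(out, text_embeds)  # numerically unchanged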
video_grid_thw: Optional[torch.LongTensor] = None, **kwargs, ): if is_transformers_version_in_range(min_version="4.52.0"): return self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=process_position_ids(position_ids), pixel_values=pixel_values, pixel_values_videos=pixel_values_videos, image_grid_thw=image_grid_thw, video_grid_thw=video_grid_thw, **kwargs, ) else: inputs_embeds, attention_mask = _get_input_embeds( self, input_ids, attention_mask, pixel_values, pixel_values_videos, image_grid_thw, video_grid_thw ) return self.model( input_ids=None, attention_mask=attention_mask, position_ids=process_position_ids(position_ids), inputs_embeds=inputs_embeds, **kwargs, ) def forward_with_normal_backend( self: Qwen2VLForConditionalGeneration, input_ids: torch.LongTensor = None, labels: Optional[torch.LongTensor] = None, temperature: float = 1.0, **kwargs, ) -> "Qwen2VLCausalLMOutputWithPast": outputs = qwen2_vl_forward(self, input_ids, **kwargs) hidden_states = outputs[0] logits = self.lm_head(hidden_states) return Qwen2VLCausalLMOutputWithPast( logits=logits, hidden_states=outputs.hidden_states, ) def forward_with_torch_backend( self: Qwen2VLForConditionalGeneration, input_ids: torch.LongTensor = None, labels: Optional[torch.LongTensor] = None, temperature: float = 1.0, **kwargs, ) -> tuple | Qwen2VLCausalLMOutputForPPO: from verl.utils.experimental.torch_functional import FusedLinearForPPO outputs = qwen2_vl_forward(self, input_ids, **kwargs) hidden_states = outputs[0] # Loss calculations if labels is not None: rolled_labels = torch.roll(labels, shifts=-1, dims=-1) elif input_ids is not None: rolled_labels = torch.roll(input_ids, shifts=-1, dims=-1) else: raise RuntimeError("To use forward_with_torch_backend, either labels or input_ids must be provided.") fused_linear_for_ppo = FusedLinearForPPO() log_probs, entropy = fused_linear_for_ppo.forward( hidden_states=hidden_states, vocab_weights=self.lm_head.weight, input_ids=rolled_labels, temperature=temperature, ) return Qwen2VLCausalLMOutputForPPO( log_probs=log_probs, entropy=entropy, hidden_states=outputs.hidden_states, ) def forward_with_triton_backend( self: Qwen2VLForConditionalGeneration, input_ids: torch.LongTensor = None, labels: Optional[torch.LongTensor] = None, temperature: float = 1.0, **kwargs, ) -> tuple | Qwen2VLCausalLMOutputForPPO: from verl.utils.kernel.linear_cross_entropy import linear_cross_entropy outputs = qwen2_vl_forward(self, input_ids, **kwargs) hidden_states = outputs[0] # Loss calculations if labels is not None: rolled_labels = torch.roll(labels, shifts=-1, dims=-1) elif input_ids is not None: rolled_labels = torch.roll(input_ids, shifts=-1, dims=-1) else: raise RuntimeError("To use forward_with_triton_backend, either labels or input_ids must be provided.") log_probs, entropy = linear_cross_entropy( hidden_states, self.lm_head.weight, rolled_labels, temperature, "none", ) return Qwen2VLCausalLMOutputForPPO( log_probs=log_probs, entropy=entropy, hidden_states=outputs.hidden_states, ) ================================================ FILE: verl_distillation/verl/models/transformers/qwen3_vl.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os from dataclasses import dataclass from typing import Optional import torch from transformers.models.qwen3_vl.modeling_qwen3_vl import ( Qwen3VLCausalLMOutputWithPast, Qwen3VLForConditionalGeneration, ) logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) def get_rope_index( processor, input_ids: torch.Tensor, image_grid_thw: Optional[torch.Tensor] = None, video_grid_thw: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, **kwargs, ) -> torch.Tensor: """ Gets the position ids for Qwen3-VL, it should be generated before sharding the sequence. The batch dim has been removed and the input_ids should be a 1D tensor representing a single example. https://github.com/huggingface/transformers/blob/v4.57.0/src/transformers/models/qwen3_vl/modeling_qwen3_vl.py#L916 """ spatial_merge_size = processor.image_processor.merge_size image_token_id = processor.image_token_id video_token_id = processor.video_token_id vision_start_token_id = processor.vision_start_token_id # Since we use timestamps to seperate videos, # like , # the video_grid_thw should also be split if video_grid_thw is not None: video_grid_thw = torch.repeat_interleave(video_grid_thw, video_grid_thw[:, 0], dim=0) video_grid_thw[:, 0] = 1 if input_ids is not None and (image_grid_thw is not None or video_grid_thw is not None): if attention_mask is None: attention_mask = torch.ones_like(input_ids) position_ids = torch.ones(3, input_ids.shape[0], dtype=input_ids.dtype, device=input_ids.device) image_index, video_index = 0, 0 attention_mask = attention_mask.to(input_ids.device) input_ids = input_ids[attention_mask == 1] image_nums, video_nums = 0, 0 vision_start_indices = torch.argwhere(input_ids == vision_start_token_id) vision_tokens = input_ids[vision_start_indices + 1] image_nums = (vision_tokens == image_token_id).sum() video_nums = (vision_tokens == video_token_id).sum() input_tokens = input_ids.tolist() llm_pos_ids_list: list = [] st = 0 remain_images, remain_videos = image_nums, video_nums for _ in range(image_nums + video_nums): if image_token_id in input_tokens and remain_images > 0: ed_image = input_tokens.index(image_token_id, st) else: ed_image = len(input_tokens) + 1 if video_token_id in input_tokens and remain_videos > 0: ed_video = input_tokens.index(video_token_id, st) else: ed_video = len(input_tokens) + 1 if ed_image < ed_video: t, h, w = ( image_grid_thw[image_index][0], image_grid_thw[image_index][1], image_grid_thw[image_index][2], ) image_index += 1 remain_images -= 1 ed = ed_image else: t, h, w = ( video_grid_thw[video_index][0], video_grid_thw[video_index][1], video_grid_thw[video_index][2], ) video_index += 1 remain_videos -= 1 ed = ed_video llm_grid_t, llm_grid_h, llm_grid_w = ( t.item(), h.item() // spatial_merge_size, w.item() // spatial_merge_size, ) text_len = ed - st st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx) # t_index is always 0 because llm_grid_t is always 1 # (we use timestamps to encode the temporal 
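# ---------------------------------------------------------------------------
# Worked example (illustrative, not part of the original source) of the
# video_grid_thw split near the top of get_rope_index above: a video with T
# temporal grids becomes T single-frame entries, since timestamp tokens (not
# t indices) carry the temporal information for Qwen3-VL.
# ---------------------------------------------------------------------------
def _video_grid_split_example():
    video_grid_thw = torch.tensor([[3, 4, 4]])  # one video, 3 temporal grids
    split = torch.repeat_interleave(video_grid_thw, video_grid_thw[:, 0], dim=0)
    split[:, 0] = 1
    assert split.tolist() == [[1, 4, 4], [1, 4, 4], [1, 4, 4]]
    return split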
information for videos) t_index = torch.arange(llm_grid_t).view(-1, 1).expand(-1, llm_grid_h * llm_grid_w).flatten() h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(llm_grid_t, -1, llm_grid_w).flatten() w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(llm_grid_t, llm_grid_h, -1).flatten() llm_pos_ids_list.append(torch.stack([t_index, h_index, w_index]) + text_len + st_idx) st = ed + llm_grid_t * llm_grid_h * llm_grid_w if st < len(input_tokens): st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 text_len = len(input_tokens) - st llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx) llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1) position_ids[..., attention_mask == 1] = llm_positions.to(position_ids.device) else: if attention_mask is not None: position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) position_ids = position_ids.unsqueeze(0).expand(3, -1).to(attention_mask.device) else: position_ids = torch.arange(input_ids.shape[1], device=input_ids.device).view(1, -1).expand(3, -1) return position_ids def _get_input_embeds( model: "Qwen3VLForConditionalGeneration", input_ids: torch.LongTensor, attention_mask: Optional[torch.Tensor] = None, pixel_values: Optional[torch.FloatTensor] = None, pixel_values_videos: Optional[torch.FloatTensor] = None, image_grid_thw: Optional[torch.LongTensor] = None, video_grid_thw: Optional[torch.LongTensor] = None, ): inputs_embeds = model.get_input_embeddings()(input_ids) image_mask, video_mask = None, None if pixel_values is not None: pixel_values = pixel_values.type(model.visual.dtype) image_embeds, deepstack_image_embeds = model.visual(pixel_values, grid_thw=image_grid_thw) n_image_tokens = (input_ids == model.config.image_token_id).sum().item() n_image_features = image_embeds.shape[0] if n_image_tokens != n_image_features: raise ValueError( f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {n_image_features}" ) mask = input_ids == model.config.image_token_id mask_unsqueezed = mask.unsqueeze(-1) mask_expanded = mask_unsqueezed.expand_as(inputs_embeds) image_mask = mask_expanded.to(inputs_embeds.device) image_embeds = image_embeds.to(inputs_embeds.device, inputs_embeds.dtype) inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds) if pixel_values_videos is not None: pixel_values_videos = pixel_values_videos.type(model.visual.dtype) video_embeds, deepstack_video_embeds = model.visual(pixel_values_videos, grid_thw=video_grid_thw) n_video_tokens = (input_ids == model.config.video_token_id).sum().item() n_video_features = video_embeds.shape[0] if n_video_tokens != n_video_features: raise ValueError( f"Video features and video tokens do not match: tokens: {n_video_tokens}, features {n_video_features}" ) mask = input_ids == model.config.video_token_id mask_unsqueezed = mask.unsqueeze(-1) mask_expanded = mask_unsqueezed.expand_as(inputs_embeds) video_mask = mask_expanded.to(inputs_embeds.device) video_embeds = video_embeds.to(inputs_embeds.device, inputs_embeds.dtype) inputs_embeds = inputs_embeds.masked_scatter(video_mask, video_embeds) visual_pos_masks = None deepstack_visual_embeds = None if image_mask is not None and video_mask is not None: # aggregate visual_pos_masks and deepstack_visual_embeds image_mask = image_mask[..., 0] video_mask = video_mask[..., 0] visual_pos_masks = image_mask | video_mask deepstack_visual_embeds = [] image_mask_joint = image_mask[visual_pos_masks] 
video_mask_joint = video_mask[visual_pos_masks] for img_embed, vid_embed in zip(deepstack_image_embeds, deepstack_video_embeds, strict=False): embed_joint = img_embed.new_zeros(visual_pos_masks.sum(), img_embed.shape[-1]).to(img_embed.device) embed_joint[image_mask_joint, :] = img_embed embed_joint[video_mask_joint, :] = vid_embed deepstack_visual_embeds.append(embed_joint) elif image_mask is not None: image_mask = image_mask[..., 0] visual_pos_masks = image_mask deepstack_visual_embeds = deepstack_image_embeds elif video_mask is not None: video_mask = video_mask[..., 0] visual_pos_masks = video_mask deepstack_visual_embeds = deepstack_video_embeds if pixel_values is None and pixel_values_videos is None: config = model.config.vision_config patch_dim = config.in_channels * config.temporal_patch_size * config.patch_size**2 pixel_values = torch.zeros((16, patch_dim), dtype=inputs_embeds.dtype, device=inputs_embeds.device) image_grid_thw = torch.tensor([[1, 4, 4]], dtype=torch.long, device=inputs_embeds.device) image_embeds, dummy_deepstack_image_embeds = model.visual(pixel_values, grid_thw=image_grid_thw) inputs_embeds += 0.0 * image_embeds.mean() for emb in dummy_deepstack_image_embeds or []: inputs_embeds += 0.0 * emb.mean() if attention_mask is not None: attention_mask = attention_mask.to(inputs_embeds.device) return { "inputs_embeds": inputs_embeds, "attention_mask": attention_mask, "visual_pos_masks": visual_pos_masks, "deepstack_visual_embeds": deepstack_visual_embeds, } @dataclass class Qwen3VLCausalLMOutputForPPO(Qwen3VLCausalLMOutputWithPast): log_probs: Optional[torch.FloatTensor] = None entropy: Optional[torch.FloatTensor] = None def qwen3_vl_base_forward( self: "Qwen3VLForConditionalGeneration", input_ids: torch.LongTensor, attention_mask: Optional[torch.Tensor] = None, pixel_values: Optional[torch.FloatTensor] = None, pixel_values_videos: Optional[torch.FloatTensor] = None, image_grid_thw: Optional[torch.LongTensor] = None, video_grid_thw: Optional[torch.LongTensor] = None, **kwargs, ): input_kwargs = _get_input_embeds( self, input_ids, attention_mask, pixel_values, pixel_values_videos, image_grid_thw, video_grid_thw ) # avoid lora module having multiple keyword arguments kwargs.update(input_kwargs) return self.language_model( input_ids=None, **kwargs, ) def forward_with_normal_backend( self: "Qwen3VLForConditionalGeneration", input_ids: torch.LongTensor = None, labels: Optional[torch.LongTensor] = None, temperature: float = 1.0, **kwargs, ) -> "Qwen3VLCausalLMOutputForPPO": outputs = self.model(input_ids, **kwargs) hidden_states = outputs[0] logits = self.lm_head(hidden_states) return Qwen3VLCausalLMOutputForPPO( logits=logits, hidden_states=outputs.hidden_states, ) def forward_with_torch_backend( self: "Qwen3VLForConditionalGeneration", input_ids: torch.LongTensor = None, labels: Optional[torch.LongTensor] = None, temperature: float = 1.0, **kwargs, ) -> "Qwen3VLCausalLMOutputForPPO": from verl.utils.experimental.torch_functional import FusedLinearForPPO outputs = self.model(input_ids, **kwargs) hidden_states = outputs[0] # Loss calculations if labels is not None: rolled_labels = torch.roll(labels, shifts=-1, dims=-1) elif input_ids is not None: rolled_labels = torch.roll(input_ids, shifts=-1, dims=-1) else: raise RuntimeError("To use forward_with_torch_backend, either labels or input_ids must be provided.") fused_linear_for_ppo = FusedLinearForPPO() log_probs, entropy = fused_linear_for_ppo.forward( hidden_states=hidden_states, vocab_weights=self.lm_head.weight, 
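# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source) of the joint-mask
# packing in _get_input_embeds above: deepstack image and video embeddings
# are scattered into a single buffer ordered by the union visual mask.
# ---------------------------------------------------------------------------
def _deepstack_packing_example():
    image_mask = torch.tensor([True, False, False, True])
    video_mask = torch.tensor([False, False, True, False])
    visual_pos_masks = image_mask | video_mask       # 3 visual positions
    image_mask_joint = image_mask[visual_pos_masks]  # [True, False, True]
    video_mask_joint = video_mask[visual_pos_masks]  # [False, True, False]
    embed_joint = torch.zeros(int(visual_pos_masks.sum()), 2)
    embed_joint[image_mask_joint] = 1.0  # rows holding image embeddings
    embed_joint[video_mask_joint] = 2.0  # rows holding video embeddings
    assert embed_joint[:, 0].tolist() == [1.0, 2.0, 1.0]
    return embed_joint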
input_ids=rolled_labels, temperature=temperature, ) return Qwen3VLCausalLMOutputForPPO( log_probs=log_probs, entropy=entropy, hidden_states=outputs.hidden_states, ) def forward_with_triton_backend( self: "Qwen3VLForConditionalGeneration", input_ids: torch.LongTensor = None, labels: Optional[torch.LongTensor] = None, temperature: float = 1.0, **kwargs, ) -> "Qwen3VLCausalLMOutputForPPO": from verl.utils.kernel.linear_cross_entropy import linear_cross_entropy outputs = self.model(input_ids, **kwargs) hidden_states = outputs[0] # Loss calculations if labels is not None: rolled_labels = torch.roll(labels, shifts=-1, dims=-1) elif input_ids is not None: rolled_labels = torch.roll(input_ids, shifts=-1, dims=-1) else: raise RuntimeError("To use forward_with_triton_backend, either labels or input_ids must be provided.") log_probs, entropy = linear_cross_entropy( hidden_states, self.lm_head.weight, rolled_labels, temperature, "none", ) return Qwen3VLCausalLMOutputForPPO( log_probs=log_probs, entropy=entropy, hidden_states=outputs.hidden_states, ) ================================================ FILE: verl_distillation/verl/models/weight_loader_registry.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. def get_weight_loader(arch: str): from verl.models.mcore.loader import load_state_dict_to_megatron_gptmodel _MODEL_WEIGHT_MEGATRON_LOADER_REGISTRY = { "LlamaForCausalLM": load_state_dict_to_megatron_gptmodel, "Qwen2ForCausalLM": load_state_dict_to_megatron_gptmodel, } if arch in _MODEL_WEIGHT_MEGATRON_LOADER_REGISTRY: return _MODEL_WEIGHT_MEGATRON_LOADER_REGISTRY[arch] raise ValueError( f"Model architectures {arch} loader are not supported for now. Supported architectures: " f"{_MODEL_WEIGHT_MEGATRON_LOADER_REGISTRY.keys()}" ) def get_weight_saver(arch: str): from verl.models.mcore.saver import ( merge_megatron_ckpt_gptmodel, merge_megatron_ckpt_gptmodel_dpskv3, merge_megatron_ckpt_gptmodel_mixtral, merge_megatron_ckpt_gptmodel_qwen2_5_vl, merge_megatron_ckpt_gptmodel_qwen_moe, ) _MODEL_WEIGHT_MEGATRON_SAVER_REGISTRY = { "LlamaForCausalLM": merge_megatron_ckpt_gptmodel, "Qwen2ForCausalLM": merge_megatron_ckpt_gptmodel, "MixtralForCausalLM": merge_megatron_ckpt_gptmodel_mixtral, "Qwen2MoeForCausalLM": merge_megatron_ckpt_gptmodel_qwen_moe, "Qwen2_5_VLForConditionalGeneration": merge_megatron_ckpt_gptmodel_qwen2_5_vl, "DeepseekV3ForCausalLM": merge_megatron_ckpt_gptmodel_dpskv3, "Qwen3ForCausalLM": merge_megatron_ckpt_gptmodel, "Qwen3ForTokenClassification": merge_megatron_ckpt_gptmodel, "Qwen3MoeForCausalLM": merge_megatron_ckpt_gptmodel_qwen_moe, } if arch in _MODEL_WEIGHT_MEGATRON_SAVER_REGISTRY: return _MODEL_WEIGHT_MEGATRON_SAVER_REGISTRY[arch] raise ValueError( f"Model architectures {arch} saver are not supported for now. 
Supported architectures: " f"{_MODEL_WEIGHT_MEGATRON_SAVER_REGISTRY.keys()}" ) ================================================ FILE: verl_distillation/verl/protocol.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Implement base data transfer protocol between any two functions, modules. We can subclass Protocol to define more detailed batch info with specific keys """ import contextlib import copy import logging import math import os import pickle from dataclasses import dataclass, field from typing import Any, Callable, Optional import numpy as np import ray import tensordict import torch import torch.distributed from packaging import version from packaging.version import parse as parse_version from tensordict import TensorDict from torch.utils.data import DataLoader from verl.utils.device import get_device_id, get_torch_device from verl.utils.py_functional import union_two_dict from verl.utils.torch_functional import allgather_dict_tensors __all__ = ["DataProto", "union_tensor_dict"] with contextlib.suppress(Exception): tensordict.set_lazy_legacy(False).set() if parse_version(tensordict.__version__) < parse_version("0.10.0"): tensordict.set_list_to_stack(True).set() class _DataProtoConfigMeta(type): _config = {} auto_padding_key = "_verl_auto_padding" @property def auto_padding(cls): enabled_by_env = os.getenv("VERL_AUTO_PADDING", "FALSE").upper() in ["TRUE", "1"] return enabled_by_env or cls._config.get(cls.auto_padding_key, False) @auto_padding.setter def auto_padding(cls, enabled: bool): assert isinstance(enabled, bool), f"enabled must be a boolean, got {enabled} as {type(enabled)}" cls._config[cls.auto_padding_key] = enabled class DataProtoConfig(metaclass=_DataProtoConfigMeta): pass _padding_size_key = "_padding_size_key_x123d" def pad_dataproto_to_divisor(data: "DataProto", size_divisor: int): """Pad a DataProto to size divisible by size_divisor Args: size_divisor (int): size divisor Returns: data: (DataProto): the padded DataProto pad_size (int) """ assert isinstance(data, DataProto), "data must be a DataProto" if len(data) % size_divisor != 0: pad_size = size_divisor - len(data) % size_divisor padding_protos = [] remaining_pad = pad_size while remaining_pad > 0: take_size = min(remaining_pad, len(data)) padding_protos.append(data[:take_size]) remaining_pad -= take_size data_padded = DataProto.concat([data] + padding_protos) else: if len(data) == 0: logging.warning("padding a DataProto with no item, no changed made") pad_size = 0 data_padded = data return data_padded, pad_size def unpad_dataproto(data: "DataProto", pad_size): """Unpad the data proto with pad_size. i.e. `data[:-pad_size]`""" if pad_size != 0: data = data[:-pad_size] return data def union_tensor_dict(tensor_dict1: TensorDict, tensor_dict2: TensorDict) -> TensorDict: """Union two tensordicts.""" assert tensor_dict1.batch_size == tensor_dict2.batch_size, ( f"Two tensor dict must have identical batch size. 
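# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original source) of the
# pad/unpad pair above, assuming verl is importable: len(data)=5 with
# size_divisor=8 yields pad_size=3, filled by repeating slices of the batch,
# and unpad_dataproto restores the original length.
# ---------------------------------------------------------------------------
def _pad_unpad_example():
    data = DataProto.from_dict(tensors={"obs": torch.arange(5).unsqueeze(-1)})
    padded, pad_size = pad_dataproto_to_divisor(data, size_divisor=8)
    assert len(padded) == 8 and pad_size == 3
    restored = unpad_dataproto(padded, pad_size)
    assert len(restored) == 5
    return restored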
Got {tensor_dict1.batch_size} and {tensor_dict2.batch_size}" ) for key in tensor_dict2.keys(): if key not in tensor_dict1.keys(): tensor_dict1[key] = tensor_dict2[key] else: assert tensor_dict1[key].equal(tensor_dict2[key]), ( f"{key} in tensor_dict1 and tensor_dict2 are not the same object" ) return tensor_dict1 def _array_equal(array1: np.ndarray, array2: np.ndarray, visited: set[int]) -> bool: """ Recursively compares two NumPy arrays for strict equality, with special handling for object-dtype arrays, NaN values, and circular references. This function assumes that the two arguments provided are NumPy arrays. Args: array1: The first NumPy array. array2: The second NumPy array. Returns: True if the arrays' dtypes, shapes, and all elements are equal. """ # Check dtype and shape first, as this is the fastest failure path. if array1.dtype != array2.dtype or array1.shape != array2.shape: return False # For non-object dtypes, use NumPy's implementation with equal_nan=True. if array1.dtype != "object": return np.array_equal(array1, array2, equal_nan=True) # For object-dtype arrays, we must recursively compare each element. # We delegate to _deep_equal to handle elements, as they could be any # type, including other nested arrays or NaNs. return all(_deep_equal(x, y, visited) for x, y in zip(array1.flat, array2.flat, strict=False)) def _deep_equal(a: Any, b: Any, visited: set[int]) -> bool: """ Recursively performs a deep comparison between two Python objects. - Handles NaN values correctly (NaN == NaN evaluates to True). - Handling circular references. - Dispatches to _array_equal if both objects are NumPy arrays. - Otherwise, uses standard '==' comparison. """ if type(a) is not type(b): return False # If we have seen this object ID before on this path, it's a cycle. # Since we already know the types match, we can safely assume this part # of the structure is equal. obj_id = id(a) if obj_id in visited: return True visited.add(obj_id) # Perform the specific comparison based on type result = False if isinstance(a, float) and math.isnan(a) and math.isnan(b): result = True elif isinstance(a, np.ndarray): # We know b is also an ndarray due to the initial type check result = _array_equal(a, b, visited) else: # Standard equality for all other types result = a == b # Clean up the visited set on the way out of the recursion visited.remove(obj_id) return result def union_numpy_dict(tensor_dict1: dict[str, np.ndarray], tensor_dict2: dict[str, np.ndarray]) -> dict[str, np.ndarray]: for key, val in tensor_dict2.items(): if key in tensor_dict1: assert isinstance(tensor_dict2[key], np.ndarray) assert isinstance(tensor_dict1[key], np.ndarray) # to properly deal with nan and object type assert _deep_equal(tensor_dict1[key], tensor_dict2[key], visited=set()), ( f"`{key}` in tensor_dict1 and tensor_dict2 are not the same object." 
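# ---------------------------------------------------------------------------
# Worked example (illustrative, not part of the original source) of the NaN
# semantics handled by _array_equal and _deep_equal above: plain comparison
# makes NaN unequal to itself, while equal_nan=True treats matching NaNs as
# equal, which is the behavior the deep comparison relies on.
# ---------------------------------------------------------------------------
def _nan_equality_example():
    a = np.array([1.0, float("nan")])
    assert not np.array_equal(a, a)              # NaN != NaN by default
    assert np.array_equal(a, a, equal_nan=True)  # NaNs treated as equal
    assert _deep_equal(a, a.copy(), visited=set())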
) tensor_dict1[key] = val return tensor_dict1 def list_of_dict_to_dict_of_list(list_of_dict: list[dict]): if len(list_of_dict) == 0: return {} keys = list_of_dict[0].keys() output = {key: [] for key in keys} for data in list_of_dict: for key, item in data.items(): assert key in output output[key].append(item) return output def fold_batch_dim(data: "DataProto", new_batch_size): """ Fold a batch dim from [bsz, xxx] into [new_bsz, bsz // new_bsz, xxx] """ batch_size = data.batch.batch_size[0] assert batch_size % new_batch_size == 0 tensor: TensorDict = data.batch non_tensor = data.non_tensor_batch tensor = tensor.view(new_batch_size, -1) tensor.auto_batch_size_(batch_dims=1) for key, val in non_tensor.items(): non_tensor[key] = np.reshape(val, newshape=(new_batch_size, -1, *val.shape[1:])) return type(data)(batch=tensor, non_tensor_batch=non_tensor, meta_info=data.meta_info) def unfold_batch_dim(data: "DataProto", batch_dims=2): """ Unfold the first n dims as new batch dim """ tensor: TensorDict = data.batch non_tensor = data.non_tensor_batch tensor.auto_batch_size_(batch_dims=batch_dims) tensor = tensor.view(-1) batch_size = tensor.batch_size[0] non_tensor_new = {} for key, val in non_tensor.items(): non_tensor_new[key] = np.reshape(val, newshape=(batch_size, *val.shape[batch_dims:])) return type(data)(batch=tensor, non_tensor_batch=non_tensor_new, meta_info=data.meta_info) def serialize_single_tensor(obj: torch.Tensor) -> tuple[str, tuple[int, ...], int | memoryview]: data = obj.flatten().contiguous().view(torch.uint8).numpy() dtype = str(obj.dtype).removeprefix("torch.") return dtype, obj.shape, data def serialize_tensordict(batch: TensorDict) -> tuple[tuple[int, ...], Optional[str], dict[str, tuple[str, Any]]]: encoded_items: dict[str, tuple[Any]] = {} for k, v in batch.items(): if not v.is_nested: encoded_items[k] = serialize_single_tensor(v) else: layout = str(v.layout).removeprefix("torch.") data = [serialize_single_tensor(tensor) for tensor in v.unbind()] encoded_items[k] = (layout, data) batch_size = tuple(batch.batch_size) device = str(batch.device) if batch.device is not None else None return batch_size, device, encoded_items def deserialize_single_tensor(arr: Any) -> torch.Tensor: dtype, shape, data = arr torch_dtype = getattr(torch, dtype) assert isinstance(torch_dtype, torch.dtype) buffer = bytearray(data) # Create uint8 array arr = torch.frombuffer(buffer, dtype=torch.uint8) # Convert back to proper shape & type return arr.view(torch_dtype).view(shape) def deserialize_tensordict(arr: Any) -> TensorDict: batch_size, device, encoded_items = arr decoded_items: dict[str, Any] = {} for k, v in encoded_items.items(): if len(v) == 3: # decode single tensor decoded_items[k] = deserialize_single_tensor(v) elif len(v) == 2: # decode nested tensor layout, data = v torch_layout = getattr(torch, layout) decoded_items[k] = torch.nested.as_nested_tensor( [deserialize_single_tensor(tensor) for tensor in data], layout=torch_layout ) else: raise ValueError(f"Invalid tensor encoding format, expected length 2 or 3, got {len(v)}") return TensorDict(source=decoded_items, batch_size=batch_size, device=device) def collate_fn(x: list["DataProtoItem"]): batch = [] non_tensor_batch = [] for data in x: batch.append(data.batch) non_tensor_batch.append(data.non_tensor_batch) batch = torch.stack(batch).contiguous() non_tensor_batch = list_of_dict_to_dict_of_list(non_tensor_batch) for key, val in non_tensor_batch.items(): non_tensor_batch[key] = np.array(val, dtype=object) return DataProto(batch=batch, 
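# ---------------------------------------------------------------------------
# Round-trip sketch (illustrative, not part of the original source) of
# serialize_single_tensor / deserialize_single_tensor above: a tensor travels
# as (dtype string, shape, uint8 byte buffer) and is reconstructed exactly.
# ---------------------------------------------------------------------------
def _tensor_serialization_example():
    t = torch.randn(2, 3, dtype=torch.float32)
    encoded = serialize_single_tensor(t)  # ("float32", (2, 3), uint8 buffer)
    restored = deserialize_single_tensor(encoded)
    assert torch.equal(t, restored)
    return restored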
non_tensor_batch=non_tensor_batch) @dataclass class DataProtoItem: # TODO(zhangchi.usc1992) add consistency check batch: TensorDict = None non_tensor_batch: dict = field(default_factory=dict) meta_info: dict = field(default_factory=dict) @dataclass class DataProto: """ A DataProto is a data structure that aims to provide a standard protocol for data exchange between functions. It contains a batch (TensorDict) and a meta_info (Dict). The batch is a TensorDict https://pytorch.org/tensordict/. TensorDict allows you to manipulate a dictionary of Tensors like a single Tensor. Ideally, the tensors with the same batch size should be put inside batch. """ batch: TensorDict = None non_tensor_batch: dict = field(default_factory=dict) meta_info: dict = field(default_factory=dict) def __post_init__(self): # perform necessary checking self.check_consistency() def __len__(self): if self.batch is not None: return self.batch.batch_size[0] elif self.non_tensor_batch is not None and len(self.non_tensor_batch) > 0: random_key = list(self.non_tensor_batch.keys())[0] return self.non_tensor_batch[random_key].shape[0] else: return 0 def __getitem__(self, item): """ Enhanced indexing for DataProto objects. Args: item: Can be one of: - int: A single index - slice: A slice object (start:stop:step) - list: A list of indices - numpy.ndarray: An array of indices - torch.Tensor: A tensor of indices Returns: DataProto: For all indexing types except single integers DataProtoItem: Only for single integer indices """ # Case 1: Slice object - use the slice method if isinstance(item, slice): return self.slice(item.start, item.stop, item.step) # Case 2: List, numpy array, or torch tensor - use sel_idxs elif isinstance(item, list | np.ndarray | torch.Tensor): return self.select_idxs(item) # Case 3: Single integer - return DataProtoItem for backward compatibility elif isinstance(item, int | np.integer): tensor_data = self.batch[item] if self.batch is not None else None non_tensor_data = {key: val[item] for key, val in self.non_tensor_batch.items()} return DataProtoItem(batch=tensor_data, non_tensor_batch=non_tensor_data, meta_info=self.meta_info) # # Case 4: Unsupported type else: raise TypeError(f"Indexing with {type(item)} is not supported") def __getstate__(self): if version.parse(tensordict.__version__) >= version.parse("0.5.0") and self.batch is not None: batch = self.batch.contiguous().consolidate() else: batch = self.batch if os.getenv("VERL_DATAPROTO_SERIALIZATION_METHOD") == "numpy": if batch is not None: batch = serialize_tensordict(self.batch) return ( batch, self.non_tensor_batch, self.meta_info, ) else: import io buffer = io.BytesIO() torch.save(batch, buffer) buffer_bytes = buffer.getvalue() return buffer_bytes, self.non_tensor_batch, self.meta_info def __setstate__(self, data): batch_deserialized_bytes, non_tensor_batch, meta_info = data if os.getenv("VERL_DATAPROTO_SERIALIZATION_METHOD") == "numpy": if batch_deserialized_bytes is not None: self.batch = deserialize_tensordict(batch_deserialized_bytes) else: self.batch = None else: import io batch_deserialized = io.BytesIO(initial_bytes=batch_deserialized_bytes) batch = torch.load( batch_deserialized, weights_only=False, map_location="cpu" if not get_torch_device().is_available() else None, ) self.batch = batch self.non_tensor_batch = non_tensor_batch self.meta_info = meta_info def save_to_disk(self, filepath): with open(filepath, "wb") as f: pickle.dump(self, f) @staticmethod def load_from_disk(filepath) -> "DataProto": with open(filepath, "rb") as f: data = 
pickle.load(f) return data def print_size(self, prefix=""): size_of_tensordict = 0 if self.batch is not None: for _, tensor in self.batch.items(): size_of_tensordict += tensor.element_size() * tensor.numel() size_of_numpy_array = 0 for _, numpy_array in self.non_tensor_batch.items(): size_of_numpy_array += numpy_array.nbytes size_of_numpy_array /= 1024**3 size_of_tensordict /= 1024**3 message = f"Size of tensordict: {size_of_tensordict} GB, size of non_tensor_batch: {size_of_numpy_array} GB" if prefix: message = f"{prefix}, " + message print(message) def check_consistency(self): """Check the consistency of the DataProto. Mainly for batch and non_tensor_batch We expose this function as a public one so that user can call themselves directly """ if self.batch is not None: assert len(self.batch.batch_size) == 1, "only support num_batch_dims=1" if self.non_tensor_batch is not None: for key, val in self.non_tensor_batch.items(): assert isinstance(val, np.ndarray) if self.batch is not None and self.non_tensor_batch is not None and len(self.non_tensor_batch) != 0: # TODO: we can actually lift this restriction if needed assert len(self.batch.batch_size) == 1, "only support num_batch_dims=1 when non_tensor_batch is not empty." batch_size = self.batch.batch_size[0] for key, val in self.non_tensor_batch.items(): assert isinstance(val, np.ndarray), ( f"data in the non_tensor_batch must be a numpy.array with dtype=object, but for " f"{key=}, got {type(val)=}" ) assert val.shape[0] == batch_size, ( f"key {key} length {len(val)} is not equal to batch size {batch_size}" ) @classmethod def from_single_dict(cls, data: dict[str, torch.Tensor | np.ndarray], meta_info=None, auto_padding=False): """Create a DataProto from a dict of tensors and non_tensors""" tensors = {} non_tensors = {} for key, val in data.items(): if isinstance(val, torch.Tensor): tensors[key] = val elif isinstance(val, np.ndarray): non_tensors[key] = val else: raise ValueError(f"Unsupported type in data {type(val)}") return cls.from_dict(tensors=tensors, non_tensors=non_tensors, meta_info=meta_info, auto_padding=auto_padding) @classmethod def from_dict( cls, tensors: Optional[dict[str, torch.Tensor]] = None, non_tensors=None, meta_info=None, num_batch_dims=1, auto_padding=False, ): """Create a DataProto from a dict of tensors. This assumes that 1. All the tensor in tensors have the same dim0 2. Only dim0 is the batch dim """ assert num_batch_dims > 0, "num_batch_dims must be greater than zero" if non_tensors is not None: assert num_batch_dims == 1, "only support num_batch_dims=1 when non_tensors is not None." if tensors is None: tensors = {} if meta_info is None: meta_info = {} if non_tensors is None: non_tensors = {} assert isinstance(non_tensors, dict) # get and check batch size batch_size = None pivot_key = None for key, tensor in tensors.items(): if batch_size is None: batch_size = tensor.shape[:num_batch_dims] pivot_key = key else: current_batch = tensor.shape[:num_batch_dims] assert batch_size == current_batch, ( f"Not all the tensor in tensors have the same batch size with batch_dims={num_batch_dims}. 
" f"Got {pivot_key} has {batch_size}, {key} has {current_batch}" ) for key, val in non_tensors.items(): if not isinstance(val, np.ndarray): non_tensors[key] = np.array(val, dtype=object) tensor_dict = TensorDict(source=tensors, batch_size=batch_size) if tensors else None if auto_padding: meta_info[DataProtoConfig.auto_padding_key] = True return cls(batch=tensor_dict, non_tensor_batch=non_tensors, meta_info=meta_info) @classmethod def from_tensordict( cls, tensor_dict: TensorDict = None, meta_info=None, num_batch_dims=1, ): """Create a DataProto from a TensorDict. This assumes that 1. All the tensor in tensor_dict have the same dim0 2. Only dim0 is the batch dim """ assert version.parse(tensordict.__version__) >= version.parse("0.10.0"), ( "Build DataProto from TensorDict at least requires tensordict version 0.10.0" ) from tensordict import NonTensorData, NonTensorStack assert num_batch_dims > 0, "num_batch_dims must be greater than zero" if not all(isinstance(val, torch.Tensor) for val in tensor_dict.values()): assert num_batch_dims == 1, "only support num_batch_dims=1 when tensor_dict contains non tensor data." if meta_info is None: meta_info = {} batch = {} non_tensor_batch = {} batch_size = None for key, val in tensor_dict.items(): if isinstance(val, torch.Tensor): batch[key] = val if batch_size is None: batch_size = val.shape[:num_batch_dims] elif isinstance(val, NonTensorStack): non_tensor_batch[key] = np.array([elem.data for elem in val], dtype=object) elif isinstance(val, NonTensorData): meta_info[key] = val.data return cls( batch=TensorDict(batch, batch_size=batch_size), non_tensor_batch=non_tensor_batch, meta_info=meta_info, ) def to(self, device) -> "DataProto": """move the batch to device Args: device (torch.device, str): torch device Returns: DataProto: the current DataProto """ if self.batch is not None: self.batch = self.batch.to(device) return self def select(self, batch_keys=None, non_tensor_batch_keys=None, meta_info_keys=None, deepcopy=False) -> "DataProto": """Select a subset of the DataProto via batch_keys and meta_info_keys Args: batch_keys (list, optional): a list of strings indicating the keys in batch to select meta_info_keys (list, optional): a list of keys indicating the meta info to select Returns: DataProto: the DataProto with the selected batch_keys and meta_info_keys """ # TODO (zhangchi.usc1992) whether to copy if batch_keys is not None: batch_keys = tuple(batch_keys) sub_batch = self.batch.select(*batch_keys) else: sub_batch = self.batch if non_tensor_batch_keys is not None: non_tensor_batch = {key: val for key, val in self.non_tensor_batch.items() if key in non_tensor_batch_keys} else: non_tensor_batch = self.non_tensor_batch if deepcopy: non_tensor_batch = copy.deepcopy(non_tensor_batch) if meta_info_keys is not None: sub_meta_info = {key: val for key, val in self.meta_info.items() if key in meta_info_keys} else: sub_meta_info = self.meta_info if deepcopy: sub_meta_info = copy.deepcopy(sub_meta_info) return type(self)(batch=sub_batch, non_tensor_batch=non_tensor_batch, meta_info=sub_meta_info) def select_idxs(self, idxs): """ Select specific indices from the DataProto. 
        Args:
            idxs (torch.Tensor or numpy.ndarray or list): Indices to select

        Returns:
            DataProto: A new DataProto containing only the selected indices
        """
        if isinstance(idxs, list):
            idxs = torch.tensor(idxs)
            if idxs.dtype != torch.bool:
                idxs = idxs.type(torch.int32)

        if isinstance(idxs, np.ndarray):
            idxs_np = idxs
            idxs_torch = torch.from_numpy(idxs)
        else:  # torch.Tensor
            idxs_torch = idxs
            idxs_np = idxs.detach().cpu().numpy()

        batch_size = int(idxs_np.sum()) if idxs_np.dtype == bool else idxs_np.shape[0]

        if self.batch is not None:
            # Use TensorDict's built-in indexing capabilities
            selected_batch = TensorDict(
                source={key: tensor[idxs_torch] for key, tensor in self.batch.items()},
                batch_size=(batch_size,),
                device=self.batch.device,
            )
        else:
            selected_batch = None

        selected_non_tensor = {}
        for key, val in self.non_tensor_batch.items():
            selected_non_tensor[key] = val[idxs_np]

        return type(self)(batch=selected_batch, non_tensor_batch=selected_non_tensor, meta_info=self.meta_info)

    def slice(self, start=None, end=None, step=None):
        """
        Slice the DataProto and return a new DataProto object.
        This is an improved version of direct slicing, which used to return a DataProtoItem.

        Args:
            start (int, optional): Start index. Defaults to None (start from beginning).
            end (int, optional): End index (exclusive). Defaults to None (go to end).
            step (int, optional): Step size. Defaults to None (step=1).

        Returns:
            DataProto: A new DataProto containing the sliced data

        Examples:
            # Using the slice method directly
            sliced_data = data_proto.slice(10, 20)

            # Using enhanced indexing (returns DataProto)
            sliced_data = data_proto[10:20]
            sliced_data = data_proto[::2]  # Every other element

            # Using list indexing (returns DataProto)
            indices = [1, 5, 10]
            selected_data = data_proto[indices]

            # Single index still returns DataProtoItem
            single_item = data_proto[5]
        """
        # Create a slice object
        slice_obj = slice(start, end, step)

        # Handle the batch data
        if self.batch is not None:
            # Use TensorDict's built-in slicing capabilities
            sliced_batch = self.batch[slice_obj]
        else:
            sliced_batch = None

        # Handle the non-tensor batch data
        sliced_non_tensor = {}
        for key, val in self.non_tensor_batch.items():
            sliced_non_tensor[key] = val[slice_obj]

        # Return a new DataProto object
        return type(self)(batch=sliced_batch, non_tensor_batch=sliced_non_tensor, meta_info=self.meta_info)

    def pop(self, batch_keys=None, non_tensor_batch_keys=None, meta_info_keys=None) -> "DataProto":
        """Pop a subset of the DataProto via `batch_keys` and `meta_info_keys`

        Args:
            batch_keys (list, optional): a list of strings indicating the keys in batch to pop
            non_tensor_batch_keys (list, optional): a list of keys in the non-tensor batch to pop
            meta_info_keys (list, optional): a list of keys indicating the meta info to pop

        Returns:
            DataProto: the DataProto with the popped batch_keys and meta_info_keys
        """
        if batch_keys is None:
            batch_keys = []
        if meta_info_keys is None:
            meta_info_keys = []
        if non_tensor_batch_keys is None:
            non_tensor_batch_keys = []

        tensors = {}
        # tensor batch
        for key in batch_keys:
            assert key in self.batch.keys()
            tensors[key] = self.batch.pop(key)
        non_tensors = {}
        # non tensor batch
        for key in non_tensor_batch_keys:
            assert key in self.non_tensor_batch.keys()
            non_tensors[key] = self.non_tensor_batch.pop(key)
        meta_info = {}
        for key in meta_info_keys:
            assert key in self.meta_info.keys()
            meta_info[key] = self.meta_info.pop(key)
        return DataProto.from_dict(tensors=tensors, non_tensors=non_tensors, meta_info=meta_info)

    def rename(self, old_keys=None, new_keys=None) -> "DataProto":
        """Note that this function only renames keys in the batch."""

        def validate_input(keys):
            if keys is not None:
                if isinstance(keys, str):
                    keys = [keys]
                elif isinstance(keys, list):
                    pass
                else:
                    raise TypeError(f"keys must be a list or a string, but got {type(keys)}")
            return keys

        old_keys = validate_input(old_keys)
        new_keys = validate_input(new_keys)

        if len(new_keys) != len(old_keys):
            raise ValueError(
                f"new_keys and old_keys must have the same length, but got {len(new_keys)} and {len(old_keys)}"
            )

        self.batch.rename_key_(tuple(old_keys), tuple(new_keys))

        return self

    def union(self, other: "DataProto") -> "DataProto":
        """Union with another DataProto. Union batch and meta_info separately.
        Throw an error if

        - there are conflicting keys in batch and they are not equal
        - the batch sizes of the two DataProtos differ
        - there are conflicting keys in meta_info and they are not the same.

        Args:
            other (DataProto): another DataProto to union

        Returns:
            DataProto: the DataProto after union
        """
        self.batch = union_tensor_dict(self.batch, other.batch)
        self.non_tensor_batch = union_numpy_dict(self.non_tensor_batch, other.non_tensor_batch)
        self.meta_info = union_two_dict(self.meta_info, other.meta_info)
        return self

    def make_iterator(self, mini_batch_size, epochs, seed=None, dataloader_kwargs=None):
        r"""Make an iterator from the DataProto. This is built upon the fact that a TensorDict can be
        used as a normal PyTorch dataset. See https://pytorch.org/tensordict/tutorials/data_fashion
        for more details.

        Args:
            mini_batch_size (int): mini-batch size when iterating the dataset. We require that
                ``batch.batch_size[0] % mini_batch_size == 0``.
            epochs (int): number of epochs when iterating the dataset.
            seed (int, optional): seed for the generator passed to the internal DataLoader.
            dataloader_kwargs (Any): internally, it returns a DataLoader over the batch.
                The dataloader_kwargs is the kwargs passed to the DataLoader.

        Returns:
            Iterator: an iterator that yields a mini-batch of data at a time. The total number of
            iteration steps is ``self.batch.batch_size[0] * epochs // mini_batch_size``
        """
        assert self.batch.batch_size[0] % mini_batch_size == 0, f"{self.batch.batch_size[0]} % {mini_batch_size} != 0"
        # we can directly create a dataloader from TensorDict
        if dataloader_kwargs is None:
            dataloader_kwargs = {}

        if seed is not None:
            generator = torch.Generator()
            generator.manual_seed(seed)
        else:
            generator = None

        assert isinstance(dataloader_kwargs, dict)
        train_dataloader = DataLoader(
            dataset=self, batch_size=mini_batch_size, collate_fn=collate_fn, generator=generator, **dataloader_kwargs
        )

        def get_data():
            for _ in range(epochs):
                for d in train_dataloader:
                    d.meta_info = self.meta_info
                    yield d

        return iter(get_data())

    def is_padding_enabled(self):
        """
        Check if padding is enabled for the DataProto.

        Returns:
            bool: True if padding is enabled, False otherwise.
        """
        dataproto_specific_padding = self.meta_info.get(DataProtoConfig.auto_padding_key, False)
        return dataproto_specific_padding or DataProtoConfig.auto_padding

    def padding(self, padding_size, padding_candidate=""):
        """Pad the DataProto by concatenating with padding_candidate.repeat(padding_size)

        Args:
            padding_size (int): the number of repeated padding_candidate
            padding_candidate: the item to be repeated and appended to the DataProto,
                only supporting ["first", "last"]
        """
        if padding_size == 0:
            return
        padding_candidate = self.select_idxs([0 if padding_candidate == "first" else len(self) - 1])
        padding_part = padding_candidate.repeat(padding_size)
        padded_dp = DataProto.concat([self, padding_part])
        self.batch = padded_dp.batch
        self.non_tensor_batch = padded_dp.non_tensor_batch

    def chunk(self, chunks: int) -> list["DataProto"]:
        """Split the batch along dim=0 into chunks.
        The meta_info is passed to each DataProto after split.

        Args:
            chunks (int): the number of chunks to split on dim=0

        Returns:
            List[DataProto]: a list of DataProto after splitting
        """
        if not self.is_padding_enabled():
            assert len(self) % chunks == 0, (
                f"only support equal chunk. Got size of DataProto {len(self)} and chunk {chunks}."
            )

        bsz_in_batch = None
        if self.batch is not None:
            batch_lst = self.batch.chunk(chunks=chunks, dim=0)
            bsz_in_batch = np.array([batch.batch_size[0] for batch in batch_lst])
            chunk_indices = np.cumsum(bsz_in_batch)[:-1]
        else:
            batch_lst = [None for _ in range(chunks)]

        non_tensor_batch_lst = [{} for _ in range(chunks)]
        for key, val in self.non_tensor_batch.items():
            assert isinstance(val, np.ndarray)
            if bsz_in_batch is not None:
                non_tensor_lst = np.array_split(val, chunk_indices.tolist())
            else:
                non_tensor_lst = np.array_split(val, chunks)
            assert len(non_tensor_lst) == chunks
            for i in range(chunks):
                non_tensor_batch_lst[i][key] = non_tensor_lst[i]

        output = []
        for i in range(chunks):
            output.append(
                type(self)(batch=batch_lst[i], non_tensor_batch=non_tensor_batch_lst[i], meta_info=self.meta_info)
            )

        return output

    def split(self, split_size: int) -> list["DataProto"]:
        """Split the batch along dim=0 into splits of size ``split_size``.
        The meta_info is passed to each DataProto after split.

        Args:
            split_size (int): the size of each split

        Returns:
            List[DataProto]: a list of DataProto after splitting
        """
        return [self[i : i + split_size] for i in range(0, len(self), split_size)]

    @staticmethod
    def concat(data: list["DataProto"]) -> "DataProto":
        """Concat a list of DataProto. The batch is concatenated along dim=0.
        The meta_info is merged, with special handling for metrics from different workers.

        Args:
            data (List[DataProto]): list of DataProto

        Returns:
            DataProto: concatenated DataProto
        """
        batch_lst = []
        for batch in data:
            batch_lst.append(batch.batch)
        new_batch = torch.cat(batch_lst, dim=0) if batch_lst[0] is not None else None

        non_tensor_batch = list_of_dict_to_dict_of_list(list_of_dict=[d.non_tensor_batch for d in data])
        for key, val in non_tensor_batch.items():
            non_tensor_batch[key] = np.concatenate(val, axis=0)

        # Merge meta_info with special handling for metrics
        merged_meta_info = {}
        if data:
            # Merge non-metric meta_info and aggregate metrics from all workers.
            all_metrics = []
            for d in data:
                for k, v in d.meta_info.items():
                    if k == "metrics":
                        if v is not None:
                            if isinstance(v, list):
                                all_metrics.extend(v)
                            else:
                                all_metrics.append(v)
                    else:
                        if k in merged_meta_info:
                            # Ensure consistency for overlapping non-metric keys
                            assert merged_meta_info[k] == v, f"Conflicting values for meta_info key '{k}'"
                        else:
                            merged_meta_info[k] = v

            # Flatten list of dicts to dict of lists for consistent metrics structure
            if all_metrics:
                merged_meta_info["metrics"] = list_of_dict_to_dict_of_list(all_metrics)

        cls = type(data[0]) if len(data) > 0 else DataProto
        return cls(batch=new_batch, non_tensor_batch=non_tensor_batch, meta_info=merged_meta_info)

    def reorder(self, indices):
        """
        Note that this operation is in-place
        """
        indices_np = indices.detach().numpy()
        self.batch = self.batch[indices]
        self.non_tensor_batch = {key: val[indices_np] for key, val in self.non_tensor_batch.items()}

    def repeat(self, repeat_times=2, interleave=True):
        """
        Repeat the batch data a specified number of times.

        Args:
            repeat_times (int): Number of times to repeat the data.
            interleave (bool): Whether to interleave the repeated data.

        Returns:
            DataProto: A new DataProto with repeated data.
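
        Example (illustrative; assumes ``data`` has rows ``[r0, r1]``):
            >>> data.repeat(repeat_times=2, interleave=True)   # rows become [r0, r0, r1, r1]
            >>> data.repeat(repeat_times=2, interleave=False)  # rows become [r0, r1, r0, r1]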
""" if self.batch is not None: if interleave: # Interleave the data repeated_tensors = { key: tensor.repeat_interleave(repeat_times, dim=0) for key, tensor in self.batch.items() } else: # Stack the data repeated_tensors = { key: tensor.unsqueeze(0).expand(repeat_times, *tensor.shape).reshape(-1, *tensor.shape[1:]) for key, tensor in self.batch.items() } repeated_batch = TensorDict( source=repeated_tensors, batch_size=(self.batch.batch_size[0] * repeat_times,), ) else: repeated_batch = None repeated_non_tensor_batch = {} for key, val in self.non_tensor_batch.items(): if interleave: repeated_non_tensor_batch[key] = np.repeat(val, repeat_times, axis=0) else: repeated_non_tensor_batch[key] = np.tile(val, (repeat_times,) + (1,) * (val.ndim - 1)) return type(self)( batch=repeated_batch, non_tensor_batch=repeated_non_tensor_batch, meta_info=self.meta_info, ) def unfold_column_chunks(self, n_split: int, split_keys: Optional[list[str]] = None): """Split along the second dim into `n_split`, unfold it to the first dim (batch dim) Useful in passing grouped tensors that doesn't want to be shuffled in dataset. keys not in split_keys are repeated to match the shape Note that if the `split_keys` is not provided, it will repeat all the keys in the second dim. """ if self.batch is not None: unfolded_batch = {} for key in self.batch.keys(): if key in split_keys if split_keys is not None else False: shape = list(self.batch[key].shape) shape[0] = self.batch[key].shape[0] * n_split shape[1] = self.batch[key].shape[1] // n_split unfolded_batch[key] = self.batch[key].reshape(*shape) else: unfolded_batch[key] = torch.repeat_interleave(self.batch[key], n_split, dim=0) # locate the `unfolded_batch` as a TensorDict on the same device as the original batch unfolded_batch = TensorDict( source=unfolded_batch, batch_size=(self.batch.batch_size[0] * n_split,), device=self.batch.device ) else: unfolded_batch = None repeated_non_tensor_batch = {} for key, val in self.non_tensor_batch.items(): if key in split_keys: shape = list(val.shape) shape[0] = val.shape[0] * n_split shape[1] = val.shape[1] // n_split repeated_non_tensor_batch[key] = val.reshape(*shape) else: repeated_non_tensor_batch[key] = np.repeat(val, n_split, axis=0) return type(self)( batch=unfolded_batch, non_tensor_batch=repeated_non_tensor_batch, meta_info=self.meta_info, ) def sample_level_repeat(self, repeat_times): """ Repeat each row of the batch data a specified number of times. Args: repeat_times (torch.tensor, list, tuple, ndarray): Number of times to repeat the data. Returns: DataProto: A new DataProto with repeated data. 
""" if isinstance(repeat_times, tuple): repeat_times = list(repeat_times) elif isinstance(repeat_times, torch.Tensor): assert len(repeat_times.shape) == 1 repeat_times = repeat_times.tolist() elif isinstance(repeat_times, np.ndarray): assert len(repeat_times.shape) == 1 repeat_times = repeat_times.tolist() else: assert isinstance(repeat_times, list), ( f"repeat_times type must be in [list, torch.Tensor, np.ndarray, tuple], got {type(repeat_times)}" ) repeat_times = torch.tensor(repeat_times) if self.batch is not None: # Interleave the data repeated_tensors = { key: tensor.repeat_interleave(repeat_times, dim=0) for key, tensor in self.batch.items() } repeated_batch = TensorDict( source=repeated_tensors, batch_size=(repeat_times.sum().item(),), device=self.batch.device, ) else: repeated_batch = None repeated_non_tensor_batch = {} for key, val in self.non_tensor_batch.items(): repeated_non_tensor_batch[key] = np.repeat(val, repeat_times, axis=0) return type(self)( batch=repeated_batch, non_tensor_batch=repeated_non_tensor_batch, meta_info=self.meta_info, ) def to_tensordict(self) -> TensorDict: """Convert this DataProto to TensorDict. Note that this requires tensordict version at least 0.10 Returns: """ assert parse_version(tensordict.__version__) >= parse_version("0.10"), ( "Convert DataProto to TensorDict at least requires tensordict version 0.10" ) tensor_batch = self.batch.to_dict() non_tensor_batch = self.non_tensor_batch from verl.utils import tensordict_utils as tu common_keys = set(tensor_batch.keys()) & set(non_tensor_batch.keys()) assert len(common_keys) == 0, f"tensor_batch and non_tensor_batch have common keys {common_keys}" for key, val in non_tensor_batch.items(): assert isinstance(val, np.ndarray) tensor_batch[key] = val.tolist() output = tu.get_tensordict(tensor_dict=tensor_batch, non_tensor_dict=self.meta_info) return output def get_data_info(self) -> str: """Return formatted information about stored data with nested type details. Returns: str: Formatted string showing tensor details and recursive metadata types """ info = ["batch"] for key, tensor in self.batch.items(): if hasattr(tensor, "shape") and hasattr(tensor, "dtype") and hasattr(tensor, "device"): info.append(f" {key}: {tuple(tensor.shape)} ({tensor.dtype}) {tensor.device}") elif hasattr(tensor, "shape") and hasattr(tensor, "dtype"): info.append(f" {key}: {tuple(tensor.shape)} ({tensor.dtype})") else: info.append(f" {key}: {type(tensor).__name__}") info.append("non_tensor_batch") for key, array in self.non_tensor_batch.items(): info.append(f" {key}: ndarray{array.shape} ({array.dtype})") info.append("meta_info") for k, v in self.meta_info.items(): type_info = self._get_type_info(v) info.append(f" {k}: {type_info}") return "\n".join(info) def _get_type_info(self, value): """Recursively get type information for nested structures""" if isinstance(value, list): elem_types = {self._get_type_info(v) for v in value[:3]} return f"list[{'|'.join(elem_types) if elem_types else '...'}]" if isinstance(value, tuple): elem_types = [self._get_type_info(v) for v in value] return f"tuple({', '.join(elem_types)})" if isinstance(value, dict): if not value: return "dict" k, v = next(iter(value.items())) return f"dict[{self._get_type_info(k)}: {self._get_type_info(v)}]" if isinstance(value, np.ndarray): return f"ndarray{value.shape} ({value.dtype})" return type(value).__name__ @dataclass class DataProtoFuture: """ DataProtoFuture aims to eliminate actual data fetching on driver. 
By doing so, the driver doesn't have to wait for data so that asynchronous execution becomes possible. DataProtoFuture contains a list of futures from another WorkerGroup of size world_size. - collect_fn is a Callable that reduces the list of futures to a DataProto - dispatch_fn is a Callable that partitions the DataProto into a list of DataProto of size world_size and then select Potential issue: we can optimize dispatch_fn(collect_fn) such that only needed data is fetched on destination - DataProtoFuture only supports directly passing from the output of a method to another input. You can't perform any operation on the DataProtoFuture in driver. """ collect_fn: Callable futures: list[ray.ObjectRef] dispatch_fn: Callable = None @staticmethod def concat(data: list[ray.ObjectRef]) -> "DataProtoFuture": output = DataProtoFuture(collect_fn=DataProto.concat, futures=data) return output def chunk(self, chunks: int) -> list["DataProtoFuture"]: from functools import partial arg_future_lst = [] for i in range(chunks): # note that we can't directly pass i and chunks def dispatch_fn(x, i, chunks): return x.chunk(chunks=chunks)[i] arg_future = DataProtoFuture( collect_fn=self.collect_fn, dispatch_fn=partial(dispatch_fn, i=i, chunks=chunks), futures=self.futures ) arg_future_lst.append(arg_future) return arg_future_lst def get(self): output = ray.get(self.futures) # dp_size. for o in output: assert isinstance(o, DataProto) output = self.collect_fn(output) # select dp, concat if self.dispatch_fn is not None: output = self.dispatch_fn(output) # split in batch dim, select using dp return output def all_gather_data_proto(data: DataProto, process_group): # Note that this is an inplace operator just like torch.distributed.all_gather group_size = torch.distributed.get_world_size(group=process_group) assert isinstance(data, DataProto) prev_device = data.batch.device data = data.to(get_device_id()) data.batch = allgather_dict_tensors(data.batch.contiguous(), size=group_size, group=process_group, dim=0) data = data.to(prev_device) # all gather non_tensor_batch all_non_tensor_batch = [None for _ in range(group_size)] torch.distributed.all_gather_object(all_non_tensor_batch, data.non_tensor_batch, group=process_group) data.non_tensor_batch = {k: np.concatenate([d[k] for d in all_non_tensor_batch]) for k in data.non_tensor_batch} ================================================ FILE: verl_distillation/verl/py.typed ================================================ ================================================ FILE: verl_distillation/verl/single_controller/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from . 
import base from .base import * version_folder = os.path.dirname(os.path.join(os.path.abspath(__file__))) # Note(haibin.lin): single_controller.__version__ is deprecated with open(os.path.join(os.path.join(version_folder, os.pardir), "version/version")) as f: __version__ = f.read().strip() __all__ = base.__all__ ================================================ FILE: verl_distillation/verl/single_controller/base/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .worker import Worker from .worker_group import ClassWithInitArgs, ResourcePool, WorkerGroup __all__ = ["Worker", "WorkerGroup", "ClassWithInitArgs", "ResourcePool"] ================================================ FILE: verl_distillation/verl/single_controller/base/decorator.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from functools import partial, wraps from types import FunctionType from verl.protocol import DataProtoFuture, _padding_size_key from verl.utils.py_functional import DynamicEnum from verl.utils.transferqueue_utils import BatchMeta # here we add a magic number of avoid user-defined function already have this attribute MAGIC_ATTR = "attrs_3141562937" class Dispatch(DynamicEnum): """Enum class defining different dispatch modes for distributed computation. Each mode represents a specific strategy for distributing data across different ranks in a distributed system. The modes are used to control how data is partitioned and processed across different worker groups. """ _registry = {} _next_value = 0 def init_predefined_dispatch_mode(): Dispatch.register("RANK_ZERO") Dispatch.register("ONE_TO_ALL") Dispatch.register("ALL_TO_ALL") Dispatch.register("DP_COMPUTE") Dispatch.register("DP_COMPUTE_PROTO") Dispatch.register("DP_COMPUTE_PROTO_WITH_FUNC") Dispatch.register("DP_COMPUTE_METRIC") # This is a special dispatch mode for vllm ExternalRayDistributedExecutor Dispatch.register("DIRECT_ROLLOUT_METHOD") class Execute(DynamicEnum): """Enum class defining different execution modes for distributed computation. These modes control how a function should be executed across different ranks in a distributed system. 
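
    For example, a method registered with ``execute_mode=Execute.RANK_ZERO`` is routed to the
    worker group's ``execute_rank_zero`` entry point, while ``Execute.ALL`` uses ``execute_all``
    (see ``get_predefined_execute_fn`` below).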
""" _registry = {} _next_value = 0 def init_predefined_execute_mode(): Execute.register("ALL") Execute.register("RANK_ZERO") # Initialize the two Dynamic Enum Classes init_predefined_dispatch_mode() init_predefined_execute_mode() def _split_args_kwargs_data_proto(chunks, *args, **kwargs): from verl.protocol import DataProto, DataProtoFuture splitted_args = [] for arg in args: assert isinstance(arg, DataProto | DataProtoFuture | BatchMeta) splitted_args.append(arg.chunk(chunks=chunks)) splitted_kwargs = {} for key, val in kwargs.items(): assert isinstance(val, DataProto | DataProtoFuture | BatchMeta) splitted_kwargs[key] = val.chunk(chunks=chunks) return splitted_args, splitted_kwargs def _split_args_kwargs_data_proto_with_auto_padding(chunks, *args, **kwargs): from verl.protocol import DataProto, DataProtoFuture data_proto_len = None padding_size = None def _padding_and_split_data(obj, chunks): nonlocal data_proto_len, padding_size assert isinstance(obj, DataProto | DataProtoFuture) if isinstance(obj, DataProto) and obj.is_padding_enabled(): # for padding, we only support DataProto with same length if data_proto_len is None: data_proto_len = len(obj) padding_size = (chunks - (data_proto_len % chunks)) if (data_proto_len % chunks > 0) else 0 else: assert data_proto_len == len(obj), ( f"expecting all arg share same length of {data_proto_len}, but got {len(obj)}" ) obj.padding(padding_size=padding_size) return obj.chunk(chunks=chunks) splitted_args = [_padding_and_split_data(arg, chunks) for arg in args] splitted_kwargs = {key: _padding_and_split_data(val, chunks) for key, val in kwargs.items()} if padding_size is not None: splitted_kwargs[_padding_size_key] = padding_size return splitted_args, splitted_kwargs def dispatch_one_to_all(worker_group, *args, **kwargs): args = tuple([arg] * worker_group.world_size for arg in args) kwargs = {k: [v] * worker_group.world_size for k, v in kwargs.items()} return args, kwargs def dummy_direct_rollout_call(worker_group, *args, **kwargs): raise NotImplementedError("Direct rollout call is forbidden.") def dispatch_all_to_all(worker_group, *args, **kwargs): return args, kwargs def collect_all_to_all(worker_group, output): return output def _concat_data_proto_or_future(output: list): import ray from verl.protocol import DataProto, DataProtoFuture # make sure all the elements in output has the same type for o in output: assert type(o) is type(output[0]) o = output[0] if isinstance(o, DataProto): return DataProto.concat(output) elif isinstance(o, ray.ObjectRef): return DataProtoFuture.concat(output) elif isinstance(o, BatchMeta): return BatchMeta.concat(output) else: raise NotImplementedError def dispatch_dp_compute(worker_group, *args, **kwargs): from verl.single_controller.base.worker_group import WorkerGroup assert isinstance(worker_group, WorkerGroup) for arg in args: assert isinstance(arg, tuple | list) and len(arg) == worker_group.world_size for k, v in kwargs.items(): assert isinstance(v, tuple | list) and len(v) == worker_group.world_size return args, kwargs def collect_dp_compute(worker_group, output): from verl.single_controller.base.worker_group import WorkerGroup assert isinstance(worker_group, WorkerGroup) assert len(output) == worker_group.world_size return output def dispatch_dp_compute_data_proto(worker_group, *args, **kwargs): from verl.single_controller.base.worker_group import WorkerGroup assert isinstance(worker_group, WorkerGroup) # Note: enable auto padding for dp compute DatapProto splitted_args, splitted_kwargs = 
_split_args_kwargs_data_proto_with_auto_padding( worker_group.world_size, *args, **kwargs, ) return splitted_args, splitted_kwargs def dispatch_dp_compute_data_proto_with_func(worker_group, *args, **kwargs): from verl.single_controller.base.worker_group import WorkerGroup assert isinstance(worker_group, WorkerGroup) assert isinstance(args[0], FunctionType) # NOTE: The first one args is a function! splitted_args, splitted_kwargs = _split_args_kwargs_data_proto(worker_group.world_size, *args[1:], **kwargs) splitted_args_with_func = [[args[0]] * worker_group.world_size] + splitted_args return splitted_args_with_func, splitted_kwargs def collect_dp_compute_data_proto(worker_group, output): import ray from verl.protocol import DataProto for o in output: assert isinstance(o, DataProto | ray.ObjectRef), f"expecting {o} to be DataProto, but got {type(o)}" output = collect_dp_compute(worker_group, output) return _concat_data_proto_or_future(output) def dispatch_nd_compute(dp_rank_mapping: list[int], dp_size, worker_group, *args, **kwargs): import os from verl.single_controller.base.worker_group import WorkerGroup from verl.utils.ray_utils import parallel_put assert isinstance(worker_group, WorkerGroup) max_workers = max(1, min(len(args[0]), os.cpu_count())) args = [parallel_put(arg, max_workers=max_workers) for arg in args] kwargs = {k: parallel_put(v, max_workers=max_workers) for k, v in kwargs.items()} all_args = [] for arg in args: assert isinstance(arg, tuple | list) and len(arg) == dp_size transformed_args = [] for i in range(worker_group.world_size): local_dp_rank = dp_rank_mapping[i] transformed_args.append(arg[local_dp_rank]) all_args.append(transformed_args) all_args = tuple(all_args) all_kwargs = {} for k, v in kwargs.items(): assert isinstance(v, tuple | list) and len(v) == dp_size transformed_v = [] for i in range(worker_group.world_size): local_dp_rank = dp_rank_mapping[i] transformed_v.append(v[local_dp_rank]) all_kwargs[k] = transformed_v return all_args, all_kwargs def collect_nd_compute(collect_mask: list[bool], worker_group, output): from verl.single_controller.base.worker_group import WorkerGroup assert isinstance(worker_group, WorkerGroup) assert len(output) == worker_group.world_size output_in_dp = [] for global_rank in range(worker_group.world_size): collect_dp_rank = collect_mask[global_rank] if collect_dp_rank: output_in_dp.append(output[global_rank]) return output_in_dp def dispatch_nd_compute_dataproto(dp_rank_mapping: list[int], dp_size, worker_group, *args, **kwargs): splitted_args, splitted_kwargs = _split_args_kwargs_data_proto(dp_size, *args, **kwargs) return dispatch_nd_compute(dp_rank_mapping, dp_size, worker_group, *splitted_args, **splitted_kwargs) def collect_nd_compute_dataproto(collect_mask: list[bool], worker_group, output): output = collect_nd_compute(collect_mask, worker_group, output) import ray from verl.protocol import DataProto for o in output: assert isinstance(o, DataProto | ray.ObjectRef | BatchMeta), ( f"expecting {o} to be DataProto or BatchMeta, but got {type(o)}" ) return _concat_data_proto_or_future(output) def dispatch_lazy_compute_data_proto(mesh_name, worker_group, *args, **kwargs): from verl.single_controller.base.worker_group import WorkerGroup assert isinstance(worker_group, WorkerGroup) # query dispatch info of the worker group if mesh_name not in worker_group._dispatch_info: worker_group._dispatch_info[mesh_name] = worker_group._query_dispatch_info(mesh_name) assert len(worker_group._dispatch_info[mesh_name]) == worker_group.world_size 
dp_rank_mapping = worker_group._dispatch_info[mesh_name] # perform dispatch dp_size = max(dp_rank_mapping) + 1 return dispatch_nd_compute_dataproto(dp_rank_mapping, dp_size, worker_group, *args, **kwargs) def collect_lazy_compute_data_proto(mesh_name, worker_group, *args, **kwargs): from verl.single_controller.base.worker_group import WorkerGroup assert isinstance(worker_group, WorkerGroup) # the dispatch info is stored in the worker group assert mesh_name in worker_group._dispatch_info if mesh_name not in worker_group._collect_info: worker_group._collect_info[mesh_name] = worker_group._query_collect_info(mesh_name) assert len(worker_group._collect_info[mesh_name]) == worker_group.world_size # a boolean of whether the dp_rank is used for collect collect_mask = worker_group._collect_info[mesh_name] # perform dispatch return collect_nd_compute_dataproto(collect_mask, worker_group, *args, **kwargs) def make_nd_compute_dataproto_dispatch_fn(mesh_name): return { "dispatch_fn": partial(dispatch_lazy_compute_data_proto, mesh_name), "collect_fn": partial(collect_lazy_compute_data_proto, mesh_name), } # Global registry for dispatch mode. DISPATCH_MODE_FN_REGISTRY = { Dispatch.ONE_TO_ALL: { "dispatch_fn": dispatch_one_to_all, "collect_fn": collect_all_to_all, }, Dispatch.ALL_TO_ALL: { "dispatch_fn": dispatch_all_to_all, "collect_fn": collect_all_to_all, }, Dispatch.DP_COMPUTE: {"dispatch_fn": dispatch_dp_compute, "collect_fn": collect_dp_compute}, Dispatch.DP_COMPUTE_PROTO: { "dispatch_fn": dispatch_dp_compute_data_proto, "collect_fn": collect_dp_compute_data_proto, }, Dispatch.DP_COMPUTE_PROTO_WITH_FUNC: { "dispatch_fn": dispatch_dp_compute_data_proto_with_func, "collect_fn": collect_dp_compute_data_proto, }, Dispatch.DP_COMPUTE_METRIC: {"dispatch_fn": dispatch_dp_compute_data_proto, "collect_fn": collect_dp_compute}, Dispatch.DIRECT_ROLLOUT_METHOD: { "dispatch_fn": dummy_direct_rollout_call, "collect_fn": dummy_direct_rollout_call, }, } def get_predefined_dispatch_fn(dispatch_mode): return DISPATCH_MODE_FN_REGISTRY[dispatch_mode] def register_dispatch_mode(dispatch_mode_name, dispatch_fn, collect_fn): """ Register a new dispatch mode. """ dispatch_mode = Dispatch.register(dispatch_mode_name) _check_dispatch_mode(dispatch_mode) assert dispatch_mode not in DISPATCH_MODE_FN_REGISTRY, f"dispatch_mode_name {dispatch_mode_name} already exists" DISPATCH_MODE_FN_REGISTRY[dispatch_mode] = {"dispatch_fn": dispatch_fn, "collect_fn": collect_fn} def update_dispatch_mode(dispatch_mode, dispatch_fn, collect_fn): """ Update the dispatch mode. """ _check_dispatch_mode(dispatch_mode) assert dispatch_mode in DISPATCH_MODE_FN_REGISTRY, f"dispatch_mode {dispatch_mode} not found" DISPATCH_MODE_FN_REGISTRY[dispatch_mode] = {"dispatch_fn": dispatch_fn, "collect_fn": collect_fn} def get_predefined_execute_fn(execute_mode): """ Note that here we only asks execute_all and execute_rank_zero to be implemented Leave the choice of how these two functions handle argument 'blocking' to users """ predefined_execute_mode_fn = { Execute.ALL: {"execute_fn_name": "execute_all"}, Execute.RANK_ZERO: {"execute_fn_name": "execute_rank_zero"}, } return predefined_execute_mode_fn[execute_mode] def _check_dispatch_mode(dispatch_mode): assert isinstance(dispatch_mode, Dispatch | dict), ( f"dispatch_mode must be a Dispatch or a Dict. 
Got {dispatch_mode}" ) if isinstance(dispatch_mode, dict): necessary_keys = ["dispatch_fn", "collect_fn"] for key in necessary_keys: assert key in dispatch_mode, f"key {key} should be in dispatch_mode if it is a dictionary" def _check_execute_mode(execute_mode): assert isinstance(execute_mode, Execute), f"execute_mode must be a Execute. Got {execute_mode}" def _materialize_futures(*args, **kwargs): new_args = [] for arg in args: if isinstance(arg, DataProtoFuture): arg = arg.get() # add more type to materialize new_args.append(arg) for k, v in kwargs.items(): if isinstance(v, DataProtoFuture): kwargs[k] = v.get() new_args = tuple(new_args) return new_args, kwargs def register(dispatch_mode=Dispatch.ALL_TO_ALL, execute_mode=Execute.ALL, blocking=True, materialize_futures=True): """Register a function with distributed execution configuration. This decorator registers a function with specific dispatch and execution modes for distributed computation. It handles both synchronous and asynchronous functions, and optionally materializes futures before execution. Args: dispatch_mode: Dispatch mode for computation distribution. Default: Dispatch.ALL_TO_ALL. execute_mode: Execute mode for computation distribution. Default: Execute.ALL. blocking: Whether the execution should be blocking. Defaults to True. materialize_futures: Whether to materialize the data before dispatching. Defaults to True. Returns: A decorator that wraps the original function with distributed execution configuration. """ from verl.utils.transferqueue_utils import tqbridge _check_dispatch_mode(dispatch_mode=dispatch_mode) _check_execute_mode(execute_mode=execute_mode) def decorator(func): func = tqbridge()(func) @wraps(func) def inner(*args, **kwargs): if materialize_futures: args, kwargs = _materialize_futures(*args, **kwargs) return func(*args, **kwargs) @wraps(func) async def async_inner(*args, **kwargs): if materialize_futures: args, kwargs = _materialize_futures(*args, **kwargs) return await func(*args, **kwargs) wrapper = async_inner if inspect.iscoroutinefunction(func) else inner attrs = {"dispatch_mode": dispatch_mode, "execute_mode": execute_mode, "blocking": blocking} setattr(wrapper, MAGIC_ATTR, attrs) return wrapper return decorator ================================================ FILE: verl_distillation/verl/single_controller/base/worker.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" the class for Worker """ import os import socket import warnings from dataclasses import dataclass import ray from verl.utils.device import ( get_torch_device, get_visible_devices_keyword, is_npu_available, ) from .decorator import Dispatch, Execute, register @dataclass class DistRankInfo: tp_rank: int dp_rank: int pp_rank: int cp_rank: int @dataclass class DistGlobalInfo: tp_size: int dp_size: int pp_size: int cp_size: int class WorkerHelper: @staticmethod def _get_node_ip(): if os.getenv("WG_BACKEND", None) == "ray": return ray.util.get_node_ip_address() else: raise NotImplementedError("WG_BACKEND now just support ray mode.") @staticmethod def _get_free_port(): with socket.socket() as sock: sock.bind(("", 0)) return sock.getsockname()[1] def get_availale_master_addr_port(self): warnings.warn( "This function is deprecated due to typo in name; Please use `get_available_master_addr_port` instead", stacklevel=2, ) return self.get_available_master_addr_port() def get_available_master_addr_port(self): return self._get_node_ip().strip("[]"), str(self._get_free_port()) # we assume that in each WorkerGroup, there is a Master Worker class Worker(WorkerHelper): """A distributed worker that handles initialization and configuration for distributed training. This class manages worker initialization, configuration, and provides methods for executing distributed operations. It handles communication settings, device configuration, and worker metadata management. """ fused_worker_attr_name = "fused_worker_dict" def _register_dispatch_collect_info(self, mesh_name: str, dp_rank: int, is_collect: bool): """Register the dp_rank for a given mesh name. This function is meant to be called by the worker Args: mesh_name (str): Name of the mesh to register dp_rank for. dp_rank (int): dp_rank to register for the given mesh name. is_collect (bool): Whether the dp_rank is used for collect. """ if mesh_name in self.__dispatch_dp_rank or mesh_name in self.__collect_dp_rank: raise ValueError(f"mesh_name {mesh_name} has been registered") self.__dispatch_dp_rank[mesh_name] = dp_rank self.__collect_dp_rank[mesh_name] = is_collect @register(dispatch_mode=Dispatch.ONE_TO_ALL) def _query_dispatch_info(self, mesh_name: str): """Query the dispatch info for a given mesh name. Args: mesh_name (str): Name of the mesh to query dispatch info for. Returns: int: The dp_rank for the given mesh name. """ assert mesh_name in self.__dispatch_dp_rank, f"{mesh_name} is not registered in {self.__class__.__name__}" # note that each rank store its own dp_rank return self.__dispatch_dp_rank[mesh_name] @register(dispatch_mode=Dispatch.ONE_TO_ALL) def _query_collect_info(self, mesh_name: str): """Query the collect info for a given mesh name. Args: mesh_name (str): Name of the mesh to query collect info for. Returns: bool: Whether the dp_rank is used for collect. 
""" assert mesh_name in self.__collect_dp_rank, f"{mesh_name} is not registered in {self.__class__.__name__}" return self.__collect_dp_rank[mesh_name] @register(dispatch_mode=Dispatch.ONE_TO_ALL, blocking=True) def create_transferqueue_client(self, controller_infos, storage_infos, role="train"): from verl.utils.transferqueue_utils import create_transferqueue_client create_transferqueue_client( client_id=f"{role}_worker_{self.rank}", controller_infos=controller_infos, storage_infos=storage_infos, ) @classmethod def env_keys(cls): """The keys of the environment variables that are used to configure the Worker.""" return [ "WORLD_SIZE", "RANK", "LOCAL_WORLD_SIZE", "LOCAL_RANK", "MASTER_ADDR", "MASTER_PORT", get_visible_devices_keyword().upper(), ] def __init__(self, cuda_visible_devices=None) -> None: """Initialize the worker with environment settings and device configuration. Args: cuda_visible_devices (str, optional): CUDA visible devices configuration. Defaults to None. """ # construct a meta from environment variable. Note that the import must be inside the class because # it is executed remotely import os self._setup_env_cuda_visible_devices() world_size = int(os.environ["WORLD_SIZE"]) rank = int(os.environ["RANK"]) self._rank = rank self._world_size = world_size master_addr = os.environ["MASTER_ADDR"] master_port = os.environ["MASTER_PORT"] local_world_size = int(os.getenv("LOCAL_WORLD_SIZE", "1")) local_rank = int(os.getenv("LOCAL_RANK", "0")) store = { "_world_size": world_size, "_rank": rank, "_local_world_size": local_world_size, "_local_rank": local_rank, "_master_addr": master_addr, "_master_port": master_port, } if cuda_visible_devices is not None: store[f"_{get_visible_devices_keyword()}".lower()] = cuda_visible_devices self._configure_with_store(store=store) self.fused_worker_dict = {} self.__dispatch_dp_rank = {} self.__collect_dp_rank = {} def get_fused_worker_by_name(self, worker_name: str): """Get a fused worker by its name. Args: worker_name (str): Name of the worker to retrieve """ return self.fused_worker_dict.get(worker_name, None) def _setup_env_cuda_visible_devices(self): from verl.utils.ray_utils import ray_noset_visible_devices is_ray_noset_visible_devices = ray_noset_visible_devices() # Prevent use of clashing `{CUDA/HIP/ROCR}_VISIBLE_DEVICES`` rocr_val = os.environ.get("ROCR_VISIBLE_DEVICES", None) hip_val = os.environ.get("HIP_VISIBLE_DEVICES", None) cuda_val = os.environ.get("CUDA_VISIBLE_DEVICES", None) if hip_val: # Switch the use of HIP_VISIBLE_DEVICES to CUDA_VISIBLE_DEVICES for consistency. # Make sure that the HIP_VISIBLE_DEVICES is set to the same value as CUDA_VISIBLE_DEVICES # at this point. val = os.environ.pop("HIP_VISIBLE_DEVICES") hip_val = None if cuda_val: assert val == cuda_val, ( f"Please use the same HIP_VISIBLE_DEVICES or CUDA_VISIBLE_DEVICES, inconsistant values " f"found: {val} and {cuda_val}." ) else: cuda_val = val os.environ["CUDA_VISIBLE_DEVICES"] = val # os.environ["HIP_VISIBLE_DEVICES"] = val if rocr_val: # You must take care if both HIP/CUDA and ROCR env vars are set as they have # different meanings. Both env vars accept either a list of ints or a # list of UUIDs. The ROCR env var is processed first which then reduces # the number of GPUs that HIP can select from. # https://github.com/pytorch/pytorch/pull/144026 # To avoid the complexity of this, we simply gives out error if both are set # (Also to keep consistency with ray's practice with 2.45.0). 
# Otherwise, we will set ROCR_VISIBLE_DEVICES to CUDA_VISIBLE_DEVICES # and remove ROCR_VISIBLE_DEVICES. if cuda_val: raise ValueError("Please don't set ROCR_VISIBLE_DEVICES when HIP/CUDA_VISIBLE_DEVICES is set.") cuda_val = os.environ.pop("ROCR_VISIBLE_DEVICES") os.environ["CUDA_VISIBLE_DEVICES"] = cuda_val rocr_val = None if is_ray_noset_visible_devices: # NOTE: Ray will automatically set the *_VISIBLE_DEVICES # environment variable for each actor, unless # RAY_EXPERIMENTAL_NOSET_*_VISIBLE_DEVICES is set, # so we need to set local rank when the flag is set. device_name = "NPU" if is_npu_available else "GPU" local_rank = ray.get_runtime_context().get_accelerator_ids()[device_name][0] os.environ["LOCAL_RANK"] = local_rank get_torch_device().set_device(int(local_rank)) def _configure_with_store(self, store: dict): """ This function should only be called inside by WorkerGroup """ store_env_dict = {f"_{key.lower()}": store.get(f"_{key.lower()}", None) for key in type(self).env_keys()} self.__dict__.update(store_env_dict) # this is hacky # print(f"__dict__: {self.__dict__}") for key in type(self).env_keys(): val = self.__dict__.get(f"_{key.lower()}", None) if val is not None: # print(f"set {key} to {val}") os.environ[key] = str(val) os.environ["REDIS_STORE_SERVER_HOST"] = ( str(self._master_addr).replace("[", "").replace("]", "") if self._master_addr else "" ) def get_master_addr_port(self): """Get the master address and port for distributed communication.""" return self._master_addr, self._master_port def get_cuda_visible_devices(self): """Get the CUDA visible devices configuration.""" import os visible_devices = os.environ.get(get_visible_devices_keyword().upper(), "not set") return visible_devices @property def world_size(self): """Get the total number of workers in the distributed setup.""" return self._world_size @property def rank(self): """Get the rank of this worker in the distributed setup.""" return self._rank @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO_WITH_FUNC) def execute_with_func_generator(self, func, *args, **kwargs): """Execute a function with function generator dispatch mode. Args: func: Function to execute *args: Positional arguments for the function **kwargs: Keyword arguments for the function """ ret_proto = func(self, *args, **kwargs) return ret_proto @register(dispatch_mode=Dispatch.ALL_TO_ALL, execute_mode=Execute.RANK_ZERO) def execute_func_rank_zero(self, func, *args, **kwargs): """Execute a function in rank zero execution mode. Args: func: Function to execute *args: Positional arguments for the function **kwargs: Keyword arguments for the function """ result = func(*args, **kwargs) return result ================================================ FILE: verl_distillation/verl/single_controller/base/worker_group.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
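
# Illustrative sketch (hypothetical values) of how ResourcePool below tracks processes:
#
#     pool = ResourcePool(process_on_nodes=[8, 8])  # two nodes, 8 processes each
#     pool.world_size               # -> 16
#     pool.local_world_size_list()  # -> [8] * 16
#     pool.local_rank_list()        # -> [0, 1, ..., 7, 0, 1, ..., 7]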
""" the class of WorkerGroup """ import logging import signal import threading import time from typing import Any, Callable from .decorator import MAGIC_ATTR, Dispatch, get_predefined_dispatch_fn, get_predefined_execute_fn class ResourcePool: """ Manages a pool of resources across multiple nodes, tracking process counts and GPU allocations. The class provides methods to calculate world size, local world sizes, and local ranks across all nodes in the pool. """ def __init__(self, process_on_nodes=None, max_colocate_count: int = 10, n_gpus_per_node=8) -> None: """Initialize the ResourcePool with node processes and GPU configuration. Args: process_on_nodes (List[int], optional): List of process counts per node. Defaults to empty list. max_colocate_count (int, optional): Maximum number of processes that can be colocated. Defaults to 10. n_gpus_per_node (int, optional): Number of GPUs available per node. Defaults to 8. """ if process_on_nodes is None: process_on_nodes = [] self._store = process_on_nodes self.max_colocate_count = max_colocate_count self.n_gpus_per_node = n_gpus_per_node # this is left for future huawei GPU that contains 16 GPUs per node def add_node(self, process_count): self._store.append(process_count) @property def world_size(self): """Total number of processes across all nodes in the pool.""" return sum(self._store) def __call__(self) -> Any: return self._store @property def store(self): return self._store def local_world_size_list(self) -> list[int]: """Returns a flat list where each process has its local world size.""" nested_local_world_size_list = [ [local_world_size for _ in range(local_world_size)] for local_world_size in self._store ] return [item for row in nested_local_world_size_list for item in row] def local_rank_list(self) -> list[int]: """Returns a flat list of local ranks for all processes across all nodes.""" nested_local_rank_list = [[i for i in range(local_world_size)] for local_world_size in self._store] return [item for row in nested_local_rank_list for item in row] class ClassWithInitArgs: """ Wrapper class that stores constructor arguments for deferred instantiation. This class is particularly useful for remote class instantiation where the actual construction needs to happen at a different time or location. """ def __init__(self, cls, *args, **kwargs) -> None: """Initialize the ClassWithInitArgs instance. Args: cls: The class to be instantiated later *args: Positional arguments for the class constructor **kwargs: Keyword arguments for the class constructor """ self.cls = cls self.args = args self.kwargs = kwargs self.fused_worker_used = False def __call__(self) -> Any: """Instantiate the stored class with the stored arguments.""" return self.cls(*self.args, **self.kwargs) def check_workers_alive(workers: list, is_alive: Callable, gap_time: float = 1) -> None: """Continuously monitors worker processes and raises SIGABRT if any worker dies. Args: workers (List): List of worker objects to monitor is_alive (Callable): Function to check if a worker is alive gap_time (float): Time interval between checks """ import time while True: for worker in workers: if not is_alive(worker): logging.warning(f"worker {worker} is not alive sending signal to main thread") signal.raise_signal(signal.SIGABRT) time.sleep(gap_time) class WorkerGroup: """ Base class for managing a group of workers in a distributed system. The class provides methods for worker management, aliveness checking, and method binding. 
""" fused_worker_execute_fn_name = "_fuw_execute" def __init__(self, resource_pool: ResourcePool, **kwargs) -> None: self._is_init_with_detached_workers = resource_pool is None self.fused_worker_used = False if resource_pool is not None: # handle the case when WorkGroup is attached to an existing one self._procecss_dispatch_config = resource_pool() else: self._procecss_dispatch_config = None self._workers = [] self._worker_names = [] self._dispatch_info = {} self._collect_info = {} self._master_addr = None self._master_port = None self._checker_thread: threading.Thread = None def _is_worker_alive(self, worker): """Check if a worker is alive. Must be implemented by derived classes.""" raise NotImplementedError("WorkerGroup._is_worker_alive called, should be implemented in derived class.") def _block_until_all_workers_alive(self) -> None: """Blocks until all workers in the group are alive.""" while True: all_state = [self._is_worker_alive(worker) for worker in self._workers] if False in all_state: time.sleep(1) else: break def start_worker_aliveness_check(self, every_n_seconds=1) -> None: """Starts a background thread to monitor worker aliveness. Args: every_n_seconds (int): Interval between aliveness checks """ # before starting checking worker aliveness, make sure all workers are already alive self._block_until_all_workers_alive() self._checker_thread = threading.Thread( target=check_workers_alive, args=(self._workers, self._is_worker_alive, every_n_seconds) ) self._checker_thread.start() @property def world_size(self): """Number of workers in the group.""" return len(self._workers) def _bind_worker_method(self, user_defined_cls, func_generator): """Binds worker methods to the WorkerGroup based on registered attributes. Args: user_defined_cls (type): The class containing methods to bind func_generator (Callable): Function that generates the bound method Returns: List[str]: List of method names that were successfully bound """ method_names = [] for method_name in dir(user_defined_cls): try: method = getattr(user_defined_cls, method_name) assert callable(method), f"{method_name} in {user_defined_cls} is not callable" except Exception: # if it is a property, it will fail because Class doesn't have instance property continue if hasattr(method, MAGIC_ATTR): # this method is decorated by register attribute = getattr(method, MAGIC_ATTR) assert isinstance(attribute, dict), f"attribute must be a dictionary. 
Got {type(attribute)}" assert "dispatch_mode" in attribute, "attribute must contain dispatch_mode in its key" dispatch_mode = attribute["dispatch_mode"] execute_mode = attribute["execute_mode"] blocking = attribute["blocking"] # get dispatch fn if isinstance(dispatch_mode, Dispatch): # get default dispatch fn fn = get_predefined_dispatch_fn(dispatch_mode=dispatch_mode) dispatch_fn = fn["dispatch_fn"] collect_fn = fn["collect_fn"] else: assert isinstance(dispatch_mode, dict) assert "dispatch_fn" in dispatch_mode assert "collect_fn" in dispatch_mode dispatch_fn = dispatch_mode["dispatch_fn"] collect_fn = dispatch_mode["collect_fn"] # get execute_fn_name execute_mode = get_predefined_execute_fn(execute_mode=execute_mode) wg_execute_fn_name = execute_mode["execute_fn_name"] # get execute_fn from string try: execute_fn = getattr(self, wg_execute_fn_name) assert callable(execute_fn), "execute_fn must be callable" except Exception: print(f"execute_fn {wg_execute_fn_name} is invalid") raise # bind a new method to the RayWorkerGroup func = func_generator( self, method_name, dispatch_fn=dispatch_fn, collect_fn=collect_fn, execute_fn=execute_fn, blocking=blocking, ) try: setattr(self, method_name, func) method_names.append(method_name) except Exception as e: raise ValueError(f"Fail to set method_name {method_name}") from e return method_names ================================================ FILE: verl_distillation/verl/single_controller/ray/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .base import ( RayClassWithInitArgs, RayResourcePool, RayWorkerGroup, create_colocated_worker_cls, create_colocated_worker_cls_fused, ) __all__ = [ "RayClassWithInitArgs", "RayResourcePool", "RayWorkerGroup", "create_colocated_worker_cls", "create_colocated_worker_cls_fused", ] ================================================ FILE: verl_distillation/verl/single_controller/ray/base.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import inspect import logging import socket from copy import deepcopy from typing import Any, Optional import ray from ray.experimental.state.api import get_actor from ray.util.placement_group import PlacementGroup, placement_group from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy, PlacementGroupSchedulingStrategy from verl.protocol import DataProto, _padding_size_key from verl.single_controller.base import ClassWithInitArgs, ResourcePool, Worker, WorkerGroup from verl.single_controller.base.decorator import MAGIC_ATTR, Dispatch from verl.utils.py_functional import temp_env_var __all__ = ["Worker"] def get_random_string(length: int) -> str: import random import string letters_digits = string.ascii_letters + string.digits return "".join(random.choice(letters_digits) for _ in range(length)) def func_generator(self, method_name, dispatch_fn, collect_fn, execute_fn, blocking): class Functor: def __call__(this, *args, **kwargs): args, kwargs = dispatch_fn(self, *args, **kwargs) padding_count = kwargs.pop(_padding_size_key, 0) output = execute_fn(method_name, *args, **kwargs) if blocking: output = ray.get(output) output = collect_fn(self, output) if padding_count > 0: if isinstance(output, DataProto): indices = [i for i in range(len(output))][:-padding_count] output = output.select_idxs(indices) elif isinstance(output, list): output = output[:-padding_count] return output # use class type to pass the method_name to get a better observability return type(method_name, (Functor,), {})() def sort_placement_group_by_node_ip(pgs: list[PlacementGroup]) -> list[PlacementGroup]: """ Sort the placement groups by node ip, all bundles in a single placement group should be on the same node. FSDPCheckpointManager saves sharded model states and optimizer states in local storage, which requires RANK to be consistent across nodes when resume from checkpoint. With this function, if there's only one resource pool and there's no node change, RANK should be consistent across nodes in multiple ray jobs, even if the whole ray cluster is restarted. 
""" node_ip = {node["NodeID"]: node["NodeManagerAddress"] for node in ray.nodes()} pg_ip = {} for pg in pgs: specs = ray._private.state.state.placement_group_table(pg.id) # all bunles should be on the same node node_id = specs["bundles_to_node_id"][0] pg_ip[pg.id] = node_ip[node_id] return sorted(pgs, key=lambda pg: pg_ip[pg.id]) @ray.remote def get_master_addr_port() -> tuple[str, str]: addr = ray.util.get_node_ip_address().strip("[]") with socket.socket() as sock: sock.bind(("", 0)) port = sock.getsockname()[1] return addr, str(port) class RayResourcePool(ResourcePool): def __init__( self, process_on_nodes: Optional[list[int]] = None, use_gpu: bool = True, name_prefix: str = None, max_colocate_count: int = 10, detached=False, accelerator_type: Optional[str] = None, ) -> None: super().__init__(process_on_nodes, max_colocate_count) self.use_gpu = use_gpu # print(f"in RayProcessDispatchConfiguration: name_prefix = {name_prefix}") self.name_prefix = get_random_string(length=6) if name_prefix is None else name_prefix self.pgs = None self.detached = detached self.accelerator_type = accelerator_type def get_placement_groups(self, strategy="STRICT_PACK", name=None, device_name="cuda"): if self.pgs is not None: return self.pgs pg_name_prefix = ( name if name else f"{self.name_prefix}verl_group_{'_'.join([str(count) for count in self._store])}:" ) # print(f"pg_name_prefix = {pg_name_prefix}") if device_name == "npu": device_name = "NPU" elif device_name == "cuda": device_name = "GPU" bundle = {"CPU": self.max_colocate_count} if self.use_gpu: bundle[device_name] = 1 if self.accelerator_type is not None: bundle[self.accelerator_type] = 1e-4 pg_scheme = [[bundle.copy() for _ in range(process_count)] for process_count in self._store] lifetime = "detached" if self.detached else None pgs = [ placement_group(bundles=bundles, strategy=strategy, name=pg_name_prefix + str(idx), lifetime=lifetime) for idx, bundles in enumerate(pg_scheme) ] ray.get([pg.ready() for pg in pgs]) self.pgs = pgs return pgs def extract_pg_from_exist( resource_pools: dict[str, RayResourcePool], src_role_names: list[str], resource_pool: RayResourcePool ) -> list: src_pgs = [ pg for role_name, resource_pool in resource_pools.items() for pg in resource_pool.get_placement_groups() if role_name in src_role_names ] sorted_src_pgs = sorted(src_pgs, key=lambda pg: pg.bundle_count, reverse=True) sorted_process_on_nodes = sorted([(val, idx) for idx, val in enumerate(resource_pool.store)], reverse=True) unsorted_pgs: list[tuple[int, PlacementGroup]] = [] searching_idx = 0 for request_process, original_idx in sorted_process_on_nodes: assert searching_idx < len(sorted_src_pgs), f"no enough nodes for request: searching {searching_idx} th node" assert request_process <= sorted_src_pgs[searching_idx].bundle_count, ( f"requesting {request_process} processes, bundle count cannot satisfy" ) unsorted_pgs.append((original_idx, sorted_src_pgs[searching_idx])) searching_idx += 1 return [pg for _, pg in sorted(unsorted_pgs)] def merge_resource_pool(rp1: RayResourcePool, rp2: RayResourcePool) -> RayResourcePool: assert rp1.use_gpu == rp2.use_gpu, "Both RayResourcePool must either use_gpu or not" assert rp1.max_colocate_count == rp2.max_colocate_count, "Both RayResourcePool must has the same max_colocate_count" assert rp1.n_gpus_per_node == rp2.n_gpus_per_node, "Both RayResourcePool must has the same n_gpus_per_node" assert rp1.detached == rp2.detached, "Detached ResourcePool cannot be merged with non-detached ResourcePool" new_store = rp1.store + rp2.store 
merged = type(rp1)(new_store, rp1.use_gpu, f"{rp1.name_prefix}_{rp2.name_prefix}") merged.pgs = rp1.get_placement_groups() + rp2.get_placement_groups() return merged class RayClassWithInitArgs(ClassWithInitArgs): """A wrapper class for Ray actors with initialization arguments. This class extends ClassWithInitArgs to provide additional functionality for configuring and creating Ray actors with specific resource requirements and scheduling strategies. """ def __init__(self, cls, *args, **kwargs) -> None: # self._options = kwargs.pop('options', dict()) super().__init__(cls, *args, **kwargs) self._options = {} self._additional_resource = {} def set_additional_resource(self, additional_resource): """Set additional resource requirements for the actor. Args: additional_resource: Dictionary specifying additional resource requirements """ self._additional_resource = additional_resource def update_options(self, options: dict): """Update the Ray actor creation options. Args: options: Dictionary of options to update """ self._options.update(options) def __call__( self, placement_group, placement_group_bundle_idx, use_gpu: bool = True, num_gpus=1, sharing_with=None, device_name="cuda", ) -> Any: """Create and return a Ray actor with the configured options. Args: placement_group: Ray placement group for scheduling placement_group_bundle_idx: Index of the bundle in the placement group use_gpu: Whether to use GPU resources num_gpus: Number of GPUs to allocate sharing_with: Actor to share resources with device_name: Device for training Returns: A Ray actor handle with the configured options """ if sharing_with is not None: target_node_id = ray.get(sharing_with.get_node_id.remote()) visible_devices = ray.get(sharing_with.get_cuda_visible_devices.remote()) options = {"scheduling_strategy": NodeAffinitySchedulingStrategy(node_id=target_node_id, soft=False)} return self.cls.options(**options).remote(*self.args, cuda_visible_devices=visible_devices, **self.kwargs) options = { "scheduling_strategy": PlacementGroupSchedulingStrategy( placement_group=placement_group, placement_group_bundle_index=placement_group_bundle_idx ) } options.update(self._options) if use_gpu and device_name == "cuda": options["num_gpus"] = num_gpus if use_gpu and device_name == "npu": options["resources"] = {"NPU": num_gpus} if len(self._additional_resource) > 1: for k, v in self._additional_resource.items(): options[k] = v # print("cls:", self.cls) # print("args: ", self.args) # print("kwargs: ", self.kwargs) return self.cls.options(**options).remote(*self.args, **self.kwargs) class RayWorkerGroup(WorkerGroup): """A group of Ray workers that can be managed collectively. This class extends WorkerGroup to provide Ray-specific functionality for creating and managing groups of Ray actors with specific resource requirements and scheduling strategies. """ def __init__( self, resource_pool: RayResourcePool = None, ray_cls_with_init: RayClassWithInitArgs = None, bin_pack: bool = True, name_prefix: str = None, detached=False, worker_names=None, worker_handles: list[ray.actor.ActorHandle] = None, ray_wait_register_center_timeout: int = 300, **kwargs, ) -> None: """Initialize a RayWorkerGroup. 
Args: resource_pool: Resource pool for worker allocation ray_cls_with_init: Class with initialization arguments for workers bin_pack: Whether to use strict bin packing for resource allocation name_prefix: Prefix for worker names detached: Whether workers should be detached worker_names: Names of existing workers to attach to ray_wait_register_center_timeout: Timeout for waiting on register center **kwargs: Additional keyword arguments """ super().__init__(resource_pool=resource_pool, **kwargs) self.ray_cls_with_init = ray_cls_with_init self.name_prefix = get_random_string(length=6) if name_prefix is None else name_prefix self._ray_wait_register_center_timeout = ray_wait_register_center_timeout # Whether the WorkerGroup is a Colocate WorkerGroup created by FusedWorker. self.fused_worker_used = ray_cls_with_init.fused_worker_used # if a WorkerGroup is spawned from Colocate WorkerGroup, this indicates which sub-class is binded to # this WorkerGroup. self.sub_cls_name = "" self.device_name = kwargs.get("device_name", "cuda") self.profile_steps = kwargs.get("profile_steps", None) self.worker_nsight_options = kwargs.get("worker_nsight_options", None) self.customized_worker_env = kwargs.get("worker_env", {}) if self.worker_nsight_options is not None and self.worker_nsight_options["capture-range-end"] is None: self.worker_nsight_options["capture-range-end"] = f"repeat-shutdown:{6 * len(self.profile_steps)}" if worker_names is not None and (not self.fused_worker_used): assert self._is_init_with_detached_workers self._worker_names = worker_names if self._is_init_with_detached_workers: self._init_with_detached_workers(worker_names=worker_names, worker_handles=worker_handles) else: self._init_with_resource_pool( resource_pool=resource_pool, ray_cls_with_init=ray_cls_with_init, bin_pack=bin_pack, detached=detached, worker_env=self.customized_worker_env, ) if ray_cls_with_init is not None: self._bind_worker_method(self.ray_cls_with_init.cls, func_generator) self.wg_dict = None self.method_names = [] def _is_worker_alive(self, worker: ray.actor.ActorHandle): """Check if a worker actor is still alive. Args: worker: Ray actor handle to check Returns: bool: True if the worker is alive, False otherwise """ worker_state_dict = get_actor(worker._actor_id.hex()) return worker_state_dict.get("state", "undefined") == "ALIVE" if worker_state_dict is not None else False def _init_with_detached_workers(self, worker_names, worker_handles): # ray.get_actor holds a weak reference to the actor, which causes actors garbage collected unexpectedly # if we only hold spawn RayWorkerGroup. By passing actor handle explicitly, spawn RayWorkerGroup have # strong reference to these actors. # https://github.com/ray-project/ray/pull/45699 workers = worker_handles if worker_handles else [ray.get_actor(name=name) for name in worker_names] self._workers = workers self._world_size = len(worker_names) def _get_master_addr_port(self, pg): """Get master addr and port for this worker group""" self._master_addr, self._master_port = ray.get( get_master_addr_port.options( scheduling_strategy=PlacementGroupSchedulingStrategy( placement_group=pg, placement_group_bundle_index=0 ), ).remote() ) def _init_with_resource_pool(self, resource_pool, ray_cls_with_init, bin_pack, detached, worker_env=None): """Initialize the worker group by creating new workers from a resource pool. 
Args: resource_pool: Resource pool for worker allocation ray_cls_with_init: Class with initialization arguments for workers bin_pack: Whether to use strict bin packing for resource allocation detached: Whether workers should be detached """ use_gpu = resource_pool.use_gpu strategy = "PACK" if bin_pack: strategy = "STRICT_PACK" pgs = resource_pool.get_placement_groups(strategy=strategy, device_name=self.device_name) world_size = resource_pool.world_size self._world_size = world_size # cia.add_kwarg("_world_size", world_size) num_gpus = 1 / resource_pool.max_colocate_count rank = -1 local_world_size = resource_pool.store[0] for pg_idx, pg in enumerate(sort_placement_group_by_node_ip(pgs)): assert local_world_size <= pg.bundle_count, f"when generating for {self.name_prefix}, for the " if pg_idx == 0: self._get_master_addr_port(pg) for local_rank in range(local_world_size): rank += 1 # we pass in environment variable at option so that Worker can use environment variable to set env_vars = { "WORLD_SIZE": str(world_size), "RANK": str(rank), "WG_PREFIX": self.name_prefix, "WG_BACKEND": "ray", "RAY_LOCAL_WORLD_SIZE": str(local_world_size), "MASTER_ADDR": self._master_addr, "MASTER_PORT": self._master_port, } if worker_env is not None: logging.debug(f"Appending ray class env, origin: {env_vars}, customized env: {worker_env}") conflict_env_vars = set(env_vars.keys()) & set(worker_env.keys()) if len(conflict_env_vars) > 0: logging.error( f"User customized env vars conflict with system env: {conflict_env_vars} " f"Overriding may cause unexpected behavior." ) raise ValueError(f"Cannot override protected system env: {conflict_env_vars}") env_vars.update(worker_env) import re cia_name = type(ray_cls_with_init.cls).__name__ match = re.search(r"ActorClass\(([^)]+)\)", cia_name) # ray.remote(Obj) -> "ActorClass(Obj)" cia_name = match.group(1) if match else cia_name # "ActorClass(Obj)" -> "Obj" name = f"{self.name_prefix}{cia_name}_{pg_idx}:{local_rank}" # e.g. Worker_2:5 if self.profile_steps and self.device_name == "cuda": ray_cls_with_init.update_options( { "runtime_env": { "env_vars": env_vars, "nsight": self.worker_nsight_options, }, "name": name, } ) else: ray_cls_with_init.update_options({"runtime_env": {"env_vars": env_vars}, "name": name}) if detached: ray_cls_with_init.update_options({"lifetime": "detached"}) # create a worker worker = ray_cls_with_init( placement_group=pg, placement_group_bundle_idx=local_rank, use_gpu=use_gpu, num_gpus=num_gpus, device_name=self.device_name, ) self._workers.append(worker) self._worker_names.append(name) @property def worker_names(self): return self._worker_names @classmethod def from_detached( cls, name_prefix=None, worker_names=None, worker_handles=None, ray_cls_with_init=None, **kwargs, ): """Create a worker group from existing detached workers. Args: name_prefix: Prefix for worker names worker_names: Names of existing workers to attach to ray_cls_with_init: Class with initialization arguments for workers Returns: A new RayWorkerGroup instance """ worker_group = cls( resource_pool=None, ray_cls_with_init=ray_cls_with_init, name_prefix=name_prefix, worker_names=worker_names, worker_handles=worker_handles, **kwargs, ) return worker_group def spawn(self, prefix_set): """Spawn to a dictionary of worker groups, each with a subset of method with prefix. 
Args: prefix_set: Set of prefixes to create worker groups for Returns: Dictionary of worker groups keyed by prefix """ if self.fused_worker_used: return self.spawn_fused(prefix_set) def _rebind_actor_methods(worker_group, actor_name): prefix: str = actor_name + "_" for method_name in dir(worker_group): if method_name.startswith(prefix): original_method_name = method_name.removeprefix(prefix) method = getattr(worker_group, method_name) setattr(worker_group, original_method_name, method) new_worker_group_dict = {} for prefix in prefix_set: new_worker_group = self.from_detached( name_prefix=self.name_prefix, worker_names=self._worker_names, worker_handles=self._workers, ray_cls_with_init=self.ray_cls_with_init, profile_steps=self.profile_steps, worker_nsight_options=self.worker_nsight_options, ) _rebind_actor_methods(new_worker_group, prefix) new_worker_group_dict[prefix] = new_worker_group return new_worker_group_dict def spawn_fused(self, prefix_set): """Create a dictionary of worker groups for fused workers. Args: prefix_set: Set of prefixes to create worker groups for Returns: Dictionary of worker groups keyed by prefix """ wg_dict = dict() for key in prefix_set: new_wg = deepcopy(self) new_wg._bind_worker_method(self.ray_cls_with_init.cls.raw_cls_dict[key], func_generator) new_wg.sub_cls_name = key wg_dict[key] = new_wg return wg_dict def fuse(self, prefix_set): """Fuse multiple worker groups into the current worker group. Args: prefix_set: Set of prefixes to fuse into the worker group """ if self.wg_dict is None: self.wg_dict = self.spawn(prefix_set) for role_name, role_wg in self.wg_dict.items(): setattr(self, role_name, role_wg) self.method_names = self._bind_worker_method(self.ray_cls_with_init.cls, func_generator) def _execute_remote_single_worker(self, worker, method_name: str, *args, **kwargs): """Execute a method on a single worker remotely. Args: worker: The worker actor handle method_name: Name of the method to execute *args: Positional arguments for the method **kwargs: Keyword arguments for the method Returns: Remote object reference to the method execution """ if self.fused_worker_used and method_name not in self.method_names: remote_call = getattr(worker, self.fused_worker_execute_fn_name) return remote_call.remote(f"{self.sub_cls_name}_fwmn_{method_name}", *args, **kwargs) # fused worker not used remote_call = getattr(worker, method_name) return remote_call.remote(*args, **kwargs) def execute_rank_zero_sync(self, method_name: str, *args, **kwargs): """Execute a method on rank zero worker synchronously. Args: method_name: Name of the method to execute *args: Positional arguments for the method **kwargs: Keyword arguments for the method Returns: Result of the method execution """ return ray.get(self.execute_rank_zero_async(method_name, *args, **kwargs)) def execute_rank_zero_async(self, method_name: str, *args, **kwargs): """Execute a method on rank zero worker asynchronously. Args: method_name: Name of the method to execute *args: Positional arguments for the method **kwargs: Keyword arguments for the method Returns: Remote object reference to the method execution """ return self._execute_remote_single_worker(self._workers[0], method_name, *args, **kwargs) def execute_rank_zero(self, method_name: str, *args, **kwargs): """Alias for execute_rank_zero_async. 
Args: method_name: Name of the method to execute *args: Positional arguments for the method **kwargs: Keyword arguments for the method Returns: Remote object reference to the method execution """ return self.execute_rank_zero_async(method_name, *args, **kwargs) def execute_all(self, method_name: str, *args, **kwargs): """Alias for execute_all_async. Args: method_name: Name of the method to execute *args: Positional arguments for the method **kwargs: Keyword arguments for the method Returns: List of remote object references to the method executions """ return self.execute_all_async(method_name, *args, **kwargs) def execute_all_sync(self, method_name: str, *args, **kwargs): """Execute a method on all workers synchronously. Args: method_name: Name of the method to execute *args: Positional arguments for the method **kwargs: Keyword arguments for the method Returns: List of results from all workers """ return ray.get(self.execute_all_async(method_name, *args, **kwargs)) def execute_all_async(self, method_name: str, *args, **kwargs): """Execute a method on all workers asynchronously. Args: method_name: Name of the method to execute *args: Positional arguments for the method **kwargs: Keyword arguments for the method Returns: List of remote object references to the method executions """ # Here, we assume that if all arguments in args and kwargs are lists, # and their lengths match len(self._workers), we'll distribute each # element in these lists to the corresponding worker # print(f"execute_all_async: method {method_name}({args}, {kwargs})") length = len(self._workers) if all(isinstance(arg, list) for arg in args) and all(isinstance(kwarg, list) for kwarg in kwargs.values()): if all(len(arg) == length for arg in args) and all(len(kwarg) == length for kwarg in kwargs.values()): # print(f"splitting args and kwargs into {length} shards") result = [] for i in range(length): sliced_args = tuple(arg[i] for arg in args) sliced_kwargs = {k: v[i] for k, v in kwargs.items()} result.append( self._execute_remote_single_worker(self._workers[i], method_name, *sliced_args, **sliced_kwargs) ) return result return [self._execute_remote_single_worker(worker, method_name, *args, **kwargs) for worker in self._workers] @property def master_address(self): return self._master_addr @property def master_port(self): return self._master_port @property def workers(self): return self._workers @property def world_size(self): return self._world_size """ Utilities that enables creating workers inside the same ray.Actor, with code written in separate ray.Actors. """ # deprecated, switching to FusedWorker def _bind_workers_method_to_parent(cls, key, user_defined_cls): """ Binds the methods of each worker to the WorkerDict. 
Note that we only bind public methods that are decorated by register """ for method_name in dir(user_defined_cls): try: method = getattr(user_defined_cls, method_name) assert callable(method), f"{method_name} in {user_defined_cls} is not callable" except Exception: # if it is a property, it will fail because Class doesn't have instance property continue if hasattr(method, MAGIC_ATTR): def generate_function(name, key=key): def func(self, *args, **kwargs): # dispatch to the actual worker return getattr(self.worker_dict[key], name)(*args, **kwargs) async def async_func(self, *args, **kwargs): # dispatch to the actual worker return await getattr(self.worker_dict[key], name)(*args, **kwargs) wrapper = async_func if inspect.iscoroutinefunction(method) else func # noqa: B023 return wrapper func = generate_function(method_name) # pass MAGIC_ATTR for outer worker group attrs = getattr(method, MAGIC_ATTR) setattr(func, MAGIC_ATTR, attrs) try: # bind direct rollout method to class without prefix if attrs["dispatch_mode"] == Dispatch.DIRECT_ROLLOUT_METHOD and "rollout" in key: assert not hasattr(cls, method_name), ( f"conflict direct rollout method {method_name} with role {key}" ) setattr(cls, method_name, func) print(f"bind role {key} method {method_name} to class {cls}") else: method_name_with_prefix = key + "_" + method_name setattr(cls, method_name_with_prefix, func) except Exception as e: raise ValueError(f"Fail to set method_name {method_name}") from e def _unwrap_ray_remote(cls): if hasattr(cls, "__ray_actor_class__"): cls = cls.__ray_actor_class__ return cls def _determine_fsdp_megatron_base_class(mros: list): """ - megatron: base class should be MegatronWorker - fsdp: base class should be Worker """ for cls in mros[0]: if cls.__name__ == "MegatronWorker": return cls if cls.__name__ == "Worker": return cls raise ValueError(f"Cannot determine base class for {mros}") # deprecated, switching to FusedWorker def create_colocated_worker_cls(class_dict: dict[str, RayClassWithInitArgs]): """ This function should return a class instance that delegates the calls to every cls in cls_dict """ cls_dict = {} init_args_dict = {} worker_cls = _determine_fsdp_megatron_base_class( [cls.cls.__ray_actor_class__.__mro__ for cls in class_dict.values()] ) assert issubclass(worker_cls, Worker), f"worker_cls {worker_cls} should be a subclass of Worker" print(f"colocated worker base class {worker_cls}") for key, cls in class_dict.items(): cls_dict[key] = cls.cls init_args_dict[key] = {"args": cls.args, "kwargs": cls.kwargs} assert cls_dict.keys() == init_args_dict.keys() # TODO: create a class with customizable name class WorkerDict(worker_cls): def __init__(self): super().__init__() self.worker_dict = {} for key, user_defined_cls in cls_dict.items(): user_defined_cls = _unwrap_ray_remote(user_defined_cls) # directly instantiate the class without remote # in worker class, e.g. 
# when DISABLE_WORKER_INIT == 1 it will return immediately with temp_env_var("DISABLE_WORKER_INIT", "1"): self.worker_dict[key] = user_defined_cls( *init_args_dict[key].get("args", ()), **init_args_dict[key].get("kwargs", {}) ) # now monkey-patch the methods from inner class to WorkerDict for key, user_defined_cls in cls_dict.items(): user_defined_cls = _unwrap_ray_remote(user_defined_cls) _bind_workers_method_to_parent(WorkerDict, key, user_defined_cls) remote_cls = ray.remote(WorkerDict) remote_cls = RayClassWithInitArgs(cls=remote_cls) return remote_cls FusedWorkerCLSName = "FusedWorker" def create_colocated_worker_raw_cls(class_dict: dict[str, RayClassWithInitArgs]): """ This function returns a FusedWorker class. `FusedWorker.{class_name}` -> FusedClass Use `class_name` as a param to directly access the underlying class. `FusedWorker._fuw_execute("{class_name}_fwmn_{method_name}", *args, **kwargs)` First param must be "{class_name}_fwmn_{method_name}" in order to access `method_name` of underlying class `{class_name}`. `FusedWorker.fused_worker_dict` -> {"class_name": FusedClass} Stores all underlying classes. `FusedClass.fused_worker_dict` -> {"class_name": FusedClass} The same as `FusedWorker.fused_worker_dict`, enables underlying class to access other underlying classes. """ raw_cls_dict = {cls_name: _unwrap_ray_remote(cia.cls) for cls_name, cia in class_dict.items()} init_args_dict = {cls_name: cia.args for cls_name, cia in class_dict.items()} init_kwargs_dict = {cls_name: cia.kwargs for cls_name, cia in class_dict.items()} cls_names = list(class_dict.keys()) # FusedWorker_Actor_Critic class_name_renamed = "_".join([FusedWorkerCLSName] + cls_names) class FusedWorker(Worker): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.cls_names = cls_names self.raw_cls_dict = raw_cls_dict self.init_args_dict = init_args_dict self.init_kwargs_dict = init_kwargs_dict for cls_name, udc, ud_args, ud_kwargs in zip( self.cls_names, self.raw_cls_dict.values(), self.init_args_dict.values(), self.init_kwargs_dict.values(), strict=True, ): with temp_env_var("DISABLE_WORKER_INIT", "1"): udc._get_ray_actor_cls_name = lambda x, name_renamed=class_name_renamed: name_renamed udc._get_ray_method_prefix = lambda x, name_prefixed=cls_name: f"{name_prefixed}_" # cls_name = "actor", "critic", udc = ActorWorker, CriticWorker self.fused_worker_dict[cls_name] = udc(*ud_args, **ud_kwargs) setattr(self, cls_name, self.fused_worker_dict[cls_name]) # injecting fused_worker to each sub worker so they can be aware of existence of each other for _, worker in self.fused_worker_dict.items(): setattr(worker, Worker.fused_worker_attr_name, self.fused_worker_dict) def _fuw_execute(self, method_name: str, *args, **kwargs): # for fused_worker, method_name is in a form of "{cls_name}_fwmn_{method_name}" # where fwmn stands "fused worker method name" names = method_name.split("_fwmn_") cls_name = names[0] method_name = names[1] assert cls_name in self.fused_worker_dict, ( f"calling {cls_name}'s {method_name}, but {cls_name} not in fused_worker_dict" ) udc_method = getattr(self.fused_worker_dict[cls_name], method_name) return udc_method(*args, **kwargs) renamed_fused_worker_cls = type(class_name_renamed, (FusedWorker,), {}) renamed_fused_worker_cls.is_fused_worker = True renamed_fused_worker_cls.raw_cls_dict = raw_cls_dict return renamed_fused_worker_cls def create_colocated_worker_cls_fused(class_dict: dict[str, RayClassWithInitArgs]): """ This function returns a RayClassWithInitArgs instance of 
    FusedWorker, which is a replacement for `create_colocated_worker_cls`. A WorkerGroup
    constructed from this class is a colocated WorkerGroup, referred to as
    `ColocateWorkerGroup` below.

    `ColocateWorkerGroup.spawn(prefix_set)`
        returns a dict of WorkerGroups {"class_name": WorkerGroup}; each WorkerGroup in
        this dict has the methods of the underlying class `class_name` attached.

    `ColocateWorkerGroup.fuse(prefix_set)`
        after executing this function, `ColocateWorkerGroup.{class_name}` returns a
        WorkerGroup with the methods of the underlying class `class_name` attached.
    """
    raw_colocated_worker_cls = create_colocated_worker_raw_cls(class_dict)

    remote_cls = ray.remote(raw_colocated_worker_cls)
    cia = RayClassWithInitArgs(cls=remote_cls)
    cia.fused_worker_used = True

    return cia


================================================
FILE: verl_distillation/verl/third_party/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


================================================
FILE: verl_distillation/verl/third_party/sglang/__init__.py
================================================
# Copyright 2023-2024 SGLang Team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


================================================
FILE: verl_distillation/verl/third_party/sglang/parallel_state.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The SGlang team.
# Adapted from
# https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/core/parallel_state.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
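# Launch-environment sketch (illustrative): initialize_parallel_state() below reads RANK,
# LOCAL_RANK and WORLD_SIZE from the environment, so the process is expected to be started
# by torchrun (or an equivalent launcher), e.g.
#
#   torchrun --nnodes=1 --nproc_per_node=8 train.py   # `train.py` is a hypothetical entrypoint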
"""Model and data parallel groups.""" import os from typing import Optional import sglang.srt.distributed.parallel_state as ps import torch import torch.distributed from sglang.srt.distributed.parallel_state import ( get_pp_group, get_world_group, init_distributed_environment, init_model_parallel_group, ) """ This version is strongly tied with Megatron to implement HybridEngine and weight sharing between vllm and Megatron. - We assume the Megatron tp+dp+pp world is already established before calling this function. """ # Device mesh for using DTensor _DEVICE_MESH = None # Tensor model parallel group that the current rank belongs to. _TP = None # Pipeline model parallel group that the current rank belongs to. _PP = None # This method is for initializing the ParallelGroup when using HybridEngine # NOTE(linjunrong): this function is for megatron def initialize_parallel_state( distributed_init_method: str = "env://", backend: str = "nccl", tensor_model_parallel_size: int = 1, num_tp_per_train_tp: int = 1, pipeline_model_parallel_size: int = 1, ): # torch.distributed.all_reduce does not free the input tensor until # the synchronization point. This causes the memory usage to grow # as the number of all_reduce calls increases. This env var disables # this behavior. # Related issue: # https://discuss.pytorch.org/t/cuda-allocation-lifetime-for-inputs-to-distributed-all-reduce/191573 os.environ["TORCH_NCCL_AVOID_RECORD_STREAMS"] = "1" # NOTE(sgm): Modify for verl, Env vars will be set by TORCHRUN. rank = int(os.getenv("RANK", "-1")) local_rank = int(os.getenv("LOCAL_RANK", "0")) # Use the world_size set by TORCHRUN world_size = int(os.getenv("WORLD_SIZE", "-1")) assert world_size != -1, "The world_size is set to -1, not initialized by TORCHRUN" init_distributed_environment(world_size, rank, distributed_init_method, local_rank, backend) if torch.distributed.get_world_size() > 1: # NOTE: build a separate inference group with infer tp & micro dp initialize_model_parallel_for_sglang( tensor_model_parallel_size=tensor_model_parallel_size, num_tensor_model_parallel_groups_per_train_tp=num_tp_per_train_tp, ) else: initialize_model_parallel(tensor_model_parallel_size, pipeline_model_parallel_size, backend) # NOTE(linjunrong): After init SGLang rollout using class EngineFragment, user should always remember to call # this function to sync the _TP, _PP define at the beginning of this file. Otherwise, only the conterparts # inside sglang.srt.distributed are init as ProcessGroup, the symbols defined in this file remain as None. # It could be weird to maintain two _TP and _PP, I follow the same way to maintain an extra ones for # verl itself as how it was done in verl.third_party.vllm.parallel_state. Note that the process is a little # bit different def ensure_model_parallel_initialized( tensor_model_parallel_size: int, pipeline_model_parallel_size: int = 1, backend: Optional[str] = None, ) -> None: """Helper to initialize model parallel groups if they are not initialized, or ensure tensor-parallel and pipeline-parallel sizes are equal to expected values if the model parallel groups are initialized. 
""" # get the backend of _DEVICE_WORLD_GROUP backend = backend or torch.distributed.get_backend(get_world_group().device_group) if not model_parallel_is_initialized(): initialize_model_parallel(tensor_model_parallel_size, pipeline_model_parallel_size, backend) return assert get_tensor_model_parallel_world_size() == tensor_model_parallel_size, ( f"tensor parallel group already initialized, but of unexpected size: " f"{get_tensor_model_parallel_world_size()=} vs. {tensor_model_parallel_size=}" ) pp_world_size = get_pp_group().world_size assert pp_world_size == pipeline_model_parallel_size, ( f"pipeline parallel group already initialized, but of unexpected size: {pp_world_size=} vs. " f"{pipeline_model_parallel_size=}" ) # TODO(sgm): deviate from the v0.5.4, not pp now # NOTE(linjunrong): the SGLang version using _TP instead of ps._TP def model_parallel_is_initialized(): """Check if tensor and pipeline parallel groups are initialized.""" return _TP is not None # and _PIPELINE_MODEL_PARALLEL_GROUP is not None) def initialize_model_parallel_for_sglang( tensor_model_parallel_size: int, num_tensor_model_parallel_groups_per_train_tp: int = 1, pipeline_model_parallel_size: int = 1, ) -> None: pass # Get world size and rank. Ensure some consistencies. assert torch.distributed.is_initialized() assert isinstance(tensor_model_parallel_size, int) # assert num_tensor_model_parallel_groups_per_train_tp == 1 and not different_tp_group # assert num_tensor_model_parallel_groups_per_train_tp > 1 and different_tp_group # Build the tensor model-parallel groups. assert ps._TP is None, "tensor model parallel group is already initialized" global _TP world_size: int = torch.distributed.get_world_size() backend = torch.distributed.get_backend() num_tensor_model_parallel_groups = world_size // tensor_model_parallel_size if num_tensor_model_parallel_groups_per_train_tp == 1: # if tensor_model_parallel_size == train_tensor_parallel_size: # using the same tp group as Megatron/vllm assert _TP is None, "tensor model parallel group is already initialized" group_ranks = [] for i in range(num_tensor_model_parallel_groups): ranks = range(i * tensor_model_parallel_size, (i + 1) * tensor_model_parallel_size) group_ranks.append(ranks) _TP = init_model_parallel_group( group_ranks=group_ranks, local_rank=get_world_group().local_rank, backend=backend, use_custom_allreduce=False, # TODO: check why True is not work in Ray trainer use_message_queue_broadcaster=True, ) ps._TP = _TP # _MICRO_DATA_PARALLEL_GROUP is move to hybrid engine else: # initialize a micro_dp group and a tp group # assume training tp=4, infer tp=2, then, weight is partitioned as # [1], [2], [3], [4] for training and [1,2], [1,2], [3,4], [3,4] for inference # Build the inference tp groups # train_tp = train_tensor_parallel_size train_tp = num_tensor_model_parallel_groups_per_train_tp * tensor_model_parallel_size # num_tensor_model_parallel_groups_per_train_tp = train_tp // tensor_model_parallel_size assert _TP is None, "tensor model parallel group is already initialized" group_ranks = [] for i in range(num_tensor_model_parallel_groups // num_tensor_model_parallel_groups_per_train_tp): start = train_tp * i end = train_tp * (i + 1) for j in range(num_tensor_model_parallel_groups_per_train_tp): ranks = list(range(start, end, num_tensor_model_parallel_groups_per_train_tp)) for i in range(len(ranks)): ranks[i] += j group_ranks.append(ranks) _TP = init_model_parallel_group( group_ranks=group_ranks, local_rank=get_world_group().local_rank, backend=backend, 
use_custom_allreduce=False, # TODO: check why True is not work in Ray trainer use_message_queue_broadcaster=True, ) ps._TP = _TP # Build the pipeline model-parallel groups. # global _PIPELINE_MODEL_PARALLEL_GROUP # global _PIPELINE_GLOBAL_RANKS # assert ps._PIPELINE_MODEL_PARALLEL_GROUP is None, ("pipeline model parallel group is already initialized") # ps._PIPELINE_MODEL_PARALLEL_GROUP = mpu.get_pipeline_model_parallel_group() # ps._PIPELINE_GLOBAL_RANKS = mpu.get_pipeline_model_parallel_ranks() # TODO: init using device mesh (not support hybrid engine now) # Build the pipeline model-parallel groups. num_pipeline_model_parallel_groups: int = world_size // pipeline_model_parallel_size global _PP assert _PP is None, "pipeline model parallel group is already initialized" group_ranks = [] for i in range(num_pipeline_model_parallel_groups): ranks = list(range(i, world_size, num_pipeline_model_parallel_groups)) group_ranks.append(ranks) # pipeline parallel does not need custom allreduce _PP = init_model_parallel_group(group_ranks, get_world_group().local_rank, backend, use_custom_allreduce=False) ps._PP = _PP # for verl def initialize_model_parallel( tensor_model_parallel_size: int = 1, pipeline_model_parallel_size: int = 1, backend: Optional[str] = None, ) -> None: """ NOTE: This method is a hack from the open-sourced version without asertion of world_size = tp * pp Initialize model parallel groups. Arguments: tensor_model_parallel_size: number of GPUs used for tensor model parallelism. pipeline_model_parallel_size: number of GPUs used for pipeline model parallelism. Let's say we have a total of 8 GPUs denoted by g0 ... g7 and we use 2 GPUs to parallelize the model tensor, and 4 GPUs to parallelize the model pipeline. The present function will create 4 tensor model-parallel groups and 2 pipeline model-parallel groups: 4 tensor model-parallel groups: [g0, g1], [g2, g3], [g4, g5], [g6, g7] 2 pipeline model-parallel groups: [g0, g2, g4, g6], [g1, g3, g5, g7] Note that for efficiency, the caller should make sure adjacent ranks are on the same DGX box. For example if we are using 2 DGX-1 boxes with a total of 16 GPUs, rank 0 to 7 belong to the first box and ranks 8 to 15 belong to the second box. """ # Get world size and rank. Ensure some consistencies. 
assert torch.distributed.is_initialized() world_size: int = torch.distributed.get_world_size() backend = backend or torch.distributed.get_backend(ps.get_world_group().device_group) # NOTE(sgm) we don't assert world_size == tp * pp # DP is not managed by vllm but by the VeRL WorkerGroup # if (world_size != # tensor_model_parallel_size * pipeline_model_parallel_size): # raise RuntimeError( # f"world_size ({world_size}) is not equal to " # f"tensor_model_parallel_size ({tensor_model_parallel_size}) x " # f"pipeline_model_parallel_size ({pipeline_model_parallel_size})") num_tensor_model_parallel_groups: int = world_size // tensor_model_parallel_size global _TP assert _TP is None, "tensor model parallel group is already initialized" group_ranks = [] for i in range(num_tensor_model_parallel_groups): ranks = list(range(i * tensor_model_parallel_size, (i + 1) * tensor_model_parallel_size)) group_ranks.append(ranks) # message queue broadcaster is only used in tensor model parallel group if ps._TP is not None: _TP = ps._TP else: _TP = init_model_parallel_group( group_ranks, get_world_group().local_rank, backend, use_custom_allreduce=False, # TODO: check why True is not work in Ray trainer use_message_queue_broadcaster=True, ) ps._TP = _TP # TODO: init using device mesh (not support hybrid engine now) # Build the pipeline model-parallel groups. num_pipeline_model_parallel_groups: int = world_size // pipeline_model_parallel_size global _PP assert _PP is None, "pipeline model parallel group is already initialized" group_ranks = [] for i in range(num_pipeline_model_parallel_groups): ranks = list(range(i, world_size, num_pipeline_model_parallel_groups)) group_ranks.append(ranks) # pipeline parallel does not need custom allreduce if ps._TP is not None: _PP = ps._TP else: _PP = init_model_parallel_group(group_ranks, get_world_group().local_rank, backend, use_custom_allreduce=False) ps._PP = _PP """ Device mesh utilities """ def get_device_mesh(): assert _DEVICE_MESH is not None, "device mesh is not initialized" return _DEVICE_MESH """ Tensor model parallel utilities """ # NOTE(linjunrong): In the vllm version parallel_state.py. verl created its own _TP and _PP as verl want to use # the process group for some extra purpose. Under the hood, there is no difference between them and the original # one in vllm.distributed.parallel_state. However, the implementation need to hack the init process of inference # engine, as we do not maintain another SGLang here, I just use the original _TP and _PP directly. 
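# Worked example (derived from initialize_model_parallel_for_sglang above): with
# world_size=8, tensor_model_parallel_size=2 and
# num_tensor_model_parallel_groups_per_train_tp=2 (i.e. training tp=4), the inference
# TP groups come out as
#
#   i=0 (training TP block ranks 0..3): j=0 -> [0, 2], j=1 -> [1, 3]
#   i=1 (training TP block ranks 4..7): j=0 -> [4, 6], j=1 -> [5, 7]
#
# i.e. each inference TP group strides through the training TP block it is carved from.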
def get_tensor_model_parallel_group(): """Get the tensor model parallel group the caller rank belongs to.""" assert _TP is not None, "tensor model parallel group is not initialized" return _TP.device_group def get_tensor_model_parallel_world_size(): """Return world size for the tensor model parallel group.""" return torch.distributed.get_world_size(group=get_tensor_model_parallel_group()) def get_tensor_model_parallel_rank(): """Return my rank for the tensor model parallel group.""" return torch.distributed.get_rank(group=get_tensor_model_parallel_group()) def get_tensor_model_parallel_src_rank(): """Calculate the global rank corresponding to the first local rank in the tensor model parallel group.""" global_rank = torch.distributed.get_rank() local_world_size = get_tensor_model_parallel_world_size() return (global_rank // local_world_size) * local_world_size ================================================ FILE: verl_distillation/verl/third_party/torch/__init__.py ================================================ # official torch 2.6.0 set_model_state_dict API leads to OOM # this is a copy of torch/distributed/checkpoint from torch 2.7.0 # From PyTorch: # Copyright (c) 2016- Facebook, Inc (Adam Paszke) # Copyright (c) 2014- Facebook, Inc (Soumith Chintala) # Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert) # Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu) # Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu) # Copyright (c) 2011-2013 NYU (Clement Farabet) # Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston) # Copyright (c) 2006 Idiap Research Institute (Samy Bengio) # Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz) # From Caffe2: # Copyright (c) 2016-present, Facebook Inc. All rights reserved. # All contributions by Facebook: # Copyright (c) 2016 Facebook Inc. # All contributions by Google: # Copyright (c) 2015 Google Inc. # All rights reserved. # All contributions by Yangqing Jia: # Copyright (c) 2015 Yangqing Jia # All rights reserved. # All contributions by Kakao Brain: # Copyright 2019-2020 Kakao Brain # All contributions by Cruise LLC: # Copyright (c) 2022 Cruise LLC. # All rights reserved. # All contributions by Tri Dao: # Copyright (c) 2024 Tri Dao. # All rights reserved. # All contributions by Arm: # Copyright (c) 2021, 2023-2024 Arm Limited and/or its affiliates # All contributions from Caffe: # Copyright(c) 2013, 2014, 2015, the respective contributors # All rights reserved. # All other contributions: # Copyright(c) 2015, 2016 the respective contributors # All rights reserved. # Caffe2 uses a copyright model similar to Caffe: each contributor holds # copyright over their contributions to Caffe2. The project versioning records # all such contribution and copyright details. If a contributor wants to further # mark their specific copyright on a particular contribution, they should # indicate their copyright solely in the commit message of the change when it is # committed. # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. 
Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America # and IDIAP Research Institute nor the names of its contributors may be # used to endorse or promote products derived from this software without # specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. ================================================ FILE: verl_distillation/verl/third_party/torch/distributed/__init__.py ================================================ # official torch 2.6.0 set_model_state_dict API leads to OOM # this is a copy of torch/distributed/checkpoint from torch 2.7.0 # From PyTorch: # Copyright (c) 2016- Facebook, Inc (Adam Paszke) # Copyright (c) 2014- Facebook, Inc (Soumith Chintala) # Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert) # Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu) # Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu) # Copyright (c) 2011-2013 NYU (Clement Farabet) # Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston) # Copyright (c) 2006 Idiap Research Institute (Samy Bengio) # Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz) # From Caffe2: # Copyright (c) 2016-present, Facebook Inc. All rights reserved. # All contributions by Facebook: # Copyright (c) 2016 Facebook Inc. # All contributions by Google: # Copyright (c) 2015 Google Inc. # All rights reserved. # All contributions by Yangqing Jia: # Copyright (c) 2015 Yangqing Jia # All rights reserved. # All contributions by Kakao Brain: # Copyright 2019-2020 Kakao Brain # All contributions by Cruise LLC: # Copyright (c) 2022 Cruise LLC. # All rights reserved. # All contributions by Tri Dao: # Copyright (c) 2024 Tri Dao. # All rights reserved. # All contributions by Arm: # Copyright (c) 2021, 2023-2024 Arm Limited and/or its affiliates # All contributions from Caffe: # Copyright(c) 2013, 2014, 2015, the respective contributors # All rights reserved. # All other contributions: # Copyright(c) 2015, 2016 the respective contributors # All rights reserved. # Caffe2 uses a copyright model similar to Caffe: each contributor holds # copyright over their contributions to Caffe2. The project versioning records # all such contribution and copyright details. If a contributor wants to further # mark their specific copyright on a particular contribution, they should # indicate their copyright solely in the commit message of the change when it is # committed. # All rights reserved. 
# Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America # and IDIAP Research Institute nor the names of its contributors may be # used to endorse or promote products derived from this software without # specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. ================================================ FILE: verl_distillation/verl/third_party/torch/distributed/_state_dict_utils.py ================================================ # official torch 2.6.0 set_model_state_dict API leads to OOM # this is a copy of torch/distributed/checkpoint from torch 2.7.0 # From PyTorch: # Copyright (c) 2016- Facebook, Inc (Adam Paszke) # Copyright (c) 2014- Facebook, Inc (Soumith Chintala) # Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert) # Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu) # Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu) # Copyright (c) 2011-2013 NYU (Clement Farabet) # Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston) # Copyright (c) 2006 Idiap Research Institute (Samy Bengio) # Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz) # From Caffe2: # Copyright (c) 2016-present, Facebook Inc. All rights reserved. # All contributions by Facebook: # Copyright (c) 2016 Facebook Inc. # All contributions by Google: # Copyright (c) 2015 Google Inc. # All rights reserved. # All contributions by Yangqing Jia: # Copyright (c) 2015 Yangqing Jia # All rights reserved. # All contributions by Kakao Brain: # Copyright 2019-2020 Kakao Brain # All contributions by Cruise LLC: # Copyright (c) 2022 Cruise LLC. # All rights reserved. # All contributions by Tri Dao: # Copyright (c) 2024 Tri Dao. # All rights reserved. # All contributions by Arm: # Copyright (c) 2021, 2023-2024 Arm Limited and/or its affiliates # All contributions from Caffe: # Copyright(c) 2013, 2014, 2015, the respective contributors # All rights reserved. # All other contributions: # Copyright(c) 2015, 2016 the respective contributors # All rights reserved. # Caffe2 uses a copyright model similar to Caffe: each contributor holds # copyright over their contributions to Caffe2. 
The project versioning records # all such contribution and copyright details. If a contributor wants to further # mark their specific copyright on a particular contribution, they should # indicate their copyright solely in the commit message of the change when it is # committed. # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America # and IDIAP Research Institute nor the names of its contributors may be # used to endorse or promote products derived from this software without # specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
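# Usage sketch (illustrative): the main entry point of this vendored module is
# _gather_state_dict, which walks a possibly-nested state dict and all-gathers any
# ShardedTensor/DTensor leaves into full tensors; `model` below is a hypothetical
# FSDP-wrapped module whose state dict contains sharded tensors.
#
#   full_sd = _gather_state_dict(model.state_dict(), cpu_offload=True)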
# ruff: noqa: B028, UP038, UP007, E721, E501 # mypy: allow-untyped-defs import copy import io import math import weakref from collections.abc import Mapping, MutableMapping from typing import TYPE_CHECKING, Any, Callable, NamedTuple, Optional, Union, cast import torch import torch.distributed as dist import torch.nn.functional as F from torch.distributed._functional_collectives import AsyncCollectiveTensor if dist.is_available() or TYPE_CHECKING: from torch.distributed import distributed_c10d from torch.distributed._shard.sharded_tensor import ShardedTensor from torch.distributed.tensor import DTensor, Replicate, distribute_tensor from torch.distributed.tensor._utils import compute_local_shape_and_global_offset def _identity_func( obj: torch.Tensor, pg: Optional[dist.ProcessGroup], device: Optional[torch.device], companion_obj: Any, ) -> torch.Tensor: return obj def _all_gather_sharded_tensor( sharded_tensor: "ShardedTensor", pg: Optional[dist.ProcessGroup] = None, device: Optional[torch.device] = None, ) -> torch.Tensor: if pg is None: pg = distributed_c10d._get_default_group() world_size = dist.get_world_size(pg) shards = sharded_tensor.local_shards() dim_0_size = sharded_tensor.size()[0] # type: ignore[index] tensor_numel = sharded_tensor.size().numel() # type: ignore[union-attr] chunk_size = math.ceil(dim_0_size / world_size) * tensor_numel // dim_0_size pg_device = distributed_c10d._get_pg_default_device(pg) if device is None else device if shards: local_tensor = shards[0].tensor.flatten() if local_tensor.device.type != pg_device.type: local_tensor = local_tensor.to(pg_device) num_padding = chunk_size - local_tensor.numel() if num_padding > 0: local_tensor = F.pad(local_tensor, [0, num_padding]) else: local_tensor = torch.zeros(chunk_size, dtype=sharded_tensor.dtype, device=pg_device) tensor = torch.empty( chunk_size * world_size, dtype=local_tensor.dtype, device=pg_device, ) dist.all_gather_into_tensor(tensor, local_tensor, group=pg) tensor = tensor.narrow(0, 0, tensor_numel).reshape(sharded_tensor.size()) return tensor class CompanionMismatch(Exception): pass def _iterate_state_dict( iter_object: Any, sharded_tensor_func: Callable, dtensor_func: Callable, tensor_func: Callable, *, pg: Optional[dist.ProcessGroup] = None, device: Optional[torch.device] = None, cpu_offload: bool = False, companion_obj: Any = None, ranks_only: tuple[int, ...] = (), type_check: bool = True, non_blocking: bool = True, ) -> dict[str, Any]: """Iterate through the state dict, applying the given functions to each tensor type. Args: iter_object (Any): the target state_dict. sharded_tensor_func (Callable): the function to apply to ShardedTensor dtensor_func (Callable): the function to apply to DTensor tensor_func (Callable): the function to apply to Tensor pg (Optional[dist.ProcessGroup]): process group passed to tensor functions device (Optional[torch.device]): device passed to tensor functions cpu_offload (bool): whether to offload the tensors to CPU memory. This option is ignored if a companion_obj is supplied. companion_obj (Any): A companion object to the state dict. If this object is supplied, we attempt to copy the tensor to the companion object. ranks_only (Tuple[int, ...]): if this tuple is empty, all ranks will have the same state_dicts. Otherwise only ranks that in ``ranks_only`` have the same state_dicts. Other ranks will get empty state_dicts. type_check (bool): check if the instance data type is a supported type that can be saved by DCP. 
The current supported data types are torch.Tensor, DTensor, int, float, str, list, dict, None. non_blocking (bool): whether to use non-blocking copy when copying to the companion object. """ # TODO: should we use pytree? cpu_device = torch.device("cpu") if isinstance(iter_object, ShardedTensor): ret = sharded_tensor_func(iter_object, pg, device, companion_obj) elif isinstance(iter_object, DTensor): ret = dtensor_func(iter_object, pg, device, companion_obj) elif isinstance(iter_object, torch.Tensor): ret = tensor_func(iter_object, pg, device, companion_obj) elif isinstance(iter_object, (int, float, str, bytes, io.BytesIO)) or iter_object is None: ret = iter_object elif isinstance(iter_object, dict): if companion_obj is not None and ( not isinstance(companion_obj, dict) or set(companion_obj.keys()) != set(iter_object.keys()) ): msg = "" if isinstance(companion_obj, dict) else f"{set(companion_obj.keys())=} {set(iter_object.keys())=}" raise CompanionMismatch(msg) ret = { key: _iterate_state_dict( value, sharded_tensor_func, dtensor_func, tensor_func, pg=pg, device=device, cpu_offload=cpu_offload, companion_obj=companion_obj[key] if companion_obj is not None else None, ranks_only=ranks_only, type_check=type_check, non_blocking=non_blocking, ) for key, value in iter_object.items() } elif isinstance(iter_object, (list, tuple)): if companion_obj is not None and ( not isinstance(companion_obj, (list, tuple)) or len(companion_obj) != len(iter_object) ): raise CompanionMismatch ret = [ _iterate_state_dict( v, sharded_tensor_func, dtensor_func, tensor_func, pg=pg, device=device, cpu_offload=cpu_offload, companion_obj=companion_obj[idx] if companion_obj is not None else None, ranks_only=ranks_only, type_check=type_check, non_blocking=non_blocking, ) for idx, v in enumerate(iter_object) ] if isinstance(iter_object, tuple): ret = tuple(ret) elif not type_check: ret = copy.deepcopy(iter_object) else: raise ValueError(f"Unexpected value type {type(iter_object)}") if not ranks_only or dist.get_rank(pg) in ranks_only: if isinstance(ret, torch.Tensor): if cpu_offload and companion_obj is None: ret = ret.to(cpu_device) if companion_obj is not None: if isinstance(companion_obj, DTensor): assert isinstance(ret, DTensor) companion_obj._local_tensor.copy_(ret._local_tensor, non_blocking=non_blocking) else: companion_obj.copy_(ret, non_blocking=non_blocking) ret = companion_obj else: ret = {} if isinstance(ret, dict) else None return ret def _gather_state_dict( state_dict: dict[str, Any], *, pg: Optional[dist.ProcessGroup] = None, device: Optional[torch.device] = None, cpu_offload: bool = False, ranks_only: tuple[int, ...] = (), type_check: bool = True, ) -> dict[str, Any]: """ Given a state_dict, this API gathers all the ShardedTensors or DTensors in the state_dict. Args: state_dict (Dict[str, Any]): the target sharded state_dict. pg (Optional[dist.ProcessGroup]): the process group that is used to gather ShardedTensor. Note that gathering a DTensor will use the DeviceMesh. So this argument will be ignored when gathering a DTensor. device: (Optional[torch.device]): the device that is used to perform allgather for ShardedTensor. Note that gathering a DTensor will use the DeviceMesh. So this argument will be ignored when gathering a DTensor. cpu_offload (bool): whether to offload the tensors to CPU memory. The default value is False. ranks_only: (Tuple[int, ...]): if this tuple is empty, all ranks will have the same state_dicts. Otherwise only ranks that in ``ranks_only`` have the same state_dicts. 
Other ranks will get empty state_dicts. type_check: (bool): check if the instance data type is a supported type that can be saved by DCP. The current supported data types are torch.Tensor, DTensor, int, float, str, list, dict, None. Returns: The gathered state dictionary. """ def sharded_tensor_func(value, pg, device, companion_obj): # ShardedTensor does not seem to record the original device type. # So if the tensor is moved to CPU, we won't know the original type. # As a result, we have to rely on the user to tell us the correct one. cpu_device = torch.device("cpu") output_tensor = _all_gather_sharded_tensor(value, pg, device) local_shard_device = value.local_shards()[0].tensor.device if value.local_shards() else cpu_device if output_tensor.device != local_shard_device: value = output_tensor.to(local_shard_device) else: value = output_tensor return value def dtensor_func(value, pg, device, companion_obj): if value.device != value.device_mesh.device_type: value = value.to(value.device_mesh.device_type) # FSDP all_gather: [Shard(0)] -> [Replicate()] # HSDP all_gather: [Replicate(), Shard(0)] -> [Replicate(), Replicate()] # 2D FSDP + TP all_gather: # - [Shard(0), Shard(n)] -> [Replicate(), Replicate()] # - [Shard(0), Replicate()] -> [Replicate(), Replicate()] placements = [Replicate() for _ in value.placements] value = value.redistribute( device_mesh=value.device_mesh, placements=placements, ) # Call `wait()` to force the tensor to be synchronous with respect # to the main stream. # See the discussion in https://github.com/pytorch/pytorch/pull/117799. value = value.to_local() if isinstance(value, AsyncCollectiveTensor): value = value.wait() return value return _iterate_state_dict( state_dict, sharded_tensor_func, dtensor_func, _identity_func, pg=pg, device=device, cpu_offload=cpu_offload, ranks_only=ranks_only, type_check=type_check, ) def _offload_state_dict_to_cpu( state_dict: dict[str, Any], *, ranks_only: tuple[int, ...] = (), type_check: bool = True, ) -> dict[str, Any]: """ Given a state_dict, this API offload all the tensors to CPU memory. Args: state_dict (Dict[str, Any]): the target state_dict. pg (Optional[dist.ProcessGroup]): the process group that is used to gather ShardedTensor. Note that gathering a DTensor will use the DeviceMesh. So this argument will be ignored when gathering a DTensor. ranks_only: (Tuple[int, ...]): if this tuple is empty, all ranks will have the same state_dicts. Otherwise only ranks that in ``ranks_only`` have the same state_dicts. Other ranks will get empty state_dicts. type_check: (bool): check if the instance data type is a supported type that can be saved by DCP. The current supported data types are torch.Tensor, DTensor, int, float, str, list, dict, None. Returns: The gathered state dictionary. """ ret = _iterate_state_dict( state_dict, _identity_func, _identity_func, _identity_func, pg=None, device=None, cpu_offload=True, ranks_only=ranks_only, type_check=type_check, ) return ret @torch.no_grad() def _copy_state_dict( state_dict: dict[str, Any], copy_state_dict: dict[str, Any], non_blocking: bool = False, type_check: bool = True, ) -> dict[str, Any]: """ Copies all tensors in a given state dict into a different state_dict with the same structure. Additionally, a copied state dict with the same value references is returned. Editing the keys on this state dict will not affect the passed in copy_state_dict (but the value references are the same). .. 
warning:: It is expected by this function that state_dict and copy_state_dict share the same structure and data types. .. warning:: The current supported data types are torch.Tensor, DTensor, int, float, str, list, dict, None. Args: state_dict (Dict[str, Any]): the target state_dict. copy_state_dict (Dict[str, Any]): The state dict we are copying into. This state_dict must have exactly the same structure as the source `state_dict`. non_blocking: (bool): Whether copy ops should be performed asynchronously type_check (bool): check if the instance data type is a supported type that can be saved by DCP. The current supported data types are torch.Tensor, DTensor, int, float, str, list, dict, None. Returns: State Dict copy """ return _iterate_state_dict( state_dict, _identity_func, _identity_func, _identity_func, pg=None, device=None, cpu_offload=False, ranks_only=(), companion_obj=copy_state_dict, type_check=type_check, non_blocking=non_blocking, ) @torch.no_grad() def _create_cpu_state_dict( state_dict: dict[str, Any], pin_memory: bool = False, share_memory: bool = False ) -> dict[str, Any]: """ Given a state_dict, create another state_dict with the same structure and elements. However, all tensors in the returned state_dict are new tensors on CPU. These tensors can be placed on pin_memory or share_memory based on the provided arguments. .. warning:: Setting both `pin_memory` and `share_memory` to True significantly increases the latency of this method because of the nuances which require us to register memory as pinned directly as opposed to relying on the pin_memory cache allocator. This option should only be used for long lived tensors which are required to be shared. This is not the case as long as at least one of `pin_memory` or `share_memory` is set to False. """ def tensor_func( obj: torch.Tensor, pg: Optional[dist.ProcessGroup], device: Optional[torch.device], _: Any, ) -> torch.Tensor: if len(obj.size()) == 0: return torch.tensor(0, dtype=obj.dtype) if share_memory: t = torch.empty(*tuple(obj.size()), dtype=obj.dtype) t = t.share_memory_() if pin_memory: def unpin_memory(t): succ = int(torch.cuda.cudart().cudaHostUnregister(t.data_ptr())) assert succ == 0, f"Unpinning shared memory failed with error-code: {succ}" weakref.finalize(t, unpin_memory, t) succ = int( torch.cuda.cudart().cudaHostRegister( t.data_ptr(), t.numel() * t.element_size(), 1, # lines up with 'cudaHostRegisterPortable' ) ) assert succ == 0, f"Pinning shared memory failed with error-code: {succ}" return t elif pin_memory: return torch.empty(*tuple(obj.size()), dtype=obj.dtype).pin_memory() else: return torch.empty(*tuple(obj.size()), dtype=obj.dtype) def dtensor_func( obj: DTensor, pg: Optional[dist.ProcessGroup], device: Optional[torch.device], _: Any, ) -> DTensor: if len(obj.size()) == 0: return obj if obj.device != torch.device("cpu"): ret = cast(DTensor, obj.to(device="cpu")) else: ret = copy.deepcopy(obj) ret._local_tensor = tensor_func(ret._local_tensor, pg, device, None) return ret ret = _iterate_state_dict( state_dict, _identity_func, dtensor_func, tensor_func, pg=None, device=None, cpu_offload=False, ranks_only=(), type_check=False, ) return ret def _check_state_dict_similarity( state_dict: dict[str, Any], compared_state_dict: dict[str, Any], ) -> bool: """ Given two state_dicts, check if the structures are the same. 
And if a [key, tensor] pair exists in one state_dict, there must be a corresponding pair, [key, other_tensor], in the other state_dict, where tensor and other_tensor have the same size and dtype. Return the check result. """ def tensor_func( obj: torch.Tensor, pg: Optional[dist.ProcessGroup], device: Optional[torch.device], companion_obj: Any, ) -> torch.Tensor: if companion_obj.dtype != obj.dtype or companion_obj.size() != obj.size(): raise CompanionMismatch return obj try: _iterate_state_dict( state_dict, _identity_func, _identity_func, tensor_func, pg=None, device=None, cpu_offload=False, ranks_only=(), companion_obj=compared_state_dict, type_check=False, ) except CompanionMismatch: return False return True class _TensorInfo(NamedTuple): size: torch.Size dtype: torch.dtype def _broadcast_tensors( full_state_dict: dict[str, Any], local_state_dict: dict[str, Any], keys: list[str], device: torch.device, pg: Optional[dist.ProcessGroup] = None, ) -> None: tensors = [] for key in keys: if dist.get_rank() == 0: full_state = full_state_dict[key] assert isinstance(full_state, torch.Tensor) full_tensor = full_state.detach().to(device) else: tensor_info = full_state_dict[key] full_tensor = torch.empty( size=tensor_info.size, device=device, dtype=tensor_info.dtype, ) tensors.append(full_tensor) local_state = local_state_dict.get(key, None) if local_state is None: continue elif isinstance(local_state, DTensor): local_state_dict[key] = (local_state, full_tensor) else: local_state_dict[key] = full_tensor if pg is None: pg = dist.distributed_c10d._get_default_group() if len(tensors) > 1: dist._broadcast_coalesced(pg, tensors, 500, 0) else: dist.broadcast(tensors[0], src=0, group=pg) _distribute_tensors(local_state_dict, keys, device, pg) def _distribute_tensors( local_state_dict: dict[str, Any], keys: list[str], device: torch.device, pg: Optional[dist.ProcessGroup] = None, ) -> None: if pg is None: pg = dist.distributed_c10d._get_default_group() for key in keys: _local_state = local_state_dict.get(key, None) if _local_state is None or torch.is_tensor(_local_state): continue local_state = _local_state[0] full_tensor = _local_state[1] shape, offset = compute_local_shape_and_global_offset( full_tensor.shape, local_state.device_mesh, local_state.placements ) slices = [ slice(cur_offset, cur_offset + cur_shape) for cur_shape, cur_offset in zip(shape, offset, strict=False) ] if local_state.is_meta: # Use .clone() here rather than view to clone and return only the sliced portion, minimizing memory access and cost. local_tensor = full_tensor[slices].detach().clone() # TODO: currently, we cannot handle strided sharding if the dp dimension is not even. For example, # one of the cases that is not yet supported is when placements = (Shard(0), _StridedShard(0, sf=2)). ret = DTensor.from_local( local_tensor, local_state.device_mesh, local_state.placements, shape=local_state.shape, stride=local_state.stride(), ) else: ret = local_state # Copy full_tensor[slices] into local_state.to_local() to reduce memory footprint. ret.to_local().copy_(full_tensor[slices]) local_state_dict[key] = ret def _broadcast_state_dict( full_state_dict: dict[str, Any], local_state_dict: dict[str, Any], device: torch.device, pg: Optional[dist.ProcessGroup] = None, strict: bool = False, cpu_offload: bool = False, ) -> None: # Broadcast from rank0's `full_state_dict` to all ranks' `local_state_dict`. # If strict is True, any keys in `local_state_dict` but not in `full_state_dict` # will be removed from `local_state_dict`.
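# The broadcast proceeds in two phases below: rank0 first broadcasts a
# metadata dict (non-tensor values plus _TensorInfo(size, dtype) placeholders)
# via broadcast_object_list, then the tensors themselves are broadcast in
# batches and, for DTensor targets, re-sharded locally by _distribute_tensors.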
ret = {} if dist.get_rank() == 0: for key, value in full_state_dict.items(): if not torch.is_tensor(value): ret[key] = value elif value.dim() == 0: ret[key] = value.cpu() else: ret[key] = _TensorInfo(value.size(), value.dtype) broadcast_list = [ret] dist.broadcast_object_list(broadcast_list, src=0, group=pg) ret = broadcast_list[0] # Gather values keys = [] local_state_dict_keys = set(local_state_dict.keys()) global_keys = set() for key, value in ret.items(): global_keys.add(key) if not isinstance(value, _TensorInfo): if key in local_state_dict: local_state_dict[key] = value continue if dist.get_rank() == 0: ret[key] = full_state_dict[key] keys.append(key) # Broadcast every tensor to avoid OOM for now. if len(keys) >= 1: _broadcast_tensors(ret, local_state_dict, keys, device, pg) if cpu_offload: for key in keys: local_state_dict[key] = local_state_dict[key].cpu() keys.clear() if strict: if missing_keys := (local_state_dict_keys - global_keys): for key in missing_keys: local_state_dict.pop(key) if keys: _broadcast_tensors(ret, local_state_dict, keys, device, pg) if cpu_offload: for key in keys: local_state_dict[key] = local_state_dict[key].cpu() def _distribute_state_dict( full_state_dict: dict[str, Any], local_state_dict: dict[str, Any], device: torch.device, pg: Optional[dist.ProcessGroup] = None, ) -> None: # Full_state_dict = True, broadcast_from_rank0 = False here. Each rank has # full_state_dict. Skip the broadcast in ``_broadcast_state_dict`` and # distribute tensors in each rank for key, value in full_state_dict.items(): if key not in full_state_dict: continue if not torch.is_tensor(value): local_state_dict[key] = value elif value.dim() == 0: local_state_dict[key] = value.cpu() else: assert isinstance(value, torch.Tensor) local_state = local_state_dict.get(key, None) if local_state is None: continue elif isinstance(local_state, DTensor): local_state_dict[key] = distribute_tensor( value.detach().to(device), local_state.device_mesh, local_state.placements, ) else: local_state_dict[key] = value.detach().to(device) # These APIs are from torch.distributed.checkpoint. # TODO: We should consolidate the code here as some not all modules can depend on # DCP. PATH_ITEM = Union[str, int] OBJ_PATH = tuple[PATH_ITEM, ...] FLATTEN_MAPPING = dict[str, OBJ_PATH] STATE_DICT_TYPE = dict[str, Any] CONTAINER_TYPE = MutableMapping[PATH_ITEM, Any] def _traverse_state_dict( state_dict: STATE_DICT_TYPE, visitor: Callable[[OBJ_PATH, Any], None], ) -> None: """ Invoke ``visitor`` for each value recursively in ``state_dict``. Mapping, list, and tuple will be flattened and other value types are treated as the terminal values and will invoke ``visitor``. """ def _traverse_obj(path: OBJ_PATH, value: Any) -> None: if isinstance(value, Mapping): for k, v in value.items(): _traverse_obj(path + (str(k),), v) elif isinstance(value, (list, tuple)): for i, v in enumerate(value): _traverse_obj(path + (i,), v) else: visitor(path, value) for key, value in state_dict.items(): _traverse_obj((str(key),), value) def _flatten_state_dict( state_dict: STATE_DICT_TYPE, ) -> tuple[STATE_DICT_TYPE, FLATTEN_MAPPING]: """ Flatten ``state_dict`` made of nested dicts and lists into a top level dictionary. Use ``unflatten_state_dict`` to revert this process. Returns: A tuple with the flatten state_dict and a mapping from original to new state_dict. N.B. The new keys are derived from the object paths, joined by dot. For example: ``{ 'a': {'b':...}}`` results in the key `a.b`. 
""" flattened: STATE_DICT_TYPE = {} mappings: FLATTEN_MAPPING = {} def flat_copy(path: OBJ_PATH, value: Any) -> None: new_fqn = ".".join(map(str, path)) if new_fqn in flattened: raise ValueError(f"duplicated flatten key {new_fqn}") flattened[new_fqn] = value mappings[new_fqn] = path _traverse_state_dict(state_dict, flat_copy) return flattened, mappings def _set_element(root_dict: STATE_DICT_TYPE, path: OBJ_PATH, value: Any) -> None: """Set ``value`` in ``root_dict`` along the ``path`` object path.""" cur_container = cast(CONTAINER_TYPE, root_dict) def extend_list(lst: list[Any], idx: int) -> None: while len(lst) <= idx: lst.append(None) for i in range(1, len(path)): prev_key = path[i - 1] key = path[i] def_val: CONTAINER_TYPE | list[Any] = {} if type(key) == str else [] if isinstance(cur_container, Mapping): cur_container = cast(CONTAINER_TYPE, cur_container.setdefault(prev_key, def_val)) else: extend_list(cur_container, prev_key) if cur_container[prev_key] is None: cur_container[prev_key] = def_val cur_container = cur_container[prev_key] key = path[-1] if type(key) == int: extend_list(cast(list[Any], cur_container), key) cur_container[key] = value def _unflatten_state_dict(state_dict: STATE_DICT_TYPE, mapping: FLATTEN_MAPPING) -> STATE_DICT_TYPE: """Restore the original nested state_dict according to ``mapping`` and the flattened ``state_dict``.""" nested: STATE_DICT_TYPE = {} for key, value in state_dict.items(): _set_element(nested, mapping[key], value) return nested ================================================ FILE: verl_distillation/verl/third_party/torch/distributed/checkpoint/__init__.py ================================================ # official torch 2.6.0 set_model_state_dict API leads to OOM # this is a copy of torch/distributed/checkpoint from torch 2.7.0 # From PyTorch: # Copyright (c) 2016- Facebook, Inc (Adam Paszke) # Copyright (c) 2014- Facebook, Inc (Soumith Chintala) # Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert) # Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu) # Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu) # Copyright (c) 2011-2013 NYU (Clement Farabet) # Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston) # Copyright (c) 2006 Idiap Research Institute (Samy Bengio) # Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz) # From Caffe2: # Copyright (c) 2016-present, Facebook Inc. All rights reserved. # All contributions by Facebook: # Copyright (c) 2016 Facebook Inc. # All contributions by Google: # Copyright (c) 2015 Google Inc. # All rights reserved. # All contributions by Yangqing Jia: # Copyright (c) 2015 Yangqing Jia # All rights reserved. # All contributions by Kakao Brain: # Copyright 2019-2020 Kakao Brain # All contributions by Cruise LLC: # Copyright (c) 2022 Cruise LLC. # All rights reserved. # All contributions by Tri Dao: # Copyright (c) 2024 Tri Dao. # All rights reserved. # All contributions by Arm: # Copyright (c) 2021, 2023-2024 Arm Limited and/or its affiliates # All contributions from Caffe: # Copyright(c) 2013, 2014, 2015, the respective contributors # All rights reserved. # All other contributions: # Copyright(c) 2015, 2016 the respective contributors # All rights reserved. # Caffe2 uses a copyright model similar to Caffe: each contributor holds # copyright over their contributions to Caffe2. The project versioning records # all such contribution and copyright details. 
If a contributor wants to further # mark their specific copyright on a particular contribution, they should # indicate their copyright solely in the commit message of the change when it is # committed. # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America # and IDIAP Research Institute nor the names of its contributors may be # used to endorse or promote products derived from this software without # specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. ================================================ FILE: verl_distillation/verl/third_party/torch/distributed/checkpoint/state_dict.py ================================================ # official torch 2.6.0 set_model_state_dict API leads to OOM # this is a copy of torch/distributed/checkpoint from torch 2.7.0 # From PyTorch: # Copyright (c) 2016- Facebook, Inc (Adam Paszke) # Copyright (c) 2014- Facebook, Inc (Soumith Chintala) # Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert) # Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu) # Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu) # Copyright (c) 2011-2013 NYU (Clement Farabet) # Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston) # Copyright (c) 2006 Idiap Research Institute (Samy Bengio) # Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz) # From Caffe2: # Copyright (c) 2016-present, Facebook Inc. All rights reserved. # All contributions by Facebook: # Copyright (c) 2016 Facebook Inc. # All contributions by Google: # Copyright (c) 2015 Google Inc. # All rights reserved. # All contributions by Yangqing Jia: # Copyright (c) 2015 Yangqing Jia # All rights reserved. # All contributions by Kakao Brain: # Copyright 2019-2020 Kakao Brain # All contributions by Cruise LLC: # Copyright (c) 2022 Cruise LLC. # All rights reserved. # All contributions by Tri Dao: # Copyright (c) 2024 Tri Dao. # All rights reserved. # All contributions by Arm: # Copyright (c) 2021, 2023-2024 Arm Limited and/or its affiliates # All contributions from Caffe: # Copyright(c) 2013, 2014, 2015, the respective contributors # All rights reserved. 
# All other contributions: # Copyright(c) 2015, 2016 the respective contributors # All rights reserved. # Caffe2 uses a copyright model similar to Caffe: each contributor holds # copyright over their contributions to Caffe2. The project versioning records # all such contribution and copyright details. If a contributor wants to further # mark their specific copyright on a particular contribution, they should # indicate their copyright solely in the commit message of the change when it is # committed. # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America # and IDIAP Research Institute nor the names of its contributors may be # used to endorse or promote products derived from this software without # specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
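# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the upstream torch source): this
# module mirrors the public torch.distributed.checkpoint.state_dict API.
# Assuming `model` is an FSDP-wrapped nn.Module and `optim` its optimizer:
#
#   from verl.third_party.torch.distributed.checkpoint.state_dict import (
#       StateDictOptions,
#       get_state_dict,
#       set_model_state_dict,
#   )
#
#   opts = StateDictOptions(full_state_dict=True, cpu_offload=True)
#   model_sd, optim_sd = get_state_dict(model, optim, options=opts)
#   # ...persist model_sd/optim_sd, reload the full dict on rank 0, then...
#   set_model_state_dict(
#       model,
#       model_sd,
#       options=StateDictOptions(full_state_dict=True, broadcast_from_rank0=True),
#   )
# ---------------------------------------------------------------------------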
# ruff: noqa: B028, UP038, UP007, E721 # mypy: allow-untyped-defs import contextlib import functools import gc import warnings from collections.abc import Generator, Iterable from dataclasses import asdict, dataclass, field from itertools import chain from typing import Any, Callable, Optional, Union, cast, no_type_check import torch import torch.distributed as dist import torch.nn as nn from torch.distributed._shard.sharded_tensor import ShardedTensor from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( _CHECKPOINT_PREFIX, ) from torch.distributed.fsdp import ( FullOptimStateDictConfig, FullStateDictConfig, OptimStateDictConfig, ShardedOptimStateDictConfig, ShardedStateDictConfig, StateDictConfig, StateDictType, ) from torch.distributed.fsdp import ( FullyShardedDataParallel as FSDP, ) from torch.distributed.fsdp._common_utils import ( FSDP_WRAPPED_MODULE, _get_module_fsdp_state_if_fully_sharded_module, ) from torch.distributed.tensor import DTensor from torch.nn.modules.module import _IncompatibleKeys from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils._pytree import tree_map_only from verl.third_party.torch.distributed._state_dict_utils import ( _broadcast_state_dict, _distribute_state_dict, _flatten_state_dict, _gather_state_dict, _offload_state_dict_to_cpu, _unflatten_state_dict, ) __all__ = [ "FQNS_T", "PrimitiveType", "ValueType", "DictValueType", "ListDictValueType", "OptimizerStateType", "StateDictOptions", "get_model_state_dict", "get_optimizer_state_dict", "get_state_dict", "set_model_state_dict", "set_optimizer_state_dict", "set_state_dict", ] _FLAT_PARAM = "_flat_param" _PG = "param_groups" _PARAMS = "params" _STATE = "state" FQNS_T = set[str] PrimitiveType = Union[DTensor, ShardedTensor, torch.Tensor, int, float, str] ValueType = Union[PrimitiveType, list[PrimitiveType], tuple[PrimitiveType], dict[str, "ValueType"]] DictValueType = dict[str, ValueType] ListDictValueType = list[DictValueType] OptimizerStateType = dict[str, DictValueType | ListDictValueType] _patched_state_dict: set[Callable] = set() @contextlib.contextmanager def _gc_context(): is_enabled = gc.isenabled() gc.disable() try: yield finally: if is_enabled: gc.enable() @dataclass class StateDictOptions: """ This dataclass specifies how get_state_dict/set_state_dict will work. - ``full_state_dict``: if this is set to True, all the tensors in the returned state_dict will be gathered. No ShardedTensor and DTensor will be in the returned state_dict. - ``cpu_offload``: offload all the tensors to cpu. To prevent CPU OOM, if ``full_state_dict`` is also true, then only the rank0 will get the state_dict and all other ranks will get empty state_dict. - ``ignore_frozen_params``: if the value is True, the returned state_dict won't contain any frozen parameters -- the ``requires_grad`` is False. The default value is False. - ``keep_submodule_prefixes`` (deprecated): when ``submodules`` is not None, this option indicates whether to keep the submodule prefixes from the state_dict keys. For example, if the submodule is ``module.pretrain`` and the full FQN of the parameter is ``pretrain.layer1.weight``. When this option is True, the parameter's key in the returned state_dict will be ``pretrain.layer1.weight``. If the option is False, the key will be ``layer1.weight``. Note that if ``keep_submodule_prefixes`` is False, there may be conflicting FQNs, hence there should be only one submodule in ``submodules``.
- ``strict``: the ``strict`` option when ``set_state_dict`` calls model.load_state_dict(). - ``broadcast_from_rank0``: when the option is True, rank0 should receive a full state_dict and will broadcast the tensors in the state_dict/optim_state_dict one by one to other ranks. Other ranks will receive the tensors and shard according to the local shards in the model and optimizer. ``full_state_dict`` must be set to True when using this option. This option currently only supports DTensor, not the legacy ShardedTensor. """ full_state_dict: bool = False cpu_offload: bool = False ignore_frozen_params: bool = False keep_submodule_prefixes: bool = True strict: bool = True broadcast_from_rank0: bool = False flatten_optimizer_state_dict: bool = False dsd_fqn_modifiers: str = "_fqn_modifiers" @dataclass class _StateDictInfo(StateDictOptions): fqn_param_mapping: dict[ str | torch.Tensor, FQNS_T | torch.Tensor, ] = field(default_factory=dict) shared_params_mapping: dict[ str | torch.Tensor, FQNS_T | torch.Tensor, ] = field(default_factory=dict) submodule_prefixes: set[str] = field(default_factory=set) handle_model: bool = True handle_optim: bool = True fsdp_context: Callable = contextlib.nullcontext fsdp_modules: list[nn.Module] = field(default_factory=list) @functools.cache def _get_fqns( model: nn.Module, name: str, dsd_fqn_modifiers: str = "_fqn_modifiers", skip_ddp_prefix: bool = True, skip_compiler_prefix: bool = True, ) -> FQNS_T: """ This API is used to convert the name of a parameter to the FQNs. For FSDP without `use_orig_params`, the name of FlatParameter can be mapped to multiple original parameters. As a result, the return type of this function is `set[str]`. Args: model (nn.Module): the root model. name (str): the name skip_ddp_prefix (bool): whether to skip DDP's `module` prefix Returns: The canonical FQNs based on the model traversal. """ # Remove the checkpoint prefix, if it exists. name = name.replace(_CHECKPOINT_PREFIX, "") if "." not in name: return {name} obj_names = name.split(".") fqn_obj_names = [] curr_obj = model for i, curr_obj_name in enumerate(obj_names): if isinstance(curr_obj, DDP): assert curr_obj_name == "module" curr_obj = curr_obj.module if not skip_ddp_prefix: fqn_obj_names.append(curr_obj_name) elif isinstance(curr_obj, FSDP): if i < len(obj_names) - 1 and obj_names[i + 1] == _FLAT_PARAM: prefix = ".".join(fqn_obj_names) flat_param = getattr(curr_obj, _FLAT_PARAM) if prefix: prefix = f"{prefix}." return {f"{prefix}{fqn}" for fqn in flat_param._fqns} curr_obj = getattr(curr_obj, FSDP_WRAPPED_MODULE) if curr_obj_name != FSDP_WRAPPED_MODULE: fqn_obj_names.append(curr_obj_name) curr_obj = getattr(curr_obj, curr_obj_name) elif isinstance(curr_obj, torch._dynamo.eval_frame.OptimizedModule): assert curr_obj_name == "_orig_mod" curr_obj = curr_obj._orig_mod if not skip_compiler_prefix: fqn_obj_names.append(curr_obj_name) else: # In some modules, _fqn_modifiers would not be shown in the state_dict keys, # skip them in the fqn to ensure the state dict loads successfully for them.
if hasattr(curr_obj, dsd_fqn_modifiers): if removed_fqn := getattr(curr_obj, dsd_fqn_modifiers)().get(curr_obj_name): if hasattr(curr_obj, removed_fqn): curr_obj = getattr(curr_obj, removed_fqn) fqn_obj_names.append(curr_obj_name) if curr_obj_name == nn.modules.module._EXTRA_STATE_KEY_SUFFIX: if i != len(obj_names) - 1: raise RuntimeError("Expect `_extra_state` to be the last obj name") else: curr_obj = getattr(curr_obj, curr_obj_name) return {".".join(fqn_obj_names).replace(_CHECKPOINT_PREFIX, "")} class _EXTRA_STATE: pass def _iterate_valid_model_state(model, dsd_fqn_modifiers="_fqn_modifiers"): visited_modules: set[nn.Module] = set() def recurse(module: nn.Module, curr_fqn: str) -> Generator: visited_modules.add(module) curr_fqn = f"{curr_fqn}." if curr_fqn else "" for name, submodule in module.named_children(): if submodule in visited_modules: continue # if users have state_dict_hooks in their model, they can add the state_dict key changes # at dsd_fqn_modifiers in the input to align with the behavior of the state_dict_hook if hasattr(module, dsd_fqn_modifiers) and name in getattr(module, dsd_fqn_modifiers)().values(): # skip _fqn_modifiers here, thus removing the last `.` added new_fqn = curr_fqn[:-1] else: new_fqn = f"{curr_fqn}{name}" yield from recurse(submodule, new_fqn) for name, obj in chain(module.named_buffers(recurse=False), module.named_parameters(recurse=False)): if name in module._non_persistent_buffers_set: continue new_fqn = f"{curr_fqn}{name}" yield new_fqn, obj if getattr(module.__class__, "get_extra_state", nn.Module.get_extra_state) != nn.Module.get_extra_state: new_fqn = f"{curr_fqn}{nn.modules.module._EXTRA_STATE_KEY_SUFFIX}" yield new_fqn, _EXTRA_STATE() yield from recurse(model, "") def _verify_options( model: nn.Module, optims: tuple[torch.optim.Optimizer, ...], optim_only: bool, *, submodules: Optional[set[nn.Module]] = None, options: Optional[StateDictOptions] = None, ) -> _StateDictInfo: """ Verify the model and options passed by the user and generate _StateDictInfo. """ if submodules: warnings.warn( "Getting submodules only model/optim state_dict is deprecated and " "will be removed in 2.5. This feature can be achieved by manually " "filtering out the state_dict returned from get_state_dict.", FutureWarning, ) if optim_only and not optims: raise RuntimeError("Optimizers are not passed in but optim_only is set to True.") options = options or StateDictOptions() fqn_param_mapping: dict[str | torch.Tensor, set[str] | torch.Tensor] = {} shared_params_mapping: dict[str | torch.Tensor, set[str] | torch.Tensor] = {} for name, param in _iterate_valid_model_state(model): if isinstance(param, _EXTRA_STATE): continue fqns = _get_fqns(model, name) fqn = fqn_param_mapping.get(param, None) if fqn is not None: cast(set[str], fqn_param_mapping[param]).update(fqns) shared_params_mapping[param] = fqn_param_mapping[param] else: # We need to make a copy as _get_fqns is lru_cached fqn_param_mapping[param] = fqns.copy() for fqn in fqns: if not isinstance(param, _EXTRA_STATE): fqn_param_mapping[fqn] = param for param_, fqns_ in list(shared_params_mapping.items()): for fqn in fqns_: shared_params_mapping[fqn] = cast(torch.Tensor, param_) submodule_prefixes: set[str] = set() if submodules: submodules = set(submodules) for name, module in model.named_modules(): if module not in submodules: continue fqns = _get_fqns(model, name) assert len(fqns) == 1, "Submodule FQN should only have 1 instance" submodule_prefixes.update(f"{fqn}."
for fqn in fqns) if options.broadcast_from_rank0 and not options.full_state_dict: raise ValueError("full_state_dict must be True when broadcast_from_rank0 is True.") fsdp_modules = FSDP.fsdp_modules(model) state_dict_config: StateDictConfig optim_state_dict_config: OptimStateDictConfig fsdp_context: Callable if fsdp_modules: # FSDP API only work if at least one FSDP instance exists. if options.full_state_dict: state_dict_config = FullStateDictConfig(offload_to_cpu=options.cpu_offload, rank0_only=options.cpu_offload) optim_state_dict_config = FullOptimStateDictConfig( offload_to_cpu=options.cpu_offload, rank0_only=(options.cpu_offload or options.broadcast_from_rank0), ) state_dict_type = StateDictType.FULL_STATE_DICT else: state_dict_config = ShardedStateDictConfig( offload_to_cpu=options.cpu_offload, ) optim_state_dict_config = ShardedOptimStateDictConfig( offload_to_cpu=options.cpu_offload, ) state_dict_type = StateDictType.SHARDED_STATE_DICT @contextlib.contextmanager def fsdp_state_dict_type_without_warning( module, state_dict_type, state_dict_config, optim_state_dict_config, ): with warnings.catch_warnings(): warnings.filterwarnings("ignore", message="FSDP.state_dict_type", category=FutureWarning) with FSDP.state_dict_type( module=module, state_dict_type=state_dict_type, state_dict_config=state_dict_config, optim_state_dict_config=optim_state_dict_config, ): yield fsdp_context = functools.partial( fsdp_state_dict_type_without_warning, module=model, state_dict_type=state_dict_type, state_dict_config=state_dict_config, optim_state_dict_config=optim_state_dict_config, ) else: fsdp_context = contextlib.nullcontext return _StateDictInfo( **asdict(options), fqn_param_mapping=fqn_param_mapping, shared_params_mapping=shared_params_mapping, submodule_prefixes=submodule_prefixes, fsdp_context=fsdp_context, fsdp_modules=cast(list[nn.Module], fsdp_modules), handle_model=not optim_only, handle_optim=(len(optims) > 0), ) def _verify_state_dict( model_state_dict: dict[str, ValueType], optim_state_dict: OptimizerStateType, info: _StateDictInfo, ) -> None: for module in info.fsdp_modules: fsdp_state = _get_module_fsdp_state_if_fully_sharded_module(module) assert fsdp_state is not None, "Expected a fsdp_state with a fsdp module." # Verify if the model_state_dict and optim_state_dict are valid. This API # should give the users an explicit error message to debug or report. if ( info.handle_model and not model_state_dict and not info.submodule_prefixes and not info.ignore_frozen_params and not (info.cpu_offload and info.full_state_dict) and info.strict and not info.broadcast_from_rank0 ): raise RuntimeError( "The option indicates that model state_dict is required to save " "or load, but model state_dict is empty." f"rank = {dist.get_rank()=}." ) if info.handle_optim: if not optim_state_dict and not (info.cpu_offload and info.full_state_dict) and (not info.broadcast_from_rank0): raise RuntimeError( "The option indicates that model state_dict is required to save, " f"or load but optim state_dict is empty. {optim_state_dict}" ) for key in model_state_dict.keys(): if _FLAT_PARAM in key: raise RuntimeError(f"{key} contains {_FLAT_PARAM}. 
This can happen if the model is not the root module.") def _state_dict_fn(obj: nn.Module | torch.optim.Optimizer, api: str) -> Callable: call = getattr(obj, api) if call in _patched_state_dict: call = functools.partial(getattr(obj.__class__, api), self=obj) return call def _maybe_full_or_cpu_state_dict(state_dict: dict[str, Any], info: _StateDictInfo) -> dict[str, Any]: if info.full_state_dict: ranks_only = () if (not info.cpu_offload or not torch.distributed.is_initialized()) else (0,) return _gather_state_dict(state_dict, cpu_offload=info.cpu_offload, ranks_only=ranks_only) elif info.cpu_offload: return _offload_state_dict_to_cpu(state_dict) else: return state_dict @torch.no_grad() def _get_model_state_dict(model: nn.Module, info: _StateDictInfo) -> dict[str, ValueType]: if not info.handle_model: return {} with info.fsdp_context(): state_dict = _state_dict_fn(model, "state_dict")() for key in list(state_dict.keys()): fqns = _get_fqns(model, key) assert len(fqns) == 1, (key, fqns) fqn = next(iter(fqns)) if fqn != key: # As we only support FSDP, DDP, and TP, the only cases are # wrapper-based DDP and compiler. Verify if the assumption # is correct. def verify(key, fqn) -> bool: if len(fqn) >= len(key): return False fqn_split = fqn.split(".") key_split = key.split(".") fqn_idx = 0 for key_idx, key_name in enumerate(key_split): if key_name == fqn_split[fqn_idx]: fqn_idx += 1 if fqn_idx == len(fqn_split): return key_idx == len(key_split) - 1 elif key_name in ("module", "_orig_mod"): continue else: return False return True if not verify(key, fqn): raise RuntimeError(f"An unexpected key, {key}, exists. FQN is {fqn}") state_dict[fqn] = state_dict.pop(key) if info.submodule_prefixes: new_state_dict: dict[str, ValueType] = {} # TODO: make this faster. for fqn in state_dict.keys(): for prefix in info.submodule_prefixes: if not fqn.startswith(prefix): continue if info.keep_submodule_prefixes: new_state_dict[fqn] = state_dict[fqn] else: new_fqn = fqn[len(prefix) :] new_state_dict[new_fqn] = state_dict[fqn] state_dict = new_state_dict if info.ignore_frozen_params: for key, param in model.named_parameters(): if param.requires_grad: continue fqns = _get_fqns(model, key) for fqn in fqns: state_dict.pop(fqn) for key, p in list(state_dict.items()): if torch.is_tensor(p) and p.is_meta: state_dict.pop(key) return _maybe_full_or_cpu_state_dict(state_dict, info) @torch.no_grad() def _load_model_state_dict( model: nn.Module, state_dict: dict[str, ValueType], info: _StateDictInfo, ) -> _IncompatibleKeys: if not info.handle_model or (not state_dict and not info.broadcast_from_rank0): return _IncompatibleKeys({}, {}) local_state_dict = {} for key, value in _iterate_valid_model_state(model, info.dsd_fqn_modifiers): fqns = _get_fqns(model, key, info.dsd_fqn_modifiers) fqns_with_prefix = _get_fqns( model, key, info.dsd_fqn_modifiers, skip_ddp_prefix=False, skip_compiler_prefix=False, ) for fqn, fqn_with_prefix in zip(fqns, fqns_with_prefix, strict=False): if (not info.broadcast_from_rank0 or dist.get_rank() == 0) and fqn != fqn_with_prefix: load_value = state_dict.pop(fqn, None) if load_value is None: if info.strict: raise RuntimeError(f"Missing key: {fqn}.") else: state_dict[fqn_with_prefix] = load_value local_state_dict[fqn_with_prefix] = value assign = False if info.broadcast_from_rank0 or info.full_state_dict: devices = set() for key, value in local_state_dict.items(): if torch.is_tensor(value) and value.dim() > 0: devices.add(value.device) # In lora state_dict, there could be multiple devices, with meta device 
inside. # Take the other device in the broadcast/distribute, and set assign to True if torch.device("meta") in devices: devices.remove(torch.device("meta")) assign = True if len(devices) == 0: devices.add(dist.distributed_c10d._get_pg_default_device()) elif len(devices) > 1: raise ValueError("Multiple devices found") if info.broadcast_from_rank0: _broadcast_state_dict( state_dict, local_state_dict, device=devices.pop(), strict=info.strict, cpu_offload=info.cpu_offload, ) elif info.full_state_dict: _distribute_state_dict(state_dict, local_state_dict, device=devices.pop()) for fqn, local_state in local_state_dict.items(): state_dict[fqn] = local_state with info.fsdp_context(): return cast( _IncompatibleKeys, _state_dict_fn(model, "load_state_dict")(state_dict=state_dict, strict=info.strict, assign=assign), ) def _init_optim_state(optim: torch.optim.Optimizer) -> None: """ Initialize optim states by calling the step() with zero grads. """ if optim.state: # The optimizer state is initialized. return # There are some stateless optimizers like SGD. These optimizers will # not return in the above condition. So if gradients exist, we should also # return. If gradients do not exist, the following initialization should # not disturb SGD because the gradients and lr are both zero. for param_group in optim.param_groups: for param in param_group[_PARAMS]: if param.grad is not None: return for param_group in optim.param_groups: for param in param_group[_PARAMS]: if param.requires_grad: param.grad = torch.zeros_like(param) # Some optimizers will update parameters regardless of grads due to lr, so # set lr to zero when calling `step()`. lrs = [] for param_group in optim.param_groups: if "lr" in param_group: lrs.append(param_group["lr"]) param_group["lr"] = torch.tensor(0.0) if isinstance(param_group["lr"], torch.Tensor) else 0.0 optim.step(closure=None) # Whether to recover the "lr" should not matter too much as we will # restore it from the checkpoint later. for param_group in optim.param_groups: if "lr" in param_group: param_group["lr"] = lrs.pop(0) optim.zero_grad(set_to_none=True) def _flatten_optim_state_dict(state_dict: OptimizerStateType) -> dict[str, ValueType]: """ This API flattens the optimizer state_dict to support optimizer resharding for MPMD, e.g., pipeline parallelism. Without the API, the original optimizer state_dict looks like: { "state": { "layer1.weight": { "step": 10, "exp_avg": SomeTensor, "exp_avg_sq": SomeTensor }, "layer2.weight": { "step": 10, "exp_avg": SomeTensor, "exp_avg_sq": SomeTensor }, }, "param_group": [ { "lr": 0.0, "betas": (0.9, 0.95), ..., "params": ["layer1.weight", "layer2.weight"] } ] } With this API, the optimizer state_dict looks like: { "state.layer1.weight.step": 10, "state.layer2.weight.step": 10, "state.layer1.weight.exp_avg": SomeTensor, "state.layer2.weight.exp_avg": SomeTensor, "state.layer1.weight.exp_avg_sq": SomeTensor, "state.layer2.weight.exp_avg_sq": SomeTensor, "param_group.layer1.weight.lr" : 0.1, "param_group.layer2.weight.lr" : 0.1, "param_group.layer1.weight.betas" : (0.9, 0.95), "param_group.layer2.weight.betas" : (0.9, 0.95), } Note that if any of the values is a container, like the betas in the example, this API won't flatten it. """ def _raise_if_type_not_supported(v): if not isinstance(v, (torch.Tensor, int, float)): raise NotImplementedError( f"Flattening optimizer state_dict only supports tensor, int, float states now. Type is {type(v)}."
) ret: dict[str, ValueType] = {} for fqn, state in cast(DictValueType, state_dict[_STATE]).items(): for k, v in cast(DictValueType, state).items(): _raise_if_type_not_supported(v) ret[f"{_STATE}.{fqn}.{k}"] = v for param_group in cast(ListDictValueType, state_dict[_PG]): fqns = param_group.pop(_PARAMS) for fqn in cast(list[str], fqns): for k, v in param_group.items(): ret[f"{_PG}.{fqn}.{k}"] = v return ret def _unflatten_optim_state_dict( optim: torch.optim.Optimizer, state_dict: dict[str, ValueType], info: _StateDictInfo, ) -> OptimizerStateType: """ This API unflattens the state_dict generated by _flatten_optim_state_dict(). See the docstring of _flatten_optim_state_dict() for more detail. """ state: DictValueType = {} pg_state: ListDictValueType = [] return_osd: OptimizerStateType = {_STATE: state, _PG: pg_state} for param_group in optim.param_groups: pg_state.append({_PARAMS: []}) for param in param_group[_PARAMS]: for fqn in info.fqn_param_mapping[param]: # If a parameter is shared, only one of the FQNs will be used. # So we need to verify whether this fqn is actually used in # the state_dict. if fqn in info.shared_params_mapping: in_params = False for k in param_group.keys(): if k == _PARAMS: continue flatten_key = f"{_PG}.{fqn}.{k}" if flatten_key in state_dict: in_params = True break else: in_params = True if not in_params: continue params = pg_state[-1][_PARAMS] assert isinstance(params, list) # typing params.append(fqn) if not param.requires_grad: continue state[fqn] = {} for state_name in optim.state[param].keys(): cast(DictValueType, state[fqn])[state_name] = state_dict[f"{_STATE}.{fqn}.{state_name}"] first_param_fqn = cast(list[str], pg_state[-1][_PARAMS])[0] for k in param_group.keys(): if k == _PARAMS: continue value = state_dict[f"{_PG}.{first_param_fqn}.{k}"] if k not in pg_state[-1]: pg_state[-1][k] = value elif pg_state[-1][k] != value: raise RuntimeError( "All the parameters in the same parameter group should have " f"the same saved param_group value. But {first_param_fqn}.{k} " f"is {value} while other(s) is {pg_state[-1][k]}." ) return return_osd @torch.no_grad() def _get_optim_state_dict( model: nn.Module, optimizers: tuple[torch.optim.Optimizer, ...], info: _StateDictInfo, ) -> OptimizerStateType: if not info.handle_optim: return {} optim_state_dict: OptimizerStateType = {_STATE: {}, _PG: []} for optim in optimizers: _init_optim_state(optim) osd = _state_dict_fn(optim, "state_dict")() if info.fsdp_modules: with info.fsdp_context(): osd = FSDP.optim_state_dict(model, optim, osd) # We need to specially handle FlatParameter FSDP as # FlatParameter FSDP converts the FQNs. # There are no easy ways to do this conversion systematically. # We can only use a string replacement without correctness check.
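# Concretely, the replacement below strips torch.compile's "_orig_mod."
# prefix from every optimizer state key and from the params list of each
# param_group.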
if not osd: continue for k in list(osd[_STATE].keys()): if "_orig_mod" in k: osd[_STATE][k.replace("_orig_mod.", "")] = osd[_STATE].pop(k) for g in osd[_PG]: params = [k.replace("_orig_mod.", "") for k in g[_PARAMS]] g[_PARAMS] = params else: params = list(chain.from_iterable(g[_PARAMS] for g in optim.param_groups)) param_pid_mapping = dict(zip(params, range(len(params)), strict=False)) fqn_pid_mapping = {} for key, param in model.named_parameters(): fqns = _get_fqns(model, key) assert len(fqns) == 1 fqn = next(iter(fqns)) if param not in param_pid_mapping: continue pid = param_pid_mapping[param] fqn_pid_mapping[fqn] = pid fqn_pid_mapping[pid] = fqn for key in list(osd[_STATE].keys()): fqn = fqn_pid_mapping[key] osd[_STATE][fqn] = osd[_STATE].pop(key) for group in osd[_PG]: group[_PARAMS] = [fqn_pid_mapping[pid] for pid in group[_PARAMS]] if not osd: continue cast(DictValueType, optim_state_dict[_STATE]).update(osd[_STATE]) cast(ListDictValueType, optim_state_dict[_PG]).extend(osd[_PG]) if info.flatten_optimizer_state_dict: optim_state_dict = cast(OptimizerStateType, _flatten_optim_state_dict(optim_state_dict)) return _maybe_full_or_cpu_state_dict(optim_state_dict, info) def _split_optim_state_dict( model: nn.Module, optim: torch.optim.Optimizer, optim_state_dict: OptimizerStateType, info: _StateDictInfo, ) -> OptimizerStateType: """ Extract the corresponding optim state_dict from ``optim_state_dict`` for ``optim`` and return the result optim state_dict. Args: model (nn.Module): the root model. optim (torch.optim.Optimizer): the optimizer. optim_state_dict (Dict[str, ValueType]): the superset optim state_dict that contains the optim state_dict of ``optim``. info (_StateDictInfo): state dict information. Returns: The optim state_dict of ``optim``. """ state: DictValueType = {} pg_state: ListDictValueType = [] return_osd: OptimizerStateType = {_STATE: state, _PG: pg_state} pg_mapping: dict[int, int] = {} if all(isinstance(k, int) for k in cast(DictValueType, optim_state_dict[_STATE]).keys()): return optim_state_dict for param_group in optim.param_groups: pg_state.append({_PARAMS: []}) for param in param_group[_PARAMS]: for fqn in info.fqn_param_mapping[param]: if fqn in info.shared_params_mapping: in_params = False for loaded_param_group in cast(ListDictValueType, optim_state_dict[_PG]): if fqn in cast(list[str], loaded_param_group[_PARAMS]): in_params = True break else: in_params = True if not in_params: continue params = pg_state[-1][_PARAMS] assert isinstance(params, list) params.append(fqn) if param.requires_grad: state[fqn] = cast(DictValueType, optim_state_dict[_STATE])[fqn] for loaded_param_group in cast(ListDictValueType, optim_state_dict[_PG]): if fqn in cast(list[str], loaded_param_group[_PARAMS]): pg_mapping[id(loaded_param_group)] = len(return_osd[_PG]) - 1 if len(param_group[_PARAMS]) == 0: # Param_group with empty params. ret = [] for loaded_param_group in cast(ListDictValueType, optim_state_dict[_PG]): if len(cast(list[str], loaded_param_group[_PARAMS])) == 0: ret.append(loaded_param_group) if len(ret) != 1: raise ValueError( "There are param groups that have zero parameters. " "In such a case, DSD only support exactly one param group " "with zero parameters." "But the loaded state_dict has zero or more than one param groups " "that have zero parameters." ) if len(optim_state_dict[_PG]) != len(optim.param_groups): raise ValueError( "When there is a parameter group that has zero parameters, multiple optimizers are not supported." 
) pg_mapping[id(loaded_param_group)] = len(return_osd[_PG]) - 1 for param_group in cast(ListDictValueType, optim_state_dict[_PG]): pg_idx = pg_mapping.get(id(param_group), -1) if pg_idx == -1: continue for key, value in param_group.items(): if key == _PARAMS: continue # TODO: check if value is the same if exists. pg_state[pg_idx][key] = value return return_osd @torch.no_grad() def _load_optim_state_dict( model: nn.Module, optimizers: tuple[torch.optim.Optimizer, ...], state_dict: OptimizerStateType, info: _StateDictInfo, ) -> None: if not info.handle_optim: return for optim in optimizers: _init_optim_state(optim) if state_dict: if _STATE in state_dict: optim_state_dict = _split_optim_state_dict(model, optim, state_dict, info) else: optim_state_dict = _unflatten_optim_state_dict(optim, cast(dict[str, ValueType], state_dict), info) else: optim_state_dict = {} if info.fsdp_modules: # We need to specially handle FlatParameter FSDP as # FlatParameter FSDP converts the FQNs. for original_fqn, _ in model.named_parameters(): fqns = _get_fqns(model, original_fqn) fqns_with_compiler = _get_fqns(model, original_fqn, skip_compiler_prefix=False) if fqns == fqns_with_compiler: continue assert len(fqns) == 1 fqn = fqns.pop() fqn_with_compiler = fqns_with_compiler.pop() for g in optim_state_dict[_PG]: val = cast(dict[str, Any], g) params = [key.replace(fqn, fqn_with_compiler) for key in val[_PARAMS]] val[_PARAMS] = params osd_state = cast(DictValueType, optim_state_dict[_STATE]) for k in list(osd_state.keys()): if fqn in k: osd_state[k.replace(fqn, fqn_with_compiler)] = osd_state.pop(k) with info.fsdp_context(): optim_state_dict = FSDP.optim_state_dict_to_load(model, optim, optim_state_dict) elif info.full_state_dict: info.full_state_dict = False local_state_dict = _get_optim_state_dict(model, (optim,), info) info.full_state_dict = True device = None def _device(t): if t.dim() > 0: nonlocal device if device is None: device = t.device elif device != t.device: raise ValueError("Device mismatch") return t _ = tree_map_only(torch.Tensor, _device, local_state_dict) assert device is not None flatten_osd, osd_mapping = _flatten_state_dict(optim_state_dict) flatten_local_osd, local_osd_mapping = _flatten_state_dict(local_state_dict) if info.broadcast_from_rank0: _broadcast_state_dict(flatten_osd, flatten_local_osd, device=device) else: _distribute_state_dict(flatten_osd, flatten_local_osd, device=device) # The modifications listed seek to address the problem where optim might possess # dissimilar parameters in comparison to optim_state_dict. This is achieved by # incorporating differential parameters within local, which may result in optim # having additional parameters ultimately. for optim_key in flatten_osd.keys(): if optim_key not in flatten_local_osd: assert optim_key in osd_mapping flatten_local_osd[optim_key] = flatten_osd[optim_key] local_osd_mapping[optim_key] = osd_mapping[optim_key] optim_state_dict = _unflatten_state_dict(flatten_local_osd, local_osd_mapping) for pg in optim_state_dict[_PG]: if _PARAMS not in pg: cast(dict[str, ValueType], pg)[_PARAMS] = [] # Note that we do not have to convert the FQN back to param id here if # order in optim.param_groups[idx][_PARAMS] is the same as the one in # optim_state_dict[_PG][idx][_PARAMS]. 
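# torch.optim.Optimizer.load_state_dict() matches saved and current params
# positionally within each param_group, so the FQN keys can be passed
# through unchanged as long as that ordering assumption holds.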
_state_dict_fn(optim, "load_state_dict")(state_dict=optim_state_dict) def get_model_state_dict( model: nn.Module, *, submodules: Optional[set[nn.Module]] = None, options: Optional[StateDictOptions] = None, ) -> dict[str, ValueType]: """ Return the model state_dict of ``model``. See ``get_state_dict`` for the detail usage. Args: model (nn.Module): the nn.Module to the model. submodules (deprecated): Optional[set[nn.Module]]: only return the model parameters that belong to the submodules. options (StateDictOptions): the options to control how model state_dict and optimizer state_dict should be returned. See `StateDictOptions` for the details. Returns: The state_dict for ``model``. :rtype: typing.Dict[str, ValueType] """ with _gc_context(): info = _verify_options( model, (), optim_only=False, submodules=submodules, options=options, ) model_state_dict = _get_model_state_dict(model, info) _verify_state_dict(model_state_dict, {}, info) return model_state_dict def get_optimizer_state_dict( model: nn.Module, optimizers: torch.optim.Optimizer | Iterable[torch.optim.Optimizer], *, submodules: Optional[set[nn.Module]] = None, options: Optional[StateDictOptions] = None, ) -> OptimizerStateType: """ Return the combined state_dict for optimizers. See ``get_state_dict`` for the detail usage. Args: model (nn.Module): the nn.Module to the model. optimizers (Union[None, Optimizer, Iterable[Optimizer]]): The optimizers that are used to optimize ``model``. submodules (deprecated): Optional[set[nn.Module]]: only return the model parameters that belong to the submodules. options (StateDictOptions): the options to control how model state_dict and optimizer state_dict should be returned. See `StateDictOptions` for the details. Returns: The state_dict for ``optimizers``. :rtype: OptimizerStateType """ with _gc_context(): optimizers = (optimizers,) if isinstance(optimizers, torch.optim.Optimizer) else tuple(optimizers) info = _verify_options( model, optimizers, optim_only=True, submodules=submodules, options=options, ) optim_state_dict = _get_optim_state_dict(model, optimizers, info) _verify_state_dict({}, optim_state_dict, info) return optim_state_dict def get_state_dict( model: nn.Module, optimizers: torch.optim.Optimizer | Iterable[torch.optim.Optimizer], *, submodules: Optional[set[nn.Module]] = None, options: Optional[StateDictOptions] = None, ) -> tuple[dict[str, ValueType], OptimizerStateType]: """ Return the model state_dict and optimizers state_dict. ``get_state_dict`` can process any module that is parallelized by PyTorch FSDP/fully_shard, DDP/replicate, tensor_parallel/parallelize_module, and any combination of these parallelisms. The main functions of ``get_state_dict`` are: 1.) returning a model and optimizer state_dict that can be resharded with a different number of trainers and/or different parallelisms. 2.) hiding the parallelism-specific state_dict APIs. Users don't have to call these APIs. 3.) sanity checking the result state_dict. The keys of the result state dictionary are the canonical FQNs (Fully Qualified Names). A canonical FQN refers to the FQN based on a parameter's position in an nn.Module hierarchy. More specifically, a canonical FQN to a parameter is the FQN returned by ``module.named_parameters()`` or ``module.named_buffers()`` when the module is not distributed by any parallelisms. Since the optimizer internally uses parameter IDs to represent a parameter, there will be a conversion from the parameter IDs to the canonical FQNs when calling this API. 
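    For example, a parameter that ``DDP(model).named_parameters()`` reports as
    ``module.layer1.weight`` has the canonical FQN ``layer1.weight`` (the name
    the unwrapped ``model`` itself would report), and that canonical name is
    the key used in the returned state_dicts.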
``get_state_dict`` can also process a module that is not parallelized. In such a case, ``get_state_dict`` only performs one function -- converting the optimizer parameter IDs to the canonical FQNs. Example: >>> # xdoctest: +SKIP >>> import torch >>> from torch.distributed.fsdp import FullyShardedDataParallel as FSDP >>> from torch.nn.parallel import DistributedDataParallel as DDP >>> from torch.distributed.checkpoint.state_dict import get_state_dict >>> fsdp_model = FSDP(copy.deepcopy(model)) >>> fsdp_optim = torch.optim.Adam(model.parameters(), lr=1e-3) >>> ddp_model = DDP(copy.deepcopy(model)) >>> ddp_optim = torch.optim.Adam(model.parameters(), lr=1e-3) >>> ddp_state_dict, ddp_optim_state_dict = get_state_dict(ddp_model, ddp_optim) >>> fsdp_state_dict, fsdp_optim_state_dict = get_state_dict( ... fsdp_model, fsdp_optim ... ) >>> # if we simply call ddp_model.state_dict() and fsdp_model.state_dict(), >>> # the asserts will fail. >>> assert ddp_state_dict == fsdp_state_dict >>> assert ddp_optim_state == fsdp_optim_state_dict Args: model (nn.Module): the nn.Module to the model. optimizers (Union[None, Optimizer, Iterable[Optimizer]]): The optimizers that are used to optimize ``model``. submodules (deprecated): Optional[set[nn.Module]]: only return the model parameters that belong to the submodules. options (StateDictOptions): the options to control how model state_dict and optimizer state_dict should be returned. See `StateDictOptions` for the details. Returns: ``Tuple`` that contain model state_dict and optimizer state_dict. :rtype: typing.Tuple[typing.Dict[str, ValueType], OptimizerStateType] """ with _gc_context(): optimizers = (optimizers,) if isinstance(optimizers, torch.optim.Optimizer) else tuple(optimizers) info = _verify_options( model, optimizers, optim_only=False, submodules=submodules, options=options, ) model_state_dict = _get_model_state_dict(model, info) optim_state_dict = _get_optim_state_dict(model, optimizers, info) _verify_state_dict(model_state_dict, optim_state_dict, info) return model_state_dict, optim_state_dict def _unflatten_model_state_dict( model: nn.Module, state_dict: dict[nn.Module, dict[str, ValueType]] | dict[str, ValueType], ) -> dict[str, ValueType]: if not state_dict: return {} if isinstance(next(iter(state_dict.keys())), nn.Module): warnings.warn( "Passing model_state_dict as a ``Dict[nn.Module, Dict[str, Any]]``" "is deprecated and will be removed in 2.5. If you need this " "feature, please preprocessing the model_state_dict to achieve the " "same functionality.", FutureWarning, ) cast_state_dict = cast(dict[nn.Module, dict[str, ValueType]], state_dict) new_state_dict: dict[str, ValueType] = {} for submodule, sub_state_dict in cast_state_dict.items(): for name, m in model.named_modules(): if m != submodule: continue fqns = _get_fqns(model, name) assert len(fqns) == 1, "FQNs for a submodule should only have 1 element" prefix = f"{next(iter(fqns))}." new_state_dict.update({prefix + subfqn: value for subfqn, value in sub_state_dict.items()}) return new_state_dict else: return cast(dict[str, ValueType], state_dict) def set_model_state_dict( model: nn.Module, model_state_dict: dict[str, ValueType], *, options: Optional[StateDictOptions] = None, ) -> _IncompatibleKeys: """Load the model state_dict. The counterpart of ``get_model_state_dict`` to set the state_dict to the model. See ``set_state_dict`` for the detail usage. Args: model (nn.Module): the nn.Module to the model. model_state_dict: (Dict[str, ValueType]): the model state_dict to load. 
If the key of the ``model_state_dict`` is nn.Module, the key is a submodule of ``model`` and the value should be the state_dict of the submodule. When loading the state_dict, the prefix of the submodule will be append to the state_dict. options (StateDictOptions): the options to control how model state_dict and optimizer state_dict should be loaded. See `StateDictOptions` for the details. Returns: ``NamedTuple`` with ``missing_keys`` and ``unexpected_keys`` fields: * **missing_keys** is a list of str containing the missing keys * **unexpected_keys** is a list of str containing the unexpected keys :type model_state_dict: typing.Dict[str, ValueType] """ model_state_dict: dict[str, ValueType] = _unflatten_model_state_dict(model, model_state_dict) with _gc_context(): info = _verify_options(model, (), optim_only=False, options=options) _verify_state_dict(model_state_dict, {}, info) return _load_model_state_dict(model, model_state_dict, info) def set_optimizer_state_dict( model: nn.Module, optimizers: torch.optim.Optimizer | Iterable[torch.optim.Optimizer], optim_state_dict: OptimizerStateType, *, options: Optional[StateDictOptions] = None, ) -> None: """Load the optimizers state_dict. The counterpart of ``get_optimizer_state_dict`` to set the state_dict to the optimizers. See ``set_state_dict`` for the detail usage. WARN: ``set_optimizer_state_dict`` can only be called before ``backward()`` or after ``step()`` is called on the optimizers. Otherwise, the optimizer states won't be initialized correctly. Args: model (nn.Module): the nn.Module to the model. optimizers (Union[Optimizer, Iterable[Optimizer]]): The optimizers that are used to optimize ``model``. optim_state_dict: OptimizerStateType: the optimizer state_dict to load. options (StateDictOptions): the options to control how model state_dict and optimizer state_dict should be loaded. See `StateDictOptions` for the details. Returns: None :type optim_state_dict: typing.OptimizerStateType """ with _gc_context(): optimizers = (optimizers,) if isinstance(optimizers, torch.optim.Optimizer) else tuple(optimizers) info = _verify_options(model, optimizers, optim_only=True, options=options) _verify_state_dict({}, optim_state_dict, info) _load_optim_state_dict(model, optimizers, optim_state_dict, info) def set_state_dict( model: nn.Module, optimizers: torch.optim.Optimizer | Iterable[torch.optim.Optimizer], *, model_state_dict: dict[str, ValueType], optim_state_dict: OptimizerStateType, options: Optional[StateDictOptions] = None, ) -> _IncompatibleKeys: """Load the model state_dict and optimizers state_dict. The counterpart of ``get_state_dict`` to set the state_dict to the model and optimizers. The given ``model_state_dict`` and ``optim_state_dict`` do not have to be returned by ``get_state_dict`` but must meet the following requirements: 1) all FQNs are canonical FQNs as defined in ``get_state_dict``, 2) if a tensor is sharded, it must be either a ShardedTensor or DTensor, 3) optimizer state_dict cannot contain the parameter IDs; the keys should be the canonical FQNs. WARN: ``set_state_dict`` can only be called before ``backward()`` or after ``step()`` is called on the optimizers. Otherwise, the optimizer states won't be initialized correctly. Args: model (nn.Module): the nn.Module to the model. optimizers (Union[Optimizer, Iterable[Optimizer]]): The optimizers that are used to optimize ``model``. model_state_dict: (Union[Dict[nn.Module, Dict[str, ValueType]], Dict[str, ValueType]]): the model state_dict to load. 
If the key of the ``model_state_dict`` is nn.Module, the key is a submodule of ``model`` and the value should be the state_dict of the submodule. When loading the state_dict, the prefix of the submodule will be append to the state_dict. optim_state_dict: OptimizerStateType: the optimizer state_dict to load. options (StateDictOptions): the options to control how model state_dict and optimizer state_dict should be loaded. See `StateDictOptions` for the details. Returns: ``NamedTuple`` with ``missing_keys`` and ``unexpected_keys`` fields: * **missing_keys** is a list of str containing the missing keys of the model state_dict. * **unexpected_keys** is a list of str containing the unexpected keys of the model state_dict. :type model_state_dict: typing.Dict[str, ValueType] :type optim_state_dict: typing.OptimizerStateType """ model_state_dict: dict[str, ValueType] = _unflatten_model_state_dict(model, model_state_dict) with _gc_context(): optimizers = (optimizers,) if isinstance(optimizers, torch.optim.Optimizer) else tuple(optimizers) info = _verify_options(model, optimizers, optim_only=not model_state_dict, options=options) _verify_state_dict(model_state_dict, optim_state_dict, info) _load_optim_state_dict(model, optimizers, optim_state_dict, info) return _load_model_state_dict(model, model_state_dict, info) # TODO: correct the state_dict function signature. # TODO: this API is not yet fully tested. Make it private @no_type_check def _patch_model_state_dict( model: nn.Module, *, options: Optional[StateDictOptions] = None, ) -> None: """Patch the ``state_dict`` and ``load_state_dict`` attributes of ``model``. Patch the ``state_dict`` and ``load_state_dict`` attributes of ``model`` to be a partial function to call ``get_state_dict`` and ``set_state_dict``. Example: from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.checkpoint.state_dict import patch_model_state_dict model = fsdp(model) patch_model_state_dict(model) Args: model (nn.Module): the nn.Module to the model. options (StateDictOptions): the options to control how model state_dict and optimizer state_dict should be loaded. See `StateDictOptions` for the details. Returns: None """ _state_dict_call = functools.partial( get_model_state_dict, model=model, options=options, ) def state_dict_call(): return _state_dict_call() model.state_dict = state_dict_call _load_state_dict_call = functools.partial( set_model_state_dict, model=model, options=options, ) def load_state_dict_call(state_dict: dict[str, Any]): _load_state_dict_call(model_state_dict=state_dict) model.load_state_dict = load_state_dict_call _patched_state_dict.add(state_dict_call) _patched_state_dict.add(load_state_dict_call) # TODO: correct the load_state_dict function signature. # TODO: this API is not yet fully tested. Make it private @no_type_check def _patch_optimizer_state_dict( model: nn.Module, *, optimizers: tuple[torch.optim.Optimizer, ...], options: Optional[StateDictOptions] = None, ) -> None: """Patch the ``state_dict`` and ``load_state_dict`` attributes of ``optimizers``. Patch the ``state_dict`` and ``load_state_dict`` attributes of ``optimizers`` to be a partial function to call ``get_state_dict`` and ``set_state_dict``. Note that if there are multiple optimizers, all of the optimizers will be patched. So users only need to call one of the state_dict() to get the full result. 
Example: from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.checkpoint.state_dict import patch_model_state_dict model = fsdp(model) patch_model_state_dict(model) Args: model (nn.Module): the nn.Module to the model. options (StateDictOptions): the options to control how model state_dict and optimizer state_dict should be loaded. See `StateDictOptions` for the details. Returns: None """ _state_dict_call = functools.partial( get_optimizer_state_dict, model=model, optimizers=optimizers, options=options, ) def state_dict_call(): return _state_dict_call() _load_state_dict_call = functools.partial( set_optimizer_state_dict, model=model, optimizers=optimizers, options=options, ) def load_state_dict_call(state_dict: dict[str, Any]): _load_state_dict_call(optim_state_dict=state_dict) _patched_state_dict.add(state_dict_call) _patched_state_dict.add(load_state_dict_call) optimizers = (optimizers,) if isinstance(optimizers, torch.optim.Optimizer) else tuple(optimizers) for optim in optimizers: optim.state_dict = state_dict_call optim.load_state_dict = load_state_dict_call ================================================ FILE: verl_distillation/verl/third_party/vllm/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from importlib.metadata import PackageNotFoundError, version from packaging import version as vs from verl.utils.device import is_npu_available from verl.utils.import_utils import is_sglang_available def get_version(pkg): try: return version(pkg) except PackageNotFoundError: return None package_name = "vllm" package_version = get_version(package_name) vllm_version = None VLLM_SLEEP_LEVEL = 1 if package_version is None: if not is_sglang_available(): raise ValueError( f"vllm version {package_version} not supported and SGLang also not Found. Currently supported " f"vllm versions are 0.7.0+" ) elif is_npu_available: # sleep_mode=2 is not supported on vllm-ascend for now, will remove this restriction when this ability is ready. VLLM_SLEEP_LEVEL = 1 from vllm import LLM from vllm.distributed import parallel_state elif vs.parse(package_version) >= vs.parse("0.7.0"): vllm_version = package_version if vs.parse(package_version) >= vs.parse("0.8.5"): VLLM_SLEEP_LEVEL = 2 from vllm import LLM from vllm.distributed import parallel_state else: if vs.parse(package_version) in [vs.parse("0.5.4"), vs.parse("0.6.3")]: raise ValueError( f"vLLM version {package_version} support has been removed. vLLM 0.5.4 and 0.6.3 are no longer " f"supported. Please use vLLM 0.7.0 or later." ) if not is_sglang_available(): raise ValueError( f"vllm version {package_version} not supported and SGLang also not Found. 
Currently supported " f"vllm versions are 0.7.0+" ) __all__ = ["LLM", "parallel_state"] ================================================ FILE: verl_distillation/verl/tools/__init__.py ================================================ # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: verl_distillation/verl/tools/base_tool.py ================================================ # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json from typing import Any, Optional from uuid import uuid4 from verl.utils.rollout_trace import rollout_trace_op from .schemas import OpenAIFunctionToolSchema, ToolResponse class BaseTool: """Base class for tools. A tool should support the following methods: - `get_openai_tool_schema`: return the tool schema in OpenAI format. - `create`: create a tool instance for a trajectory. - `execute`: execute the tool. - `calc_reward`: calculate the reward with respect to the tool state. - `release`: release the tool instance. """ def __init__(self, config: dict, tool_schema: OpenAIFunctionToolSchema): self.config = config self.tool_schema = tool_schema or self.get_openai_tool_schema() assert self.tool_schema is not None, "Tool schema is not set!" self.name = self.tool_schema.function.name print(json.dumps(self.tool_schema.model_dump(exclude_unset=True, exclude_none=True), indent=2)) def get_openai_tool_schema(self) -> OpenAIFunctionToolSchema: return self.tool_schema async def create(self, instance_id: Optional[str] = None, **kwargs) -> tuple[str, ToolResponse]: """Create a tool instance. Args: instance_id: The instance id of the tool. Returns: The instance id of the tool. tool_creation_response: The response of the tool when creating the instance. """ if instance_id is None: return str(uuid4()), ToolResponse() else: return instance_id, ToolResponse() @rollout_trace_op async def execute(self, instance_id: str, parameters: dict[str, Any], **kwargs) -> tuple[ToolResponse, float, dict]: """Execute the tool. Args: instance_id: The instance id of the tool. parameters: The dict of parameters for the tool. Returns: tool_response, tool_reward_score, tool_metrics tool_response: The ToolResponse object containing text, image, and/or video content. tool_reward_score: The step reward score of the tool. tool_metrics: The metrics of the tool.
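        Example:
            A minimal sketch of one call; ``tool``, ``instance_id``, and the
            parameters are illustrative only:

                response, step_reward, metrics = await tool.execute(
                    instance_id, {"answer": "42"}
                )
                if response.text is not None:
                    ...  # feed the tool output back into the rollout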
""" return ToolResponse(text="Updated the tool state."), 0.0, {} async def calc_reward(self, instance_id: str, **kwargs) -> float: """Calculate the reward of the tool. Args: instance_id: The instance id of the tool. Returns: The reward of the tool. """ return 0.0 async def release(self, instance_id: str, **kwargs) -> None: """Release the tool instance. Args: instance_id: The instance id of the tool. """ pass ================================================ FILE: verl_distillation/verl/tools/geo3k_tool.py ================================================ # Copyright 2023-2025 SGLang Team # Copyright Amazon.com, Inc. or its affiliates. # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os from typing import Any, Optional from uuid import uuid4 from verl.utils.reward_score import geo3k from verl.utils.rollout_trace import rollout_trace_op from .base_tool import BaseTool from .schemas import OpenAIFunctionToolSchema, ToolResponse logger = logging.getLogger(__name__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) class Geo3kTool(BaseTool): """A demo tool for calculating the reward of geo3k. - `get_openai_tool_schema`: return the tool schema in OpenAI format. - `create`: create a tool instance for a trajectory. - `execute`: execute the tool. - `calc_reward`: calculate the reward respect to tool state. - `release`: release the tool instance. 
""" def __init__(self, config: dict, tool_schema: OpenAIFunctionToolSchema): """ _tool_schema = OpenAIFunctionToolSchema.model_validate({ "type": "function", "function": { "name": "calc_geo3k_reward", "description": "A tool for calculating the reward of geo3k", "parameters": { "type": "object", "properties": { "answer": { "type": "string", "description": "The answer to the question, enclosed in \\boxed{}", }, }, "required": ["answer"], }, } }) """ super().__init__(config, tool_schema) self._instance_dict = {} def get_openai_tool_schema(self) -> OpenAIFunctionToolSchema: return self.tool_schema async def create( self, instance_id: Optional[str] = None, ground_truth: Optional[str] = None, **kwargs ) -> tuple[str, ToolResponse]: if instance_id is None: instance_id = str(uuid4()) self._instance_dict[instance_id] = { "response": "", "ground_truth": ground_truth, "reward": 0.0, } return instance_id, ToolResponse() @rollout_trace_op async def execute(self, instance_id: str, parameters: dict[str, Any], **kwargs) -> tuple[ToolResponse, float, dict]: answer = parameters.get("answer", "") if not isinstance(answer, str): answer = str(answer) self._instance_dict[instance_id]["response"] = answer reward = await self.calc_reward(instance_id) # penalty for non improved answer submission tool_reward = 0.0 if reward > self._instance_dict[instance_id]["reward"] else -0.05 # update the reward self._instance_dict[instance_id]["reward"] = reward return ToolResponse(text=f"Current parsed {answer=} {reward=}"), tool_reward, {} async def calc_reward(self, instance_id: str, **kwargs) -> float: return geo3k.compute_score( self._instance_dict[instance_id]["response"], self._instance_dict[instance_id]["ground_truth"], use_boxed=False, format_score=0.0, ) async def release(self, instance_id: str, **kwargs) -> None: del self._instance_dict[instance_id] ================================================ FILE: verl_distillation/verl/tools/gsm8k_tool.py ================================================ # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os from typing import Any, Optional from uuid import uuid4 from verl.utils.reward_score import gsm8k from verl.utils.rollout_trace import rollout_trace_op from .base_tool import BaseTool from .schemas import OpenAIFunctionToolSchema, ToolResponse logger = logging.getLogger(__name__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) class Gsm8kTool(BaseTool): """A demo tool for calculating the reward of gsm8k. - `get_openai_tool_schema`: return the tool schema in OpenAI format. - `create`: create a tool instance for a trajectory. - `execute`: execute the tool. - `calc_reward`: calculate the reward respect to tool state. - `release`: release the tool instance. 
""" def __init__(self, config: dict, tool_schema: OpenAIFunctionToolSchema): """ _tool_schema = OpenAIFunctionToolSchema.model_validate({ "type": "function", "function": { "name": "calc_gsm8k_reward", "description": "A tool for calculating the reward of gsm8k", "parameters": { "type": "object", "properties": { "answer": { "type": "string", "description": "The answer to the question", }, }, "required": ["answer"], }, } }) """ super().__init__(config, tool_schema) self._instance_dict = {} def get_openai_tool_schema(self) -> OpenAIFunctionToolSchema: return self.tool_schema async def create( self, instance_id: Optional[str] = None, ground_truth: Optional[str] = None, **kwargs ) -> tuple[str, ToolResponse]: if instance_id is None: instance_id = str(uuid4()) if ground_truth is None: ground_truth = kwargs.get("create_kwargs", {}).get("ground_truth", None) self._instance_dict[instance_id] = { "response": "", "ground_truth": ground_truth, "reward": 0.0, } return instance_id, ToolResponse() @rollout_trace_op async def execute(self, instance_id: str, parameters: dict[str, Any], **kwargs) -> tuple[ToolResponse, float, dict]: answer = parameters.get("answer", "") if not isinstance(answer, str): answer = str(answer) if answer.startswith("#### "): self._instance_dict[instance_id]["response"] = answer else: self._instance_dict[instance_id]["response"] = "#### " + answer reward = await self.calc_reward(instance_id) # penalty for non improved answer submission tool_reward = 0.0 if reward > self._instance_dict[instance_id]["reward"] else -0.05 # update the reward self._instance_dict[instance_id]["reward"] = reward return ToolResponse(text=f"Current parsed {answer=} {reward=}"), tool_reward, {} async def calc_reward(self, instance_id: str, **kwargs) -> float: return gsm8k.compute_score( self._instance_dict[instance_id]["response"], self._instance_dict[instance_id]["ground_truth"], method="flexible", format_score=0.0, score=1.0, ) async def release(self, instance_id: str, **kwargs) -> None: del self._instance_dict[instance_id] ================================================ FILE: verl_distillation/verl/tools/image_zoom_in_tool.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2023-2024 SGLang Team # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import logging import os import threading from contextlib import ExitStack from enum import Enum from math import ceil, floor from typing import Any, Callable, Optional, TypeVar from uuid import uuid4 import ray import ray.actor from qwen_vl_utils import fetch_image from .base_tool import BaseTool from .schemas import OpenAIFunctionToolSchema, ToolResponse logger = logging.getLogger(__name__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) T = TypeVar("T") # Adapted from verl/tools/sandbox_fusion_tools.py class PoolMode(Enum): """Execution pool mode enumeration.""" ThreadMode = 1 ProcessMode = 2 @ray.remote(concurrency_groups={"acquire": 1, "release": 10}) class TokenBucketWorker: """Ray actor for rate limiting using token bucket algorithm.""" def __init__(self, rate_limit: int): self.rate_limit = rate_limit self.current_count = 0 # For observability self._semaphore = threading.Semaphore(rate_limit) @ray.method(concurrency_group="acquire") def acquire(self): """Acquire a token from the bucket.""" self._semaphore.acquire() self.current_count += 1 @ray.method(concurrency_group="release") def release(self): """Release a token back to the bucket.""" self._semaphore.release() self.current_count -= 1 def get_current_count(self): """Get current number of acquired tokens.""" return self.current_count class VisualExecutionWorker: """Worker for executing visual processing operations with optional rate limiting.""" def __init__(self, enable_global_rate_limit=True, rate_limit=10): self.rate_limit_worker = self._init_rate_limit(rate_limit) if enable_global_rate_limit else None def _init_rate_limit(self, rate_limit): """Initialize singleton rate limiter.""" return TokenBucketWorker.options(name="rate-limiter", get_if_exists=True).remote(rate_limit) def ping(self): """Health check method.""" return True def execute(self, fn: Callable[..., T], *fn_args, **fn_kwargs) -> T: """Execute function with optional rate limiting.""" if self.rate_limit_worker: with ExitStack() as stack: stack.callback(self.rate_limit_worker.release.remote) ray.get(self.rate_limit_worker.acquire.remote()) try: return fn(*fn_args, **fn_kwargs) except Exception as e: # TODO we should make this available to the tool caller logger.warning(f"Error when executing visual processing: {e}") else: return fn(*fn_args, **fn_kwargs) def init_visual_execution_pool( num_workers: int, enable_global_rate_limit=True, rate_limit=10, mode: PoolMode = PoolMode.ThreadMode ): """Initialize visual execution pool.""" if mode == PoolMode.ThreadMode: return ( ray.remote(VisualExecutionWorker) .options(max_concurrency=num_workers) .remote(enable_global_rate_limit=enable_global_rate_limit, rate_limit=rate_limit) ) else: raise NotImplementedError("Process mode is not implemented yet") class ImageZoomInTool(BaseTool): """A tool for zooming in on an image by cropping it based on a bounding box. This tool provides a zoom-in functionality by cropping a region from an image, with rate limiting and concurrent execution support through Ray. 
Methods: get_openai_tool_schema: Return the tool schema in OpenAI format create: Create a tool instance for a trajectory execute: Execute the zoom-in operation calc_reward: Calculate the reward with respect to tool state release: Release the tool instance """ MIN_DIMENSION = 28 def __init__(self, config: dict, tool_schema: OpenAIFunctionToolSchema): """ _tool_schema = OpenAIFunctionToolSchema.model_validate({ "type": "function", "function": { "name": "image_zoom_in_tool", "description": ( "Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an " "optional object label." ), "parameters": { "type": "object", "properties": { "bbox_2d": { "type": "array", "items":{"type":"number"}, "minItems":4, "maxItems":4, "description": ( "The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is " "the top-left corner and (x2, y2) is the bottom-right corner." ), }, "label": { "type": "string", "description": "The name or label of the object in the specified bounding box (optional).", }, }, "required": ["bbox_2d"], }, } }) """ super().__init__(config, tool_schema) self._instance_dict = {} # Worker and rate limiting configuration self.num_workers = config.get("num_workers", 20) self.rate_limit = config.get("rate_limit", 50) self.timeout = config.get("timeout", 30) self.enable_global_rate_limit = config.get("enable_global_rate_limit", True) self.execution_pool = init_visual_execution_pool( num_workers=self.num_workers, enable_global_rate_limit=self.enable_global_rate_limit, rate_limit=self.rate_limit, mode=PoolMode.ThreadMode, ) logger.info(f"Initialized ImageZoomInTool with config: {config}") def _validate_bbox(self, left: float, top: float, right: float, bottom: float) -> bool: """Validate the bounding box dimensions and aspect ratio.""" try: if not (left < right and top < bottom): logger.warning(f"Invalid bbox shape: left={left}, top={top}, right={right}, bottom={bottom}") return False height = bottom - top width = right - left # Prevent division by zero for zero-sized boxes if min(height, width) == 0: logger.warning(f"Bbox has zero width or height: left={left}, top={top}, right={right}, bottom={bottom}") return False if max(height, width) / min(height, width) > 100: logger.warning(f"Bbox aspect ratio > 100: left={left}, top={top}, right={right}, bottom={bottom}") return False return True except Exception as e: logger.warning(f"Bbox validation error: {e}") return False def _maybe_resize_bbox(self, bbox_2d: list[float], image_width: int, image_height: int) -> Optional[list[float]]: """ Clamp, validate, and potentially resize a bounding box. This function ensures the final bounding box is within image bounds and meets the minimum dimension requirements. If the initial box is too small, it attempts to expand it from its center. It performs a final check to guarantee the output dimensions are valid. Returns: A valid bounding box as a list of coordinates, or None if validation fails. """ left, top, right, bottom = bbox_2d # 1. Clamp the initial bounding box to the image dimensions. left = max(0.0, float(left)) top = max(0.0, float(top)) right = min(float(image_width), float(right)) bottom = min(float(image_height), float(bottom)) # 2. If clamped bbox is invalid, return immediately. if not self._validate_bbox(left, top, right, bottom): return None current_bbox = [left, top, right, bottom] height = bottom - top width = right - left # 3. If the box is too small, attempt to resize it. 
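        # The resize below keeps the box centered and preserves its aspect
        # ratio: both sides are scaled so the shorter one reaches
        # MIN_DIMENSION, the box is scaled back down if that would overflow
        # the image, and it is finally shifted (not shrunk) to stay inside
        # the image bounds.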
if height < self.MIN_DIMENSION or width < self.MIN_DIMENSION: logger.info(f"Bbox {width}x{height} is smaller than {self.MIN_DIMENSION}, attempting resize.") center_x = (left + right) / 2.0 center_y = (top + bottom) / 2.0 min_dim = min(height, width) if min_dim == 0: # Safeguard for zero-area boxes return None # 1. Calculate the target dimensions to make the smallest side MIN_DIMENSION. ratio = self.MIN_DIMENSION / min_dim target_width = width * ratio target_height = height * ratio # 2. If the target size is larger than the image, scale it down to fit. # This preserves the aspect ratio while respecting image boundaries. if target_width > image_width: scale_down = image_width / target_width target_width = image_width target_height *= scale_down if target_height > image_height: scale_down = image_height / target_height target_height = image_height target_width *= scale_down # 3. Determine the coordinates for the box centered on the original center. new_half_width = target_width / 2.0 new_half_height = target_height / 2.0 new_left = center_x - new_half_width new_top = center_y - new_half_height # 4. Shift the box if it extends beyond the image boundaries to keep its size. if new_left < 0: new_left = 0 if new_top < 0: new_top = 0 if new_left + target_width > image_width: new_left = image_width - target_width if new_top + target_height > image_height: new_top = image_height - target_height new_right = new_left + target_width new_bottom = new_top + target_height # Use floor and ceil for final integer coordinates. current_bbox = [floor(new_left), floor(new_top), ceil(new_right), ceil(new_bottom)] # 4. Final validation on the resulting bounding box (either original or resized). final_left, final_top, final_right, final_bottom = current_bbox if not self._validate_bbox(final_left, final_top, final_right, final_bottom): logger.warning(f"Final bbox is invalid after processing: {current_bbox}") return None final_height = floor(final_bottom) - floor(final_top) final_width = floor(final_right) - floor(final_left) if final_height < self.MIN_DIMENSION or final_width < self.MIN_DIMENSION: logger.warning( f"Final bbox size ({final_width}x{final_height}) are still smaller than minimum ({self.MIN_DIMENSION})." f"Original bbox: {bbox_2d}, original image size: {image_width}x{image_height}" ) return None return current_bbox def get_openai_tool_schema(self) -> OpenAIFunctionToolSchema: return self.tool_schema async def create(self, instance_id: Optional[str] = None, **kwargs) -> tuple[str, ToolResponse]: """ Creates a new instance for image zoom-in tool. This method initializes a new session for an image, which can then be used for operations like zooming. It fetches the image from various sources and stores it internally. Args: instance_id: An optional unique identifier for the instance. If not provided, a new UUID will be generated. **kwargs: Should contain 'image' key with image data, or 'create_kwargs' containing {'image': image_data}. Image can be one of the following: - A PIL.Image.Image object. - A string containing an HTTP or HTTPS URL. - A string containing a local file path. - A string containing a file URI (e.g., "file:///path/to/image.jpg"). - A string containing a base64-encoded image in the format of "data:image/jpeg;base64,..." 
Returns: Tuple of (instance_id, ToolResponse) """ if instance_id is None: instance_id = str(uuid4()) # Handle create_kwargs parameter if passed create_kwargs = kwargs.get("create_kwargs", {}) if create_kwargs: kwargs.update(create_kwargs) # Get image from kwargs image = kwargs.get("image") if image is None: raise ValueError("Missing required 'image' parameter in kwargs") img = fetch_image({"image": image}) self._instance_dict[instance_id] = { "image": img, "response": "", "reward": 0.0, } return instance_id, ToolResponse() async def execute(self, instance_id: str, parameters: dict[str, Any], **kwargs) -> tuple[ToolResponse, float, dict]: bbox_2d = parameters.get("bbox_2d") label = parameters.get("label", "") if not bbox_2d or len(bbox_2d) != 4: return ( ToolResponse(text="Error: bbox_2d parameter is missing or not a list of 4 numbers."), -0.05, {"success": False}, ) instance_data = self._instance_dict[instance_id] image = instance_data["image"] image_width, image_height = image.size try: resized_bbox = self._maybe_resize_bbox(bbox_2d, image_width=image_width, image_height=image_height) if resized_bbox is None: error_msg = ( f"Error: The specified bounding box {bbox_2d} is invalid or results in a crop smaller than " f"the minimum size of {self.MIN_DIMENSION}x{self.MIN_DIMENSION}." ) logger.warning(f"Tool execution failed: {error_msg}") return ToolResponse(text=error_msg), -0.05, {"success": False} cropped_image = image.crop(resized_bbox) logger.info(f"Cropped image size: {cropped_image.size}") except Exception as e: logger.error(f"Error processing image zoom-in: {e}") return ToolResponse(text=f"Error processing image zoom-in: {e}"), -0.05, {"success": False} response_text = f"Zoomed in on the image to the region {bbox_2d}." if label: response_text = f"Zoomed in on the image to the region {bbox_2d} with label {label}." return ( ToolResponse( image=[cropped_image], text=response_text, ), 0.0, {"success": True}, ) async def release(self, instance_id: str, **kwargs) -> None: if instance_id in self._instance_dict: del self._instance_dict[instance_id] ================================================ FILE: verl_distillation/verl/tools/mcp_base_tool.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
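# Note on the error handling below: ``MCPBaseTool._call_tool`` folds client,
# connection, and unexpected errors into the returned text and records them
# under ``metadata["api_request_error"]`` instead of raising. A minimal
# sketch of that shape (names are illustrative only):
#
#     async def call_with_capture(call):
#         metadata = {}
#         try:
#             result, metadata = await call()
#             err_msg = ""
#         except Exception as e:  # the real code narrows this to typed errors
#             result = err_msg = f"\n Tool call failed: {e}"
#         metadata["api_request_error"] = err_msg or None
#         return result, metadata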
import json import logging import os from typing import Any, Optional from uuid import uuid4 from fastmcp.exceptions import ClientError from verl.tools.utils.mcp_clients.McpClientManager import ClientManager from verl.utils.rollout_trace import rollout_trace_op from .base_tool import BaseTool from .schemas import OpenAIFunctionToolSchema, ToolResponse logger = logging.getLogger(__name__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) class MCPBaseTool(BaseTool): def __init__(self, config: dict, tool_schema: OpenAIFunctionToolSchema): super().__init__(config, tool_schema) self._instance_dict = {} self.timeout = config.get("timeout", 30) # TODO(hechanghao): create a global client manager to manage the rate limit, client and pool logger.info(f"Initialized MCPBaseTool with config: {config}") def get_openai_tool_schema(self) -> OpenAIFunctionToolSchema: """Return the OpenAI tool schema.""" return self.tool_schema async def create(self, instance_id: Optional[str] = None, **kwargs) -> tuple[str, ToolResponse]: """Create a tool instance. Args: instance_id: The instance id of the tool. Returns: The instance id of the tool. tool_creation_response: The response of the tool when creating the instance. """ if instance_id is None: instance_id = str(uuid4()) self._instance_dict[instance_id] = { "response": "", "reward": [], } return instance_id, ToolResponse() async def _call_tool(self, instance_id, parameters) -> tuple[str, dict]: err_msg = "" metadata = {} try: call_tool_result = await ClientManager.call_tool(self.name, parameters, self.timeout) logger.debug(f"Tool result for instance {instance_id} with tool {self.name}: {call_tool_result.content}") result, metadata = self._parse_tool_result(call_tool_result.content) except ClientError as e: err_msg = f"\n Tool call failed: {e}" except ConnectionError as e: err_msg = f"\n Connection failed: {e}" except Exception as e: err_msg = f"\n An unexpected error occurred: {e}" finally: if err_msg: result = err_msg metadata["api_request_error"] = err_msg else: metadata["api_request_error"] = None return result, metadata @rollout_trace_op async def execute(self, instance_id: str, parameters: dict[str, Any], **kwargs) -> tuple[ToolResponse, float, dict]: if self.name == "" or self.name is None or parameters is None: error_msg = "Error: 'parameters' is missing or empty."
logger.error(f"[MCPTool] {error_msg} Received tool name: {self.name}, parameters: {parameters}") return ToolResponse(text=json.dumps({"result": error_msg})), 0.0, {} try: result_text, metadata = await self._call_tool(instance_id, parameters) # Store results in instance dictionary self._instance_dict[instance_id]["reward"].append(result_text.strip()) # Convert metadata to metrics metrics = { "query_count": metadata.get("query_count", 0), "status": metadata.get("status", "unknown"), "total_results": metadata.get("total_results", 0), "api_request_error": metadata.get("api_request_error"), } return ToolResponse(text=result_text), 0.0, metrics except Exception as e: error_result = json.dumps({"result": f"Tool execution failed: {e}"}) logger.error(f"[MCPBaseTool] Execution failed: {e}") return ToolResponse(text=error_result), 0.0, {"error": str(e)} async def calc_reward(self, instance_id: str, **kwargs) -> str: return self._instance_dict[instance_id]["reward"] async def release(self, instance_id: str, **kwargs) -> None: if instance_id in self._instance_dict: del self._instance_dict[instance_id] def _parse_tool_result(self, content: list) -> tuple[str, dict]: tools_content = [part.text for part in filter(lambda x: x.type == "text", content)] return " ".join(tools_content), {} ================================================ FILE: verl_distillation/verl/tools/mcp_search_tool.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import logging import os import re from verl.tools.mcp_base_tool import MCPBaseTool from .schemas import OpenAIFunctionToolSchema logger = logging.getLogger(__name__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) class MCPSearchTool(MCPBaseTool): def __init__(self, config: dict, tool_schema: OpenAIFunctionToolSchema): super().__init__(config, tool_schema) def _parse_tool_result(self, content: list) -> tuple[str, dict]: res = "" res_cnt = 0 query_list = [] metadata = { "api_request_error": "", "status": "unknown", "total_results": 0, } try: for part in content: if part.type != "text": continue text = part.text.replace("'", '"') query_match = re.search(r'query"\s*:\s*"([^"]+)"', text) query = query_match.group(1) if query_match else "" query_list.append(query) title_matches = re.findall(r'"title"\s*:', text) title_count = len(title_matches) results_match = re.search(r'"results"\s*:\s*(\[.*?\])', text, re.DOTALL) results_content = results_match.group(1) if results_match else "" res += results_content res_cnt += title_count except json.JSONDecodeError: err_msg = "json parse error." 
logger.error(err_msg) metadata["api_request_error"] = err_msg metadata["status"] = "error" # update metadata metadata["status"] = "success" metadata["queries"] = query_list metadata["query_count"] = len(query_list) metadata["total_results"] = res_cnt return res, metadata ================================================ FILE: verl_distillation/verl/tools/sandbox_fusion_tools.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os import threading from contextlib import ExitStack from enum import Enum from typing import Any, Callable, Optional, TypeVar from uuid import uuid4 import ray from verl.tools.base_tool import BaseTool from verl.utils.reward_score.sandbox_fusion.utils import _process_single_case from verl.utils.rollout_trace import rollout_trace_op from .schemas import OpenAIFunctionToolSchema, ToolResponse logger = logging.getLogger(__name__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) T = TypeVar("T") class PoolMode(Enum): ThreadMode = 1 ProcessMode = 2 @ray.remote(concurrency_groups={"acquire": 1, "release": 10}) class TokenBucketWorker: def __init__(self, rate_limit: int): self.rate_limit = rate_limit # this only used for observalability self.current_count = 0 self._semaphore = threading.Semaphore(rate_limit) @ray.method(concurrency_group="acquire") def acquire(self): self._semaphore.acquire() self.current_count += 1 @ray.method(concurrency_group="release") def release(self): self._semaphore.release() self.current_count -= 1 def get_current_count(self): return self.current_count class ExecutionWorker: def __init__(self, enable_global_rate_limit=True, rate_limit=10): self.rate_limit_worker = self._init_rate_limit(rate_limit) if enable_global_rate_limit else None def _init_rate_limit(self, rate_limit): # TODO validation for rate_limit # A Singleton Rate Limitor return TokenBucketWorker.options(name="rate-limiter", get_if_exists=True).remote(rate_limit) def ping(self): return True def execute(self, fn: Callable[..., T], *fn_args, **fn_kwargs) -> T: with ExitStack() as stack: stack.callback(self.rate_limit_worker.release.remote) ray.get(self.rate_limit_worker.acquire.remote()) try: return fn(*fn_args, **fn_kwargs) except Exception as e: # TODO we should make this available to the tool caller logger.warning(f"Error when executing code: {e}") def init_execution_pool( num_workers: int, enable_global_rate_limit=True, rate_limit=10, mode: PoolMode = PoolMode.ThreadMode ): if mode == PoolMode.ThreadMode: return ( ray.remote(ExecutionWorker) .options(max_concurrency=num_workers) .remote(enable_global_rate_limit=enable_global_rate_limit, rate_limit=rate_limit) ) else: raise NotImplementedError("Process mode is not implemented yet") # return ray.util.multiprocessing.Pool(processes=num_workers) class SandboxFusionTool(BaseTool): """A tool for executing the code using sanbox fusion image. - `get_openai_tool_schema`: return the tool schema in OpenAI format. 
- `create`: create a tool instance for a trajectory. - `execute`: execute the tool. - `calc_reward`: calculate the reward respect to tool state. - `release`: release the tool instance. """ def __init__(self, config: dict, tool_schema: OpenAIFunctionToolSchema): """ _tool_schema = OpenAIFunctionToolSchema.model_validate({ "type": "function", "function": { "name": "code_interpreter", "description": "A tool for execute code", "parameters": { "type": "object", "properties": { "code": { "type": "string", "description": "code needs to be execute and grad", }, }, "required": ["code"], }, } }) """ super().__init__(config, tool_schema) self._instance_dict = {} # TODO: better documentation for the config self.num_workers = config.get("num_workers", 10) self.rate_limit = config.get("rate_limit", 10) self.default_timeout = config.get("default_timeout", 30) self.default_language = config.get("default_language", "python") self.enable_global_rate_limit = config.get("enable_global_rate_limit", True) self.execution_pool = init_execution_pool( num_workers=self.num_workers, enable_global_rate_limit=self.enable_global_rate_limit, rate_limit=self.rate_limit, mode=PoolMode.ThreadMode, ) self.sandbox_fusion_url = config.get("sandbox_fusion_url", "") self.memory_limit_mb = config.get("memory_limit_mb", 1024) if self.sandbox_fusion_url == "": raise ValueError("sandbox_fusion_url is not set") log_msg = f"Init SandboxFusionTool with config: {config}" logger.info(log_msg) def get_openai_tool_schema(self) -> OpenAIFunctionToolSchema: return self.tool_schema async def create( self, instance_id: Optional[str] = None, ground_truth: Optional[str] = None, **kwargs ) -> tuple[str, ToolResponse]: if instance_id is None: instance_id = str(uuid4()) self._instance_dict[instance_id] = { "response": "", "ground_truth": ground_truth, "reward": [], } return instance_id, ToolResponse() @rollout_trace_op async def execute(self, instance_id: str, parameters: dict[str, Any], **kwargs) -> tuple[ToolResponse, float, dict]: code = parameters.get("code", "") timeout = parameters.get("timeout", self.default_timeout) language = parameters.get("language", self.default_language) if not isinstance(code, str): code = str(code) result = await self.execution_pool.execute.remote(self.execute_code, instance_id, code, timeout, language) # sandbox has no score or metrics, use Nones return ToolResponse(text=result), None, None def execute_code(self, instance_id, code, timeout=30, language="python"): result_status, metadata = _process_single_case( 0, None, None, self.sandbox_fusion_url, code, timeout, self.memory_limit_mb, language ) # we should always expect this since we don't have correct answer if metadata["run_status"] == "Finished": actual_output = metadata["stdout"] + metadata["stderr"] logger.debug(f"actual_output from sandbox fusion: {actual_output},{instance_id}") return ToolResponse(text=actual_output) else: return ToolResponse(text="no stdout here") async def calc_reward(self, instance_id: str, **kwargs) -> str: return self._instance_dict[instance_id]["reward"] async def release(self, instance_id: str, **kwargs) -> None: del self._instance_dict[instance_id] ================================================ FILE: verl_distillation/verl/tools/schemas.py ================================================ # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json from typing import Any, Literal from pydantic import BaseModel, Field, model_validator class OpenAIFunctionPropertySchema(BaseModel): """The schema of a parameter in OpenAI format.""" type: str description: str | None = None enum: list[str] | None = None class OpenAIFunctionParametersSchema(BaseModel): """The schema of parameters in OpenAI format.""" type: str properties: dict[str, OpenAIFunctionPropertySchema] required: list[str] class OpenAIFunctionSchema(BaseModel): """The schema of a function in OpenAI format.""" name: str description: str parameters: OpenAIFunctionParametersSchema = Field( default_factory=lambda: OpenAIFunctionParametersSchema(type="object", properties={}, required=[]) ) strict: bool = False class OpenAIFunctionToolSchema(BaseModel): """The schema of a tool in OpenAI format.""" type: str function: OpenAIFunctionSchema class OpenAIFunctionParsedSchema(BaseModel): """The parsed schema of a tool in OpenAI format.""" name: str arguments: str # JSON string class OpenAIFunctionCallSchema(BaseModel): """The parsed schema of a tool in OpenAI format.""" name: str arguments: dict[str, Any] @staticmethod def from_openai_function_parsed_schema( parsed_schema: OpenAIFunctionParsedSchema, ) -> tuple["OpenAIFunctionCallSchema", bool]: has_decode_error = False try: arguments = json.loads(parsed_schema.arguments) except json.JSONDecodeError: arguments = {} has_decode_error = True # If the arguments is not a dict, it means the arguments is not a valid JSON string if not isinstance(arguments, dict): arguments = {} has_decode_error = True return OpenAIFunctionCallSchema(name=parsed_schema.name, arguments=arguments), has_decode_error class OpenAIFunctionToolCall(BaseModel): """The tool call in OpenAI format.""" id: str type: Literal["function"] = "function" function: OpenAIFunctionCallSchema class ToolResponse(BaseModel): """The response from a tool execution.""" text: str | None = None image: list[Any] | None = None video: list[Any] | None = None @model_validator(mode="before") @classmethod def initialize_request(cls, values): if "image" in values and not isinstance(values["image"], list): raise ValueError( f"Image must be a list, but got {type(values['image'])}. Please check the tool.execute(). " f"For single images, wrap in a list: [image]. " f"Example: {{'image': [img1]}} or {{'image': [img1, img2, ...]}}." ) if "video" in values and not isinstance(values["video"], list): raise ValueError( f"Video must be a list, but got {type(values['video'])}. Please check the tool.execute(). " f"For single videos, wrap in a list: [video]. " f"Example: {{'video': [video1]}} or {{'video': [video1, video2, ...]}}." ) return values def is_empty(self) -> bool: return not self.text and not self.image and not self.video def is_text_only(self) -> bool: return self.text and not self.image and not self.video ================================================ FILE: verl_distillation/verl/tools/search_tool.py ================================================ # Copyright 2024 Bytedance Ltd. 
and/or its affiliates # Copyright 2023-2024 SGLang Team # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import logging import os import threading from contextlib import ExitStack from enum import Enum from typing import Any, Callable, Optional, TypeVar from uuid import uuid4 import ray import ray.actor from verl.tools.utils.search_r1_like_utils import perform_single_search_batch from verl.utils.rollout_trace import rollout_trace_op from .base_tool import BaseTool from .schemas import OpenAIFunctionToolSchema, ToolResponse logger = logging.getLogger(__name__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) T = TypeVar("T") # Adapted from verl/tools/sandbox_fusion_tools.py class PoolMode(Enum): """Execution pool mode enumeration.""" ThreadMode = 1 ProcessMode = 2 @ray.remote(concurrency_groups={"acquire": 1, "release": 10}) class TokenBucketWorker: """Ray actor for rate limiting using token bucket algorithm.""" def __init__(self, rate_limit: int): self.rate_limit = rate_limit self.current_count = 0 # For observability self._semaphore = threading.Semaphore(rate_limit) @ray.method(concurrency_group="acquire") def acquire(self): """Acquire a token from the bucket.""" self._semaphore.acquire() self.current_count += 1 @ray.method(concurrency_group="release") def release(self): """Release a token back to the bucket.""" self._semaphore.release() self.current_count -= 1 def get_current_count(self): """Get current number of acquired tokens.""" return self.current_count class SearchExecutionWorker: """Worker for executing search operations with optional rate limiting.""" def __init__(self, enable_global_rate_limit=True, rate_limit=10): self.rate_limit_worker = self._init_rate_limit(rate_limit) if enable_global_rate_limit else None def _init_rate_limit(self, rate_limit): """Initialize singleton rate limiter.""" return TokenBucketWorker.options(name="rate-limiter", get_if_exists=True).remote(rate_limit) def ping(self): """Health check method.""" return True def execute(self, fn: Callable[..., T], *fn_args, **fn_kwargs) -> T: """Execute function with optional rate limiting.""" if self.rate_limit_worker: with ExitStack() as stack: stack.callback(self.rate_limit_worker.release.remote) ray.get(self.rate_limit_worker.acquire.remote()) try: return fn(*fn_args, **fn_kwargs) except Exception as e: # TODO we should make this available to the tool caller logger.warning(f"Error when executing search: {e}") else: return fn(*fn_args, **fn_kwargs) def init_search_execution_pool( num_workers: int, enable_global_rate_limit=True, rate_limit=10, mode: PoolMode = PoolMode.ThreadMode ): """Initialize search execution pool.""" if mode == PoolMode.ThreadMode: return ( ray.remote(SearchExecutionWorker) .options(max_concurrency=num_workers) .remote(enable_global_rate_limit=enable_global_rate_limit, rate_limit=rate_limit) ) else: raise NotImplementedError("Process mode is not implemented yet") class SearchTool(BaseTool): """Search tool for retrieving information using external retrieval services. 
This tool provides search functionality with rate limiting and concurrent execution support through Ray. It integrates with external retrieval services to perform semantic search operations. Methods: get_openai_tool_schema: Return the tool schema in OpenAI format create: Create a tool instance for a trajectory execute: Execute the search tool calc_reward: Calculate the reward with respect to tool state release: Release the tool instance """ def __init__(self, config: dict, tool_schema: OpenAIFunctionToolSchema): """Initialize SearchTool with configuration and schema. Args: config: Configuration dictionary containing tool settings tool_schema: OpenAI function tool schema definition Example tool_schema: { "type": "function", "function": { "name": "search", "description": "Searches for relevant information based on queries.", "parameters": { "type": "object", "properties": { "query_list": { "type": "array", "items": {"type": "string"}, "description": "List of search queries" } }, "required": ["query_list"] } } } """ super().__init__(config, tool_schema) self._instance_dict = {} # Worker and rate limiting configuration self.num_workers = config.get("num_workers", 120) self.rate_limit = config.get("rate_limit", 120) self.timeout = config.get("timeout", 30) self.enable_global_rate_limit = config.get("enable_global_rate_limit", True) self.execution_pool = init_search_execution_pool( num_workers=self.num_workers, enable_global_rate_limit=self.enable_global_rate_limit, rate_limit=self.rate_limit, mode=PoolMode.ThreadMode, ) # Retrieval service configuration self.retrieval_service_url = config.get("retrieval_service_url") assert self.retrieval_service_url, "Configuration must include 'retrieval_service_url'" self.topk = config.get("topk", 3) if self.retrieval_service_url == "": raise ValueError("retrieval_service_url is not set") logger.info(f"Initialized SearchTool with config: {config}") def get_openai_tool_schema(self) -> OpenAIFunctionToolSchema: """Return the OpenAI tool schema.""" return self.tool_schema async def create(self, instance_id: Optional[str] = None, **kwargs) -> tuple[str, ToolResponse]: """Create a tool instance. Args: instance_id: The instance id of the tool. Returns: The instance id of the tool. tool_creation_response: The response of the tool when creating the instance. """ if instance_id is None: instance_id = str(uuid4()) self._instance_dict[instance_id] = { "response": "", "reward": [], } return instance_id, ToolResponse() def execute_search(self, instance_id: str, query_list: list, retrieval_service_url: str, topk: int, timeout: int): """Execute search operation using retrieval service. Args: instance_id: Tool instance ID query_list: List of search queries retrieval_service_url: URL of the retrieval service topk: Number of top results to return timeout: Request timeout in seconds Returns: Tuple of (result_text, metadata) """ result_text, metadata = perform_single_search_batch( retrieval_service_url=retrieval_service_url, query_list=query_list, topk=topk, concurrent_semaphore=None, # Ray handles concurrency control timeout=timeout, ) logger.debug(f"Search result for instance {instance_id}: {result_text}") return result_text, metadata @rollout_trace_op async def execute(self, instance_id: str, parameters: dict[str, Any], **kwargs) -> tuple[ToolResponse, float, dict]: """Execute the search tool. 
Args: instance_id: The instance ID of the tool parameters: Tool parameters containing query_list and optional timeout Returns: tool_response, tool_reward_score, tool_metrics tool_response: The response str of the tool. tool_reward_score: The step reward score of the tool. tool_metrics: The metrics of the tool. """ timeout = self.timeout query_list_from_params = parameters.get("query_list") if not query_list_from_params or not isinstance(query_list_from_params, list): error_msg = "Error: 'query_list' is missing, empty, or not a list in parameters." logger.error(f"[SearchTool] {error_msg} Received parameters: {parameters}") return ToolResponse(text=json.dumps({"result": error_msg})), 0.0, {} # Execute search using Ray execution pool try: result_text, metadata = await self.execution_pool.execute.remote( self.execute_search, instance_id, query_list_from_params, self.retrieval_service_url, self.topk, timeout ) # Store results in instance dictionary self._instance_dict[instance_id]["reward"].append(result_text.strip()) # Convert metadata to metrics metrics = { "query_count": metadata.get("query_count", 0), "status": metadata.get("status", "unknown"), "total_results": metadata.get("total_results", 0), "api_request_error": metadata.get("api_request_error"), } return ToolResponse(text=result_text), 0.0, metrics except Exception as e: error_result = json.dumps({"result": f"Search execution failed: {e}"}) logger.error(f"[SearchTool] Execution failed: {e}") return ToolResponse(text=error_result), 0.0, {"error": str(e)} async def calc_reward(self, instance_id: str, **kwargs) -> str: return self._instance_dict[instance_id]["reward"] async def release(self, instance_id: str, **kwargs) -> None: if instance_id in self._instance_dict: del self._instance_dict[instance_id] ================================================ FILE: verl_distillation/verl/tools/utils/__init__.py ================================================ # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: verl_distillation/verl/tools/utils/mcp_clients/McpClientManager.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
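# A minimal, self-contained sketch (not part of this repository) of the
# acquire/release pattern used by SearchExecutionWorker above: the ExitStack
# callback registers the token release *before* acquiring, so the token is
# returned even if the wrapped function raises. Plain threading stands in for
# the Ray TokenBucketWorker actor here.
import threading
from contextlib import ExitStack


class LocalTokenBucket:
    """Thread-based stand-in for the Ray TokenBucketWorker actor."""

    def __init__(self, rate_limit: int):
        self._semaphore = threading.Semaphore(rate_limit)

    def acquire(self):
        self._semaphore.acquire()

    def release(self):
        self._semaphore.release()


def rate_limited_call(bucket: LocalTokenBucket, fn, *args, **kwargs):
    with ExitStack() as stack:
        stack.callback(bucket.release)  # runs on normal exit *and* on exception
        bucket.acquire()
        return fn(*args, **kwargs)


# Usage: at most 2 callers hold a token at any one time.
bucket = LocalTokenBucket(rate_limit=2)
print(rate_limited_call(bucket, lambda q: f"searched: {q}", "hello"))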
import asyncio
import json
import logging
from typing import Any

from fastmcp import Client
from fastmcp.client.transports import SSETransport

from verl.tools.utils.mcp_clients.utils import TokenBucket, mcp2openai

logger = logging.getLogger(__name__)


class MCPClientManager:
    rootServerName = "mcpServers"
    initialized = False
    clients = []
    tool_client_mapping = {}
    rate_limiter = None

    async def initialize(self, config_path, rate_limit: float = 10.0):
        """Initialize the MCP Client Manager and start all clients."""
        if self.initialized:
            return
        result = self._load_config(config_path)
        servers = result[self.rootServerName]
        exclude_sse_servers = {self.rootServerName: {}}
        for server_name in servers.keys():
            server = servers[server_name]
            if "auth_token" in server:
                transport = SSETransport(url=server["url"], headers={"Authorization": f"Bearer {server['auth_token']}"})
                client = Client(transport)
                self.clients.append(client)
            else:
                exclude_sse_servers[self.rootServerName][server_name] = server

        if exclude_sse_servers[self.rootServerName]:
            self.clients.append(Client(exclude_sse_servers))

        # Initialize rate limiter
        self.rate_limiter = TokenBucket(rate_limit)
        self.initialized = True

    async def call_tool(self, tool_name, parameters, timeout):
        # Apply rate limiting
        while not self.rate_limiter.acquire():
            await asyncio.sleep(0.1)

        client = self.get_client_with_tool_name(tool_name)
        async with client:
            return await client.call_tool_mcp(tool_name, parameters)

    async def fetch_tool_schemas(self, tool_selected_list: list[str]) -> list[dict]:
        tool_schemas = []
        for client in self.clients:
            async with client:
                tools = await client.list_tools_mcp()
                for tool in tools.tools:
                    if not tool_selected_list:
                        self.tool_client_mapping[tool.name] = client
                        tool_schemas.append(mcp2openai(tool))
                    elif tool.name in tool_selected_list:
                        self.tool_client_mapping[tool.name] = client
                        tool_schemas.append(mcp2openai(tool))
        return tool_schemas

    def get_client_with_tool_name(self, tool_name: str):
        return self.tool_client_mapping[tool_name]

    def _load_config(self, file: str) -> dict[str, Any]:
        try:
            with open(file) as f:
                return json.load(f)
        except FileNotFoundError:
            logger.warning(f'the "{file}" file was not found')
        except Exception:
            logger.error(f'there was an error reading the "{file}" file')
        return {}


ClientManager = MCPClientManager()


================================================
FILE: verl_distillation/verl/tools/utils/mcp_clients/utils.py
================================================
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
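# A hedged usage sketch (not part of this repository) for the MCPClientManager
# above. From _load_config/initialize, the JSON config is keyed by "mcpServers",
# and entries carrying an "auth_token" get a dedicated SSE client. The file path,
# server name, URL, and token below are made up for illustration.
import asyncio
import json


async def mcp_manager_demo():
    from verl.tools.utils.mcp_clients.McpClientManager import ClientManager

    # Hypothetical config file; shape inferred from MCPClientManager._load_config.
    config = {"mcpServers": {"example": {"url": "http://localhost:8000/sse", "auth_token": "token-123"}}}
    with open("/tmp/mcp_servers.json", "w") as f:
        json.dump(config, f)

    await ClientManager.initialize("/tmp/mcp_servers.json", rate_limit=10.0)
    # An empty selection list registers every tool the servers advertise.
    schemas = await ClientManager.fetch_tool_schemas([])
    print([s["function"]["name"] for s in schemas])


# asyncio.run(mcp_manager_demo())  # requires a reachable MCP server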
import logging import threading import time from mcp import Tool logger = logging.getLogger(__file__) class TokenBucket: def __init__(self, rate_limit: float): self.rate_limit = rate_limit # tokens per second self.tokens = rate_limit self.last_update = time.time() self.lock = threading.Lock() def acquire(self) -> bool: with self.lock: now = time.time() # Add new tokens based on time elapsed new_tokens = (now - self.last_update) * self.rate_limit self.tokens = min(self.rate_limit, self.tokens + new_tokens) self.last_update = now if self.tokens >= 1: self.tokens -= 1 return True return False def mcp2openai(mcp_tool: Tool) -> dict: """Convert a MCP Tool to an OpenAI ChatCompletionTool.""" openai_format = { "type": "function", "function": { "name": mcp_tool.name, "description": mcp_tool.description, "parameters": mcp_tool.inputSchema, "strict": False, }, } if not openai_format["function"]["parameters"].get("required", None): openai_format["function"]["parameters"]["required"] = [] return openai_format ================================================ FILE: verl_distillation/verl/tools/utils/search_r1_like_utils.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2023-2024 SGLang Team # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import logging import threading import time import traceback import uuid from typing import Any, Optional import requests DEFAULT_TIMEOUT = 30 # Default search request timeout MAX_RETRIES = 10 INITIAL_RETRY_DELAY = 1 API_TIMEOUT = 10 logger = logging.getLogger(__name__) def call_search_api( retrieval_service_url: str, query_list: list[str], topk: int = 3, return_scores: bool = True, timeout: int = DEFAULT_TIMEOUT, ) -> tuple[Optional[dict[str, Any]], Optional[str]]: """ Calls the remote search API to perform retrieval with retry logic for various errors, using increasing delay between retries. Logs internal calls with a unique ID. Args: retrieval_service_url: The URL of the retrieval service API. query_list: List of search queries. topk: Number of top results to return. return_scores: Whether to return scores. timeout: Request timeout in seconds. Returns: A tuple (response_json, error_message). If successful, response_json is the API's returned JSON object, error_message is None. If failed after retries, response_json is None, error_message contains the error information. 
""" request_id = str(uuid.uuid4()) log_prefix = f"[Search Request ID: {request_id}] " payload = {"queries": query_list, "topk": topk, "return_scores": return_scores} headers = {"Content-Type": "application/json", "Accept": "application/json"} last_error = None for attempt in range(MAX_RETRIES): try: logger.info( f"{log_prefix}Attempt {attempt + 1}/{MAX_RETRIES}: Calling search API at {retrieval_service_url}" ) response = requests.post( retrieval_service_url, headers=headers, json=payload, timeout=timeout, ) # Check for Gateway Timeout (504) and other server errors for retrying if response.status_code in [500, 502, 503, 504]: last_error = ( f"{log_prefix}API Request Error: Server Error ({response.status_code}) on attempt " f"{attempt + 1}/{MAX_RETRIES}" ) logger.warning(last_error) if attempt < MAX_RETRIES - 1: delay = INITIAL_RETRY_DELAY * (attempt + 1) logger.info(f"{log_prefix}Retrying after {delay} seconds...") time.sleep(delay) continue # Check for other HTTP errors (e.g., 4xx) response.raise_for_status() # If successful (status code 2xx) logger.info(f"{log_prefix}Search API call successful on attempt {attempt + 1}") return response.json(), None except requests.exceptions.ConnectionError as e: last_error = f"{log_prefix}Connection Error: {e}" logger.warning(last_error) if attempt < MAX_RETRIES - 1: delay = INITIAL_RETRY_DELAY * (attempt + 1) logger.info(f"{log_prefix}Retrying after {delay} seconds...") time.sleep(delay) continue except requests.exceptions.Timeout as e: last_error = f"{log_prefix}Timeout Error: {e}" logger.warning(last_error) if attempt < MAX_RETRIES - 1: delay = INITIAL_RETRY_DELAY * (attempt + 1) logger.info(f"{log_prefix}Retrying after {delay} seconds...") time.sleep(delay) continue except requests.exceptions.RequestException as e: last_error = f"{log_prefix}API Request Error: {e}" break # Exit retry loop on other request errors except json.JSONDecodeError as e: raw_response_text = response.text if "response" in locals() else "N/A" last_error = f"{log_prefix}API Response JSON Decode Error: {e}, Response: {raw_response_text[:200]}" break # Exit retry loop on JSON decode errors except Exception as e: last_error = f"{log_prefix}Unexpected Error: {e}" break # Exit retry loop on other unexpected errors # If loop finishes without returning success, return the last recorded error logger.error(f"{log_prefix}Search API call failed. Last error: {last_error}") return None, last_error.replace(log_prefix, "API Call Failed: ") if last_error else "API Call Failed after retries" def _passages2string(retrieval_result): """Convert retrieval results to formatted string.""" format_reference = "" for idx, doc_item in enumerate(retrieval_result): content = doc_item["document"]["contents"] title = content.split("\n")[0] text = "\n".join(content.split("\n")[1:]) format_reference += f"Doc {idx + 1} (Title: {title})\n{text}\n\n" return format_reference.strip() def perform_single_search_batch( retrieval_service_url: str, query_list: list[str], topk: int = 3, concurrent_semaphore: Optional[threading.Semaphore] = None, timeout: int = DEFAULT_TIMEOUT, ) -> tuple[str, dict[str, Any]]: """ Performs a single batch search for multiple queries (original search tool behavior). Args: retrieval_service_url: The URL of the retrieval service API. query_list: List of search queries. topk: Number of top results to return. concurrent_semaphore: Optional semaphore for concurrency control. timeout: Request timeout in seconds. Returns: A tuple (result_text, metadata). result_text: The search result JSON string. 
metadata: Metadata dictionary for the batch search. """ logger.info(f"Starting batch search for {len(query_list)} queries.") api_response = None error_msg = None try: if concurrent_semaphore: with concurrent_semaphore: api_response, error_msg = call_search_api( retrieval_service_url=retrieval_service_url, query_list=query_list, topk=topk, return_scores=True, timeout=timeout, ) else: api_response, error_msg = call_search_api( retrieval_service_url=retrieval_service_url, query_list=query_list, topk=topk, return_scores=True, timeout=timeout, ) except Exception as e: error_msg = f"API Request Exception during batch search: {e}" logger.error(f"Batch search: {error_msg}") traceback.print_exc() metadata = { "query_count": len(query_list), "queries": query_list, "api_request_error": error_msg, "api_response": None, "status": "unknown", "total_results": 0, "formatted_result": None, } result_text = json.dumps({"result": "Search request failed or timed out after retries."}, ensure_ascii=False) if error_msg: metadata["status"] = "api_error" result_text = json.dumps({"result": f"Search error: {error_msg}"}, ensure_ascii=False) logger.error(f"Batch search: API error occurred: {error_msg}") elif api_response: logger.debug(f"Batch search: API Response: {api_response}") metadata["api_response"] = api_response try: raw_results = api_response.get("result", []) if raw_results: pretty_results = [] total_results = 0 for retrieval in raw_results: formatted = _passages2string(retrieval) pretty_results.append(formatted) total_results += len(retrieval) if isinstance(retrieval, list) else 1 final_result = "\n---\n".join(pretty_results) result_text = json.dumps({"result": final_result}, ensure_ascii=False) metadata["status"] = "success" metadata["total_results"] = total_results metadata["formatted_result"] = final_result logger.info(f"Batch search: Successful, got {total_results} total results") else: result_text = json.dumps({"result": "No search results found."}, ensure_ascii=False) metadata["status"] = "no_results" metadata["total_results"] = 0 logger.info("Batch search: No results found") except Exception as e: error_msg = f"Error processing search results: {e}" result_text = json.dumps({"result": error_msg}, ensure_ascii=False) metadata["status"] = "processing_error" logger.error(f"Batch search: {error_msg}") else: metadata["status"] = "unknown_api_state" result_text = json.dumps( {"result": "Unknown API state (no response and no error message)."}, ensure_ascii=False ) logger.error("Batch search: Unknown API state.") return result_text, metadata ================================================ FILE: verl_distillation/verl/tools/utils/tool_registry.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
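# A minimal sketch (not part of this repository) of calling
# perform_single_search_batch above directly; the endpoint URL is hypothetical.
# The helper retries transient connection errors and 5xx responses with a
# linearly increasing delay, and always returns a JSON string of the form
# {"result": "..."} plus a metadata dict whose "status" is one of:
# success, no_results, api_error, processing_error, unknown_api_state.
import json

from verl.tools.utils.search_r1_like_utils import perform_single_search_batch

result_text, metadata = perform_single_search_batch(
    retrieval_service_url="http://localhost:8000/retrieve",  # hypothetical endpoint
    query_list=["what is PPO", "GAE advantage estimator"],
    topk=3,
    timeout=10,
)
payload = json.loads(result_text)  # valid JSON regardless of outcome
print(metadata["status"], metadata["query_count"], payload["result"][:200])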
import asyncio
import importlib.util
import logging
import os
import sys
import threading
from enum import Enum

from omegaconf import OmegaConf

from verl.tools.schemas import OpenAIFunctionToolSchema

logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))


class ToolType(Enum):
    NATIVE = "native"
    MCP = "mcp"


async def initialize_mcp_tool(tool_cls, tool_config) -> list:
    from verl.tools.utils.mcp_clients.McpClientManager import ClientManager

    tool_list = []
    mcp_servers_config_path = tool_config.mcp.mcp_servers_config_path
    tool_selected_list = tool_config.mcp.tool_selected_list if "tool_selected_list" in tool_config.mcp else None
    await ClientManager.initialize(mcp_servers_config_path, tool_config.config.rate_limit)

    # Wait for the MCP client to be ready
    max_retries = 10
    retry_interval = 2  # seconds
    for i in range(max_retries):
        tool_schemas = await ClientManager.fetch_tool_schemas(tool_selected_list)
        if tool_schemas:
            break
        if i < max_retries - 1:
            logger.debug(f"Waiting for MCP client to be ready, attempt {i + 1}/{max_retries}")
            await asyncio.sleep(retry_interval)
        else:
            raise RuntimeError("Failed to initialize MCP tools after maximum retries")

    # mcp registry
    assert len(tool_schemas), "mcp tool is empty"
    for tool_schema_dict in tool_schemas:
        logger.debug(f"tool_schema_dict: {tool_schema_dict}")
        tool_schema = OpenAIFunctionToolSchema.model_validate(tool_schema_dict)
        tool = tool_cls(
            config=OmegaConf.to_container(tool_config.config, resolve=True),
            tool_schema=tool_schema,
        )
        tool_list.append(tool)
    return tool_list


def get_tool_class(cls_name):
    module_name, class_name = cls_name.rsplit(".", 1)
    if module_name not in sys.modules:
        spec = importlib.util.find_spec(module_name)
        module = importlib.util.module_from_spec(spec)
        sys.modules[module_name] = module
        spec.loader.exec_module(module)
    else:
        module = sys.modules[module_name]
    tool_cls = getattr(module, class_name)
    return tool_cls


def initialize_tools_from_config(tools_config_file):
    tools_config = OmegaConf.load(tools_config_file)
    tool_list = []

    # Run MCP initialization on a temporary event loop in a new thread, because an
    # event loop may already be running in the new async architecture; this keeps
    # backwards compatibility with synchronous call sites.
    tmp_event_loop = asyncio.new_event_loop()
    thread = threading.Thread(target=tmp_event_loop.run_forever, name="mcp tool list fetcher", daemon=True)

    def run_coroutine(coroutine):
        if not thread.is_alive():
            thread.start()
        future = asyncio.run_coroutine_threadsafe(coroutine, tmp_event_loop)
        return future.result()

    async def stop_loop():
        tmp_event_loop.stop()

    try:
        for tool_config in tools_config.tools:
            cls_name = tool_config.class_name
            tool_type = ToolType(tool_config.config.type)
            tool_cls = get_tool_class(cls_name)
            match tool_type:
                case ToolType.NATIVE:
                    if tool_config.get("tool_schema", None) is None:
                        tool_schema = None
                    else:
                        tool_schema_dict = OmegaConf.to_container(tool_config.tool_schema, resolve=True)
                        tool_schema = OpenAIFunctionToolSchema.model_validate(tool_schema_dict)
                    tool = tool_cls(
                        config=OmegaConf.to_container(tool_config.config, resolve=True),
                        tool_schema=tool_schema,
                    )
                    tool_list.append(tool)
                case ToolType.MCP:
                    mcp_tools = run_coroutine(initialize_mcp_tool(tool_cls, tool_config))
                    tool_list.extend(mcp_tools)
                case _:
                    raise NotImplementedError
    finally:
        if thread.is_alive():
            asyncio.run_coroutine_threadsafe(stop_loop(), tmp_event_loop)
            thread.join()

    return tool_list


================================================
FILE: verl_distillation/verl/trainer/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


================================================
FILE: verl_distillation/verl/trainer/config/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from . import algorithm, config
from .algorithm import *  # noqa: F401
from .config import *  # noqa: F401

__all__ = config.__all__ + algorithm.__all__


================================================
FILE: verl_distillation/verl/trainer/config/_generated_ppo_megatron_trainer.yaml
================================================
# This reference configuration yaml is automatically generated via 'scripts/generate_trainer_config.sh',
# which invokes 'python3 scripts/print_cfg.py --cfg job --config-name=ppo_megatron_trainer.yaml' to flatten
# the 'verl/trainer/config/ppo_megatron_trainer.yaml' config fields into a single file.
# Do not modify this file directly. The file is for reference only and is never loaded at runtime.
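# The flattened config below resolves interpolations like ${oc.select:path,default}
# lazily, at access time. A minimal sketch (not part of this repository, assumes
# the `omegaconf` package) of how oc.select falls back to its default when the
# referenced key is null or missing, kept as comments so this generated YAML
# stays parseable:
#
#   from omegaconf import OmegaConf
#
#   cfg = OmegaConf.create({
#       "global_profiler": {"tool": None, "save_path": "outputs/profile"},
#       "actor": {"profiler": {
#           "tool": "${oc.select:global_profiler.tool,null}",
#           "save_path": "${oc.select:global_profiler.save_path,null}",
#       }},
#   })
#   print(cfg.actor.profiler.tool)       # None (the selected value is null)
#   print(cfg.actor.profiler.save_path)  # outputs/profile (selected from global_profiler)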
actor_rollout_ref: actor: optim: _target_: verl.workers.config.McoreOptimizerConfig lr: 1.0e-06 lr_warmup_steps_ratio: 0.0 total_training_steps: -1 weight_decay: 0.01 lr_warmup_steps: -1 betas: - 0.9 - 0.999 clip_grad: 1.0 optimizer: adam lr_warmup_init: 0.0 lr_decay_steps: null lr_decay_style: constant min_lr: 0.0 weight_decay_incr_style: constant lr_wsd_decay_style: exponential lr_wsd_decay_steps: null use_checkpoint_opt_param_scheduler: false override_optimizer_config: {} megatron: _target_: verl.workers.config.McoreEngineConfig param_offload: false grad_offload: false optimizer_offload: false tensor_model_parallel_size: 1 expert_model_parallel_size: 1 expert_tensor_parallel_size: 1 pipeline_model_parallel_size: 1 virtual_pipeline_model_parallel_size: null context_parallel_size: 1 sequence_parallel: true use_distributed_optimizer: true use_dist_checkpointing: false dist_checkpointing_path: null seed: 42 override_ddp_config: {} override_transformer_config: recompute_granularity: null recompute_modules: - core_attn recompute_method: null recompute_num_layers: null attention_backend: flash override_mcore_model_config: {} use_mbridge: false forward_only: false _target_: verl.workers.config.McoreActorConfig strategy: megatron ppo_mini_batch_size: 256 ppo_micro_batch_size: null ppo_micro_batch_size_per_gpu: null use_dynamic_bsz: false ppo_max_token_len_per_gpu: 16384 clip_ratio: 0.2 clip_ratio_low: 0.2 clip_ratio_high: 0.2 freeze_vision_tower: false policy_loss: _target_: verl.workers.config.PolicyLossConfig loss_mode: vanilla clip_cov_ratio: 0.0002 clip_cov_lb: 1.0 clip_cov_ub: 5.0 kl_cov_ratio: 0.0002 ppo_kl_coef: 0.1 clip_ratio_c: 3.0 loss_agg_mode: token-mean entropy_coeff: 0 use_kl_loss: false use_torch_compile: true kl_loss_coef: 0.001 kl_loss_type: low_var_kl ppo_epochs: 1 shuffle: false checkpoint: _target_: verl.trainer.config.CheckpointConfig save_contents: - model - optimizer - extra load_contents: ${.save_contents} async_save: false use_fused_kernels: ${oc.select:actor_rollout_ref.model.use_fused_kernels,false} profiler: _target_: verl.utils.profiler.ProfilerConfig tool: ${oc.select:global_profiler.tool,null} enable: false all_ranks: false ranks: [] save_path: ${oc.select:global_profiler.save_path,null} tool_config: nsys: _target_: verl.utils.profiler.config.NsightToolConfig discrete: ${oc.select:global_profiler.global_tool_config.nsys.discrete} npu: _target_: verl.utils.profiler.config.NPUToolConfig contents: [] level: level1 analysis: true discrete: false torch: _target_: verl.utils.profiler.config.TorchProfilerToolConfig step_start: 0 step_end: null torch_memory: _target_: verl.utils.profiler.config.TorchMemoryToolConfig trace_alloc_max_entries: ${oc.select:global_profiler.global_tool_config.torch_memory.trace_alloc_max_entries,100000} stack_depth: ${oc.select:global_profiler.global_tool_config.torch_memory.stack_depth,32} data_loader_seed: null load_weight: true ref: strategy: megatron use_torch_compile: ${oc.select:actor_rollout_ref.actor.use_torch_compile,true} log_prob_micro_batch_size: null log_prob_micro_batch_size_per_gpu: null log_prob_use_dynamic_bsz: ${oc.select:actor_rollout_ref.actor.use_dynamic_bsz,false} log_prob_max_token_len_per_gpu: ${oc.select:actor_rollout_ref.actor.ppo_max_token_len_per_gpu,16384} profiler: _target_: verl.utils.profiler.ProfilerConfig tool: ${oc.select:global_profiler.tool,null} enable: false all_ranks: false ranks: [] save_path: ${oc.select:global_profiler.save_path,null} tool_config: nsys: _target_: 
verl.utils.profiler.config.NsightToolConfig discrete: ${oc.select:global_profiler.global_tool_config.nsys.discrete} npu: _target_: verl.utils.profiler.config.NPUToolConfig contents: [] level: level1 analysis: true discrete: false torch: _target_: verl.utils.profiler.config.TorchProfilerToolConfig step_start: 0 step_end: null torch_memory: _target_: verl.utils.profiler.config.TorchMemoryToolConfig trace_alloc_max_entries: ${oc.select:global_profiler.global_tool_config.torch_memory.trace_alloc_max_entries,100000} stack_depth: ${oc.select:global_profiler.global_tool_config.torch_memory.stack_depth,32} megatron: _target_: verl.workers.config.MegatronEngineConfig param_offload: false grad_offload: false optimizer_offload: false tensor_model_parallel_size: 1 expert_model_parallel_size: 1 expert_tensor_parallel_size: 1 pipeline_model_parallel_size: 1 virtual_pipeline_model_parallel_size: null context_parallel_size: 1 sequence_parallel: true use_distributed_optimizer: true use_dist_checkpointing: false dist_checkpointing_path: null seed: ${oc.select:actor_rollout_ref.actor.megatron.seed,42} override_ddp_config: {} override_transformer_config: ${oc.select:actor_rollout_ref.actor.megatron.override_transformer_config,{}} override_mcore_model_config: {} use_mbridge: ${oc.select:actor_rollout_ref.actor.megatron.use_mbridge,False} forward_only: false load_weight: true rollout: _target_: verl.workers.config.RolloutConfig name: ??? mode: sync temperature: 1.0 top_k: -1 top_p: 1 prompt_length: ${oc.select:data.max_prompt_length,512} response_length: ${oc.select:data.max_response_length,512} dtype: bfloat16 gpu_memory_utilization: 0.5 ignore_eos: false enforce_eager: false cudagraph_capture_sizes: null free_cache_engine: true tensor_model_parallel_size: 2 data_parallel_size: 1 expert_parallel_size: 1 pipeline_model_parallel_size: 1 max_num_batched_tokens: 8192 max_model_len: null max_num_seqs: 1024 enable_chunked_prefill: true enable_prefix_caching: true load_format: dummy log_prob_micro_batch_size: null log_prob_micro_batch_size_per_gpu: null log_prob_use_dynamic_bsz: ${oc.select:actor_rollout_ref.actor.use_dynamic_bsz,false} log_prob_max_token_len_per_gpu: ${oc.select:actor_rollout_ref.actor.ppo_max_token_len_per_gpu,16384} disable_log_stats: true do_sample: true 'n': 1 over_sample_rate: 0 multi_stage_wake_up: false engine_kwargs: vllm: {} sglang: {} val_kwargs: _target_: verl.workers.config.SamplingConfig top_k: -1 top_p: 1.0 temperature: 0 'n': 1 do_sample: false multi_turn: _target_: verl.workers.config.MultiTurnConfig enable: false max_assistant_turns: null tool_config_path: null max_user_turns: null max_parallel_calls: 1 max_tool_response_length: 256 tool_response_truncate_side: middle interaction_config_path: null use_inference_chat_template: false tokenization_sanity_check_mode: strict format: hermes num_repeat_rollouts: null calculate_log_probs: false agent: _target_: verl.workers.config.AgentLoopConfig num_workers: 8 default_agent_loop: single_turn_agent agent_loop_config_path: null custom_async_server: _target_: verl.workers.config.CustomAsyncServerConfig path: null name: null update_weights_bucket_megabytes: 512 trace: _target_: verl.workers.config.TraceConfig backend: null token2text: false skip_rollout: false skip_dump_dir: /tmp/rollout_dump skip_tokenizer_init: true profiler: _target_: verl.utils.profiler.ProfilerConfig tool: ${oc.select:global_profiler.tool,null} enable: ${oc.select:actor_rollout_ref.actor.profiler.enable,false} all_ranks: 
${oc.select:actor_rollout_ref.actor.profiler.all_ranks,false} ranks: ${oc.select:actor_rollout_ref.actor.profiler.ranks,[]} save_path: ${oc.select:global_profiler.save_path,null} tool_config: ${oc.select:actor_rollout_ref.actor.profiler.tool_config,null} layer_name_map: qkv_layer_name: qkv gate_proj_layer_name: gate_up hybrid_engine: true nccl_timeout: 600 model: path: ~/models/deepseek-llm-7b-chat custom_chat_template: null external_lib: null override_config: model_config: {} moe_config: freeze_moe_router: false use_fused_kernels: false trust_remote_code: false use_remove_padding: false data: tokenizer: null use_shm: false train_files: ~/data/rlhf/gsm8k/train.parquet val_files: ~/data/rlhf/gsm8k/test.parquet train_max_samples: -1 val_max_samples: -1 prompt_key: prompt reward_fn_key: data_source max_prompt_length: 512 max_response_length: 512 train_batch_size: 1024 val_batch_size: null tool_config_path: ${oc.select:actor_rollout_ref.rollout.multi_turn.tool_config_path, null} return_raw_input_ids: false return_raw_chat: false return_full_prompt: false shuffle: true seed: null dataloader_num_workers: 8 image_patch_size: 14 validation_shuffle: false filter_overlong_prompts: false filter_overlong_prompts_workers: 1 truncation: error image_key: images video_key: videos trust_remote_code: false custom_cls: path: null name: null return_multi_modal_inputs: true sampler: class_path: null class_name: null datagen: path: null name: null apply_chat_template_kwargs: {} critic: optim: _target_: verl.workers.config.McoreOptimizerConfig lr: 1.0e-05 lr_warmup_steps_ratio: 0.0 total_training_steps: -1 weight_decay: 0.01 lr_warmup_steps: -1 betas: - 0.9 - 0.999 clip_grad: 1.0 optimizer: adam lr_warmup_init: 0.0 lr_decay_steps: null lr_decay_style: constant min_lr: 0.0 weight_decay_incr_style: constant lr_wsd_decay_style: exponential lr_wsd_decay_steps: null use_checkpoint_opt_param_scheduler: false override_optimizer_config: {} megatron: _target_: verl.workers.config.McoreEngineConfig param_offload: false grad_offload: false optimizer_offload: false tensor_model_parallel_size: 1 expert_model_parallel_size: 1 expert_tensor_parallel_size: 1 pipeline_model_parallel_size: 1 virtual_pipeline_model_parallel_size: null context_parallel_size: 1 sequence_parallel: true use_distributed_optimizer: true use_dist_checkpointing: false dist_checkpointing_path: null seed: 42 override_ddp_config: {} override_transformer_config: recompute_granularity: null recompute_modules: - core_attn recompute_method: null recompute_num_layers: null attention_backend: flash override_mcore_model_config: {} use_mbridge: false forward_only: false _target_: verl.workers.config.McoreCriticConfig rollout_n: ${oc.select:actor_rollout_ref.rollout.n,1} strategy: megatron enable: null model: path: ~/models/deepseek-llm-7b-chat tokenizer_path: ${oc.select:actor_rollout_ref.model.path,"~/models/deepseek-llm-7b-chat"} override_config: model_config: {} moe_config: freeze_moe_router: false external_lib: ${oc.select:actor_rollout_ref.model.external_lib,null} trust_remote_code: ${oc.select:actor_rollout_ref.model.trust_remote_code,false} _target_: verl.trainer.config.BaseModelConfig ppo_mini_batch_size: ${oc.select:actor_rollout_ref.actor.ppo_mini_batch_size,256} ppo_micro_batch_size: null ppo_micro_batch_size_per_gpu: ${oc.select:.ppo_micro_batch_size,null} use_dynamic_bsz: ${oc.select:actor_rollout_ref.actor.use_dynamic_bsz,false} ppo_max_token_len_per_gpu: 32768 forward_max_token_len_per_gpu: ${.ppo_max_token_len_per_gpu} ppo_epochs: 
${oc.select:actor_rollout_ref.actor.ppo_epochs,1} shuffle: ${oc.select:actor_rollout_ref.actor.shuffle,false} cliprange_value: 0.5 loss_agg_mode: ${oc.select:actor_rollout_ref.actor.loss_agg_mode,token-mean} checkpoint: _target_: verl.trainer.config.CheckpointConfig save_contents: - model - optimizer - extra load_contents: ${.save_contents} async_save: false profiler: _target_: verl.utils.profiler.ProfilerConfig tool: ${oc.select:global_profiler.tool,null} enable: false all_ranks: false ranks: [] save_path: ${oc.select:global_profiler.save_path,null} tool_config: nsys: _target_: verl.utils.profiler.config.NsightToolConfig discrete: ${oc.select:global_profiler.global_tool_config.nsys.discrete} npu: _target_: verl.utils.profiler.config.NPUToolConfig contents: [] level: level1 analysis: true discrete: false torch: _target_: verl.utils.profiler.config.TorchProfilerToolConfig step_start: 0 step_end: null torch_memory: _target_: verl.utils.profiler.config.TorchMemoryToolConfig trace_alloc_max_entries: ${oc.select:global_profiler.global_tool_config.torch_memory.trace_alloc_max_entries,100000} stack_depth: ${oc.select:global_profiler.global_tool_config.torch_memory.stack_depth,32} nccl_timeout: 600 load_weight: true data_loader_seed: ${oc.select:actor_rollout_ref.actor.data_loader_seed,null} reward_model: enable: false enable_resource_pool: false n_gpus_per_node: 0 nnodes: 0 strategy: megatron model: input_tokenizer: ${actor_rollout_ref.model.path} path: ~/models/FsfairX-LLaMA3-RM-v0.1 external_lib: ${actor_rollout_ref.model.external_lib} trust_remote_code: false micro_batch_size: null micro_batch_size_per_gpu: null max_length: null use_dynamic_bsz: ${critic.use_dynamic_bsz} forward_max_token_len_per_gpu: ${critic.forward_max_token_len_per_gpu} reward_manager: naive launch_reward_fn_async: false sandbox_fusion: url: null max_concurrent: 64 memory_limit_mb: 1024 profiler: _target_: verl.utils.profiler.ProfilerConfig tool: ${oc.select:global_profiler.tool,null} enable: false all_ranks: false ranks: [] save_path: ${oc.select:global_profiler.save_path,null} tool_config: ${oc.select:actor_rollout_ref.actor.profiler.tool_config,null} nccl_timeout: 600 megatron: _target_: verl.workers.config.MegatronEngineConfig param_offload: false tensor_model_parallel_size: 1 expert_model_parallel_size: 1 expert_tensor_parallel_size: 1 pipeline_model_parallel_size: 1 virtual_pipeline_model_parallel_size: null context_parallel_size: 1 sequence_parallel: true use_distributed_optimizer: false use_dist_checkpointing: false dist_checkpointing_path: null seed: ${oc.select:actor_rollout_ref.actor.megatron.seed,42} override_transformer_config: ${oc.select:actor_rollout_ref.actor.megatron.override_transformer_config,{}} use_mbridge: ${oc.select:actor_rollout_ref.actor.megatron.use_mbridge,False} load_weight: true custom_reward_function: path: null name: compute_score algorithm: _target_: verl.trainer.config.AlgoConfig gamma: 1.0 lam: 1.0 adv_estimator: gae norm_adv_by_std_in_grpo: true use_kl_in_reward: false kl_penalty: kl kl_ctrl: _target_: verl.trainer.config.KLControlConfig type: fixed kl_coef: 0.001 horizon: 10000 target_kl: 0.1 use_pf_ppo: false pf_ppo: reweight_method: pow weight_pow: 2.0 rollout_is_threshold: null rollout_is_threshold_lower: null rollout_is_level: token rollout_is_mode: truncate rollout_is_veto_threshold: null rollout_is: false trainer: balance_batch: true total_epochs: 30 total_training_steps: null project_name: verl_examples experiment_name: gsm8k logger: - console - wandb log_val_generations: 0 
nnodes: 1 n_gpus_per_node: 8 save_freq: -1 esi_redundant_time: 0 resume_mode: auto resume_from_path: null del_local_ckpt_after_load: false val_before_train: true test_freq: -1 critic_warmup: 0 default_hdfs_dir: null default_local_dir: checkpoints/${trainer.project_name}/${trainer.experiment_name} max_actor_ckpt_to_keep: null max_critic_ckpt_to_keep: null ray_wait_register_center_timeout: 300 device: cuda rollout_data_dir: null global_profiler: _target_: verl.utils.profiler.ProfilerConfig tool: null steps: null profile_continuous_steps: false save_path: outputs/profile global_tool_config: nsys: discrete: false controller_nsight_options: trace: cuda,nvtx,cublas,ucx cuda-memory-usage: 'true' cuda-graph-trace: graph worker_nsight_options: trace: cuda,nvtx,cublas,ucx cuda-memory-usage: 'true' cuda-graph-trace: graph capture-range: cudaProfilerApi capture-range-end: null kill: none torch_memory: trace_alloc_max_entries: 100000 stack_depth: 32 context: all stacks: all kw_args: {} transfer_queue: enable: false ray_kwargs: ray_init: num_cpus: null timeline_json_file: null ================================================ FILE: verl_distillation/verl/trainer/config/_generated_ppo_trainer.yaml ================================================ # This reference configration yaml is automatically generated via 'scripts/generate_trainer_config.sh' # in which it invokes 'python3 scripts/print_cfg.py --cfg job ' to flatten the 'verl/trainer/config/ppo_trainer.yaml' config fields into a single file. # Do not modify this file directly. # The file is usually only for reference and never used. actor_rollout_ref: actor: optim: _target_: verl.workers.config.FSDPOptimizerConfig optimizer: AdamW optimizer_impl: torch.optim lr: 1.0e-06 lr_warmup_steps_ratio: 0.0 total_training_steps: -1 weight_decay: 0.01 lr_warmup_steps: -1 betas: - 0.9 - 0.999 clip_grad: 1.0 min_lr_ratio: 0.0 num_cycles: 0.5 lr_scheduler_type: constant warmup_style: null override_optimizer_config: null fsdp_config: _target_: verl.workers.config.FSDPEngineConfig wrap_policy: min_num_params: 0 param_offload: false optimizer_offload: false offload_policy: false reshard_after_forward: true fsdp_size: -1 forward_prefetch: false model_dtype: fp32 use_orig_params: false ulysses_sequence_parallel_size: 1 entropy_from_logits_with_chunking: false use_torch_compile: true entropy_checkpointing: false forward_only: false strategy: fsdp _target_: verl.workers.config.FSDPActorConfig strategy: fsdp ppo_mini_batch_size: 256 ppo_micro_batch_size: null ppo_micro_batch_size_per_gpu: null use_dynamic_bsz: false ppo_max_token_len_per_gpu: 16384 clip_ratio: 0.2 clip_ratio_low: 0.2 clip_ratio_high: 0.2 freeze_vision_tower: false policy_loss: _target_: verl.workers.config.PolicyLossConfig loss_mode: vanilla clip_cov_ratio: 0.0002 clip_cov_lb: 1.0 clip_cov_ub: 5.0 kl_cov_ratio: 0.0002 ppo_kl_coef: 0.1 clip_ratio_c: 3.0 loss_agg_mode: token-mean entropy_coeff: 0 use_kl_loss: false use_torch_compile: true kl_loss_coef: 0.001 kl_loss_type: low_var_kl ppo_epochs: 1 shuffle: false checkpoint: _target_: verl.trainer.config.CheckpointConfig save_contents: - model - optimizer - extra load_contents: ${.save_contents} async_save: false use_fused_kernels: ${oc.select:actor_rollout_ref.model.use_fused_kernels,false} profiler: _target_: verl.utils.profiler.ProfilerConfig tool: ${oc.select:global_profiler.tool,null} enable: false all_ranks: false ranks: [] save_path: ${oc.select:global_profiler.save_path,null} tool_config: nsys: _target_: verl.utils.profiler.config.NsightToolConfig 
discrete: ${oc.select:global_profiler.global_tool_config.nsys.discrete} npu: _target_: verl.utils.profiler.config.NPUToolConfig contents: [] level: level1 analysis: true discrete: false torch: _target_: verl.utils.profiler.config.TorchProfilerToolConfig step_start: 0 step_end: null torch_memory: _target_: verl.utils.profiler.config.TorchMemoryToolConfig trace_alloc_max_entries: ${oc.select:global_profiler.global_tool_config.torch_memory.trace_alloc_max_entries,100000} stack_depth: ${oc.select:global_profiler.global_tool_config.torch_memory.stack_depth,32} grad_clip: 1.0 ulysses_sequence_parallel_size: 1 entropy_from_logits_with_chunking: false entropy_checkpointing: false use_remove_padding: ${oc.select:actor_rollout_ref.model.use_remove_padding,false} ref: strategy: ${actor_rollout_ref.actor.strategy} use_torch_compile: ${oc.select:actor_rollout_ref.actor.use_torch_compile,true} log_prob_micro_batch_size: null log_prob_micro_batch_size_per_gpu: null log_prob_use_dynamic_bsz: ${oc.select:actor_rollout_ref.actor.use_dynamic_bsz,false} log_prob_max_token_len_per_gpu: ${oc.select:actor_rollout_ref.actor.ppo_max_token_len_per_gpu,16384} profiler: _target_: verl.utils.profiler.ProfilerConfig tool: ${oc.select:global_profiler.tool,null} enable: false all_ranks: false ranks: [] save_path: ${oc.select:global_profiler.save_path,null} tool_config: nsys: _target_: verl.utils.profiler.config.NsightToolConfig discrete: ${oc.select:global_profiler.global_tool_config.nsys.discrete} npu: _target_: verl.utils.profiler.config.NPUToolConfig contents: [] level: level1 analysis: true discrete: false torch: _target_: verl.utils.profiler.config.TorchProfilerToolConfig step_start: 0 step_end: null torch_memory: _target_: verl.utils.profiler.config.TorchMemoryToolConfig trace_alloc_max_entries: ${oc.select:global_profiler.global_tool_config.torch_memory.trace_alloc_max_entries,100000} stack_depth: ${oc.select:global_profiler.global_tool_config.torch_memory.stack_depth,32} fsdp_config: _target_: verl.workers.config.FSDPEngineConfig wrap_policy: min_num_params: 0 param_offload: false optimizer_offload: false offload_policy: false reshard_after_forward: true fsdp_size: -1 forward_prefetch: false model_dtype: fp32 use_orig_params: false ulysses_sequence_parallel_size: 1 entropy_from_logits_with_chunking: false use_torch_compile: true entropy_checkpointing: false forward_only: false strategy: fsdp model: null ulysses_sequence_parallel_size: ${oc.select:actor_rollout_ref.actor.ulysses_sequence_parallel_size,1} entropy_from_logits_with_chunking: false entropy_checkpointing: false rollout: _target_: verl.workers.config.RolloutConfig name: ??? 
mode: sync temperature: 1.0 top_k: -1 top_p: 1 prompt_length: ${oc.select:data.max_prompt_length,512} response_length: ${oc.select:data.max_response_length,512} dtype: bfloat16 gpu_memory_utilization: 0.5 ignore_eos: false enforce_eager: false cudagraph_capture_sizes: null free_cache_engine: true tensor_model_parallel_size: 2 data_parallel_size: 1 expert_parallel_size: 1 pipeline_model_parallel_size: 1 max_num_batched_tokens: 8192 max_model_len: null max_num_seqs: 1024 enable_chunked_prefill: true enable_prefix_caching: true load_format: dummy log_prob_micro_batch_size: null log_prob_micro_batch_size_per_gpu: null log_prob_use_dynamic_bsz: ${oc.select:actor_rollout_ref.actor.use_dynamic_bsz,false} log_prob_max_token_len_per_gpu: ${oc.select:actor_rollout_ref.actor.ppo_max_token_len_per_gpu,16384} disable_log_stats: true do_sample: true 'n': 1 over_sample_rate: 0 multi_stage_wake_up: false engine_kwargs: vllm: {} sglang: {} val_kwargs: _target_: verl.workers.config.SamplingConfig top_k: -1 top_p: 1.0 temperature: 0 'n': 1 do_sample: false multi_turn: _target_: verl.workers.config.MultiTurnConfig enable: false max_assistant_turns: null tool_config_path: null max_user_turns: null max_parallel_calls: 1 max_tool_response_length: 256 tool_response_truncate_side: middle interaction_config_path: null use_inference_chat_template: false tokenization_sanity_check_mode: strict format: hermes num_repeat_rollouts: null calculate_log_probs: false agent: _target_: verl.workers.config.AgentLoopConfig num_workers: 8 default_agent_loop: single_turn_agent agent_loop_config_path: null custom_async_server: _target_: verl.workers.config.CustomAsyncServerConfig path: null name: null update_weights_bucket_megabytes: 512 trace: _target_: verl.workers.config.TraceConfig backend: null token2text: false skip_rollout: false skip_dump_dir: /tmp/rollout_dump skip_tokenizer_init: true profiler: _target_: verl.utils.profiler.ProfilerConfig tool: ${oc.select:global_profiler.tool,null} enable: ${oc.select:actor_rollout_ref.actor.profiler.enable,false} all_ranks: ${oc.select:actor_rollout_ref.actor.profiler.all_ranks,false} ranks: ${oc.select:actor_rollout_ref.actor.profiler.ranks,[]} save_path: ${oc.select:global_profiler.save_path,null} tool_config: ${oc.select:actor_rollout_ref.actor.profiler.tool_config,null} layered_summon: false model: _target_: verl.workers.config.HFModelConfig path: ~/models/deepseek-llm-7b-chat hf_config_path: null tokenizer_path: null use_shm: false trust_remote_code: false custom_chat_template: null external_lib: null override_config: {} enable_gradient_checkpointing: true enable_activation_offload: false use_remove_padding: false lora_rank: 0 lora_alpha: 16 target_modules: all-linear exclude_modules: null lora_adapter_path: null use_liger: false use_fused_kernels: false fused_kernel_options: impl_backend: torch hybrid_engine: true nccl_timeout: 600 data: tokenizer: null use_shm: false train_files: ~/data/rlhf/gsm8k/train.parquet val_files: ~/data/rlhf/gsm8k/test.parquet train_max_samples: -1 val_max_samples: -1 prompt_key: prompt reward_fn_key: data_source max_prompt_length: 512 max_response_length: 512 train_batch_size: 1024 val_batch_size: null tool_config_path: ${oc.select:actor_rollout_ref.rollout.multi_turn.tool_config_path, null} return_raw_input_ids: false return_raw_chat: false return_full_prompt: false shuffle: true seed: null dataloader_num_workers: 8 image_patch_size: 14 validation_shuffle: false filter_overlong_prompts: false filter_overlong_prompts_workers: 1 truncation: error 
image_key: images video_key: videos trust_remote_code: false custom_cls: path: null name: null return_multi_modal_inputs: true sampler: class_path: null class_name: null datagen: path: null name: null apply_chat_template_kwargs: {} critic: optim: _target_: verl.workers.config.FSDPOptimizerConfig optimizer: AdamW optimizer_impl: torch.optim lr: 1.0e-05 lr_warmup_steps_ratio: 0.0 total_training_steps: -1 weight_decay: 0.01 lr_warmup_steps: -1 betas: - 0.9 - 0.999 clip_grad: 1.0 min_lr_ratio: 0.0 num_cycles: 0.5 lr_scheduler_type: constant warmup_style: null override_optimizer_config: null model: fsdp_config: _target_: verl.workers.config.FSDPEngineConfig wrap_policy: min_num_params: 0 param_offload: false optimizer_offload: false offload_policy: false reshard_after_forward: true fsdp_size: -1 forward_prefetch: false model_dtype: fp32 use_orig_params: false ulysses_sequence_parallel_size: 1 entropy_from_logits_with_chunking: false use_torch_compile: true entropy_checkpointing: false forward_only: false strategy: fsdp path: ~/models/deepseek-llm-7b-chat tokenizer_path: ${oc.select:actor_rollout_ref.model.path,"~/models/deepseek-llm-7b-chat"} override_config: {} external_lib: ${oc.select:actor_rollout_ref.model.external_lib,null} trust_remote_code: ${oc.select:actor_rollout_ref.model.trust_remote_code,false} _target_: verl.workers.config.FSDPCriticModelCfg use_shm: false enable_gradient_checkpointing: true enable_activation_offload: false use_remove_padding: false lora_rank: 0 lora_alpha: 16 target_modules: all-linear _target_: verl.workers.config.FSDPCriticConfig rollout_n: ${oc.select:actor_rollout_ref.rollout.n,1} strategy: fsdp enable: null ppo_mini_batch_size: ${oc.select:actor_rollout_ref.actor.ppo_mini_batch_size,256} ppo_micro_batch_size: null ppo_micro_batch_size_per_gpu: ${oc.select:.ppo_micro_batch_size,null} use_dynamic_bsz: ${oc.select:actor_rollout_ref.actor.use_dynamic_bsz,false} ppo_max_token_len_per_gpu: 32768 forward_max_token_len_per_gpu: ${.ppo_max_token_len_per_gpu} ppo_epochs: ${oc.select:actor_rollout_ref.actor.ppo_epochs,1} shuffle: ${oc.select:actor_rollout_ref.actor.shuffle,false} cliprange_value: 0.5 loss_agg_mode: ${oc.select:actor_rollout_ref.actor.loss_agg_mode,token-mean} checkpoint: _target_: verl.trainer.config.CheckpointConfig save_contents: - model - optimizer - extra load_contents: ${.save_contents} async_save: false profiler: _target_: verl.utils.profiler.ProfilerConfig tool: ${oc.select:global_profiler.tool,null} enable: false all_ranks: false ranks: [] save_path: ${oc.select:global_profiler.save_path,null} tool_config: nsys: _target_: verl.utils.profiler.config.NsightToolConfig discrete: ${oc.select:global_profiler.global_tool_config.nsys.discrete} npu: _target_: verl.utils.profiler.config.NPUToolConfig contents: [] level: level1 analysis: true discrete: false torch: _target_: verl.utils.profiler.config.TorchProfilerToolConfig step_start: 0 step_end: null torch_memory: _target_: verl.utils.profiler.config.TorchMemoryToolConfig trace_alloc_max_entries: ${oc.select:global_profiler.global_tool_config.torch_memory.trace_alloc_max_entries,100000} stack_depth: ${oc.select:global_profiler.global_tool_config.torch_memory.stack_depth,32} forward_micro_batch_size: ${oc.select:.ppo_micro_batch_size,null} forward_micro_batch_size_per_gpu: ${oc.select:.ppo_micro_batch_size_per_gpu,null} ulysses_sequence_parallel_size: 1 grad_clip: 1.0 reward_model: enable: false enable_resource_pool: false n_gpus_per_node: 0 nnodes: 0 strategy: fsdp model: input_tokenizer: 
${actor_rollout_ref.model.path} path: ~/models/FsfairX-LLaMA3-RM-v0.1 external_lib: ${actor_rollout_ref.model.external_lib} trust_remote_code: false use_shm: false use_remove_padding: false use_fused_kernels: ${actor_rollout_ref.model.use_fused_kernels} fsdp_config: _target_: verl.workers.config.FSDPEngineConfig wrap_policy: min_num_params: 0 param_offload: false reshard_after_forward: true fsdp_size: -1 forward_prefetch: false micro_batch_size: null micro_batch_size_per_gpu: null max_length: null use_dynamic_bsz: ${critic.use_dynamic_bsz} forward_max_token_len_per_gpu: ${critic.forward_max_token_len_per_gpu} reward_manager: naive launch_reward_fn_async: false sandbox_fusion: url: null max_concurrent: 64 memory_limit_mb: 1024 profiler: _target_: verl.utils.profiler.ProfilerConfig tool: ${oc.select:global_profiler.tool,null} enable: false all_ranks: false ranks: [] save_path: ${oc.select:global_profiler.save_path,null} tool_config: ${oc.select:actor_rollout_ref.actor.profiler.tool_config,null} ulysses_sequence_parallel_size: 1 custom_reward_function: path: null name: compute_score algorithm: _target_: verl.trainer.config.AlgoConfig gamma: 1.0 lam: 1.0 adv_estimator: gae norm_adv_by_std_in_grpo: true use_kl_in_reward: false kl_penalty: kl kl_ctrl: _target_: verl.trainer.config.KLControlConfig type: fixed kl_coef: 0.001 horizon: 10000 target_kl: 0.1 use_pf_ppo: false pf_ppo: reweight_method: pow weight_pow: 2.0 rollout_is_threshold: null rollout_is_threshold_lower: null rollout_is_level: token rollout_is_mode: truncate rollout_is_veto_threshold: null rollout_is: false trainer: balance_batch: true total_epochs: 30 total_training_steps: null project_name: verl_examples experiment_name: gsm8k logger: - console - wandb log_val_generations: 0 rollout_data_dir: null validation_data_dir: null nnodes: 1 n_gpus_per_node: 8 save_freq: -1 esi_redundant_time: 0 resume_mode: auto resume_from_path: null val_before_train: true val_only: false test_freq: -1 critic_warmup: 0 default_hdfs_dir: null del_local_ckpt_after_load: false default_local_dir: checkpoints/${trainer.project_name}/${trainer.experiment_name} max_actor_ckpt_to_keep: null max_critic_ckpt_to_keep: null ray_wait_register_center_timeout: 300 device: cuda use_legacy_worker_impl: auto global_profiler: _target_: verl.utils.profiler.ProfilerConfig tool: null steps: null profile_continuous_steps: false save_path: outputs/profile global_tool_config: nsys: _target_: verl.utils.profiler.config.NsightToolConfig discrete: false controller_nsight_options: trace: cuda,nvtx,cublas,ucx cuda-memory-usage: 'true' cuda-graph-trace: graph worker_nsight_options: trace: cuda,nvtx,cublas,ucx cuda-memory-usage: 'true' cuda-graph-trace: graph capture-range: cudaProfilerApi capture-range-end: null kill: none torch_memory: trace_alloc_max_entries: 100000 stack_depth: 32 context: all stacks: all kw_args: {} transfer_queue: enable: false ray_kwargs: ray_init: num_cpus: null timeline_json_file: null ================================================ FILE: verl_distillation/verl/trainer/config/actor/actor.yaml ================================================ # Format checks enforced on CI: # 1. Comments must appear above each field. # 2. There must be a blank line between each field. # 3. Inline comments (after a field on the same line) are not allowed. # 4. Indentation level is respected for nested fields. # Target class for this configuration _target_: verl.workers.config.ActorConfig # the abstract actor configs # fsdp, fsdp2 or megatron. must be set. strategy: ??? 
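# In OmegaConf, `???` marks a mandatory value: reading the field before it is
# overridden raises MissingMandatoryValue. A minimal sketch (not part of this
# repository, assumes the `omegaconf` package), kept as comments so the YAML
# stays parseable:
#
#   from omegaconf import OmegaConf
#
#   cfg = OmegaConf.create({"strategy": "???"})
#   print(OmegaConf.is_missing(cfg, "strategy"))  # True
#   cfg.strategy = "fsdp"   # e.g. supplied via CLI overrides at launch time
#   print(cfg.strategy)     # fsdp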
# Split each sample into sub-batches of this size for PPO
ppo_mini_batch_size: 256

# [Deprecated] Global micro batch size
ppo_micro_batch_size: null

# Local per-GPU micro batch size
ppo_micro_batch_size_per_gpu: null

# Whether to automatically adjust batch size at runtime
# oc.select: the default val for ref.log_prob_use_dynamic_bsz
use_dynamic_bsz: false

# Max tokens per GPU in one PPO batch; affects gradient accumulation
# Typically it should be: n * ${data.max_prompt_length} + ${data.max_response_length}
# oc.select: the default val for ref.log_prob_max_token_len_per_gpu
ppo_max_token_len_per_gpu: 16384

# PPO clip ratio
clip_ratio: 0.2

# Lower bound for asymmetric clipping (used in dual-clip PPO)
clip_ratio_low: 0.2

# Upper bound for asymmetric clipping (used in dual-clip PPO)
clip_ratio_high: 0.2

# Whether to freeze the vision tower; if set to true, the vision model is frozen during training
freeze_vision_tower: false

# policy loss config
policy_loss:

  # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs
  _target_: verl.workers.config.PolicyLossConfig

  # Loss function mode: vanilla / clip-cov / kl-cov / gpg from https://arxiv.org/abs/2505.22617
  loss_mode: "vanilla"

  # Ratio of tokens to be clipped for clip-cov loss
  clip_cov_ratio: 0.0002

  # Lower bound for clip-cov loss
  clip_cov_lb: 1.0

  # Upper bound for clip-cov loss
  clip_cov_ub: 5.0

  # Ratio of tokens to which the KL penalty is applied for kl-cov loss
  kl_cov_ratio: 0.0002

  # KL divergence penalty coefficient
  ppo_kl_coef: 0.1

# Constant C in Dual-clip PPO; clips when advantage < 0 and ratio > C
clip_ratio_c: 3.0

# Loss aggregation mode: "token-mean", "seq-mean-token-sum", or "seq-mean-token-mean"
loss_agg_mode: token-mean

# Entropy regularization coefficient in PPO loss
entropy_coeff: 0

# Whether to use KL loss instead of KL reward penalty. True for GRPO
use_kl_loss: false

# Whether to use torch.compile()
# oc.select: the default val for ref.use_torch_compile
use_torch_compile: true

# float val to replace the ref_log_prob
ref_log_prob_replace_val: -10.0

# KL loss coefficient when use_kl_loss is enabled. For GRPO
kl_loss_coef: 0.001

# Type of KL divergence loss. Options: "kl"(k1), "abs", "mse"(k2), "low_var_kl"(k3), "full"
kl_loss_type: low_var_kl

# Number of PPO epochs per batch
ppo_epochs: 1

# Shuffle training data across PPO epochs
shuffle: false

# checkpoint configs
checkpoint:

  # Target dataclass for this configuration
  _target_: verl.trainer.config.CheckpointConfig

  # What to include in saved checkpoints
  # with 'hf_model' you can save the whole model in HF format; by default only the sharded
  # model checkpoint is saved, to save space
  save_contents: ['model', 'optimizer', 'extra']

  # For more flexibility, you can specify the contents to load from the checkpoint.
  # .xxx refers to the local variable xxx at the same level of the hierarchy, similar to a python pkg
  load_contents: ${.save_contents}

  # Whether to save checkpoints asynchronously. Only effective for Megatron as of now.
  async_save: False

# optimizer configs
optim:

  # Learning rate
  lr: 1e-6

  # Warmup steps ratio (used if lr_warmup_steps is 0 or negative)
  lr_warmup_steps_ratio: 0.0

  # Total training steps (must be overridden at runtime)
  total_training_steps: -1

  # Weight decay
  weight_decay: 0.01
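  # Example (illustrative): with lr_warmup_steps left at -1 below, the warmup
  # length is derived from the ratio; e.g. lr_warmup_steps_ratio: 0.03 with
  # total_training_steps: 1000 yields roughly 30 warmup steps.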
  # Prioritized. None, 0 or negative values mean delegating to lr_warmup_steps_ratio.
  lr_warmup_steps: -1

# Whether to use custom fused kernels (e.g., FlashAttention, fused MLP)
use_fused_kernels: ${oc.select:actor_rollout_ref.model.use_fused_kernels,false}

# profile the actor model in `update_policy`
profiler:

  # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs
  _target_: verl.utils.profiler.ProfilerConfig

  # profiler tool, defaults to profiler.tool in the global config
  # choices: nsys, npu, torch
  tool: ${oc.select:global_profiler.tool,null}

  # whether to enable profiling on the Actor
  enable: False

  # Whether to profile all ranks.
  all_ranks: False

  # The ranks that will be profiled. [] or [0,1,...]
  ranks: []

  # profile results saving path
  save_path: ${oc.select:global_profiler.save_path,null}

  # tool configs specific to this role
  tool_config:

    # nsys tool config
    nsys:

      # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs
      _target_: verl.utils.profiler.config.NsightToolConfig

      # True: each task has its own database; False: all tasks in one training step share one database.
      discrete: ${oc.select:global_profiler.global_tool_config.nsys.discrete}

    # npu config
    npu:

      # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs
      _target_: verl.utils.profiler.config.NPUToolConfig

      # Contents to profile, can be empty
      # options: npu, cpu, memory, shapes, module, stack
      contents: []

      # Collection level, optional values: level_none, level0, level1, level2.
      level: "level1"

      # Whether to automatically parse the data.
      analysis: True

      # True: each task has its own database; False: all tasks in one training step share one database.
      discrete: False

    # torch profiler config
    torch:

      # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs
      _target_: verl.utils.profiler.config.TorchProfilerToolConfig

      # mini-batch at which profiling starts during training
      # NOTICE: unlike the global steps config, which refers to iterations,
      # this field refers to mini-batches
      step_start: 0

      # mini-batch at which profiling stops during training
      step_end: null

    # torch memory profiler config
    torch_memory:

      # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs
      _target_: verl.utils.profiler.config.TorchMemoryToolConfig

      # Maximum number of memory allocation entries to track
      trace_alloc_max_entries: ${oc.select:global_profiler.global_tool_config.torch_memory.trace_alloc_max_entries,100000}

      # Stack trace depth for memory allocations
      stack_depth: ${oc.select:global_profiler.global_tool_config.torch_memory.stack_depth,32}

================================================
FILE: verl_distillation/verl/trainer/config/actor/dp_actor.yaml
================================================
# Format checks enforced on CI:
# 1. Comments must appear above each field.
# 2. There must be a blank line between each field.
# 3. Inline comments (after a field on the same line) are not allowed.
# 4. Indentation level is respected for nested fields.

# defaults specify the default config from each component
defaults:

  # fsdp optimizer config
  - ../optim@optim: fsdp

  # fsdp engine config
  - ../engine@fsdp_config: fsdp

  # dp actor config, inheriting from trainer/config/actor/actor.yaml
  - actor

  # load the reference default config, then apply the fields in the current yaml
  - _self_

# Target class for this configuration
_target_: verl.workers.config.FSDPActorConfig

# TODO(haibin.lin): switch to fsdp2
strategy: fsdp
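# Example (illustrative): the defaults list above mounts trainer/config/optim/fsdp.yaml
# under the `optim` key and trainer/config/engine/fsdp.yaml under `fsdp_config`, so
# launch-time overrides could look like:
#   actor_rollout_ref.actor.optim.lr=1e-6
#   actor_rollout_ref.actor.fsdp_config.param_offload=true

# Gradient clipping for actor updates, specific to the strategy.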
grad_clip: 1.0 # Sequence parallelism size for Ulysses-style model parallelism # oc.select: the default val for ref.ulysses_sequence_parallel_size ulysses_sequence_parallel_size: 1 # calculate entropy with chunking to reduce memory peak entropy_from_logits_with_chunking: False # recompute entropy entropy_checkpointing: False # Whether to remove padding tokens in inputs during training use_remove_padding: ${oc.select:actor_rollout_ref.model.use_remove_padding,false} ================================================ FILE: verl_distillation/verl/trainer/config/actor/megatron_actor.yaml ================================================ # megatron actor config, inheriting from trainer/config/actor/actor.yaml defaults: # megatron optimizer config - ../optim@optim: megatron # megatron engine config - ../engine@megatron: megatron - actor # load the reference default config, then apply the fields in the current yaml - _self_ _target_: verl.workers.config.McoreActorConfig strategy: megatron data_loader_seed: null load_weight: True ================================================ FILE: verl_distillation/verl/trainer/config/algorithm.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from typing import Any, Optional from verl.base_config import BaseConfig __all__ = ["AlgoConfig", "FilterGroupsConfig", "KLControlConfig"] @dataclass class KLControlConfig(BaseConfig): """Configuration for KL control. The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config. Args: type (str): Type of KL control. Can be "fixed" or "adaptive". kl_coef (float): Initial coefficient for KL penalty. horizon (int): Horizon value for adaptive controller. target_kl (float): Target KL divergence for adaptive controller. """ type: str = "fixed" kl_coef: float = 0.001 horizon: int = 10000 target_kl: float = 0.1 @dataclass class FilterGroupsConfig(BaseConfig): """Configuration for filter groups (used in DAPO and Entropy). The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config. Args: enable (bool): Whether to enable filter groups. metric (Optional[str]): Metric to use for filtering: "acc", "score", "seq_reward", "seq_final_reward", etc. max_num_gen_batches (int): Non-positive values mean no upper limit. """ enable: bool = False metric: Optional[str] = None max_num_gen_batches: int = 0 @dataclass class AlgoConfig(BaseConfig): """Configuration for the algorithm. The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config. Args: gamma (float): Discount factor for future rewards. lam (float): Trade-off between bias and variance in the GAE estimator. adv_estimator (str): Advantage estimator type: "gae", "grpo", "reinforce_plus_plus", etc. norm_adv_by_std_in_grpo (bool): Whether to normalize advantages by std (specific to GRPO). use_kl_in_reward (bool): Whether to enable in-reward KL penalty. 
kl_penalty (str): How to estimate KL divergence: "kl", "abs", "mse", "low_var_kl", or "full". kl_ctrl (KLControlConfig): KL control configuration. use_pf_ppo (bool): Whether to enable preference feedback PPO. pf_ppo (dict[str, Any]): Preference feedback PPO settings. filter_groups (Optional[FilterGroupsConfig]): Filter groups configuration, used in DAPO and Entropy rollout_is_threshold (Optional[float]): Upper threshold for IS weights. null = disabled, float value = enabled (compute weights and metrics). This is the main on/off switch. rollout_is_threshold_lower (Optional[float]): Lower threshold for IS weights. If None, defaults to 1/upper. rollout_is_level (str): Aggregation level: "token", "sequence", or "geometric". rollout_is_mode (str): Bounding mode: "truncate" (cap upper only) or "mask" (zero outside bounds). rollout_is_veto_threshold (float or None): Per-token veto threshold for catastrophic outliers. None to disable. rollout_is (bool): Whether to apply IS weights to policy loss. True = apply weights, False = compute metrics only (useful for monitoring before enabling correction). Default: False. """ gamma: float = 1.0 lam: float = 1.0 adv_estimator: str = "gae" norm_adv_by_std_in_grpo: bool = True use_kl_in_reward: bool = False kl_penalty: str = "kl" kl_ctrl: KLControlConfig = field(default_factory=KLControlConfig) use_pf_ppo: bool = False pf_ppo: dict[str, Any] = field(default_factory=dict) filter_groups: Optional[FilterGroupsConfig] = None # Rollout Importance Sampling # Controls computation of IS weights and mismatch metrics rollout_is_threshold: Optional[float] = None # null = disabled, float = enabled rollout_is_threshold_lower: Optional[float] = None rollout_is_level: str = "token" rollout_is_mode: str = "truncate" rollout_is_veto_threshold: Optional[float] = None # Controls whether to apply IS weights to policy loss (only if rollout_is_threshold is set) # True = apply weights to loss, False = compute metrics only (no weight application) rollout_is: bool = False ================================================ FILE: verl_distillation/verl/trainer/config/config.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from typing import Any, Optional from verl.base_config import BaseConfig __all__ = ["CheckpointConfig", "ProfileConfig", "BaseModelConfig"] @dataclass class CheckpointConfig(BaseConfig): """Configuration for model checkpointing. The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config. Args: save_contents (list[str]): What to include in saved checkpoints. Options: 'model', 'optimizer', 'extra', 'hf_model'. load_contents (list[str]): Contents to load from checkpoint. Defaults to same as save_contents. async_save (bool): Whether to save checkpoints asynchronously. Only implemented for Megatron as of now. 
""" save_contents: list[str] = field(default_factory=lambda: ["model", "optimizer", "extra"]) load_contents: list[str] = field(default_factory=lambda: ["model", "optimizer", "extra"]) async_save: bool = False @dataclass class ProfileConfig(BaseConfig): """Configuration for profiling. The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config. Args: profile_ranks (Optional[list[int]]): List of ranks to profile. None means all ranks. step_start (int): Starting step for profiling. step_end (int): Ending step for profiling. save_path (Optional[str]): Path to save profiling results. """ profile_ranks: Optional[list[int]] = None step_start: int = -1 step_end: int = -1 save_path: Optional[str] = None @dataclass class BaseModelConfig(BaseConfig): """Base configuration for a model. Contains core settings for loading and initializing a pretrained model checkpoint. Args: path (str): Path to pretrained model weights. tokenizer_path (Optional[str]): Tokenizer path (defaults to actor's model path if not set). override_config (dict): Hugging Face config override. external_lib (Optional[str]): External model implementation (optional). trust_remote_code (bool): Whether to trust remote code from Hugging Face models. """ path: str = "~/models/deepseek-llm-7b-chat" tokenizer_path: Optional[str] = None override_config: dict[str, Any] = field(default_factory=dict) external_lib: Optional[str] = None trust_remote_code: bool = False ================================================ FILE: verl_distillation/verl/trainer/config/critic/critic.yaml ================================================ # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs _target_: verl.workers.config.CriticConfig # Number of rollouts per update (mirrors actor rollout_n) rollout_n: ${oc.select:actor_rollout_ref.rollout.n,1} # fsdp or fsdp2 strategy used for critic model training strategy: ??? # whether to enable the critic worker. # by default it is only enabled if advantage estimator is gae # set it to True manually if you always want to enable critic worker enable: null # optimizer configs optim: # Learning rate lr: 1e-5 # Warmup steps ratio; total steps will be injected at runtime lr_warmup_steps_ratio: 0.0 # Total training steps (must be overridden at runtime) total_training_steps: -1 # Weight decay weight_decay: 0.01 # Prioritized. None, 0 or Negative values mean delegating to lr_warmup_steps_ratio. 
lr_warmup_steps: -1 # model config for the critic model: # Path to pretrained model weights path: ~/models/deepseek-llm-7b-chat # Tokenizer path (defaults to actor's model path) tokenizer_path: ${oc.select:actor_rollout_ref.model.path,"~/models/deepseek-llm-7b-chat"} # Hugging Face config override override_config: {} # External model implementation (optional) external_lib: ${oc.select:actor_rollout_ref.model.external_lib,null} # Whether to trust remote code from Hugging Face models trust_remote_code: ${oc.select:actor_rollout_ref.model.trust_remote_code,false} # PPO mini-batch size per update ppo_mini_batch_size: ${oc.select:actor_rollout_ref.actor.ppo_mini_batch_size,256} # [Deprecated] Global micro batch size ppo_micro_batch_size: null # Local per-GPU micro batch size ppo_micro_batch_size_per_gpu: ${oc.select:.ppo_micro_batch_size,null} # Whether to automatically adjust batch size at runtime use_dynamic_bsz: ${oc.select:actor_rollout_ref.actor.use_dynamic_bsz,false} # Max tokens per GPU in one PPO batch (doubled for critic) ppo_max_token_len_per_gpu: 32768 # Max token length per GPU in forward pass forward_max_token_len_per_gpu: ${.ppo_max_token_len_per_gpu} # Number of PPO epochs per batch ppo_epochs: ${oc.select:actor_rollout_ref.actor.ppo_epochs,1} # Shuffle training data across PPO epochs shuffle: ${oc.select:actor_rollout_ref.actor.shuffle,false} # PPO value function clipping range cliprange_value: 0.5 # Loss aggregation mode: "token-mean", "seq-mean-token-sum", or "seq-mean-token-mean" loss_agg_mode: ${oc.select:actor_rollout_ref.actor.loss_agg_mode,token-mean} # checkpoint configs checkpoint: # Target dataclass for this configuration _target_: verl.trainer.config.CheckpointConfig # What to include in saved checkpoints # with 'hf_model' you can save whole model as hf format, now only use sharded model checkpoint to save space save_contents: ['model', 'optimizer', 'extra'] # What to include when loading checkpoints load_contents: ${.save_contents} # Whether to save checkpoints asynchronously. Only effective for Megatron as of now. async_save: False # profile the critic model in `update_critic` profiler: # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs _target_: verl.utils.profiler.ProfilerConfig # profiler tool, default same as profiler.tool in global config # choices: nsys, npu, torch, torch_memory tool: ${oc.select:global_profiler.tool,null} # whether enable profile on Critic enable: False # Whether to profile all ranks. all_ranks: False # The ranks that will be profiled. [] or [0,1,...] ranks: [] # profile results saving path save_path: ${oc.select:global_profiler.save_path,null} # specific tool config which only related to the role tool_config: # nsys tool config nsys: # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs _target_: verl.utils.profiler.config.NsightToolConfig # True for each task has its own database, False for all tasks in one training step share one database. discrete: ${oc.select:global_profiler.global_tool_config.nsys.discrete} # npu config npu: # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs _target_: verl.utils.profiler.config.NPUToolConfig # Contents to profile, can be empty # options: npu, cpu, memory, shapes, module, stack contents: [] # Collection level, optional values: level_none, level0, level1, level2. level: "level1" # Whether to automatically parse the data. 
analysis: True # True for each task has its own database, False for all tasks in one training step share one database. discrete: False # torch profiler config torch: # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs _target_: verl.utils.profiler.config.TorchProfilerToolConfig # start profile mini-batch in training # NOTICE: different with global steps config which refers to iteration # This field only related with mini-batch step_start: 0 # stop profile mini-batch in training step_end: null # torch memory profiler config torch_memory: # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs _target_: verl.utils.profiler.config.TorchMemoryToolConfig # Maximum number of memory allocation entries to track trace_alloc_max_entries: ${oc.select:global_profiler.global_tool_config.torch_memory.trace_alloc_max_entries,100000} # Stack trace depth for memory allocations stack_depth: ${oc.select:global_profiler.global_tool_config.torch_memory.stack_depth,32} ================================================ FILE: verl_distillation/verl/trainer/config/critic/dp_critic.yaml ================================================ # Format checks enforced on CI: # 1. Comments must appear above each field. # 2. There must be a blank line between each field. # 3. Inline comments (after a field on the same line) are not allowed. # 4. Indentation level is respected for nested fields. # defaults specify the default config from each component defaults: # fsdp optimizer config - ../optim@optim: fsdp # fsdp engine config - ../engine@model.fsdp_config: fsdp # dp actor config, inheriting from trainer/config/critic/critic.yaml - critic # load the reference default config, then apply the fields in the current yaml - _self_ # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs _target_: verl.workers.config.FSDPCriticConfig # distribution strategy. 
# Options: fsdp (deprecating), fsdp2
strategy: fsdp

# model config for the critic
model:

  # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs
  _target_: verl.workers.config.FSDPCriticModelCfg

  # Whether to use shared memory for loading the model
  use_shm: False

  # Enable gradient checkpointing to save memory
  enable_gradient_checkpointing: True

  # Offload activations to CPU to reduce GPU memory usage
  enable_activation_offload: False

  # Use remove padding optimization (saves compute)
  use_remove_padding: False

  # Set to positive value to enable LoRA (e.g., 32)
  lora_rank: 0

  # LoRA scaling factor
  lora_alpha: 16

  # LoRA target modules: "all-linear" or list of linear projection layers
  target_modules: all-linear

# Forward-only batch size during inference (global)
forward_micro_batch_size: ${oc.select:.ppo_micro_batch_size,null}

# Forward-only batch size during inference (per GPU)
forward_micro_batch_size_per_gpu: ${oc.select:.ppo_micro_batch_size_per_gpu,null}

# Sequence parallelism size for Ulysses-style model parallelism
ulysses_sequence_parallel_size: 1

# Gradient clipping for critic updates
grad_clip: 1.0

================================================
FILE: verl_distillation/verl/trainer/config/critic/megatron_critic.yaml
================================================
# defaults specify the default config from each component
defaults:

  # megatron optimizer config
  - ../optim@optim: megatron

  # megatron engine config
  - ../engine@megatron: megatron

  # critic config, inheriting from trainer/config/critic/critic.yaml
  - critic

  # load the reference default config, then apply the fields in the current yaml
  - _self_

# Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs
_target_: verl.workers.config.McoreCriticConfig

strategy: megatron

# NCCL timeout in seconds; torch's default is 10 minutes. Set it to a larger value
# if you have long-running operations, e.g. 32B or 72B models using megatron.
nccl_timeout: 600

# model config for the critic
model:

  # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs
  _target_: verl.trainer.config.BaseModelConfig

  # override default empty mapping
  override_config:
    model_config: {}
    moe_config:
      freeze_moe_router: False

# Whether to load initial weights
load_weight: True

# seed for data loader
data_loader_seed: ${oc.select:actor_rollout_ref.actor.data_loader_seed,null}

================================================
FILE: verl_distillation/verl/trainer/config/data/legacy_data.yaml
================================================
# Tokenizer class or path. If null, it will be inferred from the model.
tokenizer: null

# Whether to use shared memory for data loading.
use_shm: False

# Training set parquet. Can be a list or a single file.
# The program will read all files into memory, so it can't be too large (< 100GB).
# The path can be either a local path or an HDFS path.
# For HDFS path, we provide utils to download it to DRAM and convert it to a local path.
train_files: ~/data/rlhf/gsm8k/train.parquet

# Validation parquet. Can be a list or a single file.
val_files: ~/data/rlhf/gsm8k/test.parquet

# Maximum number of training samples to use.
# Set to -1 to use the full dataset; otherwise, randomly
# select the specified number of samples from the train dataset.
train_max_samples: -1

# Maximum number of validation samples to use.
# Set to -1 to use the full dataset; otherwise, randomly
# select the specified number of samples from the val dataset.
val_max_samples: -1
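# Example (illustrative): since train_files accepts a list, several parquet
# shards can be mixed:
#   train_files:
#     - ~/data/rlhf/gsm8k/train.parquet
#     - ~/data/rlhf/math/train.parquet

# The field in the dataset where the prompt is located.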
Default is 'prompt'. prompt_key: prompt # The field used to select the reward function (if using different ones per example). reward_fn_key: data_source # Maximum prompt length. All prompts will be left-padded to this length. # An error will be reported if the length is too long. # oc.select: default val for rollout.prompt_length max_prompt_length: 512 # Maximum response length. Rollout in RL algorithms (e.g. PPO) generates up to this length. # oc.select: default val for rollout.response_length max_response_length: 512 # Batch size sampled for one training iteration of different RL algorithms. train_batch_size: 1024 # Batch size used during validation. Can be null. val_batch_size: null # use tool config to calculate true prompt length tool_config_path: ${oc.select:actor_rollout_ref.rollout.multi_turn.tool_config_path, null} # Whether to return the original input_ids without adding chat template. # This is used when the reward model's chat template differs from the policy. # If using a model-based RM with different templates, this should be True. return_raw_input_ids: False # Whether to return the original chat (prompt) without applying chat template. return_raw_chat: False # Whether to return the full prompt with chat template. return_full_prompt: False # Whether to shuffle the data in the dataloader. shuffle: True # Seed to use when shuffling the data seed: null # num dataloader workers dataloader_num_workers: 8 # image patch size image_patch_size: 14 # Whether to shuffle the validation set. validation_shuffle: False # Whether to filter overlong prompts. filter_overlong_prompts: False # Number of workers for filtering overlong prompts. # For large-scale datasets, filtering can be time-consuming. # Use multiprocessing to speed up. Default is 1. filter_overlong_prompts_workers: 1 # Truncate the input_ids or prompt if they exceed max_prompt_length. # Options: 'error', 'left', 'right', 'middle'. Default is 'error'. truncation: error # The field in the multi-modal dataset where the image is located. Default is 'images'. image_key: images # The field in the multi-modal dataset where the video is located. video_key: videos # If the remote tokenizer has a Python file, this flag determines whether to allow using it. trust_remote_code: False # Optional: specify a custom dataset class path and name if overriding default loading behavior. custom_cls: # The path to the file containing your customized dataset class. If not specified, pre-implemented dataset will be used. path: null # The name of the dataset class within the specified file. name: null # Whether to return multi-modal inputs in the dataset. Set to False if rollout generates new multi-modal inputs. return_multi_modal_inputs: True # settings related to data sampler sampler: # the path to the module containing a curriculum class which implements the # AbstractSampler interface class_path: null # the name of the curriculum class like `MySampler` class_name: null # Data generation configuration for augmenting the dataset. datagen: # The path to the file containing your customized data generation class. # E.g. 'pkg://verl.experimental.dynamic_dataset.dynamicgen_dataset' path: null # The class name of the data generation class within the specified file. # E.g. 
  # 'MockDataGenerator'
  name: null

# Additional kwargs when calling tokenizer.apply_chat_template
apply_chat_template_kwargs: {}

================================================
FILE: verl_distillation/verl/trainer/config/engine/fsdp.yaml
================================================
# Target class for this configuration
_target_: verl.workers.config.FSDPEngineConfig

# policy for wrapping the model
wrap_policy:

  # Minimum number of parameters to trigger wrapping a layer with FSDP
  min_num_params: 0

# Whether to offload model parameters to CPU (trades speed for memory)
# Note that this differs from the offload_policy in FSDP
param_offload: false

# Whether to offload optimizer state to CPU
# Note that this differs from the offload_policy in FSDP
optimizer_offload: false

# Only for FSDP2: offload param/grad/optimizer during train
offload_policy: false

# Only for FSDP2: Reshard after forward pass to reduce memory footprint
reshard_after_forward: true

# Number of GPUs in each FSDP shard group; -1 means auto
fsdp_size: -1

# Only for FSDP1: FSDP1 configuration, prefetch the next forward-pass all-gather
# before the current forward computation.
forward_prefetch: False

# model dtype of fsdp
model_dtype: fp32

# Whether to use original parameters in fsdp. Only available in fsdp1
use_orig_params: false

# ulysses sequence parallel size
ulysses_sequence_parallel_size: 1

# Whether to use entropy_from_logits_with_chunking in fsdp.
entropy_from_logits_with_chunking: false

# Whether to use torch compile in fsdp.
use_torch_compile: true

# Whether to use entropy checkpointing in fsdp.
entropy_checkpointing: false

# Whether to run forward only in fsdp.
forward_only: false

# fsdp or fsdp2
strategy: fsdp

================================================
FILE: verl_distillation/verl/trainer/config/engine/megatron.yaml
================================================
# Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs
_target_: verl.workers.config.McoreEngineConfig

# Whether to offload model parameters to CPU
param_offload: False

# Whether to offload gradients to CPU
grad_offload: False

# Whether to offload optimizer state to CPU
optimizer_offload: False

# tensor model parallel size
tensor_model_parallel_size: 1

# expert model parallel size
expert_model_parallel_size: 1

# expert tensor parallel size
expert_tensor_parallel_size: 1

# pipeline model parallel size
pipeline_model_parallel_size: 1

# virtual pipeline model parallel size
virtual_pipeline_model_parallel_size: null

# context parallel size
context_parallel_size: 1

# sequence parallel
sequence_parallel: True

# Whether to use distributed optimizer
use_distributed_optimizer: True

# Whether to use distributed checkpointing
use_dist_checkpointing: False

# distributed checkpointing path
dist_checkpointing_path: null

# oc.select: default val for ref.megatron.seed
seed: 42

# Allow to override Distributed Data Parallel (DDP) config
override_ddp_config: {}

# additional transformer config like: num_layers_in_first(/last)_pipeline_stage
# oc.select: default val for ref.megatron.override_transformer_config
override_transformer_config:

  # Recompute configuration, same as in megatron.training.arguments
  # by default, use recompute methods with minimal performance interference

  # Recompute granularity, choices: ["full", "selective"]
  recompute_granularity: null

  # Recompute modules, multiple choices: ["core_attn", "moe_act", "layernorm", "mla_up_proj", "mlp", "moe"]
  # Please use the correct modules for the matched model
  recompute_modules: ["core_attn"]
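  # Example (illustrative): a more aggressive recompute setup that trades compute
  # for memory, assuming the model matches these modules:
  #   recompute_granularity: full
  #   recompute_method: uniform
  #   recompute_num_layers: 1

  # recompute_method, choices: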
'uniform', 'block' # 'uniform' divides the total number of transformer layers and checkpoints the input activation of each chunk # 'block' checkpoints the specified number of layers per pipeline stage at the specified granularity recompute_method: null # 'full' will checkpoint the entire transformer layer and 'selective' only checkpoints memory intensive part of attention recompute_num_layers: null # Attention backend to use (flash,fused,unfused,local,auto). Defaults to auto in mcore, flash in verl attention_backend: flash override_mcore_model_config: {} # oc.select: default val for ref.megatron.use_mbridge use_mbridge: False # whether to use forward only forward_only: False ================================================ FILE: verl_distillation/verl/trainer/config/evaluation.yaml ================================================ data: path: /tmp/math_Qwen2-7B-Instruct.parquet prompt_key: prompt response_key: responses data_source_key: data_source reward_model_key: reward_model custom_reward_function: path: null name: compute_score ray_kwargs: ray_init: num_cpus: null # `None` means using all CPUs, which might cause hang if limited in systems like SLURM. Please set to a number allowed then. timeline_json_file: null ================================================ FILE: verl_distillation/verl/trainer/config/generation.yaml ================================================ trainer: nnodes: 1 n_gpus_per_node: 8 device: cuda data: path: ~/data/rlhf/math/test.parquet prompt_key: prompt n_samples: 5 output_path: /opt/tiger/math_Qwen2-7B-Instruct.parquet batch_size: 128 model: path: ~/models/Qwen2-7B-Instruct external_lib: null rollout: _target_: verl.workers.config.RolloutConfig name: vllm mode: sync # sync: LLM, async: AsyncLLM temperature: 1.0 top_k: 50 # 0 for hf rollout, -1 for vllm rollout top_p: 0.7 prompt_length: 1536 response_length: 512 # for vllm rollout dtype: bfloat16 # should align with FSDP gpu_memory_utilization: 0.5 ignore_eos: False enforce_eager: True free_cache_engine: True load_format: auto tensor_model_parallel_size: 1 data_parallel_size: 1 max_num_batched_tokens: 8192 max_model_len: null max_num_seqs: 1024 log_prob_micro_batch_size: null # will be deprecated, use log_prob_micro_batch_size_per_gpu log_prob_micro_batch_size_per_gpu: 8 # for hf rollout do_sample: True disable_log_stats: True enable_chunked_prefill: True n: 1 # support logging rollout prob for debugging purpose calculate_log_probs: False actor: strategy: fsdp # This is for backward-compatibility ulysses_sequence_parallel_size: 1 # sp size entropy_from_logits_with_chunking: False # calculate entropy with chunking to reduce memory peak entropy_checkpointing: False # recompute entropy fsdp_config: fsdp_size: -1 forward_prefetch: False # FSDP1 forward_prefetch configuration ray_kwargs: ray_init: num_cpus: null # `None` means using all CPUs, which might cause hang if limited in systems like SLURM. Please set to a number allowed then. timeline_json_file: null ================================================ FILE: verl_distillation/verl/trainer/config/model/hf_model.yaml ================================================ # Format checks enforced on CI: # 1. Comments must appear above each field. # 2. There must be a blank line between each field. # 3. Inline comments (after a field on the same line) are not allowed. # 4. Indentation level is respected for nested fields. _target_: verl.workers.config.HFModelConfig # path to the huggingface model path: ~/models/deepseek-llm-7b-chat # config to the huggingface config. 
In case it is not the same as path hf_config_path: null # path to the huggingface tokenizer. In case it is not the same as path tokenizer_path: null # whether to use shared memory for model loading use_shm: False # whether to trust remote code. trust_remote_code: False # custom chat template for the model custom_chat_template: null # whether to use external libs for the model external_lib: null # override hf config override_config: {} # whether to enable gradient checkpointing. Only valid when we use hf model definition enable_gradient_checkpointing: True # whether to enable activation offload. Only valid when we use hf model definition enable_activation_offload: False # whether to use remove padding. Only valid when we use hf model definition use_remove_padding: False # Set to positive value to enable LoRA (e.g., 32) lora_rank: 0 # LoRA scaling factor lora_alpha: 16 # Target modules for LoRA adaptation target_modules: all-linear # Exclude modules from LoRA adaptation exclude_modules: null # Path to pre-trained LoRA adapter to load for continued training lora_adapter_path: null # whether to use liger. Only valid when we use hf model definition use_liger: False # whether to use fused kernels. use_fused_kernels: False # fused kernel options. fused_kernel_options: # the implementation backend for fused kernels. impl_backend: torch ================================================ FILE: verl_distillation/verl/trainer/config/npu_profile/npu_profile.yaml ================================================ # Options for the npu profiler options: # Storage path of collected data. save_path: ./profiler_data # The roles that will be profiled. Only takes effect in discrete mode. # optional values: all, rollout_generate, actor_compute_log_prob, actor_update and ref_compute_log_prob. # "all" means all roles will be profiled. roles: ["all"] # Collection level, optional values: level_none, level0, level1, level2. level: level1 # Whether to enable memory analysis. with_memory: False # Whether to record tensor shape. record_shapes: False # Whether to record Device-side performance data. with_npu: True # Whether to record Host-side performance data. with_cpu: True # Whether to record Python call stack information. with_module: False # Whether to record operator call stack information. with_stack: False # Whether to automatically parse the data. 
analysis: True ================================================ FILE: verl_distillation/verl/trainer/config/optim/fsdp.yaml ================================================ # Target class for this configuration _target_: verl.workers.config.FSDPOptimizerConfig # Optimizer class name (e.g., "AdamW", "AdamW8bit", "_AdamW", "Adam") optimizer: AdamW # Module path to import optimizer # Examples: "torch.optim", "torchao.optim", "bitsandbytes.optim" optimizer_impl: torch.optim # Learning rate lr: 1e-3 # LR warmup steps ratio lr_warmup_steps_ratio: 0.0 # Total training steps total_training_steps: -1 # Weight decay weight_decay: 0.01 # LR warmup steps lr_warmup_steps: -1 # Betas for Adam optimizer betas: [0.9, 0.999] # Clip gradient clip_grad: 1.0 # Minimum LR ratio for cosine schedule min_lr_ratio: 0.0 # Number of cosine cycles in LR schedule num_cycles: 0.5 # LR scheduler type: "constant" or "cosine" lr_scheduler_type: constant # deprecated warmup_style: null # Additional optimizer-specific keyword arguments # Example for torchao with bf16 stochastic rounding: # optimizer_impl: torchao.optim # optimizer: _AdamW # override_optimizer_config: # bf16_stochastic_round: true override_optimizer_config: null ================================================ FILE: verl_distillation/verl/trainer/config/optim/megatron.yaml ================================================ _target_: verl.workers.config.McoreOptimizerConfig # Learning rate lr: 1e-3 # LR warmup steps ratio lr_warmup_steps_ratio: 0.0 # Total training steps total_training_steps: -1 # Weight decay weight_decay: 0.01 # LR warmup steps lr_warmup_steps: -1 # Betas for Adam optimizer betas: [0.9, 0.999] # Clip gradient clip_grad: 1.0 # optimizer type optimizer: adam # initial learning rate for warmup, default to 0.0 lr_warmup_init: 0.0 lr_decay_steps: null # select from constant/linear/cosine/inverse_square_root lr_decay_style: constant # minimum learning rate, default to 0.0 min_lr: 0.0 # select from constant/linear/cosine weight_decay_incr_style: constant # select from constant/exponential/cosine lr_wsd_decay_style: exponential lr_wsd_decay_steps: null # use checkpoint optimizer parameter scheduler use_checkpoint_opt_param_scheduler: False override_optimizer_config: {} ================================================ FILE: verl_distillation/verl/trainer/config/ppo_megatron_trainer.yaml ================================================ # specify the default per-component configs defaults: # @.: # actor_rollout_ref.actor: trainer/config/actor/megatron_actor.yaml - actor@actor_rollout_ref.actor: megatron_actor # data: trainer/config/data/legacy_data.yaml - data@data: legacy_data # load the reference default config, then apply the fields in the current yaml # Reference model config. # Reference model will be enabled when actor.use_kl_loss or/and algorithm.use_kl_in_reward is/are True. - ref@actor_rollout_ref.ref: megatron_ref # Rollout model config. - rollout@actor_rollout_ref.rollout: rollout # Critic model config. - critic@critic: megatron_critic # Reward model config. 
- reward_model@reward_model: megatron_reward_model - _self_ actor_rollout_ref: hybrid_engine: True nccl_timeout: 600 # seconds, default is 10 minutes for torch, you can set it to a larger value if you have long-running operations like 32B or 72B model using megatron model: path: ~/models/deepseek-llm-7b-chat custom_chat_template: null external_lib: null override_config: model_config: {} moe_config: freeze_moe_router: False use_fused_kernels: False # Whether to use custom fused kernels (PostProcessing, for memory efficiency) trust_remote_code: False # Whether to remove padding tokens in inputs during training use_remove_padding: false rollout: layer_name_map: qkv_layer_name: qkv gate_proj_layer_name: gate_up custom_reward_function: path: null name: compute_score algorithm: # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs _target_: verl.trainer.config.AlgoConfig gamma: 1.0 lam: 1.0 adv_estimator: gae norm_adv_by_std_in_grpo: True use_kl_in_reward: False kl_penalty: kl # how to estimate kl divergence kl_ctrl: # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs _target_: verl.trainer.config.KLControlConfig type: fixed kl_coef: 0.001 horizon: 10000 target_kl: 0.1 use_pf_ppo: False pf_ppo: reweight_method: pow # ["pow", "max_min", "max_random"] weight_pow: 2.0 # Rollout Importance Sampling: corrects distribution mismatch between rollout and training policies # Main control: Upper threshold for IS weights (null = disabled, float = enabled) # When enabled, computes IS weights and mismatch metrics (KL, PPL, etc.) rollout_is_threshold: null # Lower threshold for IS weights (null = auto-reciprocal of upper) rollout_is_threshold_lower: null # Aggregation level: "token" (biased), "sequence" (unbiased), "geometric" (experimental) rollout_is_level: token # Bounding mode: "truncate" (cap upper only), "mask" (zero outside bounds) rollout_is_mode: truncate # Per-token veto threshold for catastrophic outliers (null to disable) rollout_is_veto_threshold: null # Whether to apply IS weights to policy loss # true = apply weights to loss, false = compute metrics only (no weight application) # Useful for monitoring mismatch before enabling correction rollout_is: false trainer: balance_batch: True total_epochs: 30 total_training_steps: null project_name: verl_examples experiment_name: gsm8k logger: ["console", "wandb"] log_val_generations: 0 nnodes: 1 n_gpus_per_node: 8 save_freq: -1 esi_redundant_time: 0 # auto: find the last ckpt to resume. 
  # If a checkpoint can't be found, start from scratch
  resume_mode: auto # or "disable", or "resume_path" if resume_from_path is set
  resume_from_path: null
  del_local_ckpt_after_load: False
  val_before_train: True
  test_freq: -1
  critic_warmup: 0
  default_hdfs_dir: null
  default_local_dir: checkpoints/${trainer.project_name}/${trainer.experiment_name}
  max_actor_ckpt_to_keep: null
  max_critic_ckpt_to_keep: null
  # The timeout for the ray worker group to wait for the register center to be ready
  ray_wait_register_center_timeout: 300
  device: cuda
  # Directory for logging rollout data; no dump if null
  rollout_data_dir: null

global_profiler:
  _target_: verl.utils.profiler.ProfilerConfig
  tool: null # choose between nsys, npu, torch, torch_memory
  steps: null # profile steps
  profile_continuous_steps: False
  save_path: "outputs/profile" # profiler saving path
  # Specific tool configs, can use +profiler.tool_config.[tool].xxx to config
  global_tool_config:
    # nsys config
    nsys:
      # True: each task has its own database; False: all tasks in one training step share one database.
      discrete: False
      # controller Nvidia Nsight Systems Options. Must be set when profile_steps is not None.
      ## reference https://docs.nvidia.com/nsight-systems/UserGuide/index.html
      ## reference https://docs.ray.io/en/latest/ray-observability/user-guides/profiling.html
      controller_nsight_options:
        # Select the API(s) to be traced.
        trace: "cuda,nvtx,cublas,ucx"
        # Track the GPU memory usage by CUDA kernels. Must be string type "true" or "false".
        cuda-memory-usage: "true"
        # CUDA graphs will be traced as a whole
        cuda-graph-trace: "graph"
      # worker Nvidia Nsight Systems Options. Must be set when profile_steps is not None.
      worker_nsight_options:
        # Select the API(s) to be traced.
        trace: "cuda,nvtx,cublas,ucx"
        # Track the GPU memory usage by CUDA kernels. Must be string type "true" or "false".
        cuda-memory-usage: "true"
        # CUDA graphs will be traced as a whole
        cuda-graph-trace: "graph"
        # Profiling only in a range of torch.cuda.profiler.start and stop. Do not change this config.
        capture-range: "cudaProfilerApi"
        # Specify the desired behavior when a capture range ends.
        # In verl we need the torch.cuda.profiler.start/stop pair to repeat n times.
        # valid values are "repeat-shutdown:n" or null.
        # For normal whole-step profiling, n = len(profile_steps);
        # but for discrete profiling, n = len(profile_steps) * Number(subtasks).
        # Or you can just leave it null and the program will use n = len(profile_steps) * 6.
        capture-range-end: null
        # Send signal to the target application's process group. We let the program exit by itself.
        kill: none
    # enable memory visualization for debugging memory usage
    torch_memory:
      # Maximum number of allocation entries to record
      trace_alloc_max_entries: 100_000
      # The depth of the call stack to capture for each allocation
      stack_depth: 32
      # 'alloc': records only allocation events || 'state': records memory state changes || 'all': records both.
      context: "all"
      # 'python': records Python stacks || 'cpp': records C++ stacks (available in some versions) || 'all': records both.
      stacks: "all"
      # devices, record_context etc.
      kw_args: {}

# configs for TransferQueue
transfer_queue:
  # Whether to enable transfer queue
  enable: False

ray_kwargs:
  ray_init:
    num_cpus: null # `None` means using all CPUs, which might cause a hang if CPUs are limited in systems like SLURM. Please set it to an allowed number then.
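    # Example (illustrative): on a SLURM allocation granting 32 CPUs, pin Ray to it:
    #   num_cpus: 32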
timeline_json_file: null ================================================ FILE: verl_distillation/verl/trainer/config/ppo_trainer.yaml ================================================ # Format checks enforced on CI: # 1. Comments must appear above each field. # 2. There must be a blank line between each field. # 3. Inline comments (after a field on the same line) are not allowed. # 4. Indentation level is respected for nested fields. # specify the default per-component configs defaults: # @.: # actor_rollout_ref.actor: trainer/config/actor/dp_actor.yaml - actor@actor_rollout_ref.actor: dp_actor # data: trainer/config/data/legacy_data.yaml - data@data: legacy_data # Reference model config. # Reference model will be enabled when actor.use_kl_loss or/and algorithm.use_kl_in_reward is/are True. - ref@actor_rollout_ref.ref: dp_ref # Rollout model config. - rollout@actor_rollout_ref.rollout: rollout # Model config. - model@actor_rollout_ref.model: hf_model # Critic model config. - critic@critic: dp_critic # Reward model config. - reward_model@reward_model: dp_reward_model # load the reference default config, then apply the fields in the current yaml # self config override anything above - _self_ # config for actor, rollout and reference model actor_rollout_ref: # Whether it's a hybrid engine, currently only supports hybrid engine hybrid_engine: true # Timeout for operations executed against the process group nccl_timeout: 600 # Rollout model config. rollout: # for huge model, layered summon can save memory (prevent OOM) but make it slower layered_summon: False # custom reward function definition custom_reward_function: # The path to the file containing your customized reward function. # If not specified, pre-implemented reward functions will be used. path: null # The name of the reward function within the specified file. Default is 'compute_score'. name: compute_score # config for the algorithm algorithm: # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs _target_: verl.trainer.config.AlgoConfig # Discount factor for future rewards gamma: 1.0 # Trade-off between bias and variance in the GAE estimator lam: 1.0 # Advantage estimator type: "gae", "grpo", "reinforce_plus_plus", etc. adv_estimator: gae # Whether to normalize advantages by std (specific to GRPO) norm_adv_by_std_in_grpo: True # Whether to enable in-reward KL penalty use_kl_in_reward: False # How to estimate KL divergence: "kl", "abs", "mse", "low_var_kl", or "full" kl_penalty: kl # KL control configuration kl_ctrl: # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs _target_: verl.trainer.config.KLControlConfig # KL control type: "fixed" or "adaptive" type: fixed # Initial coefficient for KL penalty kl_coef: 0.001 # Horizon value for adaptive controller (if enabled) horizon: 10000 # Target KL divergence (used for adaptive controller) target_kl: 0.1 # Whether to enable preference feedback PPO use_pf_ppo: False # Preference feedback PPO settings pf_ppo: # Method for reweighting samples: "pow", "max_min", or "max_random" reweight_method: pow # Power used for weight scaling in "pow" method weight_pow: 2.0 # Rollout Importance Sampling: corrects distribution mismatch between rollout and training policies # Main control: Upper threshold for IS weights (null = disabled, float = enabled) # When enabled, computes IS weights and mismatch metrics (KL, PPL, etc.) 
rollout_is_threshold: null # Lower threshold for IS weights (null = auto-reciprocal of upper) rollout_is_threshold_lower: null # Aggregation level: "token" (biased), "sequence" (unbiased), "geometric" (experimental) rollout_is_level: token # Bounding mode: "truncate" (cap upper only), "mask" (zero outside bounds) rollout_is_mode: truncate # Per-token veto threshold for catastrophic outliers (null to disable) rollout_is_veto_threshold: null # Whether to apply IS weights to policy loss # true = apply weights to loss, false = compute metrics only (no weight application) # Useful for monitoring mismatch before enabling correction rollout_is: false # distill advantage clip params distill_adv_max_clip: 1e9 distill_adv_min_clip: -1e9 # config for the trainer trainer: # Whether to balance batch sizes across distributed workers balance_batch: True # Number of epochs in training total_epochs: 30 # Total training steps (can be set explicitly or derived from epochs) total_training_steps: null # Project name for experiment tracking (e.g., wandb) project_name: verl_examples # Experiment name for run identification in tracking tools experiment_name: gsm8k # Logging backends to use: "console", "wandb", etc. logger: ["console", "wandb"] # Number of generations to log during validation log_val_generations: 0 # Directory for logging rollout data; no dump if null rollout_data_dir: null # Directory for logging validation data; no dump if null validation_data_dir: null # Number of nodes used in the training nnodes: 1 # Number of GPUs per node n_gpus_per_node: 8 # Save frequency (by iteration) for model checkpoints save_freq: -1 # ESI refers to the elastic server instance used during training, similar to the training plan. For example, # if you purchase 10 hours of computing power, the ESI will automatically shut down after 10 hours of training. # To ensure a checkpoint is saved before ESI shuts down, the system will start saving a checkpoint in advance. # The advance time is calculated as: Advance Time = Longest historical step duration + Checkpoint save duration + esi_redundant_time. # Here, esi_redundant_time is a user-defined value that further extends the advance time for added safety. 
esi_redundant_time: 0 # Resume mode: "auto", "disable", or "resume_path" # "auto": resume from last checkpoint if available # "disable": start from scratch # "resume_path": resume from a user-defined path resume_mode: auto # Path to resume training from (only used when resume_mode is "resume_path") resume_from_path: null # Whether to run validation before training begins val_before_train: True # Whether to run validation only val_only: False # Validation frequency (in training iterations) test_freq: -1 # Number of iterations to warm up the critic before updating policy critic_warmup: 0 # Default path to distributed filesystem for saving checkpoints default_hdfs_dir: null # Whether to delete local checkpoints after loading del_local_ckpt_after_load: False # Default local directory for saving checkpoints default_local_dir: checkpoints/${trainer.project_name}/${trainer.experiment_name} # Maximum number of actor checkpoints to keep max_actor_ckpt_to_keep: null # Maximum number of critic checkpoints to keep max_critic_ckpt_to_keep: null # Timeout (in seconds) for Ray worker to wait for registration ray_wait_register_center_timeout: 300 # Device to run training on (e.g., "cuda", "cpu") device: cuda # whether to use legacy worker implementation # mode: "auto", "enable", or "disable" use_legacy_worker_impl: auto # profiler configs global_profiler: # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs _target_: verl.utils.profiler.ProfilerConfig # Profiling tool: choose between nsys, npu, torch, torch_memory tool: null # profile steps steps: null # Whether to combine continuous steps into one database. ## If True, worker.profiler.discrete must be False, [1,2] in one, [5] in another. ## If False, [1] in one, [2] in another, [5] in another. profile_continuous_steps: False # Path to save profiling contents save_path: "outputs/profile" # Specific tool configs, can use +profiler.tool_config.[tool].xxx to config global_tool_config: # nsys config nsys: # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs _target_: verl.utils.profiler.config.NsightToolConfig # True for each task has its own database, False for all tasks in one training step share one database. discrete: False # controller Nvidia Nsight Systems Options. Must set when profile_steps is not None. ## reference https://docs.nvidia.com/nsight-systems/UserGuide/index.html ## reference https://docs.ray.io/en/latest/ray-observability/user-guides/profiling.html controller_nsight_options: # Select the API(s) to be traced. trace: "cuda,nvtx,cublas,ucx" # Track the GPU memory usage by CUDA kernels. Must be string type "true" or "false". cuda-memory-usage: "true" # CUDA graphs will be traced as a whole cuda-graph-trace: "graph" # worker Nvidia Nsight Systems Options. Must set when profile_steps is not None. worker_nsight_options: # Select the API(s) to be traced. trace: "cuda,nvtx,cublas,ucx" # Track the GPU memory usage by CUDA kernels. Must be string type "true" or "false". cuda-memory-usage: "true" # CUDA graphs will be traced as a whole cuda-graph-trace: "graph" # Profiling only in a range of torch.cuda.profiler.start and stop. Do not change this config. capture-range: "cudaProfilerApi" # Specify the desired behavior when a capture range ends. # In verl we need the torch.cuda.profiler.start/stop pair to repeats n times. # valid values are "repeat-shutdown:n" or null. 
# For normal whole step profiling, n = len(profile_steps); # but for discrete profiling, n = len(profile_steps) * Number(subtasks). # Or you can just leave it null and the program will use n = len(profile_steps) * 6; capture-range-end: null # Send signal to the target application's process group. We let the program to exit by itself. kill: none # enable memory visualization for debugging memory usage torch_memory: # Maximum number of allocation entries to record trace_alloc_max_entries: 100_000 # The depth of the call stack to capture for each allocation stack_depth: 32 # 'alloc': records only allocation events || 'state': records memory state changes || 'all': records both. context: "all" # 'python': records Python stacks || 'cpp': records C++ stacks (available in some versions) || 'all': records both. stacks: "all" # devices, record_context etc. kw_args: {} # configs for TransferQueue transfer_queue: # Whether to enable transfer queue enable: False # configs related to ray ray_kwargs: # configs related to ray initialization ray_init: # Number of CPUs for Ray. Use a fixed number instead of null when using SLURM. num_cpus: null # Path to save Ray timeline JSON for performance profiling timeline_json_file: null ================================================ FILE: verl_distillation/verl/trainer/config/ref/dp_ref.yaml ================================================ # defaults specify the default config from each component defaults: # dp ref config, inheriting from trainer/config/ref/ref.yaml - ref # fsdp engine config - ../engine@fsdp_config: fsdp # load the reference default config, then apply the fields in the current yaml - _self_ # ref model is assumed to be identical to actor model. Specify model.path for using a different ref model. # Potential use case involves on policy distillation where we calculate KL divergence between student actor # and teacher ref model: null # sequence parallel size # same as actor_rollout_ref.actor.ulysses_sequence_parallel_size if it exists, otherwise 1 ulysses_sequence_parallel_size: ${oc.select:actor_rollout_ref.actor.ulysses_sequence_parallel_size,1} # calculate entropy with chunking to reduce memory peak entropy_from_logits_with_chunking: False # recompute entropy entropy_checkpointing: False ================================================ FILE: verl_distillation/verl/trainer/config/ref/megatron_ref.yaml ================================================ # megatron ref config, inheriting from trainer/config/ref/ref.yaml defaults: - ref # megatron engine config - ../engine@megatron: megatron # load the reference default config, then apply the fields in the current yaml - _self_ strategy: megatron megatron: _target_: verl.workers.config.MegatronEngineConfig seed: ${oc.select:actor_rollout_ref.actor.megatron.seed,42} override_transformer_config: ${oc.select:actor_rollout_ref.actor.megatron.override_transformer_config,{}} use_mbridge: ${oc.select:actor_rollout_ref.actor.megatron.use_mbridge,False} load_weight: True ================================================ FILE: verl_distillation/verl/trainer/config/ref/ref.yaml ================================================ # actor_rollout_ref.ref: FSDP config same as actor. 
================================================ FILE: verl_distillation/verl/trainer/config/ref/ref.yaml ================================================ # actor_rollout_ref.ref: FSDP config same as actor. For models larger than 7B, it's recommended to turn on offload for ref by default strategy: ${actor_rollout_ref.actor.strategy} # whether to enable torch.compile # same as actor_rollout_ref.actor.use_torch_compile if it exists, otherwise true use_torch_compile: ${oc.select:actor_rollout_ref.actor.use_torch_compile,true} # [Will be deprecated, use log_prob_micro_batch_size_per_gpu] # The batch size for one forward pass in the computation of log_prob. Global batch size. log_prob_micro_batch_size: null # The batch size for one forward pass in the computation of log_prob. Local batch size per GPU. log_prob_micro_batch_size_per_gpu: null # enable dynamic batch size (sequence packing) for log_prob computation # same as actor_rollout_ref.actor.use_dynamic_bsz if it exists, otherwise false log_prob_use_dynamic_bsz: ${oc.select:actor_rollout_ref.actor.use_dynamic_bsz,false} # the max token length per GPU # same as actor_rollout_ref.actor.ppo_max_token_len_per_gpu if it exists, otherwise 16384 log_prob_max_token_len_per_gpu: ${oc.select:actor_rollout_ref.actor.ppo_max_token_len_per_gpu,16384} # float value used to replace the ref_log_prob ref_log_prob_replace_val: -10.0 # profile the ref model in `compute_log_prob` profiler: # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs _target_: verl.utils.profiler.ProfilerConfig # choices: nsys, npu, torch, torch_memory tool: ${oc.select:global_profiler.tool,null} # whether to enable profiling on the ref model enable: False # Whether to profile all ranks. all_ranks: False # The ranks that will be profiled. [] or [0,1,...] ranks: [] # profile results saving path save_path: ${oc.select:global_profiler.save_path,null} # specific tool config which is only related to this role tool_config: # nsys tool config nsys: # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs _target_: verl.utils.profiler.config.NsightToolConfig # True: each task has its own database. False: all tasks in one training step share one database. discrete: ${oc.select:global_profiler.global_tool_config.nsys.discrete} # npu config npu: # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs _target_: verl.utils.profiler.config.NPUToolConfig # Contents to profile, can be empty # options: npu, cpu, memory, shapes, module, stack contents: [] # Collection level, optional values: level_none, level0, level1, level2. level: "level1" # Whether to automatically parse the data. analysis: True # True: each task has its own database. False: all tasks in one training step share one database.
discrete: False # torch profiler config torch: # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs _target_: verl.utils.profiler.config.TorchProfilerToolConfig # start profiling at this mini-batch in training # NOTICE: different from the global steps config, which refers to iterations # This field is only related to mini-batches step_start: 0 # stop profiling at this mini-batch in training step_end: null # torch memory profiler config torch_memory: # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs _target_: verl.utils.profiler.config.TorchMemoryToolConfig # Maximum number of memory allocation entries to track trace_alloc_max_entries: ${oc.select:global_profiler.global_tool_config.torch_memory.trace_alloc_max_entries,100000} # Stack trace depth for memory allocations stack_depth: ${oc.select:global_profiler.global_tool_config.torch_memory.stack_depth,32} ================================================ FILE: verl_distillation/verl/trainer/config/reward_model/dp_reward_model.yaml ================================================ # Format checks enforced on CI: # 1. Comments must appear above each field. # 2. There must be a blank line between each field. # 3. Inline comments (after a field on the same line) are not allowed. # 4. Indentation level is respected for nested fields. # defaults specify the default config from each component defaults: # dp reward model config, inheriting from trainer/config/reward_model/reward_model.yaml - reward_model # load the reference default config, then apply the fields in the current yaml - _self_ strategy: fsdp model: # Whether to use shared memory for loading the model use_shm: False # Use remove padding optimization (saves compute) use_remove_padding: False # Whether to use fused reward kernels for speedup use_fused_kernels: ${actor_rollout_ref.model.use_fused_kernels} # FSDP-specific config fsdp_config: # Target configuration dataclass _target_: verl.workers.config.FSDPEngineConfig # Policy for wrapping layers with FSDP wrap_policy: # Minimum number of parameters to trigger wrapping min_num_params: 0 # Whether to offload model parameters to CPU param_offload: False # Only for FSDP2: Reshard after forward pass to reduce memory footprint reshard_after_forward: True # Number of GPUs in each FSDP shard group; -1 means auto fsdp_size: -1 # Only for FSDP1: FSDP1 configuration, prefetch the next forward-pass all-gather # before the current forward computation. forward_prefetch: False # Sequence parallelism size for Ulysses-style model parallelism ulysses_sequence_parallel_size: 1
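The _target_ fields throughout these configs name the dataclass each block is materialized into. A minimal sketch of that general pattern using Hydra's generic instantiate; DemoEngineConfig is a made-up stand-in, not verl's FSDPEngineConfig:

from dataclasses import dataclass
from hydra.utils import instantiate
from omegaconf import OmegaConf

# Toy stand-in for a config dataclass such as verl.workers.config.FSDPEngineConfig.
@dataclass
class DemoEngineConfig:
    param_offload: bool = False
    fsdp_size: int = -1

cfg = OmegaConf.create({
    "_target_": "__main__.DemoEngineConfig",  # resolvable when run as a script
    "param_offload": True,
})
engine_cfg = instantiate(cfg)
print(engine_cfg)  # DemoEngineConfig(param_offload=True, fsdp_size=-1)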
================================================ FILE: verl_distillation/verl/trainer/config/reward_model/megatron_reward_model.yaml ================================================ # defaults specify the default config from each component defaults: # megatron reward model config, inheriting from trainer/config/reward_model/reward_model.yaml - reward_model # load the reference default config, then apply the fields in the current yaml - _self_ strategy: megatron # seconds; the default is 10 minutes for torch. You can set it to a larger value # if you have long-running operations, e.g., a 32B or 72B model using megatron nccl_timeout: 600 # Megatron parallelism & checkpointing config megatron: # Target configuration dataclass _target_: verl.workers.config.MegatronEngineConfig # Whether to offload model parameters to CPU param_offload: False # Number of GPUs in the tensor model parallel group tensor_model_parallel_size: 1 # Number of GPUs in the expert model parallel group expert_model_parallel_size: 1 # Expert tensor parallel size expert_tensor_parallel_size: 1 # Number of pipeline model parallel stages pipeline_model_parallel_size: 1 # change VPP interface for parallelism tests virtual_pipeline_model_parallel_size: null # Context parallel size context_parallel_size: 1 # Whether to use sequence parallelism sequence_parallel: True # Whether to use distributed optimizer use_distributed_optimizer: False # Whether to enable distributed checkpointing use_dist_checkpointing: False # Path for distributed checkpoints dist_checkpointing_path: null # RNG seed for megatron seed: ${oc.select:actor_rollout_ref.actor.megatron.seed,42} # Any overrides to the transformer config override_transformer_config: ${oc.select:actor_rollout_ref.actor.megatron.override_transformer_config,{}} # Whether to use mbridge for faster comms use_mbridge: ${oc.select:actor_rollout_ref.actor.megatron.use_mbridge,False} # Whether to load weights (default True) load_weight: True ================================================ FILE: verl_distillation/verl/trainer/config/reward_model/reward_model.yaml ================================================ # configs for the reward model # Whether to enable the reward model. If False, we compute the reward only with the user-defined reward functions. # In the GSM8K and Math examples, we disable the reward model. # For the RLHF alignment example using full_hh_rlhf, we utilize the reward model to assess the responses. # If False, the following parameters are not effective enable: False # Whether to deploy the model to a separate resource pool. # If true, n_gpus_per_node & nnodes will be used to determine the resource node. enable_resource_pool: False n_gpus_per_node: 0 nnodes: 0 # FSDP strategy: "fsdp" or "fsdp2" strategy: ??? # model config for reward scoring model: # Input tokenizer. If the reward model's chat template is inconsistent with the policy, # we need to first decode to plaintext, then apply the rm's chat_template, # and then score with the RM. If the chat_templates are consistent, it can be set to null. # set this to null if the chat template is identical input_tokenizer: ${actor_rollout_ref.model.path} # RM's HDFS path or local path. Note that the RM only supports AutoModelForSequenceClassification. # Other model types need to define their own RewardModelWorker and pass it in from code.
path: ~/models/FsfairX-LLaMA3-RM-v0.1 # External model implementation (optional) external_lib: ${actor_rollout_ref.model.external_lib} # Whether to enable loading a remote-code model; defaults to False trust_remote_code: False # [Deprecated] Global micro batch size # will be deprecated, use micro_batch_size_per_gpu micro_batch_size: null # Local per-GPU micro batch size micro_batch_size_per_gpu: null # Maximum sequence length to process for scoring max_length: null # Whether to dynamically adjust batch size at runtime use_dynamic_bsz: ${critic.use_dynamic_bsz} # Maximum number of tokens per GPU in one forward pass forward_max_token_len_per_gpu: ${critic.forward_max_token_len_per_gpu} # Reward Manager. This defines the mechanism for computing rule-based rewards and handling different reward sources. # Default is naive. If all verification functions are multiprocessing-safe, # the reward manager can be set to prime for parallel verification. reward_manager: naive # Whether to launch the custom reward function asynchronously during log_prob # custom reward function executed async on CPU, during log_prob launch_reward_fn_async: False # Cloud/local sandbox fusion configuration for custom reward logic sandbox_fusion: # Cloud/local function URL for sandbox execution url: null # Max concurrent requests allowed to the sandbox max_concurrent: 64 # Max memory limit for each sandbox process in MB memory_limit_mb: 1024 # profile the reward model in `compute_reward` profiler: # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs _target_: verl.utils.profiler.ProfilerConfig # profiler tool, default same as profiler.tool in the global config # choices: nsys, npu, torch tool: ${oc.select:global_profiler.tool,null} # whether to enable profiling on the reward model enable: False # Whether to profile all ranks. all_ranks: False # The ranks that will be profiled. [] or [0,1,...] ranks: [] # profile results saving path save_path: ${oc.select:global_profiler.save_path,null} # specific tool config tool_config: ${oc.select:actor_rollout_ref.actor.profiler.tool_config,null}
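When reward_model.enable is False, rewards come from user-defined reward functions (see the reward_manager comment above). As a shape reference, a minimal rule-based reward compatible with the reward_fn(data_source, response, ground_truth) call convention used by main_eval.py later in this dump; illustrative only, not shipped verl code:

def exact_match_reward(data_source: str, response: str, ground_truth: str) -> float:
    # 1.0 if the generated response matches the ground truth exactly, else 0.0.
    return float(response.strip() == str(ground_truth).strip())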
================================================ FILE: verl_distillation/verl/trainer/config/rollout/rollout.yaml ================================================ # Target class for this configuration _target_: verl.workers.config.RolloutConfig # actor_rollout_ref.rollout.name: hf/vllm/sglang. The default value will be removed in the future name: ??? # sync: LLM, async: AsyncLLM mode: sync # Sampling temperature for rollout. temperature: 1.0 # Top-k sampling parameter. -1 for vLLM rollout, 0 for HF rollout. top_k: -1 # Top-p sampling parameter. Default 1.0. top_p: 1 # typically the same as data max prompt length # same as data.max_prompt_length if it exists prompt_length: ${oc.select:data.max_prompt_length,512} # typically the same as data max response length # same as data.max_response_length if it exists response_length: ${oc.select:data.max_response_length,512} # for vllm rollout # Rollout model parameter type. Align with the actor model's FSDP/Megatron type. dtype: bfloat16 # Fraction of GPU memory used by vLLM/SGLang for KV cache. gpu_memory_utilization: 0.5 # Whether to ignore EOS and continue generating after EOS is hit. ignore_eos: False # Whether to disable CUDA graph. Default False for best performance. enforce_eager: False # batch sizes of cuda graphs to capture. Requires enforce_eager: False to use this option. # Since cuda graphs in the inference engine cannot be offloaded during the policy update, # you can use smaller batch sizes to save the memory used by cuda graphs, e.g.: [1, 2, 4, 8, 16, 32] # supported engines: vllm cudagraph_capture_sizes: null # Whether to free the engine KVCache after generation. free_cache_engine: True # TP size for rollout. Not effective for hf tensor_model_parallel_size: 2 # DP size for rollout data_parallel_size: 1 # EP size for rollout expert_parallel_size: 1 # PP size for rollout. pipeline_model_parallel_size: 1 # max number of tokens in a batch max_num_batched_tokens: 8192 # max length for rollout max_model_len: null # max number of sequences max_num_seqs: 1024 # may get higher throughput when set to True. When activated, please increase max_num_batched_tokens or decrease max_model_len. enable_chunked_prefill: True # Prefix caching of kv-cache blocks is a popular optimization in LLM inference to avoid redundant prompt computations. enable_prefix_caching: True # Which loader to use for rollout model weights: dummy, hf, megatron, etc. # safetensors (for huge models; also set use_shm=True); dummy: randomly init model weights load_format: dummy # [Will be deprecated, use log_prob_micro_batch_size_per_gpu] The batch size for one forward pass in the computation of log_prob. Global batch size. log_prob_micro_batch_size: null # The batch size for one forward pass in the computation of log_prob. Local batch size per GPU. log_prob_micro_batch_size_per_gpu: null # enable dynamic batch size (sequence packing) for log_prob computation # same as actor_rollout_ref.actor.use_dynamic_bsz if it exists, otherwise false log_prob_use_dynamic_bsz: ${oc.select:actor_rollout_ref.actor.use_dynamic_bsz,false} # max token length for log_prob computation # same as actor_rollout_ref.actor.ppo_max_token_len_per_gpu if it exists, otherwise 16384 log_prob_max_token_len_per_gpu: ${oc.select:actor_rollout_ref.actor.ppo_max_token_len_per_gpu,16384} # disable logging statistics disable_log_stats: True # for hf rollout # Whether to sample during training rollout. False uses greedy sampling. do_sample: True # number of responses (i.e. num sample times). > 1 for grpo n: 1 # The over_sample_rate parameter controls the early-termination threshold for training rollouts: # the system will abort the remaining requests once (1 - over_sample_rate) * total_requests completions are reached. over_sample_rate: 0 # Whether to wake up the inference engine in multiple stages for SGLang # to reduce peak memory during the training-rollout transition. # This is only effective for SGLang rollout. multi_stage_wake_up: false # Extra inference engine arguments (vllm, sglang); please refer to the vllm/sglang official docs for details engine_kwargs: # vllm engine config vllm: {} # sglang engine config sglang: {} # Sampling parameters used during validation. val_kwargs: # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs _target_: verl.workers.config.SamplingConfig # sampling parameters for validation # Top-k sampling parameter. -1 for vLLM rollout, 0 for HF rollout. top_k: -1 # Top-p sampling parameter. Default 1.0. top_p: 1.0 # Sampling temperature for validation. temperature: 0 # whether to repeat n times for validation n: 1 # Whether to sample during validation rollout. False uses greedy sampling. do_sample: False
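For intuition, the val_kwargs block above amounts to greedy decoding. With the vLLM engine, the equivalent sampling parameters would look roughly like this (an illustrative mapping, not verl's actual wiring; max_tokens mirrors the response_length default of 512):

from vllm import SamplingParams

# temperature=0.0 makes vLLM decode greedily, matching do_sample: False;
# n=1 generates a single response per prompt during validation.
val_params = SamplingParams(temperature=0.0, top_p=1.0, top_k=-1, n=1, max_tokens=512)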
# Multi-turn interaction config for tools or chat. multi_turn: # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs _target_: verl.workers.config.MultiTurnConfig # set to True for multi-turn tool interaction tasks; rollout.name should be set to sglang as well enable: False # null for no limit (default max_length // 3) max_assistant_turns: null # null for no tool tool_config_path: null # null for no limit (default max_length // 3) max_user_turns: null # max parallel calls for tools in a single turn max_parallel_calls: 1 # max length of a tool response max_tool_response_length: 256 # truncation side of the tool response: left, middle, right tool_response_truncate_side: middle # null for no interaction interaction_config_path: null # - When set to True, the model's default chat template is used for multi-turn rollout, which typically matches production behavior. # - When set to False, the token ids recorded for training are used instead; unlike the default chat template, these always include the model's full output, # which may contain additional content such as reasoning content. This maintains consistency between training and rollout, but it will lead to longer prompts. use_inference_chat_template: False # Tokenization is performed turn by turn and the resulting token ids are concatenated to form the full conversation. # To ensure this matches the result of tokenizing the entire conversation at once, a sanity check is run at the end of each multi-turn rollout to compare the two sets of token ids. # Some models are known to produce different tokenization results when tokenizing turn by turn vs. all at once. This behavior has already been validated for them. # To reduce excessive warnings, you can turn off the sanity check for these models if you are using their default chat template: # Qwen/QwQ-32B, Qwen/Qwen3-xxB # - disable: disable tokenization sanity check # - strict: enable strict tokenization sanity check (default) # - ignore_strippable: ignore strippable tokens when checking tokenization sanity tokenization_sanity_check_mode: strict # Format of the multi-turn interaction. Options: hermes, llama3_json, ... format: hermes # Number of repeat rollouts for each interaction num_repeat_rollouts: null # support logging rollout probs for debugging purposes # "Truncated importance sampling" requires rollout log probs; set to True when turning on truncated importance sampling calculate_log_probs: False # mask special tokens in the response, for on-policy distillation extend_vocab_start_token: null # mask_response_if_have_extend_token, for on-policy distillation mask_response_if_have_extend_token: False # [Experimental] agent-loop based rollout configs agent: # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs _target_: verl.workers.config.AgentLoopConfig # Number of agent loop workers num_workers: 8 # default agent loop to use if `agent_name` is not set in the RL dataset default_agent_loop: single_turn_agent # custom agent loop config path, which should contain a list of configs to initialize AgentLoop instances.
# https://hydra.cc/docs/advanced/instantiate_objects/overview/ # # - name: react_agent # _target_: recipe.langgraph_agent.react_agent_loop.ReactAgentLoop # tools: ["get_current_temperature"] # - name: math_expression # _target_: recipe.langgraph_agent.example.math_expression.MathExpressionReactAgentLoop # min_terms: 2 # max_terms: 6 agent_loop_config_path: null # custom async server configs custom_async_server: # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs _target_: verl.workers.config.CustomAsyncServerConfig # Path to the custom async server implementation path: null # Class name of the custom async server class (e.g. AsyncvLLMServer) name: null # Specifies the tensor bucket size (in megabytes) for batched weight updates during rollout operations. # This parameter controls the maximum payload size for a single weight-update request. # Reference: https://github.com/volcengine/verl/pull/2418 # Currently only supported in SGLang rollout implementations # Larger values may improve throughput but increase memory overhead # Detailed performance comparison: # https://github.com/zhaochenyang20/Awesome-ML-SYS-Tutorial/issues/169#issuecomment-3070686720 # The default value (512MB) is optimized for typical GPU memory configurations # For the best performance of `rebuild_cuda_tensor`, it is recommended to: # 1. Enable `RAY_EXPERIMENTAL_NOSET_CUDA_VISIBLE_DEVICES` # 2. Manually set `CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7` # when using Tensor Parallelism (TP) >= 8. update_weights_bucket_megabytes: 512 # trace rollout data trace: # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs _target_: verl.workers.config.TraceConfig # trace backend; supports mlflow, weave backend: null # whether to translate token ids to text in the output token2text: False # When enabled (True), the trainer will attempt to load previously generated rollout data from the specified directory instead of computing new rollouts. # If no cached data is found or loading fails, new rollouts will be generated and automatically saved. # This feature is useful for debugging or when you want to reuse computation results across multiple runs. skip_rollout: False # Specifies the filesystem path where rollout data should be cached when skip_rollout is enabled. # Note: giving a path under /tmp/ray/session* is not recommended, as these are temporary Ray cluster directories. skip_dump_dir: /tmp/rollout_dump # Whether to skip tokenizer initialization for the rollout engine # When enabled (True), the rollout assumes token-in/token-out generation skip_tokenizer_init: True # profile the rollout model in `generate_sequence` profiler: # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs _target_: verl.utils.profiler.ProfilerConfig # profiler tool, default same as profiler.tool in the global config # choices: nsys, npu, torch tool: ${oc.select:global_profiler.tool,null} # whether to enable profiling on the rollout model enable: ${oc.select:actor_rollout_ref.actor.profiler.enable,false} # Whether to profile all ranks. all_ranks: ${oc.select:actor_rollout_ref.actor.profiler.all_ranks,false} # The ranks that will be profiled. [] or [0,1,...]
ranks: ${oc.select:actor_rollout_ref.actor.profiler.ranks,[]} # profile results saving path save_path: ${oc.select:global_profiler.save_path,null} # specific tool config tool_config: ${oc.select:actor_rollout_ref.actor.profiler.tool_config,null} ================================================ FILE: verl_distillation/verl/trainer/config/sft_trainer.yaml ================================================ defaults: - optim: fsdp - _self_ data: train_batch_size: 256 micro_batch_size: null # will be deprecated, use micro_batch_size_per_gpu micro_batch_size_per_gpu: 4 # this is also the val batch size train_files: ~/data/gsm8k/train.parquet val_files: ~/data/gsm8k/test.parquet train_max_samples: -1 # set to -1 to use the full dataset val_max_samples: -1 # set to -1 to use the full dataset # Single-turn settings prompt_key: question response_key: answer prompt_dict_keys: null response_dict_keys: null # Multi-turn settings multiturn: enable: false # Set to true to use the multi-turn dataset messages_key: messages # Key for the messages list in multi-turn mode tools_key: tools # Key for the tools list in multi-turn mode enable_thinking_key: enable_thinking # Whether to enable thinking in multi-turn mode max_length: 1024 truncation: error balance_dp_token: False chat_template: null custom_cls: path: null name: null use_shm: False apply_chat_template_kwargs: {} model: partial_pretrain: ~/models/gemma-1.1-7b-it use_shm: False fsdp_config: model_dtype: fp32 wrap_policy: min_num_params: 0 cpu_offload: False offload_params: False external_lib: null enable_gradient_checkpointing: True trust_remote_code: False lora_rank: 0 # Set to a positive value to enable LoRA (e.g., 32) lora_alpha: 16 # LoRA scaling factor target_modules: all-linear # Target modules for LoRA adaptation use_liger: False strategy: fsdp2 optim: lr: 1e-5 betas: [0.9, 0.95] weight_decay: 0.01 lr_warmup_steps_ratio: 0.1 clip_grad: 1.0 lr_scheduler: cosine ulysses_sequence_parallel_size: 1 use_remove_padding: False trainer: default_local_dir: checkpoints/${trainer.project_name}/${trainer.experiment_name} default_hdfs_dir: null project_name: gsm8k-sft experiment_name: test total_epochs: 4 total_training_steps: null logger: [ 'console', 'wandb' ] seed: 1 save_freq: -1 test_freq: -1 nnodes: 1 n_gpus_per_node: 8 max_ckpt_to_keep: null # Maximum number of checkpoints to keep; set to null to keep all # Resume mode: "auto", "disable", or "resume_path" # "auto": resume from the last checkpoint if available # "disable": start from scratch # "resume_path": resume from a user-defined path resume_mode: auto # Path to resume training from (used when resume_mode is "resume_path" or "auto") resume_from_path: null # Checkpoint configuration checkpoint: # What to include in saved checkpoints # with 'hf_model' you can save the whole model in hf format; by default only the sharded model checkpoint is saved, to save space save_contents: ["model", "optimizer", "extra"] # For more flexibility, you can specify the contents to load from the checkpoint. load_contents: ${trainer.checkpoint.save_contents} device: cuda
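As a usage sketch, the config above can be composed programmatically and fed to the run_sft entry point defined in verl/trainer/fsdp_sft_trainer.py later in this dump. This assumes verl.trainer.config is importable as a Hydra config module; the script must be launched under torchrun (the trainer initializes the global process group), and the file paths are placeholders:

from hydra import compose, initialize_config_module
from verl.trainer.fsdp_sft_trainer import run_sft

with initialize_config_module(config_module="verl.trainer.config", version_base=None):
    cfg = compose(
        config_name="sft_trainer",
        overrides=[
            "data.train_files=/path/to/train.parquet",
            "data.val_files=/path/to/test.parquet",
            "model.partial_pretrain=/path/to/base_model",
            "trainer.total_epochs=1",
        ],
    )
run_sft(cfg)  # torchrun supplies RANK/WORLD_SIZE for process-group init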
================================================ FILE: verl_distillation/verl/trainer/config/sft_trainer_engine.yaml ================================================ # Format checks enforced on CI: # 1. Comments must appear above each field. # 2. There must be a blank line between each field. # 3. Inline comments (after a field on the same line) are not allowed. # 4. Indentation level is respected for nested fields. # @.: defaults: - model@model: hf_model - engine@engine: fsdp - optim@optim: fsdp - _self_ data: train_batch_size: 256 # global batch size micro_batch_size_per_gpu: 4 # this is also the val batch size max_token_len_per_gpu: 8192 use_dynamic_bsz: True train_files: ~/data/gsm8k/train.parquet val_files: null train_max_samples: -1 # set to -1 to use the full dataset val_max_samples: -1 # set to -1 to use the full dataset # Multi-turn settings messages_key: messages # Key for the messages list in multi-turn mode tools_key: tools # Key for the tools list in multi-turn mode enable_thinking_key: enable_thinking # Whether to enable thinking in multi-turn mode pad_mode: no_padding # for right padding max_length: 1024 truncation: error balance_dp_token: False # to be implemented custom_cls: path: null name: null use_shm: False apply_chat_template_kwargs: {} # Checkpoint configuration checkpoint: _target_: verl.trainer.config.CheckpointConfig # What to include in saved checkpoints # with 'hf_model' you can save the whole model in hf format; by default only the sharded model checkpoint is saved, to save space save_contents: ["model", "optimizer", "extra"] # For more flexibility, you can specify the contents to load from the checkpoint. load_contents: ${checkpoint.save_contents} trainer: default_local_dir: checkpoints/${trainer.project_name}/${trainer.experiment_name} default_hdfs_dir: null project_name: gsm8k-sft experiment_name: test total_epochs: 4 total_training_steps: null logger: [ 'console', 'wandb' ] seed: 1 save_freq: -1 test_freq: -1 max_ckpt_to_keep: null # Maximum number of checkpoints to keep; set to null to keep all # Resume mode: "auto", "disable", or "resume_path" # "auto": resume from the last checkpoint if available # "disable": start from scratch # "resume_path": resume from a user-defined path resume_mode: auto # Path to resume training from (used when resume_mode is "resume_path" or "auto") resume_from_path: null device: cuda ================================================ FILE: verl_distillation/verl/trainer/constants_ppo.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from ray._private.runtime_env.constants import RAY_JOB_CONFIG_JSON_ENV_VAR PPO_RAY_RUNTIME_ENV = { "env_vars": { "TOKENIZERS_PARALLELISM": "true", "NCCL_DEBUG": "WARN", "VLLM_LOGGING_LEVEL": "WARN", "VLLM_ALLOW_RUNTIME_LORA_UPDATING": "true", "CUDA_DEVICE_MAX_CONNECTIONS": "1", # To prevent hanging or crash during synchronization of weights between actor and rollout # in disaggregated mode. See: # https://docs.vllm.ai/en/latest/usage/troubleshooting.html?h=nccl_cumem_enable#known-issues # https://github.com/vllm-project/vllm/blob/c6b0a7d3ba03ca414be1174e9bd86a97191b7090/vllm/worker/worker_base.py#L445 "NCCL_CUMEM_ENABLE": "0", }, } def get_ppo_ray_runtime_env(): """ A filter function that returns the PPO Ray runtime environment, dropping environment variables that are already set in the current environment.
""" working_dir = ( json.loads(os.environ.get(RAY_JOB_CONFIG_JSON_ENV_VAR, "{}")).get("runtime_env", {}).get("working_dir", None) ) runtime_env = { "env_vars": PPO_RAY_RUNTIME_ENV["env_vars"].copy(), **({"working_dir": None} if working_dir is None else {}), } for key in list(runtime_env["env_vars"].keys()): if os.environ.get(key) is not None: runtime_env["env_vars"].pop(key, None) return runtime_env ================================================ FILE: verl_distillation/verl/trainer/fsdp_sft_trainer.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ A lightweight one-file FSDP SFT Trainer TODO(zhangchi.usc1992) - Add calculation of mfu - Add validation """ import os os.environ["NCCL_DEBUG"] = "WARN" os.environ["TOKENIZERS_PARALLELISM"] = "true" import logging import re import time from contextlib import nullcontext import hydra import torch import torch.distributed from omegaconf import DictConfig, OmegaConf from peft import LoraConfig, TaskType, get_peft_model from tensordict import TensorDict from torch import nn from torch.distributed.device_mesh import DeviceMesh, init_device_mesh from torch.distributed.fsdp import CPUOffload, MixedPrecision, ShardingStrategy from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.utils.data import Dataset, DistributedSampler from torchdata.stateful_dataloader import StatefulDataLoader from tqdm import tqdm from transformers import AutoConfig, AutoModelForCausalLM, PreTrainedModel import verl.utils.hdfs_io as hdfs_io from verl.utils.attention_utils import index_first_axis, pad_input, rearrange, unpad_input from verl.utils.checkpoint.checkpoint_manager import find_latest_ckpt_path, get_checkpoint_tracker_filename from verl.utils.checkpoint.fsdp_checkpoint_manager import FSDPCheckpointManager from verl.utils.dataset import SFTDataset from verl.utils.dataset.multiturn_sft_dataset import MultiTurnSFTDataset from verl.utils.device import get_device_id, get_device_name, is_cuda_available, is_npu_available from verl.utils.distributed import destroy_global_process_group, initialize_global_process_group from verl.utils.fs import copy_to_local from verl.utils.fsdp_utils import ( CPUOffloadPolicy, MixedPrecisionPolicy, apply_fsdp2, fsdp2_clip_grad_norm_, fsdp2_load_full_state_dict, get_fsdp_wrap_policy, get_init_weight_context_manager, init_fn, ) from verl.utils.logger import log_with_rank from verl.utils.profiler import log_gpu_memory_usage from verl.utils.py_functional import convert_to_regular_types from verl.utils.torch_dtypes import PrecisionType from verl.utils.torch_functional import get_cosine_schedule_with_warmup, get_wsd_schedule_with_warmup from verl.utils.tracking import Tracking from verl.utils.ulysses import ( gather_outputs_and_unpad, get_ulysses_sequence_parallel_world_size, ulysses_pad_and_slice_inputs, ) from verl.workers.config.optimizer import build_optimizer from verl.workers.sharding_manager.fsdp_ulysses import 
FSDPUlyssesShardingManager logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_SFT_LOGGING_LEVEL", "WARN")) def extract_step(path): match = re.search(r"global_step_(\d+)", path) if match: return int(match.group(1)) return None class FSDPSFTTrainer: def __init__( self, config, device_mesh: DeviceMesh, ulysses_device_mesh: DeviceMesh, tokenizer, train_dataset: Dataset, val_dataset: Dataset, ): self.config = config self.device_mesh = device_mesh self.ulysses_device_mesh = ulysses_device_mesh self.sharding_manager = FSDPUlyssesShardingManager(self.ulysses_device_mesh) self.tokenizer = tokenizer if self.config.data.chat_template is not None: raise ValueError("Apply Chat template from config is not supported yet.") # normalize dp size self._normalize_config_bsz() # Set sequence parallel size self.config.ulysses_sequence_parallel_size = getattr(self.config, "ulysses_sequence_parallel_size", 1) self.use_remove_padding = getattr(self.config, "use_remove_padding", False) if self.device_mesh.get_rank() == 0: print(f"Using sequence parallel size: {self.config.ulysses_sequence_parallel_size}") print(f"Using remove padding: {self.use_remove_padding}") self._build_dataloader(train_dataset, val_dataset) self.lora = self.config.model.get("lora_adapter_path") is not None or self.config.model.lora_rank > 0 # Initialize resume-related variables self.resume_global_step = 0 # build model self._build_model_optimizer() # Initialize checkpoint manager self._init_checkpoint_manager() self.load_checkpoint() if self.device_mesh.get_rank() == 0: print(self.config) self.device_name = self.config.trainer.device def _normalize_config_bsz(self): dp_size = self.device_mesh.size(0) if not self.ulysses_device_mesh else self.ulysses_device_mesh.size(0) if self.device_mesh.get_rank() == 0: print(f"Normalize batch size by dp {dp_size}") assert self.config.data.train_batch_size % dp_size == 0, ( f"Global batch size {self.config.data.train_batch_size} is not divisible by dp size {dp_size}" ) self.config.data.train_batch_size //= dp_size assert self.config.data.train_batch_size % self.config.data.micro_batch_size_per_gpu == 0 def _build_dataloader(self, train_dataset, val_dataset): # build dataset config = self.config self.train_dataset, self.val_dataset = train_dataset, val_dataset # build dataloader # Use data parallel rank and size instead of global rank and world size # If doing SP, we need to use the local rank and size if self.config.ulysses_sequence_parallel_size > 1: rank = self.ulysses_device_mesh.get_local_rank("dp") world_size = self.ulysses_device_mesh.size(0) if self.ulysses_device_mesh.get_rank() == 0: print(f"Using SP rank {rank} and size {world_size} for data distribution") print("Each SP rank gets different data, but the same data WITHIN the same rank") else: rank = self.device_mesh.get_rank() world_size = self.device_mesh.size() if self.device_mesh.get_rank() == 0: print(f"Using FSDP rank {rank} and size {world_size} for data distribution") # Set pin_memory_device when pin_memory is enabled. 
device_name = get_device_name() self.train_sampler = DistributedSampler( self.train_dataset, shuffle=True, num_replicas=world_size, rank=rank, drop_last=True ) self.train_dataloader = StatefulDataLoader( dataset=self.train_dataset, batch_size=config.data.train_batch_size, sampler=self.train_sampler, num_workers=8, pin_memory=True, drop_last=True, pin_memory_device=device_name, ) self.val_sampler = DistributedSampler( self.val_dataset, shuffle=False, num_replicas=world_size, rank=rank, drop_last=True ) self.val_dataloader = StatefulDataLoader( dataset=self.val_dataset, batch_size=config.data.micro_batch_size_per_gpu, sampler=self.val_sampler, num_workers=8, pin_memory=True, drop_last=True, pin_memory_device=device_name, ) def _build_model_optimizer(self): # TODO (zhangchi.usc1992): # 1. support pretrain from random weights # 2. support init directly from sharded weights local_model_path = copy_to_local(src=self.config.model.partial_pretrain, verbose=True) if self.config.model.get("external_lib", None) is not None: # This is used to import external_lib into the huggingface systems import importlib importlib.import_module(self.config.model.external_lib) log_gpu_memory_usage("Before model allocation", logger=logger) trust_remote_code = self.config.model.trust_remote_code torch_dtype = self.config.model.fsdp_config.get("model_dtype", "fp32") torch_dtype = PrecisionType.to_dtype(torch_dtype) # load config first config = AutoConfig.from_pretrained(local_model_path, trust_remote_code=trust_remote_code) self.model_config = config if hasattr(self.model_config, "max_position_embeddings"): self.model_config.max_position_embeddings = max( self.model_config.max_position_embeddings, self.config.data.max_length ) if self.config.ulysses_sequence_parallel_size > 1: assert self.use_remove_padding, "Sequence parallel is only supported when remove_padding is enabled" # This may be very large init_context = get_init_weight_context_manager( use_meta_tensor=not config.tie_word_embeddings, mesh=self.device_mesh ) with init_context(): self.model: PreTrainedModel = AutoModelForCausalLM.from_pretrained( local_model_path, config=config, torch_dtype=torch_dtype, attn_implementation="flash_attention_2", trust_remote_code=trust_remote_code, ) if self.use_remove_padding or self.config.ulysses_sequence_parallel_size > 1: from verl.models.transformers.monkey_patch import apply_monkey_patch apply_monkey_patch(model=self.model, ulysses_sp_size=self.config.ulysses_sequence_parallel_size) # Apply Liger kernel if use_liger is enabled if self.config.model.get("use_liger", False): from liger_kernel.transformers.monkey_patch import _apply_liger_kernel_to_instance _apply_liger_kernel_to_instance(model=self.model) if self.lora: self.model.enable_input_require_grads() lora_adapter_path = self.config.model.get("lora_adapter_path") if lora_adapter_path is not None: from peft import PeftModel print(f"Loading pre-trained LoRA adapter for sft from: {lora_adapter_path}") local_adapter_path = copy_to_local(lora_adapter_path, use_shm=self.config.model.use_shm) self.model = PeftModel.from_pretrained(self.model, local_adapter_path, is_trainable=True) peft_config = self.model.peft_config["default"] # Ensure task_type is TaskType enum, not string if isinstance(peft_config.task_type, str): peft_config.task_type = TaskType.CAUSAL_LM else: # Convert config to regular Python types before creating PEFT model lora_config = { "task_type": TaskType.CAUSAL_LM, "r": self.config.model.lora_rank, "lora_alpha": self.config.model.lora_alpha, "target_modules": 
convert_to_regular_types(self.config.model.target_modules), "bias": "none", } self.model = get_peft_model(self.model, LoraConfig(**lora_config)) self.model = self.model.to(torch_dtype) if self.config.model.enable_gradient_checkpointing: self.model.gradient_checkpointing_enable(gradient_checkpointing_kwargs={"use_reentrant": False}) log_gpu_memory_usage("After model allocation", logger=logger) mixed_precision = MixedPrecision( param_dtype=torch.bfloat16, reduce_dtype=torch.float32, buffer_dtype=torch.float32 ) auto_wrap_policy = get_fsdp_wrap_policy( self.model, config=self.config.model.fsdp_config.wrap_policy, is_lora=self.lora, ) if self.device_mesh.get_rank() == 0: print(auto_wrap_policy) if not self.config.model.fsdp_config.cpu_offload: cpu_offload = None else: cpu_offload = CPUOffload(offload_params=self.config.model.fsdp_config.offload_params) fsdp_strategy = self.config.model.strategy if fsdp_strategy == "fsdp": self.fsdp_model = FSDP( self.model, cpu_offload=cpu_offload, param_init_fn=init_fn, use_orig_params=False, auto_wrap_policy=auto_wrap_policy, device_id=get_device_id(), sharding_strategy=ShardingStrategy.FULL_SHARD, mixed_precision=mixed_precision, sync_module_states=True, device_mesh=self.device_mesh, forward_prefetch=False, ) elif fsdp_strategy == "fsdp2": assert CPUOffloadPolicy is not None, "PyTorch version >= 2.4 is required for using fully_shard API (FSDP2)" mp_policy = MixedPrecisionPolicy( param_dtype=torch.bfloat16, reduce_dtype=torch.float32, cast_forward_inputs=True ) fsdp_kwargs = { "mesh": self.device_mesh, "mp_policy": mp_policy, "offload_policy": cpu_offload, "reshard_after_forward": True, } full_state = self.model.state_dict() apply_fsdp2(self.model, fsdp_kwargs, self.config.model.fsdp_config) fsdp2_load_full_state_dict(self.model, full_state, self.device_mesh, cpu_offload) self.fsdp_model = self.model else: raise NotImplementedError(f"not implement {fsdp_strategy}") log_gpu_memory_usage("After FSDP wrapping", logger=logger) self.optimizer = build_optimizer(self.fsdp_model.parameters(), self.config.optim) log_gpu_memory_usage("After initialize optimizer", logger=logger) self.steps_per_epoch = len(self.train_dataloader) self.total_steps = self.steps_per_epoch * self.config.trainer.total_epochs if self.device_mesh.get_rank() == 0: print( f"Number of steps/epoch {self.steps_per_epoch}, number of epochs " f"{self.config.trainer.total_epochs}, total number of steps {self.total_steps}" ) num_warmup_steps = int(self.total_steps * self.config.optim.lr_warmup_steps_ratio) if not hasattr(self.config.optim, "lr_scheduler") or self.config.optim.lr_scheduler == "cosine": self.lr_scheduler = get_cosine_schedule_with_warmup( optimizer=self.optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=self.total_steps ) elif self.config.optim.lr_scheduler == "wsd": self.lr_scheduler = get_wsd_schedule_with_warmup( optimizer=self.optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=self.total_steps ) else: raise ValueError(f"Unknown lr scheduler: {self.config.optim.lr_scheduler}") def _compute_loss_and_backward(self, batch, do_backward=True, n_micro_batches=1): """Compute loss with optional sequence parallelism and remove padding features""" use_sp = self.use_remove_padding and self.config.ulysses_sequence_parallel_size > 1 # Move inputs to GPU and prepare loss mask input_ids = batch["input_ids"].to(self.device_name) attention_mask = batch["attention_mask"].to(self.device_name) position_ids = batch["position_ids"].to(self.device_name) loss_mask = 
batch.pop("loss_mask")[:, 1:].reshape(-1).to(self.device_name) loss_fct = nn.CrossEntropyLoss(reduction="none") # Context manager for sequence parallel if needed context = self.sharding_manager if use_sp else nullcontext() with context, torch.autocast(device_type=self.device_name, dtype=torch.bfloat16): if not use_sp: # Standard forward pass without sequence parallel labels = input_ids[:, 1:].contiguous() output = self.fsdp_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, use_cache=False ) logits = output.logits shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels.contiguous() # Flatten the tokens shift_logits = shift_logits.view(-1, self.model.config.vocab_size) shift_labels = shift_labels.view(-1) # Enable model parallelism shift_labels = shift_labels.to(shift_logits.device) loss = loss_fct(shift_logits, shift_labels) loss = loss * loss_mask.to(loss.device) else: # IMPORTANT: We have a big assumption here, so we can shard the SAME sequence across SP ranks # i.e., each GPU has <1 sequence, and each SP group has 1 sequence # 1. All SP ranks will receive the *SAME* batch # 2. Different SP groups will receive *DIFFERENT* batches # This is implemented by the DistributedSampler batch_size, seqlen = input_ids.shape # Remove padding input_ids_rmpad, indices, *_ = unpad_input( input_ids.unsqueeze(-1), attention_mask ) # input_ids_rmpad (total_nnz, ...) input_ids_rmpad = input_ids_rmpad.transpose(0, 1) # (1, total_nnz) # Unpad position_ids to align rotary position_ids_rmpad = index_first_axis( rearrange(position_ids.unsqueeze(-1), "b s ... -> (b s) ..."), indices ).transpose(0, 1) # Pad and slice inputs for sequence parallelism input_ids_rmpad_sliced, position_ids_rmpad_padded, pad_size = ulysses_pad_and_slice_inputs( input_ids_rmpad, position_ids_rmpad, sp_size=get_ulysses_sequence_parallel_world_size() ) # For computing loss input_ids_rmpad_rolled = torch.roll(input_ids_rmpad, shifts=-1, dims=1) # (1, total_nnz) input_ids_rmpad_rolled, _, _ = ulysses_pad_and_slice_inputs( input_ids_rmpad_rolled, None, get_ulysses_sequence_parallel_world_size() ) input_ids_rmpad_rolled = input_ids_rmpad_rolled.squeeze(0) # ((total_nnz / sp) + pad) # Forward pass output = self.fsdp_model( input_ids=input_ids_rmpad_sliced, attention_mask=None, # Not needed with flash attention varlen position_ids=position_ids_rmpad_padded, use_cache=False, ) # Compute loss locally then aggregate logits_rmpad = output.logits.squeeze(0) input_ids_rmpad_rolled = input_ids_rmpad_rolled.to(logits_rmpad.device) loss = loss_fct(logits_rmpad, input_ids_rmpad_rolled) # Gather and unpad for sequence parallelism loss = gather_outputs_and_unpad(loss, gather_dim=0, unpad_dim=0, padding_size=pad_size) # This is the loss collected from all ulysses ranks full_loss = pad_input( hidden_states=loss.unsqueeze(-1), indices=indices, batch=batch_size, seqlen=seqlen ) full_loss = full_loss.squeeze(-1)[:, :-1] # Remove last token's loss full_loss = full_loss.reshape(-1) loss_mask = loss_mask.to(full_loss.device) loss = full_loss * loss_mask valid_token_this_rank = torch.sum(loss_mask) if self.config.data.balance_dp_token: torch.distributed.all_reduce(valid_token_this_rank) dp_size = self.ulysses_device_mesh.size("dp") if use_sp else torch.distributed.get_world_size() else: dp_size = 1 loss = torch.sum(loss) / (valid_token_this_rank + 1e-8) * dp_size loss = loss / n_micro_batches # normalize loss if do_backward: loss.backward() return loss def training_step(self, batch: TensorDict): start_time = 
time.time() self.fsdp_model.train() log_gpu_memory_usage("Before optimizer zero_grad", logger=logger) self.optimizer.zero_grad() log_gpu_memory_usage("After optimizer zero_grad", logger=logger) micro_batches = batch.split(self.config.data.micro_batch_size_per_gpu) n_micro_batches = len(micro_batches) step_loss = 0 for micro_batch in micro_batches: loss = self._compute_loss_and_backward(batch=micro_batch, n_micro_batches=n_micro_batches) step_loss += loss.item() if self.config.model.strategy == "fsdp": grad_norm = self.fsdp_model.clip_grad_norm_(max_norm=self.config.optim.clip_grad) elif self.config.model.strategy == "fsdp2": grad_norm = fsdp2_clip_grad_norm_(self.fsdp_model.parameters(), max_norm=self.config.optim.clip_grad) else: raise NotImplementedError(f"not implement {self.config.model.strategy}") log_gpu_memory_usage("Before optimizer step", logger=logger) # if grad_norm is not finite, skip the update if not torch.isfinite(grad_norm): print(f"WARN: grad_norm is not finite: {grad_norm}") self.optimizer.zero_grad() else: self.optimizer.step() log_gpu_memory_usage("After optimizer step", logger=logger) self.lr_scheduler.step() # reduce loss across dp ranks lr = self.lr_scheduler.get_last_lr()[0] log_gpu_memory_usage("After offload weights", logger=logger) step_loss = torch.tensor(step_loss).to(self.device_name) # compute time spent per step end_time = time.time() spend_time_per_step = end_time - start_time if is_cuda_available: torch.distributed.all_reduce(step_loss, op=torch.distributed.ReduceOp.AVG) elif is_npu_available: torch.distributed.all_reduce(step_loss) step_loss /= self.device_mesh.size(0) return { "train/loss": step_loss.detach().item(), "train/lr(1e-3)": lr * 1e3, "train/time(s)": spend_time_per_step, } def validation_step(self, batch: TensorDict): self.fsdp_model.eval() with torch.no_grad(): loss = self._compute_loss_and_backward(batch, do_backward=False) if is_cuda_available: torch.distributed.all_reduce(loss, op=torch.distributed.ReduceOp.AVG) elif is_npu_available: torch.distributed.all_reduce(loss) loss /= self.device_mesh.size(0) return loss def save_checkpoint(self, step): """Save checkpoint using FSDPCheckpointManager with improved tracking""" from verl.utils.fs import local_mkdir_safe # Determine checkpoint path local_global_step_folder = os.path.join(self.config.trainer.default_local_dir, f"global_step_{step}") if self.device_mesh.get_rank() == 0: print(f"Saving checkpoint to: {local_global_step_folder}") # Get max checkpoints to keep max_ckpt_to_keep = getattr(self.config.trainer, "max_ckpt_to_keep", None) # Use checkpoint manager to save self.checkpoint_manager.save_checkpoint( local_path=local_global_step_folder, global_step=step, max_ckpt_to_keep=max_ckpt_to_keep ) # Save dataloader state if self.device_mesh.get_rank() == 0: local_mkdir_safe(local_global_step_folder) dataloader_local_path = os.path.join(local_global_step_folder, "data.pt") # Use StatefulDataLoader's built-in state dict functionality dataloader_state_dict = self.train_dataloader.state_dict() torch.save(dataloader_state_dict, dataloader_local_path) print(f"Saved dataloader state to: {dataloader_local_path}") # Update latest checkpoint tracker (atomic write) tracker_file = get_checkpoint_tracker_filename(self.config.trainer.default_local_dir) temp_tracker_file = tracker_file + ".tmp" with open(temp_tracker_file, "w") as f: f.write(str(step)) os.rename(temp_tracker_file, tracker_file) print(f"Updated checkpoint tracker: {tracker_file}") # Copy to HDFS if configured if self.device_mesh.get_rank() 
== 0 and getattr(self.config.trainer, "default_hdfs_dir", None): hdfs_io.makedirs(self.config.trainer.default_hdfs_dir, exist_ok=True) hdfs_io.copy(src=local_global_step_folder, dst=self.config.trainer.default_hdfs_dir, dirs_exist_ok=True) torch.distributed.barrier() def _init_checkpoint_manager(self): """Initialize checkpoint manager with proper configuration""" # Get checkpoint configuration from config, with defaults checkpoint_config = getattr(self.config.trainer, "checkpoint", {}) # Set default values if not specified save_contents = checkpoint_config.get("save_contents", ["model", "optimizer", "extra"]) load_contents = checkpoint_config.get("load_contents", save_contents) # Create checkpoint config dict checkpoint_config_dict = { "load_contents": load_contents, "save_contents": save_contents, } # Convert to DictConfig for compatibility checkpoint_config_dict = DictConfig(checkpoint_config_dict) # Initialize checkpoint manager self.checkpoint_manager = FSDPCheckpointManager( model=self.fsdp_model, optimizer=self.optimizer, lr_scheduler=self.lr_scheduler, processing_class=self.tokenizer, checkpoint_config=checkpoint_config_dict, ) def load_checkpoint(self): # Determine resume path based on configuration checkpoint_path = self._determine_resume_path() if checkpoint_path is None: return 0 # extract resume step from checkpoint path resume_step = extract_step(checkpoint_path) if resume_step is None: log_with_rank( f"Warning: Could not extract step number from {checkpoint_path}, starting from step 0", logger=logger, rank=self.device_mesh.get_rank(), level=logging.WARNING, log_only_rank_0=True, ) return 0 self.resume_global_step = resume_step # Use checkpoint manager to load model state self.checkpoint_manager.load_checkpoint(checkpoint_path) log_with_rank( f"Successfully loaded model checkpoint from {checkpoint_path} (step {resume_step})", logger=logger, rank=self.device_mesh.get_rank(), log_only_rank_0=True, ) # Always load dataloader state for StatefulDataLoader self._load_dataloader_state(checkpoint_path) return resume_step def _load_dataloader_state(self, checkpoint_path: str): """Load dataloader state from checkpoint""" dataloader_path = os.path.join(checkpoint_path, "data.pt") if os.path.exists(dataloader_path): # Use StatefulDataLoader's built-in state dict functionality dataloader_state_dict = torch.load(dataloader_path, map_location="cpu", weights_only=False) self.train_dataloader.load_state_dict(dataloader_state_dict) log_with_rank( f"Successfully loaded dataloader state from {dataloader_path}", logger=logger, rank=self.device_mesh.get_rank(), log_only_rank_0=True, ) else: log_with_rank( f"Warning: No dataloader state found at {dataloader_path}, will start from scratch", logger=logger, rank=self.device_mesh.get_rank(), level=logging.WARNING, log_only_rank_0=True, ) def _determine_resume_path(self): """Determine the path to resume from based on resume_mode configuration""" resume_mode = getattr(self.config.trainer, "resume_mode", "auto") resume_from_path = getattr(self.config.trainer, "resume_from_path", None) if resume_mode == "disable": return None elif resume_mode == "auto": if resume_from_path is not None: assert os.path.exists(resume_from_path), ( "resume_from_path must be null or an existing path when resume_mode is 'auto'" ) assert "global_step_" in resume_from_path, "resume_from_path must specify the global_steps" return resume_from_path # Try to find the latest checkpoint in the default directory return self._find_latest_checkpoint() elif resume_mode == "resume_path": 
assert os.path.exists(resume_from_path), ( "resume_from_path must be an existing path when resume_mode is 'resume_path'" ) assert "global_step_" in resume_from_path, "resume_from_path must specify the global_steps" return resume_from_path else: raise ValueError(f"Invalid resume_mode: {resume_mode}. Must be 'auto', 'disable', or 'resume_path'") def _find_latest_checkpoint(self): """Find the latest checkpoint in the default local directory""" checkpoint_dir = self.config.trainer.default_local_dir if not os.path.exists(checkpoint_dir): return None latest_checkpoint = find_latest_ckpt_path(checkpoint_dir) if latest_checkpoint and self.device_mesh.get_rank() == 0: step_num = extract_step(latest_checkpoint) print(f"Found latest checkpoint: {latest_checkpoint} (step {step_num})") return latest_checkpoint def fit(self): rank = self.device_mesh.get_rank() # TODO: add a unified tracking if rank == 0: tracking = Tracking( project_name=self.config.trainer.project_name, experiment_name=self.config.trainer.experiment_name, default_backend=self.config.trainer.logger, config=OmegaConf.to_container(self.config, resolve=True), ) global_step = self.resume_global_step # Start from resumed step last_valid_metric = None # compute the total training steps. # the total training steps in SFT is mainly for early exit total_training_steps = len(self.train_dataloader) * self.config.trainer.total_epochs if self.config.trainer.total_training_steps is not None: total_training_steps = self.config.trainer.total_training_steps self.total_training_steps = total_training_steps log_with_rank( f"Total training steps: {self.total_training_steps},", logger=logger, rank=self.device_mesh.get_rank(), log_only_rank_0=True, ) # With StatefulDataLoader, we don't need to manually calculate epochs and steps # The dataloader will automatically resume from where it left off if global_step > 0: log_with_rank( f"StatefulDataLoader will automatically resume from global step: {global_step}", logger=logger, rank=self.device_mesh.get_rank(), log_only_rank_0=True, ) # Calculate which epoch we're starting from for sampler.set_epoch() start_epoch = global_step // self.steps_per_epoch train_time = 0 for epoch in range(start_epoch, self.config.trainer.total_epochs): self.train_sampler.set_epoch(epoch=epoch) for step_in_epoch, data in enumerate( tqdm( self.train_dataloader, initial=global_step % self.steps_per_epoch if epoch == start_epoch else 0, total=self.steps_per_epoch, desc=f"Epoch {epoch + 1}/{self.config.trainer.total_epochs}", disable=rank != 0, ) ): global_step += 1 data = TensorDict(data, batch_size=self.config.data.train_batch_size).to(self.device_name) metric = self.training_step(data) train_time += metric["train/time(s)"] if rank == 0: tracking.log(data=metric, step=global_step) is_last_step = global_step >= self.total_training_steps is_valid_step = global_step % self.config.trainer.test_freq == 0 is_save_step = global_step % self.config.trainer.save_freq == 0 # early exit or validation step if is_last_step or (self.config.trainer.test_freq > 0 and is_valid_step): # Perform validation val_losses = [] for val_data in self.val_dataloader: val_data = TensorDict(val_data, batch_size=self.config.data.micro_batch_size_per_gpu).to( self.device_name ) val_loss = self.validation_step(val_data) val_losses.append(val_loss) if rank == 0: val_loss = torch.mean(torch.stack(val_losses)) metric = {"val/loss": val_loss.detach().item()} tracking.log(data=metric, step=global_step) last_valid_metric = metric torch.distributed.barrier() if is_last_step or 
(self.config.trainer.save_freq > 0 and is_save_step): self.save_checkpoint(step=global_step) if is_last_step: if rank == 0: print(f"Total time for train steps: {train_time:.2f}s") print(f"Final validation metrics: {last_valid_metric}") return def run_sft(config): device_name = get_device_name() local_rank, rank, world_size = initialize_global_process_group() device_mesh = init_device_mesh(device_type=device_name, mesh_shape=(world_size,), mesh_dim_names=("fsdp",)) dp_size = world_size // config.ulysses_sequence_parallel_size ulysses_device_mesh = init_device_mesh( device_type=device_name, mesh_shape=(dp_size, config.ulysses_sequence_parallel_size), mesh_dim_names=("dp", "sp"), ) # build tokenizer and datasets first from verl.utils import hf_tokenizer local_model_path = copy_to_local(src=config.model.partial_pretrain, verbose=True) tokenizer = hf_tokenizer(local_model_path, trust_remote_code=config.model.trust_remote_code) train_dataset = create_sft_dataset( config.data.train_files, config.data, tokenizer, max_samples=config.data.get("train_max_samples", -1) ) val_dataset = create_sft_dataset( config.data.val_files, config.data, tokenizer, max_samples=config.data.get("val_max_samples", -1) ) trainer = FSDPSFTTrainer( config=config, device_mesh=device_mesh, ulysses_device_mesh=ulysses_device_mesh, tokenizer=tokenizer, train_dataset=train_dataset, val_dataset=val_dataset, ) trainer.fit() destroy_global_process_group() @hydra.main(config_path="config", config_name="sft_trainer", version_base=None) def main(config): run_sft(config) def create_sft_dataset(data_paths, data_config, tokenizer, max_samples=-1): """Create a dataset.""" # build dataset # First check if a custom dataset class is specified if data_config.custom_cls.get("path", None): from verl.utils.import_utils import load_extern_type dataset_cls = load_extern_type(data_config.custom_cls.path, data_config.custom_cls.name) # Then check if multi-turn dataset should be used elif data_config.get("multiturn", {}).get("enable", False): dataset_cls = MultiTurnSFTDataset # Default to single-turn dataset else: dataset_cls = SFTDataset # Create datasets based on the selected class dataset = dataset_cls(parquet_files=data_paths, tokenizer=tokenizer, config=data_config, max_samples=max_samples) return dataset if __name__ == "__main__": main() ================================================ FILE: verl_distillation/verl/trainer/main_eval.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Offline evaluate the performance of a generated file using reward model and ground truth verifier. The input is a parquet file that contains N generated sequences and (optional) the ground truth. 
""" from collections import defaultdict import hydra import numpy as np import pandas as pd import ray from omegaconf import OmegaConf from tqdm import tqdm from verl.trainer.ppo.reward import get_custom_reward_fn from verl.utils.fs import copy_to_local @ray.remote def process_item(config, data_source, response_lst, reward_data): reward_fn = get_custom_reward_fn(config) ground_truth = reward_data["ground_truth"] score_lst = [reward_fn(data_source, r, ground_truth) for r in response_lst] return data_source, np.mean(score_lst) @hydra.main(config_path="config", config_name="evaluation", version_base=None) def main(config): local_path = copy_to_local(config.data.path, use_shm=config.data.get("use_shm", False)) dataset = pd.read_parquet(local_path) responses = dataset[config.data.response_key] data_sources = dataset[config.data.data_source_key] reward_model_data = dataset[config.data.reward_model_key] total = len(dataset) # Initialize Ray if not ray.is_initialized(): ray.init(**OmegaConf.to_container(config.ray_kwargs.get("ray_init", {}))) # evaluate test_score based on data source data_source_reward = defaultdict(list) # Create remote tasks remote_tasks = [ process_item.remote(config, data_sources[i], responses[i], reward_model_data[i]) for i in range(total) ] # Process results as they come in with tqdm(total=total) as pbar: while len(remote_tasks) > 0: # Use ray.wait to get completed tasks done_ids, remote_tasks = ray.wait(remote_tasks) for result_id in done_ids: data_source, score = ray.get(result_id) data_source_reward[data_source].append(score) pbar.update(1) metric_dict = {} for data_source, rewards in data_source_reward.items(): metric_dict[f"test_score/{data_source}"] = np.mean(rewards) print(metric_dict) if __name__ == "__main__": main() ================================================ FILE: verl_distillation/verl/trainer/main_generation.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" Generate responses given a dataset of prompts """ import os import hydra import numpy as np import ray os.environ["NCCL_DEBUG"] = "WARN" os.environ["TOKENIZERS_PARALLELISM"] = "true" # os.environ['TORCH_COMPILE_DISABLE'] = '1' from pprint import pprint import pandas as pd from omegaconf import OmegaConf from verl import DataProto from verl.protocol import pad_dataproto_to_divisor, unpad_dataproto from verl.single_controller.ray import RayClassWithInitArgs, RayResourcePool, RayWorkerGroup from verl.utils import hf_tokenizer from verl.utils.fs import copy_to_local from verl.utils.hdfs_io import makedirs from verl.utils.model import compute_position_id_with_mask from verl.workers.fsdp_workers import ActorRolloutRefWorker @hydra.main(config_path="config", config_name="generation", version_base=None) def main(config): run_generation(config) def run_generation(config) -> None: if not ray.is_initialized(): # this is for local ray cluster default_runtime_env = {"env_vars": {"TOKENIZERS_PARALLELISM": "true", "NCCL_DEBUG": "WARN"}} ray_init_kwargs = config.ray_kwargs.get("ray_init", {}) runtime_env_kwargs = ray_init_kwargs.get("runtime_env", {}) runtime_env = OmegaConf.merge(default_runtime_env, runtime_env_kwargs) ray_init_kwargs = OmegaConf.create({**ray_init_kwargs, "runtime_env": runtime_env}) print(f"ray init kwargs: {ray_init_kwargs}") ray.init(**OmegaConf.to_container(ray_init_kwargs)) ray.get(main_task.remote(config)) @ray.remote(num_cpus=1) def main_task(config): pprint(OmegaConf.to_container(config, resolve=True)) # resolve=True will eval symbol values OmegaConf.resolve(config) local_path = copy_to_local(config.model.path) trust_remote_code = config.data.get("trust_remote_code", False) tokenizer = hf_tokenizer(local_path, trust_remote_code=trust_remote_code) if config.rollout.temperature == 0.0: assert config.data.n_samples == 1, "When temperature=0, n_samples must be 1." assert config.data.n_samples >= 1, "n_samples should always >= 1" # read dataset. 
    # Note that the dataset should directly contain chat template format (e.g., a list of dictionaries)
    dataset = pd.read_parquet(config.data.path)
    chat_lst = dataset[config.data.prompt_key].tolist()
    chat_lst = [chat.tolist() for chat in chat_lst]

    tokenizer.padding_side = "left"
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    ray_cls_with_init = RayClassWithInitArgs(cls=ray.remote(ActorRolloutRefWorker), config=config, role="rollout")
    resource_pool = RayResourcePool(process_on_nodes=[config.trainer.n_gpus_per_node] * config.trainer.nnodes)
    wg = RayWorkerGroup(
        resource_pool=resource_pool,
        ray_cls_with_init=ray_cls_with_init,
        device_name=config.trainer.device,
    )
    wg.init_model()

    total_samples = len(dataset)
    config_batch_size = config.data.batch_size
    apply_chat_template_kwargs = config.data.get("apply_chat_template_kwargs", {})
    num_batch = -(-total_samples // config_batch_size)  # ceiling division
    output_lst = [[] for _ in range(config.data.n_samples)]

    for batch_idx in range(num_batch):
        print(f"[{batch_idx + 1}/{num_batch}] Start to process.")
        batch_chat_lst = chat_lst[batch_idx * config_batch_size : (batch_idx + 1) * config_batch_size]

        inputs = tokenizer.apply_chat_template(
            batch_chat_lst,
            add_generation_prompt=True,
            padding=True,
            truncation=True,
            max_length=config.rollout.prompt_length,
            return_tensors="pt",
            return_dict=True,
            tokenize=True,
            **apply_chat_template_kwargs,
        )

        input_ids = inputs["input_ids"]
        attention_mask = inputs["attention_mask"]
        position_ids = compute_position_id_with_mask(attention_mask)
        batch_dict = {"input_ids": input_ids, "attention_mask": attention_mask, "position_ids": position_ids}
        data = DataProto.from_dict(batch_dict)
        data_padded, pad_size = pad_dataproto_to_divisor(data, wg.world_size)

        # START TO GENERATE FOR n_samples TIMES
        print(f"[{batch_idx + 1}/{num_batch}] Start to generate.")
        for n_sample in range(config.data.n_samples):
            output_padded = wg.generate_sequences(data_padded)
            output = unpad_dataproto(output_padded, pad_size=pad_size)

            output_texts = []
            for i in range(len(output)):
                data_item = output[i]
                prompt_length = data_item.batch["prompts"].shape[-1]
                valid_response_length = data_item.batch["attention_mask"][prompt_length:].sum()
                valid_response_ids = data_item.batch["responses"][:valid_response_length]
                response_str = tokenizer.decode(valid_response_ids, skip_special_tokens=True)
                output_texts.append(response_str)

            output_lst[n_sample].extend(output_texts)

    # convert output_lst from (n_samples, n_data) to (n_data, n_samples)
    output_lst = np.array(output_lst, dtype=object)
    output_lst = np.transpose(output_lst, axes=(1, 0)).tolist()

    # add to the data frame
    dataset["responses"] = output_lst

    # write to a new parquet
    output_dir = os.path.dirname(config.data.output_path)
    makedirs(output_dir, exist_ok=True)
    dataset.to_parquet(config.data.output_path)


if __name__ == "__main__":
    main()



================================================
FILE: verl_distillation/verl/trainer/main_generation_server.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # limitations under the License. """ Generate responses given a dataset of prompts """ import os import aiohttp import hydra import numpy as np import ray os.environ["NCCL_DEBUG"] = "WARN" os.environ["TOKENIZERS_PARALLELISM"] = "true" # os.environ['TORCH_COMPILE_DISABLE'] = '1' import asyncio from pprint import pprint import pandas as pd from omegaconf import OmegaConf from openai.types.chat import ChatCompletion from verl.utils.hdfs_io import makedirs from verl.workers.rollout.replica import get_rollout_replica_class async def start_server(config): tp_size = config.actor_rollout_ref.rollout.tensor_model_parallel_size num_replicas = (config.trainer.n_gpus_per_node * config.trainer.nnodes) // tp_size rollout_config = config.actor_rollout_ref.rollout model_config = config.actor_rollout_ref.model # create standalone rollout server rollout_server_class = get_rollout_replica_class(config.actor_rollout_ref.rollout.name) rollout_servers = [ rollout_server_class( replica_rank=replica_rank, config=rollout_config, model_config=model_config, gpus_per_node=config.trainer.n_gpus_per_node, ) for replica_rank in range(num_replicas) ] await asyncio.gather(*[server.init_standalone() for server in rollout_servers]) server_handles = [server._server_handle for server in rollout_servers] server_addresses = [server._server_address for server in rollout_servers] assert len(server_handles) == num_replicas assert len(server_addresses) == num_replicas return server_handles, server_addresses async def submit_request(server_address, **chat_complete_request): try: extra_headers = chat_complete_request.pop("extra_headers", {}) timeout = aiohttp.ClientTimeout(total=None) session = aiohttp.ClientSession(timeout=timeout) async with session.post( url=f"http://{server_address}/v1/chat/completions", headers={"Authorization": "Bearer token-abc123", **extra_headers}, json=chat_complete_request, ) as resp: data = await resp.json() return ChatCompletion(**data) finally: await session.close() async def generate_per_replica(server_address, model_path: str, n_samples: int, sampling_params: dict, chat_lst: list): # here we should sample n_samples for each chat_lst. # we use aiohttp to avoid hang in AsyncOpenAI when the number of requests is large. 
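    # Request fan-out, with hypothetical numbers for intuition: given num_replicas=4 servers,
    # 1000 chats split across them via np.array_split, and n_samples=8, each replica receives
    # ~250 chats and builds 250 * 8 = 2000 chat-completion requests, all submitted concurrently
    # through submit_request/asyncio.gather (one aiohttp session per request).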
# client = AsyncOpenAI( # api_key="123-abc", # base_url=f"http://{server_address}/v1", # ) chat_complete_request = [ { "model": model_path, "messages": messages, **sampling_params, } for messages in chat_lst for _ in range(n_samples) ] tasks = [submit_request(server_address, **req) for req in chat_complete_request] results = await asyncio.gather(*tasks) return results async def generate( server_addresses: list, model_path: str, n_samples: int, sampling_params: dict, chat_numpy: np.ndarray ): num_replicas = len(server_addresses) chat_sub_array = np.array_split(chat_numpy, num_replicas) chat_sub_array = [chat.tolist() for chat in chat_sub_array] assert len(server_addresses) == len(chat_sub_array) results = await asyncio.gather( *[ generate_per_replica(server_addresses[i], model_path, n_samples, sampling_params, chat_sub_array[i]) for i in range(num_replicas) ] ) return results @hydra.main(config_path="config", config_name="ppo_trainer", version_base=None) def main(config): ray.init(runtime_env={"env_vars": {"TOKENIZERS_PARALLELISM": "true", "NCCL_DEBUG": "WARN", "VLLM_USE_V1": "1"}}) pprint(OmegaConf.to_container(config, resolve=True)) # resolve=True will eval symbol values OmegaConf.resolve(config) n_samples = config.actor_rollout_ref.rollout.n if config.actor_rollout_ref.rollout.temperature == 0.0: assert n_samples == 1, "When temperature=0, n_samples must be 1." assert n_samples >= 1, "n_samples should always >= 1" sampling_params = { "temperature": config.actor_rollout_ref.rollout.temperature, "top_p": config.actor_rollout_ref.rollout.top_p, # "top_k": config.actor_rollout_ref.rollout.top_k, "max_tokens": config.actor_rollout_ref.rollout.response_length, } from omegaconf import ListConfig train_files = config.data.train_files if not isinstance(train_files, list | ListConfig): train_files = [train_files] # read dataset. Note that the dataset should directly contain chat template format (e.g., a list of dictionary) datasets = [] for train_file in train_files: dataset = pd.read_parquet(train_file) datasets.append(dataset) # concat dataset dataset = pd.concat(datasets, axis=0, ignore_index=True) chat_lst = dataset[config.data.prompt_key].tolist() chat_lst = [chat.tolist() for chat in chat_lst] chat_numpy = np.array(chat_lst) # start native server server_handles, server_addresses = asyncio.run(start_server(config)) # run generate gen_results = asyncio.run( generate(server_addresses, config.actor_rollout_ref.model.path, n_samples, sampling_params, chat_numpy) ) # reshape results into a numpy array import itertools results = list(itertools.chain.from_iterable(gen_results)) # extract content from results results = np.array([result.choices[0].message.content for result in results]) results = np.reshape(results, (-1, n_samples)) assert results.shape == (len(chat_lst), n_samples) results = results.tolist() # add to the data frame dataset["responses"] = results # write to a new parquet output_dir = os.path.dirname(config.data.output_path) makedirs(output_dir, exist_ok=True) print(f"Saving results to {config.data.output_path}") dataset.to_parquet(config.data.output_path) if __name__ == "__main__": main() ================================================ FILE: verl_distillation/verl/trainer/main_ppo.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Note that we don't combine the main with ray_trainer as ray_trainer is used by other main entry points.
"""

import os
import socket

import hydra
import ray
from omegaconf import OmegaConf

from verl.experimental.dataset.sampler import AbstractSampler
from verl.trainer.constants_ppo import get_ppo_ray_runtime_env
from verl.trainer.ppo.ray_trainer import RayPPOTrainer
from verl.trainer.ppo.reward import load_reward_manager
from verl.trainer.ppo.utils import need_critic, need_reference_policy
from verl.utils.config import validate_config
from verl.utils.device import is_cuda_available
from verl.utils.import_utils import load_extern_type


@hydra.main(config_path="config", config_name="ppo_trainer", version_base=None)
def main(config):
    """Main entry point for PPO training with Hydra configuration management.

    Args:
        config: Hydra configuration dictionary containing training parameters.
    """
    run_ppo(config)


# Define a function to run the PPO-like training process
def run_ppo(config, task_runner_class=None) -> None:
    """Initialize Ray cluster and run distributed PPO training process.

    Args:
        config: Training configuration object containing all necessary parameters
            for distributed PPO training including Ray initialization settings,
            model paths, and training hyperparameters.
        task_runner_class: For recipe to change TaskRunner.
    """
    # Check if Ray is not initialized
    if not ray.is_initialized():
        # Initialize Ray with a local cluster configuration
        # Set environment variables in the runtime environment to control tokenizer parallelism,
        # NCCL debug level, VLLM logging level, and allow runtime LoRA updating
        # `num_cpus` specifies the number of CPU cores Ray can use, obtained from the configuration
        default_runtime_env = get_ppo_ray_runtime_env()
        ray_init_kwargs = config.ray_kwargs.get("ray_init", {})
        runtime_env_kwargs = ray_init_kwargs.get("runtime_env", {})
        if config.transfer_queue.enable:
            # Add runtime environment variables for transfer queue
            runtime_env_vars = runtime_env_kwargs.get("env_vars", {})
            runtime_env_vars["TRANSFER_QUEUE_ENABLE"] = "1"
            runtime_env_kwargs["env_vars"] = runtime_env_vars
            # Ray requires runtime-env variable values to be strings
            for k, v in runtime_env_kwargs["env_vars"].items():
                if not isinstance(v, str):
                    runtime_env_kwargs["env_vars"][k] = str(v)
        runtime_env = OmegaConf.merge(default_runtime_env, runtime_env_kwargs)
        ray_init_kwargs = OmegaConf.create({**ray_init_kwargs, "runtime_env": runtime_env})
        print(f"ray init kwargs: {ray_init_kwargs}")
        ray.init(**OmegaConf.to_container(ray_init_kwargs))

    if task_runner_class is None:
        task_runner_class = ray.remote(num_cpus=1)(TaskRunner)  # please make sure main_task is not scheduled on head
    # Create a remote instance of the TaskRunner class, and
    # Execute the `run` method of the TaskRunner instance remotely and wait for it to complete
    if (
        is_cuda_available
        and config.global_profiler.tool == "nsys"
        and config.global_profiler.get("steps") is not None
        and len(config.global_profiler.get("steps", [])) > 0
    ):
        from verl.utils.import_utils import is_nvtx_available

        assert is_nvtx_available(), "nvtx is not available in CUDA platform.
Please 'pip3 install nvtx'" nsight_options = OmegaConf.to_container( config.global_profiler.global_tool_config.nsys.controller_nsight_options ) runner = task_runner_class.options(runtime_env={"nsight": nsight_options}).remote() else: runner = task_runner_class.remote() ray.get(runner.run.remote(config)) # [Optional] get the path of the timeline trace file from the configuration, default to None # This file is used for performance analysis timeline_json_file = config.ray_kwargs.get("timeline_json_file", None) if timeline_json_file: ray.timeline(filename=timeline_json_file) class TaskRunner: """Ray remote class for executing distributed PPO training tasks. This class encapsulates the main training logic and runs as a Ray remote actor to enable distributed execution across multiple nodes and GPUs. Attributes: role_worker_mapping: Dictionary mapping Role enums to Ray remote worker classes mapping: Dictionary mapping Role enums to resource pool IDs for GPU allocation """ def __init__(self): self.role_worker_mapping = {} self.mapping = {} def add_actor_rollout_worker(self, config): """Add actor rollout worker based on the actor strategy.""" from verl.single_controller.ray import RayWorkerGroup if config.actor_rollout_ref.actor.strategy in {"fsdp", "fsdp2"}: from verl.workers.fsdp_workers import ActorRolloutRefWorker, AsyncActorRolloutRefWorker actor_rollout_cls = ( AsyncActorRolloutRefWorker if config.actor_rollout_ref.rollout.mode == "async" else ActorRolloutRefWorker ) ray_worker_group_cls = RayWorkerGroup elif config.actor_rollout_ref.actor.strategy == "megatron": from verl.workers.megatron_workers import ActorRolloutRefWorker, AsyncActorRolloutRefWorker actor_rollout_cls = ( AsyncActorRolloutRefWorker if config.actor_rollout_ref.rollout.mode == "async" else ActorRolloutRefWorker ) ray_worker_group_cls = RayWorkerGroup else: raise NotImplementedError from verl.trainer.ppo.ray_trainer import Role self.role_worker_mapping[Role.ActorRollout] = ray.remote(actor_rollout_cls) return actor_rollout_cls, ray_worker_group_cls def add_critic_worker(self, config): """Add critic worker to role mapping.""" if config.critic.strategy in {"fsdp", "fsdp2"}: use_legacy_worker_impl = config.trainer.get("use_legacy_worker_impl", "auto") if use_legacy_worker_impl in ["auto", "enable"]: from verl.workers.fsdp_workers import CriticWorker elif use_legacy_worker_impl == "disable": from verl.workers.roles import CriticWorker print("Using new worker implementation") else: raise ValueError(f"Invalid use_legacy_worker_impl: {use_legacy_worker_impl}") elif config.critic.strategy == "megatron": from verl.workers.megatron_workers import CriticWorker else: raise NotImplementedError from verl.trainer.ppo.ray_trainer import Role self.role_worker_mapping[Role.Critic] = ray.remote(CriticWorker) def init_resource_pool_mgr(self, config): """Initialize resource pool manager.""" from verl.trainer.ppo.ray_trainer import Role global_pool_id = "global_pool" resource_pool_spec = { global_pool_id: [config.trainer.n_gpus_per_node] * config.trainer.nnodes, } # TODO Here you can use the new registration method to support dynamic registration of roles if config.reward_model.enable_resource_pool: if config.reward_model.n_gpus_per_node <= 0: raise ValueError("config.reward_model.n_gpus_per_node must be greater than 0") if config.reward_model.nnodes <= 0: raise ValueError("config.reward_model.nnodes must be greater than 0") reward_pool = [config.reward_model.n_gpus_per_node] * config.reward_model.nnodes resource_pool_spec["reward_pool"] = 
reward_pool self.mapping[Role.ActorRollout] = global_pool_id self.mapping[Role.Critic] = global_pool_id from verl.trainer.ppo.ray_trainer import ResourcePoolManager resource_pool_manager = ResourcePoolManager(resource_pool_spec=resource_pool_spec, mapping=self.mapping) return resource_pool_manager def add_reward_model_worker(self, config): """Add reward model worker if enabled.""" from verl.trainer.ppo.ray_trainer import Role if config.reward_model.enable: use_legacy_worker_impl = config.trainer.get("use_legacy_worker_impl", "auto") if use_legacy_worker_impl in ["auto", "enable"]: if config.reward_model.strategy in {"fsdp", "fsdp2"}: from verl.workers.fsdp_workers import RewardModelWorker elif config.reward_model.strategy == "megatron": from verl.workers.megatron_workers import RewardModelWorker else: raise NotImplementedError elif use_legacy_worker_impl == "disable": from verl.workers.roles import RewardModelWorker print("Using new worker implementation") else: raise ValueError(f"Invalid use_legacy_worker_impl: {use_legacy_worker_impl}") self.role_worker_mapping[Role.RewardModel] = ray.remote(RewardModelWorker) if config.reward_model.enable_resource_pool: self.mapping[Role.RewardModel] = "reward_pool" else: self.mapping[Role.RewardModel] = "global_pool" def add_ref_policy_worker(self, config, ref_policy_cls): """Add reference policy worker if KL loss or KL reward is used.""" from verl.trainer.ppo.ray_trainer import Role if config.algorithm.use_kl_in_reward or config.actor_rollout_ref.actor.use_kl_loss: self.role_worker_mapping[Role.RefPolicy] = ray.remote(ref_policy_cls) self.mapping[Role.RefPolicy] = "global_pool" def run(self, config): """Execute the main PPO training workflow. This method sets up the distributed training environment, initializes workers, datasets, and reward functions, then starts the training process. Args: config: Training configuration object containing all parameters needed for setting up and running the PPO training process. """ # Print the initial configuration. `resolve=True` will evaluate symbolic values. from pprint import pprint from omegaconf import OmegaConf from verl.utils.fs import copy_to_local print(f"TaskRunner hostname: {socket.gethostname()}, PID: {os.getpid()}") pprint(OmegaConf.to_container(config, resolve=True)) OmegaConf.resolve(config) actor_rollout_cls, ray_worker_group_cls = self.add_actor_rollout_worker(config) self.add_critic_worker(config) # We should adopt a multi-source reward function here: # - for rule-based rm, we directly call a reward score # - for model-based rm, we call a model # - for code related prompt, we send to a sandbox if there are test cases # finally, we combine all the rewards together # The reward type depends on the tag of the data self.add_reward_model_worker(config) # Add a reference policy worker if KL loss or KL reward is used. self.add_ref_policy_worker(config, actor_rollout_cls) # validate config validate_config( config=config, use_reference_policy=need_reference_policy(self.role_worker_mapping), use_critic=need_critic(config), ) # Download the checkpoint from HDFS to the local machine. # `use_shm` determines whether to use shared memory, which could lead to faster model loading if turned on local_path = copy_to_local( config.actor_rollout_ref.model.path, use_shm=config.actor_rollout_ref.model.get("use_shm", False) ) # Instantiate the tokenizer and processor. 
from verl.utils import hf_processor, hf_tokenizer trust_remote_code = config.data.get("trust_remote_code", False) tokenizer = hf_tokenizer(local_path, trust_remote_code=trust_remote_code) # Used for multimodal LLM, could be None processor = hf_processor(local_path, trust_remote_code=trust_remote_code, use_fast=True) # Load the reward manager for training and validation. reward_fn = load_reward_manager( config, tokenizer, num_examine=0, **config.reward_model.get("reward_kwargs", {}) ) val_reward_fn = load_reward_manager( config, tokenizer, num_examine=1, **config.reward_model.get("reward_kwargs", {}) ) resource_pool_manager = self.init_resource_pool_mgr(config) from verl.utils.dataset.rl_dataset import collate_fn # Create training and validation datasets. train_dataset = create_rl_dataset( config.data.train_files, config.data, tokenizer, processor, is_train=True, max_samples=config.data.get("train_max_samples", -1), ) val_dataset = create_rl_dataset( config.data.val_files, config.data, tokenizer, processor, is_train=False, max_samples=config.data.get("val_max_samples", -1), ) train_sampler = create_rl_sampler(config.data, train_dataset) # Initialize the PPO trainer. trainer = RayPPOTrainer( config=config, tokenizer=tokenizer, processor=processor, role_worker_mapping=self.role_worker_mapping, resource_pool_manager=resource_pool_manager, ray_worker_group_cls=ray_worker_group_cls, reward_fn=reward_fn, val_reward_fn=val_reward_fn, train_dataset=train_dataset, val_dataset=val_dataset, collate_fn=collate_fn, train_sampler=train_sampler, ) # Initialize the workers of the trainer. trainer.init_workers() # Start the training process. trainer.fit() def create_rl_dataset(data_paths, data_config, tokenizer, processor, is_train=True, max_samples: int = -1): """Create a dataset. Arguments: data_paths: List of paths to data files. data_config: The data config. tokenizer (Tokenizer): The tokenizer. processor (Processor): The processor. Returns: dataset (Dataset): The dataset. """ from torch.utils.data import Dataset from verl.utils.dataset.rl_dataset import RLHFDataset # Check if a custom dataset class is specified in the data configuration # and if the path to the custom class is provided if "custom_cls" in data_config and data_config.custom_cls.get("path", None) is not None: # Dynamically load the custom dataset class dataset_cls = load_extern_type(data_config.custom_cls.path, data_config.custom_cls.name) # Verify that the custom dataset class inherits from torch.utils.data.Dataset if not issubclass(dataset_cls, Dataset): raise TypeError( f"The custom dataset class '{data_config.custom_cls.name}' from " f"'{data_config.custom_cls.path}' must inherit from torch.utils.data.Dataset" ) elif "datagen" in data_config and data_config.datagen.get("path", None) is not None and is_train: # If a data generation strategy is specified, use the DynamicGenDataset class from verl.utils.dataset.dynamicgen_dataset import DynamicGenDataset dataset_cls = DynamicGenDataset print("Using DynamicGenDataset for data generation.") else: # Use the default RLHFDataset class if no custom class is specified dataset_cls = RLHFDataset print(f"Using dataset class: {dataset_cls.__name__}") # Instantiate the dataset using the determined dataset class dataset = dataset_cls( data_files=data_paths, tokenizer=tokenizer, processor=processor, config=data_config, max_samples=max_samples, ) return dataset def create_rl_sampler(data_config, dataset): """Create a sampler for the dataset. Arguments: data_config: The data config. 
dataset (Dataset): The dataset. Returns: sampler (Sampler): The sampler. """ import torch from torch.utils.data import RandomSampler, SequentialSampler if data_config.sampler is not None and data_config.sampler.get("class_path", None) is not None: curriculum_class = load_extern_type( data_config.sampler.class_path, data_config.sampler.class_name, ) sampler = curriculum_class( data_source=dataset, data_config=data_config, ) assert isinstance(sampler, AbstractSampler) assert data_config.get("dataloader_num_workers", 8) == 0, ( "If using curriculum, num_workers must be 0 to prevent data caching. " "If the dataloader caches data before the batch is done the " "curriculum sampler won't have the opportunity to reorder it. " ) # Use a sampler to facilitate checkpoint resumption. # If shuffling is enabled in the data configuration, create a random sampler. elif data_config.shuffle: train_dataloader_generator = torch.Generator() seed = data_config.get("seed") if seed is not None: train_dataloader_generator.manual_seed(seed) sampler = RandomSampler(data_source=dataset, generator=train_dataloader_generator) else: # If shuffling is disabled, use a sequential sampler to iterate through the dataset in order. sampler = SequentialSampler(data_source=dataset) return sampler if __name__ == "__main__": main() ================================================ FILE: verl_distillation/verl/trainer/ppo/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: verl_distillation/verl/trainer/ppo/core_algos.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Core functions to implement PPO algorithms. The function implemented in this file should be used by trainer with different distributed strategies to implement PPO-like algorithms. 
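Advantage estimators and policy losses are registered by name (`register_adv_est`, `register_policy_loss`)
and looked up via `get_adv_estimator_fn` / `get_policy_loss_fn`, so new algorithms can be selected from config.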
""" __all__ = ["register_adv_est", "get_adv_estimator_fn", "AdvantageEstimator"] from collections import defaultdict from enum import Enum from typing import Any, Callable, Optional import numpy as np import torch from omegaconf import DictConfig import verl.utils.torch_functional as verl_F from verl.trainer.config import AlgoConfig from verl.utils import as_torch_index, group_mean_std from verl.utils.import_utils import deprecated from verl.workers.config import ActorConfig PolicyLossFn = Callable[ [ torch.Tensor, # old_log_prob torch.Tensor, # log_prob torch.Tensor, # advantages torch.Tensor, # response_mask str, # loss_agg_mode Optional[DictConfig | AlgoConfig], # config torch.Tensor | None, # rollout_log_probs ], tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor], ] POLICY_LOSS_REGISTRY: dict[str, PolicyLossFn] = {} def register_policy_loss(name: str) -> Callable[[PolicyLossFn], PolicyLossFn]: """Register a policy loss function with the given name. Args: name (str): The name to register the policy loss function under. Returns: function: Decorator function that registers the policy loss function. """ def decorator(func: PolicyLossFn) -> PolicyLossFn: POLICY_LOSS_REGISTRY[name] = func return func return decorator def get_policy_loss_fn(name): """Get the policy loss with a given name. Args: name: `(str)` The name of the policy loss. Returns: `(callable)`: The policy loss function. """ loss_name = name if loss_name not in POLICY_LOSS_REGISTRY: raise ValueError( f"Unsupported loss mode: {loss_name}. Supported modes are: {list(POLICY_LOSS_REGISTRY.keys())}" ) return POLICY_LOSS_REGISTRY[loss_name] class AdvantageEstimator(str, Enum): """Using an enumeration class to avoid spelling errors in adv_estimator. Note(haibin.lin): this enum class is immutable after creation. Extending this enum for new estimators may not be necessary since users can always just call `verl.trainer.ppo.core_algos.register` with string name for a custom advantage estimator instead. """ GAE = "gae" GRPO = "grpo" REINFORCE_PLUS_PLUS = "reinforce_plus_plus" REINFORCE_PLUS_PLUS_BASELINE = "reinforce_plus_plus_baseline" REMAX = "remax" RLOO = "rloo" OPO = "opo" GRPO_PASSK = "grpo_passk" GPG = "gpg" RLOO_VECTORIZED = "rloo_vectorized" GRPO_VECTORIZED = "grpo_vectorized" ON_POLICY_DISTILL = "on_policy_distill" ADV_ESTIMATOR_REGISTRY: dict[str, Any] = {} def register_adv_est(name_or_enum: str | AdvantageEstimator) -> Any: """Decorator to register a advantage estimator function with a given name. Args: name_or_enum: `(str)` or `(AdvantageEstimator)` The name or enum of the advantage estimator. """ def decorator(fn): name = name_or_enum.value if isinstance(name_or_enum, Enum) else name_or_enum if name in ADV_ESTIMATOR_REGISTRY and ADV_ESTIMATOR_REGISTRY[name] != fn: raise ValueError( f"Adv estimator {name} has already been registered: {ADV_ESTIMATOR_REGISTRY[name]} vs {fn}" ) ADV_ESTIMATOR_REGISTRY[name] = fn return fn return decorator def get_adv_estimator_fn(name_or_enum): """Get the advantage estimator function with a given name. Args: name_or_enum: `(str)` or `(AdvantageEstimator)` The name or enum of the advantage estimator. Returns: `(callable)`: The advantage estimator function. 
""" name = name_or_enum.value if isinstance(name_or_enum, Enum) else name_or_enum if name not in ADV_ESTIMATOR_REGISTRY: raise ValueError(f"Unknown advantage estimator simply: {name}") return ADV_ESTIMATOR_REGISTRY[name] class AdaptiveKLController: """ Adaptive KL controller described in the paper: https://arxiv.org/pdf/1909.08593.pdf """ def __init__(self, init_kl_coef, target_kl, horizon): self.value = init_kl_coef self.target = target_kl self.horizon = horizon def update(self, current_kl, n_steps): """Update the KL coefficient based on current KL divergence. Args: current_kl (float): Current KL divergence value. n_steps (int): Number of steps taken. """ target = self.target proportional_error = np.clip(current_kl / target - 1, -0.2, 0.2) mult = 1 + proportional_error * n_steps / self.horizon self.value *= mult class FixedKLController: """Fixed KL controller.""" def __init__(self, kl_coef): self.value = kl_coef def update(self, current_kl, n_steps): """Update method for fixed KL controller (no-op). Args: current_kl (float): Current KL divergence value (unused). n_steps (int): Number of steps taken (unused). """ pass def get_kl_controller(kl_ctrl): """Factory function to create appropriate KL controller based on configuration. Args: kl_ctrl: Configuration object containing KL controller settings. Returns: KL controller instance (FixedKLController or AdaptiveKLController). Raises: NotImplementedError: If controller type is not supported. AssertionError: If adaptive controller horizon is not positive. """ if kl_ctrl.type == "fixed": return FixedKLController(kl_coef=kl_ctrl.kl_coef) elif kl_ctrl.type == "adaptive": assert kl_ctrl.horizon > 0, f"horizon must be larger than 0. Got {kl_ctrl.horizon}" return AdaptiveKLController(init_kl_coef=kl_ctrl.kl_coef, target_kl=kl_ctrl.target_kl, horizon=kl_ctrl.horizon) else: raise NotImplementedError @register_adv_est(AdvantageEstimator.GAE) # or simply: @register_adv_est("gae") def compute_gae_advantage_return( token_level_rewards: torch.Tensor, values: torch.Tensor, response_mask: torch.Tensor, gamma: torch.Tensor, lam: torch.Tensor, ): """Adapted from https://github.com/huggingface/trl/blob/main/trl/trainer/ppo_trainer.py Args: token_level_rewards: `(torch.Tensor)` shape is (bs, response_length) values: `(torch.Tensor)` shape is (bs, response_length) response_mask: `(torch.Tensor)` shape is (bs, response_length). [EOS] mask. The token after [EOS] have mask zero. 
gamma is `(float)` discounted factor used in RL lam: `(float)` lambda value when computing Generalized Advantage Estimation (https://arxiv.org/abs/1506.02438) Returns: advantages: `(torch.Tensor)` shape: (bs, response_length) Returns: `(torch.Tensor)` shape: (bs, response_length) """ with torch.no_grad(): nextvalues = 0 lastgaelam = 0 advantages_reversed = [] gen_len = token_level_rewards.shape[-1] for t in reversed(range(gen_len)): delta = token_level_rewards[:, t] + gamma * nextvalues - values[:, t] lastgaelam_ = delta + gamma * lam * lastgaelam # skip values and TD-error on observation tokens nextvalues = values[:, t] * response_mask[:, t] + (1 - response_mask[:, t]) * nextvalues lastgaelam = lastgaelam_ * response_mask[:, t] + (1 - response_mask[:, t]) * lastgaelam advantages_reversed.append(lastgaelam) advantages = torch.stack(advantages_reversed[::-1], dim=1) returns = advantages + values advantages = verl_F.masked_whiten(advantages, response_mask) return advantages, returns @register_adv_est(AdvantageEstimator.ON_POLICY_DISTILL) def compute_on_policy_distill_reverse_kl( teacher_log_prob: torch.Tensor, student_log_prob: torch.Tensor, config: Optional[AlgoConfig] = None, ) -> tuple[torch.Tensor, torch.Tensor]: reverse_kl = student_log_prob - teacher_log_prob return -reverse_kl, -reverse_kl # NOTE(sgm): this implementation only consider outcome supervision, where the reward is a scalar. @register_adv_est(AdvantageEstimator.GRPO) # or simply: @register_adv_est("grpo") def compute_grpo_outcome_advantage( token_level_rewards: torch.Tensor, response_mask: torch.Tensor, index: np.ndarray, epsilon: float = 1e-6, norm_adv_by_std_in_grpo: bool = True, config: Optional[AlgoConfig] = None, ) -> tuple[torch.Tensor, torch.Tensor]: """ Compute advantage for GRPO, operating only on Outcome reward (with only one scalar reward for each response). Args: token_level_rewards: `(torch.Tensor)` shape is (bs, response_length) response_mask: `(torch.Tensor)` shape is (bs, response_length) index: `(np.ndarray)` index array for grouping epsilon: `(float)` small value to avoid division by zero norm_adv_by_std_in_grpo: `(bool)` whether to scale the GRPO advantage config: `(Optional[AlgoConfig])` algorithm configuration object Note: If norm_adv_by_std_in_grpo is True, the advantage is scaled by the std, as in the original GRPO. If False, the advantage is not scaled, as in Dr.GRPO (https://arxiv.org/abs/2503.20783). 
Returns: advantages: `(torch.Tensor)` shape is (bs, response_length) Returns: `(torch.Tensor)` shape is (bs, response_length) """ scores = token_level_rewards.sum(dim=-1) id2score = defaultdict(list) id2mean = {} id2std = {} with torch.no_grad(): bsz = scores.shape[0] for i in range(bsz): id2score[index[i]].append(scores[i]) for idx in id2score: if len(id2score[idx]) == 1: id2mean[idx] = torch.tensor(0.0) id2std[idx] = torch.tensor(1.0) elif len(id2score[idx]) > 1: scores_tensor = torch.stack(id2score[idx]) id2mean[idx] = torch.mean(scores_tensor) id2std[idx] = torch.std(scores_tensor) else: raise ValueError(f"no score in prompt index: {idx}") for i in range(bsz): if norm_adv_by_std_in_grpo: scores[i] = (scores[i] - id2mean[index[i]]) / (id2std[index[i]] + epsilon) else: scores[i] = scores[i] - id2mean[index[i]] scores = scores.unsqueeze(-1) * response_mask return scores, scores @register_adv_est(AdvantageEstimator.GRPO_VECTORIZED) def compute_grpo_vectorized_outcome_advantage( token_level_rewards: torch.Tensor, response_mask: torch.Tensor, index: np.ndarray, epsilon: float = 1e-6, norm_adv_by_std_in_grpo: bool = True, config: Optional[AlgoConfig] = None, ) -> tuple[torch.Tensor, torch.Tensor]: """ Vectorized GRPO(outcome-only): For each group g: a_i = \\frac{r_i - \\mu_g}{\\sigma_g} (or without dividing by \\sigma_g), then broadcast the scalar across the token dimension (multiplied by response_mask).。 """ with torch.no_grad(): scores = token_level_rewards.sum(dim=-1) g = as_torch_index(index, device=scores.device) mean_g, std_g, _ = group_mean_std(scores, g, eps=epsilon) if norm_adv_by_std_in_grpo: scalars = (scores - mean_g[g]) / (std_g[g] + epsilon) else: scalars = scores - mean_g[g] advantages = scalars.unsqueeze(-1) * response_mask return advantages, advantages @register_adv_est(AdvantageEstimator.GRPO_PASSK) # or simply: @register_adv_est("grpo_passk") def compute_grpo_passk_outcome_advantage( token_level_rewards: torch.Tensor, response_mask: torch.Tensor, index: np.ndarray, epsilon: float = 1e-6, norm_adv_by_std_in_grpo: bool = True, config: Optional[AlgoConfig] = None, **kwargs, ) -> tuple[torch.Tensor, torch.Tensor]: """ Compute advantage for Pass@k using a GRPO-style outcome reward formulation. Only the best response per group gets a non-zero advantage: r_max - r_second_max. Implemented as described in https://arxiv.org/abs/2503.19595. Args: token_level_rewards: (bs, response_length) response_mask: (bs, response_length) index: (bs,) → group ID per sample epsilon: float for numerical stability config: (AlgoConfig) algorithm settings, which contains "norm_adv_by_std_in_grpo" Returns: advantages: (bs, response_length) returns: (bs, response_length) """ assert config is not None # if True, normalize advantage by std within group norm_adv_by_std_in_grpo = config.get("norm_adv_by_std_in_grpo", True) scores = token_level_rewards.sum(dim=-1) # (bs,) advantages = torch.zeros_like(scores) id2scores = defaultdict(list) id2indices = defaultdict(list) with torch.no_grad(): bsz = scores.shape[0] for i in range(bsz): idx = index[i] id2scores[idx].append(scores[i]) id2indices[idx].append(i) for idx in id2scores: rewards = torch.stack(id2scores[idx]) # (k,) if rewards.numel() < 2: raise ValueError( f"Pass@k requires at least 2 samples per group. Got {rewards.numel()} for group {idx}." 
) topk, topk_idx = torch.topk(rewards, 2) r_max, r_second_max = topk[0], topk[1] i_max = id2indices[idx][topk_idx[0].item()] advantage = r_max - r_second_max if norm_adv_by_std_in_grpo: std = torch.std(rewards) advantage = advantage / (std + epsilon) advantages[i_max] = advantage advantages = advantages.unsqueeze(-1) * response_mask return advantages, advantages @register_adv_est( AdvantageEstimator.REINFORCE_PLUS_PLUS_BASELINE ) # or simply: @register_adv_est("reinforce_plus_plus_baseline") def compute_reinforce_plus_plus_baseline_outcome_advantage( token_level_rewards: torch.Tensor, response_mask: torch.Tensor, index: torch.Tensor, epsilon: float = 1e-6, config: Optional[AlgoConfig] = None, **kwargs, ) -> tuple[torch.Tensor, torch.Tensor]: """ Compute advantage for RF++-baseline (https://arxiv.org/abs/2501.03262), operating only on Outcome reward (with only one scalar reward for each response). Args: token_level_rewards: `(torch.Tensor)` shape: (bs, response_length) response_mask: `(torch.Tensor)` shape: (bs, response_length) config: (AlgoConfig) algorithm config Returns: advantages: `(torch.Tensor)` shape: (bs, response_length) Returns: `(torch.Tensor)` shape: (bs, response_length) """ response_length = token_level_rewards.shape[-1] scores = token_level_rewards.sum(dim=-1) id2score = defaultdict(list) id2mean = {} with torch.no_grad(): bsz = scores.shape[0] for i in range(bsz): id2score[index[i]].append(scores[i]) for idx in id2score: if len(id2score[idx]) == 1: id2mean[idx] = torch.tensor(0.0) elif len(id2score[idx]) > 1: id2mean[idx] = torch.mean(torch.stack(id2score[idx])) else: raise ValueError(f"no score in prompt index: {idx}") for i in range(bsz): scores[i] = scores[i] - id2mean[index[i]] scores = scores.unsqueeze(-1).tile([1, response_length]) * response_mask scores = verl_F.masked_whiten(scores, response_mask) * response_mask return scores, scores @register_adv_est(AdvantageEstimator.RLOO) # or simply: @register_adv_est("rloo") def compute_rloo_outcome_advantage( token_level_rewards: torch.Tensor, response_mask: torch.Tensor, index: np.ndarray, epsilon: float = 1e-6, config: Optional[AlgoConfig] = None, **kwargs, ) -> tuple[torch.Tensor, torch.Tensor]: """ Compute advantage for RLOO based on https://arxiv.org/abs/2402.14740 Args: token_level_rewards: `(torch.Tensor)` shape: (bs, response_length) response_mask: `(torch.Tensor)` shape: (bs, response_length) config: (AlgoConfig) algorithm config Returns: advantages: `(torch.Tensor)` shape: (bs, response_length) Returns: `(torch.Tensor)` shape: (bs, response_length) """ scores = token_level_rewards.sum(dim=-1) id2score = defaultdict(list) id2mean = {} with torch.no_grad(): bsz = scores.shape[0] for i in range(bsz): id2score[index[i]].append(scores[i]) for idx in id2score: if len(id2score[idx]) == 1: id2mean[idx] = torch.tensor(0.0) elif len(id2score[idx]) > 1: id2mean[idx] = torch.mean(torch.stack(id2score[idx])) else: raise ValueError(f"no score in prompt index: {idx}") for i in range(bsz): response_num = len(id2score[index[i]]) if response_num > 1: scores[i] = scores[i] * response_num / (response_num - 1) - id2mean[index[i]] * response_num / ( response_num - 1 ) scores = scores.unsqueeze(-1) * response_mask return scores, scores @register_adv_est(AdvantageEstimator.OPO) # or simply: @register_adv_est("opo") def compute_opo_outcome_advantage( token_level_rewards: torch.Tensor, response_mask: torch.Tensor, index: np.ndarray, epsilon: float = 1e-6, config: Optional[AlgoConfig] = None, **kwargs, ) -> tuple[torch.Tensor, 
torch.Tensor]: """ Compute advantage for OPO based on https://arxiv.org/pdf/2505.23585 Args: token_level_rewards: `(torch.Tensor)` shape: (bs, response_length) response_mask: `(torch.Tensor)` shape: (bs, response_length) config: (AlgoConfig) algorithm config Returns: advantages: `(torch.Tensor)` shape: (bs, response_length) Returns: `(torch.Tensor)` shape: (bs, response_length) """ response_length = response_mask.sum(dim=-1) scores = token_level_rewards.sum(dim=-1) id2score = defaultdict(list) id2len = defaultdict(list) id2bsl = {} with torch.no_grad(): bsz = scores.shape[0] for i in range(bsz): id2score[index[i]].append(scores[i]) id2len[index[i]].append(response_length[i]) for idx in id2score: if len(id2score[idx]) == 1: id2bsl[idx] = torch.tensor(0.0) elif len(id2score[idx]) > 1: score_tensor = torch.stack(id2score[idx]) len_tensor = torch.stack(id2len[idx]) id2bsl[idx] = (len_tensor * score_tensor).sum() / len_tensor.sum() else: raise ValueError(f"no score in prompt index: {idx}") for i in range(bsz): scores[i] = scores[i] - id2bsl[index[i]] scores = scores.unsqueeze(-1) * response_mask return scores, scores @register_adv_est(AdvantageEstimator.REINFORCE_PLUS_PLUS) # or simply: @register_adv_est("reinforce_plus_plus") def compute_reinforce_plus_plus_outcome_advantage( token_level_rewards: torch.Tensor, response_mask: torch.Tensor, config: Optional[AlgoConfig] = None, **kwargs ) -> tuple[torch.Tensor, torch.Tensor]: """ Compute advantage for REINFORCE++. This implementation is based on the paper: https://arxiv.org/abs/2501.03262 Args: token_level_rewards: `(torch.Tensor)` shape: (bs, response_length) response_mask: `(torch.Tensor)` shape: (bs, response_length) config: (AlgoConfig) algorithm config Returns: advantages: `(torch.Tensor)` shape: (bs, response_length) Returns: `(torch.Tensor)` shape: (bs, response_length) """ assert config is not None gamma = config.gamma with torch.no_grad(): returns = torch.zeros_like(token_level_rewards) running_return = 0 for t in reversed(range(token_level_rewards.shape[1])): running_return = token_level_rewards[:, t] + gamma * running_return returns[:, t] = running_return # Reset after EOS running_return = running_return * response_mask[:, t] advantages = verl_F.masked_whiten(returns, response_mask) advantages = advantages * response_mask return advantages, returns @register_adv_est(AdvantageEstimator.REMAX) # or simply: @register_adv_est("remax") def compute_remax_outcome_advantage( token_level_rewards: torch.Tensor, reward_baselines: torch.Tensor, response_mask: torch.Tensor, config: Optional[AlgoConfig] = None, **kwargs, ) -> tuple[torch.Tensor, torch.Tensor]: """ Compute advantage for ReMax, operating only on Outcome reward This implementation is based on the paper: https://arxiv.org/abs/2310.10505 (with only one scalar reward for each response). 
Args: token_level_rewards: `(torch.Tensor)` shape: (bs, response_length) reward_baselines: `(torch.Tensor)` shape: (bs,) response_mask: `(torch.Tensor)` shape: (bs, response_length) config: (AlgoConfig) algorithm config Returns: advantages: `(torch.Tensor)` shape: (bs, response_length) Returns: `(torch.Tensor)` shape: (bs, response_length) """ with torch.no_grad(): returns = (token_level_rewards * response_mask).flip(dims=[-1]).cumsum(dim=-1).flip(dims=[-1]) advantages = returns - reward_baselines.unsqueeze(-1) * response_mask return advantages, returns @register_adv_est(AdvantageEstimator.GPG) # or simply: @register_adv_est("gpg") def compute_gpg_outcome_advantage( token_level_rewards: torch.Tensor, response_mask: torch.Tensor, index: np.ndarray, epsilon: float = 1e-6, f_norm: float = 1.0, alpha: float = 1.0, config=None, **kwargs, ): """ Compute advantage for GPG, operating only on Outcome reward (with only one scalar reward for each response). Args: token_level_rewards: `(torch.Tensor)` shape: (bs, response_length) response_mask: `(torch.Tensor)` shape: (bs, response_length) index: `(np.ndarray)` shape: (bs,) epsilon: (float) f_norm: (float) alpha: (float) config: (dict) algorithm config Returns: advantages: `(torch.Tensor)` shape: (bs, response_length) Returns: `(torch.Tensor)` shape: (bs, response_length) """ scores = token_level_rewards.sum(dim=-1) id2score = defaultdict(list) id2mean = {} id2std = {} with torch.no_grad(): bsz = scores.shape[0] m = torch.count_nonzero(scores) alpha = bsz / m.clamp(min=1) for i in range(bsz): id2score[index[i]].append(scores[i]) for idx in id2score: if len(id2score[idx]) == 1: id2mean[idx] = torch.tensor(0.0) id2std[idx] = torch.tensor(1.0) elif len(id2score[idx]) > 1: scores_tensor = torch.stack(id2score[idx]) id2mean[idx] = torch.mean(scores_tensor) id2std[idx] = torch.std(scores_tensor) else: raise ValueError(f"no score in prompt index: {idx}") for i in range(bsz): scores[i] = alpha * (scores[i] - id2mean[index[i]]) / (f_norm) scores = scores.unsqueeze(-1) * response_mask return scores, scores @register_adv_est(AdvantageEstimator.RLOO_VECTORIZED) # or simply: @register_adv_est("rloo_vectorized") def compute_rloo_vectorized_outcome_advantage( token_level_rewards: torch.Tensor, response_mask: torch.Tensor, index: np.ndarray, epsilon: float = 1e-6, config: Optional[AlgoConfig] = None, **kwargs, ) -> tuple[torch.Tensor, torch.Tensor]: """ Compute advantage for RLOO based on https://arxiv.org/abs/2402.14740 Args: token_level_rewards: `(torch.Tensor)` shape: (bs, response_length) response_mask: `(torch.Tensor)` shape: (bs, response_length) config: (AlgoConfig) algorithm config Returns: advantages: `(torch.Tensor)` shape: (bs, response_length) Returns: `(torch.Tensor)` shape: (bs, response_length) """ scores = token_level_rewards.sum(dim=-1) with torch.no_grad(): inv = torch.from_numpy(np.unique(index, return_inverse=True)[1]).to(scores.device) c = torch.bincount(inv)[inv].to(scores.dtype) adv = ((c * scores - torch.bincount(inv, weights=scores)[inv]) / (c - 1).clamp_min(1)) * (c > 1) adv = adv.unsqueeze(-1) * response_mask return adv, adv def compute_rewards(token_level_scores, old_log_prob, ref_log_prob, kl_ratio): """Compute token-level rewards with KL penalty. Args: token_level_scores (torch.Tensor): Token-level reward scores. old_log_prob (torch.Tensor): Log probabilities from current policy. ref_log_prob (torch.Tensor): Log probabilities from reference policy. kl_ratio (float): KL penalty coefficient. 
Returns: torch.Tensor: Token-level rewards with KL penalty applied. """ kl = old_log_prob - ref_log_prob return token_level_scores - kl * kl_ratio def agg_loss(loss_mat: torch.Tensor, loss_mask: torch.Tensor, loss_agg_mode: str): """ Aggregate the loss matrix into a scalar. Args: loss_mat: `(torch.Tensor)`: shape: (bs, response_length) loss_mask: `(torch.Tensor)`: shape: (bs, response_length) loss_agg_mode: (str) choices: method to aggregate the loss matrix into a scalar. Returns: loss: `a scalar torch.Tensor` aggregated loss """ if loss_agg_mode == "token-mean": loss = verl_F.masked_mean(loss_mat, loss_mask) elif loss_agg_mode == "seq-mean-token-sum": seq_losses = torch.sum(loss_mat * loss_mask, dim=-1) # token-sum seq_mask = (torch.sum(loss_mask, dim=-1) > 0).float() # exclude fully masked sequences loss = verl_F.masked_mean(seq_losses, seq_mask) # seq-mean elif loss_agg_mode == "seq-mean-token-mean": seq_mask = torch.sum(loss_mask, dim=-1) # per-sequence token count seq_losses = torch.sum(loss_mat * loss_mask, dim=-1) / (seq_mask + 1e-8) # token-mean seq_mask = (seq_mask > 0).float() # exclude fully masked sequences loss = verl_F.masked_mean(seq_losses, seq_mask) # seq-mean elif loss_agg_mode == "seq-mean-token-sum-norm": seq_losses = torch.sum(loss_mat * loss_mask, dim=-1) loss = torch.sum(seq_losses) / loss_mask.shape[-1] # The divisor # (loss_mask.shape[-1]) should ideally be constant # throughout training to well-replicate the DrGRPO paper. # TODO: Perhaps add user-defined normalizer argument to # agg_loss to ensure divisor stays constant throughout. else: raise ValueError(f"Invalid loss_agg_mode: {loss_agg_mode}") return loss @deprecated("verl.trainer.ppo.core_algos.compute_policy_loss_vanilla") def compute_policy_loss( old_log_prob, log_prob, advantages, response_mask, cliprange=None, cliprange_low=None, cliprange_high=None, clip_ratio_c=3.0, loss_agg_mode: str = "token-mean", ): """ Compute the clipped policy objective and related metrics for PPO. Adapted from https://github.com/huggingface/trl/blob/main/trl/trainer/ppo_trainer.py#L1122 Args: old_log_prob (torch.Tensor): Log-probabilities of actions under the old policy, shape (batch_size, response_length). log_prob (torch.Tensor): Log-probabilities of actions under the current policy, shape (batch_size, response_length). advantages (torch.Tensor): Advantage estimates for each action, shape (batch_size, response_length). response_mask (torch.Tensor): Mask indicating which tokens to include in the loss, shape (batch_size, response_length). cliprange (float, optional): Clipping parameter ε for standard PPO. See https://arxiv.org/abs/1707.06347. Defaults to None (must be provided). cliprange_low (float, optional): Lower clip range for dual-clip PPO. Defaults to same as `cliprange`. cliprange_high (float, optional): Upper clip range for dual-clip PPO. Defaults to same as `cliprange`. clip_ratio_c (float, optional): Lower bound of the ratio for dual-clip PPO. See https://arxiv.org/pdf/1912.09729. Defaults to 3.0. loss_agg_mode (str, optional): Aggregation mode for `agg_loss`. Defaults to "token-mean". """ assert clip_ratio_c > 1.0, ( "The lower bound of the clip_ratio_c for dual-clip PPO should be greater than 1.0," + f" but get the value: {clip_ratio_c}." 
    )
    negative_approx_kl = log_prob - old_log_prob
    # Clamp negative_approx_kl for stability
    negative_approx_kl = torch.clamp(negative_approx_kl, min=-20.0, max=20.0)
    ratio = torch.exp(negative_approx_kl)
    ppo_kl = verl_F.masked_mean(-negative_approx_kl, response_mask)

    pg_losses1 = -advantages * ratio
    if cliprange_low is None:
        cliprange_low = cliprange
    if cliprange_high is None:
        cliprange_high = cliprange
    pg_losses2 = -advantages * torch.clamp(
        ratio, 1 - cliprange_low, 1 + cliprange_high
    )  # - clip(ratio, 1-cliprange, 1+cliprange) * A
    clip_pg_losses1 = torch.maximum(
        pg_losses1, pg_losses2
    )  # max(-ratio * A, -clip(ratio, 1-cliprange, 1+cliprange) * A)
    pg_clipfrac = verl_F.masked_mean(torch.gt(pg_losses2, pg_losses1).float(), response_mask)

    pg_losses3 = -advantages * clip_ratio_c
    clip_pg_losses2 = torch.min(pg_losses3, clip_pg_losses1)
    pg_clipfrac_lower = verl_F.masked_mean(
        torch.gt(clip_pg_losses1, pg_losses3) * (advantages < 0).float(), response_mask
    )

    pg_losses = torch.where(advantages < 0, clip_pg_losses2, clip_pg_losses1)
    pg_loss = agg_loss(loss_mat=pg_losses, loss_mask=response_mask, loss_agg_mode=loss_agg_mode)

    return pg_loss, pg_clipfrac, ppo_kl, pg_clipfrac_lower


@register_policy_loss("vanilla")  # type: ignore[arg-type]
def compute_policy_loss_vanilla(
    old_log_prob: torch.Tensor,
    log_prob: torch.Tensor,
    advantages: torch.Tensor,
    response_mask: torch.Tensor,
    loss_agg_mode: str = "token-mean",
    config: Optional[DictConfig | AlgoConfig] = None,
    rollout_is_weights: torch.Tensor | None = None,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
    """
    Compute the clipped policy objective and related metrics for PPO.

    Adapted from
    https://github.com/huggingface/trl/blob/main/trl/trainer/ppo_trainer.py#L1122

    Args:
        old_log_prob (torch.Tensor):
            Log-probabilities of actions under the old policy, shape (batch_size, response_length).
        log_prob (torch.Tensor):
            Log-probabilities of actions under the current policy, shape (batch_size, response_length).
        advantages (torch.Tensor):
            Advantage estimates for each action, shape (batch_size, response_length).
        response_mask (torch.Tensor):
            Mask indicating which tokens to include in the loss, shape (batch_size, response_length).
        loss_agg_mode (str, optional):
            Aggregation mode for `agg_loss`. Defaults to "token-mean".
        config: `(verl.trainer.config.ActorConfig)`: config for the actor.
        rollout_is_weights: `(torch.Tensor)`: importance sampling weights for samples drawn from the
            rollout policy, shape (batch_size, response_length).
    """
    assert config is not None
    assert not isinstance(config, AlgoConfig)
    clip_ratio = config.clip_ratio  # Clipping parameter ε for standard PPO. See https://arxiv.org/abs/1707.06347.
    clip_ratio_low = config.clip_ratio_low if config.clip_ratio_low is not None else clip_ratio
    clip_ratio_high = config.clip_ratio_high if config.clip_ratio_high is not None else clip_ratio
    clip_ratio_c = config.get(  # Lower bound of the ratio for dual-clip PPO. See https://arxiv.org/pdf/1912.09729.
        "clip_ratio_c", 3.0
    )

    cliprange = clip_ratio
    cliprange_low = clip_ratio_low
    cliprange_high = clip_ratio_high

    assert clip_ratio_c > 1.0, (
        "The lower bound of the clip_ratio_c for dual-clip PPO should be greater than 1.0,"
        + f" but get the value: {clip_ratio_c}."
@register_policy_loss("vanilla")  # type: ignore[arg-type]
def compute_policy_loss_vanilla(
    old_log_prob: torch.Tensor,
    log_prob: torch.Tensor,
    advantages: torch.Tensor,
    response_mask: torch.Tensor,
    loss_agg_mode: str = "token-mean",
    config: Optional[DictConfig | AlgoConfig] = None,
    rollout_is_weights: torch.Tensor | None = None,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
    """
    Compute the clipped policy objective and related metrics for PPO.

    Adapted from
    https://github.com/huggingface/trl/blob/main/trl/trainer/ppo_trainer.py#L1122

    Args:
        old_log_prob (torch.Tensor):
            Log-probabilities of actions under the old policy, shape (batch_size, response_length).
        log_prob (torch.Tensor):
            Log-probabilities of actions under the current policy, shape (batch_size, response_length).
        advantages (torch.Tensor):
            Advantage estimates for each action, shape (batch_size, response_length).
        response_mask (torch.Tensor):
            Mask indicating which tokens to include in the loss, shape (batch_size, response_length).
        loss_agg_mode (str, optional):
            Aggregation mode for `agg_loss`. Defaults to "token-mean".
        config: `(verl.trainer.config.ActorConfig)`:
            config for the actor.
        rollout_is_weights: `(torch.Tensor, optional)`:
            rollout importance sampling weights used to correct the rollout/training
            policy mismatch, shape (batch_size, response_length).
    """
    assert config is not None
    assert not isinstance(config, AlgoConfig)
    clip_ratio = config.clip_ratio  # Clipping parameter ε for standard PPO. See https://arxiv.org/abs/1707.06347.
    clip_ratio_low = config.clip_ratio_low if config.clip_ratio_low is not None else clip_ratio
    clip_ratio_high = config.clip_ratio_high if config.clip_ratio_high is not None else clip_ratio
    clip_ratio_c = config.get(  # Lower bound of the ratio for dual-clip PPO. See https://arxiv.org/pdf/1912.09729.
        "clip_ratio_c", 3.0
    )

    cliprange = clip_ratio
    cliprange_low = clip_ratio_low
    cliprange_high = clip_ratio_high

    assert clip_ratio_c > 1.0, (
        "The lower bound of the clip_ratio_c for dual-clip PPO should be greater than 1.0,"
        + f" but got the value: {clip_ratio_c}."
    )

    negative_approx_kl = log_prob - old_log_prob
    # Clamp negative_approx_kl for stability
    negative_approx_kl = torch.clamp(negative_approx_kl, min=-20.0, max=20.0)
    ratio = torch.exp(negative_approx_kl)
    ppo_kl = verl_F.masked_mean(-negative_approx_kl, response_mask)

    pg_losses1 = -advantages * ratio
    if cliprange_low is None:
        cliprange_low = cliprange
    if cliprange_high is None:
        cliprange_high = cliprange
    pg_losses2 = -advantages * torch.clamp(
        ratio, 1 - cliprange_low, 1 + cliprange_high
    )  # -clip(ratio, 1-cliprange, 1+cliprange) * A
    clip_pg_losses1 = torch.maximum(
        pg_losses1, pg_losses2
    )  # max(-ratio * A, -clip(ratio, 1-cliprange, 1+cliprange) * A)
    pg_clipfrac = verl_F.masked_mean(torch.gt(pg_losses2, pg_losses1).float(), response_mask)

    pg_losses3 = -advantages * clip_ratio_c
    clip_pg_losses2 = torch.min(pg_losses3, clip_pg_losses1)
    pg_clipfrac_lower = verl_F.masked_mean(
        torch.gt(clip_pg_losses1, pg_losses3) * (advantages < 0).float(), response_mask
    )

    pg_losses = torch.where(advantages < 0, clip_pg_losses2, clip_pg_losses1)

    # Apply rollout importance sampling weights if provided
    if rollout_is_weights is not None:
        pg_losses = pg_losses * rollout_is_weights

    pg_loss = agg_loss(loss_mat=pg_losses, loss_mask=response_mask, loss_agg_mode=loss_agg_mode)

    return pg_loss, pg_clipfrac, ppo_kl, pg_clipfrac_lower
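# Illustrative sketch (not part of the original source): the effect of the
# optional rollout_is_weights correction applied just before aggregation in
# compute_policy_loss_vanilla above. Weights near 1.0 leave tokens unchanged;
# down-weighted tokens contribute proportionally less to the aggregated loss.
#
#   pg_losses          = [[0.5, 0.5]]
#   rollout_is_weights = [[1.0, 0.2]]   # rollout policy over-sampled token 2
#   weighted losses    = [[0.5, 0.1]]
#   token-mean loss    = 0.3 (vs. 0.5 without the correction)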
""" assert config is not None assert isinstance(config, ActorConfig) clip_ratio_low = config.clip_ratio_low if config.clip_ratio_low is not None else config.clip_ratio clip_ratio_high = config.clip_ratio_high if config.clip_ratio_high is not None else config.clip_ratio negative_approx_kl = log_prob - old_log_prob # compute sequence-level importance ratio: # si(θ) = (π_θ(yi|x)/π_θold(yi|x))^(1/|yi|) = # exp [(1/|y_i|) * Σ_t log(π_θ(y_i,t|x,y_i, tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: """Adapted from https://github.com/AMAP-ML/GPG/blob/main/VisualThinker-R1-Zero/src/open-r1-multimodal/src/open_r1/trainer/grpo_trainer.py#L495 Args: log_prob: `(torch.Tensor)` shape: (bs, response_length) advantages: `(torch.Tensor)` shape: (bs, response_length) response_mask: `(torch.Tensor)` shape: (bs, response_length) return: pg_loss: `a scalar torch.Tensor` policy gradient loss computed via GPG """ pg_losses = -log_prob * advantages # Apply rollout importance sampling weights if provided if rollout_is_weights is not None: pg_losses = pg_losses * rollout_is_weights pg_loss = agg_loss(loss_mat=pg_losses, loss_mask=response_mask, loss_agg_mode=loss_agg_mode) return pg_loss, torch.tensor(0.0), torch.tensor(0.0), torch.tensor(0.0) @register_policy_loss("clip_cov") def compute_policy_loss_clip_cov( old_log_prob: torch.Tensor, log_prob: torch.Tensor, advantages: torch.Tensor, response_mask: torch.Tensor, loss_agg_mode: str = "token-mean", config: Optional[DictConfig | AlgoConfig] = None, rollout_is_weights: torch.Tensor | None = None, ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: """ Compute the clipped policy objective and related metrics for Clip-Cov. Adapted from https://github.com/PRIME-RL/Entropy-Mechanism-of-RL/blob/main/verl/trainer/ppo/core_algos.py Args: old_log_prob (torch.Tensor): Log-probabilities of actions under the old policy, shape (batch_size, response_length). log_prob (torch.Tensor): Log-probabilities of actions under the current policy, shape (batch_size, response_length). advantages (torch.Tensor): Advantage estimates for each action, shape (batch_size, response_length). response_mask (torch.Tensor): Mask indicating which tokens to include in the loss, shape (batch_size, response_length). cliprange (float, optional): Clipping parameter ε for standard PPO. See https://arxiv.org/abs/1707.06347. Defaults to None (must be provided). cliprange_low (float, optional): Lower clip range for dual-clip PPO. Defaults to same as `cliprange`. cliprange_high (float, optional): Upper clip range for dual-clip PPO. Defaults to same as `cliprange`. loss_agg_mode (str, optional): Aggregation mode for `agg_loss`. Defaults to "token-mean". clip_cvo_ratio (float, optional): Ratio for clipping the covariance. Defaults to 0.0002. clip_cov_lb (float, optional): Lower bound for clipping covariance. Defaults to 1.0. clip_cov_ub (float, optional): Upper bound for clipping covariance. Defaults to 5.0. 
""" assert config is not None assert not isinstance(config, AlgoConfig), "passing AlgoConfig not supported yet" assert config.policy_loss is not None clip_cov_ratio = config.policy_loss.clip_cov_ratio if config.policy_loss.clip_cov_ratio is not None else 0.0002 cliprange = config.clip_ratio cliprange_low = config.clip_ratio_low if config.clip_ratio_low is not None else cliprange cliprange_high = config.clip_ratio_high if config.clip_ratio_high is not None else cliprange clip_cov_ub = config.policy_loss.clip_cov_ub if config.policy_loss.clip_cov_ub is not None else 5.0 clip_cov_lb = config.policy_loss.clip_cov_lb if config.policy_loss.clip_cov_lb is not None else 1.0 assert clip_cov_ratio > 0, "clip_ratio should be larger than 0." negative_approx_kl = log_prob - old_log_prob ratio = torch.exp(negative_approx_kl) ppo_kl = verl_F.masked_mean(-negative_approx_kl, response_mask) pg_losses1 = -advantages * ratio if cliprange_low is None: cliprange_low = cliprange if cliprange_high is None: cliprange_high = cliprange corr = torch.ones_like(advantages) pg_losses2 = -advantages * torch.clamp(ratio, 1 - cliprange_low, 1 + cliprange_high) clip_by_origin = (pg_losses2 > pg_losses1) & (response_mask > 0) cov_all = (advantages - verl_F.masked_mean(advantages, response_mask)) * ( log_prob - verl_F.masked_mean(log_prob.detach(), response_mask) ) cov_all[response_mask == 0] = -torch.inf cov_all[clip_by_origin] = -torch.inf clip_num = max(int(clip_cov_ratio * response_mask.sum().item()), 1) top_k_idx = (cov_all < clip_cov_ub) & (cov_all > clip_cov_lb) & (response_mask > 0) top_k_idx = torch.nonzero(top_k_idx) if len(top_k_idx) > 0: perm = torch.randperm(len(top_k_idx)) top_k_idx = top_k_idx[perm[: min(clip_num, len(top_k_idx))]] else: top_k_idx = torch.empty((0, 2), device=cov_all.device, dtype=torch.long) corr[top_k_idx[:, 0], top_k_idx[:, 1]] = 0 pg_clipfrac = verl_F.masked_mean((corr == 0).float(), response_mask) pg_losses = torch.maximum(pg_losses1, pg_losses2) * corr # Apply rollout importance sampling weights if provided if rollout_is_weights is not None: pg_losses = pg_losses * rollout_is_weights pg_loss = agg_loss(loss_mat=pg_losses, loss_mask=response_mask, loss_agg_mode=loss_agg_mode) return pg_loss, pg_clipfrac, ppo_kl, torch.tensor(0.0) @register_policy_loss("kl_cov") def compute_policy_loss_kl_cov( old_log_prob: torch.Tensor, log_prob: torch.Tensor, advantages: torch.Tensor, response_mask: torch.Tensor, loss_agg_mode: str = "token-mean", config: Optional[DictConfig | AlgoConfig] = None, rollout_is_weights: torch.Tensor | None = None, ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: """ Compute the clipped policy objective and related metrics for Clip-Cov. Adapted from https://github.com/PRIME-RL/Entropy-Mechanism-of-RL/blob/main/verl/trainer/ppo/core_algos.py Args: old_log_prob (torch.Tensor): Log-probabilities of actions under the old policy, shape (batch_size, response_length). log_prob (torch.Tensor): Log-probabilities of actions under the current policy, shape (batch_size, response_length). advantages (torch.Tensor): Advantage estimates for each action, shape (batch_size, response_length). response_mask (torch.Tensor): Mask indicating which tokens to include in the loss, shape (batch_size, response_length). loss_agg_mode (str, optional): Aggregation mode for `agg_loss`. Defaults to "token-mean". kl_cov_ratio (float, optional): Ratio for selecting the top-k covariance values. Defaults to 0.0002. 
@register_policy_loss("kl_cov")
def compute_policy_loss_kl_cov(
    old_log_prob: torch.Tensor,
    log_prob: torch.Tensor,
    advantages: torch.Tensor,
    response_mask: torch.Tensor,
    loss_agg_mode: str = "token-mean",
    config: Optional[DictConfig | AlgoConfig] = None,
    rollout_is_weights: torch.Tensor | None = None,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
    """
    Compute the clipped policy objective and related metrics for KL-Cov.

    Adapted from
    https://github.com/PRIME-RL/Entropy-Mechanism-of-RL/blob/main/verl/trainer/ppo/core_algos.py

    Args:
        old_log_prob (torch.Tensor):
            Log-probabilities of actions under the old policy, shape (batch_size, response_length).
        log_prob (torch.Tensor):
            Log-probabilities of actions under the current policy, shape (batch_size, response_length).
        advantages (torch.Tensor):
            Advantage estimates for each action, shape (batch_size, response_length).
        response_mask (torch.Tensor):
            Mask indicating which tokens to include in the loss, shape (batch_size, response_length).
        loss_agg_mode (str, optional):
            Aggregation mode for `agg_loss`. Defaults to "token-mean".
        kl_cov_ratio (float, optional):
            Ratio for selecting the top-k covariance values. Defaults to 0.0002.
        ppo_kl_coef (float, optional):
            Coefficient for the KL penalty term in the loss. Defaults to 1.
    """
    assert config is not None
    assert not isinstance(config, AlgoConfig), "passing AlgoConfig not supported yet"
    assert config.policy_loss is not None

    kl_cov_ratio = config.policy_loss.kl_cov_ratio if config.policy_loss.kl_cov_ratio is not None else 0.0002
    ppo_kl_coef = config.policy_loss.ppo_kl_coef if config.policy_loss.ppo_kl_coef is not None else 1.0

    assert kl_cov_ratio > 0, "kl_cov_ratio should be larger than 0."

    negative_approx_kl = log_prob - old_log_prob
    abs_kl = negative_approx_kl.abs()
    ratio = torch.exp(negative_approx_kl)
    ppo_kl_abs = verl_F.masked_mean(negative_approx_kl.abs(), response_mask)
    pg_losses1 = -advantages * ratio
    pg_losses_kl = -advantages * ratio + ppo_kl_coef * abs_kl
    pg_losses = pg_losses1

    all_valid = response_mask > 0
    all_valid_idx = torch.nonzero(all_valid.reshape(-1), as_tuple=True)[0]
    all_valid_adv = advantages[all_valid].detach().reshape(-1).cpu()
    all_valid_logp = log_prob[all_valid].detach().reshape(-1).cpu()

    # k is zero only when there are no valid tokens (kl_cov_ratio is a positive
    # fraction, so the min() just guards the empty-batch case).
    k = min(kl_cov_ratio, len(all_valid_adv))

    if k != 0:
        cov_lst_all = (all_valid_adv - all_valid_adv.mean()) * (all_valid_logp - all_valid_logp.mean())
        k_percent_nums = max(1, int(len(cov_lst_all) * kl_cov_ratio))
        large_cov_idxs = torch.topk(cov_lst_all, k_percent_nums, largest=True).indices

        if len(large_cov_idxs) != 0:
            large_cov_idxs = all_valid_idx[large_cov_idxs]
            pg_losses[large_cov_idxs // advantages.shape[1], large_cov_idxs % advantages.shape[1]] = pg_losses_kl[
                large_cov_idxs // advantages.shape[1], large_cov_idxs % advantages.shape[1]
            ]

    # Apply rollout importance sampling weights if provided
    if rollout_is_weights is not None:
        pg_losses = pg_losses * rollout_is_weights

    pg_loss = agg_loss(loss_mat=pg_losses, loss_mask=response_mask, loss_agg_mode=loss_agg_mode)

    return pg_loss, torch.tensor(0.0), ppo_kl_abs, torch.tensor(0.0)
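# Illustrative sketch (not part of the original source): the KL-Cov selection
# rule in compute_policy_loss_kl_cov above. Valid tokens are ranked by the
# covariance of centered advantages and centered log-probs; the top
# kl_cov_ratio fraction (at least one token) swaps its plain objective for the
# KL-regularized one:
#
#   default : -A_t * r_t
#   selected: -A_t * r_t + ppo_kl_coef * |log πθ - log πθ_old|_t
#
# E.g., with 10,000 valid tokens and kl_cov_ratio = 0.0002, the 2 tokens with
# the largest covariance receive the extra |KL| penalty.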
@register_policy_loss("geo_mean")
def compute_policy_loss_geo_mean(
    old_log_prob: torch.Tensor,
    log_prob: torch.Tensor,
    advantages: torch.Tensor,
    response_mask: torch.Tensor,
    loss_agg_mode: str = "token-mean",
    config: Optional[DictConfig | AlgoConfig] = None,
    rollout_is_weights: torch.Tensor | None = None,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
    """
    Compute the clipped policy objective and related metrics for GMPO.

    Adapted from paper https://arxiv.org/abs/2507.20673
    https://github.com/callsys/GMPO/blob/main/train_zero_math_gmpo.py

    Args:
        old_log_prob (torch.Tensor):
            Log-probabilities of actions under the old policy, shape (batch_size, response_length).
        log_prob (torch.Tensor):
            Log-probabilities of actions under the current policy, shape (batch_size, response_length).
        advantages (torch.Tensor):
            Advantage estimates for each action, shape (batch_size, response_length).
        response_mask (torch.Tensor):
            Mask indicating which tokens to include in the loss, shape (batch_size, response_length).
        loss_agg_mode (str, optional):
            not used
    """
    assert config is not None
    assert not isinstance(config, AlgoConfig)
    clip_ratio = config.clip_ratio  # Clipping parameter. See https://arxiv.org/abs/1707.06347.
    clip_ratio_low = config.clip_ratio_low if config.clip_ratio_low is not None else clip_ratio
    clip_ratio_high = config.clip_ratio_high if config.clip_ratio_high is not None else clip_ratio

    cliprange = clip_ratio
    cliprange_low = clip_ratio_low
    cliprange_high = clip_ratio_high
    if cliprange_low is None:
        cliprange_low = cliprange
    if cliprange_high is None:
        cliprange_high = cliprange

    negative_approx_kl = log_prob - old_log_prob
    # Clamp negative_approx_kl for stability (uncomment it if you like)
    # negative_approx_kl = torch.clamp(negative_approx_kl, min=-20.0, max=20.0)
    ppo_kl = verl_F.masked_mean(-negative_approx_kl, response_mask)

    # Clipping at token level & clipping wider
    sgn_advantage = torch.sign(advantages)
    negative_approx_kl_clamp = torch.clamp(negative_approx_kl, -cliprange_low, cliprange_high)
    negative_approx_kl_min = torch.min(sgn_advantage * negative_approx_kl, sgn_advantage * negative_approx_kl_clamp)
    negative_approx_kl_min = sgn_advantage * negative_approx_kl_min

    # Geometric-Mean Policy Optimization
    response_mask_sum = response_mask.sum(dim=-1)
    ratio = torch.exp((negative_approx_kl_min * response_mask).sum(dim=-1) / (response_mask_sum + 1e-8))
    # We only support sequence-level advantages for now; otherwise the
    # computation below would not be consistent with the paper.
    advantage = (advantages * response_mask).sum(dim=-1) / (response_mask_sum + 1e-8)
    pg_losses = -advantage * ratio

    # Apply rollout importance sampling weights if provided
    # For geo_mean, IS weights are 2D (batch_size, seq_length) and need to be aggregated to sequence level
    if rollout_is_weights is not None:
        # Aggregate token-level weights to sequence level using geometric mean for consistency
        # Note: rollout_is_weights is always 2D regardless of rollout_is_level
        seq_is_weights = torch.exp(
            (torch.log(rollout_is_weights + 1e-10) * response_mask).sum(dim=-1) / (response_mask_sum + 1e-8)
        )
        pg_losses = pg_losses * seq_is_weights

    pg_loss = torch.mean(pg_losses)

    # higher: the ratio is too large and needs clamping to clip_high (when adv > 0)
    clipped = torch.ne(negative_approx_kl, negative_approx_kl_clamp)
    pg_clipfrac = verl_F.masked_mean((clipped * (advantages > 0)).float(), response_mask)
    pg_clipfrac_lower = verl_F.masked_mean((clipped * (advantages < 0)).float(), response_mask)

    return pg_loss, pg_clipfrac, ppo_kl, pg_clipfrac_lower


def compute_entropy_loss(logits, response_mask, loss_agg_mode: str = "token-mean"):
    """Compute categorical entropy loss (For backward compatibility)

    Args:
        logits (torch.Tensor): shape is (bs, response_length, vocab_size)
        response_mask (torch.Tensor): shape is (bs, response_length)

    Returns:
        entropy: a scalar torch.Tensor
    """
    # compute entropy
    token_entropy = verl_F.entropy_from_logits(logits)  # (bs, response_len)
    entropy_loss = agg_loss(loss_mat=token_entropy, loss_mask=response_mask, loss_agg_mode=loss_agg_mode)
    return entropy_loss
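# Illustrative sketch (not part of the original source): arithmetic vs.
# geometric mean of token ratios, the core idea of compute_policy_loss_geo_mean
# (GMPO) above, on a toy two-token response:
#
#   per-token ratios: [4.0, 0.25]  (log-ratios [+1.386, -1.386])
#   arithmetic mean : (4.0 + 0.25) / 2 = 2.125
#   geometric mean  : exp((1.386 - 1.386) / 2) = 1.0
#
# The geometric mean is far less sensitive to a single outlier token, which is
# why GMPO optimizes it at the sequence level.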
def compute_value_loss(
    vpreds: torch.Tensor,
    returns: torch.Tensor,
    values: torch.Tensor,
    response_mask: torch.Tensor,
    cliprange_value: float,
    loss_agg_mode: str = "token-mean",
):
    """
    Compute the clipped value-function loss for PPO.

    Copied from https://github.com/huggingface/trl/blob/main/trl/trainer/ppo_trainer.py#L1151

    Args:
        vpreds (torch.FloatTensor):
            Predicted values from the value head, shape (batch_size, response_length).
        values (torch.FloatTensor):
            Old (baseline) values from the value head, shape (batch_size, response_length).
        returns (torch.FloatTensor):
            Ground-truth returns, shape (batch_size, response_length).
        response_mask (torch.Tensor):
            Mask indicating which tokens to include in the value loss calculation.
        cliprange_value (float):
            Clip range for value prediction updates.
        loss_agg_mode (str, optional):
            Aggregation mode for `agg_loss`. Defaults to "token-mean".

    Returns:
        vf_loss (torch.FloatTensor):
            A scalar tensor containing the aggregated value-function loss.
        vf_clipfrac (float):
            Fraction of elements where the clipped loss was used.
    """
    vpredclipped = verl_F.clip_by_value(vpreds, values - cliprange_value, values + cliprange_value)
    vf_losses1 = (vpreds - returns) ** 2
    vf_losses2 = (vpredclipped - returns) ** 2
    clipped_vf_losses = torch.max(vf_losses1, vf_losses2)
    vf_loss = 0.5 * agg_loss(loss_mat=clipped_vf_losses, loss_mask=response_mask, loss_agg_mode=loss_agg_mode)
    vf_clipfrac = verl_F.masked_mean(torch.gt(vf_losses2, vf_losses1).float(), response_mask)
    return vf_loss, vf_clipfrac


def kl_penalty(logprob: torch.FloatTensor, ref_logprob: torch.FloatTensor, kl_penalty) -> torch.FloatTensor:
    """Compute KL divergence given logprob and ref_logprob.

    Optionally uses a straight-through trick that binds the k2 gradient to the
    other KL penalty estimates, giving an unbiased KL gradient estimate.
    See more description in http://joschu.net/blog/kl-approx.html

    Args:
        logprob: log-probabilities under the current policy.
        ref_logprob: log-probabilities under the reference policy.

    Returns:
        kl_estimate
    """
    forward_score = kl_penalty_forward(logprob, ref_logprob, kl_penalty)
    if not kl_penalty.endswith("+") or kl_penalty in ("mse", "k2"):
        return forward_score

    # The expectation of the k1 and k3 estimators is the expected value of the KL,
    # but their expected gradient is not the expected gradient of the KL. The k2
    # estimator, on the other hand, gives the right gradient estimate, so we use a
    # straight-through trick here if the kl_penalty method ends with '+', e.g., k3+.
    backward_score = 0.5 * (logprob - ref_logprob).square()
    return backward_score - backward_score.detach() + forward_score.detach()


def kl_penalty_forward(logprob: torch.FloatTensor, ref_logprob: torch.FloatTensor, kl_penalty) -> torch.FloatTensor:
    """Compute KL divergence given logprob and ref_logprob.

    Copied from https://github.com/huggingface/trl/blob/main/trl/trainer/ppo_trainer.py#L1104
    See more description in http://joschu.net/blog/kl-approx.html

    Args:
        logprob: log-probabilities under the current policy.
        ref_logprob: log-probabilities under the reference policy.

    Returns:
        kl_estimate
    """
    if kl_penalty in ("kl", "k1"):
        return logprob - ref_logprob

    if kl_penalty == "abs":
        return (logprob - ref_logprob).abs()

    if kl_penalty in ("mse", "k2"):
        return 0.5 * (logprob - ref_logprob).square()

    # J. Schulman. Approximating kl divergence, 2020.
    # URL http://joschu.net/blog/kl-approx.html.
    if kl_penalty in ("low_var_kl", "k3"):
        kl = ref_logprob - logprob
        # For numerical stability
        kl = torch.clamp(kl, min=-20, max=20)
        ratio = torch.exp(kl)
        kld = (ratio - kl - 1).contiguous()
        return torch.clamp(kld, min=-10, max=10)

    if kl_penalty == "full":
        # so, here logprob and ref_logprob should contain the logits for every token in vocabulary
        raise NotImplementedError

    raise NotImplementedError
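# Illustrative sketch (not part of the original source): the estimators above
# evaluated on a single token with logprob = -1.0 and ref_logprob = -1.5,
# i.e. log-ratio = logprob - ref_logprob = 0.5:
#
#   k1 ("kl")        : 0.5
#   abs              : |0.5| = 0.5
#   k2 ("mse")       : 0.5 * 0.5^2 = 0.125
#   k3 ("low_var_kl"): with kl = ref_logprob - logprob = -0.5,
#                      exp(-0.5) - (-0.5) - 1 ≈ 0.6065 + 0.5 - 1 ≈ 0.1065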
def compute_pf_ppo_reweight_data(
    data,
    reweight_method: str = "pow",
    weight_pow: float = 2.0,
):
    """Reweight the data based on the token_level_scores.

    Args:
        data: DataProto object, containing batch, non_tensor_batch and meta_info
        reweight_method: str, choices: "pow", "max_min", "max_random"
        weight_pow: float, the power of the weight

    Returns:
        The resampled DataProto, with batch, non_tensor_batch and meta_info
        drawn according to the computed importance weights.
    """

    @torch.no_grad()
    def compute_weights(scores: torch.Tensor, reweight_method: str, weight_pow: float) -> torch.Tensor:
        """Compute importance weights for resampling based on scores.

        Args:
            scores (torch.Tensor): Tensor of scores to compute weights from.
            reweight_method (str): Method for computing weights ('pow', 'max_min', 'max_random').
            weight_pow (float): Power exponent for 'pow' method.

        Returns:
            torch.Tensor: Computed importance weights.

        Raises:
            ValueError: If reweight_method is not supported.
        """
        if reweight_method == "pow":
            weights = torch.pow(torch.abs(scores), weight_pow)
        elif reweight_method == "max_min":
            max_score = torch.max(scores)
            min_score = torch.min(scores)
            weights = torch.where((scores == max_score) | (scores == min_score), 1.0, 0.0)
        elif reweight_method == "max_random":
            max_score = torch.max(scores)
            weights = torch.where(scores == max_score, 0.4, 0.1)
        else:
            raise ValueError(f"Unsupported reweight_method: {reweight_method}")
        return weights

    scores = data.batch["token_level_scores"].sum(dim=-1)
    weights = compute_weights(scores, reweight_method, weight_pow)
    weights = torch.clamp(weights + 1e-8, min=1e-8)

    batch_size = scores.shape[0]
    sample_indices = torch.multinomial(weights, batch_size, replacement=True)

    resampled_batch = {key: tensor[sample_indices] for key, tensor in data.batch.items()}

    sample_indices_np = sample_indices.numpy()
    resampled_non_tensor_batch = {}
    for key, array in data.non_tensor_batch.items():
        if isinstance(array, np.ndarray):
            resampled_non_tensor_batch[key] = array[sample_indices_np]
        else:
            resampled_non_tensor_batch[key] = [array[i] for i in sample_indices_np]

    resampled_meta_info = {}
    for key, value in data.meta_info.items():
        if isinstance(value, list) and len(value) == batch_size:
            resampled_meta_info[key] = [value[i] for i in sample_indices_np]
        else:
            resampled_meta_info[key] = value

    from copy import deepcopy

    resampled_data = deepcopy(data)
    resampled_data.batch = type(data.batch)(resampled_batch)
    resampled_data.batch.batch_size = data.batch.batch_size
    resampled_data.non_tensor_batch = resampled_non_tensor_batch
    resampled_data.meta_info = resampled_meta_info

    return resampled_data


================================================
FILE: verl_distillation/verl/trainer/ppo/metric_utils.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Metrics related to the PPO trainer.
"""

from collections import defaultdict
from functools import partial
from typing import Any, Callable

import numpy as np
import torch

from verl import DataProto
from verl.utils.import_utils import deprecated


@deprecated("verl.utils.metric.reduce_metrics")
def reduce_metrics(metrics: dict[str, list[Any]]) -> dict[str, Any]:
    """
    Reduces a dictionary of metric lists by computing the mean of each list.

    Args:
        metrics: A dictionary mapping metric names to lists of metric values.

    Returns:
        A dictionary with the same keys but with each list replaced by its mean value.
Example: >>> metrics = {"loss": [1.0, 2.0, 3.0], "accuracy": [0.8, 0.9, 0.7]} >>> reduce_metrics(metrics) {"loss": 2.0, "accuracy": 0.8} """ from verl.utils.metric import reduce_metrics return reduce_metrics(metrics) def _compute_response_info(batch: DataProto) -> dict[str, Any]: """ Computes information about prompts and responses from a batch. This is an internal helper function that extracts masks and lengths for prompts and responses. Args: batch: A DataProto object containing batch data with responses and attention masks. Returns: A dictionary containing: - response_mask: Attention mask for the response tokens - prompt_length: Tensor of prompt lengths for each item in the batch - response_length: Tensor of response lengths for each item in the batch """ response_length = batch.batch["responses"].shape[-1] prompt_mask = batch.batch["attention_mask"][:, :-response_length] response_mask = batch.batch["attention_mask"][:, -response_length:] prompt_length = prompt_mask.sum(-1).float() response_length = response_mask.sum(-1).float() # (batch_size,) return dict( response_mask=response_mask, prompt_length=prompt_length, response_length=response_length, ) def compute_on_policy_distill_data_metrics(batch: DataProto, use_critic: bool = True) -> dict[str, Any]: """ Computes various metrics from a batch of data for PPO training. This function calculates metrics related to scores, rewards, advantages, returns, values, and sequence lengths from a batch of data. It provides statistical information (mean, max, min) for each metric category. Args: batch: A DataProto object containing batch data with token-level scores, rewards, advantages, etc. use_critic: Whether to include critic-specific metrics. Defaults to True. Returns: A dictionary of metrics including: - critic/score/mean, max, min: Statistics about sequence scores - critic/rewards/mean, max, min: Statistics about sequence rewards - critic/advantages/mean, max, min: Statistics about advantages - critic/returns/mean, max, min: Statistics about returns - critic/values/mean, max, min: Statistics about critic values (if use_critic=True) - critic/vf_explained_var: Explained variance of the value function (if use_critic=True) - response_length/mean, max, min, clip_ratio: Statistics about response lengths - prompt_length/mean, max, min, clip_ratio: Statistics about prompt lengths - num_turns/mean, max, min: Statistics about the number of multi-turn conversations """ advantages = batch.batch["advantages"] returns = batch.batch["returns"] max_response_length = batch.batch["responses"].shape[-1] prompt_mask = batch.batch["attention_mask"][:, :-max_response_length].bool() response_mask = batch.batch["response_mask"].bool() max_prompt_length = prompt_mask.size(-1) response_info = _compute_response_info(batch) prompt_length = response_info["prompt_length"] response_length = response_info["response_length"] aborted_mask = (response_length == 0).bool() non_aborted_mask = ~aborted_mask valid_adv = torch.masked_select(advantages, response_mask) valid_returns = torch.masked_select(returns, response_mask) if use_critic: values = batch.batch["values"] valid_values = torch.masked_select(values, response_mask) return_diff_var = torch.var(valid_returns - valid_values) return_var = torch.var(valid_returns) # Aborted samples and non-aborted response length statistics # response_length_non_aborted/*: statistics computed on non-aborted samples only aborted_ratio = torch.mean(aborted_mask.float()).detach().item() non_aborted_response_length = 
response_length[non_aborted_mask] if non_aborted_response_length.numel() > 0: non_aborted_response_length_mean = torch.mean(non_aborted_response_length).detach().item() non_aborted_response_length_max = torch.max(non_aborted_response_length).detach().item() non_aborted_response_length_min = torch.min(non_aborted_response_length).detach().item() non_aborted_response_length_clip_ratio = ( torch.mean(torch.eq(non_aborted_response_length, max_response_length).float()).detach().item() ) else: raise ValueError("All samples are aborted, this should not happen.") metrics = { # adv "critic/advantages/mean": torch.mean(valid_adv).detach().item(), "critic/advantages/max": torch.max(valid_adv).detach().item(), "critic/advantages/min": torch.min(valid_adv).detach().item(), "critic/advantages/std": torch.std(valid_adv).detach().item(), # returns "critic/returns/mean": torch.mean(valid_returns).detach().item(), "critic/returns/max": torch.max(valid_returns).detach().item(), "critic/returns/min": torch.min(valid_returns).detach().item(), "critic/returns/std": torch.std(valid_returns).detach().item(), **( { # values "critic/values/mean": torch.mean(valid_values).detach().item(), "critic/values/max": torch.max(valid_values).detach().item(), "critic/values/min": torch.min(valid_values).detach().item(), "critic/values/std": torch.std(valid_values).detach().item(), # vf explained var "critic/vf_explained_var": (1.0 - return_diff_var / (return_var + 1e-5)).detach().item(), } if use_critic else {} ), # response length "response_length/mean": torch.mean(response_length).detach().item(), "response_length/max": torch.max(response_length).detach().item(), "response_length/min": torch.min(response_length).detach().item(), "response_length/clip_ratio": torch.mean(torch.eq(response_length, max_response_length).float()) .detach() .item(), # response length (non-aborted only) # These statistics exclude aborted samples to avoid skew from zeros "response_length_non_aborted/mean": non_aborted_response_length_mean, "response_length_non_aborted/max": non_aborted_response_length_max, "response_length_non_aborted/min": non_aborted_response_length_min, "response_length_non_aborted/clip_ratio": non_aborted_response_length_clip_ratio, # aborted ratio # Fraction of samples whose response length is zero "response/aborted_ratio": aborted_ratio, # prompt length "prompt_length/mean": torch.mean(prompt_length).detach().item(), "prompt_length/max": torch.max(prompt_length).detach().item(), "prompt_length/min": torch.min(prompt_length).detach().item(), "prompt_length/clip_ratio": torch.mean(torch.eq(prompt_length, max_prompt_length).float()).detach().item(), } # multi-turn conversation if "__num_turns__" in batch.non_tensor_batch: num_turns = batch.non_tensor_batch["__num_turns__"] metrics["num_turns/min"] = num_turns.min() metrics["num_turns/max"] = num_turns.max() metrics["num_turns/mean"] = num_turns.mean() if "tool_call_counts" in batch.non_tensor_batch: tool_call_counts = batch.non_tensor_batch["tool_call_counts"] metrics["tool_call_counts/min"] = tool_call_counts.min() metrics["tool_call_counts/max"] = tool_call_counts.max() metrics["tool_call_counts/mean"] = tool_call_counts.mean() return metrics def compute_data_metrics(batch: DataProto, use_critic: bool = True) -> dict[str, Any]: """ Computes various metrics from a batch of data for PPO training. This function calculates metrics related to scores, rewards, advantages, returns, values, and sequence lengths from a batch of data. 
It provides statistical information (mean, max, min) for each metric category. Args: batch: A DataProto object containing batch data with token-level scores, rewards, advantages, etc. use_critic: Whether to include critic-specific metrics. Defaults to True. Returns: A dictionary of metrics including: - critic/score/mean, max, min: Statistics about sequence scores - critic/rewards/mean, max, min: Statistics about sequence rewards - critic/advantages/mean, max, min: Statistics about advantages - critic/returns/mean, max, min: Statistics about returns - critic/values/mean, max, min: Statistics about critic values (if use_critic=True) - critic/vf_explained_var: Explained variance of the value function (if use_critic=True) - response_length/mean, max, min, clip_ratio: Statistics about response lengths - prompt_length/mean, max, min, clip_ratio: Statistics about prompt lengths - num_turns/mean, max, min: Statistics about the number of multi-turn conversations """ sequence_score = batch.batch["token_level_scores"].sum(-1) sequence_reward = batch.batch["token_level_rewards"].sum(-1) advantages = batch.batch["advantages"] returns = batch.batch["returns"] max_response_length = batch.batch["responses"].shape[-1] prompt_mask = batch.batch["attention_mask"][:, :-max_response_length].bool() response_mask = batch.batch["response_mask"].bool() max_prompt_length = prompt_mask.size(-1) response_info = _compute_response_info(batch) prompt_length = response_info["prompt_length"] response_length = response_info["response_length"] aborted_mask = (response_length == 0).bool() non_aborted_mask = ~aborted_mask non_aborted_sequence_score = sequence_score[non_aborted_mask] non_aborted_sequence_reward = sequence_reward[non_aborted_mask] score_mean = torch.mean(non_aborted_sequence_score).detach().item() score_max = torch.max(non_aborted_sequence_score).detach().item() score_min = torch.min(non_aborted_sequence_score).detach().item() reward_mean = torch.mean(non_aborted_sequence_reward).detach().item() reward_max = torch.max(non_aborted_sequence_reward).detach().item() reward_min = torch.min(non_aborted_sequence_reward).detach().item() valid_adv = torch.masked_select(advantages, response_mask) valid_returns = torch.masked_select(returns, response_mask) if use_critic: values = batch.batch["values"] valid_values = torch.masked_select(values, response_mask) return_diff_var = torch.var(valid_returns - valid_values) return_var = torch.var(valid_returns) # Aborted samples and non-aborted response length statistics # response_length_non_aborted/*: statistics computed on non-aborted samples only aborted_ratio = torch.mean(aborted_mask.float()).detach().item() non_aborted_response_length = response_length[non_aborted_mask] if non_aborted_response_length.numel() > 0: non_aborted_response_length_mean = torch.mean(non_aborted_response_length).detach().item() non_aborted_response_length_max = torch.max(non_aborted_response_length).detach().item() non_aborted_response_length_min = torch.min(non_aborted_response_length).detach().item() non_aborted_response_length_clip_ratio = ( torch.mean(torch.eq(non_aborted_response_length, max_response_length).float()).detach().item() ) else: raise ValueError("All samples are aborted, this should not happen.") metrics = { # score "critic/score/mean": score_mean, "critic/score/max": score_max, "critic/score/min": score_min, # reward "critic/rewards/mean": reward_mean, "critic/rewards/max": reward_max, "critic/rewards/min": reward_min, # adv "critic/advantages/mean": 
torch.mean(valid_adv).detach().item(), "critic/advantages/max": torch.max(valid_adv).detach().item(), "critic/advantages/min": torch.min(valid_adv).detach().item(), # returns "critic/returns/mean": torch.mean(valid_returns).detach().item(), "critic/returns/max": torch.max(valid_returns).detach().item(), "critic/returns/min": torch.min(valid_returns).detach().item(), **( { # values "critic/values/mean": torch.mean(valid_values).detach().item(), "critic/values/max": torch.max(valid_values).detach().item(), "critic/values/min": torch.min(valid_values).detach().item(), # vf explained var "critic/vf_explained_var": (1.0 - return_diff_var / (return_var + 1e-5)).detach().item(), } if use_critic else {} ), # response length "response_length/mean": torch.mean(response_length).detach().item(), "response_length/max": torch.max(response_length).detach().item(), "response_length/min": torch.min(response_length).detach().item(), "response_length/clip_ratio": torch.mean(torch.eq(response_length, max_response_length).float()) .detach() .item(), # response length (non-aborted only) # These statistics exclude aborted samples to avoid skew from zeros "response_length_non_aborted/mean": non_aborted_response_length_mean, "response_length_non_aborted/max": non_aborted_response_length_max, "response_length_non_aborted/min": non_aborted_response_length_min, "response_length_non_aborted/clip_ratio": non_aborted_response_length_clip_ratio, # aborted ratio # Fraction of samples whose response length is zero "response/aborted_ratio": aborted_ratio, # prompt length "prompt_length/mean": torch.mean(prompt_length).detach().item(), "prompt_length/max": torch.max(prompt_length).detach().item(), "prompt_length/min": torch.min(prompt_length).detach().item(), "prompt_length/clip_ratio": torch.mean(torch.eq(prompt_length, max_prompt_length).float()).detach().item(), } # multi-turn conversation if "__num_turns__" in batch.non_tensor_batch: num_turns = batch.non_tensor_batch["__num_turns__"] metrics["num_turns/min"] = num_turns.min() metrics["num_turns/max"] = num_turns.max() metrics["num_turns/mean"] = num_turns.mean() if "tool_call_counts" in batch.non_tensor_batch: tool_call_counts = batch.non_tensor_batch["tool_call_counts"] metrics["tool_call_counts/min"] = tool_call_counts.min() metrics["tool_call_counts/max"] = tool_call_counts.max() metrics["tool_call_counts/mean"] = tool_call_counts.mean() return metrics def compute_timing_metrics(batch: DataProto, timing_raw: dict[str, float]) -> dict[str, Any]: """ Computes timing metrics for different processing stages in PPO training. This function calculates both raw timing metrics (in seconds) and per-token timing metrics (in milliseconds) for various processing stages like generation, reference computation, value computation, advantage computation, and model updates. Args: batch: A DataProto object containing batch data with responses and attention masks. timing_raw: A dictionary mapping stage names to their execution times in seconds. 
Returns: A dictionary containing: - timing_s/{name}: Raw timing in seconds for each stage - timing_per_token_ms/{name}: Per-token timing in milliseconds for each stage Note: Different stages use different token counts for normalization: - "gen" uses only response tokens - Other stages ("ref", "values", "adv", "update_critic", "update_actor") use all tokens (prompt + response) """ response_info = _compute_response_info(batch) num_prompt_tokens = torch.sum(response_info["prompt_length"]).item() num_response_tokens = torch.sum(response_info["response_length"]).item() num_overall_tokens = num_prompt_tokens + num_response_tokens num_tokens_of_section = { "gen": num_response_tokens, **{name: num_overall_tokens for name in ["ref", "values", "adv", "update_critic", "update_actor"]}, } return { **{f"timing_s/{name}": value for name, value in timing_raw.items()}, **{ f"timing_per_token_ms/{name}": timing_raw[name] * 1000 / num_tokens_of_section[name] for name in set(num_tokens_of_section.keys()) & set(timing_raw.keys()) }, } def compute_throughout_metrics(batch: DataProto, timing_raw: dict[str, float], n_gpus: int) -> dict[str, Any]: """ Computes throughput metrics for PPO training. This function calculates performance metrics related to token processing speed, including the total number of tokens processed, time per step, and throughput (tokens per second per GPU). Args: batch: A DataProto object containing batch data with meta information about token counts. timing_raw: A dictionary mapping stage names to their execution times in seconds. Must contain a "step" key with the total step time. n_gpus: Number of GPUs used for training. Returns: A dictionary containing: - perf/total_num_tokens: Total number of tokens processed in the batch - perf/time_per_step: Time taken for the step in seconds - perf/throughput: Tokens processed per second per GPU Note: The throughput is calculated as total_tokens / (time * n_gpus) to normalize across different GPU counts. """ total_num_tokens = sum(batch.meta_info["global_token_num"]) time = timing_raw["step"] # estimated_flops, promised_flops = flops_function.estimate_flops(num_tokens, time) # f'Actual TFLOPs/s/GPU​': estimated_flops/(n_gpus), # f'Theoretical TFLOPs/s/GPU​': promised_flops, return { "perf/total_num_tokens": total_num_tokens, "perf/time_per_step": time, "perf/throughput": total_num_tokens / (time * n_gpus), } def bootstrap_metric( data: list[Any], subset_size: int, reduce_fns: list[Callable[[np.ndarray], float]], n_bootstrap: int = 1000, seed: int = 42, ) -> list[tuple[float, float]]: """ Performs bootstrap resampling to estimate statistics of metrics. This function uses bootstrap resampling to estimate the mean and standard deviation of metrics computed by the provided reduction functions on random subsets of the data. Args: data: List of data points to bootstrap from. subset_size: Size of each bootstrap sample. reduce_fns: List of functions that compute a metric from a subset of data. n_bootstrap: Number of bootstrap iterations. Defaults to 1000. seed: Random seed for reproducibility. Defaults to 42. Returns: A list of tuples, where each tuple contains (mean, std) for a metric corresponding to each reduction function in reduce_fns. 
Example: >>> data = [1, 2, 3, 4, 5] >>> reduce_fns = [np.mean, np.max] >>> bootstrap_metric(data, 3, reduce_fns) [(3.0, 0.5), (4.5, 0.3)] # Example values """ np.random.seed(seed) bootstrap_metric_lsts = [[] for _ in range(len(reduce_fns))] for _ in range(n_bootstrap): bootstrap_idxs = np.random.choice(len(data), size=subset_size, replace=True) bootstrap_data = [data[i] for i in bootstrap_idxs] for i, reduce_fn in enumerate(reduce_fns): bootstrap_metric_lsts[i].append(reduce_fn(bootstrap_data)) return [(np.mean(lst), np.std(lst)) for lst in bootstrap_metric_lsts] def calc_maj_val(data: list[dict[str, Any]], vote_key: str, val_key: str) -> float: """ Calculate a value based on majority voting. This function identifies the most common value for a specified vote key in the data, then returns the corresponding value for that majority vote. Args: data: List of dictionaries, where each dictionary contains both vote_key and val_key. vote_key: The key in each dictionary used for voting/counting. val_key: The key in each dictionary whose value will be returned for the majority vote. Returns: The value associated with the most common vote. Example: >>> data = [ ... {"pred": "A", "val": 0.9}, ... {"pred": "B", "val": 0.8}, ... {"pred": "A", "val": 0.7} ... ] >>> calc_maj_val(data, vote_key="pred", val_key="val") 0.9 # Returns the first "val" for the majority vote "A" """ vote2vals = defaultdict(list) for d in data: vote2vals[d[vote_key]].append(d[val_key]) vote2cnt = {k: len(v) for k, v in vote2vals.items()} maj_vote = max(vote2cnt, key=vote2cnt.get) maj_val = vote2vals[maj_vote][0] return maj_val def process_validation_metrics( data_sources: list[str], sample_uids: list[str], infos_dict: dict[str, list[Any]], seed: int = 42 ) -> dict[str, dict[str, dict[str, float]]]: """ Process validation metrics into a structured format with statistical analysis. This function organizes validation metrics by data source and prompt, then computes various statistical measures including means, standard deviations, best/worst values, and majority voting results. It also performs bootstrap sampling to estimate statistics for different sample sizes. Args: data_sources: List of data source identifiers for each sample. sample_uids: List of sample uids corresponding to each sample. infos_dict: Dictionary mapping variable names to lists of values for each sample. seed: Random seed for bootstrap sampling. Defaults to 42. 
Returns: A nested dictionary with the structure: { data_source: { variable_name: { metric_name: value } } } Where metric_name includes: - "mean@N": Mean value across N samples - "std@N": Standard deviation across N samples - "best@N/mean": Mean of the best values in bootstrap samples of size N - "best@N/std": Standard deviation of the best values in bootstrap samples - "worst@N/mean": Mean of the worst values in bootstrap samples - "worst@N/std": Standard deviation of the worst values in bootstrap samples - "maj@N/mean": Mean of majority voting results in bootstrap samples (if "pred" exists) - "maj@N/std": Standard deviation of majority voting results (if "pred" exists) Example: >>> data_sources = ["source1", "source1", "source2"] >>> sample_uids = ["uid1", "uid1", "uid2"] >>> infos_dict = {"score": [0.8, 0.9, 0.7], "pred": ["A", "A", "B"]} >>> result = process_validation_metrics(data_sources, sample_uids, infos_dict) >>> # result will contain statistics for each data source and variable """ # Group metrics by data source, prompt and variable data_src2uid2var2vals = defaultdict(lambda: defaultdict(lambda: defaultdict(list))) for sample_idx, data_source in enumerate(data_sources): uid = sample_uids[sample_idx] var2vals = data_src2uid2var2vals[data_source][uid] for var_name, var_vals in infos_dict.items(): var2vals[var_name].append(var_vals[sample_idx]) # Calculate metrics for each group data_src2uid2var2metric = defaultdict(lambda: defaultdict(lambda: defaultdict(dict))) for data_source, uid2var2vals in data_src2uid2var2vals.items(): for uid, var2vals in uid2var2vals.items(): for var_name, var_vals in var2vals.items(): if isinstance(var_vals[0], str): continue metric = {} n_resps = len(var_vals) metric[f"mean@{n_resps}"] = np.mean(var_vals) if n_resps > 1: metric[f"std@{n_resps}"] = np.std(var_vals) ns = [] n = 2 while n < n_resps: ns.append(n) n *= 2 ns.append(n_resps) for n in ns: [(bon_mean, bon_std), (won_mean, won_std)] = bootstrap_metric( data=var_vals, subset_size=n, reduce_fns=[np.max, np.min], seed=seed ) metric[f"best@{n}/mean"], metric[f"best@{n}/std"] = bon_mean, bon_std metric[f"worst@{n}/mean"], metric[f"worst@{n}/std"] = won_mean, won_std if var2vals.get("pred", None) is not None: vote_data = [ {"val": val, "pred": pred} for val, pred in zip(var_vals, var2vals["pred"], strict=True) ] [(maj_n_mean, maj_n_std)] = bootstrap_metric( data=vote_data, subset_size=n, reduce_fns=[partial(calc_maj_val, vote_key="pred", val_key="val")], seed=seed, ) metric[f"maj@{n}/mean"], metric[f"maj@{n}/std"] = maj_n_mean, maj_n_std data_src2uid2var2metric[data_source][uid][var_name] = metric # Aggregate metrics across uids data_src2var2metric2uid_vals = defaultdict(lambda: defaultdict(lambda: defaultdict(list))) for data_source, uid2var2metric in data_src2uid2var2metric.items(): for uid, var2metric in uid2var2metric.items(): for var_name, metric in var2metric.items(): for metric_name, metric_val in metric.items(): data_src2var2metric2uid_vals[data_source][var_name][metric_name].append(metric_val) data_src2var2metric2val = defaultdict(lambda: defaultdict(lambda: defaultdict(float))) for data_source, var2metric2uid_vals in data_src2var2metric2uid_vals.items(): for var_name, metric2uid_vals in var2metric2uid_vals.items(): for metric_name, uid_vals in metric2uid_vals.items(): data_src2var2metric2val[data_source][var_name][metric_name] = np.mean(uid_vals) return data_src2var2metric2val ================================================ FILE: verl_distillation/verl/trainer/ppo/mismatch_helper.py 
================================================
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Rollout Importance Sampling (IS) Helper Module

This module handles importance sampling weight computation for correcting
distribution mismatch between rollout policy (e.g., vLLM BFloat16) and
training policy (e.g., FSDP FP32).

Key Features:
1. Three aggregation levels: token, sequence, geometric
2. Two handling modes: truncate, mask
3. Per-token veto mechanism for catastrophic outliers
4. Memory-efficient computation to prevent CUDA OOM
5. Comprehensive metrics tracking

Usage Notes:
- compute_rollout_importance_weights() computes both IS weights and mismatch metrics
- Used in ray_trainer.py via compute_rollout_importance_weights_and_add_to_batch()
- Also used in dp_actor.py for distributed worker computations
- compute_mismatch_metrics() is called internally by compute_rollout_importance_weights()

References:
- When Speed Kills Stability: https://yingru.notion.site/When-Speed-Kills-Stability-271211a558b7808d8b12d403fd15edda
- Off-policy RL: https://fengyao.notion.site/off-policy-rl
"""

from typing import Any, Optional

import torch

import verl.utils.torch_functional as verl_F
from verl.protocol import DataProto
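# Minimal usage sketch (not part of the original source): calling the helper
# defined below with token-level truncated IS (TIS). Tensor shapes and the
# threshold value are illustrative only.
#
#   weights_proto, new_response_mask, metrics = compute_rollout_importance_weights(
#       old_log_prob=old_log_prob,          # (bs, resp_len), training policy (FSDP)
#       rollout_log_prob=rollout_log_prob,  # (bs, resp_len), rollout policy (vLLM)
#       response_mask=response_mask,        # (bs, resp_len), 1 = valid token
#       rollout_is_level="token",
#       rollout_is_mode="truncate",
#       rollout_is_threshold=2.0,           # clamp per-token ratios at 2.0
#   )
#   is_weights = weights_proto.batch["rollout_is_weights"]  # pass to the policy loss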
def compute_rollout_importance_weights(
    old_log_prob: torch.Tensor,
    rollout_log_prob: torch.Tensor,
    response_mask: torch.Tensor,
    rollout_is_level: str = "token",
    rollout_is_mode: str = "truncate",
    rollout_is_threshold: Optional[float] = None,
    rollout_is_threshold_lower: Optional[float] = None,
    rollout_is_veto_threshold: Optional[float] = None,
) -> tuple[Optional[DataProto], torch.Tensor, dict[str, Any]]:
    """Compute importance sampling weights and rejection mask for rollout-training mismatch.

    This function computes IS weights to correct for distribution mismatch between
    rollout and training policies, and applies rejection sampling for outliers.

    Key Design: Separation of IS Weights and Rejection Sampling
    - IS weights (rollout_is_weights): Ratios π_train/π_rollout with processing applied:
      * Safety-bounded to prevent overflow:
        - Token level: exp(clamp(log_ratio, -20, 20)) per token
        - Sequence level: exp(clamp(sum(log_ratio), -20, 20)) broadcast to all tokens
        - Geometric level: exp(clamp(mean(log_ratio), -20, 20)) broadcast to all tokens
      * Truncate mode: upper clamped via .clamp(max=upper_threshold)
      * Mask mode: safety-bounded ratios preserved (no threshold clamping)
      * All modes: zeroed at padding positions
      Used for policy gradient calculations
    - Response mask (modified_response_mask): Has rejection applied (mask mode + veto)
      Used for loss aggregation to exclude rejected samples from training

    Reference:
        When Speed Kills Stability:
        https://yingru.notion.site/When-Speed-Kills-Stability-271211a558b7808d8b12d403fd15edda

    Memory-efficient implementation:
    - Log-space computation to prevent overflow
    - Safety bounds (exp(±20)) on all exponentiations
    - Metrics computed without large intermediate tensors

    Args:
        old_log_prob: Log probs from training policy (FSDP FP32), shape (batch_size, seq_length)
        rollout_log_prob: Log probs from rollout policy (vLLM BF16), shape (batch_size, seq_length)
        response_mask: Valid token mask (1=valid, 0=padding), shape (batch_size, seq_length)
        rollout_is_level: IS weight aggregation level
            - "token": Per-token ratios ρ_t = π_train(t)/π_rollout(t) (biased but low variance)
            - "sequence": Sequence product ρ_seq = ∏ρ_t (unbiased but high variance)
            - "geometric": Geometric mean ρ_geo = (∏ρ_t)^(1/T) (experimental trade-off)
        rollout_is_mode: Treatment of outlier IS weights
            - "truncate": Clamp weights at upper threshold only. No rejection for outlier
              ratios, but veto can still apply (TIS)
            - "mask": Reject tokens/sequences outside [lower, upper] via response_mask
              (MIS/rejection sampling)
        rollout_is_threshold: Upper threshold for IS weights (required, e.g., 2.0)
        rollout_is_threshold_lower: Lower threshold for mask mode (if None, defaults to 1/upper)
        rollout_is_veto_threshold: Catastrophic token threshold. If any token has
            ratio < this, reject entire sequence. Applied independently of rollout_is_mode.
            If None, veto disabled. Default None.

    Returns:
        Tuple of (weights_proto, modified_response_mask, metrics):

        weights_proto: DataProto with processed IS weights, key "rollout_is_weights",
            shape (batch_size, seq_length). Processing applied:
            - Safety-bounded to [exp(-20), exp(20)] ≈ [2e-9, 5e8]:
              * Token level: bounds per-token ratios
              * Sequence/geometric level: bounds aggregated ratio (broadcast to all tokens)
            - Truncate mode: upper clamped via .clamp(max=upper_threshold)
            - Mask mode: safety-bounded ratios preserved (no threshold clamping)
            - All modes: zeroed at padding positions (response_mask == 0)
            None if rollout_is_threshold is None.

        modified_response_mask: Response mask with rejection applied:
            - truncate mode: unchanged for outlier ratios, but veto rejection still applied
            - mask mode: tokens outside [lower, upper] masked to 0
            - veto: sequences with catastrophic tokens masked to 0 (applied in both modes)
            Shape (batch_size, seq_length).
metrics: Dict of IS and mismatch metrics, all scalars with "mismatch/" prefix """ if rollout_is_threshold is None: return None, response_mask, {} # Parse thresholds: if lower not specified, use 1/upper (reciprocal) upper_threshold = rollout_is_threshold if rollout_is_threshold_lower is not None: lower_threshold = rollout_is_threshold_lower else: # Default: lower = 1/upper (reciprocal) lower_threshold = 1.0 / upper_threshold # Step 1: Compute raw importance weights based on the specified level log_ratio = old_log_prob - rollout_log_prob # Pre-compute log thresholds device = old_log_prob.device log_threshold_upper = torch.log(torch.tensor(upper_threshold, device=device)) log_threshold_lower = torch.log(torch.tensor(lower_threshold, device=device)) # Safety bound to prevent numerical overflow (exp(20) ≈ 485M) SAFETY_BOUND = 20.0 # Store unclamped values in log-space for accurate metrics if rollout_is_level == "token": # Token-level IS: π_train(a|s) / π_rollout(a|s) per token log_ratio_for_metrics = log_ratio # Apply safety bound to prevent overflow log_ratio_safe = torch.clamp(log_ratio, min=-SAFETY_BOUND, max=SAFETY_BOUND) rollout_is_weights = torch.exp(log_ratio_safe) elif rollout_is_level == "sequence": # Sequence-level IS: π_train(y|x) / π_rollout(y|x) for entire sequence # Product of token ratios: exp(Σ log(π_train/π_rollout)) log_ratio_sum = verl_F.masked_sum(log_ratio, response_mask, axis=-1).unsqueeze(-1) log_ratio_for_metrics = log_ratio_sum # Store for metrics # Apply safety bound to prevent overflow log_ratio_sum_safe = torch.clamp(log_ratio_sum, min=-SAFETY_BOUND, max=SAFETY_BOUND) rollout_is_weights = torch.exp(log_ratio_sum_safe).expand_as(old_log_prob) elif rollout_is_level == "geometric": # Geometric mean IS: (∏ π_train/π_rollout)^(1/T) # Equivalent to exp(mean(log(π_train/π_rollout))) log_ratio_mean = verl_F.masked_mean(log_ratio, response_mask, axis=-1).unsqueeze(-1) log_ratio_for_metrics = log_ratio_mean # Store for metrics # Geometric mean rarely explodes due to averaging, but apply safety bound anyway log_ratio_mean_safe = torch.clamp(log_ratio_mean, min=-SAFETY_BOUND, max=SAFETY_BOUND) rollout_is_weights = torch.exp(log_ratio_mean_safe).expand_as(old_log_prob) else: raise ValueError(f"Invalid rollout_is_level: {rollout_is_level}. 
Must be 'token', 'sequence', or 'geometric'.") # Step 1.5: Apply per-token veto check in log space (memory efficient) if rollout_is_veto_threshold is not None: log_veto_threshold = torch.log(torch.tensor(rollout_is_veto_threshold, device=device)) # Check if any token ratio is below veto threshold (in log space) # log(π_train/π_rollout) < log(veto_threshold) ⟺ π_train/π_rollout < veto_threshold catastrophic_tokens = (log_ratio < log_veto_threshold) & response_mask.bool() # For each sequence, check if it has any catastrophic token # Use broadcasting instead of expand_as to save memory has_catastrophic = catastrophic_tokens.any(dim=-1, keepdim=True) # Create veto mask: 0 if sequence has catastrophic token, 1 otherwise veto_mask = (~has_catastrophic).float() else: # No veto mechanism catastrophic_tokens = torch.zeros_like(response_mask, dtype=torch.bool) has_catastrophic = torch.zeros((old_log_prob.size(0), 1), dtype=torch.bool, device=device) veto_mask = torch.ones((old_log_prob.size(0), 1), dtype=torch.float32, device=device) # Step 2: Compute comprehensive metrics metrics = compute_is_metrics( rollout_is_weights=rollout_is_weights, log_ratio_for_metrics=log_ratio_for_metrics, response_mask=response_mask, rollout_is_level=rollout_is_level, rollout_is_threshold=upper_threshold, rollout_is_threshold_lower=lower_threshold, log_threshold_upper=log_threshold_upper, log_threshold_lower=log_threshold_lower, has_catastrophic=has_catastrophic, catastrophic_tokens=catastrophic_tokens, SAFETY_BOUND=SAFETY_BOUND, ) # Step 3: Apply outlier handling and rejection sampling # Key design principle: IS weights and rejection are separate mechanisms # - rollout_is_weights: IS weight ratios with mode-specific processing # * Truncate mode: upper clamped to prevent extreme values # * Mask mode: safety-bounded ratios preserved (no threshold clamping, rejection via mask) # Used for policy gradient calculations # - modified_response_mask: Has rejection applied (excludes outliers from training) # Used for loss denominator: ensures rejected samples don't dilute gradients if rollout_is_mode == "truncate": # Truncated IS (TIS): clamp weights to prevent extreme importance ratios # Weights are modified by clamping; no rejection via mask for outlier ratios # Veto rejection (if enabled) will still be applied to modified_response_mask below rollout_is_weights = rollout_is_weights.clamp(max=upper_threshold) modified_response_mask = response_mask # Unchanged for outlier ratios (veto applied later) elif rollout_is_mode == "mask": # Masked IS (MIS): rejection sampling for outlier IS weights # Reject tokens/sequences with IS ratios outside [lower, upper] via response_mask # IS weights themselves are NOT threshold-clamped (remain safety-bounded only) mask = (rollout_is_weights >= lower_threshold) & (rollout_is_weights <= upper_threshold) mask = mask.float() # Compute rejection rate metrics metrics["rollout_is_masked_fraction"] = verl_F.masked_mean(1 - mask, response_mask) if rollout_is_level in ["sequence", "geometric"]: # Sequence-level: all tokens have same weight, check first token metrics["rollout_is_seq_masked_fraction"] = (1 - mask[:, 0]).mean() else: # Token-level: sequence rejected if ANY token is rejected seq_has_masked = verl_F.masked_sum(1 - mask, response_mask, axis=-1) > 0 metrics["rollout_is_seq_masked_fraction"] = seq_has_masked.float().mean() # Apply rejection via response_mask (NOT by clamping IS weights) modified_response_mask = response_mask * mask # rollout_is_weights kept as safety-bounded ratios (no threshold 
clamping) else: raise ValueError(f"Invalid rollout_is_mode: {rollout_is_mode}. Must be 'truncate' or 'mask'.") # Apply veto: reject entire sequences with catastrophic tokens (ratio < veto_threshold) # Veto is independent of mode - it applies to modified_response_mask after mode-specific handling modified_response_mask = modified_response_mask * veto_mask # Note: rollout_is_weights unaffected by veto (already clamped in truncate mode, or kept as-is in mask mode) # Zero out padding positions in IS weights for correct aggregation # This is different from rejection - padding must be zeroed regardless of mode rollout_is_weights = rollout_is_weights * response_mask # Wrap in DataProto for consistency with worker methods rollout_is_weights_proto = DataProto.from_dict(tensors={"rollout_is_weights": rollout_is_weights}) # Compute mismatch metrics (KL, PPL, etc.) and merge with IS metrics mismatch_metrics = compute_mismatch_metrics( old_log_prob=old_log_prob, rollout_log_prob=rollout_log_prob, response_mask=response_mask ) metrics.update(mismatch_metrics) # Convert all tensor metrics to scalars for logging # Note: No need to detach since old_log_prob and rollout_log_prob are computed with torch.no_grad() metrics_scalar = {} for key, value in metrics.items(): if isinstance(value, torch.Tensor): metrics_scalar[f"mismatch/{key}"] = value.item() else: metrics_scalar[f"mismatch/{key}"] = value return rollout_is_weights_proto, modified_response_mask, metrics_scalar def compute_is_metrics( rollout_is_weights: torch.Tensor, log_ratio_for_metrics: torch.Tensor, response_mask: torch.Tensor, rollout_is_level: str, rollout_is_threshold: float, rollout_is_threshold_lower: float, log_threshold_upper: torch.Tensor, log_threshold_lower: torch.Tensor, has_catastrophic: torch.Tensor, catastrophic_tokens: torch.Tensor, SAFETY_BOUND: float, ) -> dict[str, Any]: """Compute comprehensive metrics for importance sampling weights. Reference: When Speed Kills Stability: https://yingru.notion.site/When-Speed-Kills-Stability-271211a558b7808d8b12d403fd15edda This function computes metrics using a mix of true unclamped values (for max/min/fractions in sequence/geometric mode via log-space) and safety-clamped values (for mean/std/ESS) to balance accuracy with numerical stability and avoid overflow. 
""" # Validate that we have at least one valid sample assert response_mask.any(), "Expected at least one valid sample in response_mask" metrics = {} device = rollout_is_weights.device # Track veto statistics metrics["rollout_is_veto_fraction"] = has_catastrophic.float().mean() metrics["rollout_is_catastrophic_token_fraction"] = verl_F.masked_mean(catastrophic_tokens.float(), response_mask) # Compute metrics based on IS level if rollout_is_level in ["sequence", "geometric"]: # For sequence/geometric, compute true statistics from log-space # This reflects the actual distribution before clamping # True max/min in log space log_max = log_ratio_for_metrics.max() log_min = log_ratio_for_metrics.min() # Convert to regular space with safety bound metrics["rollout_is_max"] = torch.exp(torch.clamp(log_max, max=SAFETY_BOUND)) metrics["rollout_is_min"] = torch.exp(log_min) # Mean uses clamped weights to avoid overflow metrics["rollout_is_mean"] = verl_F.masked_mean(rollout_is_weights, response_mask) # Compute fraction exceeding threshold in log space (accurate) exceeds_upper = log_ratio_for_metrics > log_threshold_upper below_lower = log_ratio_for_metrics < log_threshold_lower if rollout_is_level == "sequence": # For sequence level, all tokens in a sequence have the same weight metrics["rollout_is_ratio_fraction_high"] = exceeds_upper.float().mean() metrics["rollout_is_ratio_fraction_low"] = below_lower.float().mean() else: # geometric # Need to expand to match token dimensions exceeds_upper_expanded = exceeds_upper.expand_as(response_mask) below_lower_expanded = below_lower.expand_as(response_mask) metrics["rollout_is_ratio_fraction_high"] = verl_F.masked_mean( exceeds_upper_expanded.float(), response_mask ) metrics["rollout_is_ratio_fraction_low"] = verl_F.masked_mean(below_lower_expanded.float(), response_mask) else: # Token-level: compute directly from weights metrics["rollout_is_mean"] = verl_F.masked_mean(rollout_is_weights, response_mask) # Fraction exceeding thresholds rollout_is_above_threshold = rollout_is_weights > rollout_is_threshold rollout_is_below_threshold = rollout_is_weights < rollout_is_threshold_lower metrics["rollout_is_ratio_fraction_high"] = verl_F.masked_mean( rollout_is_above_threshold.float(), response_mask ) metrics["rollout_is_ratio_fraction_low"] = verl_F.masked_mean(rollout_is_below_threshold.float(), response_mask) # Max/min for token level mask_bool = response_mask.bool() metrics["rollout_is_max"] = rollout_is_weights.masked_fill(~mask_bool, float("-inf")).max() metrics["rollout_is_min"] = rollout_is_weights.masked_fill(~mask_bool, float("inf")).min() # Compute standard deviation using clamped weights to avoid overflow mask_count = response_mask.sum() if mask_count > 1: # Use clamped weights for variance to avoid squaring huge values weights_for_std = rollout_is_weights.clamp(min=rollout_is_threshold_lower, max=rollout_is_threshold) # Use mean from clamped weights for consistency mean_clamped = verl_F.masked_mean(weights_for_std, response_mask) rollout_is_var = verl_F.masked_mean(weights_for_std.square(), response_mask) - mean_clamped.square() metrics["rollout_is_std"] = torch.sqrt(torch.clamp(rollout_is_var, min=0.0)) else: metrics["rollout_is_std"] = torch.tensor(0.0, device=device) # Effective sample size (use clamped weights to avoid overflow) weights_for_ess = rollout_is_weights.clamp(min=rollout_is_threshold_lower, max=rollout_is_threshold) mean_for_ess = verl_F.masked_mean(weights_for_ess, response_mask) is_weights_normalized = weights_for_ess / (mean_for_ess + 
1e-8) metrics["rollout_is_eff_sample_size"] = 1.0 / verl_F.masked_mean(is_weights_normalized.square(), response_mask) # Per-sequence breakdown metrics if rollout_is_weights.dim() > 1: # Compute mean IS weight per sequence seq_mean_weights = verl_F.masked_mean(rollout_is_weights, response_mask, axis=-1) # Per-sequence statistics metrics["rollout_is_seq_mean"] = seq_mean_weights.mean() metrics["rollout_is_seq_std"] = ( seq_mean_weights.std() if seq_mean_weights.numel() > 1 else torch.tensor(0.0, device=device) ) metrics["rollout_is_seq_max"] = seq_mean_weights.max() metrics["rollout_is_seq_min"] = seq_mean_weights.min() # Identify most problematic sequences seq_deviation = (seq_mean_weights - 1.0).abs() metrics["rollout_is_seq_max_deviation"] = seq_deviation.max() # Fraction of sequences with high IS weights metrics["rollout_is_seq_fraction_high"] = (seq_mean_weights > rollout_is_threshold).float().mean() metrics["rollout_is_seq_fraction_low"] = (seq_mean_weights < rollout_is_threshold_lower).float().mean() return metrics def compute_mismatch_metrics( old_log_prob: torch.Tensor, rollout_log_prob: Optional[torch.Tensor], response_mask: torch.Tensor, ) -> dict[str, Any]: """Compute training-inference mismatch metrics (helper function). This helper function operates on raw tensors and is used internally by: - compute_rollout_importance_weights() in this module (automatically included) - Tests (test_rollout_is.py, test_rollout_is_integration.py) These metrics help diagnose the mismatch between the rollout policy (e.g., vLLM) and the training policy (e.g., FSDP), which can cause training instability. Key metrics: - mismatch_kl: Direct KL divergence estimator KL(π_rollout || π_training) - mismatch_k3_kl: K3 KL estimator for stability (more stable for small KL) - training_ppl: Perplexity of training policy - rollout_ppl: Perplexity of rollout policy - log_ppl_diff: Difference in log perplexities - ppl_ratio: Ratio of training PPL to rollout PPL Args: old_log_prob: Log probabilities from training policy, shape (batch_size, seq_length) rollout_log_prob: Log probabilities from rollout policy, shape (batch_size, seq_length) response_mask: Mask for valid tokens, shape (batch_size, seq_length) Returns: Dictionary of mismatch metrics (without prefix) Reference: - When Speed Kills Stability: https://yingru.notion.site/When-Speed-Kills-Stability-271211a558b7808d8b12d403fd15edda """ # Validate that we have at least one valid token assert response_mask.any(), "Expected at least one valid token in response_mask" metrics = {} # 1. Training policy perplexity (always available) # Formula: exp(-1/|T| * Σ log π_training(y_t|y_1 for different models. """ for resource_pool_name, process_on_nodes in self.resource_pool_spec.items(): # max_colocate_count means the number of WorkerGroups (i.e. processes) in each RayResourcePool # For FSDP backend, we recommend using max_colocate_count=1 that merge all WorkerGroups into one. 
# For Megatron backend, we recommend using max_colocate_count>1 # that can utilize different WorkerGroups for different models resource_pool = RayResourcePool( process_on_nodes=process_on_nodes, use_gpu=True, max_colocate_count=1, name_prefix=resource_pool_name ) self.resource_pool_dict[resource_pool_name] = resource_pool self._check_resource_available() def get_resource_pool(self, role: Role) -> RayResourcePool: """Get the resource pool of the worker_cls""" return self.resource_pool_dict[self.mapping[role]] def get_n_gpus(self) -> int: """Get the number of gpus in this cluster.""" return sum([n_gpus for process_on_nodes in self.resource_pool_spec.values() for n_gpus in process_on_nodes]) def _check_resource_available(self): """Check if the resource pool can be satisfied in this ray cluster.""" node_available_resources = ray._private.state.available_resources_per_node() node_available_gpus = { node: node_info.get("GPU", 0) if "GPU" in node_info else node_info.get("NPU", 0) for node, node_info in node_available_resources.items() } # check total required gpus can be satisfied total_available_gpus = sum(node_available_gpus.values()) total_required_gpus = sum( [n_gpus for process_on_nodes in self.resource_pool_spec.values() for n_gpus in process_on_nodes] ) if total_available_gpus < total_required_gpus: raise ValueError( f"Total available GPUs {total_available_gpus} is less than total desired GPUs {total_required_gpus}" ) def apply_kl_penalty(data: DataProto, kl_ctrl: core_algos.AdaptiveKLController, kl_penalty="kl"): """Apply KL penalty to the token-level rewards. This function computes the KL divergence between the reference policy and current policy, then applies a penalty to the token-level rewards based on this divergence. Args: data (DataProto): The data containing batched model outputs and inputs. kl_ctrl (core_algos.AdaptiveKLController): Controller for adaptive KL penalty. kl_penalty (str, optional): Type of KL penalty to apply. Defaults to "kl". Returns: tuple: A tuple containing: - The updated data with token-level rewards adjusted by KL penalty - A dictionary of metrics related to the KL penalty """ response_mask = data.batch["response_mask"] token_level_scores = data.batch["token_level_scores"] batch_size = data.batch.batch_size[0] # compute kl between ref_policy and current policy # When apply_kl_penalty is called, algorithm.use_kl_in_reward=True, so the reference model has been enabled. kld = core_algos.kl_penalty( data.batch["old_log_probs"], data.batch["ref_log_prob"], kl_penalty=kl_penalty ) # (batch_size, response_length) kld = kld * response_mask beta = kl_ctrl.value token_level_rewards = token_level_scores - beta * kld current_kl = masked_mean(kld, mask=response_mask, axis=-1) # average over sequence current_kl = torch.mean(current_kl, dim=0).item() # according to https://github.com/huggingface/trl/blob/951ca1841f29114b969b57b26c7d3e80a39f75a0/trl/trainer/ppo_trainer.py#L837 kl_ctrl.update(current_kl=current_kl, n_steps=batch_size) data.batch["token_level_rewards"] = token_level_rewards metrics = {"actor/reward_kl_penalty": current_kl, "actor/reward_kl_penalty_coeff": beta} return data, metrics def compute_response_mask(data: DataProto): """Compute the attention mask for the response part of the sequence. This function extracts the portion of the attention mask that corresponds to the model's response, which is used for masking computations that should only apply to response tokens. Args: data (DataProto): The data containing batched model outputs and inputs.
Returns: torch.Tensor: The attention mask for the response tokens. """ responses = data.batch["responses"] response_length = responses.size(1) attention_mask = data.batch["attention_mask"] return attention_mask[:, -response_length:] def compute_advantage( data: DataProto, adv_estimator: AdvantageEstimator, gamma: float = 1.0, lam: float = 1.0, num_repeat: int = 1, distill_adv_max_clip: Optional[float] = None, distill_adv_min_clip: Optional[float] = None, norm_adv_by_std_in_grpo: bool = True, config: Optional[AlgoConfig] = None, ) -> DataProto: """Compute advantage estimates for policy optimization. This function computes advantage estimates using various estimators like GAE, GRPO, REINFORCE++, etc. The advantage estimates are used to guide policy optimization in RL algorithms. Args: data (DataProto): The data containing batched model outputs and inputs. adv_estimator (AdvantageEstimator): The advantage estimator to use (e.g., GAE, GRPO, REINFORCE++). gamma (float, optional): Discount factor for future rewards. Defaults to 1.0. lam (float, optional): Lambda parameter for GAE. Defaults to 1.0. num_repeat (int, optional): Number of times to repeat the computation. Defaults to 1. distill_adv_max_clip (float, optional): Upper clip applied to on-policy distillation advantages. Defaults to None. distill_adv_min_clip (float, optional): Lower clip applied to on-policy distillation advantages. Defaults to None. norm_adv_by_std_in_grpo (bool, optional): Whether to normalize advantages by standard deviation in GRPO. Defaults to True. config (AlgoConfig, optional): Configuration for algorithm settings. Defaults to None. Returns: DataProto: The updated data with computed advantages and returns. """ # Back-compatible with trainers that do not compute response mask in fit if "response_mask" not in data.batch.keys(): data.batch["response_mask"] = compute_response_mask(data) # prepare response group if adv_estimator == AdvantageEstimator.GAE: # Compute advantages and returns using Generalized Advantage Estimation (GAE) advantages, returns = core_algos.compute_gae_advantage_return( token_level_rewards=data.batch["token_level_rewards"], values=data.batch["values"], response_mask=data.batch["response_mask"], gamma=gamma, lam=lam, ) data.batch["advantages"] = advantages data.batch["returns"] = returns if config.get("use_pf_ppo", False): data = core_algos.compute_pf_ppo_reweight_data( data, config.pf_ppo.get("reweight_method"), config.pf_ppo.get("weight_pow"), ) elif adv_estimator == AdvantageEstimator.GRPO: # Initialize the mask for GRPO calculation grpo_calculation_mask = data.batch["response_mask"] # Call compute_grpo_outcome_advantage with parameters matching its definition advantages, returns = core_algos.compute_grpo_outcome_advantage( token_level_rewards=data.batch["token_level_rewards"], response_mask=grpo_calculation_mask, index=data.non_tensor_batch["uid"], norm_adv_by_std_in_grpo=norm_adv_by_std_in_grpo, ) data.batch["advantages"] = advantages data.batch["returns"] = returns elif adv_estimator == AdvantageEstimator.ON_POLICY_DISTILL: advantages, returns = core_algos.compute_on_policy_distill_reverse_kl( teacher_log_prob=data.batch["ref_log_prob"], student_log_prob=data.batch["old_log_probs"], ) if distill_adv_max_clip is not None: advantages = torch.clamp(advantages, max=distill_adv_max_clip) if distill_adv_min_clip is not None: advantages = torch.clamp(advantages, min=distill_adv_min_clip) data.batch["advantages"] = advantages data.batch["returns"] = returns else: # handle all other adv estimator types (GAE, GRPO, and ON_POLICY_DISTILL are handled above) adv_estimator_fn = core_algos.get_adv_estimator_fn(adv_estimator) adv_kwargs = { "token_level_rewards": data.batch["token_level_rewards"], "response_mask": data.batch["response_mask"], "config": config, } if "uid" in data.non_tensor_batch: #
optional adv_kwargs["index"] = data.non_tensor_batch["uid"] if "reward_baselines" in data.batch: # optional adv_kwargs["reward_baselines"] = data.batch["reward_baselines"] # calculate advantage estimator advantages, returns = adv_estimator_fn(**adv_kwargs) data.batch["advantages"] = advantages data.batch["returns"] = returns return data class RayPPOTrainer: """Distributed PPO trainer using Ray for scalable reinforcement learning. This trainer orchestrates distributed PPO training across multiple nodes and GPUs, managing actor rollouts, critic training, and reward computation with Ray backend. Supports various model architectures including FSDP, Megatron, vLLM, and SGLang integration. """ # TODO: support each role have individual ray_worker_group_cls, # i.e., support different backend of different role def __init__( self, config, tokenizer, role_worker_mapping: dict[Role, WorkerType], resource_pool_manager: ResourcePoolManager, ray_worker_group_cls: type[RayWorkerGroup] = RayWorkerGroup, processor=None, reward_fn=None, val_reward_fn=None, train_dataset: Optional[Dataset] = None, val_dataset: Optional[Dataset] = None, collate_fn=None, train_sampler: Optional[Sampler] = None, device_name=None, ): """ Initialize distributed PPO trainer with Ray backend. Note that this trainer runs on the driver process on a single CPU/GPU node. Args: config: Configuration object containing training parameters. tokenizer: Tokenizer used for encoding and decoding text. role_worker_mapping (dict[Role, WorkerType]): Mapping from roles to worker classes. resource_pool_manager (ResourcePoolManager): Manager for Ray resource pools. ray_worker_group_cls (RayWorkerGroup, optional): Class for Ray worker groups. Defaults to RayWorkerGroup. processor: Optional data processor, used for multimodal data reward_fn: Function for computing rewards during training. val_reward_fn: Function for computing rewards during validation. train_dataset (Optional[Dataset], optional): Training dataset. Defaults to None. val_dataset (Optional[Dataset], optional): Validation dataset. Defaults to None. collate_fn: Function to collate data samples into batches. train_sampler (Optional[Sampler], optional): Sampler for the training dataset. Defaults to None. device_name (str, optional): Device name for training (e.g., "cuda", "cpu"). Defaults to None. 
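Example (illustrative sketch; ``cfg``, ``tok``, ``MyActorRolloutWorker``, and ``my_resource_pool_manager`` are placeholders supplied by the caller, not names defined in this module): trainer = RayPPOTrainer( config=cfg, tokenizer=tok, role_worker_mapping={Role.ActorRollout: MyActorRolloutWorker}, resource_pool_manager=my_resource_pool_manager, ) followed by trainer.init_workers() and trainer.fit().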
""" # Store the tokenizer for text processing self.tokenizer = tokenizer self.processor = processor self.config = config self.reward_fn = reward_fn self.val_reward_fn = val_reward_fn self.hybrid_engine = config.actor_rollout_ref.hybrid_engine assert self.hybrid_engine, "Currently, only support hybrid engine" if self.hybrid_engine: assert Role.ActorRollout in role_worker_mapping, f"{role_worker_mapping.keys()=}" self.role_worker_mapping = role_worker_mapping self.resource_pool_manager = resource_pool_manager self.use_reference_policy = need_reference_policy(self.role_worker_mapping) self.use_rm = need_reward_model(self.role_worker_mapping) self.use_critic = need_critic(self.config) self.ray_worker_group_cls = ray_worker_group_cls self.device_name = device_name if device_name else self.config.trainer.device self.validation_generations_logger = ValidationGenerationsLogger( project_name=self.config.trainer.project_name, experiment_name=self.config.trainer.experiment_name, ) # if ref_in_actor is True, the reference policy will be actor without lora applied self.ref_in_actor = ( config.actor_rollout_ref.model.get("lora_rank", 0) > 0 or config.actor_rollout_ref.model.get("lora_adapter_path") is not None ) # define in-reward KL control # kl loss control currently not suppoorted if self.config.algorithm.use_kl_in_reward: self.kl_ctrl_in_reward = core_algos.get_kl_controller(self.config.algorithm.kl_ctrl) self._create_dataloader(train_dataset, val_dataset, collate_fn, train_sampler) def _create_dataloader(self, train_dataset, val_dataset, collate_fn, train_sampler: Optional[Sampler]): """ Creates the train and validation dataloaders. """ # TODO: we have to make sure the batch size is divisible by the dp size from verl.trainer.main_ppo import create_rl_dataset, create_rl_sampler if train_dataset is None: train_dataset = create_rl_dataset( self.config.data.train_files, self.config.data, self.tokenizer, self.processor, max_samples=self.config.data.get("train_max_samples", -1), ) if val_dataset is None: val_dataset = create_rl_dataset( self.config.data.val_files, self.config.data, self.tokenizer, self.processor, max_samples=self.config.data.get("val_max_samples", -1), ) self.train_dataset, self.val_dataset = train_dataset, val_dataset if train_sampler is None: train_sampler = create_rl_sampler(self.config.data, self.train_dataset) if collate_fn is None: from verl.utils.dataset.rl_dataset import \ collate_fn as default_collate_fn collate_fn = default_collate_fn num_workers = self.config.data["dataloader_num_workers"] self.train_dataloader = StatefulDataLoader( dataset=self.train_dataset, batch_size=self.config.data.get("gen_batch_size", self.config.data.train_batch_size), num_workers=num_workers, drop_last=True, collate_fn=collate_fn, sampler=train_sampler, ) val_batch_size = self.config.data.val_batch_size # Prefer config value if set if val_batch_size is None: val_batch_size = len(self.val_dataset) self.val_dataloader = StatefulDataLoader( dataset=self.val_dataset, batch_size=val_batch_size, num_workers=num_workers, shuffle=self.config.data.get("validation_shuffle", True), drop_last=False, collate_fn=collate_fn, ) assert len(self.train_dataloader) >= 1, "Train dataloader is empty!" assert len(self.val_dataloader) >= 1, "Validation dataloader is empty!" 
print( f"Size of train dataloader: {len(self.train_dataloader)}, Size of val dataloader: " f"{len(self.val_dataloader)}" ) total_training_steps = len(self.train_dataloader) * self.config.trainer.total_epochs if self.config.trainer.total_training_steps is not None: total_training_steps = self.config.trainer.total_training_steps self.total_training_steps = total_training_steps print(f"Total training steps: {self.total_training_steps}") try: OmegaConf.set_struct(self.config, True) with open_dict(self.config): if OmegaConf.select(self.config, "actor_rollout_ref.actor.optim"): self.config.actor_rollout_ref.actor.optim.total_training_steps = total_training_steps if OmegaConf.select(self.config, "critic.optim"): self.config.critic.optim.total_training_steps = total_training_steps except Exception as e: print(f"Warning: Could not set total_training_steps in config. Structure missing? Error: {e}") def _dump_generations(self, inputs, outputs, scores, reward_extra_infos_dict, dump_path, gts=None, logger=None): """Dump rollout/validation samples as JSONL.""" os.makedirs(dump_path, exist_ok=True) filename = os.path.join(dump_path, f"{self.global_steps}.jsonl") n = len(inputs) base_data = { "input": inputs, "output": outputs, "score": scores, "step": [self.global_steps] * n, } if gts is not None and len(gts) == n: # include ground truths when the caller provides them base_data["gts"] = gts for k, v in reward_extra_infos_dict.items(): if len(v) == n: base_data[k] = v if logger is not None and 'wandb' in logger.logger: import pandas as pd df = pd.DataFrame(base_data) import wandb logger.logger['wandb'].log({"completions": wandb.Table(dataframe=df)}) return lines = [] for i in range(n): entry = { k: int(v[i]) if any(t in str(type(v[i])) for t in ['int64', 'bool']) else v[i] for k, v in base_data.items() } lines.append(json.dumps(entry, ensure_ascii=False)) with open(filename, "w") as f: f.write("\n".join(lines) + "\n") print(f"Dumped generations to {filename}") def _log_rollout_data( self, batch: DataProto, reward_extra_infos_dict: dict, timing_raw: dict, rollout_data_dir: str, logger=None ): """Log rollout data to disk.
Args: batch (DataProto): The batch containing rollout data reward_extra_infos_dict (dict): Additional reward information to log timing_raw (dict): Timing information for profiling rollout_data_dir (str): Directory path to save the rollout data """ with marked_timer("dump_rollout_generations", timing_raw, color="green"): inputs = self.tokenizer.batch_decode(batch.batch["prompts"], skip_special_tokens=True) outputs = self.tokenizer.batch_decode(batch.batch["responses"], skip_special_tokens=True) scores = batch.batch["token_level_scores"].sum(-1).cpu().tolist() sample_gts = [item.non_tensor_batch.get("reward_model", {}).get("ground_truth", None) for item in batch] reward_extra_infos_to_dump = reward_extra_infos_dict.copy() if "request_id" in batch.non_tensor_batch: reward_extra_infos_to_dump.setdefault( "request_id", batch.non_tensor_batch["request_id"].tolist(), ) self._dump_generations( inputs=inputs, outputs=outputs, gts=sample_gts, scores=scores, reward_extra_infos_dict=reward_extra_infos_to_dump, dump_path=rollout_data_dir, logger=logger, ) def _maybe_log_val_generations(self, inputs, outputs, scores): """Log a table of validation samples to the configured logger (wandb or swanlab)""" generations_to_log = self.config.trainer.log_val_generations if generations_to_log == 0: return import numpy as np # Create tuples of (input, output, score) and sort by input text samples = list(zip(inputs, outputs, scores, strict=True)) samples.sort(key=lambda x: x[0]) # Sort by input text # Use fixed random seed for deterministic shuffling rng = np.random.RandomState(42) rng.shuffle(samples) # Take first N samples after shuffling samples = samples[:generations_to_log] # Log to each configured logger self.validation_generations_logger.log(self.config.trainer.logger, samples, self.global_steps) def _get_gen_batch(self, batch: DataProto) -> DataProto: reward_model_keys = {"data_source", "reward_model", "extra_info", "uid"} & batch.non_tensor_batch.keys() # pop those keys for generation batch_keys_to_pop = ["input_ids", "attention_mask", "position_ids"] non_tensor_batch_keys_to_pop = set(batch.non_tensor_batch.keys()) - reward_model_keys gen_batch = batch.pop( batch_keys=batch_keys_to_pop, non_tensor_batch_keys=list(non_tensor_batch_keys_to_pop), ) # For agent loop, we need reward model keys to compute score. if self.async_rollout_mode: gen_batch.non_tensor_batch.update(batch.non_tensor_batch) return gen_batch def _validate(self): data_source_lst = [] reward_extra_infos_dict: dict[str, list] = defaultdict(list) # Lists to collect samples for the table sample_inputs = [] sample_outputs = [] sample_gts = [] sample_scores = [] sample_turns = [] sample_uids = [] for test_data in self.val_dataloader: test_batch = DataProto.from_single_dict(test_data) if "uid" not in test_batch.non_tensor_batch: test_batch.non_tensor_batch["uid"] = np.array( [str(uuid.uuid4()) for _ in range(len(test_batch.batch))], dtype=object ) # repeat test batch test_batch = test_batch.repeat( repeat_times=self.config.actor_rollout_ref.rollout.val_kwargs.n, interleave=True ) # we only do validation on rule-based rm if self.config.reward_model.enable and test_batch[0].non_tensor_batch["reward_model"]["style"] == "model": return {} # Store original inputs input_ids = test_batch.batch["input_ids"] # TODO: Can we keep special tokens except for padding tokens?
input_texts = [self.tokenizer.decode(ids, skip_special_tokens=True) for ids in input_ids] sample_inputs.extend(input_texts) sample_uids.extend(test_batch.non_tensor_batch["uid"]) ground_truths = [ item.non_tensor_batch.get("reward_model", {}).get("ground_truth", None) for item in test_batch ] sample_gts.extend(ground_truths) test_gen_batch = self._get_gen_batch(test_batch) test_gen_batch.meta_info = { "eos_token_id": self.tokenizer.eos_token_id, "pad_token_id": self.tokenizer.pad_token_id, "recompute_log_prob": False, "do_sample": self.config.actor_rollout_ref.rollout.val_kwargs.do_sample, "validate": True, "global_steps": self.global_steps, } print(f"test_gen_batch meta info: {test_gen_batch.meta_info}") # pad to be divisible by dp_size size_divisor = ( self.actor_rollout_wg.world_size if not self.async_rollout_mode else self.config.actor_rollout_ref.rollout.agent.num_workers ) test_gen_batch_padded, pad_size = pad_dataproto_to_divisor(test_gen_batch, size_divisor) if not self.async_rollout_mode: test_output_gen_batch_padded = self.actor_rollout_wg.generate_sequences(test_gen_batch_padded) else: test_output_gen_batch_padded = self.async_rollout_manager.generate_sequences(test_gen_batch_padded) # unpad test_output_gen_batch = unpad_dataproto(test_output_gen_batch_padded, pad_size=pad_size) print("validation generation end") # Store generated outputs output_ids = test_output_gen_batch.batch["responses"] output_texts = [self.tokenizer.decode(ids, skip_special_tokens=True) for ids in output_ids] sample_outputs.extend(output_texts) test_batch = test_batch.union(test_output_gen_batch) test_batch.meta_info["validate"] = True # evaluate using reward_function if self.val_reward_fn is None: raise ValueError("val_reward_fn must be provided for validation.") result = self.val_reward_fn(test_batch, return_dict=True) reward_tensor = result["reward_tensor"] scores = reward_tensor.sum(-1).cpu().tolist() sample_scores.extend(scores) reward_extra_infos_dict["reward"].extend(scores) if "reward_extra_info" in result: for key, lst in result["reward_extra_info"].items(): reward_extra_infos_dict[key].extend(lst) # collect num_turns of each prompt if "__num_turns__" in test_batch.non_tensor_batch: sample_turns.append(test_batch.non_tensor_batch["__num_turns__"]) data_source_lst.append(test_batch.non_tensor_batch.get("data_source", ["unknown"] * reward_tensor.shape[0])) self._maybe_log_val_generations(inputs=sample_inputs, outputs=sample_outputs, scores=sample_scores) # dump generations (no Tracking logger is available inside _validate, so none is passed) val_data_dir = self.config.trainer.get("validation_data_dir", None) if val_data_dir: self._dump_generations( inputs=sample_inputs, outputs=sample_outputs, gts=sample_gts, scores=sample_scores, reward_extra_infos_dict=reward_extra_infos_dict, dump_path=val_data_dir, ) for key_info, lst in reward_extra_infos_dict.items(): assert len(lst) == 0 or len(lst) == len(sample_scores), f"{key_info}: {len(lst)=}, {len(sample_scores)=}" data_sources = np.concatenate(data_source_lst, axis=0) data_src2var2metric2val = process_validation_metrics(data_sources, sample_uids, reward_extra_infos_dict) metric_dict = {} for data_source, var2metric2val in data_src2var2metric2val.items(): core_var = "acc" if "acc" in var2metric2val else "reward" for var_name, metric2val in var2metric2val.items(): n_max = max([int(name.split("@")[-1].split("/")[0]) for name in metric2val.keys()]) for metric_name, metric_val in metric2val.items(): if ( (var_name == core_var) and any(metric_name.startswith(pfx) for pfx in ["mean", "maj", "best"]) and
(f"@{n_max}" in metric_name) ): metric_sec = "val-core" else: metric_sec = "val-aux" pfx = f"{metric_sec}/{data_source}/{var_name}/{metric_name}" metric_dict[pfx] = metric_val if len(sample_turns) > 0: sample_turns = np.concatenate(sample_turns) metric_dict["val-aux/num_turns/min"] = sample_turns.min() metric_dict["val-aux/num_turns/max"] = sample_turns.max() metric_dict["val-aux/num_turns/mean"] = sample_turns.mean() return metric_dict def init_workers(self): """Initialize distributed training workers using Ray backend. Creates: 1. Ray resource pools from configuration 2. Worker groups for each role (actor, critic, etc.) """ self.resource_pool_manager.create_resource_pool() self.resource_pool_to_cls = {pool: {} for pool in self.resource_pool_manager.resource_pool_dict.values()} # create actor and rollout if self.hybrid_engine: resource_pool = self.resource_pool_manager.get_resource_pool(Role.ActorRollout) actor_rollout_cls = RayClassWithInitArgs( cls=self.role_worker_mapping[Role.ActorRollout], config=self.config.actor_rollout_ref, role=str(Role.ActorRollout), ) self.resource_pool_to_cls[resource_pool][str(Role.ActorRollout)] = actor_rollout_cls else: raise NotImplementedError # create critic if self.use_critic: resource_pool = self.resource_pool_manager.get_resource_pool(Role.Critic) critic_cfg = omega_conf_to_dataclass(self.config.critic) critic_cls = RayClassWithInitArgs(cls=self.role_worker_mapping[Role.Critic], config=critic_cfg) self.resource_pool_to_cls[resource_pool][str(Role.Critic)] = critic_cls # create reference policy if needed if self.use_reference_policy: resource_pool = self.resource_pool_manager.get_resource_pool(Role.RefPolicy) ref_policy_cls = RayClassWithInitArgs( self.role_worker_mapping[Role.RefPolicy], config=self.config.actor_rollout_ref, role=str(Role.RefPolicy), ) self.resource_pool_to_cls[resource_pool][str(Role.RefPolicy)] = ref_policy_cls # create a reward model if reward_fn is None if self.use_rm: # we create a RM here resource_pool = self.resource_pool_manager.get_resource_pool(Role.RewardModel) rm_cls = RayClassWithInitArgs(self.role_worker_mapping[Role.RewardModel], config=self.config.reward_model) self.resource_pool_to_cls[resource_pool][str(Role.RewardModel)] = rm_cls # initialize WorkerGroup # NOTE: if you want to use a different resource pool for each role, which can support different parallel size, # you should not use `create_colocated_worker_cls`. # Instead, directly pass different resource pool to different worker groups. # See https://github.com/volcengine/verl/blob/master/examples/ray/tutorial.ipynb for more information. 
all_wg = {} wg_kwargs = {} # Setting up kwargs for RayWorkerGroup if OmegaConf.select(self.config.trainer, "ray_wait_register_center_timeout") is not None: wg_kwargs["ray_wait_register_center_timeout"] = self.config.trainer.ray_wait_register_center_timeout if OmegaConf.select(self.config.global_profiler, "steps") is not None: wg_kwargs["profile_steps"] = OmegaConf.select(self.config.global_profiler, "steps") # Only require nsight worker options when tool is nsys if OmegaConf.select(self.config.global_profiler, "tool") == "nsys": assert ( OmegaConf.select(self.config.global_profiler.global_tool_config.nsys, "worker_nsight_options") is not None ), "worker_nsight_options must be set when using nsys with profile_steps" wg_kwargs["worker_nsight_options"] = OmegaConf.to_container( OmegaConf.select(self.config.global_profiler.global_tool_config.nsys, "worker_nsight_options") ) wg_kwargs["device_name"] = self.device_name for resource_pool, class_dict in self.resource_pool_to_cls.items(): worker_dict_cls = create_colocated_worker_cls(class_dict=class_dict) wg_dict = self.ray_worker_group_cls( resource_pool=resource_pool, ray_cls_with_init=worker_dict_cls, **wg_kwargs, ) spawn_wg = wg_dict.spawn(prefix_set=class_dict.keys()) all_wg.update(spawn_wg) if self.use_critic: self.critic_wg = all_wg[str(Role.Critic)] self.critic_wg.init_model() if self.use_reference_policy and not self.ref_in_actor: self.ref_policy_wg = all_wg[str(Role.RefPolicy)] self.ref_policy_wg.init_model() self.rm_wg = None # initialization of rm_wg will be deprecated in the future if self.use_rm: self.rm_wg = all_wg[str(Role.RewardModel)] self.rm_wg.init_model() # we should create rollout at the end so that vllm can have a better estimation of kv cache memory self.actor_rollout_wg = all_wg[str(Role.ActorRollout)] self.actor_rollout_wg.init_model() # create async rollout manager and request scheduler self.async_rollout_mode = False if self.config.actor_rollout_ref.rollout.mode == "async": from verl.experimental.agent_loop import AgentLoopManager self.async_rollout_mode = True self.async_rollout_manager = AgentLoopManager( config=self.config, worker_group=self.actor_rollout_wg, rm_wg=self.rm_wg ) def _save_checkpoint(self): from verl.utils.fs import local_mkdir_safe # path: given_path + `/global_step_{global_steps}` + `/actor` local_global_step_folder = os.path.join( self.config.trainer.default_local_dir, f"global_step_{self.global_steps}" ) print(f"local_global_step_folder: {local_global_step_folder}") actor_local_path = os.path.join(local_global_step_folder, "actor") actor_remote_path = ( None if self.config.trainer.default_hdfs_dir is None else os.path.join(self.config.trainer.default_hdfs_dir, f"global_step_{self.global_steps}", "actor") ) remove_previous_ckpt_in_save = self.config.trainer.get("remove_previous_ckpt_in_save", False) if remove_previous_ckpt_in_save: print( "Warning: remove_previous_ckpt_in_save is deprecated," + " set max_actor_ckpt_to_keep=1 and max_critic_ckpt_to_keep=1 instead" ) max_actor_ckpt_to_keep = ( self.config.trainer.get("max_actor_ckpt_to_keep", None) if not remove_previous_ckpt_in_save else 1 ) max_critic_ckpt_to_keep = ( self.config.trainer.get("max_critic_ckpt_to_keep", None) if not remove_previous_ckpt_in_save else 1 ) self.actor_rollout_wg.save_checkpoint( actor_local_path, actor_remote_path, self.global_steps, max_ckpt_to_keep=max_actor_ckpt_to_keep ) if self.use_critic: critic_local_path = os.path.join(local_global_step_folder, str(Role.Critic)) critic_remote_path = ( None if
self.config.trainer.default_hdfs_dir is None else os.path.join( self.config.trainer.default_hdfs_dir, f"global_step_{self.global_steps}", str(Role.Critic) ) ) self.critic_wg.save_checkpoint( critic_local_path, critic_remote_path, self.global_steps, max_ckpt_to_keep=max_critic_ckpt_to_keep ) # save dataloader local_mkdir_safe(local_global_step_folder) dataloader_local_path = os.path.join(local_global_step_folder, "data.pt") dataloader_state_dict = self.train_dataloader.state_dict() torch.save(dataloader_state_dict, dataloader_local_path) # latest checkpointed iteration tracker (for atomic usage) local_latest_checkpointed_iteration = os.path.join( self.config.trainer.default_local_dir, "latest_checkpointed_iteration.txt" ) with open(local_latest_checkpointed_iteration, "w") as f: f.write(str(self.global_steps)) def _load_checkpoint(self): if self.config.trainer.resume_mode == "disable": # NOTE: while there is no checkpoint to load, we still need to offload the model and optimizer to CPU self.actor_rollout_wg.load_checkpoint(None) return 0 # load from hdfs if self.config.trainer.default_hdfs_dir is not None: raise NotImplementedError("load from hdfs is not implemented yet") else: checkpoint_folder = self.config.trainer.default_local_dir # TODO: check path if not os.path.isabs(checkpoint_folder): working_dir = os.getcwd() checkpoint_folder = os.path.join(working_dir, checkpoint_folder) global_step_folder = find_latest_ckpt_path(checkpoint_folder) # None if no latest # find global_step_folder if self.config.trainer.resume_mode == "auto": if global_step_folder is None: print("Training from scratch") self.actor_rollout_wg.load_checkpoint(None) return 0 else: if self.config.trainer.resume_mode == "resume_path": assert isinstance(self.config.trainer.resume_from_path, str), "resume ckpt must be str type" assert "global_step_" in self.config.trainer.resume_from_path, ( "resume ckpt must specify the global_steps" ) global_step_folder = self.config.trainer.resume_from_path if not os.path.isabs(global_step_folder): working_dir = os.getcwd() global_step_folder = os.path.join(working_dir, global_step_folder) print(f"Load from checkpoint folder: {global_step_folder}") # set global step self.global_steps = int(global_step_folder.split("global_step_")[-1]) print(f"Setting global step to {self.global_steps}") print(f"Resuming from {global_step_folder}") actor_path = os.path.join(global_step_folder, "actor") critic_path = os.path.join(global_step_folder, str(Role.Critic)) # load actor self.actor_rollout_wg.load_checkpoint( actor_path, del_local_after_load=self.config.trainer.del_local_ckpt_after_load ) # load critic if self.use_critic: self.critic_wg.load_checkpoint( critic_path, del_local_after_load=self.config.trainer.del_local_ckpt_after_load ) # load dataloader, # TODO: from remote not implemented yet dataloader_local_path = os.path.join(global_step_folder, "data.pt") if os.path.exists(dataloader_local_path): dataloader_state_dict = torch.load(dataloader_local_path, weights_only=False) self.train_dataloader.load_state_dict(dataloader_state_dict) else: print(f"Warning: No dataloader state found at {dataloader_local_path}, will start from scratch") def _start_profiling(self, do_profile: bool) -> None: """Start profiling for all worker groups if profiling is enabled.""" if do_profile: self.actor_rollout_wg.start_profile(role="e2e", profile_step=self.global_steps) if self.use_reference_policy: self.ref_policy_wg.start_profile(profile_step=self.global_steps) if self.use_critic: 
self.critic_wg.start_profile(profile_step=self.global_steps) if self.use_rm: self.rm_wg.start_profile(profile_step=self.global_steps) def _stop_profiling(self, do_profile: bool) -> None: """Stop profiling for all worker groups if profiling is enabled.""" if do_profile: self.actor_rollout_wg.stop_profile() if self.use_reference_policy: self.ref_policy_wg.stop_profile() if self.use_critic: self.critic_wg.stop_profile() if self.use_rm: self.rm_wg.stop_profile() def _balance_batch(self, batch: DataProto, metrics, logging_prefix="global_seqlen", keep_minibatch=False): """Reorder the data on single controller such that each dp rank gets similar total tokens""" attention_mask = batch.batch["attention_mask"] batch_size = attention_mask.shape[0] global_seqlen_lst = batch.batch["attention_mask"].view(batch_size, -1).sum(-1) # (train_batch_size,) global_seqlen_lst = calculate_workload(global_seqlen_lst) world_size = self.actor_rollout_wg.world_size if keep_minibatch: # Decouple the DP balancing and mini-batching. minibatch_size = self.config.actor_rollout_ref.actor.get("ppo_mini_batch_size") minibatch_num = len(global_seqlen_lst) // minibatch_size global_partition_lst = [[] for _ in range(world_size)] for i in range(minibatch_num): rearrange_minibatch_lst = get_seqlen_balanced_partitions( global_seqlen_lst[i * minibatch_size : (i + 1) * minibatch_size], k_partitions=world_size, equal_size=True, ) for j, part in enumerate(rearrange_minibatch_lst): global_partition_lst[j].extend([x + minibatch_size * i for x in part]) else: global_partition_lst = get_seqlen_balanced_partitions( global_seqlen_lst, k_partitions=world_size, equal_size=True ) # Place smaller micro-batches at both ends to reduce the bubbles in pipeline parallel. for idx, partition in enumerate(global_partition_lst): partition.sort(key=lambda x: (global_seqlen_lst[x], x)) ordered_partition = partition[::2] + partition[1::2][::-1] global_partition_lst[idx] = ordered_partition # reorder based on index. The data will be automatically equally partitioned by dispatch function global_idx = torch.tensor([j for partition in global_partition_lst for j in partition]) batch.reorder(global_idx) global_balance_stats = log_seqlen_unbalance( seqlen_list=global_seqlen_lst, partitions=global_partition_lst, prefix=logging_prefix ) metrics.update(global_balance_stats) def compute_rollout_importance_weights_and_add_to_batch(self, batch: DataProto) -> tuple[DataProto, dict]: """Compute IS weights and apply rejection sampling for rollout-training mismatch. Computes importance sampling weights to correct for distribution mismatch between rollout and training policies. Applies rejection sampling (mask mode/veto) by modifying response_mask. Always updates response_mask; conditionally adds IS weights. 
Key behavior: - response_mask: ALWAYS updated with rejection (mask mode + veto excluded from training) - rollout_is_weights: Added to batch ONLY if config.algorithm.rollout_is=True This separation ensures: - Rejection works even when IS weights are disabled (rollout_is=False) - Metrics can be monitored before enabling IS weight application Args: batch: DataProto with old_log_probs, rollout_log_probs, response_mask Returns: Tuple of (updated_batch, metrics): updated_batch: Batch with modified response_mask (always) and rollout_is_weights (if rollout_is=True) metrics: Dict of IS and mismatch metrics, all with "mismatch/" prefix """ # Compute rollout IS weights if enabled and data is available # rollout_is_threshold is the main on/off switch (None = disabled, float = enabled) rollout_is_threshold = self.config.algorithm.get("rollout_is_threshold", None) if rollout_is_threshold is not None and rollout_is_threshold > 0 and "rollout_log_probs" in batch.batch: # Compute IS weights and get modified response_mask rollout_is_weights, modified_response_mask, rollout_is_metrics = compute_rollout_importance_weights( old_log_prob=batch.batch["old_log_probs"], rollout_log_prob=batch.batch["rollout_log_probs"], response_mask=batch.batch["response_mask"], rollout_is_level=self.config.algorithm.rollout_is_level, rollout_is_mode=self.config.algorithm.rollout_is_mode, rollout_is_threshold=self.config.algorithm.rollout_is_threshold, rollout_is_threshold_lower=self.config.algorithm.get("rollout_is_threshold_lower", None), rollout_is_veto_threshold=self.config.algorithm.get("rollout_is_veto_threshold", None), ) # ALWAYS update response_mask with rejection (even if rollout_is=False) # - Mask mode: tokens with outlier IS ratios excluded # - Veto: sequences with catastrophic tokens excluded # This ensures correct loss normalization (rejected samples not in denominator) batch.batch["response_mask"] = modified_response_mask # Conditionally add IS weights based on rollout_is config flag # - rollout_is=True: Enable IS weight correction in policy loss # - rollout_is=False: Metrics-only mode (rejection still applied via mask) apply_weights = self.config.algorithm.get("rollout_is", False) if apply_weights: # Add IS weights (safety-bounded, mode-processed) to enable weight correction batch = batch.union(rollout_is_weights) return batch, rollout_is_metrics # Return unchanged batch and empty metrics if IS is disabled return batch, {} def fit(self): """ The training loop of PPO. The driver process only needs to call the compute functions of the worker group through RPC to construct the PPO dataflow. The lightweight advantage computation is done on the driver process. """ from omegaconf import OmegaConf from verl.utils.tracking import Tracking logger = Tracking( project_name=self.config.trainer.project_name, experiment_name=self.config.trainer.experiment_name, default_backend=self.config.trainer.logger, config=OmegaConf.to_container(self.config, resolve=True), ) self.global_steps = 0 # load checkpoint before doing anything self._load_checkpoint() # perform validation before training # currently, we only support validation using the reward_function.
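# Control-flow sketch (hypothetical values): with trainer.val_before_train=True,
# an initial _validate() runs before step 1; trainer.val_only=True would then
# return without training; inside the loop, trainer.test_freq=10 re-validates
# every 10 steps and trainer.save_freq=50 checkpoints every 50 steps
# (plus on the last step).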
if self.val_reward_fn is not None and self.config.trainer.get("val_before_train", True): val_metrics = self._validate() assert val_metrics, f"{val_metrics=}" pprint(f"Initial validation metrics: {val_metrics}") logger.log(data=val_metrics, step=self.global_steps) if self.config.trainer.get("val_only", False): return if self.config.actor_rollout_ref.rollout.get("skip_rollout", False): rollout_skip = RolloutSkip(self.config, self.actor_rollout_wg) rollout_skip.wrap_generate_sequences() # add tqdm progress_bar = tqdm(total=self.total_training_steps, initial=self.global_steps, desc="Training Progress") # we start from step 1 self.global_steps += 1 last_val_metrics = None self.max_steps_duration = 0 prev_step_profile = False curr_step_profile = ( self.global_steps in self.config.global_profiler.steps if self.config.global_profiler.steps is not None else False ) next_step_profile = False for epoch in range(self.config.trainer.total_epochs): for batch_dict in self.train_dataloader: metrics = {} timing_raw = {} with marked_timer("start_profile", timing_raw): self._start_profiling( not prev_step_profile and curr_step_profile if self.config.global_profiler.profile_continuous_steps else curr_step_profile ) batch: DataProto = DataProto.from_single_dict(batch_dict) # add uid to batch batch.non_tensor_batch["uid"] = np.array( [str(uuid.uuid4()) for _ in range(len(batch.batch))], dtype=object ) gen_batch = self._get_gen_batch(batch) # pass global_steps to trace gen_batch.meta_info["global_steps"] = self.global_steps gen_batch_output = gen_batch.repeat( repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True ) is_last_step = self.global_steps >= self.total_training_steps with marked_timer("step", timing_raw): # generate a batch with marked_timer("gen", timing_raw, color="red"): if not self.async_rollout_mode: gen_batch_output = self.actor_rollout_wg.generate_sequences(gen_batch_output) else: gen_batch_output = self.async_rollout_manager.generate_sequences(gen_batch_output) timing_raw.update(gen_batch_output.meta_info["timing"]) gen_batch_output.meta_info.pop("timing", None) if self.config.algorithm.adv_estimator == AdvantageEstimator.REMAX: if self.reward_fn is None: raise ValueError("A reward_fn is required for REMAX advantage estimation.") with marked_timer("gen_max", timing_raw, color="purple"): gen_baseline_batch = deepcopy(gen_batch) gen_baseline_batch.meta_info["do_sample"] = False if not self.async_rollout_mode: gen_baseline_output = self.actor_rollout_wg.generate_sequences(gen_baseline_batch) else: gen_baseline_output = self.async_rollout_manager.generate_sequences(gen_baseline_batch) batch = batch.union(gen_baseline_output) # compute reward model score on batch rm_scores = None if self.use_rm and "rm_scores" not in batch.batch.keys(): rm_scores = self.rm_wg.compute_rm_score(batch) batch = batch.union(rm_scores) reward_baseline_tensor, _ = compute_reward(batch, self.reward_fn) reward_baseline_tensor = reward_baseline_tensor.sum(dim=-1) keys_to_pop = set(gen_baseline_output.batch.keys()) if rm_scores is not None: keys_to_pop.update(rm_scores.batch.keys()) batch.pop(batch_keys=list(keys_to_pop)) batch.batch["reward_baselines"] = reward_baseline_tensor del rm_scores, gen_baseline_batch, gen_baseline_output # repeat to align with repeated responses in rollout batch = batch.repeat(repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True) batch = batch.union(gen_batch_output) if "response_mask" not in batch.batch.keys(): batch.batch["response_mask"] = 
compute_response_mask(batch) # Balance the number of valid tokens across DP ranks. # NOTE: This usually changes the order of data in the `batch`, # which won't affect the advantage calculation (since it's based on uid), # but might affect the loss calculation (due to the change of mini-batching). if self.config.trainer.balance_batch: self._balance_batch(batch, metrics=metrics) # compute global_valid tokens batch.meta_info["global_token_num"] = torch.sum(batch.batch["attention_mask"], dim=-1).tolist() with marked_timer("reward", timing_raw, color="yellow"): # compute reward model score if self.use_rm and "rm_scores" not in batch.batch.keys(): reward_tensor = self.rm_wg.compute_rm_score(batch) batch = batch.union(reward_tensor) if self.config.reward_model.launch_reward_fn_async: future_reward = compute_reward_async.remote( data=batch, config=self.config, tokenizer=self.tokenizer ) else: reward_tensor, reward_extra_infos_dict = compute_reward(batch, self.reward_fn) # recompute old_log_probs with marked_timer("old_log_prob", timing_raw, color="blue"): old_log_prob = self.actor_rollout_wg.compute_log_prob(batch) entropys = old_log_prob.batch["entropys"] response_masks = batch.batch["response_mask"] loss_agg_mode = self.config.actor_rollout_ref.actor.loss_agg_mode entropy_agg = agg_loss(loss_mat=entropys, loss_mask=response_masks, loss_agg_mode=loss_agg_mode) old_log_prob_metrics = {"actor/entropy": entropy_agg.detach().item()} metrics.update(old_log_prob_metrics) old_log_prob.batch.pop("entropys") batch = batch.union(old_log_prob) if "rollout_log_probs" in batch.batch.keys(): # TODO: we may want to add diff of probs too. from verl.utils.debug.metrics import \ calculate_debug_metrics metrics.update(calculate_debug_metrics(batch)) if self.use_reference_policy: # compute reference log_prob with marked_timer(str(Role.RefPolicy), timing_raw, color="olive"): if not self.ref_in_actor: ref_log_prob = self.ref_policy_wg.compute_ref_log_prob(batch) else: ref_log_prob = self.actor_rollout_wg.compute_ref_log_prob(batch) batch = batch.union(ref_log_prob) # compute values if self.use_critic: with marked_timer("values", timing_raw, color="cyan"): values = self.critic_wg.compute_values(batch) batch = batch.union(values) with marked_timer("adv", timing_raw, color="brown"): # we combine with rule-based rm reward_extra_infos_dict: dict[str, list] if self.config.reward_model.launch_reward_fn_async: reward_tensor, reward_extra_infos_dict = ray.get(future_reward) batch.batch["token_level_scores"] = reward_tensor if reward_extra_infos_dict: batch.non_tensor_batch.update({k: np.array(v) for k, v in reward_extra_infos_dict.items()}) # compute rewards. apply_kl_penalty if available if self.config.algorithm.use_kl_in_reward: batch, kl_metrics = apply_kl_penalty( batch, kl_ctrl=self.kl_ctrl_in_reward, kl_penalty=self.config.algorithm.kl_penalty ) metrics.update(kl_metrics) else: batch.batch["token_level_rewards"] = batch.batch["token_level_scores"] # Compute rollout importance sampling weights centrally (once per batch) # This corrects for mismatch between rollout policy and training policy # Also computes mismatch metrics (KL, PPL, etc.) 
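# Numeric sketch (made-up values): for a token with old_log_prob=-1.0
# (training policy) and rollout_log_prob=-1.5 (rollout policy), the
# token-level IS weight is exp(-1.0 - (-1.5)) = e^0.5 ≈ 1.65. With
# rollout_is_threshold=2.0 in "truncate" mode that weight passes through
# unclamped, while a weight of 3.2 would be clamped down to 2.0; in
# "mask" mode the 3.2 token would instead be zeroed out of response_mask.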
batch, is_metrics = self.compute_rollout_importance_weights_and_add_to_batch(batch) # IS and mismatch metrics already have mismatch/ prefix metrics.update(is_metrics) # compute advantages, executed on the driver process norm_adv_by_std_in_grpo = self.config.algorithm.get( "norm_adv_by_std_in_grpo", True ) # GRPO adv normalization factor batch = compute_advantage( batch, adv_estimator=self.config.algorithm.adv_estimator, gamma=self.config.algorithm.gamma, lam=self.config.algorithm.lam, num_repeat=self.config.actor_rollout_ref.rollout.n, norm_adv_by_std_in_grpo=norm_adv_by_std_in_grpo, config=self.config.algorithm, ) # update critic if self.use_critic: with marked_timer("update_critic", timing_raw, color="pink"): critic_output = self.critic_wg.update_critic(batch) critic_output_metrics = reduce_metrics(critic_output.meta_info["metrics"]) metrics.update(critic_output_metrics) # implement critic warmup if self.config.trainer.critic_warmup <= self.global_steps: # update actor with marked_timer("update_actor", timing_raw, color="red"): batch.meta_info["multi_turn"] = self.config.actor_rollout_ref.rollout.multi_turn.enable actor_output = self.actor_rollout_wg.update_actor(batch) actor_output_metrics = reduce_metrics(actor_output.meta_info["metrics"]) metrics.update(actor_output_metrics) # Log rollout generations if enabled rollout_data_dir = self.config.trainer.get("rollout_data_dir", None) if rollout_data_dir: self._log_rollout_data(batch, reward_extra_infos_dict, timing_raw, rollout_data_dir, logger) # validate if ( self.val_reward_fn is not None and self.config.trainer.test_freq > 0 and (is_last_step or self.global_steps % self.config.trainer.test_freq == 0) ): with marked_timer("testing", timing_raw, color="green"): val_metrics: dict = self._validate() if is_last_step: last_val_metrics = val_metrics metrics.update(val_metrics) # Check if the ESI (Elastic Server Instance)/training plan is close to expiration. esi_close_to_expiration = should_save_ckpt_esi( max_steps_duration=self.max_steps_duration, redundant_time=self.config.trainer.esi_redundant_time, ) # Check if the conditions for saving a checkpoint are met. # The conditions include a mandatory condition (1) and # one of the following optional conditions (2/3/4): # 1. The save frequency is set to a positive value. # 2. It's the last training step. # 3. The current step number is a multiple of the save frequency. # 4. The ESI(Elastic Server Instance)/training plan is close to expiration. 
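# Equivalent boolean sketch of the check below:
#   should_save = save_freq > 0 and (
#       is_last_step
#       or global_steps % save_freq == 0
#       or esi_close_to_expiration
#   )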
if self.config.trainer.save_freq > 0 and ( is_last_step or self.global_steps % self.config.trainer.save_freq == 0 or esi_close_to_expiration ): if esi_close_to_expiration: print("Force saving checkpoint: ESI instance expiration approaching.") with marked_timer("save_checkpoint", timing_raw, color="green"): self._save_checkpoint() with marked_timer("stop_profile", timing_raw): next_step_profile = ( self.global_steps + 1 in self.config.global_profiler.steps if self.config.global_profiler.steps is not None else False ) self._stop_profiling( curr_step_profile and not next_step_profile if self.config.global_profiler.profile_continuous_steps else curr_step_profile ) prev_step_profile = curr_step_profile curr_step_profile = next_step_profile steps_duration = timing_raw["step"] self.max_steps_duration = max(self.max_steps_duration, steps_duration) # training metrics metrics.update( { "training/global_step": self.global_steps, "training/epoch": epoch, } ) # collect metrics metrics.update(compute_data_metrics(batch=batch, use_critic=self.use_critic)) metrics.update(compute_timing_metrics(batch=batch, timing_raw=timing_raw)) # TODO: implement actual tflops and theoretical tflops n_gpus = self.resource_pool_manager.get_n_gpus() metrics.update(compute_throughout_metrics(batch=batch, timing_raw=timing_raw, n_gpus=n_gpus)) # Note: mismatch metrics (KL, PPL, etc.) are collected after advantage computation above # this is experimental and may be changed/removed in the future in favor of a general-purpose one if isinstance(self.train_dataloader.sampler, AbstractCurriculumSampler): self.train_dataloader.sampler.update(batch=batch) # TODO: make a canonical logger that supports various backends logger.log(data=metrics, step=self.global_steps) progress_bar.update(1) self.global_steps += 1 if ( hasattr(self.config.actor_rollout_ref.actor, "profiler") and self.config.actor_rollout_ref.actor.profiler.tool == "torch_memory" ): self.actor_rollout_wg.dump_memory_snapshot( tag=f"post_update_step{self.global_steps}", sub_dir=f"step{self.global_steps}" ) if is_last_step: pprint(f"Final validation metrics: {last_val_metrics}") progress_bar.close() return # this is experimental and may be changed/removed in the future # in favor of a general-purpose data buffer pool if hasattr(self.train_dataset, "on_batch_end"): # The dataset may be changed after each training batch self.train_dataset.on_batch_end(batch=batch) ================================================ FILE: verl_distillation/verl/trainer/ppo/reward.py ================================================ # Copyright 2025 Individual Contributor: Thibaut Barroyer # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
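# Usage sketch for this module (illustrative; `cfg`, `tok`, and `batch` are
# placeholders supplied by the caller, not defined here):
#
#     reward_fn = load_reward_manager(cfg, tok, num_examine=0)
#     reward_tensor, reward_extra = compute_reward(batch, reward_fn)
#
# load_reward_manager resolves an optional custom reward function from
# cfg.custom_reward_function and otherwise falls back to default_compute_score.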
import importlib.util import inspect import multiprocessing import os import sys import warnings from functools import partial from typing import Any, Optional import ray import torch from omegaconf import DictConfig from verl import DataProto from verl.utils.reward_score import default_compute_score from verl.utils.transferqueue_utils import tqbridge from verl.workers.reward_manager import get_reward_manager_cls from verl.workers.reward_manager.abstract import AbstractRewardManager, RawRewardFn def _call_with_kwargs(raw_fn, extra_kwargs, *args, **kwargs): """Calls `raw_fn` by merging `extra_kwargs` into call-time `kwargs`, with `extra_kwargs` taking precedence. This function is used to merge additional keyword arguments with the original function's arguments. """ merged_kwargs = {**kwargs, **extra_kwargs} return raw_fn(*args, **merged_kwargs) async def _call_with_kwargs_async(raw_fn, extra_kwargs, *args, **kwargs): """Calls `raw_fn` by merging `extra_kwargs` into call-time `kwargs`, with `extra_kwargs` taking precedence. This function is used to merge additional keyword arguments with the original function's arguments. """ merged_kwargs = {**kwargs, **extra_kwargs} return await raw_fn(*args, **merged_kwargs) def get_custom_reward_fn(config: DictConfig) -> Optional[RawRewardFn]: """Load and return a custom reward function from external file. Dynamically imports a reward function from a specified file path and wraps it with additional keyword arguments from the configuration. Args: config (dict): Configuration dictionary containing custom_reward_function settings with 'path', 'name', and 'reward_kwargs' fields. Returns: callable or None: Wrapped reward function with merged kwargs, or None if no custom reward function is configured. Raises: FileNotFoundError: If the specified reward function file doesn't exist. RuntimeError: If there's an error loading the module from file. AttributeError: If the specified function name isn't found in the module. """ reward_fn_config = config.get("custom_reward_function") or {} file_path = reward_fn_config.get("path") if not file_path: return None function_name = reward_fn_config.get("name") assert function_name is not None module = sys.modules.get("custom_module", None) if module is None: if not os.path.exists(file_path): raise FileNotFoundError(f"Reward function file '{file_path}' not found.") spec = importlib.util.spec_from_file_location("custom_module", file_path) assert spec is not None module = importlib.util.module_from_spec(spec) try: sys.modules["custom_module"] = module assert spec.loader is not None spec.loader.exec_module(module) except Exception as e: raise RuntimeError(f"Error loading module from '{file_path}': {e}") from e if not hasattr(module, function_name): raise AttributeError(f"Reward function '{function_name}' not found in '{module.__file__}'.") print(f"using customized reward function '{function_name}' from '{module.__file__}'") raw_fn = getattr(module, function_name) reward_kwargs = dict(reward_fn_config.get("reward_kwargs", {})) if not inspect.iscoroutinefunction(raw_fn): return partial(_call_with_kwargs, raw_fn, reward_kwargs) else: return partial(_call_with_kwargs_async, raw_fn, reward_kwargs) def load_reward_manager( config: DictConfig, tokenizer: Any, num_examine: int, **reward_kwargs: Any ) -> AbstractRewardManager: """ Load and initialize a reward manager based on the configuration. Args: config: PPO trainer configuration object containing reward_model fields. tokenizer: Tokenizer object used for processing text. 
num_examine: Number of samples to examine. **reward_kwargs: Additional keyword arguments for the reward manager. Returns: An instance of the specified reward manager class. """ # Try to get a custom reward function based on the configuration; # a user-defined reward function can be supplied via the `custom_reward_function` config compute_score = get_custom_reward_fn(config) final_compute_score = compute_score # The list of pre-defined reward managers is defined in `verl/workers/reward_manager/`: # naive: NaiveRewardManager # prime: PrimeRewardManager # batch: BatchRewardManager # dapo: DAPORewardManager # Note(haibin.lin): For custom reward managers, please make sure they are imported and # registered via `verl.workers.reward_manager.register` # By default, reward_manager is set to naive (NaiveRewardManager) reward_manager_name = config.reward_model.get("reward_manager", "naive") reward_manager_cls = get_reward_manager_cls(reward_manager_name) if compute_score is None: sandbox_config = config.reward_model.get("sandbox_fusion") sandbox_url = sandbox_config.get("url") if sandbox_config else None memory_limit_mb = sandbox_config.get("memory_limit_mb", 1024) if sandbox_config else 1024 if sandbox_url: sandbox_manager = multiprocessing.Manager() # Create a semaphore to control concurrent access to the sandbox _concurrent_semaphore = sandbox_manager.Semaphore(sandbox_config.get("max_concurrent", 64)) final_compute_score = partial( default_compute_score, sandbox_fusion_url=sandbox_url, concurrent_semaphore=_concurrent_semaphore, memory_limit_mb=memory_limit_mb, ) else: final_compute_score = default_compute_score # Instantiate and return the reward manager with the specified parameters return reward_manager_cls( tokenizer=tokenizer, num_examine=num_examine, compute_score=final_compute_score, reward_fn_key=config.data.reward_fn_key, **reward_kwargs, ) @tqbridge(put_data=False) def compute_reward(data: DataProto, reward_fn: AbstractRewardManager) -> tuple[torch.Tensor, dict[str, Any]]: """ Compute reward for a batch of data. Args: data: DataProto object containing the input data. reward_fn: Reward function to compute the reward. Returns: Tuple of reward tensor and extra info dictionary. """ try: reward_result = reward_fn(data, return_dict=True) reward_tensor = reward_result["reward_tensor"] reward_extra_infos_dict = reward_result.get("reward_extra_info", {}) except Exception as e: print(f"Error in reward_fn: {e}") reward_tensor = reward_fn(data) reward_extra_infos_dict = {} return reward_tensor, reward_extra_infos_dict @ray.remote(num_cpus=1) def compute_reward_async(data: DataProto, config=None, tokenizer=None, reward_fn=None): """ Load the reward manager and compute the reward for a batch of data. This is meant to be run in a separate Ray worker. """ if reward_fn is None: assert config is not None and tokenizer is not None, ( "config and tokenizer must not be None when reward_fn is None" ) warnings.warn("using config and tokenizer with compute_reward_async is deprecated", stacklevel=2) reward_fn = load_reward_manager( config, tokenizer, num_examine=0, **config.reward_model.get("reward_kwargs", {}) ) return compute_reward(data, reward_fn) ================================================ FILE: verl_distillation/verl/trainer/ppo/utils.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from enum import Enum from omegaconf import DictConfig from verl.single_controller.base import Worker from verl.trainer.ppo.core_algos import AdvantageEstimator WorkerType = type[Worker] class Role(Enum): """ To create more roles dynamically, you can subclass Role and add new members """ Actor = 0 Rollout = 1 ActorRollout = 2 Critic = 3 RefPolicy = 4 RewardModel = 5 ActorRolloutRef = 6 def __str__(self): return self._get_role_string() def _get_role_string(self): role_mapping = { Role.Actor: "actor", Role.Rollout: "rollout", Role.ActorRollout: "actor_rollout", Role.Critic: "critic", Role.RefPolicy: "ref", Role.RewardModel: "rm", Role.ActorRolloutRef: "actor_rollout_ref", } return role_mapping.get(self, self.name.lower()) @classmethod def from_string(cls, name: str): string_mapping = { "actor": cls.Actor, "rollout": cls.Rollout, "actor_rollout": cls.ActorRollout, "critic": cls.Critic, "ref": cls.RefPolicy, "rm": cls.RewardModel, "actor_rollout_ref": cls.ActorRolloutRef, } role = string_mapping.get(name.lower()) if role is None: raise ValueError(f"No Role found for string: {name}") return role def need_reference_policy( role_worker_mapping: dict[Role, WorkerType], ) -> bool: """Given a role worker mapping, do we need ref policy.""" return Role.RefPolicy in role_worker_mapping def need_reward_model( role_worker_mapping: dict[Role, WorkerType], ) -> bool: """Given a role worker mapping, do we need reward model.""" return Role.RewardModel in role_worker_mapping def need_critic(config: DictConfig) -> bool: """Given a config, do we need critic.""" if config.critic.enable is not None: return bool(config.critic.enable) elif config.algorithm.adv_estimator == AdvantageEstimator.GAE: return True else: warnings.warn( "Disabled critic as algorithm.adv_estimator != gae. If it is not intended, please set critic.enable=True", stacklevel=2, ) return False ================================================ FILE: verl_distillation/verl/trainer/runtime_env.yaml ================================================ working_dir: ./ excludes: ["/.git/"] env_vars: TORCH_NCCL_AVOID_RECORD_STREAMS: "1" CUDA_DEVICE_MAX_CONNECTIONS: "1" ================================================ FILE: verl_distillation/verl/trainer/sft_trainer.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
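# Illustrative launch sketch (hypothetical paths and override keys): this trainer is SPMD-style
# and Hydra-driven (see `main` below, config name "sft_trainer_engine"), so a run looks roughly
# like:
#
#     torchrun --nproc_per_node=8 -m verl.trainer.sft_trainer \
#         data.train_files=/data/sft/train.parquet \
#         data.train_batch_size=256 \
#         trainer.total_epochs=1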
import os from functools import partial os.environ["NCCL_DEBUG"] = "WARN" os.environ["TOKENIZERS_PARALLELISM"] = "true" import logging import hydra import torch import torch.distributed from codetiming import Timer from omegaconf import OmegaConf from torch.utils.data import DistributedSampler from torchdata.stateful_dataloader import StatefulDataLoader from tqdm import tqdm from verl.utils import tensordict_utils as tu from verl.utils.checkpoint import CheckpointHandler from verl.utils.dataset.dataset_utils import SFTTensorCollator from verl.utils.dataset.multiturn_sft_dataset import MultiTurnSFTDataset from verl.utils.device import get_device_name, is_cuda_available, is_npu_available from verl.utils.distributed import destroy_global_process_group from verl.utils.flops_counter import FlopsCounter from verl.utils.logger import log_with_rank from verl.utils.tracking import Tracking if is_cuda_available: pass elif is_npu_available: pass logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_SFT_LOGGING_LEVEL", "WARN")) class SFTTrainer: def __init__( self, config, ): self.config = config self.rank = torch.distributed.get_rank() self._build_config() self._build_dataset() self._build_engine() self._build_dataloader() self._init_engine() self._build_ckpt_handler() # Initialize resume-related variables self.resume_global_step = self.ckpt_handler.load_checkpoint() self.device_name = self.config.trainer.device from verl.workers.roles.utils.losses import sft_loss self.loss_fn = partial(sft_loss, config=None) self.flops_counter = FlopsCounter(self.model_config.hf_config) if self.rank == 0: print(self.config) def _build_ckpt_handler(self): resume_mode = getattr(self.config.trainer, "resume_mode", "auto") resume_from_path = getattr(self.config.trainer, "resume_from_path", None) max_ckpt_to_keep = getattr(self.config.trainer, "max_ckpt_to_keep", None) default_hdfs_dir = getattr(self.config.trainer, "default_hdfs_dir", None) self.ckpt_handler = CheckpointHandler( engine=self.engine, train_dataloader=self.train_dataloader, default_local_dir=self.config.trainer.default_local_dir, max_ckpt_to_keep=max_ckpt_to_keep, default_hdfs_dir=default_hdfs_dir, resume_mode=resume_mode, resume_from_path=resume_from_path, ) def _build_config(self): from verl.utils.config import omega_conf_to_dataclass self.model_config = omega_conf_to_dataclass(self.config.model) self.engine_config = omega_conf_to_dataclass(self.config.engine) self.optimizer_config = omega_conf_to_dataclass(self.config.optim) self.checkpoint_config = omega_conf_to_dataclass(self.config.checkpoint) def _build_engine(self): from verl.workers.engine import BaseEngine, EngineRegistry self.engine: BaseEngine = EngineRegistry.new( model_type="language_model", backend=self.engine_config.strategy, model_config=self.model_config, engine_config=self.engine_config, optimizer_config=self.optimizer_config, checkpoint_config=self.checkpoint_config, ) def _init_engine(self): # patch optimizer config if self.config.trainer.total_training_steps is not None: self.total_training_steps = self.config.trainer.total_training_steps else: self.total_training_steps = len(self.train_dataloader) * self.config.trainer.total_epochs self.optimizer_config.total_training_steps = self.total_training_steps self.steps_per_epoch = len(self.train_dataloader) # manage save and test frequency self.save_freq = self.config.trainer.save_freq if self.save_freq == "after_each_epoch": self.save_freq = self.steps_per_epoch self.test_freq = self.config.trainer.test_freq if 
self.test_freq == "after_each_epoch": self.test_freq = self.steps_per_epoch self.engine.initialize() def _build_dataset(self): config = self.config tokenizer = self.model_config.tokenizer train_dataset = create_sft_dataset( config.data.train_files, config.data, tokenizer, max_samples=config.data.get("train_max_samples", -1) ) if config.data.val_files: val_dataset = create_sft_dataset( config.data.val_files, config.data, tokenizer, max_samples=config.data.get("val_max_samples", -1) ) else: val_dataset = None self.train_dataset, self.val_dataset = train_dataset, val_dataset def _build_dataloader(self): # build dataset config = self.config # build dataloader # Use data parallel rank and size instead of global rank and world size # Set pin_memory_device when pin_memory is enabled. device_name = get_device_name() dp_rank = self.engine.get_data_parallel_rank() dp_size = self.engine.get_data_parallel_size() self.train_sampler = DistributedSampler( self.train_dataset, shuffle=True, num_replicas=dp_size, rank=dp_rank, drop_last=True ) self.global_batch_size = config.data.train_batch_size self.train_batch_size_per_dp = self.global_batch_size // dp_size self.collate_fn = SFTTensorCollator(config.data.pad_mode) self.train_dataloader = StatefulDataLoader( dataset=self.train_dataset, batch_size=self.train_batch_size_per_dp, sampler=self.train_sampler, collate_fn=self.collate_fn, num_workers=8, pin_memory=True, drop_last=True, pin_memory_device=device_name, ) if self.val_dataset: self.val_sampler = DistributedSampler( self.val_dataset, shuffle=False, num_replicas=dp_size, rank=dp_rank, drop_last=True ) self.val_dataloader = StatefulDataLoader( dataset=self.val_dataset, batch_size=self.train_batch_size_per_dp, sampler=self.val_sampler, collate_fn=self.collate_fn, num_workers=8, pin_memory=True, drop_last=True, pin_memory_device=device_name, ) else: self.val_dataloader = None def fit(self): is_logging = self.engine.is_mp_src_rank_with_outputs() and self.engine.get_data_parallel_rank() == 0 # TODO: add a unified tracking if is_logging: tracking = Tracking( project_name=self.config.trainer.project_name, experiment_name=self.config.trainer.experiment_name, default_backend=self.config.trainer.logger, config=OmegaConf.to_container(self.config, resolve=True), ) global_step = self.resume_global_step # Start from resumed step last_valid_metric = None log_with_rank( f"Total training steps: {self.total_training_steps},", logger=logger, rank=0, log_only_rank_0=True, ) # With StatefulDataLoader, we don't need to manually calculate epochs and steps # The dataloader will automatically resume from where it left off if global_step > 0: log_with_rank( f"StatefulDataLoader will automatically resume from global step: {global_step}", logger=logger, rank=0, log_only_rank_0=True, ) # Calculate which epoch we're starting from for sampler.set_epoch() start_epoch = global_step // self.steps_per_epoch meta_info = { "use_remove_padding": self.config.model.use_remove_padding, "use_dynamic_bsz": self.config.data.use_dynamic_bsz, "max_token_len_per_gpu": self.config.data.max_token_len_per_gpu, "micro_batch_size_per_gpu": self.config.data.micro_batch_size_per_gpu, "temperature": 1.0, "global_batch_size": self.global_batch_size, "pad_mode": self.config.data.pad_mode, "pad_token_id": self.model_config.tokenizer.pad_token_id, } train_time = 0 total_tokens = 0 for epoch in range(start_epoch, self.config.trainer.total_epochs): self.train_sampler.set_epoch(epoch=epoch) for step_in_epoch, data in enumerate( tqdm( self.train_dataloader, 
initial=global_step % self.steps_per_epoch if epoch == start_epoch else 0, total=self.steps_per_epoch, desc=f"Epoch {epoch + 1}/{self.config.trainer.total_epochs}", disable=not is_logging, ) ): global_step += 1 # construct tensordict data = tu.get_tensordict(tensor_dict=data, non_tensor_dict=meta_info) with self.engine.train_mode(): with Timer(name="update_policy", logger=None) as timer: output = self.engine.train_batch(data=data, loss_function=self.loss_fn) lr = self.engine.lr_scheduler_step() if self.engine.is_mp_src_rank_with_outputs(): metrics = output["metrics"] loss = torch.sum(torch.tensor(metrics["loss"], device=self.device_name)) # mean over dp group is_nested = data["input_ids"].is_nested if is_nested: batch_seqlens: torch.Tensor = data["input_ids"].offsets().diff() else: batch_seqlens: torch.Tensor = data["attention_mask"].sum(dim=-1) batch_seqlens = batch_seqlens.to(self.device_name) # (global_bsz // dp) output_tensor = torch.empty( (batch_seqlens.shape[0] * self.engine.get_data_parallel_size(),), dtype=batch_seqlens.dtype, device=self.device_name, ) # (global_bsz,); placeholder, filled by the all_gather below torch.distributed.all_gather_into_tensor( output_tensor=output_tensor, input_tensor=batch_seqlens, group=self.engine.get_data_parallel_group(), ) torch.distributed.all_reduce( loss, op=torch.distributed.ReduceOp.AVG, group=self.engine.get_data_parallel_group() ) batch_seqlens = output_tensor.tolist() loss = loss.item() # TODO: we could accumulate metrics for N steps and aggregate them metrics.pop("loss") metrics["train/loss"] = loss metrics["train/grad_norm"] = metrics.pop("grad_norm") metrics["train/lr"] = lr metrics["train/global_tokens"] = output_tensor.sum().item() total_tokens += metrics["train/global_tokens"] metrics["train/total_tokens(B)"] = total_tokens / 1e9 # mfu delta_time = timer.last # accumulate wall-clock time spent in train steps (reported at the end) train_time += delta_time estimated_flops, promised_flops = self.flops_counter.estimate_flops(batch_seqlens, delta_time) metrics["train/mfu"] = estimated_flops / promised_flops / torch.distributed.get_world_size() if self.engine.get_data_parallel_rank() == 0: tracking.log(data=metrics, step=global_step) is_last_step = global_step >= self.total_training_steps # guard against test_freq/save_freq being 0 (disabled) to avoid a modulo-by-zero is_valid_step = self.test_freq > 0 and global_step % self.test_freq == 0 is_save_step = self.save_freq > 0 and global_step % self.save_freq == 0 # early exit or validation step; only validate when a val dataloader exists if self.val_dataloader is not None and (is_last_step or is_valid_step): # Perform validation val_losses = [] for val_data in self.val_dataloader: with self.engine.eval_mode(): # construct tensordict val_data = tu.get_tensordict(tensor_dict=val_data, non_tensor_dict=meta_info) output = self.engine.infer_batch(data=val_data, loss_function=self.loss_fn) if self.engine.is_mp_src_rank_with_outputs(): val_losses.extend(output["metrics"]["loss"]) if self.engine.is_mp_src_rank_with_outputs(): val_loss = torch.mean(torch.tensor(val_losses, device=self.device_name)) # average over data parallel group torch.distributed.all_reduce( val_loss, op=torch.distributed.ReduceOp.AVG, group=self.engine.get_data_parallel_group() ) if is_logging: metric = {"val/loss": val_loss.detach().item()} tracking.log(data=metric, step=global_step) last_valid_metric = metric torch.distributed.barrier() if is_last_step or is_save_step: self.ckpt_handler.save_checkpoint(step=global_step) if is_last_step: if is_logging: print(f"Total time for train steps: {train_time:.2f}s") print(f"Final validation metrics: {last_valid_metric}") return def run_sft(config): from verl.utils.distributed import initialize_global_process_group
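    # torch.distributed must be initialized before SFTTrainer is constructed,
    # since its __init__ queries torch.distributed.get_rank()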
initialize_global_process_group() trainer = SFTTrainer(config=config) trainer.fit() destroy_global_process_group() @hydra.main(config_path="config", config_name="sft_trainer_engine", version_base=None) def main(config): run_sft(config) def create_sft_dataset(data_paths, data_config, tokenizer, max_samples=-1): """Create a dataset.""" # build dataset # First check if a custom dataset class is specified if data_config.custom_cls.get("path", None): from verl.utils.import_utils import load_extern_type dataset_cls = load_extern_type(data_config.custom_cls.path, data_config.custom_cls.name) else: # Default to multi-turn dataset dataset_cls = MultiTurnSFTDataset # Create datasets based on the selected class dataset = dataset_cls(parquet_files=data_paths, tokenizer=tokenizer, config=data_config, max_samples=max_samples) return dataset if __name__ == "__main__": main() ================================================ FILE: verl_distillation/verl/utils/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from . import config, tokenizer from .config import omega_conf_to_dataclass, validate_config from .groupwise import as_torch_index, group_mean_std from .tokenizer import hf_processor, hf_tokenizer __all__ = ( tokenizer.__all__ + config.__all__ + ["hf_processor", "hf_tokenizer", "omega_conf_to_dataclass", "validate_config"] + ["as_torch_index", "group_mean_std"] ) ================================================ FILE: verl_distillation/verl/utils/activation_offload.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright (c) 2022-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
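# Illustrative usage sketch (placeholder model; `enable_activation_offloading` is defined at the
# bottom of this module):
#
#     from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
#     from verl.utils.activation_offload import enable_activation_offloading
#
#     fsdp_model = FSDP(model)  # hypothetical FSDP-wrapped transformer with per-layer wrapping
#     enable_activation_offloading(fsdp_model, strategy="fsdp", enable_ckpt=True)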
"""Functionality for CPU offloading of tensors saved for backward pass.""" from __future__ import annotations import functools import logging import os from typing import Any, Optional import torch from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from verl.utils.device import get_torch_device from verl.utils.fsdp_utils import FSDPModule as FSDP2 logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) def _get_unique_tensor_key(tensor): key = (tensor.untyped_storage().data_ptr() + tensor.storage_offset(), tensor.dtype) return key class FSDPParameterFilter: def __init__(self): self.model_parameters_storage = set() def __call__(self, tensor): return tensor.untyped_storage().data_ptr() not in self.model_parameters_storage def update_model_parameters(self, model): new_storage = set() for p in model.parameters(): new_storage.add(p.data.untyped_storage().data_ptr()) self.model_parameters_storage = new_storage class CpuOffloadHookWithOffloadHandler: """Context-manager that offloads/recovers tensors through an offload hander. The hook just offloads/recovers the tensor object to the handler through `tensor_push` and `tensor_pop` interface. How the offload-handler manages the offloading, recovering or prefetching timing is transparent to this hook. """ def __init__( self, offload_handler: OffloadHandler, handler_extra_kwargs: Optional[dict[str, Any]] = None, ) -> None: if handler_extra_kwargs is None: handler_extra_kwargs = {} self.offload_handler: OffloadHandler = offload_handler self.handler_extra_kwargs: dict[str, Any] = handler_extra_kwargs self.inside_context = False def __enter__(self): self.inside_context = True torch._C._autograd._push_saved_tensors_default_hooks(self.on_save_for_backward, self.on_get_saved_tensor) def __exit__(self, *args: Any): self.inside_context = False torch._C._autograd._pop_saved_tensors_default_hooks() def on_save_for_backward(self, tensor: torch.Tensor) -> Any: retrieve_identifier = self.offload_handler.tensor_push(tensor, **self.handler_extra_kwargs) return retrieve_identifier def on_get_saved_tensor(self, saved_state: Any) -> torch.Tensor: tensor = self.offload_handler.tensor_pop(saved_state, **self.handler_extra_kwargs) return tensor class OffloadHandler: """A base class for CPU offload-handler.""" def __init__(self) -> None: pass def tensor_push(self, tensor: torch.Tensor, **kwargs) -> Any: """Tensor push.""" raise NotImplementedError( "`tensor_push is not implented in OffloadHandler class. Inherit this class and implement your " "custom tensor_push." ) def tensor_pop(self, tensor_tag: Any, **kwargs): """Tensor pop.""" raise NotImplementedError( "`tensor_pop is not implented in OffloadHandler class. Inherit this class and implement your " "custom tensor_pop." ) class GroupCommitFunction(torch.autograd.Function): """this is a dummy op with output identical to input. However, it is necessary for marking a timepoint for offload handler to accomplish all synchronizations. Implementing it as a function is necessary because we need to actions in both forward and backward. 
""" @staticmethod def forward(ctx, tensor, cpu_offload_handler): # pylint: disable=missing-function-docstring cpu_offload_handler.on_group_commit_forward() ctx.cpu_offload_handler = cpu_offload_handler # return the identical tensor return tensor @staticmethod def backward(ctx, grad_output): # pylint: disable=missing-function-docstring cpu_offload_handler = ctx.cpu_offload_handler cpu_offload_handler.on_group_commit_backward() return grad_output, None group_prefetch_offload_commit = GroupCommitFunction.apply class SynchronizedGroupOffloadHandler(OffloadHandler): """Offload Handler that offloads/reloads in a synchronized way. The device-to-host and host-to-device copying happen in the same stream as the computation kernels, thus the copying will block computation. """ def __init__(self, num_offload_group, tensor_need_offloading_checker=(lambda _: True)) -> None: super().__init__() self.num_offload_group = num_offload_group self.tensor_need_offloading_checker = tensor_need_offloading_checker self.groupid_reset() def groupid_reset(self): """Groupid reset.""" # Data structures to label saved tensors and book-keep their cpu copies. # Currently, on push, create a new cpu tensor and copies; on pop, copies # the tensor back to gpu and deletes the cpu tensor. # These will increment whenever `group_commit()` is invoked self.current_group, self.tensor_count_current_group = (0, 0) self.torch_tensor_count = 0 self.tensor_tag_to_state = {} def on_group_commit_forward(self): """On group commit forward.""" # finishing up with updating current group and tensor count self.current_group += 1 # increment self.tensor_count_current_group = 0 # reset def on_group_commit_backward(self): """On group commit backward.""" self.current_group -= 1 assert self.current_group >= 0 @staticmethod def offload(src_tensor, pin_memory=True): """Offload.""" cpu_backup = torch.empty( src_tensor.size(), dtype=src_tensor.dtype, layout=src_tensor.layout, device="cpu", pin_memory=pin_memory, ) cpu_backup.copy_(src_tensor, non_blocking=True) state = (src_tensor.device, cpu_backup) return state @staticmethod def reload(state, non_blocking=None): """Reload.""" dev, cpu_backup = state if non_blocking is None: non_blocking = cpu_backup.is_pinned() return cpu_backup.to(dev, non_blocking=non_blocking) def tensor_push(self, tensor: torch.Tensor, **kwargs): """Tensor push.""" # obtain a unique tensor tag tensor_tag = (self.current_group, self.tensor_count_current_group) self.tensor_count_current_group += 1 assert tensor_tag not in self.tensor_tag_to_state if self.current_group < self.num_offload_group and self.tensor_need_offloading_checker(tensor): state = SynchronizedGroupOffloadHandler.offload(tensor) self.tensor_tag_to_state[tensor_tag] = state else: # will be offloaded together after group commit self.tensor_tag_to_state[tensor_tag] = tensor return tensor_tag def tensor_pop(self, tensor_tag, **kwargs): """Tensor pop.""" assert tensor_tag in self.tensor_tag_to_state state = self.tensor_tag_to_state.pop(tensor_tag) if isinstance(state, tuple): tensor = SynchronizedGroupOffloadHandler.reload(state) else: tensor = state return tensor class AsyncDoubleBufferGroupOffloadHandler(SynchronizedGroupOffloadHandler): """Compared to synchronize, this uses more memory because of the buffer but achieves better performance due to the overlapping. D2h and h2d copying are completely hidden behind computation if computation time of a layer is longer than host-device communication time. 
Bulk offloading with delay and bulk reloading with prefetch are implemented.""" def __init__( self, num_offload_group, # must be <= actual number of groups (number of commits) num_model_group, tensor_need_offloading_checker=(lambda t: True), ) -> None: super().__init__( num_offload_group=num_offload_group, tensor_need_offloading_checker=tensor_need_offloading_checker, ) # Number of layers in the model self.num_layers = num_model_group # Data Structure to maintain reference to activation tensors self.tensor_tag_to_buf = {} # Tracking the number of layers offloaded self.offloaded_group_count = 0 # Core data structure that decides the window for offloading self.layer_window_map = {} self.group_offload_mapping = {} # Logic to make offloading load balance across computation # for optimal CPU/GPU interconnect usage constant = 0 for i in range(self.num_offload_group): self.layer_window_map[i] = ((self.num_layers // self.num_offload_group) * (i + 1)) - 1 if i < (self.num_layers % self.num_offload_group): self.layer_window_map[i] += i + 1 constant = i + 1 else: self.layer_window_map[i] += constant # allocate streams and events for synchronization self.d2h_stream = get_torch_device().Stream() self.h2d_stream = get_torch_device().Stream() def tensor_push(self, tensor: torch.Tensor, **kwargs) -> Any: torch_stray_tensor = isinstance( tensor, torch._subclasses.fake_tensor.FakeTensor | torch._subclasses.functional_tensor.FunctionalTensor, ) need_offload = not torch_stray_tensor need_offload = need_offload and self.tensor_need_offloading_checker(tensor) if need_offload: # obtain a unique tensor tag tensor_tag = (self.current_group, self.tensor_count_current_group) self.tensor_count_current_group += 1 assert tensor_tag not in self.tensor_tag_to_state self.tensor_tag_to_state[tensor_tag] = tensor if self.current_group < self.num_offload_group: self.tensor_tag_to_buf[tensor_tag] = tensor else: tensor_tag = tensor return tensor_tag def tensor_pop(self, tensor_tag, **kwargs): """Tensor pop.""" if isinstance(tensor_tag, torch.Tensor): return tensor_tag assert tensor_tag in self.tensor_tag_to_state tensor = self.tensor_tag_to_state.pop(tensor_tag) self.tensor_tag_to_buf.pop(tensor_tag, None) # the tensor should have been copied back in on_group_commit_backward() # which invokes bulk_reload_group. 
assert not isinstance(tensor, tuple) return tensor def bulk_offload_group(self, group_to_offload): """Bulk offload group.""" offload_mapping = {} offload_size = 0 with get_torch_device().stream(self.d2h_stream): for tensor_tag, state in self.tensor_tag_to_state.items(): group_id, _ = tensor_tag if group_id == group_to_offload: assert not isinstance(state, tuple) key = _get_unique_tensor_key(state) if key not in offload_mapping: offload_mapping[key] = state # if offload, return the reference to cpu copy self.tensor_tag_to_state[tensor_tag] = (key, state.shape) for key, tensor in offload_mapping.items(): state = SynchronizedGroupOffloadHandler.offload(tensor) offload_size += tensor.numel() * tensor.element_size() offload_mapping[key] = state self.group_offload_mapping[group_to_offload] = offload_mapping def synchronize_on_group_commit_forward(self, current_group): """Synchronize on group commit forward.""" # For the first group, kickstart the offload after we have # the first compute completion if current_group == 0: self.d2h_stream.wait_stream(get_torch_device().current_stream()) self.bulk_offload_group(current_group) # Window map data structure helps us synchronize based on number # of layers offloaded if self.layer_window_map[self.offloaded_group_count] == current_group: # Stream synchronization both ways self.d2h_stream.wait_stream(get_torch_device().current_stream()) get_torch_device().current_stream().wait_stream(self.d2h_stream) # Time to free the activation memory after usage for tensor_tag, _ in self.tensor_tag_to_buf.items(): if tensor_tag[0] == self.offloaded_group_count: self.tensor_tag_to_buf[tensor_tag] = None # Time to offload the next group if self.offloaded_group_count < (self.num_offload_group - 1): self.bulk_offload_group(self.offloaded_group_count + 1) # Increment the offload group count to keep track self.offloaded_group_count += 1 def on_group_commit_forward(self): """This function will cause host device synchronization""" # handle synchronization events self.synchronize_on_group_commit_forward(self.current_group) super().on_group_commit_forward() @torch.no_grad def bulk_reload_group(self, group_to_reload): """Bulk reload group.""" assert group_to_reload < self.num_offload_group with get_torch_device().stream(self.h2d_stream): # move back tensors offload_mapping = self.group_offload_mapping.pop(group_to_reload) assert offload_mapping is not None for key, state in offload_mapping.items(): offload_mapping[key] = SynchronizedGroupOffloadHandler.reload(state) for tensor_label, state in self.tensor_tag_to_state.items(): group_id, _ = tensor_label if group_id == group_to_reload and not isinstance(state, torch.Tensor): assert isinstance(state, tuple), f"{group_id} {state}" key, shape = state recovered_tensor = offload_mapping[key].view(shape) self.tensor_tag_to_state[tensor_label] = recovered_tensor def on_group_commit_backward(self): # first decrement the current group. # after last commit in forward, the group will +1; in backward it -1. # Finally it should be decremented to 0. 
self.current_group -= 1 assert self.current_group >= 0 # Layer window data structure helps us to reload at right times if self.layer_window_map[self.offloaded_group_count - 1] == self.current_group: # Stream synchronization both ways self.h2d_stream.wait_stream(get_torch_device().current_stream()) get_torch_device().current_stream().wait_stream(self.h2d_stream) # Time to reload the next group self.bulk_reload_group(self.offloaded_group_count - 1) # Decrease the offloading group counter self.offloaded_group_count -= 1 if self.offloaded_group_count > 1 else 0 # Last group computation needs to wait till all the reloads complete if self.current_group == 0: get_torch_device().current_stream().wait_stream(self.h2d_stream) self.offloaded_group_count = 0 def get_activation_offload_context( num_layers: int = 1, model_layers: int = 1, tensor_need_offloading_checker=(lambda t: True) ): cpu_offload_handler = AsyncDoubleBufferGroupOffloadHandler( num_offload_group=num_layers, num_model_group=model_layers, tensor_need_offloading_checker=tensor_need_offloading_checker, ) def group_prefetch_offload_commit_async(tensor): return group_prefetch_offload_commit(tensor, cpu_offload_handler) return ( CpuOffloadHookWithOffloadHandler(offload_handler=cpu_offload_handler), group_prefetch_offload_commit_async, ) class ActivationHandler: def __init__(self, offload_ctx, sync_func, tensor_filter, enable_ckpt): self._offload_ctx = offload_ctx self._sync_func = sync_func self._enable_ckpt = enable_ckpt self._tensor_filter = tensor_filter if enable_ckpt: self.checkpoint_fn = functools.partial( torch.utils.checkpoint.checkpoint, use_reentrant=True, ) def pre_forward(self, module): if module.training: self._offload_ctx.__enter__() self._tensor_filter.update_model_parameters(module) def post_forward(self, module): if module.training: self._offload_ctx.__exit__(None, None, None) def _pack_kwargs(self, *args, **kwargs): kwarg_keys = [] flat_args = list(args) for k, v in kwargs.items(): kwarg_keys.append(k) flat_args.append(v) return tuple(flat_args), tuple(kwarg_keys) def _unpack_kwargs(self, flat_args, kwarg_keys): assert len(kwarg_keys) <= len(flat_args), f"too many keys {len(kwarg_keys)} vs. 
{len(flat_args)}" if len(kwarg_keys) == 0: return flat_args, {} args = flat_args[: -len(kwarg_keys)] kwargs = dict(zip(kwarg_keys, flat_args[-len(kwarg_keys) :], strict=True)) return args, kwargs def _ckpt_forward(self, forward_method, *args, **kwargs): flat_args, kwarg_keys = self._pack_kwargs(*args, **kwargs) def my_function(*inputs): # unpack back into args and kwargs nonlocal forward_method, kwarg_keys unpacked_args, unpacked_kwargs = self._unpack_kwargs(inputs, kwarg_keys) # run original module return forward_method(*unpacked_args, **unpacked_kwargs) return self.checkpoint_fn( my_function, *flat_args, ) def forward(self, module, forward_method, *args, **kwargs): if not module.training: return forward_method(*args, **kwargs) if not self._enable_ckpt: ret = forward_method(*args, **kwargs) else: ret = self._ckpt_forward(forward_method, *args, **kwargs) binded_tensor = ret if isinstance(ret, tuple): binded_tensor = ret[0] binded_tensor = self._sync_func(binded_tensor) final_ret = binded_tensor if isinstance(ret, tuple): final_ret = (final_ret,) + ret[1:] return final_ret def wrap_module_forward_method(self, module): orig_method = module.forward handler = self @functools.wraps(orig_method) def wrapped_method(model_self, *args, **kwargs): nonlocal handler handler.pre_forward(model_self) out = handler.forward(model_self, orig_method, *args, **kwargs) handler.post_forward(model_self) return out module.forward = wrapped_method.__get__(module, type(module)) def enable_activation_offloading(model, strategy, enable_ckpt=False): """ Enable activation offloading for the model. It groups activations by TransformerLayer and offloads activation groups asynchronously. This means that the offloading of the i-th activation group and the computation of the i+1-th activation group happen at the same time, and there are at most two activation groups in GPU memory. Args: model: the model to enable activation offloading strategy: the training strategy of the model, such as "fsdp" enable_ckpt: whether activation checkpointing(also called gradient checkpointing) has been enabled for the model Note: For best efficiency, activation offloading is usually combined with activation checkpointing. However, this implementation of activation offloading is conflicted with the implementation of activation checkpointing in some training strategies. This function resolves this conflict, and therefore requires the "strategy" and "enable_ckpt" arguments. Returns: """ assert strategy == "fsdp" or strategy == "fsdp2", "activation offloading only supports fsdp strategy" layers = [] def get_layers(module): for name, child in module.named_children(): if not isinstance(child, FSDP | FSDP2): get_layers(child) else: wrapped_module = child if isinstance(child, FSDP): wrapped_module = child._fsdp_wrapped_module # In some cases, torch.nn.Embedding is wrapped with FSDP alone. However, the activation # size of torch.nn.Embedding is small, so it's not necessary to offload it. 
if not isinstance(wrapped_module, torch.nn.Embedding): layers.append(child) get_layers(model) if len(layers) < 3: logger.warning(f"Found only {len(layers)} FSDP layers; no need to enable async activation offloading") return tensor_filter = FSDPParameterFilter() context, sync_func = get_activation_offload_context(len(layers) - 1, len(layers), tensor_filter) if enable_ckpt: # The activation checkpointing implemented in the transformers library is incompatible with # activation offloading, so it is disabled here. This module provides its own version of # activation checkpointing (see ActivationHandler), which allows the two features to be # enabled at the same time. for module in model.modules(): if hasattr(module, "gradient_checkpointing_disable"): module.gradient_checkpointing_disable() handler = ActivationHandler(context, sync_func, tensor_filter, enable_ckpt) for layer in layers: module = layer if isinstance(layer, FSDP): module = module._fsdp_wrapped_module handler.wrap_module_forward_method(module) ================================================ FILE: verl_distillation/verl/utils/attention_utils.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Callable _index_first_axis, _pad_input, _rearrange, _unpad_input = None, None, None, None def _get_attention_functions() -> tuple[Callable, Callable, Callable, Callable]: """Dynamically import attention functions based on available hardware.""" from verl.utils.device import is_cuda_available, is_npu_available global _index_first_axis, _pad_input, _rearrange, _unpad_input if is_cuda_available: from flash_attn.bert_padding import index_first_axis, pad_input, rearrange, unpad_input elif is_npu_available: from verl.utils.npu_utils import index_first_axis, pad_input, rearrange, unpad_input _index_first_axis, _pad_input, _rearrange, _unpad_input = index_first_axis, pad_input, rearrange, unpad_input return _index_first_axis, _pad_input, _rearrange, _unpad_input def index_first_axis(*args, **kwargs): """ Unified entry point for `index_first_axis` across CUDA and NPU backends. Dynamically dispatches to the appropriate device-specific implementation: - On CUDA: `flash_attn.bert_padding.index_first_axis` - On NPU: `transformers.integrations.npu_flash_attention.index_first_axis` (falls back to `transformers.modeling_flash_attention_utils._index_first_axis` in newer versions of transformers). Users can call this function directly without worrying about the underlying device. """ func, *_ = _get_attention_functions() return func(*args, **kwargs) def pad_input(*args, **kwargs): """ Unified entry point for `pad_input` across CUDA and NPU backends. Dynamically dispatches to the appropriate device-specific implementation: - On CUDA: `flash_attn.bert_padding.pad_input` - On NPU: `transformers.integrations.npu_flash_attention.pad_input` (falls back to `transformers.modeling_flash_attention_utils._pad_input` in newer versions of transformers).
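Example (a sketch following the flash-attn convention): `padded = pad_input(unpadded_states, indices, batch, seqlen)` restores a `(batch, seqlen, ...)` tensor from the rows previously gathered by `unpad_input`.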
Users can call this function directly without worrying about the underlying device. """ _, func, *_ = _get_attention_functions() return func(*args, **kwargs) def rearrange(*args, **kwargs): """ Unified entry point for `rearrange` across CUDA and NPU backends. Dynamically dispatches to the appropriate device-specific implementation: - On CUDA: `flash_attn.bert_padding.rearrange` - On NPU: `transformers.integrations.npu_flash_attention.rearrange` (falls back to `einops.rearrange` if no dedicated NPU implementation exists). Users can call this function directly without worrying about the underlying device. """ *_, func, _ = _get_attention_functions() return func(*args, **kwargs) def unpad_input(*args, **kwargs): """ Unified entry point for `unpad_input` across CUDA and NPU backends. Dynamically dispatches to the appropriate device-specific implementation: - On CUDA: `flash_attn.bert_padding.unpad_input` - On NPU: `transformers.integrations.npu_flash_attention.unpad_input` (falls back to `transformers.modeling_flash_attention_utils._unpad_input` in newer versions of transformers). Users can call this function directly without worrying about the underlying device. """ *_, func = _get_attention_functions() return func(*args, **kwargs) __all__ = ["index_first_axis", "pad_input", "rearrange", "unpad_input"] ================================================ FILE: verl_distillation/verl/utils/checkpoint/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .checkpoint_handler import CheckpointHandler __all__ = ["CheckpointHandler"] ================================================ FILE: verl_distillation/verl/utils/checkpoint/checkpoint_handler.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # TODO: add unit tests import logging import os import re import torch import verl.utils.hdfs_io as hdfs_io from verl.utils.checkpoint.checkpoint_manager import find_latest_ckpt_path, get_checkpoint_tracker_filename from verl.utils.logger import log_with_rank from verl.workers.engine import BaseEngine def extract_step(path): match = re.search(r"global_step_(\d+)", path) if match: return int(match.group(1)) return None logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_SFT_LOGGING_LEVEL", "WARN")) class CheckpointHandler: """ Checkpoint handler handles the path, global_step of a checkpoint folder. 
Currently, it only works with a single model. We can expand it to support multiple models. It is expected to be used with SPMD style (e.g., torchrun) """ def __init__( self, engine: BaseEngine, train_dataloader, *, default_local_dir, max_ckpt_to_keep=None, default_hdfs_dir=None, resume_mode="auto", resume_from_path=None, ): self.default_local_dir = default_local_dir self.max_ckpt_to_keep = max_ckpt_to_keep self.default_hdfs_dir = default_hdfs_dir self.resume_mode = resume_mode self.resume_from_path = resume_from_path self.engine = engine self.train_dataloader = train_dataloader self.rank = torch.distributed.get_rank() def save_checkpoint(self, step): """Save checkpoint using FSDPCheckpointManager with improved tracking""" from verl.utils.fs import local_mkdir_safe # Determine checkpoint path local_global_step_folder = os.path.join(self.default_local_dir, f"global_step_{step}") if self.rank == 0: print(f"Saving checkpoint to: {local_global_step_folder}") # Get max checkpoints to keep max_ckpt_to_keep = self.max_ckpt_to_keep # Use checkpoint manager to save self.engine.save_checkpoint( local_path=local_global_step_folder, global_step=step, max_ckpt_to_keep=max_ckpt_to_keep ) # Save dataloader state. Note that we only save the iterator in the train_dataloader. # So it's identical in each dp rank. if self.engine.is_mp_src_rank_with_outputs(): dp_rank = self.engine.get_data_parallel_rank() local_mkdir_safe(local_global_step_folder) dataloader_local_path = os.path.join(local_global_step_folder, f"data_{dp_rank}.pt") # Use StatefulDataLoader's built-in state dict functionality dataloader_state_dict = self.train_dataloader.state_dict() torch.save(dataloader_state_dict, dataloader_local_path) print(f"Saved dataloader state to: {dataloader_local_path}") if self.rank == 0: # Update latest checkpoint tracker (atomic write) tracker_file = get_checkpoint_tracker_filename(self.default_local_dir) temp_tracker_file = tracker_file + ".tmp" with open(temp_tracker_file, "w") as f: f.write(str(step)) os.rename(temp_tracker_file, tracker_file) print(f"Updated checkpoint tracker: {tracker_file}") # Copy to HDFS if configured if self.rank == 0 and self.default_hdfs_dir: hdfs_io.makedirs(self.default_hdfs_dir, exist_ok=True) hdfs_io.copy(src=local_global_step_folder, dst=self.default_hdfs_dir, dirs_exist_ok=True) torch.distributed.barrier() def load_checkpoint(self): # Determine resume path based on configuration checkpoint_path = self._determine_resume_path() if checkpoint_path is None: return 0 # extract resume step from checkpoint path resume_step = extract_step(checkpoint_path) if resume_step is None: log_with_rank( f"Warning: Could not extract step number from {checkpoint_path}, starting from step 0", logger=logger, rank=self.rank, level=logging.WARNING, log_only_rank_0=True, ) return 0 self.resume_global_step = resume_step # Use checkpoint manager to load model state self.engine.load_checkpoint(checkpoint_path) # Always load dataloader state for StatefulDataLoader self._load_dataloader_state(checkpoint_path) return resume_step def _load_dataloader_state(self, checkpoint_path: str): """Load dataloader state from checkpoint""" dp_rank = self.engine.get_data_parallel_rank() dataloader_path = os.path.join(checkpoint_path, f"data_{dp_rank}.pt") if os.path.exists(dataloader_path): # Use StatefulDataLoader's built-in state dict functionality dataloader_state_dict = torch.load(dataloader_path, map_location="cpu", weights_only=False) self.train_dataloader.load_state_dict(dataloader_state_dict) log_with_rank( 
f"Successfully loaded dataloader state from {dataloader_path}", logger=logger, rank=self.rank, log_only_rank_0=True, ) else: log_with_rank( f"Warning: No dataloader state found at {dataloader_path}, will start from scratch", logger=logger, rank=self.rank, level=logging.WARNING, log_only_rank_0=True, ) def _determine_resume_path(self): """Determine the path to resume from based on resume_mode configuration""" resume_mode = self.resume_mode resume_from_path = self.resume_from_path if resume_mode == "disable": return None elif resume_mode == "auto": if resume_from_path is not None: assert os.path.exists(resume_from_path), ( "resume_from_path must be null or an existing path when resume_mode is 'auto'" ) assert "global_step_" in resume_from_path, "resume_from_path must specify the global_steps" return resume_from_path # Try to find the latest checkpoint in the default directory return self._find_latest_checkpoint() elif resume_mode == "resume_path": assert os.path.exists(resume_from_path), ( "resume_from_path must be an existing path when resume_mode is 'resume_path'" ) assert "global_step_" in resume_from_path, "resume_from_path must specify the global_steps" return resume_from_path else: raise ValueError(f"Invalid resume_mode: {resume_mode}. Must be 'auto', 'disable', or 'resume_path'") def _find_latest_checkpoint(self): """Find the latest checkpoint in the default local directory""" checkpoint_dir = self.default_local_dir if not os.path.exists(checkpoint_dir): return None latest_checkpoint = find_latest_ckpt_path(checkpoint_dir) if latest_checkpoint and self.rank == 0: step_num = extract_step(latest_checkpoint) print(f"Found latest checkpoint: {latest_checkpoint} (step {step_num})") return latest_checkpoint ================================================ FILE: verl_distillation/verl/utils/checkpoint/checkpoint_manager.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import os import random import shutil import numpy as np import torch import torch.distributed from omegaconf import DictConfig from transformers import PreTrainedTokenizer, ProcessorMixin from verl.trainer.config import CheckpointConfig from verl.utils.device import get_device_name, get_torch_device class BaseCheckpointManager: """ A checkpoint manager that saves and loads the following states in a SPMD way: - model - optimizer - lr_scheduler - extra_states We save - sharded model states and optimizer states - full lr_scheduler states - huggingface tokenizer and config for ckpt merge """ def __init__( self, model, optimizer: torch.optim.Optimizer, lr_scheduler: torch.optim.lr_scheduler.LRScheduler = None, processing_class: PreTrainedTokenizer | ProcessorMixin = None, checkpoint_config: DictConfig | CheckpointConfig = None, ): self.checkpoint_config = checkpoint_config checkpoint_load_contents = checkpoint_config.get("load_contents", None) if checkpoint_config else None checkpoint_save_contents = checkpoint_config.get("save_contents", None) if checkpoint_config else None if checkpoint_load_contents is None: checkpoint_load_contents = ["model", "optimizer", "extra"] if checkpoint_save_contents is None: checkpoint_save_contents = ["model", "optimizer", "extra"] self.previous_global_step = None self.previous_saved_paths = [] self.model = model self.optimizer = optimizer self.lr_scheduler = lr_scheduler self.processing_class = processing_class self.checkpoint_load_contents = checkpoint_load_contents self.checkpoint_save_contents = checkpoint_save_contents self.rank = torch.distributed.get_rank() self.world_size = torch.distributed.get_world_size() @property def should_save_model(self) -> bool: """ Returns True if 'model' is in checkpoint_save_contents, indicating the model state should be saved. """ return "model" in self.checkpoint_save_contents @property def should_save_optimizer(self) -> bool: """ Returns True if 'optimizer' is in checkpoint_save_contents, indicating the optimizer state should be saved. """ return "optimizer" in self.checkpoint_save_contents @property def should_save_extra(self) -> bool: """ Returns True if 'extra' is in checkpoint_save_contents, indicating the extra state should be saved. """ return "extra" in self.checkpoint_save_contents @property def should_save_hf_model(self) -> bool: """ Returns True if 'hf_model' is in checkpoint_save_contents, indicating the model should be converted to hf model and saved. """ return "hf_model" in self.checkpoint_save_contents @property def should_load_model(self) -> bool: """ Returns True if 'model' is in checkpoint_load_contents, indicating the model state should be loaded. """ return "model" in self.checkpoint_load_contents @property def should_load_optimizer(self) -> bool: """ Returns True if 'optimizer' is in checkpoint_load_contents, indicating the optimizer state should be loaded. """ return "optimizer" in self.checkpoint_load_contents @property def should_load_extra(self) -> bool: """ Returns True if 'extra' is in checkpoint_load_contents, indicating the extra state should be loaded. 
""" return "extra" in self.checkpoint_load_contents def load_checkpoint(self, local_path: str, hdfs_path: str = None, del_local_after_load: bool = False): raise NotImplementedError def save_checkpoint( self, local_path: str, hdfs_path: str = None, global_step: int = 0, max_ckpt_to_keep: int = None ): raise NotImplementedError @staticmethod def checkpath(local_path: str, hdfs_path: str): assert local_path is not None or hdfs_path is not None, "local_path and hdfs_path cannot be both None" return local_path is not None, local_path if local_path is not None else hdfs_path def remove_previous_save_local_path(self, path): if isinstance(path, str): path = [path] for p in path: abs_path = os.path.abspath(p) print(f"Checkpoint manager remove previous save local path: {abs_path}") if not os.path.exists(abs_path): continue shutil.rmtree(abs_path, ignore_errors=True) @staticmethod def get_rng_state(): rng_state = { "cpu": torch.get_rng_state(), "numpy": np.random.get_state(), "random": random.getstate(), } if get_device_name() != "cpu": rng_state[get_device_name()] = get_torch_device().get_rng_state() return rng_state @staticmethod def load_rng_state(rng_state): torch.set_rng_state(rng_state["cpu"]) np.random.set_state(rng_state["numpy"]) random.setstate(rng_state["random"]) if get_device_name() != "cpu": get_torch_device().set_rng_state(rng_state[get_device_name()]) def find_latest_ckpt_path(path, directory_format="global_step_{}"): """ Return the most recent checkpoint directory based on a tracker file. Args: path (str): Base directory containing the checkpoint tracker. directory_format (str): Template for checkpoint subfolders with one placeholder for the iteration number (default "global_step_{}"). Returns: str or None: Full path to the latest checkpoint directory, or None if the tracker or checkpoint folder is missing. """ if path is None: return None tracker_file = get_checkpoint_tracker_filename(path) if not os.path.exists(tracker_file): if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0: print(f"Checkpoint tracker file does not exist: {tracker_file}") return None with open(tracker_file, "rb") as f: iteration = int(f.read().decode()) ckpt_path = os.path.join(path, directory_format.format(iteration)) if not os.path.exists(ckpt_path): print("Checkpoint does not exist: %s", ckpt_path) return None print("Found checkpoint: %s", ckpt_path) return ckpt_path def get_checkpoint_tracker_filename(root_path: str): """ Tracker file rescords the latest chckpoint during training to restart from. """ return os.path.join(root_path, "latest_checkpointed_iteration.txt") def should_save_ckpt_esi(max_steps_duration: float, save_ckpt_duration: float = 60, redundant_time: float = 0) -> bool: """ Determine if checkpoint should be saved based on capacity esi expiration. 
Args: max_steps_duration: Max estimated time (seconds) required to complete one training step save_ckpt_duration: Estimated time (seconds) required to save checkpoint (default: 60) redundant_time: Additional buffer time (seconds) for unexpected delays (default: 0) """ exp_ts_mlp = os.getenv("MLP_CURRENT_CAPACITY_BLOCK_EXPIRATION_TIMESTAMP") # vemlp exp_ts_aws = os.getenv("SAGEMAKER_CURRENT_CAPACITY_BLOCK_EXPIRATION_TIMESTAMP") # aws if exp_ts_mlp: try: import time remaining = float(exp_ts_mlp) - time.time() except ValueError: return False return ( remaining > 0 and max_steps_duration > 0 and remaining <= save_ckpt_duration + max_steps_duration + redundant_time ) elif exp_ts_aws: from datetime import datetime, timedelta expiration_time = datetime.fromtimestamp(int(exp_ts_aws)) time_difference = expiration_time - datetime.now() threshold_minutes = (save_ckpt_duration + max_steps_duration + redundant_time) / 60 return time_difference < timedelta(minutes=threshold_minutes) else: return False ================================================ FILE: verl_distillation/verl/utils/checkpoint/fsdp_checkpoint_manager.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import logging import os import warnings from dataclasses import asdict, dataclass from typing import Optional import torch import torch.distributed from accelerate import init_empty_weights from omegaconf import DictConfig from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp import ShardedOptimStateDictConfig, ShardedStateDictConfig, StateDictType from transformers import GenerationConfig, PreTrainedTokenizer, ProcessorMixin from transformers.dynamic_module_utils import custom_object_save from verl.utils.device import is_cuda_available from verl.utils.fs import copy_to_local, is_non_local, local_mkdir_safe from verl.utils.fsdp_utils import fsdp_version, get_fsdp_full_state_dict, get_fsdp_state_ctx from verl.utils.logger import log_with_rank from .checkpoint_manager import BaseCheckpointManager # Setup logging logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "INFO")) @dataclass class FSDPConfig: """Configuration for FSDP checkpointing. Args: FSDP_version (int): Version of FSDP being used. world_size (int): Number of processes in the distributed training setup. """ FSDP_version: int world_size: int class FSDPCheckpointManager(BaseCheckpointManager): """ Manage FSDP checkpointing in SPMD training. - Saves/loads per-rank sharded model & optimizer states - Persists full lr_scheduler and RNG state - Stores HF tokenizer/processor and model/config for unified restore Args: model (FSDP): Wrapped model instance. optimizer (Optimizer): Training optimizer. lr_scheduler (LRScheduler): Learning-rate scheduler. processing_class (PreTrainedTokenizer or ProcessorMixin, optional): Pre-/post-processing artifact handler. 
checkpoint_contents DictConfig: Configuration for checkpoint contents. - 'load': Components to load; must contain 'model'. Defaults to ['model', 'optimizer', 'extra']. - 'save': Components to save; must contain 'model'. Defaults to ['model', 'optimizer', 'extra']. """ def __init__( self, model: FSDP, optimizer: Optional[torch.optim.Optimizer] = None, lr_scheduler: Optional[torch.optim.lr_scheduler.LRScheduler] = None, processing_class: PreTrainedTokenizer | ProcessorMixin = None, checkpoint_config: DictConfig = None, **kwargs, ): if processing_class is None and "tokenizer" in kwargs: warnings.warn( "`tokenizer` is deprecated. use `processing_class` instead.", DeprecationWarning, stacklevel=2 ) processing_class = kwargs.pop("tokenizer") super().__init__( model, optimizer, lr_scheduler=lr_scheduler, processing_class=processing_class, checkpoint_config=checkpoint_config, ) def load_checkpoint(self, local_path: str, hdfs_path: str = None, del_local_after_load=False): """ Load an FSDP checkpoint for this rank. Downloads and loads: - model and optimizer shards - extra state dict (scheduler + RNG) Args: local_path: Directory with per-rank checkpoint files. hdfs_path: Unused (for API compatibility). del_local_after_load: Remove local files after loading. """ if local_path is None: return # check if the checkpoint_load_contents is valid if self.should_load_model: assert self.model is not None, "model must be provided when checkpoint_contents.load includes ['model']" if self.should_load_optimizer: assert self.optimizer is not None, ( "optimizer must be provided when checkpoint_contents.load includes ['optimizer']" ) # every rank download its own checkpoint state_dict_cfg = ( ShardedStateDictConfig(offload_to_cpu=True if is_cuda_available else False) if self.should_load_model else None ) optim_cfg = ( ShardedOptimStateDictConfig(offload_to_cpu=True if is_cuda_available else False) if self.should_load_optimizer else None ) with get_fsdp_state_ctx(self.model, StateDictType.SHARDED_STATE_DICT, state_dict_cfg, optim_cfg): if self.should_load_model: remote_model_path = os.path.join(local_path, f"model_world_size_{self.world_size}_rank_{self.rank}.pt") local_model_path = copy_to_local(remote_model_path) model_state_dict = torch.load(local_model_path, weights_only=False) self.model.load_state_dict(model_state_dict) log_with_rank(f"Loaded model from {remote_model_path}", rank=self.rank, logger=logger) if self.should_load_optimizer: remote_optim_path = os.path.join(local_path, f"optim_world_size_{self.world_size}_rank_{self.rank}.pt") local_optim_path = copy_to_local(remote_optim_path) optimizer_state_dict = torch.load(local_optim_path, weights_only=False) self.optimizer.load_state_dict(optimizer_state_dict) log_with_rank(f"Loaded optimizer from {remote_optim_path}", rank=self.rank, logger=logger) if self.should_load_extra: remote_extra_state_path = os.path.join( local_path, f"extra_state_world_size_{self.world_size}_rank_{self.rank}.pt" ) local_extra_state_path = copy_to_local(remote_extra_state_path) extra_state_dict = torch.load(local_extra_state_path, weights_only=False) # recover random state if "rng" in extra_state_dict: # 'rng' may not exist for backward compatibility self.load_rng_state(extra_state_dict["rng"]) log_with_rank(f"Loaded rng from {remote_extra_state_path}", rank=self.rank, logger=logger) lr_scheduler_state_dict = extra_state_dict["lr_scheduler"] if lr_scheduler_state_dict is not None and self.lr_scheduler is not None: self.lr_scheduler.load_state_dict(lr_scheduler_state_dict) 
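# Illustrative note: the extra_state file loaded here is a plain pickled dict
# written by save_checkpoint below, roughly of the form
#   {
#       "lr_scheduler": <scheduler state dict or None>,
#       "rng": {"cpu": ..., "numpy": ..., "random": ..., <device name>: ...},
#   }
# The "rng" entry may be absent in checkpoints produced by older code, hence the
# conditional restore above.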
log_with_rank(f"Loaded lr_scheduler from {remote_extra_state_path}", rank=self.rank, logger=logger) if self.rank == 0 and del_local_after_load: try: os.remove(local_model_path) if is_non_local(local_model_path) else None os.remove(local_optim_path) if is_non_local(local_optim_path) else None os.remove(local_extra_state_path) if is_non_local(local_extra_state_path) else None except Exception as e: log_with_rank( f"Failed to remove local resume checkpoint files after loading; exception {e} will be ignored", rank=self.rank, logger=logger, ) # wait for everyone to load checkpoints torch.distributed.barrier() def save_checkpoint(self, local_path: str, hdfs_path: str = None, global_step: int = 0, max_ckpt_to_keep=None): """ Save an FSDP checkpoint for this rank. Writes: - model & optimizer shard files - extra state dict (scheduler + RNG) - HF tokenizer/processor and model/config on rank 0 - optional full HF model under 'huggingface/' if requested Rotates old checkpoints, keeping at most `max_ckpt_to_keep`. Args: local_path: Target directory for checkpoint files. hdfs_path: Unused (for API compatibility). global_step: Current training step (used for bookkeeping). max_ckpt_to_keep: Number of recent checkpoints to retain. """ if local_path is None: return # record the previous global step self.previous_global_step = global_step # remove previous local_path, only rank 0 should do this if ( self.rank == 0 and max_ckpt_to_keep and isinstance(max_ckpt_to_keep, int) and max_ckpt_to_keep > 0 and len(self.previous_saved_paths) >= max_ckpt_to_keep ): keep_start = len(self.previous_saved_paths) - max_ckpt_to_keep + 1 self.remove_previous_save_local_path(self.previous_saved_paths[:keep_start]) self.previous_saved_paths = self.previous_saved_paths[keep_start:] local_path = local_mkdir_safe(local_path) torch.distributed.barrier() # check if the checkpoint_save_contents is valid if self.should_save_model: assert self.model is not None, "model must be provided when checkpoint_contents.save includes ['model']" if self.should_save_optimizer: assert self.optimizer is not None, ( "optimizer must be provided when checkpoint_contents.save includes ['optimizer']" ) # every rank will save its own model and optim shard state_dict_cfg = ShardedStateDictConfig(offload_to_cpu=True if is_cuda_available else False) optim_cfg = ShardedOptimStateDictConfig(offload_to_cpu=True if is_cuda_available else False) with warnings.catch_warnings(): warnings.simplefilter("ignore") with get_fsdp_state_ctx(self.model, StateDictType.SHARDED_STATE_DICT, state_dict_cfg, optim_cfg): model_path = os.path.join(local_path, f"model_world_size_{self.world_size}_rank_{self.rank}.pt") optim_path = os.path.join(local_path, f"optim_world_size_{self.world_size}_rank_{self.rank}.pt") extra_path = os.path.join(local_path, f"extra_state_world_size_{self.world_size}_rank_{self.rank}.pt") if self.should_save_model: model_state_dict = self.model.state_dict() torch.save(model_state_dict, model_path) log_with_rank(f"Saved model to {os.path.abspath(model_path)}", rank=self.rank, logger=logger) if self.should_save_optimizer: optimizer_state_dict = self.optimizer.state_dict() torch.save(optimizer_state_dict, optim_path) log_with_rank(f"Saved optim to {os.path.abspath(optim_path)}", rank=self.rank, logger=logger) if self.should_save_extra: lr_scheduler_state_dict = self.lr_scheduler.state_dict() if self.lr_scheduler is not None else None extra_state_dict = { "lr_scheduler": lr_scheduler_state_dict, "rng": self.get_rng_state(), } torch.save(extra_state_dict, extra_path)
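# Illustrative sketch of the directory one saved step produces (names follow the
# path templates above; <W> = world size, <R> = rank):
#   local_path/
#       model_world_size_<W>_rank_<R>.pt        # sharded model state, one per rank
#       optim_world_size_<W>_rank_<R>.pt        # sharded optimizer state, one per rank
#       extra_state_world_size_<W>_rank_<R>.pt  # lr_scheduler + RNG state, one per rank
#       huggingface/                            # config/tokenizer (and optional full HF weights), rank 0 only
#       fsdp_config.json                        # runtime FSDP metadata, rank 0 only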
log_with_rank(f"Saved extra_state to {os.path.abspath(extra_path)}", rank=self.rank, logger=logger) if self.rank == 0: # Save HF tokenizer/processor and model config on rank 0 to huggingface/ directory, no matter whether # huggingface model is requested to be saved or not. if fsdp_version(self.model) == 1: unwrap_model = self.model._fsdp_wrapped_module else: unwrap_model = self.model hf_config_tokenizer_path = os.path.join(local_path, "huggingface") local_mkdir_safe(hf_config_tokenizer_path) model_config = unwrap_model.config generation_config = None if unwrap_model.can_generate() and hasattr(model_config, "name_or_path") and model_config.name_or_path: try: # Some models' name_or_path is empty if not initialized from pretrained; # in this case, we don't save the generation config. generation_config = GenerationConfig.from_pretrained(model_config.name_or_path) generation_config.save_pretrained(hf_config_tokenizer_path) except Exception: # if the generation config isn't available, we don't save it pass model_config.save_pretrained(hf_config_tokenizer_path) if self.processing_class is not None: self.processing_class.save_pretrained(hf_config_tokenizer_path) log_with_rank( f"Saved model config and tokenizer class to {os.path.abspath(hf_config_tokenizer_path)}", rank=self.rank, logger=logger, log_only_rank_0=True, ) # If we have a custom model, we copy the file defining it in the folder and set the attributes so it can be # loaded from the Hub. if hasattr(model_config, "auto_map"): custom_object_save(unwrap_model, hf_config_tokenizer_path, config=model_config) # Also save runtime FSDP config fsdp_config_path = os.path.join(local_path, "fsdp_config.json") fsdp_config = FSDPConfig( FSDP_version=fsdp_version(self.model), world_size=self.world_size, ) with open(fsdp_config_path, "w") as f: json.dump(asdict(fsdp_config), f, indent=4) # wait for everyone to dump to local torch.distributed.barrier() if self.should_save_hf_model: # Only rank 0 will save the hf model and # offload to cpu to save LLMs which may be too large to fit in one GPU state_dict = get_fsdp_full_state_dict(self.model, offload_to_cpu=True, rank0_only=True) if self.rank == 0: hf_local_path = os.path.join(local_path, "huggingface") os.makedirs(hf_local_path, exist_ok=True) if "ForTokenClassification" in model_config.architectures[0]: from transformers import AutoModelForTokenClassification auto_model_cls = AutoModelForTokenClassification elif "ForCausalLM" in model_config.architectures[0]: from transformers import AutoModelForCausalLM auto_model_cls = AutoModelForCausalLM elif "ForConditionalGeneration" in model_config.architectures[0]: # Handle different transformers versions for Vision2Seq models import transformers from packaging import version if version.parse(transformers.__version__) >= version.parse("4.54.0"): # transformers >= 4.54.0 uses AutoModelForImageTextToText from transformers import AutoModelForImageTextToText auto_model_cls = AutoModelForImageTextToText else: # transformers < 4.54.0 uses AutoModelForVision2Seq from transformers import AutoModelForVision2Seq auto_model_cls = AutoModelForVision2Seq else: raise NotImplementedError(f"Unknown architecture {model_config.architectures}") with init_empty_weights(): save_model = auto_model_cls.from_config(model_config, torch_dtype=torch.bfloat16) save_model.to_empty(device="cpu") if save_model.can_generate(): if generation_config is not None: save_model.generation_config = generation_config else: print( f"Warning: {self.__class__.__name__}.save_checkpoint: Generation config file
not found; " f"using a generation config created from the model config when saving hf_model." ) save_model.save_pretrained(hf_local_path, state_dict=state_dict) log_with_rank( f"Saved hf_model to {os.path.abspath(hf_local_path)}", rank=self.rank, logger=logger, log_only_rank_0=True, ) del state_dict del save_model # wait for rank0 to dump hf_model to local torch.distributed.barrier() self.previous_saved_paths.append(local_path) ================================================ FILE: verl_distillation/verl/utils/checkpoint/megatron_checkpoint_manager.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import logging import os import random from collections.abc import Callable from dataclasses import asdict import numpy as np import torch import torch.distributed from megatron.core import mpu, tensor_parallel from megatron.core.dist_checkpointing.mapping import ShardedObject from megatron.core.transformer.enums import AttnBackend from transformers import GenerationConfig from verl.models.weight_loader_registry import get_weight_saver from verl.utils.device import get_device_name, get_torch_device from verl.utils.fs import is_non_local, local_mkdir_safe from verl.utils.logger import log_with_rank from verl.utils.megatron.dist_checkpointing import load_dist_checkpointing, save_dist_checkpointing from verl.utils.megatron_utils import ( get_dist_checkpoint_path, get_hf_model_checkpoint_path, get_transformer_config_checkpoint_path, ) from .checkpoint_manager import BaseCheckpointManager # Setup logging logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "INFO")) class MegatronCheckpointManager(BaseCheckpointManager): """ Checkpoint manager for Megatron-LM distributed training. This class manages the saving and loading of model checkpoints in a Megatron-LM distributed training environment. It handles various aspects of checkpointing including model states, optimizer states, learning rate schedulers, and random number generator states, ensuring compatibility with HuggingFace formats.
Key features: - Distributed checkpoint saving and loading using Megatron's dist_checkpointing - Support for tensor parallel, pipeline parallel, and data parallel configurations - Automatic handling of model state dictionaries across multiple pipeline stages - Integration with HuggingFace model configurations and tokenizers - Random number generator state management for reproducibility - Support for both synchronous and asynchronous checkpoint operations The manager automatically handles: - Directory structure creation based on global steps and process ranks - Model configuration and tokenizer saving in HuggingFace format - Optimizer and scheduler state persistence - CUDA RNG state management for deterministic training - Checkpoint cleanup and retention policies Args: model: The Megatron model instance to checkpoint optimizer: The optimizer instance (optional) lr_scheduler: The learning rate scheduler instance (optional) Attributes: model: Reference to the Megatron model being checkpointed optimizer: Reference to the optimizer (if provided) lr_scheduler: Reference to the learning rate scheduler (if provided) rank: Current process rank in the distributed setup Example: ```python checkpoint_manager = MegatronCheckpointManager( model=megatron_model, optimizer=optimizer, lr_scheduler=scheduler ) checkpoint_manager.save_checkpoint( local_path="checkpoints/step_1000", global_step=1000 ) checkpoint_manager.load_checkpoint( local_path="checkpoints/step_1000" ) ``` """ def __init__( self, config, checkpoint_config, model_config, transformer_config, role, model: torch.nn.ModuleList, arch: str, hf_config, param_dtype: torch.dtype, share_embeddings_and_output_weights: bool, processing_class, optimizer, optimizer_scheduler, use_distributed_optimizer: bool, use_checkpoint_opt_param_scheduler: bool = False, use_dist_checkpointing: bool = True, bridge=None, **kwargs, ): super().__init__( model, optimizer=optimizer, lr_scheduler=optimizer_scheduler, processing_class=processing_class, checkpoint_config=checkpoint_config, ) self.arch = arch self.config = config self.transformer_config = transformer_config self.role = role self.is_value_model = False if self.role in ["reward", "critic"]: self.is_value_model = True self.model_config = model_config self.hf_config = hf_config self.param_dtype = param_dtype self.share_embeddings_and_output_weights = share_embeddings_and_output_weights self.model_path = self.config.model.path self.use_distributed_optimizer = use_distributed_optimizer self.use_checkpoint_opt_param_scheduler = use_checkpoint_opt_param_scheduler self.bridge = bridge self.rank = torch.distributed.get_rank() self.use_dist_checkpointing = use_dist_checkpointing or not self.bridge or self.is_value_model self.use_hf_checkpoint = not self.use_dist_checkpointing self.weight_saver = None if self.bridge is None: self.weight_saver = get_weight_saver(self.arch) def get_rng_state(self, use_dist_ckpt: bool = True, data_parallel_random_init: bool = False): """collect rng state across data parallel ranks""" rng_state = { "random_rng_state": random.getstate(), "np_rng_state": np.random.get_state(), "torch_rng_state": torch.get_rng_state(), "rng_tracker_states": tensor_parallel.get_cuda_rng_tracker().get_states(), } if get_device_name() != "cpu": rng_state[f"{get_device_name()}_rng_state"] = get_torch_device().get_rng_state() rng_state_list = None if torch.distributed.is_initialized() and mpu.get_data_parallel_world_size() > 1 and data_parallel_random_init: rng_state_list = [None for i in 
range(mpu.get_data_parallel_world_size())] torch.distributed.all_gather_object(rng_state_list, rng_state, group=mpu.get_data_parallel_group()) else: rng_state_list = [rng_state] if use_dist_ckpt: pp_rank = mpu.get_pipeline_model_parallel_rank() pp_size = mpu.get_pipeline_model_parallel_world_size() tp_rank = mpu.get_tensor_model_parallel_rank() tp_size = mpu.get_tensor_model_parallel_world_size() rng_state_list = ShardedObject( "rng_state", rng_state_list, (pp_size, tp_size), (pp_rank, tp_rank), replica_id=mpu.get_data_parallel_rank(with_context_parallel=True), ) return rng_state_list def get_checkpoint_name( self, checkpoints_path, pipeline_parallel=None, tensor_rank=None, pipeline_rank=None, cp_rank=None, expert_parallel=None, expert_rank=None, return_base_dir=True, basename="model.pt", ): """Determine the directory name for this rank's checkpoint.""" # Use both the tensor and pipeline MP rank. if pipeline_parallel is None: pipeline_parallel = mpu.get_pipeline_model_parallel_world_size() > 1 if tensor_rank is None: tensor_rank = mpu.get_tensor_model_parallel_rank() if pipeline_rank is None: pipeline_rank = mpu.get_pipeline_model_parallel_rank() if cp_rank is None: cp_rank = mpu.get_context_parallel_rank() if expert_parallel is None: expert_parallel = mpu.get_expert_model_parallel_world_size() > 1 if expert_rank is None: expert_rank = mpu.get_expert_model_parallel_rank() # Use both the tensor and pipeline MP rank. If using the distributed # optimizer, then the optimizer's path must additionally include the # data parallel rank. # due to the fact that models are identical across cp ranks, cp rank is not used in the checkpoint path if not pipeline_parallel: common_path = os.path.join(checkpoints_path, f"mp_rank_{tensor_rank:02d}") else: common_path = os.path.join(checkpoints_path, f"mp_rank_{tensor_rank:02d}_{pipeline_rank:03d}") if expert_parallel: common_path = common_path + f"_{expert_rank:03d}" os.makedirs(common_path, exist_ok=True) if return_base_dir: return common_path return os.path.join(common_path, basename) def generate_state_dict( self, generate_model: bool = True, generate_optimizer: bool = True, generate_extra: bool = True, is_loading: bool = False, ): # For save dist checkpointing state_dict = {} # Should always generate model state dict # All ranks Save Model to reduce memory pressure # Get sharded state dict, notice that state_dict will collect among dp groups, causing memory pressure for vpp_rank, model in enumerate(self.model): if len(self.model) > 1: mpu.set_virtual_pipeline_model_parallel_rank(vpp_rank) key = f"model{vpp_rank}" if len(self.model) > 1 else "model" else: key = "model" if hasattr(model, "module"): model = model.module state_dict[key] = model.sharded_state_dict() # Optimizer State Dict if generate_optimizer: torch.distributed.barrier() optimizer_sharded_states = self.optimizer.sharded_state_dict(state_dict, is_loading=is_loading) state_dict["optimizer"] = optimizer_sharded_states if self.lr_scheduler is not None: lr_state_dict = self.lr_scheduler.state_dict() state_dict["lr_scheduler"] = lr_state_dict if not generate_model: state_dict.pop("model", None) # RNG States State Dict if generate_extra: torch.distributed.barrier() rng_state = self.get_rng_state() state_dict["rng_state"] = rng_state return state_dict def load_rng_states(self, rng_states, data_parallel_random_init=False, use_dist_ckpt=True): # access rng_state for data parallel rank if data_parallel_random_init: rng_states = rng_states[mpu.get_data_parallel_rank()] else: rng_states = rng_states[0] 
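# Illustrative note: each entry restored below mirrors the dict built by
# get_rng_state() above, i.e. keys "random_rng_state", "np_rng_state",
# "torch_rng_state", "rng_tracker_states" and, on accelerators,
# "<device>_rng_state". With data_parallel_random_init, saving gathers one such
# dict per data-parallel rank and the matching rank's entry is selected here;
# otherwise all ranks share entry 0.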
random.setstate(rng_states["random_rng_state"]) np.random.set_state(rng_states["np_rng_state"]) torch.set_rng_state(rng_states["torch_rng_state"]) if get_device_name() != "cpu": get_torch_device().set_rng_state(rng_states[f"{get_device_name()}_rng_state"]) # Check for empty states array if not rng_states["rng_tracker_states"]: raise KeyError tensor_parallel.get_cuda_rng_tracker().set_states(rng_states["rng_tracker_states"]) def load_checkpoint(self, local_path: str, hdfs_path: str = None, del_local_after_load=False): if local_path is not None: assert os.path.exists(local_path), f"Checkpoint path {local_path} does not exist." # For load optimizer dist_ckpt import transformer_engine torch.serialization.add_safe_globals([torch.optim.AdamW]) torch.serialization.add_safe_globals([transformer_engine.pytorch.optimizers.fused_adam.FusedAdam]) dist_checkpoint_path = get_dist_checkpoint_path(local_path) # Get State Dict for loading sharded_state_dict = self.generate_state_dict( self.should_load_model and self.use_dist_checkpointing, self.should_load_optimizer, self.should_load_extra, is_loading=True, ) log_with_rank(f"Generated state dict for loading: {sharded_state_dict.keys()}", rank=self.rank, logger=logger) # Load Dist Checkpointing state_dict = load_dist_checkpointing( sharded_state_dict=sharded_state_dict, ckpt_dir=dist_checkpoint_path, ) if self.should_load_model and self.use_dist_checkpointing: assert "model" in state_dict or any( f"model{vpp_rank}" in state_dict for vpp_rank in range(len(self.model)) ), f"Model state dict not found in {state_dict.keys()}. Please check the checkpoint file {local_path}." for vpp_rank, model in enumerate(self.model): if len(self.model) == 1: model_state_dict = state_dict["model"] else: assert f"model{vpp_rank}" in state_dict, f"model{vpp_rank} not found in state_dict" model_state_dict = state_dict[f"model{vpp_rank}"] mpu.set_virtual_pipeline_model_parallel_rank(vpp_rank) self.model[vpp_rank].load_state_dict(model_state_dict) log_with_rank(f"Loaded sharded model checkpoint from {local_path}", rank=self.rank, logger=logger) elif self.should_load_model and self.use_hf_checkpoint: hf_model_path = get_hf_model_checkpoint_path(local_path) self.bridge.load_weights(self.model, hf_model_path) log_with_rank(f"Loaded HF model checkpoint from {hf_model_path} with bridge", rank=self.rank, logger=logger) if self.should_load_optimizer: assert "optimizer" in state_dict, ( f"Optimizer state dict not found in {state_dict.keys()}. Please check the checkpoint file {local_path}." ) optimizer_state_dict = state_dict["optimizer"] self.optimizer.load_state_dict(optimizer_state_dict) log_with_rank(f"Loaded optimizer checkpoint from {local_path}", rank=self.rank, logger=logger) if self.use_checkpoint_opt_param_scheduler: assert "lr_scheduler" in state_dict, ( f"LR scheduler state dict not found in {state_dict.keys()}. Please check the checkpoint file " f"{local_path}." ) lr_scheduler_state_dict = state_dict["lr_scheduler"] if self.lr_scheduler is not None: self.lr_scheduler.load_state_dict(lr_scheduler_state_dict) log_with_rank(f"Loaded LR scheduler checkpoint from {local_path}", rank=self.rank, logger=logger) if self.should_load_extra: assert "rng_state" in state_dict, ( f"RNG state dict not found in {state_dict.keys()}. Please check the checkpoint file {local_path}." 
) rng_state = state_dict["rng_state"] self.load_rng_states(rng_state) log_with_rank(f"Loaded RNG states from {local_path}", rank=self.rank, logger=logger) if del_local_after_load: try: os.remove(local_path) if is_non_local(local_path) else None except Exception as e: log_with_rank( f"Failed to remove local resume checkpoint file after loading; exception {e} will be ignored", rank=self.rank, logger=logger, ) def save_checkpoint(self, local_path: str, hdfs_path: str = None, global_step: int = 0, max_ckpt_to_keep=None): # record the previous global step self.previous_global_step = global_step # remove previous local_path if ( max_ckpt_to_keep and isinstance(max_ckpt_to_keep, int) and max_ckpt_to_keep > 0 and len(self.previous_saved_paths) >= max_ckpt_to_keep ): keep_start = len(self.previous_saved_paths) - max_ckpt_to_keep + 1 self.remove_previous_save_local_path(self.previous_saved_paths[:keep_start]) self.previous_saved_paths = self.previous_saved_paths[keep_start:] local_path = local_mkdir_safe(local_path) dist_checkpoint_path = get_dist_checkpoint_path(local_path) # Note that model weights, optimizer states, and extra states are generated # together in one state dict, so we save them in a single pass if self.use_dist_checkpointing: # Generate state dict for saving state_dict = self.generate_state_dict( self.should_save_model, self.should_save_optimizer, self.should_save_extra ) log_with_rank(f"Generated state dict for saving: {state_dict.keys()}", rank=self.rank, logger=logger) for vpp_rank, model in enumerate(self.model): if len(self.model) > 1: model_i_keys = state_dict[f"model{vpp_rank}"].keys() log_with_rank(f"Generated state dict for saving: {model_i_keys}", rank=self.rank, logger=logger) else: log_with_rank( f"Generated state dict for saving: {state_dict['model'].keys()}", rank=self.rank, logger=logger ) # Start Async save if enabled async_save_request = save_dist_checkpointing( sharded_state_dict=state_dict, ckpt_path=dist_checkpoint_path, async_save=self.checkpoint_config.async_save, ) # Synchronize all async save requests if not self.checkpoint_config.async_save: assert async_save_request is None, "Async save request should be None when not using async save." torch.distributed.barrier() else: assert self.use_hf_checkpoint, "When not using distributed checkpointing, use_hf_checkpoint should be True." # Generate optimizer and extra state dicts state_dict = self.generate_state_dict( generate_model=False, generate_optimizer=self.should_save_optimizer, generate_extra=self.should_save_extra, ) # Save optimizer and extra states to local path # Start Async save if enabled async_save_request = save_dist_checkpointing( sharded_state_dict=state_dict, ckpt_path=dist_checkpoint_path, async_save=self.checkpoint_config.async_save, ) # Synchronize all async save requests if not self.checkpoint_config.async_save: assert async_save_request is None, "Async save request should be None when not using async save."
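# Summary of the two save paths above (a reading aid, not new logic):
#   - use_dist_checkpointing: model, optimizer and extra states are bundled into a
#     single sharded state dict and written via save_dist_checkpointing
#     (optionally asynchronously).
#   - use_hf_checkpoint: only optimizer/extra states go through dist checkpointing
#     here; model weights are written separately below via bridge.save_weights.
# When async_save is enabled, finalization work (such as the HDFS upload registered
# in finalize_save_fn further down) is attached to the returned async_save_request
# instead of running inline.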
torch.distributed.barrier() if self.should_save_model: if self.use_hf_checkpoint: # Use mbridge to save HF model checkpoint log_with_rank(f"Saving HF model checkpoint to {local_path} with bridge", rank=self.rank, logger=logger) hf_ckpt_path = get_hf_model_checkpoint_path(local_path) self.bridge.save_weights(self.model, hf_ckpt_path) log_with_rank(f"Saved bridge checkpoint to {hf_ckpt_path}", rank=self.rank, logger=logger) # Only rank 0 saves the hf config and tokenizer to huggingface path # No matter whether we save hf model or not if self.rank == 0: # Save tokenizer hf_config_tokenizer_path = get_hf_model_checkpoint_path(local_path) if self.processing_class is not None: self.processing_class.save_pretrained(hf_config_tokenizer_path) # Save huggingface config self.hf_config.save_pretrained(hf_config_tokenizer_path) if hasattr(self.hf_config, "name_or_path") and self.hf_config.name_or_path: try: generation_config = GenerationConfig.from_pretrained(self.hf_config.name_or_path) generation_config.save_pretrained(hf_config_tokenizer_path) except Exception: # if the generation config isn't available, we don't save it pass log_with_rank( f"Saved Huggingface config and tokenizer to {hf_config_tokenizer_path}", rank=self.rank, logger=logger, log_only_rank_0=True, ) if self.should_save_extra: if self.rank == 0: # Save transformer config print(self.transformer_config) transformer_config_dict = asdict(self.transformer_config) to_convert_types = {torch.dtype: str, AttnBackend: str} ignore_types = [Callable] pop_keys = [] for key, value in transformer_config_dict.items(): if type(value) in to_convert_types: transformer_config_dict[key] = to_convert_types[type(value)](value) if type(value) in ignore_types: pop_keys.append(key) if callable(value): pop_keys.append(key) for key in pop_keys: transformer_config_dict.pop(key) transformer_config_path = get_transformer_config_checkpoint_path(local_path) with open(transformer_config_path, "w") as f: json.dump(transformer_config_dict, f, indent=2) if self.should_save_hf_model and not self.use_hf_checkpoint: # wait for everyone to dump to local if self.bridge is not None: hf_model_ckpt_path = get_hf_model_checkpoint_path(local_path) self.bridge.save_weights(self.model, hf_model_ckpt_path) else: state_dict = self.weight_saver( self.model, self.hf_config, dtype=self.param_dtype, is_value_model=self.is_value_model, tie_word_embeddings=self.share_embeddings_and_output_weights, ) torch.distributed.barrier() if self.rank == 0: hf_model_ckpt_path = get_hf_model_checkpoint_path(local_path) import warnings from accelerate import init_empty_weights with init_empty_weights(), warnings.catch_warnings(): warnings.simplefilter("ignore") if "mistral7b-rm" in self.config.model.path: from transformers import MistralForSequenceClassification model = MistralForSequenceClassification.from_pretrained( self.config.model.path ) # use score head instead of lm_head state_dict["score.weight"] = state_dict["score.weight"] else: from transformers import AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained(self.config.model.path, torch_dtype="auto") model.save_pretrained(hf_model_ckpt_path, state_dict=state_dict) log_with_rank( f"Saved Huggingface config and tokenizer to {hf_model_ckpt_path}", rank=self.rank, logger=logger, log_only_rank_0=True, ) if hdfs_path is not None: log_with_rank( f"Uploading checkpoint to {hdfs_path}", rank=self.rank, logger=logger, log_only_rank_0=True ) from verl.utils import hdfs_io hdfs_io.makedirs(hdfs_path, exist_ok=True) 
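# Note: assuming hdfs_io.copy mirrors a local directory tree to the remote path
# (as its dirs_exist_ok argument suggests), the HF-format weights written to
# hf_model_ckpt_path above end up under hdfs_path after the copy below.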
hdfs_io.copy(src=hf_model_ckpt_path, dst=hdfs_path, dirs_exist_ok=True) log_with_rank( f"HDFS checkpoint uploaded to {hdfs_path}", rank=self.rank, logger=logger, log_only_rank_0=True, ) def finalize_save_fn(): # Rank 0 uploads checkpoint to HDFS if hdfs_path is provided log_with_rank( f"Dist checkpointing save completed for {dist_checkpoint_path}", rank=self.rank, logger=logger ) if self.rank == 0: if hdfs_path is not None: log_with_rank(f"Uploading checkpoint to {hdfs_path}", rank=self.rank, logger=logger) from verl.utils import hdfs_io hdfs_io.makedirs(hdfs_path, exist_ok=True) hdfs_io.copy(src=dist_checkpoint_path, dst=hdfs_path, dirs_exist_ok=True) hdfs_io.copy(src=hf_config_tokenizer_path, dst=hdfs_path, dirs_exist_ok=True) if self.checkpoint_config.async_save: assert async_save_request is not None, "Async save request should not be None when using async save." async_save_request.add_finalize_fn(finalize_save_fn) else: finalize_save_fn() self.previous_saved_paths.append(local_path) ================================================ FILE: verl_distillation/verl/utils/config.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import is_dataclass from typing import Any, Optional from omegaconf import DictConfig, ListConfig, OmegaConf __all__ = ["omega_conf_to_dataclass", "validate_config"] def omega_conf_to_dataclass(config: DictConfig | dict, dataclass_type: Optional[type[Any]] = None) -> Any: """ Convert an OmegaConf DictConfig to a dataclass. Args: config: The OmegaConf DictConfig or dict to convert. dataclass_type: The dataclass type to convert to. When dataclass_type is None, the DictConfig must contain _target_ to be instantiated via hydra.instantiate API. Returns: The dataclass instance. """ # Got an empty config if not config: return dataclass_type if dataclass_type is None else dataclass_type() # Got an object if not isinstance(config, DictConfig | ListConfig | dict | list): return config if dataclass_type is None: assert "_target_" in config, ( "When dataclass_type is not provided, config must contain _target_. " "See trainer/config/ppo_trainer.yaml algorithm section for an example. " f"Got config: {config}" ) from hydra.utils import instantiate return instantiate(config, _convert_="partial") if not is_dataclass(dataclass_type): raise ValueError(f"{dataclass_type} must be a dataclass") cfg = OmegaConf.create(config) # in case it's a dict # pop _target_ to avoid hydra instantiate error, as most dataclass do not have _target_ # Updated (vermouth1992) We add _target_ to BaseConfig so that it is compatible. # Otherwise, this code path can't support recursive instantiation. 
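# Hedged usage sketch for this helper (hypothetical dataclass and values, shown
# for illustration only):
#   @dataclass
#   class OptimCfg:
#       lr: float = 1e-3
#       warmup_steps: int = 0
#   omega_conf_to_dataclass(OmegaConf.create({"lr": 5e-4}), OptimCfg)
#   # -> OptimCfg(lr=5e-4, warmup_steps=0)
# Values present in the incoming config override the dataclass defaults via the
# OmegaConf.merge below.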
# if "_target_" in cfg: # cfg.pop("_target_") cfg_from_dataclass = OmegaConf.structured(dataclass_type) # let cfg override the existing vals in `cfg_from_dataclass` cfg_merged = OmegaConf.merge(cfg_from_dataclass, cfg) # now convert to `dataclass_type` config_object = OmegaConf.to_object(cfg_merged) return config_object def update_dict_with_config(dictionary: dict, config: DictConfig): for key in dictionary: if hasattr(config, key): dictionary[key] = getattr(config, key) def validate_config( config: DictConfig, use_reference_policy: bool, use_critic: bool, ) -> None: """Validate an OmegaConf DictConfig. Args: config (DictConfig): The OmegaConf DictConfig to validate. use_reference_policy (bool): is ref policy needed use_critic (bool): is critic needed """ # number of GPUs total n_gpus = config.trainer.n_gpus_per_node * config.trainer.nnodes if not config.actor_rollout_ref.actor.use_dynamic_bsz: if config.actor_rollout_ref.actor.strategy == "megatron": model_parallel_size = ( config.actor_rollout_ref.actor.megatron.tensor_model_parallel_size * config.actor_rollout_ref.actor.megatron.pipeline_model_parallel_size ) assert ( n_gpus % (model_parallel_size * config.actor_rollout_ref.actor.megatron.context_parallel_size) == 0 ), ( f"n_gpus ({n_gpus}) must be divisible by model_parallel_size ({model_parallel_size}) times " f"context_parallel_size ({config.actor_rollout_ref.actor.megatron.context_parallel_size})" ) megatron_dp = n_gpus // ( model_parallel_size * config.actor_rollout_ref.actor.megatron.context_parallel_size ) minimal_bsz = megatron_dp * config.actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu else: minimal_bsz = n_gpus # 1. Check total batch size for data correctness real_train_batch_size = config.data.train_batch_size * config.actor_rollout_ref.rollout.n assert real_train_batch_size % minimal_bsz == 0, ( f"real_train_batch_size ({real_train_batch_size}) must be divisible by minimal possible batch size " f"({minimal_bsz})" ) # A helper function to check "micro_batch_size" vs "micro_batch_size_per_gpu" # We throw an error if the user sets both. The new convention is "..._micro_batch_size_per_gpu". def check_mutually_exclusive(mbs, mbs_per_gpu, name: str): """Validate mutually exclusive micro batch size configuration options. Ensures that users don't set both deprecated micro_batch_size and the new micro_batch_size_per_gpu parameters simultaneously. Args: mbs: Deprecated micro batch size parameter value. mbs_per_gpu: New micro batch size per GPU parameter value. name (str): Configuration section name for error messages. Raises: ValueError: If both parameters are set or neither is set. """ settings = { "reward_model": "micro_batch_size", "actor_rollout_ref.ref": "log_prob_micro_batch_size", "actor_rollout_ref.rollout": "log_prob_micro_batch_size", } if name in settings: param = settings[name] param_per_gpu = f"{param}_per_gpu" if mbs is None and mbs_per_gpu is None: raise ValueError(f"[{name}] Please set at least one of '{name}.{param}' or '{name}.{param_per_gpu}'.") if mbs is not None and mbs_per_gpu is not None: raise ValueError( f"[{name}] You have set both '{name}.{param}' AND '{name}.{param_per_gpu}'. Please remove " f"'{name}.{param}' because only '*_{param_per_gpu}' is supported (the former is deprecated)." 
) # Actor validation done in ActorConfig.__post_init__ and validate() actor_config = omega_conf_to_dataclass(config.actor_rollout_ref.actor) actor_config.validate(n_gpus, config.data.train_batch_size, config.actor_rollout_ref.model) if not config.actor_rollout_ref.actor.use_dynamic_bsz: if use_reference_policy: # reference: log_prob_micro_batch_size vs. log_prob_micro_batch_size_per_gpu check_mutually_exclusive( config.actor_rollout_ref.ref.log_prob_micro_batch_size, config.actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu, "actor_rollout_ref.ref", ) # The rollout section also has log_prob_micro_batch_size vs. log_prob_micro_batch_size_per_gpu check_mutually_exclusive( config.actor_rollout_ref.rollout.log_prob_micro_batch_size, config.actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu, "actor_rollout_ref.rollout", ) # Check for reward model micro-batch size conflicts if config.reward_model.enable and not config.reward_model.use_dynamic_bsz: check_mutually_exclusive( config.reward_model.micro_batch_size, config.reward_model.micro_batch_size_per_gpu, "reward_model" ) if config.algorithm.use_kl_in_reward and config.actor_rollout_ref.actor.use_kl_loss: print("NOTICE: You have enabled both in-reward KL and KL loss.") # critic if use_critic: critic_config = omega_conf_to_dataclass(config.critic) critic_config.validate(n_gpus, config.data.train_batch_size) if config.data.get("val_batch_size", None) is not None: print( "WARNING: val_batch_size is deprecated." + " Validation datasets are sent to inference engines as a whole batch," + " and the engines will schedule the memory themselves." ) # check eval config if config.actor_rollout_ref.rollout.val_kwargs.do_sample: assert config.actor_rollout_ref.rollout.temperature > 0, ( "validation gen temperature should be greater than 0 when enabling do_sample" ) # check LoRA rank in vLLM if config.actor_rollout_ref.model.get("lora_rank", 0) > 0 and config.actor_rollout_ref.rollout.name == "vllm": assert config.actor_rollout_ref.model.lora_rank <= 512, "LoRA rank in vLLM must be less than or equal to 512" print("[validate_config] All configuration checks passed successfully!") ================================================ FILE: verl_distillation/verl/utils/dataset/README.md ================================================ # Dataset Format ## RLHF dataset We combine all the data sources into a single parquet file. We directly organize the prompt into the chat format so that multi-turn chats can be easily incorporated. In the prompt, we may add instruction-following text to guide the model to output the answers in a particular format so that we can extract the answers. Math problems ```json { "data_source": "openai/gsm8k", "prompt": [{"role": "user", "content": "Natalia sold clips to 48 of her friends in April, and then she sold half as many clips in May. How many clips did Natalia sell altogether in April and May? Let's think step by step and output the final answer after \"####\""}], "ability": "math", "reward_model": { "style": "rule", "ground_truth": ["72"] }, } ``` ================================================ FILE: verl_distillation/verl/utils/dataset/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .rl_dataset import RLHFDataset from .rm_dataset import RMDataset from .sft_dataset import SFTDataset __all__ = ["RLHFDataset", "RMDataset", "SFTDataset"] ================================================ FILE: verl_distillation/verl/utils/dataset/dataset_utils.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from enum import Enum import torch class DatasetPadMode(str, Enum): """Padding mode for dataset""" RIGHT = "right" LEFT_RIGHT = "left_right" NO_PADDING = "no_padding" class SFTTensorCollator: """ A custom collate_fn that handles batching of sequences. 1. for variable-length sequences, convert them into NestedTensors. 2. for fixed-length sequences, use default_collate. """ def __init__(self, pad_mode: DatasetPadMode = DatasetPadMode.LEFT_RIGHT): self.pad_mode = pad_mode def __call__(self, batch: list[dict[str, any]]) -> dict[str, any]: if self.pad_mode == DatasetPadMode.NO_PADDING: return self.collate_variable_batch(batch) elif self.pad_mode in [DatasetPadMode.RIGHT, DatasetPadMode.LEFT_RIGHT]: from torch.utils.data import default_collate return default_collate(batch) else: raise NotImplementedError(f"pad_mode {self.pad_mode} not implemented") def collate_variable_batch(self, batch: list[dict[str, any]]) -> dict[str, any]: """ Collates a list of samples into a single batch. Args: batch: A list of dictionary samples from the dataset. Returns: A dictionary representing the batched data, with variable-length sequences converted to NestedTensors. """ final_batch = {} tensor_keys = [key for key in batch[0].keys() if isinstance(batch[0][key], torch.Tensor)] # Handle tensor values by creating a NestedTensor. for key in tensor_keys: tensors = [item[key] for item in batch] final_batch[key] = torch.nested.as_nested_tensor(tensors, layout=torch.jagged) return final_batch ================================================ FILE: verl_distillation/verl/utils/dataset/multiturn_sft_dataset.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2025 ModelBest Inc. and/or its affiliates # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. """ Multi-turn SFT dataset that supports training on conversation data with multiple turns """ import logging from typing import Any, Optional import numpy as np import pandas as pd import torch from omegaconf import ListConfig from torch.utils.data import Dataset from transformers import PreTrainedTokenizer from verl.utils import hf_tokenizer from verl.utils.dataset.dataset_utils import DatasetPadMode from verl.utils.fs import copy_local_path_from_hdfs def convert_nested_value_to_list_recursive(data_item): if isinstance(data_item, dict): return {k: convert_nested_value_to_list_recursive(v) for k, v in data_item.items()} elif isinstance(data_item, list): return [convert_nested_value_to_list_recursive(elem) for elem in data_item] elif isinstance(data_item, np.ndarray): # Convert to list, then recursively process the elements of the new list return convert_nested_value_to_list_recursive(data_item.tolist()) else: # Base case: item is already a primitive type (int, str, float, bool, etc.) return data_item class MultiTurnSFTDataset(Dataset): """ Dataset for multi-turn conversations where each assistant response should be trained """ def __init__(self, parquet_files: str | list[str], tokenizer, config=None, max_samples: int = -1): # Set defaults and extract parameters from config if provided config = config or {} self.pad_mode = config.get("pad_mode", "right") assert self.pad_mode in ["right", "no_padding"], ( f"Expect pad_mode to be 'right' or 'no_padding'. Got {self.pad_mode}" ) self.truncation = config.get("truncation", "error") # for right padding self.max_length = config.get("max_length", 1024) # Get messages_key from the new multiturn config structure multiturn_config = config.get("multiturn", {}) self.messages_key = multiturn_config.get("messages_key", "messages") self.tools_key = multiturn_config.get("tools_key", "tools") self.enable_thinking_key = multiturn_config.get("enable_thinking_key", "enable_thinking") self.apply_chat_template_kwargs = config.get("apply_chat_template_kwargs", {}) self.shuffle = config.get("shuffle", False) self.seed = config.get("seed") self.max_samples = max_samples assert self.truncation in ["error", "left", "right"] if not isinstance(parquet_files, list | ListConfig): parquet_files = [parquet_files] self.parquet_files = parquet_files if isinstance(tokenizer, str): tokenizer = hf_tokenizer(tokenizer) self.tokenizer: PreTrainedTokenizer = tokenizer self._download() self._read_files_and_process() def _download(self): for i, parquet_file in enumerate(self.parquet_files): self.parquet_files[i] = copy_local_path_from_hdfs(parquet_file, verbose=True) def _read_files_and_process(self): def series_to_item(ls): import numpy import pandas while isinstance(ls, pandas.core.series.Series | numpy.ndarray) and len(ls) == 1: ls = ls[0] return ls dataframes = [] for parquet_file in self.parquet_files: dataframe = pd.read_parquet(parquet_file) dataframes.append(dataframe) self.dataframe = pd.concat(dataframes) total = len(self.dataframe) print(f"dataset len: {len(self.dataframe)}") if self.max_samples > 0 and self.max_samples < total: if self.shuffle: rngs_args = (self.seed,) if self.seed is not None else () rng = np.random.default_rng(*rngs_args) indices = rng.choice(total, size=self.max_samples, replace=False) else: indices = np.arange(self.max_samples) self.dataframe = self.dataframe.iloc[indices.tolist()] print(f"selected {self.max_samples} random samples out of {total}") # 
Extract messages list from dataframe self.messages = self.dataframe[self.messages_key].apply(series_to_item).tolist() # Extract tools list from dataframe if self.tools_key in self.dataframe.columns: self.tools = self.dataframe[self.tools_key].apply(convert_nested_value_to_list_recursive).tolist() else: self.tools = None # Extract enable_thinking list from dataframe if self.enable_thinking_key in self.dataframe.columns: self.enable_thinking = self.dataframe[self.enable_thinking_key].tolist() else: self.enable_thinking = None def __len__(self): return len(self.messages) def _process_message_tokens( self, messages: list[dict[str, Any]], start_idx: int, end_idx: int, is_assistant: bool = False, enable_thinking: Optional[bool] = None, tools: Optional[list[dict[str, Any]]] = None, ) -> tuple[list[int], list[int], list[int]]: """ Process tokens for a single message or a group of messages. Args: messages: List of message dictionaries start_idx: Start index in messages list end_idx: End index in messages list is_assistant: Whether this is an assistant message enable_thinking: Whether to enable thinking mode Returns: Tuple of (tokens, loss_mask, attention_mask) """ if start_idx > 0: prev_applied_text = self.tokenizer.apply_chat_template( messages[:start_idx], tokenize=False, add_generation_prompt=False, enable_thinking=enable_thinking, tools=tools, **self.apply_chat_template_kwargs, ) if is_assistant: prev_applied_text_w_generation_prompt = self.tokenizer.apply_chat_template( messages[:start_idx], tokenize=False, add_generation_prompt=True, enable_thinking=enable_thinking, tools=tools, **self.apply_chat_template_kwargs, ) else: prev_applied_text = "" cur_applied_text = self.tokenizer.apply_chat_template( messages[:end_idx], tokenize=False, add_generation_prompt=False, enable_thinking=enable_thinking, tools=tools, **self.apply_chat_template_kwargs, ) # Get tokens for the current message only if is_assistant: generation_prompt_text = prev_applied_text_w_generation_prompt[len(prev_applied_text) :] generation_prompt_tokens = self.tokenizer.encode( generation_prompt_text, add_special_tokens=False, ) _message_tokens = self.tokenizer.encode( cur_applied_text[len(prev_applied_text_w_generation_prompt) :], add_special_tokens=False, ) message_tokens = generation_prompt_tokens + _message_tokens loss_mask = [0] * (len(generation_prompt_tokens)) + [1] * ( len(message_tokens) - len(generation_prompt_tokens) ) else: message_tokens = self.tokenizer.encode( cur_applied_text[len(prev_applied_text) :], add_special_tokens=False, ) loss_mask = [0] * len(message_tokens) attention_mask = [1] * len(message_tokens) return message_tokens, loss_mask, attention_mask def _validate_and_convert_tokens( self, full_tokens: torch.Tensor, concat_tokens: list[int], concat_loss_mask: list[int], concat_attention_mask: list[int], ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Validate tokenization and convert to tensors. Args: full_tokens: Full conversation tokens concat_tokens: Concatenated tokens concat_loss_mask: Concatenated loss mask concat_attention_mask: Concatenated attention mask Returns: Tuple of (input_ids, loss_mask, attention_mask) as tensors """ full_tokens_list = full_tokens.tolist() if len(concat_tokens) != len(full_tokens_list) or not all( a == b for a, b in zip(concat_tokens, full_tokens_list, strict=True) ): logging.warning( f"Token mismatch detected! Full tokenization length: {len(full_tokens_list)}, Concatenated tokens " f"length: {len(concat_tokens)}. Using concatenated version." 
# f"full tokens text: {self.tokenizer.decode(full_tokens_list)}" # f"concat tokens text: {self.tokenizer.decode(concat_tokens)}" ) return ( torch.tensor(concat_tokens, dtype=torch.long), torch.tensor(concat_loss_mask, dtype=torch.long), torch.tensor(concat_attention_mask, dtype=torch.long), ) return ( full_tokens, torch.tensor(concat_loss_mask, dtype=torch.long), torch.tensor(concat_attention_mask, dtype=torch.long), ) def __getitem__(self, item): tokenizer = self.tokenizer messages = self.messages[item] tools = self.tools[item] if self.tools is not None else None enable_thinking = self.enable_thinking[item] if self.enable_thinking is not None else None # First, get the full conversation tokens try: full_tokens = tokenizer.apply_chat_template( messages, tools=tools, tokenize=True, return_tensors="pt", add_generation_prompt=False, enable_thinking=enable_thinking, **self.apply_chat_template_kwargs, ) except Exception as e: logging.error( f"Error applying chat template: {e}\nMessages: {messages}\nTools: {tools}\nEnable thinking: " f"{enable_thinking}" ) raise # Track concatenated tokens for validation concat_tokens = [] concat_loss_mask = [] concat_attention_mask = [] i = 0 while i < len(messages): cur_messages = messages[i] if cur_messages["role"] == "assistant": # Process assistant message tokens, loss_mask, attention_mask = self._process_message_tokens( messages, i, i + 1, is_assistant=True, enable_thinking=enable_thinking, tools=tools ) i += 1 elif cur_messages["role"] == "tool": # Process consecutive tool messages st = i ed = i + 1 while ed < len(messages) and messages[ed]["role"] == "tool": ed += 1 tokens, loss_mask, attention_mask = self._process_message_tokens( messages, st, ed, enable_thinking=enable_thinking, tools=tools ) i = ed elif cur_messages["role"] in ["user", "system"]: # Process user or system message if cur_messages["role"] == "system" and i != 0: raise ValueError("System message should be the first message") tokens, loss_mask, attention_mask = self._process_message_tokens( messages, i, i + 1, enable_thinking=enable_thinking, tools=tools ) i += 1 else: raise ValueError(f"Unknown role: {cur_messages['role']}") # override loss mask with mask in the dataset to handle multi-turn conversation override_loss_mask = cur_messages.get("loss_mask", None) if override_loss_mask is not None: if isinstance(override_loss_mask, np.ndarray): override_loss_mask = override_loss_mask.item() assert isinstance(override_loss_mask, int), f"loss_mask should be int, got {type(override_loss_mask)}" assert override_loss_mask in [0, 1], f"loss_mask should be 0 or 1, got {override_loss_mask}" loss_mask = [override_loss_mask] * len(tokens) concat_tokens.extend(tokens) concat_loss_mask.extend(loss_mask) concat_attention_mask.extend(attention_mask) # Validate and convert tokens input_ids, loss_mask, attention_mask = self._validate_and_convert_tokens( full_tokens[0], concat_tokens, concat_loss_mask, concat_attention_mask ) # encode prompt if messages[0]["role"] == "system": assert messages[1]["role"] == "user" assert messages[2]["role"] == "assistant" elif messages[0]["role"] == "user": assert messages[1]["role"] == "assistant" else: raise ValueError(f"Unknown role: {messages[0]['role']}") sequence_length = input_ids.shape[0] # Handle sequence length if self.pad_mode == DatasetPadMode.RIGHT: if sequence_length < self.max_length: # Pad sequences pad_token_id = self.tokenizer.pad_token_id if self.tokenizer.pad_token_id is not None else 0 padded_input_ids = torch.full((self.max_length - sequence_length,), 
pad_token_id, dtype=input_ids.dtype) padded_attention_mask = torch.zeros((self.max_length - sequence_length,), dtype=attention_mask.dtype) padded_loss_mask = torch.zeros((self.max_length - sequence_length,), dtype=loss_mask.dtype) input_ids = torch.cat((input_ids, padded_input_ids)) attention_mask = torch.cat((attention_mask, padded_attention_mask)) loss_mask = torch.cat((loss_mask, padded_loss_mask)) elif sequence_length > self.max_length: if self.truncation == "left": input_ids = input_ids[-self.max_length :] attention_mask = attention_mask[-self.max_length :] loss_mask = loss_mask[-self.max_length :] elif self.truncation == "right": input_ids = input_ids[: self.max_length] attention_mask = attention_mask[: self.max_length] loss_mask = loss_mask[: self.max_length] elif self.truncation == "error": raise ValueError(f"{sequence_length=} is larger than {self.max_length=}") else: raise ValueError(f"Unknown truncation method {self.truncation}") # Create position IDs position_ids = torch.arange(len(input_ids), dtype=torch.long) # Zero out position IDs for padding position_ids = position_ids * attention_mask return { "input_ids": input_ids, "attention_mask": attention_mask, "position_ids": position_ids, "loss_mask": loss_mask, } elif self.pad_mode == DatasetPadMode.NO_PADDING: # truncate input_ids if it is longer than max_length if len(input_ids) > self.max_length: input_ids = input_ids[: self.max_length] loss_mask = loss_mask[: self.max_length] # create position IDs position_ids = torch.arange(len(input_ids), dtype=torch.long) # return nested tensor without padding return { "input_ids": input_ids, "position_ids": position_ids, "loss_mask": loss_mask, } else: raise ValueError(f"Unknown pad mode {self.pad_mode}") ================================================ FILE: verl_distillation/verl/utils/dataset/onerec_dataset.py ================================================ import ast import copy import logging import os import random import re from typing import Any, Optional import datasets import numpy as np from omegaconf import DictConfig, ListConfig from torch.utils.data import Dataset from transformers import PreTrainedTokenizer, ProcessorMixin import verl.utils.torch_functional as verl_F from verl.utils.model import compute_position_id_with_mask logger = logging.getLogger(__name__) class OneRecDataset(Dataset): """OneRec dataset loading and preprocessing. - Caches Parquet files locally; - Reads data with HF Datasets and converts the chat structure; - Filters overlong prompts according to the config; - Supports multimodal preprocessing and position encoding. """ def __init__( self, data_files: str | list[str], tokenizer: PreTrainedTokenizer, config: DictConfig, processor: Optional[ProcessorMixin] = None, max_samples: int = -1, ) -> None: if not isinstance(data_files, (list, ListConfig)): data_files = [data_files] self.data_files = copy.deepcopy(data_files) self.original_data_files = copy.deepcopy(data_files) self.tokenizer = tokenizer self.processor = processor self.max_samples = max_samples self.config = config self.cache_dir = os.path.expanduser(config.get("cache_dir", "~/.cache/verl/rlhf")) self.prompt_key = config.get("prompt_key", "prompt") self.image_key = config.get("image_key", "images") self.video_key = config.get("video_key", "videos") self.max_prompt_length = config.get("max_prompt_length", 1024) self.return_raw_chat = config.get("return_raw_chat", False) self.return_full_prompt = config.get("return_full_prompt", False) self.truncation = config.get("truncation", "error") self.filter_overlong_prompts = config.get("filter_overlong_prompts", True) self.need_tools_kwargs = config.get("need_tools_kwargs", False) self.filter_prompts =
config.get("filter_prompts", True) self.return_multi_modal_inputs = config.get("return_multi_modal_inputs", True) self.enable_think = config.get("enable_think", True) self.think_mode = config.get("think_mode", "force_think") self.shuffle = config.get("shuffle", False) self.seed = config.get("seed", None) auto_workers = max(1, (os.cpu_count() or 4) // 4) self.num_workers = min( config.get("filter_overlong_prompts_workers", auto_workers), os.cpu_count() or auto_workers, ) self.use_shm = config.get("use_shm", False) self.serialize_dataset = False #self._download() self._read_files_and_tokenize() # --------------------------------------------------------------------- # 数据准备 # --------------------------------------------------------------------- def _download(self, use_origin_parquet: bool = False) -> None: from verl.utils.fs import copy_to_local target_files = self.original_data_files if use_origin_parquet else self.data_files for idx, parquet_file in enumerate(target_files): local_path = copy_to_local(src=parquet_file, cache_dir=self.cache_dir, use_shm=self.use_shm) target_files[idx] = local_path if use_origin_parquet: self.data_files = target_files def _read_files_and_tokenize(self) -> None: #dataframes: list[datasets.Dataset] = [] self.dataframe = datasets.load_dataset("parquet", data_files=self.data_files)["train"] #for parquet_file in self.data_files: # dataframe = datasets.load_dataset("parquet", data_files=parquet_file)["train"] # dataframes.append(dataframe) #self.dataframe = datasets.concatenate_datasets(dataframes) # type: ignore[attr-defined] logger.info("dataset len: %s", len(self.dataframe)) if self.max_samples > 0 and self.max_samples < len(self.dataframe): if self.shuffle: rngs_args = (self.seed,) if self.seed is not None else () rng = np.random.default_rng(*rngs_args) indices = rng.choice(len(self.dataframe), size=self.max_samples, replace=False) else: indices = np.arange(self.max_samples) self.dataframe = self.dataframe.select(indices.tolist()) print(f"selected {self.max_samples} random samples out of {len(self.dataframe)}") self.dataframe = self.dataframe.map( self._extract_prompt_fields, num_proc=self.num_workers, desc="Extract prompts and reward annotations", ) # 过滤掉处理失败的样本 original_len = len(self.dataframe) self.dataframe = self.dataframe.filter( self._is_valid_sample, num_proc=self.num_workers, desc="Filtering out failed samples", ) filtered_len = len(self.dataframe) logger.info("Filtered out %s failed samples, remaining: %s", original_len - filtered_len, filtered_len) logger.info("processed dataset len: %s", len(self.dataframe)) self.dataframe = self.maybe_filter_out_long_prompts(self.dataframe) def _extract_prompt_fields(self, row: dict[str, Any]) -> dict[str, Any]: try: raw_messages = row.get("messages") if isinstance(raw_messages, str): messages = ast.literal_eval(raw_messages) else: messages = raw_messages or [] # 多轮对话清洗成单轮对话 user_cnt = 0 assistant_cnt = 0 clean_chats = [] for msg in messages: if user_cnt > 0 and assistant_cnt > 0: break role = msg.get("role", None) content = msg.get("content", None) if role is None or content is None: raise ValueError("role or content is None!") content_text = "" if isinstance(content, str): content_text = content elif isinstance(content, dict) and content.get("type") == "text": content_text = content["text"] elif isinstance(content, list): for seg in content: if isinstance(seg, str): content_text += seg elif isinstance(seg, dict) and seg.get("type") == "text": content_text += seg.get("text", "") if role == "user" and 
content_text.strip() == "": raise ValueError("content is empty!") # # drop system prompt # if role == "system": # if "" in content_text or "" in content_text: # continue clean_chats.append({ "role": role, "content": content_text }) if role == "user": user_cnt += 1 if role == "assistant": assistant_cnt += 1 if not clean_chats or len(clean_chats) < 2: raise ValueError("Sample has empty messages; please check data integrity.") prompt_messages = clean_chats[:-1] # 根据配置决定是否给 user 消息添加 /think /no_think 指令 if self.enable_think: think_suffix = "" if self.think_mode == "force_think": think_suffix = " /think" elif self.think_mode == "force_nothink": think_suffix = " /no_think" elif self.think_mode == "auto": tm_idx = random.randint(0, 2) think_suffix = " /think" if tm_idx == 1 else " /no_think" if tm_idx == 2 else "" else: raise ValueError("think_mode is unexcept") for message in prompt_messages: if message["role"] == "user": message["content"] = message["content"] + think_suffix ground_truth_message = clean_chats[-1]["content"] reward_payload = { "ground_truth": ground_truth_message, "style": "rule", } row[self.prompt_key] = prompt_messages row["reward_model"] = reward_payload return row except Exception as e: # 标记处理失败的样本 row["_processing_failed"] = True row["_processing_error"] = str(e) return row def _is_valid_sample(self, row: dict[str, Any]) -> bool: """检查样本是否处理成功""" return not row.get("_processing_failed", False) # --------------------------------------------------------------------- # 过滤与恢复 # --------------------------------------------------------------------- def maybe_filter_out_long_prompts(self, dataframe: datasets.Dataset) -> datasets.Dataset: if not self.filter_overlong_prompts: return dataframe tokenizer = self.tokenizer processor = self.processor prompt_key = self.prompt_key image_key = self.image_key video_key = self.video_key if processor is not None: from verl.utils.dataset.vision_utils import (process_image, process_video) def doc_length(doc: dict[str, Any]) -> int: messages = self._build_messages(dict(doc)) raw_prompt = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=False) images = [process_image(image) for image in doc.get(image_key, [])] videos = [process_video(video) for video in doc.get(video_key, [])] encoded = processor(text=[raw_prompt], images=images or None, videos=videos or None, return_tensors="pt") return int(encoded["input_ids"].shape[-1]) else: def doc_length(doc: dict[str, Any]) -> int: messages = doc[prompt_key] return len(tokenizer.apply_chat_template(messages, add_generation_prompt=True)) filtered = dataframe.filter( lambda doc: doc_length(doc) <= self.max_prompt_length, num_proc=self.num_workers, desc=f"Filtering prompts longer than {self.max_prompt_length} tokens", ) # 获取data_source字段值为"distill"和"sft"的indices if "data_source" in filtered.features: self.distill_indices = [i for i, doc in enumerate(filtered) if doc.get("data_source") == "distill"] self.sft_indices = [i for i, doc in enumerate(filtered) if doc.get("data_source") == "sft"] logger.info(f"distill samples: {len(self.distill_indices)}, sft samples: {len(self.sft_indices)}") else: logger.warning("data_source field not found in filtered dataset") logger.info("filtered dataset len: %s", len(filtered)) return filtered def resume_dataset_state(self) -> None: self.serialize_dataset = not hasattr(self, "original_data_files") if not self.serialize_dataset: self._download(use_origin_parquet=True) self._read_files_and_tokenize() else: logger.warning("resume with serialized 
    # ---------------------------------------------------------------------
    # Filtering and resumption
    # ---------------------------------------------------------------------
    def maybe_filter_out_long_prompts(self, dataframe: datasets.Dataset) -> datasets.Dataset:
        if not self.filter_overlong_prompts:
            return dataframe

        tokenizer = self.tokenizer
        processor = self.processor
        prompt_key = self.prompt_key
        image_key = self.image_key
        video_key = self.video_key

        if processor is not None:
            from verl.utils.dataset.vision_utils import (process_image, process_video)

            def doc_length(doc: dict[str, Any]) -> int:
                messages = self._build_messages(dict(doc))
                raw_prompt = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
                images = [process_image(image) for image in doc.get(image_key, [])]
                videos = [process_video(video) for video in doc.get(video_key, [])]
                encoded = processor(text=[raw_prompt], images=images or None, videos=videos or None, return_tensors="pt")
                return int(encoded["input_ids"].shape[-1])
        else:
            def doc_length(doc: dict[str, Any]) -> int:
                messages = doc[prompt_key]
                return len(tokenizer.apply_chat_template(messages, add_generation_prompt=True))

        filtered = dataframe.filter(
            lambda doc: doc_length(doc) <= self.max_prompt_length,
            num_proc=self.num_workers,
            desc=f"Filtering prompts longer than {self.max_prompt_length} tokens",
        )

        # Collect the indices whose data_source field is "distill" or "sft"
        if "data_source" in filtered.features:
            self.distill_indices = [i for i, doc in enumerate(filtered) if doc.get("data_source") == "distill"]
            self.sft_indices = [i for i, doc in enumerate(filtered) if doc.get("data_source") == "sft"]
            logger.info(f"distill samples: {len(self.distill_indices)}, sft samples: {len(self.sft_indices)}")
        else:
            logger.warning("data_source field not found in filtered dataset")

        logger.info("filtered dataset len: %s", len(filtered))
        return filtered

    def resume_dataset_state(self) -> None:
        self.serialize_dataset = not hasattr(self, "original_data_files")
        if not self.serialize_dataset:
            self._download(use_origin_parquet=True)
            self._read_files_and_tokenize()
        else:
            logger.warning("resume with serialized dataloader, consider restarting from scratch for better perf")

    # ---------------------------------------------------------------------
    # Dataset interface
    # ---------------------------------------------------------------------
    def __len__(self) -> int:  # type: ignore[override]
        return len(self.dataframe)

    def _build_messages(self, example: dict[str, Any]) -> list[dict[str, Any]]:
        messages: list[dict[str, Any]] = example.pop(self.prompt_key)
        if self.image_key in example or self.video_key in example:
            for message in messages:
                content = message["content"]
                segments = [segment for segment in re.split(r"(|
score = score / 4 return score return score else: return format_score def compute_score_subem(solution_str, ground_truth, method="strict", format_score=0.0, score=1.0): """The scoring function for substring exact match (EM). Args: solution_str: the solution text ground_truth: the ground truth method: the method to extract the solution, choices are 'strict' and 'flexible' format_score: the score for the format score: the score for the correct answer """ answer = extract_solution(solution_str=solution_str) do_print = random.randint(1, 64) == 1 if do_print: print("--------------------------------") print(f"Golden answers: {ground_truth['target']}") print(f"Extracted answer: {answer}") print(f"Solution string: {solution_str}") if answer is None: return 0 else: if subem_check(answer, ground_truth["target"]): return score else: return format_score ================================================ FILE: verl_distillation/verl/utils/rollout_skip.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pathlib import Path from verl.protocol import DataProto class RolloutSkip: """ RolloutSkip skips sequence generation during rollout by attempting to load previously dumped data. If no dumped data is found, it generates new sequences and saves them to disk. Args: config: The configuration object containing rollout settings. rollout_wg: The worker group that handles the rollout process. Note: When rollout.n or rollout.gen_batch_size differ from previous runs, new sequences will be generated and saved with different filenames. 
""" print_mark = "[RolloutSkip()]" def __init__(self, config, rollout_wg): self.rollout_config = config.actor_rollout_ref.rollout self.exp_name = config.data.get("experiment_name", "") self.project_name = config.data.get("project_name", "") self.n = int(self.rollout_config.get("n", 0)) self.gbs = int(config.data.get("gen_batch_size", config.data.get("train_batch_size", 0))) self.dumped_dir = Path(self.rollout_config.get("skip_dump_dir", "/tmp/verl/rollout_dump")) self.dumped_dir.mkdir(parents=True, exist_ok=True) # Check if path is in Ray temporary directory if str(self.dumped_dir.absolute()).startswith("/tmp/ray/session"): print( f"\033[33m{self.print_mark} Warning: \nUsing dump path ", f"'{self.dumped_dir.absolute()}' is not recommended ", "as it's located in /tmp/ray/session*\033[0m", flush=True, ) print( f"{self.print_mark} Rollout skip dump path set to: ", f"{self.dumped_dir.absolute()}", flush=True, ) self._rollout_wg = rollout_wg @property def curr_path_dump(self): return self.dumped_dir.joinpath(f"{self.exp_name}_{self.project_name}_GBS{self.gbs}__N{self.n}").absolute() def wrap_generate_sequences(self): try: self._rollout_wg.generate_sequences = wrap_generate_sequences(self, self._rollout_wg) print( f"{self.print_mark} Successfully patched `actor_rollout_wg.generate_sequences()`", flush=True, ) except Exception as e: raise RuntimeError( "{self.print_mark} Failed to patch `actor_rollout_wg.generate_sequences()`", flush=True, ) from e def try_load(self): if not self.curr_path_dump.exists(): print( f"{self.print_mark} No data dump found at {self.curr_path_dump}.", "The trainer will generate and automatically dump the data for this first run.", flush=True, ) return None try: # * Load ret_batch = DataProto.load_from_disk(self.curr_path_dump) print( f"\033[32m{self.print_mark} Successfully load pre-generated data from {self.curr_path_dump}\033[0m", flush=True, ) return ret_batch except Exception as e: print( f"\033[31m{self.print_mark} Failed to load pre-generated data from {self.curr_path_dump}", f"Error: {str(e)}\033[0m", flush=True, ) return None def dump(self, outputs: DataProto): try: outputs.save_to_disk(self.curr_path_dump) print( f"\033[32m{self.print_mark} Successfully dump data in {self.curr_path_dump}\033[0m", flush=True, ) except Exception as e: print( f"\033[31m{self.print_mark} Failed to dump data in {self.curr_path_dump}: {e}\033[0m", flush=True, ) def wrap_generate_sequences(rolloutskip: RolloutSkip, rollout_wg): generate_sequences = rollout_wg.generate_sequences def warp_fn(batch, **kwargs): gen_batch_output = rolloutskip.try_load() if gen_batch_output is None: # * 1. Generation gen_batch_output = generate_sequences(batch, **kwargs) # * 2. Dump rolloutskip.dump(gen_batch_output) return gen_batch_output return warp_fn ================================================ FILE: verl_distillation/verl/utils/rollout_trace.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
================================================
FILE: verl_distillation/verl/utils/rollout_trace.py
================================================
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import asyncio
import contextlib
import functools
import inspect
import os
from typing import Optional


class RolloutTraceConfig:
    """Configuration for rollout tracing with various backends.

    Singleton configuration class for managing rollout trace settings
    across different tracing backends like Weave and MLflow.

    Args:
        backend (Optional[str]): Tracing backend to use ('weave', 'mlflow', or None).
        client (Optional[object]): Client instance for the selected backend.
        token2text (bool): Whether to convert tokens to text in traces. Defaults to False.
        project_name (str): Name of the project for tracing.
        experiment_name (str): Name of the experiment for tracing.
    """

    _instance: Optional["RolloutTraceConfig"] = None
    backend: Optional[str] = None
    client: Optional[object] = None
    token2text: bool = False
    _initialized: bool = False
    project_name: Optional[str] = None
    experiment_name: Optional[str] = None

    def __new__(cls, *args, **kwargs):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            cls._instance._initialized = False
        return cls._instance

    @classmethod
    def get_instance(cls) -> "RolloutTraceConfig":
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance

    @classmethod
    def init(cls, project_name: str, experiment_name: str, backend: str, token2text: bool = False):
        config = cls.get_instance()
        if config._initialized:
            return
        config.backend = backend
        config.token2text = token2text
        config.project_name = project_name
        config.experiment_name = experiment_name
        if backend == "weave":
            import weave

            config.client = weave.init(project_name)
        elif backend == "mlflow":
            import mlflow

            mlflow.config.enable_async_logging()
            config.client = mlflow
            MLFLOW_TRACKING_URI = os.environ.get("MLFLOW_TRACKING_URI", "sqlite:////tmp/mlruns.db")
            mlflow.set_tracking_uri(MLFLOW_TRACKING_URI)
            mlflow.set_experiment(project_name)
        else:
            config.client = None
        config._initialized = True

    @classmethod
    def get_backend(cls) -> Optional[str]:
        return cls.get_instance().backend

    @classmethod
    def get_client(cls) -> Optional[object]:
        return cls.get_instance().client

    @classmethod
    def enable_token2text(cls) -> Optional[bool]:
        return cls.get_instance().token2text

    @classmethod
    def reset(cls):
        cls._instance = None


@contextlib.contextmanager
def rollout_trace_attr(sample_index=None, step=None, rollout_n=None, name="rollout_trace", validate=False):
    """A context manager to add attributes to a trace for the configured backend."""
    backend = RolloutTraceConfig.get_backend()

    attributes = {}
    if backend:
        if sample_index is not None:
            attributes["sample_index"] = sample_index
        if step is not None:
            attributes["step"] = step
        if rollout_n is not None:
            attributes["rollout_n"] = rollout_n
        attributes["validate"] = validate
        attributes["experiment_name"] = RolloutTraceConfig.get_instance().experiment_name

    if not attributes or backend is None:
        yield
        return

    if backend == "weave":
        import weave

        with weave.attributes(attributes):
            yield
    elif backend == "mlflow":
        import mlflow

        with mlflow.start_span(name=name) as span:
            trace_id = span.trace_id
            for key, value in attributes.items():
                mlflow.set_trace_tag(trace_id, str(key), str(value))
            yield
    else:
        yield


def rollout_trace_op(func):
    @functools.wraps(func)
    async def async_wrapper(self, *args, **kwargs):
        backend = RolloutTraceConfig.get_backend()
        enable_token2text = RolloutTraceConfig.enable_token2text()
        if backend is None:
            return await func(self, *args, **kwargs)

        sig = inspect.signature(func)
        bound_args = sig.bind(self, *args, **kwargs)
        bound_args.apply_defaults()
        inputs = dict(bound_args.arguments)
        del inputs["self"]

        async def
add_token2text(self, result): if hasattr(result, "prompt_ids") and hasattr(self, "tokenizer") and hasattr(self.tokenizer, "decode"): _result = vars(result) loop = asyncio.get_running_loop() if hasattr(result, "prompt_ids"): prompt_text = await loop.run_in_executor(None, self.tokenizer.decode, result.prompt_ids) _result["prompt_text"] = prompt_text if hasattr(result, "response_ids"): response_text = await loop.run_in_executor(None, self.tokenizer.decode, result.response_ids) _result["response_text"] = response_text return _result return result if backend == "weave": tracer = RolloutTraceConfig.get_client() from weave.trace.context import call_context cur_attributes = {**call_context.call_attributes.get()} call = tracer.create_call(op=func.__qualname__, inputs=inputs, attributes=cur_attributes) try: result = await func(self, *args, **kwargs) if enable_token2text: _result = await add_token2text(self, result) tracer.finish_call(call, output=_result) else: tracer.finish_call(call, output=result) return result except Exception as e: tracer.finish_call(call, exception=e) raise e elif backend == "mlflow": import mlflow with mlflow.start_span(name=func.__qualname__) as span: span.set_inputs(inputs) result = await func(self, *args, **kwargs) if enable_token2text: _result = await add_token2text(self, result) span.set_outputs(_result) else: span.set_outputs(result) return result else: return await func(self, *args, **kwargs) @functools.wraps(func) def wrapper(self, *args, **kwargs): backend = RolloutTraceConfig.get_backend() if backend is None: return func(self, *args, **kwargs) sig = inspect.signature(func) bound_args = sig.bind(self, *args, **kwargs) bound_args.apply_defaults() inputs = dict(bound_args.arguments) del inputs["self"] if backend == "weave": tracer = RolloutTraceConfig.get_client() from weave.trace.context import call_context cur_attributes = {**call_context.call_attributes.get()} call = tracer.create_call(op=func.__qualname__, inputs=inputs, attributes=cur_attributes) try: result = func(self, *args, **kwargs) tracer.finish_call(call, output=result) return result except Exception as e: tracer.finish_call(call, exception=e) raise e elif backend == "mlflow": import mlflow return mlflow.trace(func)(self, *args, **kwargs) else: return func(self, *args, **kwargs) return async_wrapper if inspect.iscoroutinefunction(func) else wrapper ================================================ FILE: verl_distillation/verl/utils/seqlen_balancing.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import heapq from itertools import chain import torch from torch import distributed as dist from verl.protocol import DataProto from verl.utils import tensordict_utils as tu from verl.utils.device import get_device_name def calculate_workload(seqlen_list: list[int]): """ Calculate the workload for a dense transformer block based on sequence length. 
    FLOPs = 12 * hidden_size^2 * seqlen + 2 * hidden_size * seqlen^2
    The constants are hardcoded for a 7B model (hidden_size=4096), so the FLOPs
    are proportional to (6 * 4096 * seqlen + seqlen^2).
    """
    return 24576 * seqlen_list + seqlen_list**2
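# -----------------------------------------------------------------------------
# Illustrative sketch (comment-only, not part of the original file): where the
# constant 24576 comes from. Dividing the FLOPs formula by the common factor
# 2*hidden_size leaves 6*hidden_size*s + s^2 = 6*4096*s + s^2 = 24576*s + s^2
# per sequence of length s, so for a tensor of sequence lengths:
#
#     seqlens  = torch.tensor([128, 1024, 4096])
#     workload = 24576 * seqlens + seqlens**2
#     # -> tensor([  3162112,  26214400, 117440512])
#
# The quadratic attention term only starts to dominate once s approaches
# 6*hidden_size = 24576 tokens.
# -----------------------------------------------------------------------------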
def karmarkar_karp(seqlen_list: list[int], k_partitions: int, equal_size: bool):
    # see: https://en.wikipedia.org/wiki/Largest_differencing_method
    class Set:
        def __init__(self) -> None:
            self.sum = 0
            self.items = []

        def add(self, idx: int, val: int):
            self.items.append((idx, val))
            self.sum += val

        def merge(self, other):
            for idx, val in other.items:
                self.items.append((idx, val))
                self.sum += val

        def __lt__(self, other):
            if self.sum != other.sum:
                return self.sum < other.sum
            if len(self.items) != len(other.items):
                return len(self.items) < len(other.items)
            return self.items < other.items

    class State:
        def __init__(self, items: list[tuple[int, int]], k: int) -> None:
            self.k = k
            # sets should always be decreasing order
            self.sets = [Set() for _ in range(k)]
            assert len(items) in [1, k], f"{len(items)} not in [1, {k}]"
            for i, (idx, seqlen) in enumerate(items):
                self.sets[i].add(idx=idx, val=seqlen)
            self.sets = sorted(self.sets, reverse=True)

        def get_partitions(self):
            partitions = []
            for i in range(len(self.sets)):
                cur_partition = []
                for idx, _ in self.sets[i].items:
                    cur_partition.append(idx)
                partitions.append(cur_partition)
            return partitions

        def merge(self, other):
            for i in range(self.k):
                self.sets[i].merge(other.sets[self.k - 1 - i])
            self.sets = sorted(self.sets, reverse=True)

        @property
        def spread(self) -> int:
            return self.sets[0].sum - self.sets[-1].sum

        def __lt__(self, other):
            # min-heap: pop the state with the largest spread first;
            # if the spreads are equal, pop the state with the largest set first.
            if self.spread != other.spread:
                return self.spread > other.spread
            return self.sets[0] > other.sets[0]

        def __repr__(self) -> str:
            repr_str = "["
            for i in range(self.k):
                if i > 0:
                    repr_str += ","
                repr_str += "{"
                for j, (_, seqlen) in enumerate(self.sets[i].items):
                    if j > 0:
                        repr_str += ","
                    repr_str += str(seqlen)
                repr_str += "}"
            repr_str += "]"
            return repr_str

    sorted_seqlen_list = sorted([(seqlen, i) for i, seqlen in enumerate(seqlen_list)])
    states_pq = []
    if equal_size:
        assert len(seqlen_list) % k_partitions == 0, f"{len(seqlen_list)} % {k_partitions} != 0"
        for offset in range(0, len(sorted_seqlen_list), k_partitions):
            items = []
            for i in range(k_partitions):
                seqlen, idx = sorted_seqlen_list[offset + i]
                items.append((idx, seqlen))
            heapq.heappush(states_pq, State(items=items, k=k_partitions))
    else:
        for seqlen, idx in sorted_seqlen_list:
            heapq.heappush(states_pq, State(items=[(idx, seqlen)], k=k_partitions))

    while len(states_pq) > 1:
        state0 = heapq.heappop(states_pq)
        state1 = heapq.heappop(states_pq)
        # merge states
        state0.merge(state1)
        heapq.heappush(states_pq, state0)

    final_state = states_pq[0]
    partitions = final_state.get_partitions()
    if equal_size:
        for i, partition in enumerate(partitions):
            assert len(partition) * k_partitions == len(seqlen_list), (
                f"{len(partition)} * {k_partitions} != {len(seqlen_list)}"
            )
    return partitions


def greedy_partition(seqlen_list: list[int], k_partitions: int, equal_size: bool):
    bias = sum(seqlen_list) + 1 if equal_size else 0
    sorted_seqlen = [(seqlen + bias, i) for i, seqlen in enumerate(seqlen_list)]
    partitions = [[] for _ in range(k_partitions)]
    partition_sums = [0 for _ in range(k_partitions)]
    for seqlen, i in sorted_seqlen:
        min_idx = None
        for j in range(k_partitions):
            if min_idx is None or partition_sums[j] < partition_sums[min_idx]:
                min_idx = j
        partitions[min_idx].append(i)
        partition_sums[min_idx] += seqlen
    if equal_size:
        for i, partition in enumerate(partitions):
            assert len(partition) * k_partitions == len(seqlen_list), (
                f"{len(partition)} * {k_partitions} != {len(seqlen_list)}"
            )
    return partitions


def get_seqlen_balanced_partitions(seqlen_list: list[int], k_partitions: int, equal_size: bool):
    """
    Calculates partitions of indices from seqlen_list such that the sum of sequence lengths
    in each partition is balanced. Uses the Karmarkar-Karp differencing method.

    This is useful for balancing workload across devices or batches, especially when
    dealing with variable sequence lengths.

    Args:
        seqlen_list (List[int]): A list of sequence lengths for each item.
        k_partitions (int): The desired number of partitions.
        equal_size (bool): If True, ensures that each partition has the same number of items.
            Requires len(seqlen_list) to be divisible by k_partitions. If False, partitions
            can have varying numbers of items, focusing only on balancing the sum of
            sequence lengths.

    Returns:
        List[List[int]]: A list containing k_partitions lists. Each inner list contains
            the original indices of the items assigned to that partition. The indices
            within each partition list are sorted.

    Raises:
        AssertionError: If len(seqlen_list) < k_partitions.
        AssertionError: If equal_size is True and len(seqlen_list) is not divisible by k_partitions.
        AssertionError: If any resulting partition is empty.
""" assert len(seqlen_list) >= k_partitions, f"number of items:[{len(seqlen_list)}] < k_partitions:[{k_partitions}]" def _check_and_sort_partitions(partitions): assert len(partitions) == k_partitions, f"{len(partitions)} != {k_partitions}" seen_idx = set() sorted_partitions = [None] * k_partitions for i, partition in enumerate(partitions): assert len(partition) > 0, f"the {i}-th partition is empty" for idx in partition: seen_idx.add(idx) sorted_partitions[i] = sorted(partition) assert seen_idx == set(range(len(seqlen_list))) return sorted_partitions partitions = karmarkar_karp(seqlen_list=seqlen_list, k_partitions=k_partitions, equal_size=equal_size) return _check_and_sort_partitions(partitions) def log_seqlen_unbalance(seqlen_list: list[int], partitions: list[list[int]], prefix): """ Calculate and log metrics related to sequence length imbalance before and after partitioning. Args: seqlen_list (List[int]): A list of sequence lengths for each item. partitions (List[List[int]]): A list of partitions, where each inner list contains indices from seqlen_list assigned to that partition. prefix (str): A prefix to be added to each metric key in the returned dictionary. Returns: dict: A dictionary containing metrics related to sequence length imbalance. """ # Get the number of partitions k_partition = len(partitions) # assert len(seqlen_list) % k_partition == 0 batch_size = len(seqlen_list) // k_partition min_sum_seqlen = None max_sum_seqlen = None total_sum_seqlen = 0 # Iterate over each batch of sequence lengths for offset in range(0, len(seqlen_list), batch_size): cur_sum_seqlen = sum(seqlen_list[offset : offset + batch_size]) if min_sum_seqlen is None or cur_sum_seqlen < min_sum_seqlen: min_sum_seqlen = cur_sum_seqlen if max_sum_seqlen is None or cur_sum_seqlen > max_sum_seqlen: max_sum_seqlen = cur_sum_seqlen total_sum_seqlen += cur_sum_seqlen balanced_sum_seqlen_list = [] for partition in partitions: cur_sum_seqlen_balanced = sum([seqlen_list[i] for i in partition]) balanced_sum_seqlen_list.append(cur_sum_seqlen_balanced) # print("balanced_sum_seqlen_list: ", balanced_sum_seqlen_list) min_sum_seqlen_balanced = min(balanced_sum_seqlen_list) max_sum_seqlen_balanced = max(balanced_sum_seqlen_list) return { f"{prefix}/min": min_sum_seqlen, f"{prefix}/max": max_sum_seqlen, f"{prefix}/minmax_diff": max_sum_seqlen - min_sum_seqlen, f"{prefix}/balanced_min": min_sum_seqlen_balanced, f"{prefix}/balanced_max": max_sum_seqlen_balanced, f"{prefix}/mean": total_sum_seqlen / len(partitions), } def ceildiv(a, b): return -(a // -b) def roundup_divisible(a, b): return ((a + b - 1) // b) * b def rearrange_micro_batches( batch, max_token_len, dp_group=None, num_batches_divided_by=None, same_micro_num_in_dp=True, min_num_micro_batch=None, use_dynamic_bsz_balance=True, ): """ Split a batch into micro-batches by total token count, with optional DP sync and padding. Args: batch (TensorDict): must include "attention_mask" (B*S); other fields are sliced similarly. max_token_len (int): max sum of attention_mask per micro-batch. dp_group (optional): torch.distributed group for data-parallel sync. num_batches_divided_by (optional): virtual pipeline parallel size, for megatron. same_micro_num_in_dp (bool): if True and dp_group set, pad all ranks to the same count. min_num_micro_batch (int, optional): force at least this many splits (pads empty ones). use_dynamic_bsz_balance (bool, optional): balance the computational workload between micro-batches Returns: List[TensorDict]: the micro-batches. 
        List[List[int]]: index lists mapping each micro-batch back to original positions.
    """
    # this is per local micro_bsz
    input_ids = batch["input_ids"]
    if input_ids.is_nested:
        seq_len_effective: torch.Tensor = input_ids.offsets().diff()
        max_seq_len = max(seq_len_effective)
    else:
        max_seq_len = batch["attention_mask"].shape[-1]
        seq_len_effective: torch.Tensor = batch["attention_mask"].sum(dim=1)

    assert max_token_len >= max_seq_len, (
        f"max_token_len must be at least the longest sequence length. Got {max_token_len=} and {max_seq_len=}"
    )

    total_seqlen = seq_len_effective.sum().item()
    # NOTE: num_microbatches <= batch_size, so take the min of these two.
    num_micro_batches = min(len(seq_len_effective), ceildiv(total_seqlen, max_token_len))
    if min_num_micro_batch is not None:
        # used to support pp
        num_micro_batches = max(min_num_micro_batch, num_micro_batches)
    if dist.is_initialized() and same_micro_num_in_dp:
        num_micro_batches = torch.tensor([num_micro_batches], device=get_device_name())
        dist.all_reduce(num_micro_batches, op=dist.ReduceOp.MAX, group=dp_group)
        num_micro_batches = num_micro_batches.cpu().item()
    if num_batches_divided_by is not None:
        num_micro_batches = roundup_divisible(num_micro_batches, num_batches_divided_by)

    assert num_micro_batches <= len(seq_len_effective)

    workloads = calculate_workload(seq_len_effective)
    micro_bsz_idx = get_seqlen_balanced_partitions(workloads, num_micro_batches, equal_size=False)

    if use_dynamic_bsz_balance:
        # Use the sum of squared sequence lengths to approximate attention computation workload
        micro_bsz_idx.sort(
            key=lambda partition: (
                sum(workloads[idx] for idx in partition),
                partition[0] if partition else 0,
            ),
            reverse=True,
        )
        # Place smaller micro-batches at both ends to reduce the bubbles exposed during the warm-up and cool-down.
        micro_bsz_idx = micro_bsz_idx[::2][::-1] + micro_bsz_idx[1::2]

    micro_batches = []
    for partition in micro_bsz_idx:
        curr_micro_batch = tu.index_select_tensor_dict(batch, partition)
        micro_batches.append(curr_micro_batch)

    return micro_batches, micro_bsz_idx


def get_reverse_idx(idx_map):
    """
    Build the inverse of an index mapping.

    Args:
        idx_map (Sequence[int]): Sequence where idx_map[i] = j.

    Returns:
        List[int]: Inverse mapping list such that output[j] = i for each i.
    """
    reverse_idx_map = copy.deepcopy(idx_map)
    for i, idx in enumerate(idx_map):
        reverse_idx_map[idx] = i
    return reverse_idx_map


def prepare_dynamic_batch(
    data: DataProto,
    max_token_len: int,
    dp_group=None,
    num_batches_divided_by=None,
    same_micro_num_in_dp=True,
    min_num_micro_batch=None,
    use_dynamic_bsz_balance=True,
) -> tuple[list[DataProto], list[list[int]]]:
    """
    Prepare a batch for dynamic batching.

    Args:
        data (DataProto): The input data.
        max_token_len (int): The maximum token length for dynamic batching.

    Returns:
        Tuple[List[DataProto], List[List[int]]]: A tuple containing a list of DataProto
            objects and a list of index lists.
""" batch, batch_idx_list = rearrange_micro_batches( data.batch, max_token_len=max_token_len, dp_group=dp_group, num_batches_divided_by=num_batches_divided_by, same_micro_num_in_dp=same_micro_num_in_dp, min_num_micro_batch=min_num_micro_batch, use_dynamic_bsz_balance=use_dynamic_bsz_balance, ) micro_batches = [] for i, batch_idx in enumerate(batch_idx_list): tensors = dict(batch[i]) non_tensors = {key: value[batch_idx] for key, value in data.non_tensor_batch.items()} meta_info = copy.deepcopy(data.meta_info) micro_batches.append(DataProto.from_dict(tensors, non_tensors, meta_info=meta_info)) return micro_batches, batch_idx_list def restore_dynamic_batch(data: torch.Tensor, batch_idx_list: list[list[int]]) -> torch.Tensor: """ Restore a batch from dynamic batching. Args: data (torch.Tensor): The input data. batch_idx_list (List[List[int]]): The list of index lists. Returns: torch.Tensor: The restored data. """ indices = list(chain.from_iterable(batch_idx_list)) batch_size = data.shape[0] assert len(indices) == batch_size, f"{len(indices)} vs. {batch_size}" revert_indices = torch.tensor(get_reverse_idx(indices), dtype=torch.long) if data.is_nested: tensors = [data[i] for i in revert_indices] reverted_data = torch.nested.as_nested_tensor(tensors, layout=torch.jagged) else: reverted_data = data[revert_indices] return reverted_data ================================================ FILE: verl_distillation/verl/utils/tensordict_utils.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from typing import Iterator import torch from tensordict import TensorDict from tensordict.tensorclass import NonTensorData, NonTensorStack def assign_non_tensor_dict(tensor_dict: TensorDict, non_tensor_dict: dict): for key, val in non_tensor_dict.items(): assign_non_tensor_data(tensor_dict=tensor_dict, key=key, val=val) return tensor_dict def assign_non_tensor_data(tensor_dict: TensorDict, key, val): tensor_dict[key] = NonTensorData(val) def assign_non_tensor(tensordict: TensorDict, **kwargs): for key, val in kwargs.items(): assign_non_tensor_data(tensor_dict=tensordict, key=key, val=val) return tensordict def unwrap_non_tensor_data(data): if isinstance(data, NonTensorData): return data.data return data def get_non_tensor_data(data: TensorDict, key: str, default): output = data.get(key, default) return unwrap_non_tensor_data(output) def get_tensordict(tensor_dict: dict[str, torch.Tensor | list], non_tensor_dict: dict = None) -> TensorDict: """ Args: data_dict: meta_info: Returns: """ if non_tensor_dict is None: non_tensor_dict = {} batch_size = None for key, val in tensor_dict.items(): if isinstance(val, list): for v in val: assert not isinstance(v, torch.Tensor), ( "Passing a list makes the data NonTensorStack, " "which doesn't support torch.Tensor. 
Please convert to numpy first" ) assert isinstance(val, torch.Tensor | list) if batch_size is None: batch_size = val.size(0) if isinstance(val, torch.Tensor) else len(val) else: val_batch_size = val.size(0) if isinstance(val, torch.Tensor) else len(val) assert val_batch_size == batch_size, ( f"Batch size of tensor {key} is not consistent with other tensors. " f"Expected {batch_size}, got {val_batch_size}" ) if batch_size is None: batch_size = [] else: batch_size = [batch_size] for key, val in non_tensor_dict.items(): assert key not in tensor_dict tensor_dict[key] = NonTensorData(val) return TensorDict(source=tensor_dict, batch_size=batch_size) def index_select_tensor_dict(batch: TensorDict, indices: torch.Tensor | list[int]) -> TensorDict: """Index a tensor dict with a tensor of indices.""" if isinstance(indices, list): indices = torch.tensor(indices) assert indices.dim() == 1, "indices must be a 1D tensor" data_dict = {} batch_size = indices.shape[0] if batch is not None: for key, tensor in batch.items(): if isinstance(tensor, torch.Tensor) and not tensor.is_nested: data_dict[key] = tensor[indices] elif isinstance(tensor, torch.Tensor) and tensor.is_nested: data_dict[key] = torch.nested.as_nested_tensor([tensor[idx] for idx in indices], layout=torch.jagged) else: # This handles NonTensorStack (indexable by batch dim) and NonTensorData (scalar metadata). if tensor.shape: data_dict[key] = tensor[indices] else: data_dict[key] = tensor selected_batch = TensorDict(source=data_dict, batch_size=batch_size) else: selected_batch = None return selected_batch def union_tensor_dict(tensor_dict1: TensorDict, tensor_dict2: TensorDict) -> TensorDict: """Union two tensordicts.""" assert tensor_dict1.batch_size == tensor_dict2.batch_size, ( f"Two tensor dict must have identical batch size. Got {tensor_dict1.batch_size} and {tensor_dict2.batch_size}" ) for key in tensor_dict2.keys(): if key not in tensor_dict1.keys(): tensor_dict1[key] = tensor_dict2[key] else: if isinstance(tensor_dict2[key], torch.Tensor): assert tensor_dict1[key].equal(tensor_dict2[key]), ( f"{key} in tensor_dict1 and tensor_dict2 are not the same object" ) else: # non-tensor assert tensor_dict1[key] == tensor_dict2[key], ( f"{key} in tensor_dict1 and tensor_dict2 are not the same object" ) return tensor_dict1 def make_iterator(tensordict: TensorDict, mini_batch_size, epochs, seed=None, dataloader_kwargs=None): from torch.utils.data import DataLoader assert tensordict.batch_size[0] % mini_batch_size == 0, f"{tensordict.batch_size[0]} % {mini_batch_size} != 0" # we can directly create a dataloader from TensorDict if dataloader_kwargs is None: dataloader_kwargs = {} if seed is not None: generator = torch.Generator() generator.manual_seed(seed) else: generator = None assert isinstance(dataloader_kwargs, dict) train_dataloader = DataLoader( dataset=tensordict, batch_size=mini_batch_size, collate_fn=lambda x: x, generator=generator, **dataloader_kwargs ) def get_data(): for _ in range(epochs): yield from train_dataloader return iter(get_data()) def assert_tensordict_eq(tensordict1: TensorDict, tensordict2: TensorDict): assert set(tensordict1.keys()) == set(tensordict2.keys()) for key in tensordict1.keys(): val = tensordict1[key] val2 = tensordict2[key] assert type(val) is type(val2), f"The type of {key} must be the same. Got {type(val)} vs {type(val2)}" if isinstance(val, torch.Tensor): if val.is_nested: assert val.is_nested and val2.is_nested, ( f"Both tensors must be nested tensors. 
{val.is_nested=}, {val2.is_nested=}"
                )
                t1, t2 = val.unbind(), val2.unbind()
                assert len(t1) == len(t2), f"Nested tensor should have the same lengths. {len(t1)=} vs {len(t2)=}"
                for c1, c2 in zip(t1, t2, strict=True):
                    assert torch.equal(c1, c2), f"Nested tensor components have different values. {c1=} vs {c2=}"
            else:
                assert torch.all(torch.eq(val, val2)).item()
        else:
            assert val == val2


def pop(tensordict: TensorDict, keys: Iterator[str]) -> TensorDict:
    tensor_output = {}
    non_tensor_output = {}
    for key in keys:
        output = tensordict.get(key)
        if isinstance(output, torch.Tensor):
            tensor_output[key] = tensordict.pop(key)
        elif isinstance(output, NonTensorStack):
            tensor_output[key] = tensordict.pop(key).tolist()
        else:
            assert isinstance(output, NonTensorData)
            non_tensor_output[key] = tensordict.pop(key)
    return get_tensordict(tensor_output, non_tensor_output)


def pad_to_divisor(data: TensorDict, size_divisor: int):
    """Pad a TensorDict to a size divisible by size_divisor

    Args:
        size_divisor (int): size divisor

    Returns:
        data (TensorDict): the padded TensorDict
        pad_size (int)
    """
    assert isinstance(data, TensorDict), "data must be a TensorDict"
    if len(data) % size_divisor != 0:
        pad_size = size_divisor - len(data) % size_divisor
        padding_protos = []
        remaining_pad = pad_size
        while remaining_pad > 0:
            take_size = min(remaining_pad, len(data))
            padding_protos.append(data[:take_size])
            remaining_pad -= take_size
        data_padded = torch.cat([data] + padding_protos)
    else:
        if len(data) == 0:
            logging.warning("padding a TensorDict with no items, no change made")
        pad_size = 0
        data_padded = data
    return data_padded, pad_size


def unpad(data: TensorDict, pad_size):
    """Unpad the TensorDict by pad_size, i.e. `data[:-pad_size]`"""
    if pad_size != 0:
        data = data[:-pad_size]
    return data
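# -----------------------------------------------------------------------------
# Illustrative sketch (comment-only, not part of the original file): a
# pad_to_divisor / unpad round-trip on a hypothetical batch of 3 rows with
# size_divisor=4 (rows are repeated from the front to fill the remainder).
#
#     td = TensorDict({"x": torch.arange(3)}, batch_size=[3])
#     padded, pad_size = pad_to_divisor(td, size_divisor=4)   # pad_size == 1
#     # padded["x"] -> tensor([0, 1, 2, 0])
#     restored = unpad(padded, pad_size)                      # back to 3 rows
#     # torch.equal(restored["x"], td["x"]) -> True
# -----------------------------------------------------------------------------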
""" from transformers import AutoTokenizer if correct_gemma2 and isinstance(name_or_path, str) and "gemma-2-2b-it" in name_or_path: # the EOS token in gemma2 is ambiguious, which may worsen RL performance. # https://huggingface.co/google/gemma-2-2b-it/commit/17a01657f5c87135bcdd0ec7abb4b2dece04408a warnings.warn( "Found gemma-2-2b-it tokenizer. Set eos_token and eos_token_id to and 107.", stacklevel=1 ) kwargs["eos_token"] = "" kwargs["eos_token_id"] = 107 tokenizer = AutoTokenizer.from_pretrained(name_or_path, **kwargs) if correct_pad_token: set_pad_token_id(tokenizer) return tokenizer def hf_processor(name_or_path, **kwargs): """Create a huggingface processor to process multimodal data. Args: name_or_path (str): The name of the processor. Returns: transformers.ProcessorMixin: The pretrained processor. """ from transformers import AutoProcessor try: processor = AutoProcessor.from_pretrained(name_or_path, **kwargs) except Exception as e: processor = None # TODO(haibin.lin): try-catch should be removed after adding transformer version req to setup.py to avoid # silent failure warnings.warn(f"Failed to create processor: {e}. This may affect multimodal processing", stacklevel=1) # Avoid load tokenizer, see: # https://github.com/huggingface/transformers/blob/v4.49.0/src/transformers/models/auto/processing_auto.py#L344 if processor is not None and "Processor" not in processor.__class__.__name__: processor = None return processor ================================================ FILE: verl_distillation/verl/utils/torch_dtypes.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Adapted from Cruise. """ import torch HALF_LIST = [16, "16", "fp16", "float16", torch.float16] FLOAT_LIST = [32, "32", "fp32", "float32", torch.float32] BFLOAT_LIST = ["bf16", "bfloat16", torch.bfloat16] class PrecisionType: """Type of precision used. 
>>> PrecisionType.HALF == 16 True >>> PrecisionType.HALF in (16, "16") True """ HALF = "16" FLOAT = "32" FULL = "64" BFLOAT = "bf16" MIXED = "mixed" @staticmethod def supported_type(precision: str | int) -> bool: return any(x == precision for x in PrecisionType) @staticmethod def supported_types() -> list[str]: return [x.value for x in PrecisionType] @staticmethod def is_fp16(precision): return precision in HALF_LIST @staticmethod def is_fp32(precision): return precision in FLOAT_LIST @staticmethod def is_bf16(precision): return precision in BFLOAT_LIST @staticmethod def to_dtype(precision): if precision in HALF_LIST: return torch.float16 elif precision in FLOAT_LIST: return torch.float32 elif precision in BFLOAT_LIST: return torch.bfloat16 else: raise RuntimeError(f"unexpected precision: {precision}") @staticmethod def to_str(precision): if precision == torch.float16: return "fp16" elif precision == torch.float32: return "fp32" elif precision == torch.bfloat16: return "bf16" else: raise RuntimeError(f"unexpected precision: {precision}") ================================================ FILE: verl_distillation/verl/utils/torch_functional.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Contain small torch utilities """ import math from contextlib import contextmanager from typing import Optional import torch import torch.distributed import torch.nn.functional as F from tensordict import TensorDict from torch import nn from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR from transformers import PreTrainedTokenizer from verl.utils.device import get_device_name, get_torch_device try: from flash_attn.ops.triton.cross_entropy import cross_entropy_loss FLAH_ATTN_CROSS_ENTROPY_LOSS_AVAILABLE = True except ImportError: FLAH_ATTN_CROSS_ENTROPY_LOSS_AVAILABLE = False try: import torch_npu NPU_CROSS_ENTROPY_LOSS_AVAILABLE = hasattr(torch_npu, "npu_cross_entropy_loss") except ImportError: NPU_CROSS_ENTROPY_LOSS_AVAILABLE = False def gather_from_labels(data, label): """Gather the label from data. The value in label should be [0, vocab_size) Args: data: (..., vocab_size) label (torch.IntTensor) : (...,) Returns: """ output = torch.gather(data, -1, label.unsqueeze(-1)).squeeze(-1) return output def logprobs_from_logits(logits, labels, inplace_backward=True): """ Compute per-token log-probabilities for the given labels. Uses a Flash-Attention–based cross-entropy (if available) for efficient backward, otherwise falls back to a standard log-softmax+gather approach. See: https://github.com/pytorch/pytorch/issues/563#issuecomment-330103591 Args: logits (Tensor): Model outputs of shape (..., vocab_size). labels (LongTensor): True class indices of shape matching logits[..., :-1]. inplace_backward (bool): If True and Flash-Attn is available, perform backward in-place. Returns: Tensor: Log-probabilities of the target labels, shape logits.shape[:-1]. 
""" if FLAH_ATTN_CROSS_ENTROPY_LOSS_AVAILABLE: batch_dim = logits.shape[:-1] last_dim = logits.shape[-1] logits = logits.reshape(-1, last_dim) labels = labels.reshape(-1) output = logprobs_from_logits_flash_attn(logits, labels, inplace_backward=inplace_backward) output = output.view(*batch_dim) elif NPU_CROSS_ENTROPY_LOSS_AVAILABLE: output = logprobs_from_logits_torch_npu(logits, labels) else: output = logprobs_from_logits_v2(logits, labels) return output def logprobs_from_logits_flash_attn(logits, labels, inplace_backward=True): output = cross_entropy_loss(logits, labels, inplace_backward=inplace_backward) assert isinstance(output, tuple), ( "please make sure flash-attn>=2.4.3 where cross_entropy_loss returns Tuple[losses, z_losses]." ) return -output[0] def logprobs_from_logits_torch_npu(logits, labels): batch_dim = logits.shape[:-1] logits = logits.reshape(-1, logits.shape[-1]) loss, _, _, _ = torch_npu.npu_cross_entropy_loss(logits, labels.reshape(-1), reduction="none") return -loss.view(*batch_dim) def logprobs_from_logits_naive(logits, labels): logp = F.log_softmax(logits, dim=-1) logpy = gather_from_labels(logp, labels) return logpy def logprobs_from_logits_v2(logits: torch.FloatTensor, labels): """ A memory efficient implementation of logprobs_from_logits """ if logits.dtype in [torch.float32, torch.float64]: logits_labels = torch.gather(logits, dim=-1, index=labels.unsqueeze(-1)).squeeze(-1) # loop to reduce peak mem consumption logsumexp_values = torch.stack([torch.logsumexp(logit, dim=-1) for logit in logits]) logprobs_labels = logits_labels - logsumexp_values # log_softmax(x_i) = x_i - logsumexp(x) else: # logsumexp approach is unstable with bfloat16, fall back to slightly less efficent approach logprobs_labels = [] for row_logits, row_labels in zip(logits, labels, strict=True): # loop to reduce peak mem consumption row_logprobs = F.log_softmax(row_logits, dim=-1) row_logprobs_labels = row_logprobs.gather(dim=-1, index=row_labels.unsqueeze(-1)).squeeze(-1) logprobs_labels.append(row_logprobs_labels) logprobs_labels = torch.stack(logprobs_labels) return logprobs_labels def clip_by_value(x, tensor_min, tensor_max): """ Tensor extenstion to torch.clamp https://github.com/pytorch/pytorch/issues/2793#issuecomment-428784713 """ clipped = torch.max(torch.min(x, tensor_max), tensor_min) return clipped def entropy_from_logits(logits: torch.Tensor): """Calculate entropy from logits.""" pd = torch.nn.functional.softmax(logits, dim=-1) entropy = torch.logsumexp(logits, dim=-1) - torch.sum(pd * logits, dim=-1) return entropy def entropy_from_logits_with_chunking(logits: torch.Tensor, chunk_size: int = 2048): """Memory-efficient entropy calculation with chunking.""" entropy = torch.zeros(logits.shape[0], device=logits.device) for i in range(0, logits.shape[0], chunk_size): logits_chunk = logits[i : i + chunk_size].float() pd_chunk = torch.nn.functional.softmax(logits_chunk, dim=-1) entropy_chunk = torch.logsumexp(logits_chunk, dim=-1) - torch.sum(pd_chunk * logits_chunk, dim=-1) entropy[i : i + chunk_size] = entropy_chunk return entropy def masked_sum(values, mask, axis=None): """Compute mean of tensor with a masked values.""" # If NaNs exist out of mask, replace NaNs in values with a value that # won't affect the sum (e.g., 0 for masked regions) valid_values = torch.where(mask.bool(), values, 0.0) return (valid_values * mask).sum(axis=axis) def masked_mean(values, mask, axis=None): """ Compute the mean of `values` over elements selected by `mask`. Args: values (Tensor): Input tensor. 
def masked_var(values, mask, unbiased=True):
    """Compute variance of tensor with masked values."""
    mean = masked_mean(values, mask)
    centered_values = values - mean
    variance = masked_mean(centered_values**2, mask)
    if unbiased:
        mask_sum = mask.sum()
        if mask_sum == 0:
            raise ValueError("At least one element in the mask has to be 1.")
        # note that if mask_sum == 1, then there is a division by zero issue
        # to avoid it you just need to use a larger minibatch_size
        if mask_sum == 1:
            raise ValueError("The sum of the mask is one, which can cause a division by zero.")
        bessel_correction = mask_sum / (mask_sum - 1)
        variance = variance * bessel_correction
    return variance


def masked_whiten(values, mask, shift_mean=True):
    """
    Whiten `values` by normalizing with mean and variance computed over `mask`.

    Args:
        values (torch.Tensor): Input tensor.
        mask (torch.Tensor): Boolean tensor of same shape, selects elements for stats.
        shift_mean (bool): If True (default), output is zero-mean;
            if False, the original mean is re-added after scaling.

    Returns:
        torch.Tensor: Whitened tensor of same shape as `values`.
    """
    mean, var = masked_mean(values, mask), masked_var(values, mask)
    whitened = (values - mean) * torch.rsqrt(var + 1e-8)
    if not shift_mean:
        whitened += mean
    return whitened


def get_response_mask(response_id: torch.Tensor, eos_token: int | list[int] = 2, dtype=torch.int64):
    """
    end of sentence token can be int or list: 1 or [1, 2]

    e.g.
    response_id = torch.tensor([[20, 10, 34, 1, 0, 0, 0],
                                [78, 0, 76, 2, 1, 0, 0],
                                [23, 98, 1, 0, 0, 0, 0],
                                [33, 3, 98, 45, 1, 0, 0]])
    # eos_token=1
    response_mask: tensor([[1, 1, 1, 1, 0, 0, 0],
                           [1, 1, 1, 1, 1, 0, 0],
                           [1, 1, 1, 0, 0, 0, 0],
                           [1, 1, 1, 1, 1, 0, 0]])
    # eos_token=[1, 2]
    response_mask: tensor([[1, 1, 1, 1, 0, 0, 0],
                           [1, 1, 1, 1, 0, 0, 0],
                           [1, 1, 1, 0, 0, 0, 0],
                           [1, 1, 1, 1, 1, 0, 0]])
    """
    eos_mask = torch.isin(response_id, torch.tensor(eos_token, device=response_id.device)).int()
    return (eos_mask.cumsum(dim=1) - eos_mask).eq(0).to(dtype)


def compute_grad_norm(model: nn.Module):
    # NOTE: returns the sum of squared gradient entries (the squared 2-norm),
    # not the norm itself.
    total_grad_square = 0
    for param in model.parameters():
        if param.grad is not None:
            total_grad_square += torch.sum(torch.square(param.grad.detach())).item()
    return total_grad_square


def broadcast_dict_tensor(tensors: dict[str, torch.Tensor] | TensorDict, src, group):
    """
    TODO: optimize this. Technically, we only need one broadcast
    """
    for key in tensors.sorted_keys:
        torch.distributed.broadcast(tensors[key], src=src, group=group, async_op=False)


def allgather_dict_tensors(tensors: dict[str, torch.Tensor] | TensorDict, size, group, dim=0):
    """
    TODO: optimize this.
    - We can use async ops
    - We can use only one allgather

    Args:
        tensors: dict or TensorDict of batched tensors to gather across ranks.
        size: the number of ranks in the process group.
        group: the process group to all-gather over.

    Returns:
        The gathered dict/TensorDict, with the batch dimension multiplied by size.
    """
    if isinstance(tensors, TensorDict):
        is_tensor_dict = True
        tensors_as_dict = tensors.to_dict()
    else:
        tensors_as_dict = tensors
        is_tensor_dict = False
    output = {}
    sorted_keys = sorted(tensors_as_dict.keys())
    for key in sorted_keys:
        val = tensors_as_dict[key]
        output[key] = [torch.empty_like(val) for _ in range(size)]
        torch.distributed.all_gather(output[key], val, group=group, async_op=False)
        output[key] = torch.cat(output[key], dim=dim)
    if is_tensor_dict:
        output = TensorDict(source=output, batch_size=tensors.batch_size[0] * size)
    return output


def split_dict_tensor_into_batches(tensors: TensorDict, batch_size) -> list[TensorDict]:
    assert tensors.batch_size[0] % batch_size == 0, (
        f"input data batch size: {tensors.batch_size[0]}, split batch size: {batch_size}"
    )
    return tensors.split(batch_size)


def pad_2d_list_to_length(response, pad_token_id, max_length=None):
    """
    pad a 2D list (e.g. responses, logprobs) to a 2D tensor.
    """
    response_length = max(len(sub_list) for sub_list in response)
    target_length = max_length if max_length is not None and max_length > response_length else response_length
    padded_response = [tuple(sub_list) + (pad_token_id,) * (target_length - len(sub_list)) for sub_list in response]
    tensor = torch.tensor(padded_response)
    return tensor


def pad_sequence_to_length(tensors, max_seq_len, pad_token_id, left_pad=False):
    """
    pad a 2D tensors (e.g. responses, logprobs) in the last dim to max_seq_length.
    input shape: [bs, seq_length]
    output shape: [bs, max_seq_length]
    """
    if tensors.shape[-1] >= max_seq_len:
        return tensors

    # (0, max_seq_len - tensors.shape[-1]) means right pad to max_seq_length and no left pad
    pad_tuple = (max_seq_len - tensors.shape[-1], 0) if left_pad else (0, max_seq_len - tensors.shape[-1])
    return F.pad(tensors, pad_tuple, "constant", pad_token_id)


def postprocess_data(
    input_ids: torch.Tensor,
    attention_mask: torch.Tensor,
    max_length: int,
    pad_token_id: int,
    left_pad=True,
    truncation="error",
):
    """Process tokenizer outputs to consistent shapes via padding/truncation.
    Args:
        input_ids: Token indices [batch_size, seq_len]
        attention_mask: Mask [batch_size, seq_len]
        max_length: Target sequence length
        pad_token_id: Padding token ID
        left_pad: Pad left if True
        truncation: "left", "right", "middle" or "error"

    Returns:
        (input_ids, attention_mask) padded/truncated to max_length
    """
    assert truncation in ["left", "right", "middle", "error"]
    assert input_ids.ndim == 2

    sequence_length = input_ids.shape[-1]
    if sequence_length < max_length:
        input_ids = pad_sequence_to_length(
            input_ids, max_seq_len=max_length, pad_token_id=pad_token_id, left_pad=left_pad
        )
        attention_mask = pad_sequence_to_length(
            attention_mask, max_seq_len=max_length, pad_token_id=0, left_pad=left_pad
        )
    elif sequence_length > max_length:
        if truncation == "left":
            # actually, left truncation may not be reasonable
            input_ids = input_ids[:, -max_length:]
            attention_mask = attention_mask[:, -max_length:]
        elif truncation == "right":
            input_ids = input_ids[:, :max_length]
            attention_mask = attention_mask[:, :max_length]
        elif truncation == "middle":
            left_half = max_length // 2
            right_half = max_length - left_half
            input_ids = torch.cat([input_ids[:, :left_half], input_ids[:, -right_half:]], dim=-1)
            attention_mask = torch.cat([attention_mask[:, :left_half], attention_mask[:, -right_half:]], dim=-1)
        elif truncation == "error":
            raise NotImplementedError(f"{sequence_length=} is larger than {max_length=}")
        else:
            raise NotImplementedError(f"Unknown truncation method {truncation}")

    return input_ids, attention_mask


def tokenize_and_postprocess_data(
    prompt: str, tokenizer: PreTrainedTokenizer, max_length: int, pad_token_id: int, left_pad=True, truncation="error"
):
    """Tokenize text and process outputs to consistent tensor shapes.

    Args:
        prompt: Input text to tokenize
        tokenizer: HuggingFace tokenizer instance
        max_length: Target sequence length
        pad_token_id: Padding token ID
        left_pad: Pad left if True
        truncation: Truncation strategy ("left"/"right"/"error")

    Returns:
        Tuple of (input_ids, attention_mask) from postprocess_data
    """
    input_data = tokenizer(prompt, return_tensors="pt", add_special_tokens=False)
    input_ids = input_data["input_ids"]
    attention_mask = input_data["attention_mask"]
    return postprocess_data(input_ids, attention_mask, max_length, pad_token_id, left_pad, truncation)


def remove_pad_token(input_ids: torch.Tensor, attention_mask: torch.Tensor):
    """Remove the pad token.

    Args:
        input_ids shape: [bs, seq_length]
        attention_mask shape: [bs, seq_length]

    Returns:
        no_padding_batch (List[List[int]]): contains the rmpad token ids per query.
    """
    no_padding_batch = []
    for ids, mask in zip(input_ids, attention_mask, strict=True):
        no_padding_batch.append((ids[len(ids) - mask.sum() :]).cpu().numpy().tolist())
    return no_padding_batch


def log_probs_from_logits_response(input_ids, logits, response_length):
    """Compute the response log_probs from full logits. Note that logits = model(input_ids)

    Args:
        input_ids: [batch_size, seqlen]
        logits: [batch_size, seqlen, vocab_size]

    Returns:
        response_log_prob:
    """
    response_logits = logits[:, -response_length - 1 : -1]
    response = input_ids[:, -response_length:]
    response_log_prob = logprobs_from_logits(logits=response_logits, labels=response)
    return response_log_prob


def log_probs_from_logits_response_rmpad(input_ids, attention_mask, logits_rmpad, response_length):
    """Compute the log_probs from logits with rmpad logits and pad input.

    Note that logits_rmpad = model(input_ids_rmpad). For each sentence, there is a shift
    between logits and input_ids.
The reason for this function is to compute logprobs_from_logits in rmpad mode, because doing so with padded logits is memory-intensive for large vocab_size Args: input_ids: [batch_size, seqlen] attention_mask: [batch_size, seqlen] logits_rmpad: [total_nnz, vocab_size] response_length: int """ from flash_attn.bert_padding import pad_input, unpad_input batch_size, seqlen = input_ids.shape input_ids_rmpad, indices, *_ = unpad_input(input_ids.unsqueeze(-1), attention_mask=attention_mask) input_ids_rmpad = input_ids_rmpad.squeeze(-1) input_ids_rmpad_rolled = torch.roll(input_ids_rmpad, shifts=-1, dims=0) full_log_probs_rmpad = logprobs_from_logits(logits=logits_rmpad, labels=input_ids_rmpad_rolled) # (total_nnz,) full_output = pad_input( hidden_states=full_log_probs_rmpad.unsqueeze(-1), indices=indices, batch=batch_size, seqlen=seqlen ) output = full_output.squeeze(-1)[:, -response_length - 1 : -1] # [batch_size, response_length] return output def log_probs_from_logits_all_rmpad(input_ids_rmpad, logits_rmpad, indices, batch_size, seqlen, response_length): """Compute the log_probs from logits with rmpad input_ids and logits. Note that logits_rmpad = model(input_ids_rmpad). For each sentence, there is a shift between logits and input_ids. The reason for this function is to compute logprobs_from_logits in rmpad mode, because doing so with padded logits is memory-intensive for large vocab_size Args: input_ids_rmpad: [1, total_nnz] logits_rmpad: [total_nnz, vocab_size] indices: [total_nnz] batch_size: int seqlen: int response_length: int """ from flash_attn.bert_padding import pad_input input_ids_rmpad = input_ids_rmpad.transpose(0, 1) # transpose back to [total_nnz, 1] input_ids_rmpad = input_ids_rmpad.squeeze(-1) input_ids_rmpad_rolled = torch.roll(input_ids_rmpad, shifts=-1, dims=0) full_log_probs_rmpad = logprobs_from_logits(logits=logits_rmpad, labels=input_ids_rmpad_rolled) # (total_nnz,) full_output = pad_input( hidden_states=full_log_probs_rmpad.unsqueeze(-1), indices=indices, batch=batch_size, seqlen=seqlen ) output = full_output.squeeze(-1)[:, -response_length - 1 : -1] # [batch_size, response_length] return output def post_process_logits(input_ids, logits, temperature, top_k, top_p): if temperature != 1.0: logits = logits.div_(temperature) # inplace operation to avoid OOM # TODO: add them back # if top_k is not None and top_k > 0: # logits = TopKLogitsWarper(top_k=top_k)(input_ids, logits) # if top_p is not None and top_p < 1.0 and top_p > 0.0: # logits = TopPLogitsWarper(top_p=top_p)(input_ids, logits) return logits """ Optimizer related """ def get_cosine_schedule_with_warmup( optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, min_lr_ratio: float = 0.0, num_cycles: float = 0.5, last_epoch: int = -1, init_lr_ratio: float = None, ): """ Create a schedule with a learning rate that decreases following the values of the cosine function from the initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the initial lr set in the optimizer. Args: optimizer (:class:`~torch.optim.Optimizer`): The optimizer for which to schedule the learning rate. num_warmup_steps (:obj:`int`): The number of steps for the warmup phase. num_training_steps (:obj:`int`): The total number of training steps. min_lr_ratio (:obj:`float`, `optional`, defaults to 0.0): The minimum lr ratio w.r.t the maximum. num_cycles (:obj:`float`, `optional`, defaults to 0.5): The number of waves in the cosine schedule (the default is to just decrease from the max value to 0 following a half-cosine).
last_epoch (:obj:`int`, `optional`, defaults to -1): The index of the last epoch when resuming training. init_lr_ratio (:obj:`float`, `optional`, defaults to None): The initial lr ratio w.r.t the maximum. Return: :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. """ min_lr_ratio = 0.0 if min_lr_ratio is None else min_lr_ratio assert min_lr_ratio >= 0 and min_lr_ratio <= 1.0 coef = (1 - min_lr_ratio) * 0.5 intercept = (1 + min_lr_ratio) * 0.5 init_lr_ratio = 0.0 if init_lr_ratio is None else init_lr_ratio assert init_lr_ratio >= 0 and init_lr_ratio <= 1.0 def lr_lambda(current_step): if current_step < num_warmup_steps: return init_lr_ratio + (1.0 - init_lr_ratio) * (float(current_step) / float(max(1, num_warmup_steps))) progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps)) x = math.cos(math.pi * float(num_cycles) * 2.0 * progress) return max(min_lr_ratio, x * coef + intercept) return LambdaLR(optimizer, lr_lambda, last_epoch) def get_constant_schedule_with_warmup( optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1, ): """ Create a constant LR schedule with a linear warmup phase. Args: optimizer (Optimizer): Wrapped optimizer. num_warmup_steps (int): Number of steps to ramp up the LR from 0 to initial value. last_epoch (int, optional): The index of the last epoch when resuming training. Defaults to -1. Returns: LambdaLR: Scheduler that increases LR linearly during warmup, then holds it constant. """ def lr_lambda(current_step): if current_step < num_warmup_steps: return float(current_step) / float(max(1.0, num_warmup_steps)) return 1.0 return LambdaLR(optimizer, lr_lambda, last_epoch) def prepare_decoder_attention_mask(attention_mask, input_shape, inputs_embeds): # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] combined_attention_mask = None if input_shape[-1] > 1: combined_attention_mask = _make_causal_mask( input_shape, inputs_embeds.dtype, device=inputs_embeds.device, ) if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to( inputs_embeds.device ) combined_attention_mask = ( expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask ) return combined_attention_mask # Copied from transformers.models.bart.modeling_bart._make_causal_mask def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device): """ Make causal mask used for bi-directional self-attention. """ bsz, tgt_len = input_ids_shape mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device) mask_cond = torch.arange(mask.size(-1), device=device) mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) mask = mask.to(dtype) return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len) # Copied from transformers.models.bart.modeling_bart._expand_mask def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. 
""" bsz, src_len = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) def get_unpad_data(attention_mask): seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() max_seqlen_in_batch = seqlens_in_batch.max().item() cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0)) return ( indices, cu_seqlens, max_seqlen_in_batch, ) def get_wsd_schedule_with_warmup( optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, min_lr_ratio: float = 0.0, num_cycles: float = 0.5, last_epoch: int = -1, stable_ratio: float = 0.9, ): """ Create a Warmup-Stable-Decay learning rate scheduler. The schedule follows three phases: 1. Warmup: Learning rate increases linearly from 0 to the initial LR 2. Stable: Learning rate remains constant at the initial LR 3. Decay: Learning rate decreases following a cosine curve to min_lr_ratio * initial LR Args: optimizer (:class:`~torch.optim.Optimizer`): The optimizer for which to schedule the learning rate. num_warmup_steps (:obj:`int`): The number of steps for the warmup phase. num_training_steps (:obj:`int`): The total number of training steps. min_lr_ratio (:obj:`float`, `optional`, defaults to 0.0): The minimum learning rate ratio w.r.t the initial learning rate. num_cycles (:obj:`float`, `optional`, defaults to 0.5): The number of waves in the cosine schedule during decay phase. last_epoch (:obj:`int`, `optional`, defaults to -1): The index of the last epoch when resuming training. stable_ratio (:obj:`float`, `optional`, defaults to 0.0): The ratio of non-warmup steps that should maintain a constant learning rate. Set to 0.0 to behave exactly like cosine schedule. Return: :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. """ remaining_steps = max(0, num_training_steps - num_warmup_steps) num_stable_steps = int(remaining_steps * stable_ratio) num_decay_steps = remaining_steps - num_stable_steps def lr_lambda(current_step): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) if current_step < num_warmup_steps + num_stable_steps: return 1.0 if current_step < num_training_steps: progress = float(current_step - num_warmup_steps - num_stable_steps) / float(max(1, num_decay_steps)) value = max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress))) return (1.0 - min_lr_ratio) * value + min_lr_ratio return min_lr_ratio return LambdaLR(optimizer, lr_lambda, last_epoch) @contextmanager def check_device_is_available(): """ Some modules must be imported after CUDA is initialized. Such as sglang's sharding manager. This context manager checks if CUDA is available and raises an error if it is not. """ if not get_torch_device().is_available(): raise RuntimeError("Device {} must be initialized before importing this module.".format(get_device_name())) yield def distributed_mean_max_min_std(local_tensor, compute_max=True, compute_min=True, compute_std=True): """Compute distributed statistics across all processes. 
Args: local_tensor: Tensor containing local values compute_max: Include maximum value calculation compute_min: Include minimum value calculation compute_std: Include standard deviation calculation Returns: Tuple containing (mean, max, min, std) in this order. None for disabled metrics. """ # Sum the local tensor across all processes local_sum = torch.sum(local_tensor) local_num = torch.tensor(torch.numel(local_tensor), device=get_device_name()) torch.distributed.all_reduce(local_sum, op=torch.distributed.ReduceOp.SUM) torch.distributed.all_reduce(local_num, op=torch.distributed.ReduceOp.SUM) global_mean = local_sum / local_num if compute_max: local_max = torch.max(local_tensor) torch.distributed.all_reduce(local_max, op=torch.distributed.ReduceOp.MAX) else: local_max = None if compute_min: local_min = torch.min(local_tensor) torch.distributed.all_reduce(local_min, op=torch.distributed.ReduceOp.MIN) else: local_min = None if compute_std: square_diff = torch.sum(torch.pow(local_tensor - global_mean, 2)) torch.distributed.all_reduce(square_diff, op=torch.distributed.ReduceOp.SUM) global_std = torch.sqrt(square_diff / (local_num - 1)) else: global_std = None return global_mean, local_max, local_min, global_std def distributed_masked_mean(local_tensor, local_mask): """Compute global mean of non-masked elements across distributed processes. Args: local_tensor (torch.Tensor): Input tensor with local values local_mask (torch.Tensor): Binary mask (1=valid, 0=ignore) matching local_tensor shape Returns: torch.Tensor: Global mean of all valid elements across processes """ local_tensor = local_tensor * local_mask local_sum = torch.sum(local_tensor) local_num = torch.sum(local_mask) torch.distributed.all_reduce(local_sum, op=torch.distributed.ReduceOp.SUM) torch.distributed.all_reduce(local_num, op=torch.distributed.ReduceOp.SUM) global_mean = local_sum / local_num return global_mean ================================================ FILE: verl_distillation/verl/utils/tracking.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ A unified tracking interface that supports logging data to different backend """ import dataclasses import json import os from enum import Enum from functools import partial from pathlib import Path from typing import Any class Tracking: """A unified tracking interface for logging experiment data to multiple backends. This class provides a centralized way to log experiment metrics, parameters, and artifacts to various tracking backends including WandB, MLflow, SwanLab, TensorBoard, and console. Attributes: supported_backend: List of supported tracking backends. logger: Dictionary of initialized logger instances for each backend. 
""" supported_backend = [ "wandb", "mlflow", "swanlab", "vemlp_wandb", "tensorboard", "console", "clearml", "trackio", "file", ] def __init__(self, project_name, experiment_name, default_backend: str | list[str] = "console", config=None): if isinstance(default_backend, str): default_backend = [default_backend] for backend in default_backend: if backend == "tracking": import warnings warnings.warn("`tracking` logger is deprecated. use `wandb` instead.", DeprecationWarning, stacklevel=2) else: assert backend in self.supported_backend, f"{backend} is not supported" self.logger = {} if "tracking" in default_backend or "wandb" in default_backend: import os import wandb settings = None if config and config["trainer"].get("wandb_proxy", None): settings = wandb.Settings(https_proxy=config["trainer"]["wandb_proxy"]) entity = os.environ.get("WANDB_ENTITY", None) wandb.init(project=project_name, name=experiment_name, entity=entity, config=config, settings=settings) self.logger["wandb"] = wandb if "trackio" in default_backend: import trackio trackio.init(project=project_name, name=experiment_name, config=config) self.logger["trackio"] = trackio if "mlflow" in default_backend: import os import mlflow MLFLOW_TRACKING_URI = os.environ.get("MLFLOW_TRACKING_URI", "sqlite:////tmp/mlruns.db") mlflow.set_tracking_uri(MLFLOW_TRACKING_URI) # Project_name is actually experiment_name in MLFlow # If experiment does not exist, will create a new experiment experiment = mlflow.set_experiment(project_name) mlflow.start_run(experiment_id=experiment.experiment_id, run_name=experiment_name) mlflow.log_params(_compute_mlflow_params_from_objects(config)) self.logger["mlflow"] = _MlflowLoggingAdapter() if "swanlab" in default_backend: import os import swanlab SWANLAB_API_KEY = os.environ.get("SWANLAB_API_KEY", None) SWANLAB_LOG_DIR = os.environ.get("SWANLAB_LOG_DIR", "swanlog") SWANLAB_MODE = os.environ.get("SWANLAB_MODE", "cloud") if SWANLAB_API_KEY: swanlab.login(SWANLAB_API_KEY) # NOTE: previous login information will be overwritten if config is None: config = {} # make sure config is not None, otherwise **config will raise error swanlab.init( project=project_name, experiment_name=experiment_name, config={"FRAMEWORK": "verl", **config}, logdir=SWANLAB_LOG_DIR, mode=SWANLAB_MODE, ) self.logger["swanlab"] = swanlab if "vemlp_wandb" in default_backend: import os import volcengine_ml_platform from volcengine_ml_platform import wandb as vemlp_wandb volcengine_ml_platform.init( ak=os.environ["VOLC_ACCESS_KEY_ID"], sk=os.environ["VOLC_SECRET_ACCESS_KEY"], region=os.environ["MLP_TRACKING_REGION"], ) vemlp_wandb.init( project=project_name, name=experiment_name, config=config, sync_tensorboard=True, ) self.logger["vemlp_wandb"] = vemlp_wandb if "tensorboard" in default_backend: self.logger["tensorboard"] = _TensorboardAdapter(project_name, experiment_name) if "console" in default_backend: from verl.utils.logger import LocalLogger self.console_logger = LocalLogger(print_to_console=True) self.logger["console"] = self.console_logger if "clearml" in default_backend: self.logger["clearml"] = ClearMLLogger(project_name, experiment_name, config) if "file" in default_backend: self.logger["file"] = FileLogger(project_name, experiment_name) def log(self, data, step, backend=None): for default_backend, logger_instance in self.logger.items(): if backend is None or default_backend in backend: logger_instance.log(data=data, step=step) def __del__(self): if "wandb" in self.logger: self.logger["wandb"].finish(exit_code=0) if "swanlab" in 
self.logger: self.logger["swanlab"].finish() if "vemlp_wandb" in self.logger: self.logger["vemlp_wandb"].finish(exit_code=0) if "tensorboard" in self.logger: self.logger["tensorboard"].finish() if "clearml" in self.logger: self.logger["clearml"].finish() if "trackio" in self.logger: self.logger["trackio"].finish() if "file" in self.logger: self.logger["file"].finish() class ClearMLLogger: def __init__(self, project_name: str, experiment_name: str, config): self.project_name = project_name self.experiment_name = experiment_name import clearml self._task: clearml.Task = clearml.Task.init( task_name=experiment_name, project_name=project_name, continue_last_task=True, output_uri=False, ) self._task.connect_configuration(config, name="Hyperparameters") def _get_logger(self): return self._task.get_logger() def log(self, data, step): import numpy as np import pandas as pd # logs = self._rewrite_logs(data) logger = self._get_logger() for k, v in data.items(): title, series = k.split("/", 1) if isinstance(v, int | float | np.floating | np.integer): logger.report_scalar( title=title, series=series, value=v, iteration=step, ) elif isinstance(v, pd.DataFrame): logger.report_table( title=title, series=series, table_plot=v, iteration=step, ) else: logger.warning( f'Trainer is attempting to log a value of "{v}" of type {type(v)} for key "{k}". This ' f"invocation of ClearML logger's function is incorrect so this attribute was dropped. " ) def finish(self): self._task.close() class FileLogger: def __init__(self, project_name: str, experiment_name: str): self.project_name = project_name self.experiment_name = experiment_name self.filepath = os.getenv("VERL_FILE_LOGGER_PATH", None) if self.filepath is None: root_path = os.path.expanduser(os.getenv("VERL_FILE_LOGGER_ROOT", ".")) directory = os.path.join(root_path, self.project_name) os.makedirs(directory, exist_ok=True) self.filepath = os.path.join(directory, f"{self.experiment_name}.jsonl") print(f"Creating file logger at {self.filepath}") self.fp = open(self.filepath, "w") def log(self, data, step): data = {"step": step, "data": data} self.fp.write(json.dumps(data) + "\n") def finish(self): self.fp.close() class _TensorboardAdapter: def __init__(self, project_name, experiment_name): import os from torch.utils.tensorboard import SummaryWriter tensorboard_dir = os.environ.get("TENSORBOARD_DIR", f"tensorboard_log/{project_name}/{experiment_name}") os.makedirs(tensorboard_dir, exist_ok=True) print(f"Saving tensorboard log to {tensorboard_dir}.") self.writer = SummaryWriter(tensorboard_dir) def log(self, data, step): for key in data: self.writer.add_scalar(key, data[key], step) def finish(self): self.writer.close() class _MlflowLoggingAdapter: def __init__(self): import logging import re self.logger = logging.getLogger(__name__) # MLflow metric key validation logic: # https://github.com/mlflow/mlflow/blob/master/mlflow/utils/validation.py#L157C12-L157C44 # Only characters allowed: slashes, alphanumerics, underscores, periods, dashes, colons, # and spaces. self._invalid_chars_pattern = re.compile( r"[^/\w.\- :]" ) # Allowed: slashes, alphanumerics, underscores, periods, dashes, colons, and spaces. 
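# --- Illustrative usage sketch (added for clarity; not part of the original source). ---
# A minimal example of the Tracking facade defined above, assuming the module is importable
# as verl.utils.tracking and using only backends that need no external service:
#
#     from verl.utils.tracking import Tracking
#
#     tracker = Tracking(
#         project_name="onerec_distill",        # hypothetical project name
#         experiment_name="qwen3_run_0",        # hypothetical experiment name
#         default_backend=["console", "file"],  # any subset of Tracking.supported_backend
#         config={"trainer": {"total_steps": 100}},
#     )
#     tracker.log(data={"train/loss": 0.42, "train/lr": 1e-5}, step=1)
#     tracker.log(data={"val/score": 0.9}, step=1, backend=["file"])  # route to one backend only
# --- End of sketch. ---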
def log(self, data, step): import mlflow def sanitize_key(key): # First replace @ with _at_ for backward compatibility sanitized = key.replace("@", "_at_") # Then replace any other invalid characters with _ sanitized = self._invalid_chars_pattern.sub("_", sanitized) if sanitized != key: self.logger.warning( "[MLflow] Metric key '%s' sanitized to '%s' due to invalid characters.", key, sanitized ) return sanitized results = {sanitize_key(k): v for k, v in data.items()} mlflow.log_metrics(metrics=results, step=step) def _compute_mlflow_params_from_objects(params) -> dict[str, Any]: if params is None: return {} return _flatten_dict(_transform_params_to_json_serializable(params, convert_list_to_dict=True), sep="/") def _transform_params_to_json_serializable(x, convert_list_to_dict: bool): _transform = partial(_transform_params_to_json_serializable, convert_list_to_dict=convert_list_to_dict) if dataclasses.is_dataclass(x): return _transform(dataclasses.asdict(x)) if isinstance(x, dict): return {k: _transform(v) for k, v in x.items()} if isinstance(x, list): if convert_list_to_dict: return {"list_len": len(x)} | {f"{i}": _transform(v) for i, v in enumerate(x)} else: return [_transform(v) for v in x] if isinstance(x, Path): return str(x) if isinstance(x, Enum): return x.value return x def _flatten_dict(raw: dict[str, Any], *, sep: str) -> dict[str, Any]: import pandas as pd ans = pd.json_normalize(raw, sep=sep).to_dict(orient="records")[0] assert isinstance(ans, dict) return ans @dataclasses.dataclass class ValidationGenerationsLogger: project_name: str = None experiment_name: str = None def log(self, loggers, samples, step): if "wandb" in loggers: self.log_generations_to_wandb(samples, step) if "swanlab" in loggers: self.log_generations_to_swanlab(samples, step) if "mlflow" in loggers: self.log_generations_to_mlflow(samples, step) if "clearml" in loggers: self.log_generations_to_clearml(samples, step) if "tensorboard" in loggers: self.log_generations_to_tensorboard(samples, step) if "vemlp_wandb" in loggers: self.log_generations_to_vemlp_wandb(samples, step) def log_generations_to_vemlp_wandb(self, samples, step): from volcengine_ml_platform import wandb as vemlp_wandb self._log_generations_to_wandb(samples, step, vemlp_wandb) def log_generations_to_wandb(self, samples, step): import wandb self._log_generations_to_wandb(samples, step, wandb) def _log_generations_to_wandb(self, samples, step, wandb): """Log samples to wandb as a table""" # Create column names for all samples columns = ["step"] + sum( [[f"input_{i + 1}", f"output_{i + 1}", f"score_{i + 1}"] for i in range(len(samples))], [] ) if not hasattr(self, "validation_table"): # Initialize the table on first call self.validation_table = wandb.Table(columns=columns) # Create a new table with same columns and existing data # Workaround for https://github.com/wandb/wandb/issues/2981#issuecomment-1997445737 new_table = wandb.Table(columns=columns, data=self.validation_table.data) # Add new row with all data row_data = [] row_data.append(step) for sample in samples: row_data.extend(sample) new_table.add_data(*row_data) # Update reference and log wandb.log({"val/generations": new_table}, step=step) self.validation_table = new_table def log_generations_to_swanlab(self, samples, step): """Log samples to swanlab as text""" import swanlab swanlab_table = swanlab.echarts.Table() # Create column names headers = ["step", "input", "output", "score"] swanlab_row_list = [[step, *sample] for sample in samples] swanlab_table.add(headers=headers, 
rows=swanlab_row_list) # Log to swanlab swanlab.log({"val/generations": swanlab_table}, step=step) def log_generations_to_mlflow(self, samples, step): """Log validation generation to mlflow as artifacts""" # https://mlflow.org/docs/latest/api_reference/python_api/mlflow.html?highlight=log_artifact#mlflow.log_artifact import json import tempfile import mlflow try: with tempfile.TemporaryDirectory() as tmp_dir: validation_gen_step_file = Path(tmp_dir, f"val_step{step}.json") row_data = [] for sample in samples: data = {"input": sample[0], "output": sample[1], "score": sample[2]} row_data.append(data) with open(validation_gen_step_file, "w") as file: json.dump(row_data, file) mlflow.log_artifact(validation_gen_step_file) except Exception as e: print(f"WARNING: save validation generation file to mlflow failed with error {e}") def log_generations_to_clearml(self, samples, step): """Log validation generation to clearml as table""" import clearml import pandas as pd task: clearml.Task | None = clearml.Task.current_task() if task is None: return table = [ { "step": step, "input": sample[0], "output": sample[1], "score": sample[2], } for sample in samples ] logger = task.get_logger() logger.report_table( series="Validation generations", title="Validation", table_plot=pd.DataFrame.from_records(table), iteration=step, ) def log_generations_to_tensorboard(self, samples, step): """Log samples to tensorboard as text""" # Initialize tensorboard writer if not exists if not hasattr(self, "writer"): from torch.utils.tensorboard import SummaryWriter # Use the same directory structure as _TensorboardAdapter if self.project_name and self.experiment_name: default_dir = os.path.join("tensorboard_log", self.project_name, self.experiment_name) else: default_dir = "tensorboard_log" tensorboard_dir = os.environ.get("TENSORBOARD_DIR", default_dir) os.makedirs(tensorboard_dir, exist_ok=True) self.writer = SummaryWriter(log_dir=tensorboard_dir) # Format the samples data into readable text text_content = f"**Generation Results - Step {step}**\n\n" for i, sample in enumerate(samples): text_content += f"### Sample {i + 1}\n" # Assuming sample contains [input, output, score] if len(sample) >= 3: input_text, output_text, score = sample[0], sample[1], sample[2] text_content += f"**Input:** {input_text}\n\n" text_content += f"**Output:** {output_text}\n\n" text_content += f"**Score:** {score}\n\n" else: # Handle cases where sample format might be different text_content += f"**Data:** {sample}\n\n" text_content += "---\n\n" # Log to tensorboard as text self.writer.add_text("val/generations", text_content, step) # Flush to ensure data is written self.writer.flush() ================================================ FILE: verl_distillation/verl/utils/transferqueue_utils.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
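# --- Illustrative usage sketch (added for clarity; not part of the original source). ---
# ValidationGenerationsLogger, defined in tracking.py above, expects each sample to be an
# (input, output, score) triple, as the mlflow/clearml/tensorboard writers assume:
#
#     from verl.utils.tracking import ValidationGenerationsLogger
#
#     gen_logger = ValidationGenerationsLogger(
#         project_name="onerec_distill",   # hypothetical names
#         experiment_name="qwen3_run_0",
#     )
#     samples = [
#         ("recommend a cooking video", "<video_42>", 0.87),
#         ("recommend a hiking video", "<video_17>", 0.65),
#     ]
#     # `loggers` names the enabled backends; only matching writers are invoked.
#     gen_logger.log(loggers=["tensorboard"], samples=samples, step=10)
# --- End of sketch. ---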
import asyncio import inspect import os import threading from functools import wraps from typing import Any, Callable from tensordict import TensorDict try: from transfer_queue import ( AsyncTransferQueueClient, BatchMeta, ZMQServerInfo, ) except ImportError: # TODO: Use a hacky workaround for ImportError since # transfer_queue isn't a default verl dependency. class BatchMeta: pass from verl.protocol import DataProto _TRANSFER_QUEUE_CLIENT = None _VAL_TRANSFER_QUEUE_CLIENT = None is_transferqueue_enabled = os.environ.get("TRANSFER_QUEUE_ENABLE", False) def create_transferqueue_client( client_id: str, controller_infos: dict[Any, "ZMQServerInfo"], storage_infos: dict[Any, "ZMQServerInfo"], ) -> None: global _TRANSFER_QUEUE_CLIENT global _VAL_TRANSFER_QUEUE_CLIENT if "val" in client_id: _VAL_TRANSFER_QUEUE_CLIENT = AsyncTransferQueueClient(client_id, controller_infos, storage_infos) else: _TRANSFER_QUEUE_CLIENT = AsyncTransferQueueClient(client_id, controller_infos, storage_infos) def get_transferqueue_client() -> "AsyncTransferQueueClient": return _TRANSFER_QUEUE_CLIENT def get_val_transferqueue_client() -> "AsyncTransferQueueClient": return _VAL_TRANSFER_QUEUE_CLIENT def _run_async_in_temp_loop(async_func: Callable[..., Any], *args, **kwargs) -> Any: # Use a temporary event loop in a new thread because an event # loop may already exist in server mode tmp_event_loop = asyncio.new_event_loop() thread = threading.Thread( target=tmp_event_loop.run_forever, name="batchmeta dataproto converter", daemon=True, ) def run_coroutine(coroutine): if not thread.is_alive(): thread.start() future = asyncio.run_coroutine_threadsafe(coroutine, tmp_event_loop) return future.result() async def stop_loop(): tmp_event_loop.stop() try: return run_coroutine(async_func(*args, **kwargs)) finally: if thread.is_alive(): asyncio.run_coroutine_threadsafe(stop_loop(), tmp_event_loop) thread.join() def _find_batchmeta(*args, **kwargs): for arg in args: if isinstance(arg, BatchMeta): return arg for v in kwargs.values(): if isinstance(v, BatchMeta): return v return None async def _async_batchmeta_to_dataproto(batchmeta: "BatchMeta") -> DataProto: if batchmeta.samples == [] or batchmeta.samples is None: return DataProto( batch=TensorDict({}, batch_size=(0,)), non_tensor_batch={}, meta_info=batchmeta.extra_info.copy(), ) if batchmeta.extra_info.get("validate", False): tensordict = await _VAL_TRANSFER_QUEUE_CLIENT.async_get_data(batchmeta) else: tensordict = await _TRANSFER_QUEUE_CLIENT.async_get_data(batchmeta) return DataProto.from_tensordict(tensordict, meta_info=batchmeta.extra_info.copy()) def _batchmeta_to_dataproto(batchmeta: "BatchMeta") -> DataProto: return _run_async_in_temp_loop(_async_batchmeta_to_dataproto, batchmeta) async def _async_update_batchmeta_with_output(output: DataProto, batchmeta: "BatchMeta") -> None: for k, v in output.meta_info.items(): batchmeta.set_extra_info(k, v) if len(output) > 0: tensordict = output.to_tensordict() # pop meta_info for key in output.meta_info.keys(): tensordict.pop(key) batchmeta.add_fields(tensordict) if batchmeta.extra_info.get("validate", False): await _VAL_TRANSFER_QUEUE_CLIENT.async_put(data=tensordict, metadata=batchmeta) else: await _TRANSFER_QUEUE_CLIENT.async_put(data=tensordict, metadata=batchmeta) def _update_batchmeta_with_output(output: DataProto, batchmeta: "BatchMeta") -> None: _run_async_in_temp_loop(_async_update_batchmeta_with_output, output, batchmeta) def tqbridge(put_data: bool = True): """Creates a decorator for bridging BatchMeta and DataProto.
This decorator automatically handles conversions between `BatchMeta` and `DataProto` in function parameters, and decides whether to sync the function output back to `BatchMeta` based on the configuration (`put_data`). It supports both synchronous and asynchronous functions (async def); the bridging logic is enabled via the global `is_transferqueue_enabled` flag (when disabled, the original function is simply called as-is). Args: put_data: Whether to put the DataProto into storage after the function returns. If True, after the function executes, its output is written back to the `BatchMeta` and the `BatchMeta` is returned; if False, the function output is returned directly. Defaults to True. Returns: A decorator function used to decorate target functions (synchronous or asynchronous). """ def decorator(func): @wraps(func) def inner(*args, **kwargs): batchmeta = _find_batchmeta(*args, **kwargs) if batchmeta is None: return func(*args, **kwargs) else: args = [_batchmeta_to_dataproto(arg) if isinstance(arg, BatchMeta) else arg for arg in args] kwargs = {k: _batchmeta_to_dataproto(v) if isinstance(v, BatchMeta) else v for k, v in kwargs.items()} output = func(*args, **kwargs) if put_data: _update_batchmeta_with_output(output, batchmeta) return batchmeta else: return output @wraps(func) async def async_inner(*args, **kwargs): batchmeta = _find_batchmeta(*args, **kwargs) if batchmeta is None: return await func(*args, **kwargs) else: args = [await _async_batchmeta_to_dataproto(arg) if isinstance(arg, BatchMeta) else arg for arg in args] kwargs = { k: await _async_batchmeta_to_dataproto(v) if isinstance(v, BatchMeta) else v for k, v in kwargs.items() } output = await func(*args, **kwargs) if put_data: await _async_update_batchmeta_with_output(output, batchmeta) return batchmeta return output @wraps(func) def dummy_inner(*args, **kwargs): return func(*args, **kwargs) @wraps(func) async def dummy_async_inner(*args, **kwargs): return await func(*args, **kwargs) wrapper_inner = inner if is_transferqueue_enabled else dummy_inner wrapper_async_inner = async_inner if is_transferqueue_enabled else dummy_async_inner wrapper = wrapper_async_inner if inspect.iscoroutinefunction(func) else wrapper_inner return wrapper return decorator ================================================ FILE: verl_distillation/verl/utils/transformers_compat.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Compatibility utilities for different versions of the transformers library.
""" import importlib.metadata from functools import lru_cache from typing import Optional from packaging import version # Handle version compatibility for flash_attn_supports_top_left_mask # This function was added in newer versions of transformers try: from transformers.modeling_flash_attention_utils import flash_attn_supports_top_left_mask except ImportError: # For older versions of transformers that don't have this function # Default to False as a safe fallback for older versions def flash_attn_supports_top_left_mask(): """Fallback implementation for older transformers versions. Returns False to disable features that require this function. """ return False @lru_cache def is_transformers_version_in_range(min_version: Optional[str] = None, max_version: Optional[str] = None) -> bool: try: # Get the installed version of the transformers library transformers_version_str = importlib.metadata.version("transformers") except importlib.metadata.PackageNotFoundError as e: raise ModuleNotFoundError("The `transformers` package is not installed.") from e transformers_version = version.parse(transformers_version_str) lower_bound_check = True if min_version is not None: lower_bound_check = version.parse(min_version) <= transformers_version upper_bound_check = True if max_version is not None: upper_bound_check = transformers_version <= version.parse(max_version) return lower_bound_check and upper_bound_check ================================================ FILE: verl_distillation/verl/utils/ulysses.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Utilities for DeepSpeed Ulysses Sequence Parallelism. DeepSpeed Ulysses Paper: https://arxiv.org/abs/2309.14509 Inspired from: https://github.com/deepspeedai/DeepSpeed/blob/master/deepspeed/sequence/layer.py """ from typing import Any, Optional import torch import torch.distributed as dist from torch import Tensor from torch.distributed import ProcessGroup _ULYSSES_SEQUENCE_PARALLEL_GROUP = None def set_ulysses_sequence_parallel_group(group: dist.ProcessGroup): """ Set ulysses sequence parallel process group. """ global _ULYSSES_SEQUENCE_PARALLEL_GROUP _ULYSSES_SEQUENCE_PARALLEL_GROUP = group def get_ulysses_sequence_parallel_group() -> Optional[dist.ProcessGroup]: """ Get ulysses sequence parallel process group. """ global _ULYSSES_SEQUENCE_PARALLEL_GROUP return _ULYSSES_SEQUENCE_PARALLEL_GROUP def get_ulysses_sequence_parallel_world_size(group: ProcessGroup = None) -> int: """ Get ulysses sequence parallel world size. """ group = get_ulysses_sequence_parallel_group() if group is None else group return dist.get_world_size(group) if group else 1 def get_ulysses_sequence_parallel_rank(group: ProcessGroup = None) -> int: """ Get ulysses sequence parallel rank. 
""" group = get_ulysses_sequence_parallel_group() if group is None else group return dist.get_rank(group) if group else 0 def gather_seq_scatter_heads( x: Tensor, seq_dim: int, head_dim: int, unpadded_dim_size: int = 0, group: ProcessGroup = None, ) -> Tensor: """ A func to sync embedding input with alltoall in sequence parallel gather sequence dimension and scatter head dim: e.g. seq_dim: 1, head_dim: 2 [bsz, seq/n, h, ...] -> [bsz, seq, h/n, ...] """ group = get_ulysses_sequence_parallel_group() if group is None else group if not group: return x sp_world = get_ulysses_sequence_parallel_world_size(group) x = SeqAllToAll.apply(group, x, head_dim, seq_dim) if unpadded_dim_size and unpadded_dim_size % sp_world != 0: padding_size = x.size(seq_dim) - unpadded_dim_size x = _unpad_tensor(x, seq_dim, padding_size) return x def gather_heads_scatter_seq(x: Tensor, head_dim: int, seq_dim: int, group: ProcessGroup = None) -> Tensor: """ A func to sync attention result with alltoall in sequence parallel gather head dimension and scatter seq dim: e.g. seq_dim: 1, head_dim: 2 [bsz, seq, h/n, ...] -> [bsz, seq/n, h, ...] """ group = get_ulysses_sequence_parallel_group() if group is None else group if not group: return x dim_size = x.size(seq_dim) sp_world = get_ulysses_sequence_parallel_world_size(group) if dim_size % sp_world != 0: padding_size = sp_world - (dim_size % sp_world) x = _pad_tensor(x, seq_dim, padding_size) return SeqAllToAll.apply(group, x, seq_dim, head_dim, False) def _pad_tensor(x: Tensor, dim: int, padding_size: int) -> Tensor: shape = list(x.shape) shape[dim] = padding_size pad = torch.zeros(shape, dtype=x.dtype, device=x.device) return torch.cat([x, pad], dim=dim) def _unpad_tensor(x: Tensor, dim: int, padding_size: int) -> Tensor: slc = [slice(None)] * len(x.shape) slc[dim] = slice(0, -padding_size) return x[tuple(slc)] def slice_input_tensor(x: Tensor, dim: int, padding: bool = True, group: ProcessGroup = None) -> Tensor: group = get_ulysses_sequence_parallel_group() if group is None else group sp_world_size = dist.get_world_size(group) sp_rank = get_ulysses_sequence_parallel_rank() dim_size = x.size(dim) # pad before slice if padding and dim_size % sp_world_size: padding_size = sp_world_size - (dim_size % sp_world_size) x = _pad_tensor(x, dim, padding_size) # slice the input tensor parts = x.size(dim) // sp_world_size slc = [slice(None)] * len(x.shape) slc[dim] = slice(sp_rank * parts, (sp_rank + 1) * parts) return x[tuple(slc)].contiguous() def all_to_all_tensor( local_input: Tensor, scatter_dim: int, gather_dim: int, group: Optional[dist.ProcessGroup] = None, async_op: bool = False, ): group = get_ulysses_sequence_parallel_group() if group is None else group seq_world_size = dist.get_world_size(group) input_list = [t.contiguous() for t in torch.tensor_split(local_input, seq_world_size, scatter_dim)] output_list = [torch.empty_like(input_list[0]) for _ in range(seq_world_size)] comm = dist.all_to_all(output_list, input_list, group=group, async_op=async_op) if async_op: def wait(): comm.wait() return torch.cat(output_list, dim=gather_dim).contiguous() return wait return torch.cat(output_list, dim=gather_dim).contiguous() def all_gather_tensor(local_tensor: Tensor, group: Optional[dist.ProcessGroup] = None, async_op: bool = False): group = get_ulysses_sequence_parallel_group() if group is None else group sp_world_size = dist.get_world_size(group=group) output_shape = list(local_tensor.shape) output_shape[0] = output_shape[0] * sp_world_size output = torch.empty(output_shape, 
dtype=local_tensor.dtype, device=local_tensor.device) dist.all_gather_into_tensor(output, local_tensor, group=group, async_op=async_op) return output class SeqAllToAll(torch.autograd.Function): @staticmethod def forward( ctx: Any, group: dist.ProcessGroup, local_input: Tensor, scatter_dim: int, gather_dim: int, async_op: bool = False, ) -> Tensor: ctx.group = group ctx.scatter_dim = scatter_dim ctx.gather_dim = gather_dim ctx.async_op = async_op return all_to_all_tensor(local_input, scatter_dim, gather_dim, group, async_op) @staticmethod def backward(ctx: Any, *grad_output: Tensor) -> tuple[None, Tensor, None, None]: input_t = torch.cat(grad_output[1:], dim=ctx.gather_dim).contiguous() if ctx.async_op else grad_output[0] return ( None, all_to_all_tensor(input_t, ctx.gather_dim, ctx.scatter_dim, ctx.group, False), None, None, None, None, ) class Gather(torch.autograd.Function): @staticmethod def forward( ctx: Any, group: dist.ProcessGroup, local_tensor: Tensor, gather_dim: int, grad_scaler: bool = True, async_op=False, ) -> Tensor: ctx.group = group ctx.gather_dim = gather_dim ctx.grad_scaler = grad_scaler ctx.async_op = async_op sp_world_size = dist.get_world_size(group=group) ctx.sp_world_size = sp_world_size sp_rank = dist.get_rank(group=group) ctx.sp_rank = sp_rank local_shape = list(local_tensor.size()) split_size = local_shape[0] part_size = local_shape[gather_dim] # store original size ctx.part_size = part_size output = all_gather_tensor(local_tensor, group, async_op) return torch.cat(output.split(split_size, dim=0), dim=gather_dim) @staticmethod def backward(ctx: Any, grad_output: Tensor) -> Any: if ctx.grad_scaler: grad_output = grad_output * ctx.sp_world_size return ( None, grad_output.split(ctx.part_size, dim=ctx.gather_dim)[ctx.sp_rank].contiguous(), None, None, None, None, ) def gather_outpus_and_unpad(*args, **kwargs): raise RuntimeError( "please use verl.utils.ulysses.gather_outputs_and_unpad instead of verl.utils.ulysses.gather_outpus_and_unpad" ) def gather_outputs_and_unpad( x: Tensor, gather_dim: int, unpad_dim: int = None, padding_size: int = 0, grad_scaler: bool = True, group: Optional[dist.ProcessGroup] = None, ): """ Gather a tensor across a process group and optionally unpad its padded elements. Args: x (Tensor): Input tensor to gather. gather_dim (int): Dimension along which to gather across ranks. unpad_dim (int, optional): Dimension from which to remove padding. If None, no unpadding. padding_size (int): Number of padding elements to remove on `unpad_dim`. Defaults to 0. grad_scaler (bool): Whether to apply gradient scaling during gather. Defaults to True. group (ProcessGroup, optional): Process group for gathering. If None, uses `get_ulysses_sequence_parallel_group()`. If still None, returns `x` unchanged. Returns: Tensor: The gathered tensor, with padding removed if requested. 
""" group = get_ulysses_sequence_parallel_group() if group is None else group if group is None: return x x = Gather.apply(group, x, gather_dim, grad_scaler) if unpad_dim is not None: assert isinstance(padding_size, int), "padding size is not given or is not an integer" if padding_size == 0: return x x = _unpad_tensor(x, unpad_dim, padding_size) return x def ulysses_pad(input_ids_rmpad: torch.Tensor, position_ids_rmpad: Optional[torch.Tensor] = None, sp_size: int = 1): if position_ids_rmpad is not None: assert position_ids_rmpad.size(-2) == 1 assert input_ids_rmpad.size(-1) == position_ids_rmpad.size(-1) if sp_size <= 1: return input_ids_rmpad, position_ids_rmpad, 0 _, total_seq_len = input_ids_rmpad.shape pad_size = (sp_size - total_seq_len % sp_size) % sp_size if pad_size > 0: input_ids_rmpad = torch.nn.functional.pad(input_ids_rmpad, (0, pad_size), value=0) if position_ids_rmpad is not None: pad_pos_ids = torch.arange(pad_size, device=position_ids_rmpad.device).unsqueeze(0) if position_ids_rmpad.dim() == 3: pad_pos_ids = pad_pos_ids.unsqueeze(0).repeat(position_ids_rmpad.size(0), 1, 1) position_ids_rmpad = torch.cat((position_ids_rmpad, pad_pos_ids), dim=-1) return input_ids_rmpad, position_ids_rmpad, pad_size def ulysses_pad_and_slice_inputs( input_ids_rmpad: torch.Tensor, position_ids_rmpad: Optional[torch.Tensor] = None, sp_size: int = 1 ): """ Pad and slice input_ids to be divisible by sp_size Pad position_ids to be divisible by sp_size. Note both input_ids_rmpad and position_ids_rmpad will be padded and sliced. The is the utility of pre-forward for ulysses sequence parallelism Args: input_ids_rmpad: shape of [bsz, seqlen] position_ids_rmpad: shape of [bsz, seqlen], where bsz must be 1 sp_size (int): ulysses sequence parallelism size Returns: torch.Tensor: padded and sliced input_ids torch.Tensor: padded and sliced position_ids int: pad size """ input_ids_rmpad, position_ids_rmpad, pad_size = ulysses_pad(input_ids_rmpad, position_ids_rmpad, sp_size) input_ids_rmpad = slice_input_tensor(input_ids_rmpad, dim=1, padding=False) if position_ids_rmpad is not None: position_ids_rmpad = slice_input_tensor(position_ids_rmpad, dim=1, padding=False) return input_ids_rmpad, position_ids_rmpad, pad_size def validate_ulysses_config(num_heads, ulysses_sequence_size): if ulysses_sequence_size > 1: assert num_heads % ulysses_sequence_size == 0, ( f"num_heads ({num_heads}) must be divisible by ulysses sequence size({ulysses_sequence_size})" ) ================================================ FILE: verl_distillation/verl/utils/vllm/__init__.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .utils import TensorLoRARequest, VLLMHijack, is_version_ge # The contents of vllm/patch.py should not be imported here, because the contents of # patch.py should be imported after the vllm LLM instance is created. Therefore, # wait until you actually start using it before importing the contents of # patch.py separately. 
__all__ = [ "TensorLoRARequest", "VLLMHijack", "is_version_ge", ] ================================================ FILE: verl_distillation/verl/utils/vllm/patch.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # To support different vLLM versions, we add the model into SUPPORTED_MOE_MODELS separately to avoid triggering # unsupported issues. SUPPORTED_MOE_MODELS = [] try: from vllm.model_executor.models.deepseek_v2 import DeepseekV2ForCausalLM, DeepseekV3ForCausalLM SUPPORTED_MOE_MODELS.append(DeepseekV2ForCausalLM) SUPPORTED_MOE_MODELS.append(DeepseekV3ForCausalLM) except ImportError: pass try: from vllm.model_executor.models.mixtral import MixtralForCausalLM SUPPORTED_MOE_MODELS.append(MixtralForCausalLM) except ImportError: pass try: from vllm.model_executor.models.qwen2_moe import Qwen2MoeForCausalLM SUPPORTED_MOE_MODELS.append(Qwen2MoeForCausalLM) except ImportError: pass try: from vllm.model_executor.models.qwen3_moe import Qwen3MoeForCausalLM SUPPORTED_MOE_MODELS.append(Qwen3MoeForCausalLM) except ImportError: pass try: from vllm.model_executor.models.qwen3_vl_moe import Qwen3MoeLLMForCausalLM SUPPORTED_MOE_MODELS.append(Qwen3MoeLLMForCausalLM) except ImportError: pass try: from vllm.model_executor.models.kimi_vl import KimiVLForConditionalGeneration SUPPORTED_MOE_MODELS.append(KimiVLForConditionalGeneration) except ImportError: pass def patch_vllm_moe_model_weight_loader(model): # this is a work around to load the weight of vllm fused moe model # it is from a bug from vllm 0.8.2 # all the weights are supposed to have a weight_loader, but the moe weights # do not have a weight_loader, so we need to patch it # (True, 'model.embed_tokens.weight') # (True, 'model.layers.0.self_attn.qkv_proj.weight') # (True, 'model.layers.0.self_attn.qkv_proj.bias') # (True, 'model.layers.0.self_attn.o_proj.weight') # (True, 'model.layers.0.mlp.gate.weight') # (True, 'model.layers.0.mlp.shared_expert.gate_up_proj.weight') # (True, 'model.layers.0.mlp.shared_expert.down_proj.weight') # (False, 'model.layers.0.mlp.shared_expert_gate.weight') use default # (False, 'model.layers.0.input_layernorm.weight') use default # (False, 'model.layers.0.post_attention_layernorm.weight') use default # (False, 'model.layers.0.mlp.experts.w13_weight') use mlp.experts.weight_loader # (False, 'model.layers.0.mlp.experts.w2_weight') use mlp.experts.weight_loader # Early return if no MOE models are supported if not SUPPORTED_MOE_MODELS: return original_model_type = type(model) # Define MLP attribute mapping for different model types MLP_ATTR_MAPPING = {} try: from vllm.model_executor.models.mixtral import MixtralForCausalLM MLP_ATTR_MAPPING[MixtralForCausalLM] = "block_sparse_moe" except ImportError: pass DEFAULT_MLP_ATTR = "mlp" # Get inner model (either model.model or model.language_model) inner_model = getattr(model, "model", None) or getattr(model, "language_model", None) if inner_model is None: raise ValueError("The provided model 
does not have a valid 'model' or 'language_model' attribute.") if not isinstance(model, tuple(SUPPORTED_MOE_MODELS)) and not isinstance(inner_model, tuple(SUPPORTED_MOE_MODELS)): return # TODO(@leisuzz): class Qwen3MoeLLMForCausalLM is not available if VLLM version < 0.11.0, # will update the 'if statement' with 'isinstance' when verl commonly use VLLM version >= 0.11.0 if type(inner_model).__name__ == "Qwen3MoeLLMForCausalLM": inner_model = inner_model.model # Reassign inner_model in Qwen3-vl for layer_idx, layer in enumerate(inner_model.layers): mlp_attr = MLP_ATTR_MAPPING.get(original_model_type, DEFAULT_MLP_ATTR) mlp = getattr(layer, mlp_attr, None) if not mlp: continue experts = getattr(mlp, "experts", None) if not experts or not hasattr(experts, "weight_loader"): continue # Patch the weight loaders for name, param in mlp.named_parameters(): if "w13_weight" in name or "w2_weight" in name: param.weight_loader = experts.weight_loader ================================================ FILE: verl_distillation/verl/utils/vllm/utils.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from msgspec import field from packaging import version as vs from vllm.lora.models import LoRAModel from vllm.lora.request import LoRARequest from vllm.lora.utils import get_adapter_absolute_path from vllm.lora.worker_manager import LRUCacheWorkerLoRAManager from verl.third_party.vllm import get_version class TensorLoRARequest(LoRARequest): peft_config: dict = field(default=None) lora_tensors: dict = field(default=None) class VLLMHijack: @staticmethod def hijack(): def hijack__load_adapter(self, lora_request: TensorLoRARequest) -> LoRAModel: """ based on vllm.lora.worker_manager.WorkerLoRAManager._load_adapter, support load adapter with lora tensors Reason: VLLM does not support adding LoRA from tensors directly. It only supports adding LoRA via file paths. To synchronize the LoRA tensors of the actor model, we need to find a workaround to enable VLLM to load memory-based LoRA tensors. """ try: supported_lora_modules = self._adapter_manager.supported_lora_modules packed_modules_mapping = self._adapter_manager.packed_modules_mapping expected_lora_modules: list[str] = [] for module in supported_lora_modules: if module in packed_modules_mapping: expected_lora_modules.extend(packed_modules_mapping[module]) else: expected_lora_modules.append(module) expected_lora_modules = list(set(expected_lora_modules)) lora_tensors = None from vllm.lora.peft_helper import PEFTHelper if isinstance(lora_request, TensorLoRARequest): peft_config = lora_request.peft_config lora_tensors = lora_request.lora_tensors peft_helper = PEFTHelper.from_dict(peft_config) else: lora_path = get_adapter_absolute_path(lora_request.lora_path) peft_helper = PEFTHelper.from_local_dir(lora_path, self.max_position_embeddings) # Validates the LoRA configuration against requirements before # loading weights, throwing an exception if validation fails. 
peft_helper.validate_legal(self.lora_config) # For some models like Qwen2VL, we need to use hf_to_vllm_mapper # to ensure correct loading of lora weights. model = self._adapter_manager.model hf_to_vllm_mapper = None if hasattr(model, "hf_to_vllm_mapper") and model.hf_to_vllm_mapper is not None: hf_to_vllm_mapper = model.hf_to_vllm_mapper if isinstance(lora_request, TensorLoRARequest): lora = self._lora_model_cls.from_lora_tensors( lora_model_id=lora_request.lora_int_id, tensors=lora_tensors, peft_helper=peft_helper, device="cpu", dtype=self.lora_config.lora_dtype, embeddings=None, target_embedding_padding=self.vocab_size + self.lora_config.lora_extra_vocab_size, embedding_modules=self.embedding_modules, embedding_padding_modules=self.embedding_padding_modules, weights_mapper=hf_to_vllm_mapper, ) else: lora = self._lora_model_cls.from_local_checkpoint( lora_path, expected_lora_modules, peft_helper=peft_helper, lora_model_id=lora_request.lora_int_id, device="cpu", dtype=self.lora_config.lora_dtype, target_embedding_padding=self.vocab_size + self.lora_config.lora_extra_vocab_size, embedding_modules=self.embedding_modules, embedding_padding_modules=self.embedding_padding_modules, weights_mapper=hf_to_vllm_mapper, ) except Exception as e: raise e if lora.extra_vocab_size > self.lora_config.lora_extra_vocab_size: raise ValueError( f"LoRA added vocab size {lora.extra_vocab_size} is greater than lora_extra_vocab_size " f"{self.lora_config.lora_extra_vocab_size}." ) return lora def do_hijack(target_cls, target_method_name, hooking_method): setattr(target_cls, target_method_name, hooking_method) do_hijack(LRUCacheWorkerLoRAManager, "_load_adapter", hijack__load_adapter) def is_version_ge(pkg: str = "vllm", minver: str = "0.7.3"): """check if the package version is greater than or equal to the minimum version""" return vs.parse(get_version(pkg)) >= vs.parse(minver) ================================================ FILE: verl_distillation/verl/version/version ================================================ 0.7.0.dev ================================================ FILE: verl_distillation/verl/workers/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: verl_distillation/verl/workers/actor/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
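# --- Illustrative sketch for the LoRA hijack in verl/utils/vllm/utils.py above (added for
# clarity; not part of the original source). The keyword fields inherited from vllm's
# LoRARequest (lora_name/lora_int_id/lora_path) are an assumption to verify against your
# vllm version:
#
#     from verl.utils.vllm import TensorLoRARequest, VLLMHijack, is_version_ge
#
#     if is_version_ge("vllm", "0.7.3"):
#         VLLMHijack.hijack()  # patch _load_adapter once, before any LoRA request is served
#
#     request = TensorLoRARequest(
#         lora_name="actor_lora",        # hypothetical adapter name
#         lora_int_id=1,
#         lora_path="",                  # unused: tensors are supplied in memory instead
#         peft_config=peft_config_dict,  # the adapter's PEFT config as a plain dict
#         lora_tensors=lora_state_dict,  # in-memory LoRA tensors synced from the actor
#     )
#     # The request is then handed to the vLLM engine like any other LoRARequest.
# --- End of sketch. ---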
from .base import BasePPOActor from .dp_actor import DataParallelPPOActor __all__ = ["BasePPOActor", "DataParallelPPOActor"] ================================================ FILE: verl_distillation/verl/workers/actor/base.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The base class for Actor """ from abc import ABC, abstractmethod import torch from verl import DataProto __all__ = ["BasePPOActor"] class BasePPOActor(ABC): def __init__(self, config): """The base class for a PPO actor Args: config (DictConfig): a config passed to the PPOActor. We expect the type to be DictConfig (https://omegaconf.readthedocs.io/), but it can be any namedtuple in general. """ super().__init__() self.config = config @abstractmethod def compute_log_prob(self, data: DataProto) -> torch.Tensor: """Compute the log probabilities for a batch of data. Args: data (DataProto): a batch of data represented by DataProto. It must contain the keys ```input_ids```, ```attention_mask``` and ```position_ids```. Returns: torch.Tensor: the log probabilities, exposed under the key ```log_probs``` """ pass @abstractmethod def update_policy(self, data: DataProto) -> dict: """Update the policy with an iterator of DataProto Args: data (DataProto): an iterator over the DataProto returned by ```make_minibatch_iterator``` Returns: Dict: a dictionary containing anything. Typically, it contains the statistics collected while updating the model, such as ```loss```, ```grad_norm```, etc. """ pass ================================================ FILE: verl_distillation/verl/workers/actor/dp_actor.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
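# Construction sketch (hypothetical names): the FSDP worker typically builds this actor as
#   >>> actor = DataParallelPPOActor(config=actor_cfg, actor_module=fsdp_module, actor_optimizer=optim)
# Passing actor_optimizer=None turns the same class into a frozen reference policy.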
""" Single Process Actor """ import logging import os import torch from torch import nn from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor import DTensor import verl.utils.torch_functional as verl_F from verl import DataProto from verl.trainer.ppo.core_algos import agg_loss, get_policy_loss_fn, kl_penalty from verl.utils.attention_utils import index_first_axis, pad_input, rearrange, unpad_input from verl.utils.device import get_device_id, get_device_name from verl.utils.fsdp_utils import FSDPModule, fsdp2_clip_grad_norm_ from verl.utils.profiler import GPUMemoryLogger from verl.utils.py_functional import append_to_dict from verl.utils.seqlen_balancing import prepare_dynamic_batch, restore_dynamic_batch from verl.utils.torch_functional import logprobs_from_logits from verl.utils.ulysses import gather_outputs_and_unpad, ulysses_pad, ulysses_pad_and_slice_inputs from verl.workers.actor import BasePPOActor from verl.workers.config import ActorConfig __all__ = ["DataParallelPPOActor"] logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) class DataParallelPPOActor(BasePPOActor): """FSDP DataParallel PPO Actor or Ref worker Args: config (ActorConfig): Actor config actor_module (nn.Module): Actor or ref module actor_optimizer (torch.optim.Optimizer, optional): Actor optimizer. Defaults to None. """ def __init__(self, config: ActorConfig, actor_module: nn.Module, actor_optimizer: torch.optim.Optimizer = None): """When optimizer is None, it is Reference Policy""" super().__init__(config) self.actor_module = actor_module self.actor_optimizer = actor_optimizer role = "Ref" if actor_optimizer is None else "Actor" self.use_remove_padding = self.config.get("use_remove_padding", False) if torch.distributed.get_rank() == 0: print(f"{role} use_remove_padding={self.use_remove_padding}") self.use_fused_kernels = self.config.get("use_fused_kernels", False) if torch.distributed.get_rank() == 0: print(f"{role} use_fused_kernels={self.use_fused_kernels}") self.ulysses_sequence_parallel_size = self.config.ulysses_sequence_parallel_size self.use_ulysses_sp = self.ulysses_sequence_parallel_size > 1 if self.config.entropy_from_logits_with_chunking: entropy_from_logits = verl_F.entropy_from_logits_with_chunking else: entropy_from_logits = verl_F.entropy_from_logits self.compute_entropy_from_logits = ( torch.compile(entropy_from_logits, dynamic=True) if self.config.get("use_torch_compile", True) # use torch compile by default else entropy_from_logits ) self.device_name = get_device_name() def _forward_micro_batch( self, micro_batch, temperature, calculate_entropy=False ) -> tuple[torch.Tensor, torch.Tensor]: """ Returns: entropy: # (bs, response_len) log_probs: # (bs, response_len) """ response_length = micro_batch["responses"].size(-1) multi_modal_inputs = {} if "multi_modal_inputs" in micro_batch.keys(): from verl.utils.model import extract_multi_modal_inputs multi_modal_inputs = extract_multi_modal_inputs(micro_batch["multi_modal_inputs"]) with torch.autocast(device_type=self.device_name, dtype=torch.bfloat16): input_ids = micro_batch["input_ids"] batch_size, seqlen = input_ids.shape attention_mask = micro_batch["attention_mask"] position_ids = micro_batch["position_ids"] entropy = None if position_ids.dim() == 3: # qwen2vl mrope position_ids = position_ids.transpose(0, 1) # (bsz, 4, seqlen) -> (4, bsz, seqlen) if self.use_remove_padding: input_ids_rmpad, indices, cu_seqlens, *_ = unpad_input( input_ids.unsqueeze(-1), 
attention_mask ) # input_ids_rmpad (total_nnz, ...) input_ids_rmpad = input_ids_rmpad.transpose(0, 1) # (1, total_nnz) # unpad the position_ids to align the rotary if position_ids.dim() == 3: position_ids_rmpad = ( index_first_axis(rearrange(position_ids, "c b s ... -> (b s) c ..."), indices) .transpose(0, 1) .unsqueeze(1) ) # (4, bsz, seqlen) -> (4, 1, bsz * seqlen) else: position_ids_rmpad = index_first_axis( rearrange(position_ids.unsqueeze(-1), "b s ... -> (b s) ..."), indices ).transpose(0, 1) if "image_bound" in multi_modal_inputs: from verl.utils.dataset.vision_utils import process_multi_modal_inputs_for_minicpmo multi_modal_inputs = process_multi_modal_inputs_for_minicpmo( input_ids, attention_mask, position_ids, cu_seqlens, multi_modal_inputs ) # for compute the log_prob input_ids_rmpad_rolled = torch.roll(input_ids_rmpad, shifts=-1, dims=1) # (1, total_nnz) # pad and slice the inputs if sp > 1 if self.use_ulysses_sp: is_vlm_model = hasattr( getattr(self.actor_module, "module", self.actor_module).config, "vision_config" ) if is_vlm_model: # vlm model's inputs will be sliced after embedding input_ids_rmpad, position_ids_rmpad, pad_size = ulysses_pad( input_ids_rmpad, position_ids_rmpad=position_ids_rmpad, sp_size=self.ulysses_sequence_parallel_size, ) else: input_ids_rmpad, position_ids_rmpad, pad_size = ulysses_pad_and_slice_inputs( input_ids_rmpad, position_ids_rmpad=position_ids_rmpad, sp_size=self.ulysses_sequence_parallel_size, ) input_ids_rmpad_rolled, _, _ = ulysses_pad_and_slice_inputs( input_ids_rmpad_rolled, position_ids_rmpad=None, sp_size=self.ulysses_sequence_parallel_size, ) input_ids_rmpad_rolled = input_ids_rmpad_rolled.squeeze(0) # ((total_nnz / sp) + pad) # only pass input_ids and position_ids to enable flash_attn_varlen extra_args = {} if self.use_fused_kernels: extra_args["temperature"] = temperature extra_args["return_dict"] = True output = self.actor_module( input_ids=input_ids_rmpad, attention_mask=None, position_ids=position_ids_rmpad, **multi_modal_inputs, use_cache=False, **extra_args, ) # prevent model thinks we are generating if self.use_fused_kernels: log_probs = output.log_probs.squeeze(0) # (total_nnz,) entropy_rmpad = output.entropy.squeeze(0) # (total_nnz,) else: logits_rmpad = output.logits.squeeze(0) # (total_nnz, vocab_size) logits_rmpad.div_(temperature) # if use_sp: ((total_nnz / sp) + pad) ; if not use_sp: (batch, seqlen) inplace_backward = True if calculate_entropy: inplace_backward = False log_probs = logprobs_from_logits( logits=logits_rmpad, labels=input_ids_rmpad_rolled, inplace_backward=inplace_backward, ) # compute entropy if calculate_entropy: if not self.config.entropy_checkpointing: entropy_rmpad = self.compute_entropy_from_logits(logits_rmpad) # ((total_nnz / sp) + pad) else: entropy_rmpad = torch.utils.checkpoint.checkpoint( self.compute_entropy_from_logits, logits_rmpad ) # gather log_prob if sp > 1 if self.use_ulysses_sp: # gather and unpad for the ulysses sp log_probs = gather_outputs_and_unpad( log_probs, gather_dim=0, unpad_dim=0, padding_size=pad_size, ) if calculate_entropy: entropy_rmpad = gather_outputs_and_unpad( entropy_rmpad, gather_dim=0, unpad_dim=0, padding_size=pad_size, ) # pad back to (bsz, seqlen) if calculate_entropy: full_entropy = pad_input( hidden_states=entropy_rmpad.unsqueeze(-1), indices=indices, batch=batch_size, seqlen=seqlen, ) full_log_probs = pad_input( hidden_states=log_probs.unsqueeze(-1), indices=indices, batch=batch_size, seqlen=seqlen, ) # only return response part: if calculate_entropy: entropy 
= full_entropy.squeeze(-1)[:, -response_length - 1 : -1] # (bsz, response_length) log_probs = full_log_probs.squeeze(-1)[:, -response_length - 1 : -1] # (bsz, response_length) else: # not using rmpad and no ulysses sp extra_args = {} if self.use_fused_kernels: extra_args["temperature"] = temperature extra_args["return_dict"] = True output = self.actor_module( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, **multi_modal_inputs, use_cache=False, **extra_args, ) # prevent the model from behaving as if we are generating if self.use_fused_kernels: log_probs = output.log_probs[:, -response_length - 1 : -1] entropy = output.entropy[:, -response_length - 1 : -1] # (bsz, response_length) else: logits = output.logits logits.div_(temperature) logits = logits[:, -response_length - 1 : -1, :] # (bsz, response_length, vocab_size) log_probs = logprobs_from_logits(logits, micro_batch["responses"]) if calculate_entropy: if not self.config.entropy_checkpointing: entropy = verl_F.entropy_from_logits(logits) # (bsz, response_length) else: entropy = torch.utils.checkpoint.checkpoint(verl_F.entropy_from_logits, logits) return entropy, log_probs def _optimizer_step(self): assert self.config.grad_clip is not None if isinstance(self.actor_module, FSDP): grad_norm = self.actor_module.clip_grad_norm_(max_norm=self.config.grad_clip) elif isinstance(self.actor_module, FSDPModule): grad_norm = fsdp2_clip_grad_norm_(self.actor_module.parameters(), max_norm=self.config.grad_clip) else: grad_norm = torch.nn.utils.clip_grad_norm_(self.actor_module.parameters(), max_norm=self.config.grad_clip) if isinstance(grad_norm, DTensor): grad_norm = grad_norm.full_tensor() # if grad_norm is not finite, skip the update if not torch.isfinite(grad_norm): print(f"WARN: rank {torch.distributed.get_rank()} grad_norm is not finite: {grad_norm}") self.actor_optimizer.zero_grad() else: self.actor_optimizer.step() return grad_norm @GPUMemoryLogger(role="dp actor", logger=logger) def compute_log_prob(self, data: DataProto, calculate_entropy=False, mask_special_token=False) -> tuple[torch.Tensor, torch.Tensor]: """Compute the log probability of the responses given input_ids, attention_mask and position_ids Args: data (DataProto): a DataProto containing keys ``input_ids``: tensor of shape [batch_size, sequence_length]. torch.int64. Note that input_ids is the concatenation of prompt and response, with ``sequence_length = prompt_length + response_length``. ``attention_mask``: tensor of shape [batch_size, sequence_length]. torch.int64. ``position_ids``: tensor of shape [batch_size, sequence_length]. torch.int64. ``responses``: tensor of shape [batch_size, response_length]. torch.int64. Returns: tuple[torch.Tensor, torch.Tensor]: the log_prob tensor and the entropy tensor (the latter is None unless ``calculate_entropy`` is True) """ # set to eval self.actor_module.eval() micro_batch_size = data.meta_info["micro_batch_size"] temperature = data.meta_info["temperature"] # temperature must be in the data.meta_info to avoid silent errors use_dynamic_bsz = data.meta_info["use_dynamic_bsz"] has_multi_modal_inputs = "multi_modal_inputs" in data.non_tensor_batch.keys() select_keys = ["responses", "input_ids", "attention_mask", "position_ids"] if mask_special_token: select_keys.append("distill_special_token_mask") non_tensor_select_keys = ["multi_modal_inputs"] if has_multi_modal_inputs else [] data = data.select(batch_keys=select_keys, non_tensor_batch_keys=non_tensor_select_keys) # Replace distill special tokens with EOS; the log-probs at positions flagged by distill_special_token_mask are overwritten with ref_log_prob_replace_val at the end of this function.
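# Minimal sketch of the masking step below (assuming a Qwen-style vocabulary where
# 151645 is the <|im_end|>/EOS id): positions flagged in distill_special_token_mask
# are overwritten with EOS in input_ids, and their log-probs are later set to
# config.ref_log_prob_replace_val so they carry no real teacher signal.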
if mask_special_token: distill_special_token_mask = torch.zeros_like(data.batch["attention_mask"]) distill_special_token_mask[:,-len(data.batch["distill_special_token_mask"][0]):] = data.batch["distill_special_token_mask"] data.batch["input_ids"][distill_special_token_mask == 1] = 151645 if use_dynamic_bsz: max_token_len = data.meta_info["max_token_len"] * self.ulysses_sequence_parallel_size micro_batches, batch_idx_list = prepare_dynamic_batch(data, max_token_len=max_token_len) else: micro_batches = data.split(micro_batch_size) log_probs_lst = [] entropy_lst = [] for micro_batch in micro_batches: micro_batch = micro_batch.to(get_device_id()) model_inputs = {**micro_batch.batch, **micro_batch.non_tensor_batch} with torch.no_grad(): entropy, log_probs = self._forward_micro_batch( model_inputs, temperature=temperature, calculate_entropy=calculate_entropy ) log_probs_lst.append(log_probs) if calculate_entropy: entropy_lst.append(entropy) log_probs = torch.concat(log_probs_lst, dim=0) entropys = None if calculate_entropy: entropys = torch.concat(entropy_lst, dim=0) if use_dynamic_bsz: log_probs = restore_dynamic_batch(log_probs, batch_idx_list) if calculate_entropy: entropys = restore_dynamic_batch(entropys, batch_idx_list) if mask_special_token: log_probs[data.batch["distill_special_token_mask"] == 1] = self.config.ref_log_prob_replace_val return log_probs, entropys @GPUMemoryLogger(role="dp actor", logger=logger) def update_policy(self, data: DataProto): # make sure we are in training mode self.actor_module.train() temperature = data.meta_info["temperature"] # temperature must be in the data.meta_info to avoid silent error select_keys = [ "responses", "response_mask", "input_ids", "attention_mask", "position_ids", "old_log_probs", "advantages", ] if self.config.use_kl_loss: select_keys.append("ref_log_prob") # Include pre-computed IS weights if present in batch # Weights are computed centrally in trainer and added to batch when algorithm.rollout_is=True if "rollout_is_weights" in data.batch.keys(): select_keys.append("rollout_is_weights") has_multi_modal_inputs = "multi_modal_inputs" in data.non_tensor_batch.keys() non_tensor_select_keys = ["multi_modal_inputs"] if has_multi_modal_inputs else [] data = data.select(batch_keys=select_keys, non_tensor_batch_keys=non_tensor_select_keys) # Split to make minibatch iterator for updating the actor # See PPO paper for details. 
https://arxiv.org/abs/1707.06347 mini_batches = data.split(self.config.ppo_mini_batch_size) on_policy = len(mini_batches) == 1 and self.config.ppo_epochs == 1 metrics = {} for _ in range(self.config.ppo_epochs): for batch_idx, mini_batch in enumerate(mini_batches): if self.config.use_dynamic_bsz: max_token_len = self.config.ppo_max_token_len_per_gpu * self.ulysses_sequence_parallel_size micro_batches, _ = prepare_dynamic_batch(mini_batch, max_token_len=max_token_len) else: self.gradient_accumulation = ( self.config.ppo_mini_batch_size // self.config.ppo_micro_batch_size_per_gpu ) micro_batches = mini_batch.split(self.config.ppo_micro_batch_size_per_gpu) self.actor_optimizer.zero_grad() for micro_batch in micro_batches: micro_batch = micro_batch.to(get_device_id()) micro_batch_metrics = {} model_inputs = {**micro_batch.batch, **micro_batch.non_tensor_batch} response_mask = model_inputs["response_mask"] old_log_prob = model_inputs["old_log_probs"] advantages = model_inputs["advantages"] entropy_coeff = self.config.entropy_coeff loss_agg_mode = self.config.loss_agg_mode if self.config.use_dynamic_bsz: loss_scale_factor = response_mask.shape[0] / self.config.ppo_mini_batch_size else: loss_scale_factor = 1 / self.gradient_accumulation # all return: (bsz, response_length) calculate_entropy = False if entropy_coeff != 0: calculate_entropy = True entropy, log_prob = self._forward_micro_batch( model_inputs, temperature=temperature, calculate_entropy=calculate_entropy ) # for fully_async_policy recipe if hasattr(self.config, "use_rollout_log_probs") and self.config.use_rollout_log_probs: old_log_prob = model_inputs["old_log_probs"] else: if on_policy: old_log_prob = log_prob.detach() else: old_log_prob = model_inputs["old_log_probs"] loss_mode = self.config.policy_loss.get("loss_mode", "vanilla") # vanilla -> verl.trainer.ppo.core_algos.compute_policy_loss_vanilla # Extract pre-computed rollout importance sampling weights if present # Weights are computed centrally in trainer and added when algorithm.rollout_is=True rollout_is_weights = model_inputs.get("rollout_is_weights", None) # NOTE: Both mismatch diagnostic metrics (PPL, KL, etc.) and IS weight metrics # are computed centrally in ray_trainer.py for consistency and efficiency. # This ensures metrics are computed uniformly across all batches at the trainer level # and avoids redundant computation across workers and micro-batches. 
# gpg -> verl.trainer.ppo.core_algos.compute_policy_loss_gpg # clip_cov -> verl.trainer.ppo.core_algos.compute_policy_loss_clip_cov policy_loss_fn = get_policy_loss_fn(loss_mode) # Compute policy loss (all functions return 4 values) pg_loss, pg_clipfrac, ppo_kl, pg_clipfrac_lower = policy_loss_fn( old_log_prob=old_log_prob, log_prob=log_prob, advantages=advantages, response_mask=response_mask, loss_agg_mode=loss_agg_mode, config=self.config, rollout_is_weights=rollout_is_weights, ) if entropy_coeff != 0: entropy_loss = agg_loss(loss_mat=entropy, loss_mask=response_mask, loss_agg_mode=loss_agg_mode) # compute policy loss policy_loss = pg_loss - entropy_loss * entropy_coeff else: policy_loss = pg_loss if self.config.use_kl_loss: ref_log_prob = model_inputs["ref_log_prob"] # compute kl loss kld = kl_penalty( logprob=log_prob, ref_logprob=ref_log_prob, kl_penalty=self.config.kl_loss_type ) kl_loss = agg_loss(loss_mat=kld, loss_mask=response_mask, loss_agg_mode=loss_agg_mode) policy_loss = policy_loss + kl_loss * self.config.kl_loss_coef micro_batch_metrics["actor/kl_loss"] = kl_loss.detach().item() * loss_scale_factor micro_batch_metrics["actor/kl_coef"] = self.config.kl_loss_coef if self.config.use_dynamic_bsz: # relative to the dynamic bsz loss = policy_loss * loss_scale_factor else: loss = policy_loss * loss_scale_factor loss.backward() micro_batch_metrics.update( { "actor/pg_loss": pg_loss.detach().item() * loss_scale_factor, "actor/pg_clipfrac": pg_clipfrac.detach().item(), "actor/ppo_kl": ppo_kl.detach().item(), "actor/pg_clipfrac_lower": pg_clipfrac_lower.detach().item(), } ) append_to_dict(metrics, micro_batch_metrics) grad_norm = self._optimizer_step() mini_batch_metrics = {"actor/grad_norm": grad_norm.detach().item()} append_to_dict(metrics, mini_batch_metrics) self.actor_optimizer.zero_grad() return metrics ================================================ FILE: verl_distillation/verl/workers/actor/megatron_actor.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Megatron Actor. In megatron actor, the differences are: 1. 
We only make minibatch Note that our model doesn't have to be `MegatronModule` because we don't share embedding in the last layer """ import itertools import logging import os from functools import partial from typing import Iterable import torch import torch.distributed from megatron.core import parallel_state as mpu from megatron.core.distributed import finalize_model_grads from megatron.core.optimizer import DistributedOptimizer from megatron.core.pipeline_parallel import get_forward_backward_func from omegaconf import OmegaConf from torch import nn from verl import DataProto from verl.trainer.ppo.core_algos import agg_loss, get_policy_loss_fn, kl_penalty from verl.utils.device import get_device_id, get_torch_device from verl.utils.megatron.pipeline_parallel import make_batch_generator from verl.utils.megatron.tensor_parallel import vocab_parallel_entropy, vocab_parallel_log_probs_from_logits from verl.utils.megatron_utils import get_model_config from verl.utils.profiler import GPUMemoryLogger from verl.utils.profiler.profile import Profiler from verl.utils.py_functional import append_to_dict from verl.utils.seqlen_balancing import get_reverse_idx, rearrange_micro_batches from verl.utils.torch_functional import broadcast_dict_tensor from verl.workers.actor import BasePPOActor __all__ = ["MegatronPPOActor"] logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) class MegatronPPOActor(BasePPOActor): def __init__( self, config, model_config, hf_config, tf_config, actor_module: nn.ModuleList, actor_optimizer: DistributedOptimizer, ): """MegatronPPOActor class. This class implements the simple PPO logic when the model is built with Megatron. Args: config (OmegaConf): the basic config that contains the hyper-parameters of the PPO Actor. It must contain ``ppo_micro_batch_size_per_gpu``: micro batch size when updating ppo. ``ppo_mini_batch_size``: minibatch size when updating ppo using the batch data. ``ppo_epochs``: number of epochs to update the actor using the batch data. ``shuffle``: whether to shuffle the data after each ppo epoch. ``clip_ratio``: clip ratio of the ppo algorithm. See https://arxiv.org/abs/1707.06347. ``entropy_coeff``: entropy coefficient of the PPO loss. See https://arxiv.org/abs/1707.06347. model_config (OmegaConf): model configuration. It must contain ``model_config.vocab_size`` and ``model_config.hidden_size`` hf_config (PretrainedConfig): huggingface config tf_config (TransformerConfig): mcore transformer config actor_module (nn.ModuleList): actor module is a ModuleList that contains a list of nn.Module in this pp stage. Each nn.Module in this rank holds a vpp module chunk. See https://arxiv.org/pdf/2104.04473.pdf for more details. The actor module has some constraints to follow in order to use the updating logic implemented here 1. It must implement unpad_input before any computation and pad_input after all the computation. Remove padding is an optimization that removes the padding tokens. See the unpad_input and pad_input functions in flash-attn (https://github.com/Dao-AILab/flash-attention/blob/main/flash_attn/bert_padding.py). 2. Each pp stage must return the hidden state with the same shape [total_nnz, 1, hidden_size], where total_nnz is the number of valid tokens in this batch. If sequence parallel is enabled, the size of the hidden state is [total_nnz // tp, 1, hidden_size].
actor_optimizer (DistributedOptimizer): currently, we only support DistributedOptimizer in Megatron. It implements a ZeRO-1 optimizer that shards the optimizer state across dp ranks. >>> from megatron.training import get_model >>> from megatron.optimizer import get_megatron_optimizer >>> actor_module = get_model(megatron_actor_model_provider, wrap_with_ddp=True) >>> actor_module = nn.ModuleList(actor_module) >>> actor_optimizer = get_megatron_optimizer(actor_module) >>> actor = MegatronPPOActor(config=config, >>> model_config=actor_model_config, >>> hf_config=hf_config, >>> tf_config=tf_config, >>> actor_module=actor_module, >>> actor_optimizer=actor_optimizer) """ super().__init__(config) self._validate_config(config) self.model_config = model_config self.hf_config = hf_config self.tf_config = tf_config self.actor_module = actor_module self.actor_optimizer: DistributedOptimizer = actor_optimizer self.use_torch_profiler = self.config.profiler.get("tool") == "torch" if self.use_torch_profiler: self.prof = Profiler( self.config.profiler, tool_config=self.config.profiler.get("tool_config", {}).get("torch", {}) ) else: self.prof = None self.use_fused_kernels = self.config.get("use_fused_kernels", False) if self.use_fused_kernels and not getattr(self.config, "overlap_moe_expert_parallel_comm", False): # do not patch if overlap_moe_expert_parallel_comm is enabled from verl.models.mcore.model_forward_fused import patch_fused_forward for model in self.actor_module: patch_fused_forward(model) self.optimizer_step_args = OmegaConf.create( { "skip_grad": None, "overlap_dp_param_comm": False, "overlap_dp_grad_comm": False, "gradient_accumulation_steps": 1, "sequence_parallel": self.tf_config.sequence_parallel, "DDP_impl": "local", "layernorm_allreduce_bucket_threshold": 0, "pipeline_model_parallel_split_rank": None, "reduce_grads_use_alltoall": False, } ) config = get_model_config(self.actor_module[0]) print(config) config.finalize_model_grads_func = finalize_model_grads def _validate_config(self, config) -> None: """Validate config options not implemented for the Megatron backend""" assert config.get("ulysses_sequence_parallel_size", 1) == 1 if config.get("shuffle", False): assert config.data_loader_seed is not None, "If shuffling the dataloader, the seed must be set manually" if config.megatron.tensor_model_parallel_size == 1: print("[Warning] Because actor tp size == 1, setting sp to False") config.megatron.sequence_parallel = False self.config = config @GPUMemoryLogger(role="megatron actor", logger=logger) def compute_log_prob(self, data: DataProto, calculate_entropy=False) -> torch.Tensor: """Compute the log probability of the responses given input_ids, attention_mask and position_ids Args: data (DataProto): a DataProto containing keys ``input_ids``: tensor of shape [batch_size, sequence_length]. torch.int64. Note that input_ids is the concatenation of prompt and response, with ``sequence_length = prompt_length + response_length``. ``attention_mask``: tensor of shape [batch_size, sequence_length]. torch.int64. ``position_ids``: tensor of shape [batch_size, sequence_length]. torch.int64. ``responses``: tensor of shape [batch_size, response_length]. torch.int64.
Returns: DataProto: torch.Tensor: the log_prob tensor """ use_dynamic_bsz = data.meta_info.get("use_dynamic_bsz", False) micro_batch_size = data.meta_info.get("micro_batch_size", None) max_token_len = data.meta_info.get("max_token_len", None) if use_dynamic_bsz: assert max_token_len is not None, "max_token_len must be set when use_dynamic_bsz is True" max_token_len = max_token_len * self.config.megatron.context_parallel_size else: assert micro_batch_size is not None, ( "micro batch size is needed for forward compute when use_dynamic_bsz is False" ) def compute_logprobs_fn(output, data, use_dynamic_bsz=False, indices=None): response = data["responses"] response_length = response.size(1) log_probs = output["log_probs"][:, -response_length - 1 : -1].contiguous() return {"log_probs": log_probs} # We make recompute_old_log_prob by default here. # TODO (zhangchi.usc1992): actually, this function should only return log_prob and this logic should be # handled by user outside recompute_old_log_prob = self.config.get("recompute_old_log_prob", True) entropys = torch.Tensor() if recompute_old_log_prob: select_keys = ["responses", "input_ids", "attention_mask", "position_ids"] batch = data.select(batch_keys=select_keys).batch input_ids = batch["input_ids"] batch_size = input_ids.size(0) response = batch["responses"] response_length = response.size(1) with torch.no_grad(): output = self.forward_backward_batch( data, forward_only=True, post_process_fn=compute_logprobs_fn, calculate_entropy=calculate_entropy, use_dynamic_bsz=use_dynamic_bsz, micro_batch_size=micro_batch_size, max_token_len=max_token_len, ) if mpu.is_pipeline_last_stage(ignore_virtual=True): # only on last rank. It should be on every tp rank if calculate_entropy: log_probs = [o[0]["log_probs"] for o in output["output"]] # (bs, seq_size) else: log_probs = [o["log_probs"] for o in output["output"]] # (bs, seq_size) log_probs = torch.cat(log_probs, dim=0).to(torch.float32) if use_dynamic_bsz: indices = output["indices"] indices = list(itertools.chain.from_iterable(indices)) assert len(indices) == log_probs.size(0), f"{len(indices)} vs. {log_probs.size()}" revert_indices = torch.tensor(get_reverse_idx(indices), dtype=torch.long) log_probs = log_probs[revert_indices] else: log_probs = torch.empty( size=(batch_size, response_length), dtype=torch.float32, device=input_ids.device ) log_probs = log_probs.to(get_device_id()) # broadcast across pp ranks torch.distributed.broadcast( tensor=log_probs, src=mpu.get_pipeline_model_parallel_last_rank(), group=mpu.get_pipeline_model_parallel_group(), async_op=False, ) log_probs = log_probs.to("cpu") if calculate_entropy: # Note that o[0] is metrics, o[1] is entropy if mpu.is_pipeline_last_stage(ignore_virtual=True): entropys = torch.cat([o[1] for o in output["output"]], dim=0) entropys = entropys.to(torch.float32) if use_dynamic_bsz: indices = output["indices"] indices = list(itertools.chain.from_iterable(indices)) assert len(indices) == entropys.size(0), f"{len(indices)} vs. 
{entropys.size()}" revert_indices = torch.tensor(get_reverse_idx(indices), dtype=torch.long) entropys = entropys[revert_indices] else: entropys = torch.empty( size=(batch_size, response_length), dtype=torch.float32, device=input_ids.device ) # broadcast across pp ranks entropys = entropys.to(get_device_id()) torch.distributed.broadcast( tensor=entropys, src=mpu.get_pipeline_model_parallel_last_rank(), group=mpu.get_pipeline_model_parallel_group(), async_op=False, ) entropys = entropys.to("cpu") # add empty cache after each compute get_torch_device().empty_cache() return log_probs, entropys def make_minibatch_iterator(self, data: DataProto) -> Iterable[DataProto]: """Make minibatch iterator for updating the actor Args: data (DataProto): a DataProto containing keys ``input_ids``: tensor of shape [batch_size, sequence_length]. torch.int64, where ``sequence_length = prompt_length + response_length`` ``attention_mask``: tensor of shape [batch_size, sequence_length]. torch.int64 ``position_ids``: tensor of shape [batch_size, sequence_length]. torch.int64 ``responses``: tensor of shape [batch_size, response_length]. torch.int64. Note that responses = input_ids[:, -response_length:] ``old_log_probs``: tensor of shape [batch_size, response_length]. torch.float32. The log probability of responses. ``advantages``: tensor of shape [batch_size, response_length]. torch.float32. The advantages of responses. See PPO paper for details. https://arxiv.org/abs/1707.06347 Returns: """ select_keys = [ "responses", "input_ids", "attention_mask", "response_mask", "position_ids", "old_log_probs", "advantages", ] if self.config.use_kl_loss: select_keys.append("ref_log_prob") # Include pre-computed IS weights if present in batch # Weights are computed centrally in trainer and added to batch when algorithm.rollout_is=True if "rollout_is_weights" in data.batch.keys(): select_keys.append("rollout_is_weights") self.has_multi_modal_inputs = "multi_modal_inputs" in data.non_tensor_batch.keys() if self.has_multi_modal_inputs: data = data.select(select_keys, ["multi_modal_inputs"]) else: data = data.select(batch_keys=select_keys) return data.make_iterator( mini_batch_size=self.config.ppo_mini_batch_size, epochs=self.config.ppo_epochs, seed=self.config.data_loader_seed, dataloader_kwargs={"shuffle": self.config.shuffle}, ) def forward_backward_batch( self, data: DataProto, forward_only=False, post_process_fn=None, calculate_entropy=False, use_dynamic_bsz=False, micro_batch_size=None, max_token_len=None, mini_batch_size=None, ): """ We assume: - The model takes input: (input_ids, attention_mask, position_ids). No rmpad for the input - The communication shape is (total_nnz_pad_to_sp // tp_size, 1, hidden_size) if sequence parallel is enabled """ # broadcast from last pp rank to all other pp ranks # TODO: actually, we just need to control the sampling order. 
data.to(get_device_id()) data.batch = data.batch.contiguous() mini_batch = data broadcast_dict_tensor( mini_batch.batch, src=mpu.get_pipeline_model_parallel_last_rank(), group=mpu.get_pipeline_model_parallel_group(), ) mini_batch.to("cpu") # split into micro-batches mini_batch.batch["attention_mask"] = mini_batch.batch["attention_mask"].to(bool) self.has_multi_modal_inputs = "multi_modal_inputs" in mini_batch.non_tensor_batch.keys() if self.has_multi_modal_inputs: mini_batch.batch["multi_modal_inputs"] = mini_batch.non_tensor_batch["multi_modal_inputs"] mini_batch.batch["multi_modal_inputs_idx"] = torch.Tensor( list(range(len(mini_batch.non_tensor_batch["multi_modal_inputs"]))) ).to(torch.int64) if mini_batch.batch["position_ids"].dim() == 3: # qwen2vl mrope [bs, 3, seq_len] mini_batch.batch["position_ids"] = mini_batch.batch["position_ids"][ :, 0 ] # mcore patch recompute qwen2vl's pos ids during forward indices = None temperature = data.meta_info["temperature"] if use_dynamic_bsz: assert max_token_len is not None, "max_token_len must be set when use_dynamic_bsz is True" vpp_size = mpu.get_virtual_pipeline_model_parallel_world_size() if vpp_size is not None and vpp_size > 1: microbatch_group_size_per_vp_stage = self.tf_config.microbatch_group_size_per_vp_stage micro_batches, indices = rearrange_micro_batches( batch=mini_batch.batch, num_batches_divided_by=microbatch_group_size_per_vp_stage, max_token_len=max_token_len, ) assert len(micro_batches) % self.tf_config.microbatch_group_size_per_vp_stage == 0, ( f"micro_batches {micro_batches} must be divisible by microbatch_group_size_per_vp_stage " f"{microbatch_group_size_per_vp_stage} for megatron backend" ) else: micro_batches, indices = rearrange_micro_batches(batch=mini_batch.batch, max_token_len=max_token_len) total_seqlen = max_token_len else: assert micro_batch_size is not None, ( "micro_batch_size is needed to be passed in when not using dynamic batch size" ) micro_batches = mini_batch.batch.split(micro_batch_size) seq_len = micro_batches[0]["input_ids"].shape[1] total_seqlen = micro_batch_size * seq_len # compute input shapes for pp stages n_micro_batch = len(micro_batches) forward_backward_func = get_forward_backward_func() def loss_func(output, data, meta_info): # For memory efficiency # We move calculation of entropy to compute_log_probs, forward_only == True log_probs = None entropy = None if isinstance(output, dict): log_probs = output["log_probs"] if "entropy" in output: entropy = output["entropy"] else: assert isinstance(output, torch.Tensor) log_probs = output device = log_probs.device metrics = {} if forward_only: if post_process_fn is None: pass # metrics["logits"] = output else: stats = post_process_fn(output, data) metrics.update(stats) if not calculate_entropy: return torch.tensor(1.0, device=device), metrics responses = data["responses"] response_length = responses.size(1) response_mask = data["response_mask"].to(bool) loss_agg_mode = self.config.loss_agg_mode # compute policy loss log_prob = log_probs[:, -response_length - 1 : -1].contiguous() ret_entropy = None stats = {} if not forward_only: old_log_prob = data["old_log_probs"] advantages = data["advantages"] entropy_coeff = self.config.entropy_coeff loss_agg_mode = self.config.loss_agg_mode loss_mode = self.config.policy_loss.get("loss_mode", "vanilla") policy_loss_fn = get_policy_loss_fn(loss_mode) # Extract pre-computed rollout importance sampling weights if present # Weights are computed centrally in trainer and added when algorithm.rollout_is=True 
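# Rollout-IS sketch (an assumption about the trainer-side computation, not taken from
# this file): the attached per-token weights typically look like
#   w = clamp(exp(log_prob_training - log_prob_rollout), max=c)
# correcting for rollout/training policy mismatch; when the key is absent, the policy
# loss below is computed unweighted.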
rollout_is_weights = data.get("rollout_is_weights", None) # NOTE: Both mismatch diagnostic metrics (PPL, KL, etc.) and IS weight metrics # are computed centrally in ray_trainer.py for consistency and efficiency. # This ensures metrics are computed uniformly across all batches at the trainer level # and avoids redundant computation across workers and micro-batches. pg_loss, pg_clipfrac, ppo_kl, pg_clipfrac_lower = policy_loss_fn( old_log_prob=old_log_prob, log_prob=log_prob, advantages=advantages, response_mask=response_mask, loss_agg_mode=loss_agg_mode, config=self.config, rollout_is_weights=rollout_is_weights, ) stats.update( { "actor/pg_loss": pg_loss.detach().item(), "actor/pg_clipfrac": pg_clipfrac.detach().item(), "actor/ppo_kl": ppo_kl.detach().item(), "actor/pg_clipfrac_lower": pg_clipfrac_lower.detach().item(), } ) policy_loss = pg_loss if calculate_entropy: entropy = output["entropy"][:, -response_length - 1 : -1].contiguous() if not forward_only: entropy_loss = agg_loss(loss_mat=entropy, loss_mask=response_mask, loss_agg_mode=loss_agg_mode) entropy_coeff = meta_info["entropy_coeff"] policy_loss = pg_loss - entropy_coeff * entropy_loss else: ret_entropy = entropy if forward_only: policy_loss = torch.tensor(1.0, device=device) else: if self.config.use_kl_loss: ref_log_prob = data["ref_log_prob"] # compute kl loss kld = kl_penalty(logprob=log_prob, ref_logprob=ref_log_prob, kl_penalty=self.config.kl_loss_type) kl_loss = agg_loss(loss_mat=kld, loss_mask=response_mask, loss_agg_mode=self.config.loss_agg_mode) policy_loss = policy_loss + kl_loss * self.config.kl_loss_coef metrics["actor/kl_loss"] = kl_loss.detach().item() metrics["actor/kl_coef"] = self.config.kl_loss_coef # return loss and stats append_to_dict(metrics, stats) return policy_loss, [metrics, ret_entropy] def forward_step(batch_iter, model, return_schedule_plan: bool = False): """ Args: batch_iter: the batch iterator model: the model return_schedule_plan: whether to return the schedule plan, for 1f1b overlap """ if return_schedule_plan: assert self.tf_config.overlap_moe_expert_parallel_comm, ( "overlap_moe_expert_parallel_comm must be enabled to return the schedule plan" ) # TODO: Fix this assert not calculate_entropy, "calculate_entropy must be disabled to return the schedule plan" from megatron.core.models.gpt.gpt_model import GPTModel assert isinstance(model, GPTModel), "model must be a GPTModel" assert self.use_fused_kernels, "use_fused_kernels must be enabled to return the schedule plan" # TODO: support VLM with MoE from verl.models.mcore.model_forward_1f1b_overlap import gptmodel_forward_1f1b_overlap batch = next(batch_iter) batch = batch.to(get_device_id()) batch = batch.contiguous() input_ids = batch["input_ids"] attention_mask = batch["attention_mask"].to(bool) position_ids = batch["position_ids"] multi_modal_inputs = {} if "multi_modal_inputs" in batch: from verl.utils.model import extract_multi_modal_inputs indices = batch.get("multi_modal_inputs_idx", None) multi_modal_inputs = extract_multi_modal_inputs(batch["multi_modal_inputs"], indices) responses = batch["responses"] response_length = responses.size(1) label = position_ids.clone() label[:, -response_length - 1 : -1] = responses label_mask = attention_mask.clone() label_mask[:, : -response_length - 1] = False label_mask[:, -1] = False from verl.models.mcore import get_mcore_forward_fn, get_mcore_forward_fused_fn if self.use_fused_kernels: forward_fn = get_mcore_forward_fused_fn(self.hf_config) if return_schedule_plan: forward_fn = 
gptmodel_forward_1f1b_overlap # return dict of [logits, entropy] output = forward_fn( model=model, input_ids=input_ids, position_ids=position_ids, attention_mask=attention_mask, labels=label, labels_mask=label_mask, temperature=temperature, multi_modal_inputs=multi_modal_inputs, ) else: forward_fn = get_mcore_forward_fn(self.hf_config) def logits_processor(logits, label, label_mask): assert logits.shape[:2] == label.shape[:2] assert label.shape == label_mask.shape logits.div_(temperature) ret = {} if calculate_entropy: logits_bak = logits.clone() logger.warning_once( "For memory-efficient computation, enable fused kernels via " "`actor_rollout_ref.model.use_fused_kernels=True`. " "The current `clone()` operation ensures correctness but increases memory usage." ) entropy = vocab_parallel_entropy(logits) ret["entropy"] = entropy else: logits_bak = logits log_probs = vocab_parallel_log_probs_from_logits(logits_bak, label) log_probs = log_probs.masked_fill(~label_mask, 0.0) ret["log_probs"] = log_probs return ret logits_processor_args = {"label": label, "label_mask": label_mask} output = forward_fn( model=model, input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, multi_modal_inputs=multi_modal_inputs, logits_processor=logits_processor, logits_processor_args=logits_processor_args, ) if forward_only: meta_info = None else: clip_ratio_c = self.config.get("clip_ratio_c", 3.0) meta_info = { "clip_ratio": self.config.clip_ratio, "entropy_coeff": self.config.entropy_coeff, "clip_ratio_c": clip_ratio_c, } return output, partial(loss_func, data=batch, meta_info=meta_info) # batch should be a list of batches inside micro-batches batch_generator = make_batch_generator(micro_batches, vpp_size=len(self.actor_module)) # TODO: we may use the new schedule instead # for flash-attn: (seq_len, batch_size, hidden_size) = (mbs*seq_len, 1, hidden_size) if mpu.get_pipeline_model_parallel_world_size() > 1: losses_reduced = forward_backward_func( forward_step_func=forward_step, data_iterator=batch_generator, model=self.actor_module, num_microbatches=n_micro_batch, seq_length=total_seqlen, # no use when input_shapes was set micro_batch_size=1, # no use when input_shapes was set forward_only=forward_only, ) else: losses_reduced = forward_backward_func( forward_step_func=forward_step, data_iterator=batch_generator, model=self.actor_module, num_microbatches=n_micro_batch, seq_length=total_seqlen, # in use for pp = 1 micro_batch_size=1, # in use for pp = 1 forward_only=forward_only, ) # loss_reduces contains the stats returned from loss_func if self.has_multi_modal_inputs: data.batch.pop("multi_modal_inputs") data.batch.pop("multi_modal_inputs_idx") data.non_tensor_batch.pop("multi_modal_inputs") losses_reduced = {"output": losses_reduced} if use_dynamic_bsz: losses_reduced["indices"] = indices return losses_reduced @GPUMemoryLogger(role="megatron actor", logger=logger) def update_policy(self, dataloader: Iterable[DataProto]) -> dict: """Update the policy with an iterator of DataProto Args: dataloader (Iterable[DataProto]): an iterator over the DataProto that returns by ``make_minibatch_iterator`` The keys of each data batch is described in the make_minibatch_iterator. Returns: Dict: a dictionary containing the statistics. Note that the statistics are only valid in the last pp stage and users have to combine the output in each dp rank manually. 
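Example (illustrative driver-side usage): ``metrics = actor.update_policy(actor.make_minibatch_iterator(data))``, after which each dp rank can log or all-reduce the returned statistics.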
""" metrics = {} if self.use_torch_profiler and self.prof and self.prof.enable: self.prof.start() for data in dataloader: self.actor_optimizer.zero_grad() # use use_contiguous_buffers_in_local_ddp and no overlap_dp_param_comm for chunk in self.actor_module: # if use distributed optimizer, zero grad buffer will be handled by optimizer chunk.zero_grad_buffer() calculate_entropy = self.config.entropy_coeff != 0 if data.meta_info.get("micro_batch_size", None) is not None: micro_batch_size = data.meta_info["micro_batch_size"] else: micro_batch_size = self.config.ppo_micro_batch_size_per_gpu max_token_len = None if self.config.use_dynamic_bsz: max_token_len = self.config.ppo_max_token_len_per_gpu * self.config.megatron.context_parallel_size metric_micro_batch = self.forward_backward_batch( data, calculate_entropy=calculate_entropy, use_dynamic_bsz=self.config.use_dynamic_bsz, micro_batch_size=micro_batch_size, max_token_len=max_token_len, mini_batch_size=self.config.ppo_mini_batch_size, ) metric_micro_batch = metric_micro_batch["output"] for metric in metric_micro_batch: # Note that o[0] is metrics, o[1] is entropy, o[2] is response_mask append_to_dict(metrics, metric[0]) # append the metric from this micro-batch to global metrics. update_successful, grad_norm, num_zeros_in_grad = self.actor_optimizer.step() data = {"actor/grad_norm": grad_norm} append_to_dict(metrics, data) if update_successful: # allgather already execute in optimizer.step in new megatron pass else: raise NotImplementedError if self.use_torch_profiler and self.prof and self.prof.enable: self.prof.step() # add empty cache after each compute if self.use_torch_profiler and self.prof and self.prof.enable: self.prof.stop_and_save() self.prof.stop_trace() get_torch_device().empty_cache() return metrics ================================================ FILE: verl_distillation/verl/workers/config/__init__.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from . import actor, critic, engine, model, optimizer, reward_model, rollout from .actor import * # noqa: F401 from .critic import * # noqa: F401 from .engine import * # noqa: F401 from .model import * # noqa: F401 from .optimizer import * # noqa: F401 from .reward_model import * # noqa: F401 from .rollout import * # noqa: F401 __all__ = ( actor.__all__ + critic.__all__ + reward_model.__all__ + engine.__all__ + optimizer.__all__ + rollout.__all__ + model.__all__ ) ================================================ FILE: verl_distillation/verl/workers/config/actor.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from typing import Any, Optional from omegaconf import MISSING from verl.base_config import BaseConfig from verl.trainer.config import CheckpointConfig from verl.utils.profiler.config import ProfilerConfig from .engine import FSDPEngineConfig, McoreEngineConfig from .model import HFModelConfig from .optimizer import OptimizerConfig __all__ = ["PolicyLossConfig", "ActorConfig", "FSDPActorConfig", "McoreActorConfig"] @dataclass class PolicyLossConfig(BaseConfig): """Configuration for policy loss computation. The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config. Args: loss_mode (str): Loss function mode. Options: 'vanilla', 'clip-cov', 'kl-cov', 'gpg'. clip_cov_ratio (float): Ratio of tokens to be clipped for clip-cov loss. clip_cov_lb (float): Lower bound for clip-cov loss. clip_cov_ub (float): Upper bound for clip-cov loss. kl_cov_ratio (float): Ratio of tokens to be applied KL penalty for kl-cov loss. ppo_kl_coef (float): KL divergence penalty coefficient. """ loss_mode: str = "vanilla" clip_cov_ratio: float = 0.0002 clip_cov_lb: float = 1.0 clip_cov_ub: float = 5.0 kl_cov_ratio: float = 0.0002 ppo_kl_coef: float = 0.1 @dataclass class ActorConfig(BaseConfig): """Configuration for actor model training. The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config. Args: strategy (str): Training strategy. Must be specified. ppo_mini_batch_size (int): Mini-batch size for PPO training. ppo_micro_batch_size (Optional[int]): Micro-batch size for PPO training. If None, uses ppo_micro_batch_size_per_gpu. ppo_micro_batch_size_per_gpu (Optional[int]): Micro-batch size per GPU for PPO training. use_dynamic_bsz (bool): Whether to use dynamic batch sizing. ppo_max_token_len_per_gpu (int): Maximum token length per GPU for PPO training. clip_ratio (float): PPO clipping ratio for policy loss. clip_ratio_low (float): Lower bound for PPO clipping ratio. clip_ratio_high (float): Upper bound for PPO clipping ratio. policy_loss (PolicyLossConfig): Configuration for policy loss computation. clip_ratio_c (float): Clipping ratio for critic loss. loss_agg_mode (str): Loss aggregation mode. Options: 'token-mean', 'sample-mean'. entropy_coeff (float): Entropy coefficient for regularization. use_kl_loss (bool): Whether to use KL divergence loss. use_torch_compile (bool): Whether to use torch.compile for optimization. kl_loss_coef (float): KL divergence loss coefficient. kl_loss_type (str): Type of KL loss to use. ppo_epochs (int): Number of PPO epochs per training step. shuffle (bool): Whether to shuffle data during training. checkpoint (CheckpointConfig): Configuration for checkpointing. optim (OptimizerConfig): Configuration for optimizer. use_fused_kernels (bool): Whether to use custom fused kernels (e.g., FlashAttention, fused MLP). 
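Example (illustrative values): ``ActorConfig(strategy="fsdp", ppo_mini_batch_size=256, ppo_micro_batch_size_per_gpu=8)`` satisfies the micro-batch checks in ``__post_init__``.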
""" _mutable_fields = BaseConfig._mutable_fields | { "ppo_mini_batch_size", "ppo_micro_batch_size", "ppo_micro_batch_size_per_gpu", "ppo_infer_micro_batch_size_per_gpu", } strategy: str = MISSING ppo_mini_batch_size: int = 256 ppo_micro_batch_size: Optional[int] = None # deprecate ppo_micro_batch_size_per_gpu: Optional[int] = None ppo_infer_micro_batch_size_per_gpu: Optional[int] = None use_dynamic_bsz: bool = False ppo_max_token_len_per_gpu: int = 16384 ppo_infer_max_token_len_per_gpu: int = 16384 clip_ratio: float = 0.2 clip_ratio_low: float = 0.2 clip_ratio_high: float = 0.2 freeze_vision_tower: bool = False policy_loss: PolicyLossConfig = field(default_factory=PolicyLossConfig) clip_ratio_c: float = 3.0 loss_agg_mode: str = "token-mean" entropy_coeff: float = 0 use_kl_loss: bool = False use_torch_compile: bool = True kl_loss_coef: float = 0.001 kl_loss_type: str = "low_var_kl" ppo_epochs: int = 1 shuffle: bool = False checkpoint: CheckpointConfig = field(default_factory=CheckpointConfig) optim: OptimizerConfig = field(default_factory=OptimizerConfig) use_fused_kernels: bool = False profiler: ProfilerConfig = field(default_factory=ProfilerConfig) engine: BaseConfig = field(default_factory=BaseConfig) data_loader_seed = 1 rollout_n: int = 1 # must be override by sampling config ref_log_prob_replace_val: float = -10.0 model_config: HFModelConfig = field(default_factory=BaseConfig) def __post_init__(self): """Validate actor configuration parameters.""" assert self.strategy != MISSING assert self.rollout_n != MISSING if not self.use_dynamic_bsz: if self.ppo_micro_batch_size is not None and self.ppo_micro_batch_size_per_gpu is not None: raise ValueError( "[actor] You have set both 'actor.ppo_micro_batch_size' AND 'actor.ppo_micro_batch_size_per_gpu'. " "Please remove 'actor.ppo_micro_batch_size' because only '*_ppo_micro_batch_size_per_gpu' is " "supported (the former is deprecated)." ) else: assert not (self.ppo_micro_batch_size is None and self.ppo_micro_batch_size_per_gpu is None), ( "[actor] Please set at least one of 'actor.ppo_micro_batch_size' or " "'actor.ppo_micro_batch_size_per_gpu' if use_dynamic_bsz is not enabled." 
) valid_loss_agg_modes = [ "token-mean", "seq-mean-token-sum", "seq-mean-token-mean", "seq-mean-token-sum-norm", ] if self.loss_agg_mode not in valid_loss_agg_modes: raise ValueError(f"Invalid loss_agg_mode: {self.loss_agg_mode}") def validate(self, n_gpus: int, train_batch_size: int, model_config: dict = None): """Validate actor configuration with runtime parameters.""" if not self.use_dynamic_bsz: if train_batch_size < self.ppo_mini_batch_size: raise ValueError( f"train_batch_size ({train_batch_size}) must be >= " f"actor.ppo_mini_batch_size ({self.ppo_mini_batch_size})" ) sp_size = getattr(self, "ulysses_sequence_parallel_size", 1) if self.ppo_micro_batch_size is not None: if self.ppo_mini_batch_size % self.ppo_micro_batch_size != 0: raise ValueError( f"ppo_mini_batch_size ({self.ppo_mini_batch_size}) must be divisible by " f"ppo_micro_batch_size ({self.ppo_micro_batch_size})" ) if self.ppo_micro_batch_size * sp_size < n_gpus: raise ValueError( f"ppo_micro_batch_size ({self.ppo_micro_batch_size}) * " f"ulysses_sequence_parallel_size ({sp_size}) must be >= n_gpus ({n_gpus})" ) @staticmethod def _check_mutually_exclusive(mbs, mbs_per_gpu, name: str): """Validate mutually exclusive micro batch size configuration options.""" param = "ppo_micro_batch_size" param_per_gpu = f"{param}_per_gpu" if mbs is None and mbs_per_gpu is None: raise ValueError(f"[{name}] Please set at least one of '{name}.{param}' or '{name}.{param_per_gpu}'.") if mbs is not None and mbs_per_gpu is not None: raise ValueError( f"[{name}] You have set both '{name}.{param}' AND '{name}.{param_per_gpu}'. Please remove " f"'{name}.{param}' because only '*_{param_per_gpu}' is supported (the former is deprecated)." ) @dataclass class McoreActorConfig(ActorConfig): """Configuration for Megatron actor models. The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config. Args: strategy (str): Training strategy set to 'megatron' for Megatron parallelism. data_loader_seed (Optional[int]): Seed for data loader. If None, uses global seed. load_weight (bool): Whether to load model weights from checkpoint. megatron (dict[str, Any]): Configuration for Megatron parallelism settings. profile (dict[str, Any]): Configuration for profiling settings. """ strategy: str = "megatron" data_loader_seed: Optional[int] = None load_weight: bool = True megatron: McoreEngineConfig = field(default_factory=McoreEngineConfig) profile: dict[str, Any] = field(default_factory=dict) use_rollout_log_probs: bool = False @dataclass class FSDPActorConfig(ActorConfig): """Configuration for FSDP actor models. The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config. Args: strategy (str): Training strategy set to 'fsdp' for Fully Sharded Data Parallel. grad_clip (float): Gradient clipping threshold. ulysses_sequence_parallel_size (int): Ulysses sequence parallel size for long sequences. entropy_from_logits_with_chunking (bool): Whether to compute entropy from logits with chunking for memory efficiency. entropy_checkpointing (bool): Whether to use gradient checkpointing for entropy computation. fsdp_config (dict[str, Any]): Configuration for FSDP settings. 
use_remove_padding (bool): Whether to remove padding tokens in inputs during training """ strategy: str = "fsdp" grad_clip: float = 1.0 ulysses_sequence_parallel_size: int = 1 entropy_from_logits_with_chunking: bool = False entropy_checkpointing: bool = False fsdp_config: FSDPEngineConfig = field(default_factory=FSDPEngineConfig) use_remove_padding: bool = False profiler: ProfilerConfig = field(default_factory=ProfilerConfig) use_rollout_log_probs: bool = False def __post_init__(self): """Validate FSDP actor configuration parameters.""" super().__post_init__() def validate(self, n_gpus: int, train_batch_size: int, model_config: dict = None): """Validate FSDP actor configuration with runtime parameters.""" super().validate(n_gpus, train_batch_size, model_config) if self.strategy in {"fsdp", "fsdp2"} and self.ulysses_sequence_parallel_size > 1: if model_config and not model_config.get("use_remove_padding", False): raise ValueError( "When using sequence parallelism for actor/ref policy, you must enable `use_remove_padding`." ) ================================================ FILE: verl_distillation/verl/workers/config/critic.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from dataclasses import dataclass, field from typing import Optional from omegaconf import MISSING from verl.base_config import BaseConfig from verl.trainer.config import BaseModelConfig, CheckpointConfig from verl.utils.profiler import ProfilerConfig from .engine import FSDPEngineConfig, McoreEngineConfig from .model import HFModelConfig from .optimizer import OptimizerConfig __all__ = ["CriticConfig", "FSDPCriticConfig", "McoreCriticConfig", "FSDPCriticModelCfg"] @dataclass class CriticConfig(BaseConfig): """Configuration for critic model training. The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config. Args: strategy (str): Strategy used for critic model training (fsdp, fsdp2, megatron). ppo_micro_batch_size_per_gpu (int): Local per-GPU micro batch size. rollout_n (int): Number of rollouts per update (mirrors actor rollout_n). optim (Dict[str, Any]): Optimizer configuration including lr, weight_decay, etc. model (Dict[str, Any]): Model configuration including path, tokenizer_path, etc. ppo_mini_batch_size (int): PPO mini-batch size per update. ppo_micro_batch_size (Optional[int]): Global micro batch size (deprecated). use_dynamic_bsz (bool): Whether to automatically adjust batch size at runtime. ppo_max_token_len_per_gpu (int): Max tokens per GPU in one PPO batch. forward_max_token_len_per_gpu (int): Max token length per GPU in forward pass. ppo_epochs (int): Number of PPO epochs per batch. shuffle (bool): Shuffle training data across PPO epochs. cliprange_value (float): PPO value function clipping range. loss_agg_mode (str): Loss aggregation mode. checkpoint (Dict[str, Any]): Checkpoint configuration. profiler (Dict[str, Any]): Profiler configuration. 
enable (Optional[bool]): Whether to enable the critic. """ _mutable_fields = BaseConfig._mutable_fields | { "ppo_micro_batch_size_per_gpu", "ppo_mini_batch_size", "ppo_micro_batch_size", "model_config", } strategy: str = MISSING ppo_micro_batch_size_per_gpu: Optional[int] = None enable: Optional[bool] = None rollout_n: int = 1 ppo_mini_batch_size: int = 1 use_dynamic_bsz: bool = False ppo_max_token_len_per_gpu: int = 32768 # deprecate this forward_max_token_len_per_gpu: int = 32768 ppo_infer_micro_batch_size_per_gpu: Optional[int] = None ppo_infer_max_token_len_per_gpu: int = 32768 ppo_epochs: int = 1 data_loader_seed: int = 1 shuffle: bool = True cliprange_value: float = 0.5 loss_agg_mode: str = "token-mean" ppo_micro_batch_size: Optional[int] = None engine: BaseConfig = field(default_factory=BaseConfig) optim: OptimizerConfig = field(default_factory=OptimizerConfig) # deprecate model to favor model_config model: BaseModelConfig = field(default_factory=BaseModelConfig) model_config: HFModelConfig = None checkpoint: CheckpointConfig = field(default_factory=CheckpointConfig) profiler: ProfilerConfig = field(default_factory=ProfilerConfig) def __post_init__(self): """Validate critic configuration parameters.""" assert self.strategy != MISSING if self.model_config is None: warnings.warn("using model in Critic Config is deprecated, please use model_config instead", stacklevel=2) self.model_config = self.model if not self.use_dynamic_bsz: self._check_mutually_exclusive(self.ppo_micro_batch_size, self.ppo_micro_batch_size_per_gpu, "critic") if self.ppo_micro_batch_size is not None: if self.ppo_mini_batch_size % self.ppo_micro_batch_size != 0: raise ValueError( f"[critic] ppo_mini_batch_size ({self.ppo_mini_batch_size}) must be divisible by " f"ppo_micro_batch_size ({self.ppo_micro_batch_size})" ) def validate(self, n_gpus: int, train_batch_size: int): """Validate critic configuration with runtime parameters. Args: n_gpus: Total number of GPUs available train_batch_size: Training batch size from data config """ if not self.use_dynamic_bsz: if train_batch_size < self.ppo_mini_batch_size: raise ValueError( f"train_batch_size ({train_batch_size}) must be >= " f"critic.ppo_mini_batch_size ({self.ppo_mini_batch_size})" ) @staticmethod def _check_mutually_exclusive(mbs, mbs_per_gpu, name: str): """Validate mutually exclusive micro batch size configuration options. Ensures that users don't set both deprecated micro_batch_size and the new micro_batch_size_per_gpu parameters simultaneously. Args: mbs: Deprecated micro batch size parameter value. mbs_per_gpu: New micro batch size per GPU parameter value. name (str): Configuration section name for error messages. Raises: ValueError: If both parameters are set or neither is set. """ param = "micro_batch_size" param_per_gpu = f"{param}_per_gpu" if mbs is None and mbs_per_gpu is None: raise ValueError(f"[{name}] Please set at least one of '{name}.{param}' or '{name}.{param_per_gpu}'.") if mbs is not None and mbs_per_gpu is not None: raise ValueError( f"[{name}] You have set both '{name}.{param}' AND '{name}.{param_per_gpu}'. Please remove " f"'{name}.{param}' because only '*_{param_per_gpu}' is supported (the former is deprecated)." ) @dataclass class McoreCriticConfig(CriticConfig): """Configuration for Megatron-based critic model training. The inheritance from CriticConfig provides all base critic configuration plus Megatron-specific settings. Args: nccl_timeout (int): NCCL timeout in seconds for distributed operations. 
megatron (Dict[str, Any]): Megatron-specific parallelism settings. load_weight (bool): Whether to load initial weights. data_loader_seed (Optional[int]): Seed for data loader. """ strategy: str = "megatron" nccl_timeout: int = 600 megatron: McoreEngineConfig = field(default_factory=McoreEngineConfig) load_weight: bool = True data_loader_seed: Optional[int] = None def validate(self, n_gpus: int, train_batch_size: int): """Validate Megatron critic configuration with runtime parameters.""" super().validate(n_gpus, train_batch_size) @dataclass class FSDPCriticConfig(CriticConfig): """Configuration for FSDP-based critic model training. The inheritance from CriticConfig provides all base critic configuration plus FSDP-specific settings. Args: forward_micro_batch_size (int): Forward-only batch size during inference (global). forward_micro_batch_size_per_gpu (int): Forward-only batch size during inference (per GPU). ulysses_sequence_parallel_size (int): Sequence parallelism size for Ulysses-style model parallelism. grad_clip (float): Gradient clipping for critic updates. """ _mutable_fields = CriticConfig._mutable_fields | { "forward_micro_batch_size", "forward_micro_batch_size_per_gpu", } strategy: str = "fsdp" forward_micro_batch_size: int = 1 forward_micro_batch_size_per_gpu: int = 1 ulysses_sequence_parallel_size: int = 1 grad_clip: float = 1.0 def __post_init__(self): """Validate FSDP critic configuration parameters.""" super().__post_init__() if self.strategy in {"fsdp", "fsdp2"}: if self.ulysses_sequence_parallel_size > 1: if not self.model.get("use_remove_padding", False): raise ValueError( "When using sequence parallelism for critic, you must enable `use_remove_padding`." ) def validate(self, n_gpus: int, train_batch_size: int): """Validate FSDP critic configuration with runtime parameters.""" super().validate(n_gpus, train_batch_size) if not self.use_dynamic_bsz: sp_size = self.ulysses_sequence_parallel_size if self.ppo_micro_batch_size is not None: if self.ppo_micro_batch_size * sp_size < n_gpus: raise ValueError( f"critic.ppo_micro_batch_size ({self.ppo_micro_batch_size}) * " f"ulysses_sequence_parallel_size ({sp_size}) must be >= n_gpus ({n_gpus})" ) @dataclass class FSDPCriticModelCfg(BaseModelConfig): """FSDP-enabled critic model configuration. Inherits base critic settings and adds distributed-memory and LoRA options. Args: use_shm (bool): Whether to use shared memory for loading the model. enable_activation_offload (bool): Offload activations to CPU to reduce GPU memory usage. use_remove_padding (bool): Use remove-padding optimization (saves compute). enable_gradient_checkpointing (bool): Enable gradient checkpointing for memory efficiency. fsdp_config (FSDPEngineConfig): FSDP-specific configuration block. lora_rank (int): Set to positive value to enable LoRA (e.g., 32). lora_alpha (int): LoRA scaling factor. target_modules (Union[str, List[str]]): LoRA target modules: "all-linear" or list of layer names. """ use_shm: bool = False enable_activation_offload: bool = False use_remove_padding: bool = False enable_gradient_checkpointing: bool = True fsdp_config: FSDPEngineConfig = field(default_factory=FSDPEngineConfig) lora_rank: int = 0 lora_alpha: int = 16 target_modules: str | list[str] = "all-linear" ================================================ FILE: verl_distillation/verl/workers/config/engine.py ================================================ # Copyright 2024 Bytedance Ltd. 
and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from dataclasses import dataclass, field from typing import Any, Optional from verl.base_config import BaseConfig __all__ = ["FSDPEngineConfig", "McoreEngineConfig"] @dataclass class McoreEngineConfig(BaseConfig): """Configuration for Megatron parallelism. The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config. Args: param_offload (bool): Whether to offload parameters to CPU. grad_offload (bool): Whether to offload gradients to CPU. optimizer_offload (bool): Whether to offload optimizer states to CPU. tensor_model_parallel_size (int): Tensor model parallel size. expert_model_parallel_size (int): Expert model parallel size for MoE models. expert_tensor_parallel_size (Optional[int]): Expert tensor parallel size for MoE models. pipeline_model_parallel_size (int): Pipeline model parallel size. virtual_pipeline_model_parallel_size (Optional[int]): Virtual pipeline model parallel size for interleaved scheduling. context_parallel_size (int): Context parallel size for long sequences. sequence_parallel (bool): Whether to enable sequence parallelism. use_distributed_optimizer (bool): Whether to use distributed optimizer. use_dist_checkpointing (bool): Whether to use distributed checkpointing. dist_checkpointing_path (Optional[str]): Path for distributed checkpointing. seed (int): Random seed for reproducibility. override_ddp_config (dict[str, Any]): Override configuration for DDP. override_transformer_config (dict[str, Any]): Override configuration for transformer. use_mbridge (bool): Whether to use MBridge for communication. """ # sequence_parallel is not listed as a frozen field for auto-correction purpose _mutable_fields = BaseConfig._mutable_fields | {"sequence_parallel"} param_offload: bool = False grad_offload: bool = False optimizer_offload: bool = False tensor_model_parallel_size: int = 1 expert_model_parallel_size: int = 1 expert_tensor_parallel_size: Optional[int] = None pipeline_model_parallel_size: int = 1 virtual_pipeline_model_parallel_size: Optional[int] = None context_parallel_size: int = 1 sequence_parallel: bool = True use_distributed_optimizer: bool = True use_dist_checkpointing: bool = False dist_checkpointing_path: Optional[str] = None seed: int = 42 override_ddp_config: dict[str, Any] = field(default_factory=dict) override_transformer_config: dict[str, Any] = field(default_factory=dict) override_mcore_model_config: dict[str, Any] = field(default_factory=dict) use_mbridge: bool = False forward_only: bool = False strategy: str = "megatron" def __post_init__(self) -> None: """config validation logics go here""" assert self.strategy == "megatron" if self.tensor_model_parallel_size == 1: warnings.warn("set sequence parallel to false as TP size is 1", stacklevel=2) self.sequence_parallel = False @dataclass class FSDPEngineConfig(BaseConfig): """Configuration for FSDP (Fully Sharded Data Parallel). 
The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config. Args: wrap_policy (Dict[str, Any]): Configuration for FSDP wrap policy. param_offload (bool): Whether to offload parameters to CPU, default False optimizer_offload (bool): Whether to offload optimizer states to CPU, default False offload_policy (bool): Whether to offload policy model parameters, default False reshard_after_forward (bool): Whether to reshard parameters after forward pass, default True fsdp_size (int): FSDP group size. -1 means use all available GPUs. forward_prefetch (bool): Whether to prefetch parameters for next forward pass, default False model_dtype (str): Model data type used to initialize the transformers model. default "fp32" use_orig_params (bool): Whether to use original parameters when initialize FSDP1, default False mixed_precision (Optional[dict[str, Any]]): Mixed precision configuration for FSDP, default None """ wrap_policy: dict[str, Any] = field(default_factory=dict) param_offload: bool = False optimizer_offload: bool = False offload_policy: bool = False reshard_after_forward: bool = True fsdp_size: int = -1 forward_prefetch: bool = False model_dtype: str = "fp32" use_orig_params: bool = False mixed_precision: Optional[dict[str, Any]] = None ulysses_sequence_parallel_size: int = 1 entropy_from_logits_with_chunking: bool = False use_torch_compile: bool = True entropy_checkpointing: bool = False forward_only: bool = False strategy: str = "fsdp" def __post_init__(self): assert self.strategy in ["fsdp", "fsdp2"], f"strategy {self.strategy} not supported" ================================================ FILE: verl_distillation/verl/workers/config/model.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from typing import Any, Optional from omegaconf import MISSING from transformers import AutoConfig from verl.base_config import BaseConfig from verl.utils import hf_processor, hf_tokenizer from verl.utils.fs import copy_to_local from verl.utils.import_utils import import_external_libs from verl.utils.model import get_generation_config, update_model_config __all__ = ["HFModelConfig"] @dataclass class HFModelConfig(BaseConfig): # note that we separate model_path, model_config_path and tokenizer_path in case they are different _mutable_fields = { "hf_config_path", "tokenizer_path", "hf_config", "generation_config", "tokenizer", "processor", "local_path", "architectures", "local_hf_config_path", "local_tokenizer_path", } path: str = MISSING local_path: Optional[str] = None hf_config_path: Optional[str] = None local_hf_config_path: Optional[str] = None tokenizer_path: Optional[str] = None local_tokenizer_path: Optional[str] = None # whether to load tokenizer. 
# This is useful when we only want to load model config
    load_tokenizer: bool = True

    hf_config: Any = None
    generation_config: Any = None
    tokenizer: Any = None
    processor: Any = None

    # whether to use shared memory
    use_shm: bool = False
    trust_remote_code: bool = False
    # custom chat template for the model
    custom_chat_template: Optional[str] = None
    external_lib: Optional[str] = None
    override_config: dict = field(default_factory=dict)
    enable_gradient_checkpointing: bool = True
    enable_activation_offload: bool = False
    use_remove_padding: bool = False

    # lora related. We may set up a separate config later
    lora_rank: int = 0
    lora_alpha: int = 16
    target_modules: Optional[str] = "all-linear"
    exclude_modules: Optional[str] = None
    # path to pre-trained LoRA adapter to load for continued training
    lora_adapter_path: Optional[str] = None

    use_liger: bool = False
    use_fused_kernels: bool = False
    fused_kernel_options: dict = field(default_factory=dict)

    architectures: Optional[list[str]] = None

    def __post_init__(self):
        import_external_libs(self.external_lib)

        if self.hf_config_path is None:
            self.hf_config_path = self.path
        if self.tokenizer_path is None:
            self.tokenizer_path = self.path

        self.local_path = copy_to_local(self.path, use_shm=self.use_shm)

        # construct tokenizer
        if self.load_tokenizer:
            self.local_tokenizer_path = copy_to_local(self.tokenizer_path, use_shm=self.use_shm)
            self.tokenizer = hf_tokenizer(self.local_tokenizer_path, trust_remote_code=self.trust_remote_code)
            self.processor = hf_processor(self.local_tokenizer_path, trust_remote_code=self.trust_remote_code)
            if self.custom_chat_template is not None:
                if self.processor is not None:
                    self.processor.chat_template = self.custom_chat_template
                else:
                    self.tokenizer.chat_template = self.custom_chat_template

        self.local_hf_config_path = copy_to_local(self.hf_config_path, use_shm=self.use_shm)
        self.generation_config = get_generation_config(
            self.local_hf_config_path, trust_remote_code=self.trust_remote_code
        )

        # construct hf_config
        attn_implementation = self.override_config.get("attn_implementation", "flash_attention_2")
        self.hf_config = AutoConfig.from_pretrained(
            self.local_hf_config_path, trust_remote_code=self.trust_remote_code, attn_implementation=attn_implementation
        )
        override_config_kwargs = {}
        if self.tokenizer is not None:
            override_config_kwargs.update(
                {
                    "bos_token_id": self.tokenizer.bos_token_id,
                    "eos_token_id": self.tokenizer.eos_token_id,
                    "pad_token_id": self.tokenizer.pad_token_id,
                }
            )
        # TODO: (vermouth1992). self.config.model in megatron differs from that of fsdp in the override_config.
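        # Illustrative (values assumed, not from the source): an override_config of
        # {"model_config": {"rope_scaling": {"type": "linear"}}} applies only the nested
        # "model_config" dict in the assignment below, while a flat override_config such as
        # {"rope_scaling": {"type": "linear"}} is applied as-is.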
override_config = ( self.override_config["model_config"] if "model_config" in self.override_config else self.override_config ) override_config_kwargs.update(override_config) update_model_config(self.hf_config, override_config_kwargs=override_config_kwargs) self.share_embeddings_and_output_weights = getattr(self.hf_config, "tie_word_embeddings", False) # get model architectures self.architectures = getattr(self.hf_config, "architectures", None) assert self.architectures is not None and len(self.architectures) == 1, ( "Expect only one architecture, got {}".format(self.architectures) ) # per model patch if getattr(self.hf_config, "model_type", None) == "kimi_vl": self.hf_config.text_config.topk_method = "greedy" def get_processor(self): return self.processor if self.processor is not None else self.tokenizer ================================================ FILE: verl_distillation/verl/workers/config/optimizer.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from dataclasses import dataclass from typing import Optional from omegaconf import MISSING from verl.base_config import BaseConfig __all__ = ["OptimizerConfig", "FSDPOptimizerConfig", "McoreOptimizerConfig", "build_optimizer"] @dataclass class OptimizerConfig(BaseConfig): """Base optimizer configuration. Args: lr (float): learning rate. Must be specified. lr_warmup_steps_ratio (float): Warmup steps ratio; total steps will be injected at runtime. total_training_steps (int): Total training steps (must be overridden at runtime). weight_decay (float): Weight decay factor. lr_warmup_steps (Optional[int]): Number of warmup steps; None delegates to lr_warmup_steps_ratio. """ _mutable_fields = {"clip_grad", "total_training_steps", "lr_warmup_steps"} lr: float = 1e-3 lr_warmup_steps_ratio: float = 0.0 total_training_steps: int = -1 weight_decay: float = 0.01 lr_warmup_steps: Optional[int] = -1 betas: tuple[float, float] = (0.9, 0.999) clip_grad: float = 1.0 # deprecate grad_clip grad_clip: Optional[float] = None def __post_init__(self): assert self.lr != MISSING if self.grad_clip is not None: warnings.warn("`grad_clip` is deprecated, use `clip_grad` instead.", DeprecationWarning, stacklevel=2) self.clip_grad = self.grad_clip @dataclass class FSDPOptimizerConfig(OptimizerConfig): """FSDP optimizer configuration extending base OptimizerConfig. Args: optimizer (str): Optimizer class name (e.g., "AdamW", "AdamW8bit", "_AdamW"). optimizer_impl (str): Module path to import optimizer from (e.g., "torch.optim", "torchao.optim", "bitsandbytes.optim"). lr (float): Learning rate. min_lr_ratio (Optional[float]): Minimum LR ratio for cosine schedule. lr_scheduler_type (str): LR scheduler type: "constant" or "cosine". num_cycles (float): Number of cosine cycles in LR schedule. 
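    Example (illustrative; values assumed, not taken from the repo):

        cfg = FSDPOptimizerConfig(lr=1e-5, lr_scheduler_type="cosine", num_cycles=0.5)
        # optimizer/optimizer_impl default to AdamW from torch.optim
        opt = build_optimizer(model.parameters(), cfg)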
""" _mutable_fields = OptimizerConfig._mutable_fields.copy() _mutable_fields.add("lr_scheduler_type") optimizer: str = "AdamW" optimizer_impl: str = "torch.optim" min_lr_ratio: Optional[float] = None # deprecate warmup_style warmup_style: Optional[str] = None lr_scheduler_type: str = "constant" num_cycles: float = 0.5 override_optimizer_config: Optional[dict] = None def __post_init__(self): if self.warmup_style is not None: assert self.warmup_style in ["constant", "cosine"] warnings.warn( "`warmup_style` is deprecated, use `lr_scheduler_type` instead.", DeprecationWarning, stacklevel=2 ) self.lr_scheduler_type = self.warmup_style assert self.lr_scheduler_type in ["constant", "cosine"] return super().__post_init__() @dataclass class McoreOptimizerConfig(OptimizerConfig): """Mcore optimizer configuration extending base OptimizerConfig. Args: optimizer (str): Optimizer name; default is "adam". lr (float): Learning rate. clip_grad (float): Gradient clipping norm. lr_warmup_init (float): Initial learning rate for warmup; defaults to 0.0. lr_decay_steps (Optional[int]): Number of decay steps. lr_decay_style (str): LR decay style: "constant", "linear", "cosine", or "inverse_square_root". min_lr (float): Minimum learning rate. weight_decay_incr_style (str): Weight decay increment style: "constant" or "cosine". lr_wsd_decay_style (str): Weight-standard-deviation decay style: "constant", "exponential", or "cosine". lr_wsd_decay_steps (Optional[int]): Number of steps for weight-standard-deviation decay. use_checkpoint_opt_param_scheduler (bool): Whether to use checkpoint optimizer parameter scheduler. """ optimizer: str = "adam" lr_warmup_init: float = 0.0 lr_decay_steps: Optional[int] = None lr_decay_style: str = "linear" min_lr: float = 0.0 weight_decay_incr_style: str = "constant" lr_wsd_decay_style: str = "exponential" lr_wsd_decay_steps: Optional[int] = None use_checkpoint_opt_param_scheduler: bool = False override_optimizer_config: Optional[dict] = None def build_optimizer(parameters, config: FSDPOptimizerConfig): """Build an optimizer based on the configuration. Dynamically imports and instantiates an optimizer class from the specified module. Args: parameters: Model parameters to optimize config: FSDPOptimizerConfig with optimizer settings Returns: Optimizer instance Examples: # PyTorch AdamW config.optimizer_impl = "torch.optim" config.optimizer = "AdamW" # TorchAO AdamW with bf16 stochastic rounding config.optimizer_impl = "torchao.optim" config.optimizer = "_AdamW" config.override_optimizer_config = {"bf16_stochastic_round": True} # BitsAndBytes AdamW 8bit config.optimizer_impl = "bitsandbytes.optim" config.optimizer = "AdamW8bit" """ import importlib optimizer_args = { "lr": config.lr, "weight_decay": config.weight_decay, } optimizer_name_lower = config.optimizer.lower() if "adam" in optimizer_name_lower or "ademamix" in optimizer_name_lower: optimizer_args["betas"] = config.betas if config.override_optimizer_config is not None: optimizer_args.update(config.override_optimizer_config) try: module = importlib.import_module(config.optimizer_impl) optimizer_cls = getattr(module, config.optimizer) except ImportError as e: raise ImportError( f"Failed to import module '{config.optimizer_impl}'. Make sure the package is installed. Error: {e}" ) from e except AttributeError as e: raise AttributeError( f"Optimizer '{config.optimizer}' not found in module '{config.optimizer_impl}'. 
" f"Available optimizers: {dir(module)}" ) from e return optimizer_cls(parameters, **optimizer_args) ================================================ FILE: verl_distillation/verl/workers/config/reward_model.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from typing import Optional from verl.base_config import BaseConfig from .model import HFModelConfig from .rollout import RolloutConfig __all__ = ["SandboxFusionConfig", "RewardModelConfig"] @dataclass class SandboxFusionConfig(BaseConfig): """Configuration for cloud/local sandbox fusion. Args: url (Optional[str]): Cloud/local function URL for sandbox execution. max_concurrent (int): Max concurrent requests allowed to sandbox. memory_limit_mb (int): Max memory limit for each sandbox process in MB. """ url: Optional[str] = None max_concurrent: int = 64 memory_limit_mb: int = 1024 @dataclass class RewardModelConfig(BaseConfig): _mutable_fields = BaseConfig._mutable_fields reward_manager: str = "naive" enable: bool = False enable_resource_pool: bool = False n_gpus_per_node: int = 0 nnodes: int = 0 # reward model args rollout: RolloutConfig = field(default_factory=RolloutConfig) model: HFModelConfig = field(default_factory=HFModelConfig) sandbox_fusion: SandboxFusionConfig = field(default_factory=SandboxFusionConfig) ================================================ FILE: verl_distillation/verl/workers/config/rollout.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from dataclasses import dataclass, field from typing import Optional from omegaconf import MISSING from verl.base_config import BaseConfig from verl.utils.profiler import ProfilerConfig __all__ = [ "SamplingConfig", "MultiTurnConfig", "CustomAsyncServerConfig", "AgentLoopConfig", "TraceConfig", "ServerConfig", "RolloutConfig", ] @dataclass class SamplingConfig(BaseConfig): temperature: float = 1.0 top_k: int = -1 top_p: float = 1.0 do_sample: bool = True n: int = 1 @dataclass class MultiTurnConfig(BaseConfig): _mutable_fields = {"max_assistant_turns", "max_user_turns"} enable: bool = False max_assistant_turns: Optional[int] = None tool_config_path: Optional[str] = None max_user_turns: Optional[int] = None max_parallel_calls: int = 1 max_tool_response_length: int = 256 tool_response_truncate_side: str = "middle" interaction_config_path: Optional[str] = None use_inference_chat_template: bool = False tokenization_sanity_check_mode: str = "strict" format: str = "hermes" num_repeat_rollouts: Optional[int] = None @dataclass class CustomAsyncServerConfig(BaseConfig): path: Optional[str] = None name: Optional[str] = None @dataclass class AgentLoopConfig(BaseConfig): num_workers: int = 8 default_agent_loop: str = "single_turn_agent" agent_loop_config_path: Optional[str] = None custom_async_server: CustomAsyncServerConfig = field(default_factory=CustomAsyncServerConfig) @dataclass class TraceConfig(BaseConfig): backend: Optional[str] = None token2text: bool = False @dataclass class ServerConfig(BaseConfig): """ Configuration for SGLang server when running in server mode """ timeout: float = 60.0 max_attempts: int = 3 retry_delay: float = 2.0 max_connections: int = 1000 max_start_wait_time: float = 300.0 @dataclass class RolloutConfig(BaseConfig): _mutable_fields = {"max_model_len", "load_format"} name: Optional[str] = MISSING mode: str = "sync" skip_tokenizer_init: bool = True temperature: float = 1.0 top_k: int = -1 top_p: float = 1.0 do_sample: bool = True n: int = 1 # Early termination threshold for multi-turn rollout in sglang. # Abort remaining requests when (1 - over_sample_rate) * total_requests are completed. 
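    # e.g. (illustrative): with over_sample_rate=0.2 and 100 total requests, the remaining
    # in-flight requests are aborted once (1 - 0.2) * 100 = 80 of them have completed.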
over_sample_rate: float = 0.0 prompt_length: int = 512 response_length: int = 512 dtype: str = "bfloat16" gpu_memory_utilization: float = 0.5 ignore_eos: bool = False enforce_eager: bool = True cudagraph_capture_sizes: Optional[list] = None free_cache_engine: bool = True data_parallel_size: int = 1 expert_parallel_size: int = 1 tensor_model_parallel_size: int = 2 pipeline_model_parallel_size: int = 1 max_num_batched_tokens: int = 8192 # TODO: enable train_kwargs # train_sampling_config: SamplingConfig = field(default_factory=SamplingConfig) val_kwargs: SamplingConfig = field(default_factory=SamplingConfig) max_model_len: Optional[int] = None max_num_seqs: int = 1024 # note that the logprob computation should belong to the actor log_prob_micro_batch_size: Optional[int] = None log_prob_micro_batch_size_per_gpu: Optional[int] = None log_prob_use_dynamic_bsz: bool = False log_prob_max_token_len_per_gpu: int = 16384 disable_log_stats: bool = True multi_stage_wake_up: bool = False engine_kwargs: dict = field(default_factory=dict) calculate_log_probs: bool = False extend_vocab_start_token: Optional[int] = None mask_response_if_have_extend_token: bool = False agent: AgentLoopConfig = field(default_factory=AgentLoopConfig) trace: TraceConfig = field(default_factory=TraceConfig) multi_turn: MultiTurnConfig = field(default_factory=MultiTurnConfig) # Server configuration for sglang server mode server: ServerConfig = field(default_factory=ServerConfig) update_weights_bucket_megabytes: int = 512 skip_rollout: bool = False skip_dump_dir: str = "/tmp/rollout_dump" profiler: Optional[ProfilerConfig] = None enable_chunked_prefill: bool = True enable_prefix_caching: bool = True load_format: str = "dummy" layered_summon: bool = False layer_name_map: dict = field(default_factory=dict) sglang_engine_mode: str = "local" limit_images: Optional[int] = None skip_tokenizer_init: bool = False def __post_init__(self): """Validate the rollout config""" if self.expert_parallel_size > 1: assert self.expert_parallel_size == (self.tensor_model_parallel_size * self.data_parallel_size), ( "expert_parallel_size must be equal to tensor_model_parallel_size * data_parallel_size" ) if self.pipeline_model_parallel_size > 1: if self.name == "vllm" or self.name == "sglang": raise NotImplementedError( f"Current rollout {self.name=} not implemented pipeline_model_parallel_size > 1 yet." ) ================================================ FILE: verl_distillation/verl/workers/critic/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .base import BasePPOCritic from .dp_critic import DataParallelPPOCritic __all__ = ["BasePPOCritic", "DataParallelPPOCritic"] ================================================ FILE: verl_distillation/verl/workers/critic/base.py ================================================ # Copyright 2024 Bytedance Ltd. 
and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Base class for a critic """ from abc import ABC, abstractmethod import torch from verl import DataProto __all__ = ["BasePPOCritic"] class BasePPOCritic(ABC): def __init__(self, config): super().__init__() self.config = config @abstractmethod def compute_values(self, data: DataProto) -> torch.Tensor: """Compute values""" pass @abstractmethod def update_critic(self, data: DataProto): """Update the critic""" pass ================================================ FILE: verl_distillation/verl/workers/critic/dp_critic.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Implement a multiprocess PPOCritic """ import logging import os import torch import torch.distributed from torch import nn, optim from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from verl import DataProto from verl.trainer.ppo import core_algos from verl.utils.attention_utils import index_first_axis, pad_input, rearrange, unpad_input from verl.utils.device import get_device_id, get_device_name from verl.utils.fsdp_utils import FSDPModule, fsdp2_clip_grad_norm_ from verl.utils.profiler import GPUMemoryLogger from verl.utils.py_functional import append_to_dict from verl.utils.seqlen_balancing import prepare_dynamic_batch, restore_dynamic_batch from verl.utils.torch_functional import masked_mean from verl.utils.ulysses import gather_outputs_and_unpad, ulysses_pad_and_slice_inputs from verl.workers.critic import BasePPOCritic logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) class DataParallelPPOCritic(BasePPOCritic): def __init__(self, config, critic_module: nn.Module, critic_optimizer: optim.Optimizer): super().__init__(config=config) self.critic_module = critic_module self.critic_optimizer = critic_optimizer self.use_remove_padding = self.config.model.get("use_remove_padding", False) print(f"Critic use_remove_padding={self.use_remove_padding}") self.ulysses_sequence_parallel_size = self.config.get("ulysses_sequence_parallel_size", 1) self.device_name = get_device_name() def _forward_micro_batch(self, micro_batch): response_length = micro_batch["responses"].size(-1) multi_modal_inputs = {} if "multi_modal_inputs" in micro_batch.keys(): from verl.utils.model import extract_multi_modal_inputs multi_modal_inputs = extract_multi_modal_inputs(micro_batch["multi_modal_inputs"]) with torch.autocast(device_type=self.device_name, 
dtype=torch.bfloat16): input_ids = micro_batch["input_ids"] batch, seqlen = input_ids.shape attention_mask = micro_batch["attention_mask"] position_ids = micro_batch["position_ids"] if position_ids.dim() == 3: # qwen2vl mrope position_ids = position_ids.transpose(0, 1) if self.use_remove_padding: input_ids_rmpad, indices, *_ = unpad_input( input_ids.unsqueeze(-1), attention_mask ) # input_ids_rmpad (total_nnz, ...) input_ids_rmpad = input_ids_rmpad.transpose(0, 1) # (1, total_nnz) # unpad the position_ids to align the rotary if position_ids.dim() == 3: position_ids_rmpad = ( index_first_axis(rearrange(position_ids, "c b s ... -> (b s) c ..."), indices) .transpose(0, 1) .unsqueeze(1) ) # (4, bsz, seqlen) -> (4, 1, bsz * seqlen) else: position_ids_rmpad = index_first_axis( rearrange(position_ids.unsqueeze(-1), "b s ... -> (b s) ..."), indices ).transpose(0, 1) # pad and slice the inputs if sp > 1 if self.ulysses_sequence_parallel_size > 1: input_ids_rmpad, position_ids_rmpad, pad_size = ulysses_pad_and_slice_inputs( input_ids_rmpad, position_ids_rmpad, sp_size=self.ulysses_sequence_parallel_size ) # only pass input_ids and position_ids to enable flash_attn_varlen output = self.critic_module( input_ids=input_ids_rmpad, attention_mask=None, position_ids=position_ids_rmpad, **multi_modal_inputs, use_cache=False, ) # prevent model thinks we are generating if hasattr(self.critic_module, "v_head"): # For trl.AutoModelForCausalLMWithValueHead values_rmpad = output[2].squeeze(0).unsqueeze(-1) else: values_rmpad = output.logits values_rmpad = values_rmpad.squeeze(0) # (total_nnz) # gather output if sp > 1 if self.ulysses_sequence_parallel_size > 1: values_rmpad = gather_outputs_and_unpad( values_rmpad, gather_dim=0, unpad_dim=0, padding_size=pad_size ) # pad it back values = pad_input(values_rmpad, indices=indices, batch=batch, seqlen=seqlen).squeeze(-1) values = values[:, -response_length - 1 : -1] else: output = self.critic_module( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, **multi_modal_inputs, use_cache=False, ) # prevent model thinks we are generating if hasattr(self.critic_module, "v_head"): # For trl.AutoModelForCausalLMWithValueHead values = output[2] else: values = output.logits values = values[:, -response_length - 1 : -1].squeeze(-1) return values def _optimizer_step(self): assert self.config.grad_clip is not None if isinstance(self.critic_module, FSDP): grad_norm = self.critic_module.clip_grad_norm_(self.config.grad_clip) elif isinstance(self.critic_module, FSDPModule): grad_norm = fsdp2_clip_grad_norm_(self.critic_module.parameters(), max_norm=self.config.grad_clip) else: grad_norm = torch.nn.utils.clip_grad_norm_(self.critic_module.parameters(), max_norm=self.config.grad_clip) # if grad_norm is not finite, skip the update if not torch.isfinite(grad_norm): print(f"WARN: grad_norm is not finite: {grad_norm}") self.critic_optimizer.zero_grad() else: self.critic_optimizer.step() return grad_norm @GPUMemoryLogger(role="dp critic", logger=logger) def compute_values(self, data: DataProto) -> torch.Tensor: self.critic_module.eval() micro_batch_size = data.meta_info["micro_batch_size"] use_dynamic_bsz = data.meta_info["use_dynamic_bsz"] has_multi_modal_inputs = "multi_modal_inputs" in data.non_tensor_batch.keys() select_keys = ( ["responses", "input_ids", "response_mask", "attention_mask", "position_ids"] if "response_mask" in data.batch else ["responses", "input_ids", "attention_mask", "position_ids"] ) non_tensor_select_keys = ["multi_modal_inputs"] if 
has_multi_modal_inputs else [] data = data.select(batch_keys=select_keys, non_tensor_batch_keys=non_tensor_select_keys) if use_dynamic_bsz: max_token_len = data.meta_info["max_token_len"] * self.ulysses_sequence_parallel_size micro_batches, batch_idx_list = prepare_dynamic_batch(data, max_token_len=max_token_len) else: micro_batches = data.split(micro_batch_size) values_lst = [] for micro_batch in micro_batches: micro_batch = micro_batch.to(get_device_id()) model_inputs = {**micro_batch.batch, **micro_batch.non_tensor_batch} with torch.no_grad(): values = self._forward_micro_batch(model_inputs) values_lst.append(values) values = torch.concat(values_lst, dim=0) if use_dynamic_bsz: values = restore_dynamic_batch(values, batch_idx_list) if "response_mask" in data.batch: response_mask = data.batch["response_mask"] response_mask = response_mask.to(values.device) values = values * response_mask # Only action tokens have values return values @GPUMemoryLogger(role="dp critic", logger=logger) def update_critic(self, data: DataProto): # make sure we are in training mode self.critic_module.train() metrics = {} select_keys = ["input_ids", "responses", "response_mask", "attention_mask", "position_ids", "values", "returns"] has_multi_modal_inputs = "multi_modal_inputs" in data.non_tensor_batch.keys() non_tensor_select_keys = ["multi_modal_inputs"] if has_multi_modal_inputs else [] data = data.select(batch_keys=select_keys, non_tensor_batch_keys=non_tensor_select_keys) # Split to make minibatch iterator for updating the actor # See PPO paper for details. https://arxiv.org/abs/1707.06347 mini_batches = data.split(self.config.ppo_mini_batch_size) for _ in range(self.config.ppo_epochs): for batch_idx, mini_batch in enumerate(mini_batches): if self.config.use_dynamic_bsz: max_token_len = self.config.ppo_max_token_len_per_gpu * self.ulysses_sequence_parallel_size micro_batches, _ = prepare_dynamic_batch(mini_batch, max_token_len=max_token_len) else: self.gradient_accumulation = ( self.config.ppo_mini_batch_size // self.config.ppo_micro_batch_size_per_gpu ) micro_batches = mini_batch.split(self.config.ppo_micro_batch_size_per_gpu) self.critic_optimizer.zero_grad() for micro_batch in micro_batches: micro_batch = micro_batch.to(get_device_id()) micro_batch_metrics = {} model_inputs = {**micro_batch.batch, **micro_batch.non_tensor_batch} response_mask = model_inputs["response_mask"] values = model_inputs["values"] returns = model_inputs["returns"] vpreds = self._forward_micro_batch(model_inputs) vf_loss, vf_clipfrac = core_algos.compute_value_loss( vpreds=vpreds, values=values, returns=returns, response_mask=response_mask, cliprange_value=self.config.cliprange_value, loss_agg_mode=self.config.loss_agg_mode, ) if self.config.use_dynamic_bsz: # relative to the dynamic bsz loss_scale_factor = response_mask.shape[0] / self.config.ppo_mini_batch_size loss = vf_loss * loss_scale_factor else: loss_scale_factor = 1 / self.gradient_accumulation loss = vf_loss * loss_scale_factor loss.backward() micro_batch_metrics.update( { "critic/vf_loss": vf_loss.detach().item() * loss_scale_factor, "critic/vf_clipfrac": vf_clipfrac.detach().item(), "critic/vpred_mean": masked_mean(vpreds, response_mask).detach().item(), } ) append_to_dict(metrics, micro_batch_metrics) grad_norm = self._optimizer_step() mini_batch_metrics = {"critic/grad_norm": grad_norm.detach().item()} append_to_dict(metrics, mini_batch_metrics) self.critic_optimizer.zero_grad() return metrics ================================================ FILE: 
verl_distillation/verl/workers/critic/megatron_critic.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Implement a multiprocess PPOCritic
"""

import itertools
import logging
import os
from functools import partial
from typing import Iterable

import torch
import torch.distributed
from megatron.core import parallel_state as mpu
from megatron.core.optimizer import DistributedOptimizer, OptimizerConfig
from megatron.core.pipeline_parallel import get_forward_backward_func
from omegaconf import OmegaConf
from torch import nn

from verl import DataProto
from verl.trainer.ppo import core_algos
from verl.utils.device import get_device_id, get_torch_device
from verl.utils.megatron.pipeline_parallel import make_batch_generator
from verl.utils.profiler import GPUMemoryLogger
from verl.utils.py_functional import append_to_dict
from verl.utils.seqlen_balancing import get_reverse_idx, rearrange_micro_batches
from verl.utils.torch_functional import broadcast_dict_tensor, masked_mean
from verl.workers.critic import BasePPOCritic

logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))


class MegatronPPOCritic(BasePPOCritic):
    def __init__(
        self,
        config,
        model_config,
        hf_config,
        tf_config,
        critic_module: nn.ModuleList,
        critic_optimizer: DistributedOptimizer,
        critic_optimizer_config: OptimizerConfig,
    ):
        super().__init__(config=config)
        self._validate_config(config)
        self.model_config = model_config
        self.hf_config = hf_config  # huggingface config
        self.tf_config = tf_config  # mcore transformer config
        self.critic_module = critic_module
        self.critic_optimizer = critic_optimizer
        self.critic_optimizer_config = critic_optimizer_config

        # we create a separate config for the optimizer step so that global args won't affect it.
        self.optimizer_step_args = OmegaConf.create(
            {
                "skip_grad": None,
                "overlap_dp_param_comm": False,
                "overlap_dp_grad_comm": False,
                "gradient_accumulation_steps": 1,
                "sequence_parallel": self.tf_config.sequence_parallel,
                "DDP_impl": "local",
                "layernorm_allreduce_bucket_threshold": 0,
                "pipeline_model_parallel_split_rank": None,
                "reduce_grads_use_alltoall": False,
            }
        )

    def _validate_config(self, config) -> None:
        """Validate config options not implemented for Megatron backend"""
        assert config.get("ulysses_sequence_parallel_size", 1) == 1
        if config.shuffle:
            assert config.data_loader_seed is not None, "If shuffle dataloader, seed must be manually set"
        self.config = config

    @GPUMemoryLogger("megatron critic", logger=logger)
    def compute_values(self, data: DataProto) -> DataProto:
        responses = data.batch["responses"]
        attention_mask = data.batch["attention_mask"]
        use_dynamic_bsz = data.meta_info.get("use_dynamic_bsz", False)
        micro_batch_size = data.meta_info.get("micro_batch_size", None)
        max_token_len = data.meta_info.get("max_token_len", None)
        assert micro_batch_size is not None, "micro batch size is needed for forward compute"
        if use_dynamic_bsz:
            assert max_token_len is not None, "max_token_len must be set when use_dynamic_bsz is True"
            max_token_len = max_token_len * self.config.megatron.context_parallel_size
        response_length = responses.size(1)
        with torch.no_grad():
            output = self.forward_backward_batch(
                data=data,
                forward_only=True,
                use_dynamic_bsz=use_dynamic_bsz,
                micro_batch_size=micro_batch_size,
                max_token_len=max_token_len,
                mini_batch_size=None,
            )
            if mpu.is_pipeline_last_stage(ignore_virtual=True):
                # only on the last pp rank; values should be identical on every tp rank
                values = [o["vpreds"] for o in output["output"]]  # (bs, seqlen)
                values = torch.cat(values, dim=0).to(torch.float32)
                if use_dynamic_bsz:
                    indices = output["indices"]
                    indices = list(itertools.chain.from_iterable(indices))
                    assert len(indices) == values.size(0), f"{len(indices)} vs.
{values.size()}" revert_indices = torch.tensor(get_reverse_idx(indices), dtype=torch.long) values = values[revert_indices] else: values = torch.empty_like(attention_mask, dtype=torch.float32) # each tp ranks should contain the same value values = values[ :, -response_length - 1 : -1 ] # Values are predicted at the ends of prefixes, e.g., the last prompt token response_mask = attention_mask[:, -response_length:] values = values * response_mask # Only action tokens have values values = values.contiguous() # sync among pp ranks values = values.to(get_device_id()) torch.distributed.broadcast( tensor=values, src=mpu.get_pipeline_model_parallel_last_rank(), group=mpu.get_pipeline_model_parallel_group(), ) values = values.to("cpu") # add empty cache after each compute get_torch_device().empty_cache() return values def make_minibatch_iterator(self, data: DataProto) -> Iterable[DataProto]: select_keys = ["input_ids", "responses", "attention_mask", "position_ids", "values", "returns"] data = data.select(batch_keys=select_keys) return data.make_iterator( mini_batch_size=self.config.ppo_mini_batch_size, epochs=self.config.ppo_epochs, seed=self.config.data_loader_seed, dataloader_kwargs={"shuffle": self.config.shuffle}, ) def forward_backward_batch( self, data: DataProto, forward_only=False, use_dynamic_bsz=False, micro_batch_size=None, max_token_len=None, mini_batch_size=None, ): # broadcast from last pp rank to all other pp ranks data.to(get_device_id()) mini_batch = data mini_batch.batch = mini_batch.batch.contiguous() broadcast_dict_tensor( mini_batch.batch, src=mpu.get_pipeline_model_parallel_last_rank(), group=mpu.get_pipeline_model_parallel_group(), ) mini_batch.to("cpu") # split into micro-batches mini_batch.batch["attention_mask"] = mini_batch.batch["attention_mask"].to(bool) indices = None if use_dynamic_bsz: assert max_token_len is not None, "max_token_len must be set when use_dynamic_bsz is True" vpp_size = mpu.get_virtual_pipeline_model_parallel_world_size() if vpp_size is not None and vpp_size > 1: microbatch_group_size_per_vp_stage = self.tf_config.microbatch_group_size_per_vp_stage micro_batches, indices = rearrange_micro_batches( batch=mini_batch.batch, num_batches_divided_by=microbatch_group_size_per_vp_stage, max_token_len=max_token_len, ) assert len(micro_batches) % self.tf_config.microbatch_group_size_per_vp_stage == 0, ( f"micro_batches {micro_batches} must be divisible by microbatch_group_size_per_vp_stage " f"{microbatch_group_size_per_vp_stage} for megatron backend" ) else: micro_batches, indices = rearrange_micro_batches(batch=mini_batch.batch, max_token_len=max_token_len) total_seqlen = max_token_len else: assert micro_batch_size is not None, ( "micro_batch_size is needed to be passed in when not using dynamic batch size" ) micro_batches = mini_batch.batch.split(micro_batch_size) seq_len = micro_batches[0]["input_ids"].shape[1] total_seqlen = micro_batch_size * seq_len n_micro_batch = len(micro_batches) forward_backward_func = get_forward_backward_func() def loss_func(output, data, meta_info): nonlocal use_dynamic_bsz if forward_only: return torch.tensor(1.0, device=output.device), {"vpreds": output} responses = data["responses"] attention_mask = data["attention_mask"] values = data["values"] returns = data["returns"] response_length = responses.size(1) response_mask = attention_mask[:, -response_length:] cliprange_value = self.config.cliprange_value vpreds = output # (bs, sequence_length) vpreds = vpreds[:, -response_length - 1 : -1] vf_loss, vf_clipfrac = 
core_algos.compute_value_loss( vpreds=vpreds, values=values, returns=returns, response_mask=response_mask, cliprange_value=cliprange_value, loss_agg_mode=self.config.loss_agg_mode, ) stats = { "critic/vf_loss": vf_loss.detach().item(), "critic/vf_clipfrac": vf_clipfrac.detach().item(), "critic/vpred_mean": masked_mean(vpreds, response_mask).detach().item(), } return vf_loss, stats def forward_step(batch_iter, model): batch = next(batch_iter) batch = batch.to(get_device_id()) batch = batch.contiguous() input_ids = batch["input_ids"] attention_mask = batch["attention_mask"] position_ids = batch["position_ids"] from verl.models.mcore import get_mcore_forward_fn forward_fn = get_mcore_forward_fn(self.hf_config) output = forward_fn( model, input_ids, attention_mask, position_ids, {}, # multi_modal_inputs value_model=True, ) return output, partial(loss_func, data=batch, meta_info={}) # batch should be a list of batches inside micro-batches batch_generator = make_batch_generator(micro_batches, vpp_size=len(self.critic_module)) # TODO: we may use the new schedule instead # for flash-attn: (seq_len, batch_size, hidden_size) = (mbs*seq_len, 1, hidden_size) if mpu.get_pipeline_model_parallel_world_size() > 1: losses_reduced = forward_backward_func( forward_step_func=forward_step, data_iterator=batch_generator, model=self.critic_module, num_microbatches=n_micro_batch, seq_length=total_seqlen, # no use when input_shapes was set micro_batch_size=1, # no use when input_shapes was set forward_only=forward_only, ) else: losses_reduced = forward_backward_func( forward_step_func=forward_step, data_iterator=batch_generator, model=self.critic_module, num_microbatches=n_micro_batch, seq_length=total_seqlen, # in use for pp = 1 micro_batch_size=1, # in use for pp = 1 forward_only=forward_only, ) # loss_reduces contains the stats returned from loss_func losses_reduced = {"output": losses_reduced} if use_dynamic_bsz: losses_reduced["indices"] = indices return losses_reduced @GPUMemoryLogger("megatron critic", logger=logger) def update_critic(self, dataloader: Iterable[DataProto]): metrics = {} for data in dataloader: self.critic_optimizer.zero_grad() # use use_contiguous_buffers_in_local_ddp and no overlap_dp_param_comm for chunk in self.critic_module: chunk.zero_grad_buffer() micro_batch_size = self.config.ppo_micro_batch_size_per_gpu max_token_len = None if self.config.use_dynamic_bsz: max_token_len = self.config.ppo_max_token_len_per_gpu * self.config.megatron.context_parallel_size metric_micro_batch = self.forward_backward_batch( data, forward_only=False, use_dynamic_bsz=self.config.use_dynamic_bsz, micro_batch_size=micro_batch_size, max_token_len=max_token_len, mini_batch_size=self.config.ppo_mini_batch_size, ) metric_micro_batch = metric_micro_batch["output"] update_successful, grad_norm, num_zeros_in_grad = self.critic_optimizer.step() learning_rate = self.critic_optimizer.param_groups[-1]["lr"] data = {"critic/grad_norm": grad_norm, "critic/lr": learning_rate} append_to_dict(metrics, data) if update_successful: # allgather already execute in optimizer.step in new megatron pass else: raise NotImplementedError for metric in metric_micro_batch: append_to_dict(metrics, metric) # append the metric from this micro-batch to global metrics. # add empty cache after each compute get_torch_device().empty_cache() return metrics ================================================ FILE: verl_distillation/verl/workers/engine/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. 
and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .base import BaseEngine, EngineRegistry from .fsdp import FSDPEngine, FSDPEngineWithLMHead __all__ = ["BaseEngine", "EngineRegistry", "FSDPEngine", "FSDPEngineWithLMHead"] # Mindspeed must be imported before Megatron to ensure the related monkey patches take effect as expected try: from .mindspeed import MindspeedEngineWithLMHead __all__ += ["MindspeedEngineWithLMHead"] except ImportError: MindspeedEngineWithLMHead = None try: from .megatron import MegatronEngine, MegatronEngineWithLMHead __all__ += ["MegatronEngine", "MegatronEngineWithLMHead"] except ImportError: MegatronEngine = None MegatronEngineWithLMHead = None ================================================ FILE: verl_distillation/verl/workers/engine/base.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The abstract base class defining the interface for model training engines. """ from typing import Any, Callable, Optional import torch from tensordict import TensorDict from verl.utils.device import get_device_name class BaseEngine: """ Abstract base class defining the interface for model training engines. Interface is subject to change before release. Engine implementations must subclass BaseEngine and provide concrete behavior for all methods. """ def initialize(self): """ Instantiate or load the model, optimizer, and learning rate scheduler. Should prepare all components necessary for training or evaluation. """ raise NotImplementedError def train_mode(self): """ Context manager entry for switching the engine and model into training mode. Usage: with engine.train_mode(): # runs in training mode """ raise NotImplementedError def eval_mode(self): """ Context manager entry for switching the engine and model into evaluation mode. Usage: with engine.eval_mode(): # runs in evaluation mode """ raise NotImplementedError def optimizer_zero_grad(self): """ Zero the gradients of the optimizer. """ raise NotImplementedError def optimizer_step(self): """ Perform an optimization step using the optimizer. """ raise NotImplementedError def lr_scheduler_step(self): """ Advance the learning rate scheduler by one step. Returns: current_lr (float or list[float]): Updated learning rate(s). """ raise NotImplementedError def forward_backward_batch(self, data: TensorDict, loss_function: Callable, forward_only=False) -> Any: """ Perform a forward pass and optionally a backward pass on a batch of data. 
Args: data: The input data for the forward pass, typically containing tensors and metadata. loss_function: The loss function to optimize. See `verl.workers.roles.utils.losses` for examples. forward_only: If True, perform only the forward pass. If False, perform forward and backward pass. Returns: Any: The output of the forward pass, which can be used for loss computation or other purposes. """ raise NotImplementedError def train_batch(self, data: TensorDict, loss_function: Callable) -> Any: """ Perform a training step on a batch of data. Args: data: The input data for training, typically containing tensors and metadata. loss_function: A function that computes the loss and metrics given a batch and predictions. Returns: dict[str, torch.Tensor]: A dictionary containing the aggregated training metrics for the batch. """ self.optimizer_zero_grad() outputs = self.forward_backward_batch(data, loss_function, forward_only=False) grad_norm = self.optimizer_step() if self.is_mp_src_rank_with_outputs(): outputs["metrics"]["grad_norm"] = grad_norm return outputs def infer_batch(self, data: TensorDict, loss_function: Optional[Callable] = None) -> Any: """ Perform inference on a batch of data. Args: data: The input data for inference, typically containing tensors and metadata. Returns: Any: The output of the inference, which can be used for predictions or other purposes. """ with torch.no_grad(): outputs = self.forward_backward_batch(data, loss_function, forward_only=True) return outputs def get_per_tensor_param(self): raise NotImplementedError def get_data_parallel_size(self): raise NotImplementedError def get_data_parallel_rank(self): raise NotImplementedError def get_data_parallel_group(self): raise NotImplementedError def to(self, device: str, model: bool = True, optimizer: bool = True): """ Move model parameters, optimizer states, or both to the specified device. Args: device: Target device identifier. model: If True, move the model. optimizer: If True, move the optimizer states. """ raise NotImplementedError def save_checkpoint( self, local_path: str, hdfs_path: Optional[str] = None, global_step: int = 0, max_ckpt_to_keep: Optional[int] = None, **kwargs, ) -> None: """ Save model, optimizer, and scheduler states to a checkpoint. Args: local_path: Local filesystem path to save checkpoint. hdfs_path: Optional HDFS path to copy checkpoint. global_step: Integer training step number for naming. max_ckpt_to_keep: Maximum number of recent checkpoints to retain. **kwargs: Arbitrary keyword arguments. """ raise NotImplementedError def load_checkpoint( self, local_path: str, hdfs_path: Optional[str] = None, del_local_after_load: bool = True, **kwargs ) -> None: """ Load model, optimizer, and scheduler states from a checkpoint. Args: local_path: Local filesystem path of the checkpoint. hdfs_path: Optional HDFS path where checkpoint is stored. del_local_after_load: Whether to delete local copy after loading. **kwargs: Arbitrary keyword arguments. """ raise NotImplementedError def is_mp_src_rank_with_outputs(self): """ Whether the current rank is the first rank in model parallel group that contains model outputs """ raise NotImplementedError class EngineRegistry: """ A registry for managing and instantiating different types of training engines. This class uses a dictionary to store engine classes, mapping a string key to each class. It provides a decorator `register` to add new engines to the registry and a `new` method to create an instance of a registered engine. 
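Usage (a minimal sketch; `MyEngine` and "my_backend" are hypothetical):

    @EngineRegistry.register(model_type="language_model", backend="my_backend")
    class MyEngine(BaseEngine):
        ...

    engine = EngineRegistry.new("language_model", "my_backend")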
""" _engines = {} @classmethod def register(cls, model_type: str, backend: list[str] | str, device: list[str] | str = "cuda"): """ A class method decorator that registers an engine class with a given key. This allows for dynamic instantiation of engine classes by their registered key. Args: model_type (str): The type of the model backend (list[str] | str): The backend to use for the model type device (list[str] | str): The device type (e.g., "cuda", "npu", "cpu") this engine supports, default is "cuda" Returns: A decorator function that takes an engine class and registers it. """ def decorator(engine_class): assert issubclass(engine_class, BaseEngine) if model_type not in cls._engines: cls._engines[model_type] = {} backends = backend if isinstance(backend, list) else [backend] devices = device if isinstance(device, list) else [device] for current_backend in backends: for current_device in devices: if current_backend not in cls._engines[model_type]: cls._engines[model_type][current_backend] = {} if current_device not in cls._engines[model_type][current_backend]: cls._engines[model_type][current_backend][current_device] = engine_class return engine_class return decorator @classmethod def get_engine_cls(cls, model_type: str, backend: str): assert model_type in cls._engines, f"Unknown model_type: {model_type}" assert backend in cls._engines[model_type], f"Unknown backend: {backend}" device = get_device_name() assert device in cls._engines[model_type][backend], ( f"Unknown device: {device} for model_type: {model_type} and backend: {backend}" ) return cls._engines[model_type][backend][device] @classmethod def new(cls, model_type, backend, *args, **kwargs): """ Function to create a new training engine instance based on the provided config. Args: key: A configuration object containing the engine key and other settings. *args: Variable length argument list. **kwargs: Arbitrary keyword arguments. Returns: engine: An instance of the training engine corresponding to the config. Raises: NotImplementedError: If the engine key in the config does not match any known engines. """ engine_cls = cls.get_engine_cls(model_type, backend) return engine_cls(*args, **kwargs) ================================================ FILE: verl_distillation/verl/workers/engine/fsdp/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .transformer_impl import FSDPEngine, FSDPEngineWithLMHead __all__ = ["FSDPEngine", "FSDPEngineWithLMHead"] ================================================ FILE: verl_distillation/verl/workers/engine/fsdp/transformer_impl.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The concrete Engine implementation using PyTorch FullyShardedDataParallel (FSDP) """ import gc import logging import os import warnings from contextlib import nullcontext from typing import Callable, Optional import torch import torch.distributed from peft import LoraConfig, TaskType, get_peft_model from tensordict import TensorDict from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.api import FullStateDictConfig, ShardedStateDictConfig, StateDictType from torch.distributed.tensor import DTensor import verl.utils.torch_functional as verl_F from verl.models.transformers.monkey_patch import apply_monkey_patch from verl.trainer.config import CheckpointConfig from verl.utils import tensordict_utils as tu from verl.utils.activation_offload import enable_activation_offloading from verl.utils.checkpoint.fsdp_checkpoint_manager import FSDPCheckpointManager from verl.utils.dataset.dataset_utils import DatasetPadMode from verl.utils.debug import log_gpu_memory_usage from verl.utils.device import ( get_device_id, get_device_name, get_torch_device, ) from verl.utils.fsdp_utils import ( CPUOffloadPolicy, FSDPModule, MixedPrecisionPolicy, apply_fsdp2, collect_lora_params, fsdp2_clip_grad_norm_, fsdp2_load_full_state_dict, fsdp_version, get_fsdp_wrap_policy, get_init_weight_context_manager, init_fn, load_fsdp_model_to_gpu, load_fsdp_optimizer, offload_fsdp_model_to_cpu, offload_fsdp_optimizer, replace_lora_wrapper, ) from verl.utils.model import convert_weight_keys from verl.utils.py_functional import convert_to_regular_types from verl.utils.torch_functional import logprobs_from_logits from verl.utils.ulysses import gather_outputs_and_unpad, ulysses_pad, ulysses_pad_and_slice_inputs from verl.workers.config import FSDPEngineConfig, FSDPOptimizerConfig, HFModelConfig from verl.workers.sharding_manager.fsdp_ulysses import FSDPUlyssesShardingManager from ..base import BaseEngine, EngineRegistry from ..utils import postprocess_batch_func, prepare_micro_batches from .utils import create_device_mesh, get_sharding_strategy logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) device_name = get_device_name() class FSDPEngine(BaseEngine): """ Concrete Engine implementation using PyTorch FullyShardedDataParallel (FSDP). Supports model sharding, activation/optimizer offloading, LoRA, and sequence parallelism. """ def __init__( self, model_config: HFModelConfig, engine_config: FSDPEngineConfig, optimizer_config: FSDPOptimizerConfig, checkpoint_config: CheckpointConfig, ): """ Initialize the FSDPEngine. Sets up distributed device meshes, LoRA, and offload policies based on config. Args: model_config: HuggingFace model settings (HFModelConfig). engine_config: FSDP sharding, offload, and sequence-parallel settings (FSDPEngineConfig). optimizer_config: Optimizer settings (FSDPOptimizerConfig). checkpoint_config: Checkpoint contents and retention settings (CheckpointConfig).
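Example (a sketch of typical construction; the config objects are assumed to be built elsewhere):

    engine = FSDPEngine(
        model_config=model_config,            # HFModelConfig
        engine_config=engine_config,          # FSDPEngineConfig
        optimizer_config=optimizer_config,    # FSDPOptimizerConfig
        checkpoint_config=checkpoint_config,  # CheckpointConfig
    )
    engine.initialize()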
""" super().__init__() self.model_config = model_config self.engine_config = engine_config self.optimizer_config = optimizer_config self.checkpoint_config = checkpoint_config self.mode = None self.rank = torch.distributed.get_rank() # build device mesh for Ulysses Sequence Parallel self.use_remove_padding = self.model_config.use_remove_padding self._init_device_mesh() # set FSDP offload params self._is_offload_param = self.engine_config.param_offload self._is_offload_optimizer = self.engine_config.optimizer_offload self._is_lora = self.model_config.lora_rank > 0 if self.engine_config.entropy_from_logits_with_chunking: entropy_from_logits = verl_F.entropy_from_logits_with_chunking else: entropy_from_logits = verl_F.entropy_from_logits self.compute_entropy_from_logits = ( torch.compile(entropy_from_logits, dynamic=True) if self.engine_config.use_torch_compile # use torch compile by default else entropy_from_logits ) def is_mp_src_rank_with_outputs(self): if self.ulysses_device_mesh is not None: is_collect = self.ulysses_device_mesh["sp"].get_local_rank() == 0 else: is_collect = True return is_collect def initialize(self): """ Build the model, optimizer, and learning rate scheduler under FSDP. Applies device, dtype, and precision configurations, including mixed precision. Sets up checkpoint manager and FLOPs counter. """ # This is used to import external_lib into the huggingface systems self._build_model_optimizer() if self._is_offload_param: offload_fsdp_model_to_cpu(self.module) log_gpu_memory_usage("After offload model during init", logger=logger) if self._is_offload_optimizer: offload_fsdp_optimizer(optimizer=self.optimizer) log_gpu_memory_usage("After offload optimizer during init", logger=logger) self.checkpoint_manager = FSDPCheckpointManager( model=self.module, optimizer=self.optimizer, lr_scheduler=self.lr_scheduler, processing_class=self.model_config.get_processor(), checkpoint_contents=self.checkpoint_config, ) def _init_device_mesh(self): world_size = torch.distributed.get_world_size() from torch.distributed.device_mesh import init_device_mesh fsdp_size = self.engine_config.fsdp_size self.device_mesh = create_device_mesh(world_size=world_size, fsdp_size=fsdp_size) self.ulysses_device_mesh = None self.ulysses_sequence_parallel_size = self.engine_config.ulysses_sequence_parallel_size dp_size = self.get_data_parallel_size() if self.ulysses_sequence_parallel_size > 1: self.ulysses_device_mesh = init_device_mesh( device_name, mesh_shape=(dp_size, self.ulysses_sequence_parallel_size), mesh_dim_names=["dp", "sp"] ) self.ulysses_sharding_manager = FSDPUlyssesShardingManager(self.ulysses_device_mesh) self.use_ulysses_sp = self.ulysses_sequence_parallel_size > 1 def _build_module(self): from verl.utils.model import get_hf_auto_model_class from verl.utils.torch_dtypes import PrecisionType torch_dtype = self.engine_config.model_dtype if torch_dtype is None: # if it is training, we force torch_dtype to fp32 torch_dtype = torch.float32 if not self.engine_config.forward_only else torch.bfloat16 torch_dtype = PrecisionType.to_dtype(torch_dtype) init_context = get_init_weight_context_manager( use_meta_tensor=not self.model_config.hf_config.tie_word_embeddings, mesh=self.device_mesh ) with init_context(), warnings.catch_warnings(): warnings.simplefilter("ignore") auto_class = get_hf_auto_model_class(hf_config=self.model_config.hf_config) module = auto_class.from_pretrained( pretrained_model_name_or_path=self.model_config.local_path, torch_dtype=torch_dtype, config=self.model_config.hf_config, 
trust_remote_code=self.model_config.trust_remote_code, ) use_liger = self.model_config.use_liger # Apply Liger kernel to the model if use_liger is set to True if use_liger: from liger_kernel.transformers.monkey_patch import _apply_liger_kernel_to_instance _apply_liger_kernel_to_instance(model=module) fused_kernel_options = self.model_config.fused_kernel_options fused_kernels_backend = ( fused_kernel_options.get("impl_backend", None) if fused_kernel_options is not None else None ) use_fused_kernels = self.model_config.use_fused_kernels apply_monkey_patch( model=module, use_remove_padding=self.use_remove_padding, ulysses_sp_size=self.ulysses_sequence_parallel_size, use_fused_kernels=use_fused_kernels, fused_kernels_backend=fused_kernels_backend, ) # some parameters may not be in torch_dtype module.to(torch_dtype) if self.model_config.enable_gradient_checkpointing: module.gradient_checkpointing_enable(gradient_checkpointing_kwargs={"use_reentrant": False}) return module def _build_lora_module(self, module): module.enable_input_require_grads() lora_adapter_path = getattr(self.model_config, "lora_adapter_path", None) if lora_adapter_path is not None: from peft import PeftModel from verl.utils.fs import copy_to_local print(f"Loading pre-trained LoRA adapter from: {lora_adapter_path}") # Copy adapter to local if needed local_adapter_path = copy_to_local(lora_adapter_path, use_shm=self.model_config.use_shm) module = PeftModel.from_pretrained(module, local_adapter_path, is_trainable=True) peft_config = module.peft_config["default"] # Ensure task_type is TaskType enum, not string if isinstance(peft_config.task_type, str): peft_config.task_type = TaskType.CAUSAL_LM else: # Convert config to regular Python types before creating PEFT model lora_config = { "task_type": TaskType.CAUSAL_LM, "r": self.model_config.lora_rank, "lora_alpha": self.model_config.lora_alpha, "target_modules": convert_to_regular_types(self.model_config.target_modules), "exclude_modules": convert_to_regular_types(self.model_config.exclude_modules), "bias": "none", } module = get_peft_model(module, LoraConfig(**lora_config)) return module def _build_fsdp_module(self, module): # TODO(ziheng): need to improve from torch.distributed.fsdp import CPUOffload, MixedPrecision from verl.utils.torch_dtypes import PrecisionType mixed_precision_config = self.engine_config.mixed_precision if mixed_precision_config is not None: param_dtype = PrecisionType.to_dtype(mixed_precision_config.get("param_dtype", "bf16")) reduce_dtype = PrecisionType.to_dtype(mixed_precision_config.get("reduce_dtype", "fp32")) buffer_dtype = PrecisionType.to_dtype(mixed_precision_config.get("buffer_dtype", "fp32")) else: param_dtype = torch.bfloat16 reduce_dtype = torch.float32 buffer_dtype = torch.float32 mixed_precision = MixedPrecision(param_dtype=param_dtype, reduce_dtype=reduce_dtype, buffer_dtype=buffer_dtype) auto_wrap_policy = get_fsdp_wrap_policy( module=module, config=self.engine_config.wrap_policy, is_lora=self.model_config.lora_rank > 0, ) fsdp_mesh = self.device_mesh sharding_strategy = get_sharding_strategy(fsdp_mesh) # Note: We force turn off CPUOffload because it causes incorrect results when using grad accumulation if self.engine_config.strategy == "fsdp": # cpu_offload: # - actor: None # - critic: None # - ref: CPUOffload(offload_params=True) # We force reference policy to use CPUOffload to save memory.
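# The net effect can be summarized as (an illustrative sketch, mirroring the code below):
#     cpu_offload = CPUOffload(offload_params=True) if self.engine_config.forward_only else None
# i.e. only forward-only (reference) engines take the CPUOffload path.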
# We force turn off CPUOffload for actor because it causes incorrect results when using grad accumulation cpu_offload = None if self.engine_config.forward_only: cpu_offload = CPUOffload(offload_params=True) self._is_offload_param = False self._is_offload_optimizer = False module = FSDP( module, param_init_fn=init_fn, auto_wrap_policy=auto_wrap_policy, device_id=get_device_id(), sharding_strategy=sharding_strategy, mixed_precision=mixed_precision, sync_module_states=True, device_mesh=self.device_mesh, forward_prefetch=self.engine_config.forward_prefetch, use_orig_params=self.engine_config.use_orig_params, cpu_offload=cpu_offload, ) elif self.engine_config.strategy == "fsdp2": # - actor: offload_policy # - critic: offload_policy # - ref: CPUOffloadPolicy(pin_memory=True) assert CPUOffloadPolicy is not None, "PyTorch version >= 2.4 is required for using fully_shard API (FSDP2)" mp_policy = MixedPrecisionPolicy( param_dtype=param_dtype, reduce_dtype=reduce_dtype, cast_forward_inputs=True ) offload_policy = None if self.engine_config.offload_policy or self.engine_config.forward_only: self._is_offload_param = False self._is_offload_optimizer = False offload_policy = CPUOffloadPolicy(pin_memory=True) fsdp_kwargs = { "mesh": fsdp_mesh, "mp_policy": mp_policy, "offload_policy": offload_policy, "reshard_after_forward": self.engine_config.reshard_after_forward, } full_state = module.state_dict() apply_fsdp2(module, fsdp_kwargs, self.engine_config) fsdp2_load_full_state_dict(module, full_state, fsdp_mesh, offload_policy) else: raise NotImplementedError(f"Unknown strategy {self.engine_config.strategy}") if self.model_config.enable_activation_offload: enable_gradient_checkpointing = self.model_config.enable_gradient_checkpointing enable_activation_offloading(module, self.engine_config.strategy, enable_gradient_checkpointing) if torch.distributed.get_world_size() == 1 and fsdp_version(module) == 1: FSDP.set_state_dict_type( module, state_dict_type=StateDictType.FULL_STATE_DICT, state_dict_config=FullStateDictConfig(), ) elif fsdp_version(module) == 1: FSDP.set_state_dict_type( module, state_dict_type=StateDictType.SHARDED_STATE_DICT, state_dict_config=ShardedStateDictConfig(), ) return module def _build_optimizer(self, module): from verl.workers.config.optimizer import build_optimizer optimizer = build_optimizer(module.parameters(), self.optimizer_config) return optimizer def _build_lr_scheduler(self, optimizer): from verl.utils.torch_functional import get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup optim_config = self.optimizer_config total_steps = optim_config.total_training_steps num_warmup_steps = optim_config.lr_warmup_steps lr_scheduler_type = optim_config.lr_scheduler_type min_lr_ratio = optim_config.min_lr_ratio num_cycles = optim_config.num_cycles if num_warmup_steps <= 0: num_warmup_steps_ratio = optim_config.lr_warmup_steps_ratio num_warmup_steps = int(num_warmup_steps_ratio * total_steps) if self.rank == 0: print(f"Total steps: {total_steps}, num_warmup_steps: {num_warmup_steps}") if lr_scheduler_type == "constant": lr_scheduler = get_constant_schedule_with_warmup(optimizer=optimizer, num_warmup_steps=num_warmup_steps) elif lr_scheduler_type == "cosine": lr_scheduler = get_cosine_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=total_steps, min_lr_ratio=min_lr_ratio, num_cycles=num_cycles, ) else: raise NotImplementedError(f"LR scheduler type {lr_scheduler_type} is not supported") return lr_scheduler def 
_build_model_optimizer(self): from verl.utils.model import print_model_size # Load base model with specified configuration and dtype module = self._build_module() # Apply LoRA adapters if low-rank adaptation is enabled if self._is_lora: module = self._build_lora_module(module) # Synchronize all distributed processes before proceeding torch.distributed.barrier() if self.rank == 0: print_model_size(module) log_gpu_memory_usage("After init model from HF AutoModel", logger=logger) # Wrap model with FSDP for distributed training (sharding, mixed precision, etc.) log_gpu_memory_usage("Before FSDP", logger=None) module = self._build_fsdp_module(module) log_gpu_memory_usage("After FSDP", logger=None) if not self.engine_config.forward_only: # Initialize optimizer with model parameters and config settings optimizer = self._build_optimizer(module) # Create learning rate scheduler with warmup and decay settings lr_scheduler = self._build_lr_scheduler(optimizer) else: optimizer = None lr_scheduler = None self.module = module self.optimizer = optimizer self.lr_scheduler = lr_scheduler def train_mode(self): """ Return a context manager that switches to training mode with FSDP-specific handling. Includes parameter and optimizer offload entry/exit. """ return EngineTrainModeCtx(self) def eval_mode(self): """ Return a context manager that switches to evaluation mode with FSDP-specific handling. Includes activation offload entry/exit. """ return EngineEvalModeCtx(self) def get_data_parallel_rank(self): if self.ulysses_device_mesh is not None: return self.ulysses_device_mesh["dp"].get_local_rank() else: return torch.distributed.get_rank() def get_data_parallel_size(self): return torch.distributed.get_world_size() // self.ulysses_sequence_parallel_size def get_data_parallel_group(self): if self.ulysses_device_mesh is not None: return self.ulysses_device_mesh.get_group(mesh_dim="dp") else: return torch.distributed.group.WORLD def forward_backward_batch(self, data: TensorDict, loss_function: Callable, forward_only=False) -> list[TensorDict]: # note that the global batch size should include data across all DP ranks tu.assign_non_tensor(data, sp_size=self.ulysses_sequence_parallel_size) # compute num_tokens in global batch for loss normalization batch_num_tokens = data["loss_mask"].sum().to(get_device_id()) torch.distributed.all_reduce( batch_num_tokens, op=torch.distributed.ReduceOp.SUM, group=self.get_data_parallel_group() ) tu.assign_non_tensor(data, batch_num_tokens=batch_num_tokens.item()) tu.assign_non_tensor(data, dp_size=self.get_data_parallel_size()) micro_batches, indices = prepare_micro_batches( data=data, dp_group=self.get_data_parallel_group(), same_micro_num_in_dp=True ) output_lst = [] ctx = torch.no_grad() if forward_only else nullcontext() for micro_batch in micro_batches: with ctx: loss, meta_info = self.forward_step(micro_batch, loss_function=loss_function, forward_only=forward_only) if not forward_only: loss.backward() output_lst.append(meta_info) # postprocess and return return postprocess_batch_func(output_lst=output_lst, indices=indices, data=data) def forward_step(self, micro_batch: TensorDict, loss_function, forward_only): raise NotImplementedError("forward_step must be implemented in subclass") def optimizer_zero_grad(self): """ Zero the gradients of the optimizer. """ self.optimizer.zero_grad() def optimizer_step(self): """ Clip gradients, skip the update if the norm is non-finite, and step the optimizer. Returns: grad_norm (float): Norm of gradients before clipping.
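Typical call sequence inside a training step (a sketch of what `train_batch`
already does, plus the scheduler step):

    engine.optimizer_zero_grad()
    engine.forward_backward_batch(data, loss_fn)
    grad_norm = engine.optimizer_step()
    lr = engine.lr_scheduler_step()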
""" assert self.optimizer_config.clip_grad is not None if isinstance(self.module, FSDP): grad_norm = self.module.clip_grad_norm_(self.optimizer_config.clip_grad) elif isinstance(self.module, FSDPModule): grad_norm = fsdp2_clip_grad_norm_(self.module.parameters(), max_norm=self.optimizer_config.clip_grad) else: grad_norm = torch.nn.utils.clip_grad_norm_( self.module.parameters(), max_norm=self.optimizer_config.clip_grad ) if isinstance(grad_norm, DTensor): grad_norm = grad_norm.full_tensor() # if grad_norm is not finite, skip the update if not torch.isfinite(grad_norm): print(f"WARN: grad_norm is not finite: {grad_norm}") self.optimizer.zero_grad() else: self.optimizer.step() return grad_norm.item() def lr_scheduler_step(self): """ Advance FSDP scheduler and return updated learning rate. """ self.lr_scheduler.step() lr = self.lr_scheduler.get_last_lr()[0] # only return the first group return lr def to(self, device: str, model: bool = True, optimizer: bool = True): """ Move FSDP model and/or optimizer to CPU or GPU with offload support. """ if self.engine_config.forward_only: # force cpu_offload return device_name = get_device_name() assert device in (device_name, "cpu") if device == device_name: if not self.engine_config.param_offload: if model: load_fsdp_model_to_gpu(self.module) if optimizer and self.optimizer is not None: load_fsdp_optimizer(self.optimizer, device) gc.collect() elif device == "cpu": if not self.engine_config.param_offload: if model: offload_fsdp_model_to_cpu(self.module) if optimizer and self.optimizer is not None: offload_fsdp_optimizer(self.optimizer) else: raise ValueError(f"Invalid device type: {device}") def save_checkpoint( self, local_path: str, hdfs_path: Optional[str] = None, global_step: int = 0, max_ckpt_to_keep: Optional[int] = None, **kwargs, ) -> None: """ Save FSDP checkpoint, handling parameter offload as needed. """ if self._is_offload_param: load_fsdp_model_to_gpu(self.module) self.checkpoint_manager.save_checkpoint( local_path=local_path, hdfs_path=hdfs_path, global_step=global_step, max_ckpt_to_keep=max_ckpt_to_keep ) torch.distributed.barrier() if self._is_offload_param: offload_fsdp_model_to_cpu(self.module) def load_checkpoint( self, local_path: str, hdfs_path: Optional[str] = None, del_local_after_load: int = True, **kwargs ) -> None: """ Load FSDP checkpoint, restoring parameters and optimizer state. 
""" import torch if self._is_offload_param: load_fsdp_model_to_gpu(self.module) self.checkpoint_manager.load_checkpoint( local_path=local_path, hdfs_path=hdfs_path, del_local_after_load=del_local_after_load ) torch.distributed.barrier() if self._is_offload_param: offload_fsdp_model_to_cpu(self.module) if self._is_offload_optimizer: offload_fsdp_optimizer(self.optimizer) def get_per_tensor_param(self, layered_summon=False, base_sync_done=False): log_gpu_memory_usage("Before load_fsdp_model_to_gpu", logger=logger) if self._is_offload_param: load_fsdp_model_to_gpu(self.module) log_gpu_memory_usage("After load_fsdp_model_to_gpu", logger=logger) peft_config = None peft_model = getattr(self.module, "_fsdp_wrapped_module", self.module) if hasattr(peft_model, "peft_config"): # LoRA peft_config = peft_model.peft_config.get("default", None) params = collect_lora_params( module=self.module, layered_summon=layered_summon, base_sync_done=base_sync_done, ) if not base_sync_done: params = {replace_lora_wrapper(k, peft_config): v for k, v in params.items()} else: params = self.module.state_dict() params = convert_weight_keys(params, getattr(self.module, "_fsdp_wrapped_module", self.module)) log_gpu_memory_usage("Before offload_fsdp_model_to_cpu", logger=logger) if self._is_offload_param: offload_fsdp_model_to_cpu(self.module) log_gpu_memory_usage("After offload_fsdp_model_to_cpu", logger=logger) if peft_config is not None and base_sync_done: per_tensor_param = params else: device = get_device_id() # used when fsdp2 set cpu_offload_policy per_tensor_param = ( (name, param.to(device, non_blocking=True).full_tensor() if isinstance(param, DTensor) else param) for name, param in params.items() ) return per_tensor_param class EngineEvalModeCtx: def __init__(self, engine: FSDPEngine): self.engine = engine def __enter__(self): self.engine.mode = "eval" if self.engine._is_offload_param: load_fsdp_model_to_gpu(self.engine.module) self.engine.ulysses_sharding_manager.__enter__() self.engine.module.eval() def __exit__(self, exc_type, exc_value, traceback): self.engine.ulysses_sharding_manager.__exit__(exc_type, exc_value, traceback) # https://pytorch.org/docs/stable/notes/fsdp.html#fsdp-notes # unshard the root FSDP module if self.engine.engine_config.fsdp_size > 1: if fsdp_version(self.engine.module) == 1: self.engine.module._handle.reshard(True) elif fsdp_version(self.engine.module) == 2: self.engine.module.reshard() if self.engine._is_offload_param: offload_fsdp_model_to_cpu(self.engine.module) self.engine.mode = None class EngineTrainModeCtx: def __init__(self, engine: FSDPEngine): self.engine = engine def __enter__(self): self.engine.mode = "train" if self.engine._is_offload_param: load_fsdp_model_to_gpu(self.engine.module) if self.engine._is_offload_optimizer: load_fsdp_optimizer(optimizer=self.engine.optimizer, device_id=get_torch_device().current_device()) self.engine.ulysses_sharding_manager.__enter__() self.engine.module.train() def __exit__(self, exc_type, exc_value, traceback): self.engine.ulysses_sharding_manager.__exit__(exc_type, exc_value, traceback) self.engine.optimizer_zero_grad() if self.engine._is_offload_param: offload_fsdp_model_to_cpu(self.engine.module) if self.engine._is_offload_optimizer: offload_fsdp_optimizer(optimizer=self.engine.optimizer) self.engine.mode = None @EngineRegistry.register(model_type="language_model", backend=["fsdp", "fsdp2"], device=["cuda", "npu"]) class FSDPEngineWithLMHead(FSDPEngine): def prepare_model_inputs(self, micro_batch: TensorDict): use_remove_padding = 
tu.get_non_tensor_data(data=micro_batch, key="use_remove_padding", default=True) pad_mode = tu.get_non_tensor_data(data=micro_batch, key="pad_mode", default=DatasetPadMode.NO_PADDING) use_fused_kernels = tu.get_non_tensor_data(data=micro_batch, key="use_fused_kernels", default=False) temperature = micro_batch["temperature"] assert pad_mode == DatasetPadMode.NO_PADDING, f"pad_mode {pad_mode} not supported" multi_modal_inputs = {} if "multi_modal_inputs" in micro_batch.keys(): from verl.utils.model import extract_multi_modal_inputs multi_modal_inputs = extract_multi_modal_inputs(micro_batch["multi_modal_inputs"]) input_ids = micro_batch["input_ids"] position_ids = micro_batch["position_ids"] if position_ids.dim() == 3: # qwen2vl mrope position_ids = position_ids.transpose(0, 1) # (bsz, 3, seqlen) -> (3, bsz, seqlen) # args used to get outputs output_args = {} if use_remove_padding: if pad_mode == DatasetPadMode.NO_PADDING: input_ids_rmpad = input_ids.values().unsqueeze(0) # (1, total_nnz) position_ids_rmpad = position_ids.values().unsqueeze(0) # (1, total_nnz) else: raise NotImplementedError(f"pad_mode {pad_mode} not implemented") # for compute the log_prob input_ids_rmpad_rolled = torch.roll(input_ids_rmpad, shifts=-1, dims=1) # (1, total_nnz) # pad and slice the inputs if sp > 1 if self.use_ulysses_sp: is_vlm_model = hasattr(getattr(self.module, "module", self.module).config, "vision_config") if is_vlm_model: # vlm model's inputs will be sliced after embedding input_ids_rmpad, position_ids_rmpad, pad_size = ulysses_pad( input_ids_rmpad, position_ids_rmpad=position_ids_rmpad, sp_size=self.ulysses_sequence_parallel_size, ) else: input_ids_rmpad, position_ids_rmpad, pad_size = ulysses_pad_and_slice_inputs( input_ids_rmpad, position_ids_rmpad=position_ids_rmpad, sp_size=self.ulysses_sequence_parallel_size, ) input_ids_rmpad_rolled, _, _ = ulysses_pad_and_slice_inputs( input_ids_rmpad_rolled, position_ids_rmpad=None, sp_size=self.ulysses_sequence_parallel_size, ) output_args["pad_size"] = pad_size input_ids_rmpad_rolled = input_ids_rmpad_rolled.squeeze(0) # ((total_nnz / sp) + pad) output_args["input_ids_rmpad_rolled"] = input_ids_rmpad_rolled # only pass input_ids and position_ids to enable flash_attn_varlen model_inputs = { "input_ids": input_ids_rmpad, "attention_mask": None, "position_ids": position_ids_rmpad, } else: if pad_mode == DatasetPadMode.NO_PADDING: input_ids = micro_batch["input_ids"] position_ids = micro_batch["position_ids"] loss_mask = micro_batch["loss_mask"] pad_token_id = tu.get_non_tensor_data(data=micro_batch, key="pad_token_id", default=0) batch_size = micro_batch.batch_size[0] seq_len_effective = input_ids.offsets().diff() max_seq_len = max(seq_len_effective) input_ids_rmpad_rolled = torch.roll(input_ids.values(), shifts=-1, dims=0) output_args["input_ids_rmpad_rolled"] = input_ids_rmpad_rolled input_ids = torch.nested.to_padded_tensor( input_ids, padding=pad_token_id, output_size=(batch_size, max_seq_len) ) position_ids = torch.nested.to_padded_tensor( position_ids, padding=0, output_size=(batch_size, max_seq_len) ) attention_mask_list = [torch.ones_like(t, dtype=torch.int32) for t in loss_mask] attention_mask = torch.nested.as_nested_tensor(attention_mask_list, layout=torch.jagged) attention_mask = torch.nested.to_padded_tensor( attention_mask, padding=0, output_size=(batch_size, max_seq_len) ) model_inputs = { "input_ids": input_ids, "attention_mask": attention_mask, "position_ids": position_ids, } else: raise NotImplementedError(f"pad_mode {pad_mode} not 
implemented") extra_args = {} if use_fused_kernels: extra_args["temperature"] = temperature extra_args["return_dict"] = True model_inputs.update(multi_modal_inputs) model_inputs.update(extra_args) return model_inputs, output_args def prepare_model_outputs(self, output, output_args, micro_batch: TensorDict): use_remove_padding = tu.get_non_tensor_data(data=micro_batch, key="use_remove_padding", default=True) pad_mode = tu.get_non_tensor_data(data=micro_batch, key="pad_mode", default=DatasetPadMode.NO_PADDING) use_fused_kernels = tu.get_non_tensor_data(data=micro_batch, key="use_fused_kernels", default=False) temperature = micro_batch["temperature"] calculate_entropy = tu.get_non_tensor_data(data=micro_batch, key="calculate_entropy", default=False) model_output = {} input_ids = micro_batch["input_ids"] if use_remove_padding: input_ids_rmpad_rolled = output_args["input_ids_rmpad_rolled"] if use_fused_kernels: log_probs = output.log_probs.squeeze(0) # (total_nnz,) entropy_rmpad = output.entropy.squeeze(0) # (total_nnz,) else: logits_rmpad = output.logits.squeeze(0) # (total_nnz, vocab_size) logits_rmpad.div_(temperature) # if use_sp: ((total_nnz / sp) + pad) ; if not use_sp: (batch, seqlen) inplace_backward = True if calculate_entropy: inplace_backward = False log_probs = logprobs_from_logits( logits=logits_rmpad, labels=input_ids_rmpad_rolled, inplace_backward=inplace_backward, ) # compute entropy if calculate_entropy: if not self.engine_config.entropy_checkpointing: entropy_rmpad = self.compute_entropy_from_logits(logits_rmpad) # ((total_nnz / sp) + pad) else: entropy_rmpad = torch.utils.checkpoint.checkpoint( self.compute_entropy_from_logits, logits_rmpad ) # gather log_prob if sp > 1 if self.use_ulysses_sp: pad_size = output_args["pad_size"] # gather and unpad for the ulysses sp log_probs = gather_outputs_and_unpad( log_probs, gather_dim=0, unpad_dim=0, padding_size=pad_size, ) if calculate_entropy: entropy_rmpad = gather_outputs_and_unpad( entropy_rmpad, gather_dim=0, unpad_dim=0, padding_size=pad_size, ) if pad_mode == DatasetPadMode.NO_PADDING: cu_seqlens = input_ids.offsets() # (bsz, j1), for each sample, is the length of each sample: [real_prompt length + real_response length] log_probs = torch.nested.nested_tensor_from_jagged(log_probs, cu_seqlens) if calculate_entropy: entropy = torch.nested.nested_tensor_from_jagged(entropy_rmpad, cu_seqlens) else: raise NotImplementedError(f"pad_mode {pad_mode} not implemented") else: # not using rmpad and no ulysses sp response_length = tu.get_non_tensor_data(data=micro_batch, key="max_response_length", default=1024) if use_fused_kernels: log_probs = output.log_probs[:, -response_length - 1 : -1] entropy = output.entropy[:, -response_length - 1 : -1] # (bsz, response_length) else: logits = output.logits logits.div_(temperature) if calculate_entropy: if not self.engine_config.entropy_checkpointing: entropy = verl_F.entropy_from_logits(logits) else: entropy = torch.utils.checkpoint.checkpoint(verl_F.entropy_from_logits, logits) if pad_mode == DatasetPadMode.NO_PADDING: cu_seqlens = input_ids.offsets() seq_lengths = cu_seqlens.diff() starts = torch.zeros_like(seq_lengths, dtype=torch.int64) logits = torch.nested.narrow(logits, 1, starts, seq_lengths, layout=torch.jagged) logits_rmpad = torch.cat([t for t in logits.unbind()]) input_ids_rmpad_rolled = output_args["input_ids_rmpad_rolled"] log_probs = logprobs_from_logits(logits=logits_rmpad, labels=input_ids_rmpad_rolled) # (bsz, j1), for each sample, length of each sample: [real_prompt_length + 
real_response_length] log_probs = torch.nested.nested_tensor_from_jagged(log_probs, cu_seqlens) if calculate_entropy: entropy = torch.nested.narrow(entropy, 1, starts, seq_lengths, layout=torch.jagged) entropy_rmpad = torch.cat([t for t in entropy.unbind()]) entropy = torch.nested.nested_tensor_from_jagged(entropy_rmpad, cu_seqlens) else: raise NotImplementedError(f"pad_mode {pad_mode} not implemented") model_output["log_probs"] = log_probs if calculate_entropy: model_output["entropy"] = entropy return model_output def forward_step(self, micro_batch: TensorDict, loss_function, forward_only): device_name = get_device_name() # actually, we should avoid assigning like this... micro_batch = micro_batch.to(get_device_id()) model_inputs, output_args = self.prepare_model_inputs(micro_batch=micro_batch) with torch.autocast(device_type=device_name, dtype=torch.bfloat16): raw_output = self.module( **model_inputs, use_cache=False, ) # prevent the model from thinking we are generating model_output = self.prepare_model_outputs( output=raw_output, output_args=output_args, micro_batch=micro_batch ) if loss_function is not None: loss, metrics = loss_function( model_output=model_output, data=micro_batch, dp_group=self.get_data_parallel_group() ) else: assert forward_only, "forward_only must be True when loss_function is None" loss = torch.tensor(1.0, device=device_name) metrics = {} output = { "model_output": model_output, "loss": loss, "metrics": metrics, } return loss, output @EngineRegistry.register(model_type="value_model", backend=["fsdp", "fsdp2"], device=["cuda", "npu"]) class FSDPEngineWithValueHead(FSDPEngineWithLMHead): """ The only difference between critic and actor is how the raw model output is processed """ def prepare_model_outputs(self, output, output_args, micro_batch: TensorDict): use_remove_padding = tu.get_non_tensor_data(data=micro_batch, key="use_remove_padding", default=True) pad_mode = tu.get_non_tensor_data(data=micro_batch, key="pad_mode", default=DatasetPadMode.NO_PADDING) if use_remove_padding: input_ids = micro_batch["input_ids"] batch_size, seqlen = input_ids.shape if hasattr(self.module, "v_head"): # For trl.AutoModelForCausalLMWithValueHead values_rmpad = output[2].squeeze(0).unsqueeze(-1) else: values_rmpad = output.logits values_rmpad = values_rmpad.squeeze(0) # (total_nnz, 1) # the critic model arch is like Qwen3ForTokenClassification with num_labels=1, # so we squeeze the last dimension here to get the value for each token values_rmpad = values_rmpad.squeeze(-1) # gather output if sp > 1 if self.use_ulysses_sp: pad_size = output_args["pad_size"] values_rmpad = gather_outputs_and_unpad(values_rmpad, gather_dim=0, unpad_dim=0, padding_size=pad_size) if pad_mode == DatasetPadMode.NO_PADDING: cu_seqlens = input_ids.offsets() # (bsz, j1), for each sample, is the length of each sample: [real_prompt length + real_response length] values = torch.nested.nested_tensor_from_jagged(values_rmpad, cu_seqlens) else: raise NotImplementedError(f"pad_mode {pad_mode} not implemented") else: if hasattr(self.module, "v_head"): # For trl.AutoModelForCausalLMWithValueHead values = output[2] else: values = output.logits if pad_mode == DatasetPadMode.NO_PADDING: cu_seqlens = input_ids.offsets() seq_lengths = cu_seqlens.diff() starts = torch.zeros_like(seq_lengths, dtype=torch.int64) values = torch.nested.narrow(values, 1, starts, seq_lengths, layout=torch.jagged) values_rmpad = torch.cat([t for t in values.unbind()]) # (bsz, j1), for each sample, length of each sample: [real_prompt_length + real_response_length]
values = torch.nested.nested_tensor_from_jagged(values_rmpad, cu_seqlens) else: raise NotImplementedError(f"pad_mode {pad_mode} not implemented") return {"values": values} ================================================ FILE: verl_distillation/verl/workers/engine/fsdp/utils.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from torch.distributed.device_mesh import init_device_mesh from verl.utils.device import get_device_name def create_device_mesh(world_size, fsdp_size): """ Create a device mesh for distributed training based on the world size and FSDP size. Args: world_size (int): Total number of processes in the distributed training setup. fsdp_size (int): Size of the Fully Sharded Data Parallel (FSDP) group. Returns: torch.distributed.device_mesh.DeviceMesh: The initialized device mesh. """ device_name = get_device_name() if fsdp_size < 0 or fsdp_size >= world_size: device_mesh = init_device_mesh(device_name, mesh_shape=(world_size,), mesh_dim_names=["fsdp"]) else: device_mesh = init_device_mesh( device_name, mesh_shape=(world_size // fsdp_size, fsdp_size), mesh_dim_names=["ddp", "fsdp"] ) return device_mesh def get_sharding_strategy(device_mesh): """ Determine the appropriate sharding strategy based on the number of dimensions of the device mesh. Args: device_mesh (torch.distributed.device_mesh.DeviceMesh): The device mesh used for distributed training. Returns: torch.distributed.fsdp.ShardingStrategy: The sharding strategy to be used with FSDP. Raises: NotImplementedError: If the number of dimensions of the device mesh is neither 1 nor 2. """ from torch.distributed.fsdp import ShardingStrategy if device_mesh.ndim == 1: sharding_strategy = ShardingStrategy.FULL_SHARD elif device_mesh.ndim == 2: sharding_strategy = ShardingStrategy.HYBRID_SHARD else: raise NotImplementedError(f"Get device mesh ndim={device_mesh.ndim}, but only support 1 or 2") return sharding_strategy ================================================ FILE: verl_distillation/verl/workers/engine/megatron/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from .transformer_impl import MegatronEngine, MegatronEngineWithLMHead __all__ = ["MegatronEngine", "MegatronEngineWithLMHead"] ================================================ FILE: verl_distillation/verl/workers/engine/megatron/transformer_impl.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os from functools import partial from typing import Any, Callable, Iterator, Optional import torch import torch.distributed from megatron.core import parallel_state as mpu from megatron.core.pipeline_parallel import get_forward_backward_func from omegaconf import OmegaConf from tensordict import TensorDict from verl.models.mcore import get_mcore_weight_converter from verl.trainer.config import CheckpointConfig from verl.utils import tensordict_utils as tu from verl.utils.checkpoint.megatron_checkpoint_manager import MegatronCheckpointManager from verl.utils.dataset.dataset_utils import DatasetPadMode from verl.utils.device import get_device_id, get_device_name from verl.utils.megatron.pipeline_parallel import make_batch_generator from verl.utils.megatron.tensor_parallel import vocab_parallel_entropy, vocab_parallel_log_probs_from_logits from verl.utils.megatron_utils import ( load_megatron_model_to_gpu, load_megatron_optimizer, offload_megatron_model_to_cpu, offload_megatron_optimizer, per_tensor_generator, ) from verl.utils.model import load_mcore_dist_weights, load_megatron_gptmodel_weights from verl.workers.config import HFModelConfig, McoreEngineConfig, McoreOptimizerConfig from ..base import BaseEngine, EngineRegistry from ..utils import postprocess_batch_func, prepare_micro_batches from .utils import set_random_seed logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) class MegatronEngine(BaseEngine): def __init__( self, model_config: HFModelConfig, engine_config: McoreEngineConfig, optimizer_config: McoreOptimizerConfig, checkpoint_config: CheckpointConfig, ): super().__init__() self.model_config = model_config self.engine_config = engine_config self.optimizer_config = optimizer_config self.checkpoint_config = checkpoint_config self._init_device_mesh() set_random_seed(seed=self.engine_config.seed) self._is_offload_param = self.engine_config.param_offload self._is_offload_grad = self.engine_config.grad_offload self._is_offload_optimizer = self.engine_config.optimizer_offload self.mode = None self.layer_name_mapping = { "qkv_layer_name": "self_attention.linear_qkv.", "gate_proj_layer_name": "linear_fc1.", } self.weight_converter = None def _init_device_mesh(self): mpu.initialize_model_parallel( tensor_model_parallel_size=self.engine_config.tensor_model_parallel_size, pipeline_model_parallel_size=self.engine_config.pipeline_model_parallel_size, virtual_pipeline_model_parallel_size=self.engine_config.virtual_pipeline_model_parallel_size, pipeline_model_parallel_split_rank=None, use_sharp=False, 
context_parallel_size=self.engine_config.context_parallel_size, expert_model_parallel_size=self.engine_config.expert_model_parallel_size, expert_tensor_parallel_size=self.engine_config.expert_tensor_parallel_size, nccl_communicator_config_path=None, ) def _build_tf_config(self): from verl.models.mcore import hf_to_mcore_config from verl.utils.torch_dtypes import PrecisionType self.param_dtype = torch.bfloat16 self.dtype = PrecisionType.to_dtype(self.param_dtype) tf_config = hf_to_mcore_config( self.model_config.hf_config, self.dtype, **self.engine_config.override_transformer_config ) use_mbridge = self.engine_config.use_mbridge if use_mbridge: from verl.models.mcore.mbridge import AutoBridge bridge = AutoBridge.from_config(self.model_config.hf_config) bridge.set_extra_args(**self.engine_config.override_transformer_config) tf_config = bridge.config self.bridge = bridge else: self.bridge = None if not self.bridge: self.weight_converter = get_mcore_weight_converter(self.model_config.hf_config, self.dtype) if torch.distributed.get_rank() == 0: print(f"TF config: {tf_config}") self.tf_config = tf_config def _build_megatron_module(self): from verl.utils.megatron_utils import McoreModuleWrapperConfig, make_megatron_module from verl.utils.model import print_model_size # TODO: add more cases is_value_model = ( "ForTokenClassification" in self.model_config.architectures[0] or "ForSequenceClassification" in self.model_config.architectures[0] ) self.is_value_model = is_value_model if self.engine_config.forward_only: wrap_with_ddp = False else: wrap_with_ddp = True wrap_config = McoreModuleWrapperConfig( is_value_model=is_value_model, # actor is not value model share_embeddings_and_output_weights=self.model_config.share_embeddings_and_output_weights, wrap_with_ddp=wrap_with_ddp, use_distributed_optimizer=self.engine_config.use_distributed_optimizer, ) module = make_megatron_module( wrap_config=wrap_config, tf_config=self.tf_config, hf_config=self.model_config.hf_config, bridge=self.bridge, override_model_config=self.engine_config.override_mcore_model_config, override_ddp_config=self.engine_config.override_ddp_config, ) print(f"module: {len(module)}") if self.engine_config.use_dist_checkpointing: load_mcore_dist_weights(module, self.engine_config.dist_checkpointing_path, is_value_model=is_value_model) else: if self.bridge is not None: self.bridge.load_weights(module, self.model_config.local_path) else: # (vermouth1992) this is a workaround to be compatible with the old API tmp_config = OmegaConf.create( {"model": {"path": self.model_config.local_path, "use_shm": self.model_config.use_shm}} ) load_megatron_gptmodel_weights( tmp_config, self.model_config.hf_config, module, params_dtype=self.dtype, is_value_model=is_value_model, ) if torch.distributed.get_rank() == 0: print_model_size(module[0]) return module def _build_optimizer(self): from verl.utils.megatron.optimizer import get_megatron_optimizer, init_megatron_optim_config optim_config_megatron = init_megatron_optim_config(self.optimizer_config) optimizer = get_megatron_optimizer(model=self.module, config=optim_config_megatron) return optimizer def _build_lr_scheduler(self): from verl.utils.megatron.optimizer import get_megatron_optimizer_param_scheduler optimizer_scheduler = get_megatron_optimizer_param_scheduler( optimizer=self.optimizer, config=self.optimizer_config ) return optimizer_scheduler def is_mp_src_rank_with_outputs(self): return ( mpu.get_tensor_model_parallel_rank() == 0 and mpu.get_pipeline_model_parallel_rank() == 
mpu.get_pipeline_model_parallel_world_size() - 1 and mpu.get_context_parallel_rank() == 0 ) def initialize(self): self._build_tf_config() self.module = self._build_megatron_module() if not self.engine_config.forward_only: self.optimizer = self._build_optimizer() self.lr_scheduler = self._build_lr_scheduler() else: self.optimizer = None self.lr_scheduler = None tmp_config = OmegaConf.create({"model": {"path": self.model_config.local_path}}) role = "actor" if not self.is_value_model else "critic" self.checkpoint_mananager = MegatronCheckpointManager( config=tmp_config, checkpoint_config=self.checkpoint_config, model_config=self.model_config.hf_config, transformer_config=self.tf_config, role=role, model=self.module, arch=self.model_config.architectures[0], hf_config=self.model_config.hf_config, param_dtype=self.param_dtype, share_embeddings_and_output_weights=self.model_config.share_embeddings_and_output_weights, processing_class=self.model_config.get_processor(), optimizer=self.optimizer, optimizer_scheduler=self.lr_scheduler, use_distributed_optimizer=self.engine_config.use_distributed_optimizer, use_checkpoint_opt_param_scheduler=self.optimizer_config.use_checkpoint_opt_param_scheduler, bridge=self.bridge, use_dist_checkpointing=self.engine_config.use_dist_checkpointing, ) def train_mode(self): """ Context manager entry for switching the engine and model into training mode. Usage: with engine.train_mode(): # runs in training mode """ return EngineTrainModeCtx(self) def eval_mode(self): """ Context manager entry for switching the engine and model into evaluation mode. Usage: with engine.eval_mode(): # runs in evaluation mode """ return EngineEvalModeCtx(self) def optimizer_zero_grad(self): """ Zero out gradients of all parameters before starting a new backward pass. """ self.optimizer.zero_grad() # assumes use_contiguous_buffers_in_local_ddp and no overlap_dp_param_comm for chunk in self.module: # if using the distributed optimizer, zeroing the grad buffer is handled by the optimizer chunk.zero_grad_buffer() def optimizer_step(self): """ Perform an optimization step to update model parameters based on accumulated gradients. Returns: grad_norm (float): The norm of the gradients before clipping or update. """ update_successful, grad_norm, num_zeros_in_grad = self.optimizer.step() if update_successful: # allgather is already executed inside optimizer.step in new Megatron pass else: raise NotImplementedError("Megatron optimizer step failed. This should not happen") return grad_norm def lr_scheduler_step(self): """ Advance the learning rate scheduler by one step. Returns: current_lr (float or list[float]): Updated learning rate(s). """ from verl.utils.megatron.optimizer import get_megatron_last_lr self.lr_scheduler.step(1) return get_megatron_last_lr(self.optimizer) def to(self, device: str, model: bool = True, optimizer: bool = True): """ Move model parameters, optimizer states, or both to the specified device. Args: device: Target device identifier. model: If True, move the model. optimizer: If True, move the optimizer states.
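Usage (a sketch): offload both between training phases to free device memory,
then reload before the next update:

    engine.to("cpu")              # offload model and optimizer states
    engine.to(get_device_name())  # move them back before training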
""" device_name = get_device_name() assert device in (device_name, "cpu") if device == device_name: if not self.engine_config.param_offload: if model: load_megatron_model_to_gpu(self.module, load_grad=True) if optimizer and self.optimizer is not None: load_megatron_optimizer(self.optimizer, device) elif device == "cpu": if not self.engine_config.param_offload: if model: offload_megatron_model_to_cpu(self.module) if optimizer and self.optimizer is not None: offload_megatron_optimizer(self.optimizer) else: raise ValueError(f"Invalid device type: {device}") def get_data_parallel_rank(self): return mpu.get_data_parallel_rank() def get_data_parallel_size(self): return mpu.get_data_parallel_world_size() def get_data_parallel_group(self): return mpu.get_data_parallel_group() def save_checkpoint( self, local_path: str, hdfs_path: Optional[str] = None, global_step: int = 0, max_ckpt_to_keep: Optional[int] = None, **kwargs, ) -> None: """ Save model, optimizer, and scheduler states to a checkpoint. Args: local_path: Local filesystem path to save checkpoint. hdfs_path: Optional HDFS path to copy checkpoint. global_step: Integer training step number for naming. max_ckpt_to_keep: Maximum number of recent checkpoints to retain. """ if self._is_offload_param: load_megatron_model_to_gpu(self.module, load_grad=True) self.checkpoint_mananager.save_checkpoint( local_path=local_path, hdfs_path=hdfs_path, global_step=global_step, max_ckpt_to_keep=max_ckpt_to_keep ) torch.distributed.barrier() if self._is_offload_param: offload_megatron_model_to_cpu(self.module) def load_checkpoint( self, local_path: str, hdfs_path: Optional[str] = None, del_local_after_load: bool = True, **kwargs ) -> None: """ Load model, optimizer, and scheduler states from a checkpoint. Args: local_path: Local filesystem path of the checkpoint. hdfs_path: Optional HDFS path where checkpoint is stored. del_local_after_load: Whether to delete local copy after loading. 
""" if self._is_offload_param: load_megatron_model_to_gpu(self.module) self.checkpoint_mananager.load_checkpoint( local_path=local_path, hdfs_path=hdfs_path, del_local_after_load=del_local_after_load ) if self._is_offload_param: offload_megatron_model_to_cpu(self.module) if self._is_offload_optimizer: offload_megatron_optimizer(self.optimizer) def forward_backward_batch(self, data: TensorDict, loss_function: Callable, forward_only=False) -> Any: tu.assign_non_tensor(data, sp_size=self.engine_config.context_parallel_size) # compute num_tokens in global batch for loss normalization batch_num_tokens = data["loss_mask"].sum().to(get_device_id()) torch.distributed.all_reduce( batch_num_tokens, op=torch.distributed.ReduceOp.SUM, group=self.get_data_parallel_group() ) tu.assign_non_tensor(data, batch_num_tokens=batch_num_tokens.item()) tu.assign_non_tensor(data, dp_size=self.get_data_parallel_size()) vpp_size = mpu.get_virtual_pipeline_model_parallel_world_size() if vpp_size is not None and vpp_size > 1: num_batches_divided_by = self.tf_config.microbatch_group_size_per_vp_stage else: num_batches_divided_by = None micro_batches, indices = prepare_micro_batches( data=data, dp_group=self.get_data_parallel_group(), num_batches_divided_by=num_batches_divided_by, same_micro_num_in_dp=False, min_num_micro_batch=None, ) if num_batches_divided_by is not None: assert len(micro_batches) % num_batches_divided_by == 0, ( f"micro_batches {micro_batches} must be divisible by num_batches_divided_by " f"{num_batches_divided_by} for megatron backend" ) # compute input shapes for pp stages n_micro_batch = len(micro_batches) for micro_batch in micro_batches: tu.assign_non_tensor(micro_batch, num_micro_batch=n_micro_batch) forward_backward_func = get_forward_backward_func() postprocess_micro_batch_func = partial( self.postprocess_micro_batch_func, forward_only=forward_only, loss_function=loss_function, ) tu.assign_non_tensor(data, num_micro_batch=n_micro_batch) forward_step = partial(self.forward_step, postprocess_micro_batch_func=postprocess_micro_batch_func) # batch should be a list of batches inside micro-batches batch_generator = make_batch_generator(micro_batches, vpp_size=len(self.module)) # TODO: we may use the new schedule instead # for flash-attn: (seq_len, batch_size, hidden_size) = (mbs*seq_len, 1, hidden_size) losses_reduced = forward_backward_func( forward_step_func=forward_step, data_iterator=batch_generator, model=self.module, num_microbatches=n_micro_batch, seq_length=1, # the communication shape is obtained via p2p comm micro_batch_size=1, # the communication shape is obtained via p2p comm forward_only=forward_only, ) # loss_reduces contains the stats returned from loss_func if mpu.is_pipeline_last_stage(ignore_virtual=True): return postprocess_batch_func(output_lst=losses_reduced, indices=indices, data=data) else: return {} def get_per_tensor_param(self): if self._is_offload_param: load_megatron_model_to_gpu(self.module, load_grad=False) if self.bridge is not None: per_tensor_param = self.bridge.export_weights(self.module) else: per_tensor_param = per_tensor_generator( self.module, self.model_config.hf_config, self.weight_converter, self.tf_config, self.layer_name_mapping, ) return per_tensor_param def forward_step(self, batch_iter, model, postprocess_micro_batch_func): raise NotImplementedError("forward_step must be implemented in subclass") def postprocess_micro_batch_func(self, output, data: TensorDict, forward_only: bool, loss_function): raise NotImplementedError("postprocess_micro_batch_func 
must be implemented in subclass") class EngineEvalModeCtx: def __init__(self, engine: MegatronEngine): self.engine = engine def __enter__(self): assert isinstance(self.engine, MegatronEngine) self.engine.mode = "eval" if self.engine._is_offload_param: load_megatron_model_to_gpu(self.engine.module, load_grad=True) # mcore module is a list of model chunk in each vpp stage for module in self.engine.module: module.eval() def __exit__(self, exc_type, exc_value, traceback): if self.engine._is_offload_param: offload_megatron_model_to_cpu(self.engine.module) self.engine.mode = None class EngineTrainModeCtx: def __init__(self, engine: MegatronEngine): self.engine = engine def __enter__(self): assert isinstance(self.engine, MegatronEngine) self.engine.mode = "train" if self.engine._is_offload_param: load_megatron_model_to_gpu(self.engine.module, load_grad=True) if self.engine._is_offload_optimizer: load_megatron_optimizer(optimizer=self.engine.optimizer) # mcore module is a list of model chunk in each vpp stage for module in self.engine.module: module.train() def __exit__(self, exc_type, exc_value, traceback): if self.engine._is_offload_param: offload_megatron_model_to_cpu(self.engine.module) if self.engine._is_offload_optimizer: offload_megatron_optimizer(optimizer=self.engine.optimizer) self.engine.mode = None @EngineRegistry.register(model_type="language_model", backend="megatron") class MegatronEngineWithLMHead(MegatronEngine): def prepare_model_inputs(self, batch: TensorDict): batch = batch.to(get_device_id()) batch = batch.contiguous() input_ids = batch["input_ids"] loss_mask = batch["loss_mask"].to(bool) position_ids = batch["position_ids"] # process vlm inputs has_multi_modal_inputs = "multi_modal_inputs" in batch.keys() if has_multi_modal_inputs: batch["multi_modal_inputs"] = batch["multi_modal_inputs"] batch["multi_modal_inputs_idx"] = torch.Tensor(list(range(len(batch["multi_modal_inputs"])))).to( torch.int64 ) if batch["position_ids"].dim() == 3: # qwen2vl mrope [bs, 3, seq_len] batch["position_ids"] = batch["position_ids"][ :, 0 ] # mcore patch recompute qwen2vl's pos ids during forward multi_modal_inputs = {} if "multi_modal_inputs" in batch: from verl.utils.model import extract_multi_modal_inputs indices = batch.get("multi_modal_inputs_idx", None) multi_modal_inputs = extract_multi_modal_inputs(batch["multi_modal_inputs"], indices) return { "input_ids": input_ids, "loss_mask": loss_mask, "position_ids": position_ids, "multi_modal_inputs": multi_modal_inputs, } def prepare_model_outputs(self, output: dict, data: TensorDict): calculate_entropy = tu.get_non_tensor_data(data, key="calculate_entropy", default=False) log_prob = output["log_probs"] model_output = {"log_probs": log_prob} if calculate_entropy: entropy = output["entropy"] model_output["entropy"] = entropy return model_output def forward_step(self, batch_iter: Iterator[TensorDict], model, postprocess_micro_batch_func): batch: TensorDict = next(batch_iter) batch = batch.to(get_device_id()) use_fused_kernels = tu.get_non_tensor_data(batch, key="use_fused_kernels", default=False) calculate_entropy = tu.get_non_tensor_data(batch, key="calculate_entropy", default=False) pad_mode = tu.get_non_tensor_data(batch, key="pad_mode", default=DatasetPadMode.NO_PADDING) temperature = batch["temperature"] model_inputs = self.prepare_model_inputs(batch) input_ids = model_inputs["input_ids"] multi_modal_inputs = model_inputs["multi_modal_inputs"] if pad_mode == DatasetPadMode.NO_PADDING: label = input_ids.clone() else: raise 
NotImplementedError(f"Pad mode {pad_mode} is not supported for megatron engine") from verl.models.mcore import get_mcore_forward_no_padding_fn if use_fused_kernels: raise NotImplementedError("Fused kernels are not supported for megatron engine") forward_fn = get_mcore_forward_no_padding_fn(self.model_config.hf_config) def logits_processor(logits, label): assert logits.shape[:2] == label.shape[:2] logits.div_(temperature) ret = {} if calculate_entropy: logits_bak = logits.clone() if torch.distributed.get_rank() == 0: logger.warning_once( "For memory-efficient computation, enable fused kernels via " "`actor_rollout_ref.model.use_fused_kernels=True`. " "The current `clone()` operation ensures correctness but increases memory usage." ) entropy = vocab_parallel_entropy(logits) ret["entropy"] = entropy else: logits_bak = logits # Create the final labels for next-token prediction. # The `label` tensor starts as a clone of `input_ids`. `torch.roll` is not applied # earlier because `input_ids` is a nested tensor, which is incompatible with the operation. # The `preprocess_packed_seqs_no_padding` function unnests and flattens the tensor # into `input_ids_rmpad` (shape: [1, total_seqlen]). # Now, on this simple, unpadded tensor, we can perform the standard left shift # to align the target token `t+1` with the prediction for token `t`. label = torch.roll(label, shifts=-1, dims=1) log_probs = vocab_parallel_log_probs_from_logits(logits_bak, label) ret["log_probs"] = log_probs return ret logits_processor_args = {"label": label} output = forward_fn( model, input_ids, multi_modal_inputs, logits_processor=logits_processor, logits_processor_args=logits_processor_args, ) return output, partial(postprocess_micro_batch_func, data=batch) def postprocess_micro_batch_func(self, output, data: TensorDict, forward_only: bool, loss_function): # For memory efficiency # We move calculation of entropy to compute_log_probs, forward_only == True device = data["input_ids"].device model_output = self.prepare_model_outputs(output, data) if loss_function is not None: loss, metrics = loss_function(model_output=model_output, data=data, dp_group=self.get_data_parallel_group()) # scale loss by num_micro_batch because megatron will scale loss # by n_micro_batch and cp size inside pp schedule loss = loss * data["num_micro_batch"] / mpu.get_context_parallel_world_size() else: assert forward_only, "forward_only must be True when loss_function is None" loss = torch.tensor(1.0, device=device) metrics = {} output = { "model_output": model_output, "loss": loss, "metrics": metrics, } # return loss and stats return loss, output @EngineRegistry.register(model_type="value_model", backend="megatron") class MegatronEngineWithValueHead(MegatronEngineWithLMHead): # for value head def forward_step(self, batch_iter, model, postprocess_micro_batch_func): batch: TensorDict = next(batch_iter) batch = batch.to(get_device_id()) model_inputs = self.prepare_model_inputs(batch) input_ids = model_inputs["input_ids"] multi_modal_inputs = model_inputs["multi_modal_inputs"] from verl.models.mcore import get_mcore_forward_no_padding_fn forward_fn = get_mcore_forward_no_padding_fn(self.model_config.hf_config) output = forward_fn( model, input_ids, multi_modal_inputs, value_model=True, ) return output, partial(postprocess_micro_batch_func, data=batch) def prepare_model_outputs(self, output: dict | torch.Tensor, data: TensorDict): return {"values": output} ================================================ FILE: 
verl_distillation/verl/workers/engine/megatron/utils.py
================================================
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from verl.utils.device import get_torch_device


def set_random_seed(seed):
    import random

    import numpy as np
    import torch

    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    if get_torch_device().device_count() > 0:
        from megatron.core import tensor_parallel

        tensor_parallel.model_parallel_cuda_manual_seed(seed)
    # FIXME: torch.cumsum does not support deterministic mode (used in the vllm sampler),
    # https://github.com/pytorch/pytorch/issues/89492
    # torch.use_deterministic_algorithms(True, warn_only=True)
    # os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'


================================================
FILE: verl_distillation/verl/workers/engine/mindspeed/__init__.py
================================================
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .transformer_impl import MindspeedEngineWithLMHead

__all__ = ["MindspeedEngineWithLMHead"]


================================================
FILE: verl_distillation/verl/workers/engine/mindspeed/transformer_impl.py
================================================
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
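# A minimal sketch of the repatch pattern used by the engine below (the values
# are illustrative; only config keys that appear in this file are shown):
#
#     repatch({"use_flash_attn": True, "context_parallel_size": 2})
#
# Flash attention is always requested; context_parallel_size is forwarded only
# when the engine config sets it greater than 1.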
import logging
import os

from mindspeed.megatron_adaptor import repatch

from verl.trainer.config import CheckpointConfig
from verl.workers.config import HFModelConfig, McoreEngineConfig, McoreOptimizerConfig

from ..base import EngineRegistry
from ..megatron import MegatronEngineWithLMHead

logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))


@EngineRegistry.register(model_type="language_model", backend="megatron", device="npu")
class MindspeedEngineWithLMHead(MegatronEngineWithLMHead):
    def __init__(
        self,
        model_config: HFModelConfig,
        engine_config: McoreEngineConfig,
        optimizer_config: McoreOptimizerConfig,
        checkpoint_config: CheckpointConfig,
    ):
        super().__init__(model_config, engine_config, optimizer_config, checkpoint_config)
        repatch_config = {"use_flash_attn": True}
        if self.engine_config.context_parallel_size > 1:
            repatch_config["context_parallel_size"] = self.engine_config.context_parallel_size
        repatch(repatch_config)


================================================
FILE: verl_distillation/verl/workers/engine/utils.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch
from tensordict import TensorDict

from verl.utils import tensordict_utils as tu
from verl.utils.dataset.dataset_utils import DatasetPadMode
from verl.utils.py_functional import append_to_dict
from verl.utils.seqlen_balancing import rearrange_micro_batches, restore_dynamic_batch


def prepare_micro_batches(
    data: TensorDict,
    dp_group=None,
    num_batches_divided_by=None,
    same_micro_num_in_dp=True,
    min_num_micro_batch=None,
    use_dynamic_bsz_balance=True,
):
    """
    Prepare micro batches from data.
    """
    use_dynamic_bsz = tu.get_non_tensor_data(data=data, key="use_dynamic_bsz", default=True)
    sp_size = tu.get_non_tensor_data(data=data, key="sp_size", default=1)

    if use_dynamic_bsz:
        assert "max_token_len_per_gpu" in data.keys(), "max_token_len_per_gpu must be set when use_dynamic_bsz is True"
        max_token_len_per_gpu = data["max_token_len_per_gpu"]
        max_token_len = max_token_len_per_gpu * sp_size
        micro_batches, batch_idx_list = rearrange_micro_batches(
            data,
            max_token_len=max_token_len,
            dp_group=dp_group,
            num_batches_divided_by=num_batches_divided_by,
            same_micro_num_in_dp=same_micro_num_in_dp,
            min_num_micro_batch=min_num_micro_batch,
            use_dynamic_bsz_balance=use_dynamic_bsz_balance,
        )
    else:
        micro_batch_size_per_gpu = data["micro_batch_size_per_gpu"]
        micro_batches = data.split(micro_batch_size_per_gpu)
        batch_idx_list = None

    return micro_batches, batch_idx_list


def postprocess_batch_func(output_lst, indices, data: TensorDict):
    """Postprocess the output of a forward_backward_batch.

    `output_lst` is a list of dicts, one per micro-batch, each containing
    1. model_output, 2. loss, and 3. metrics. This function reorders the
    per-micro-batch entries back into batch order. Only the last pipeline
    stage returns populated results (on every tensor-parallel rank of that
    stage); other pp ranks return an empty output.
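    Illustrative shape of the return value (keys depend on the engine and loss function):

        {"model_output": {"log_probs": ...}, "loss": [loss_mb0, loss_mb1, ...], "metrics": {...}}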
""" use_dynamic_bsz = tu.get_non_tensor_data(data=data, key="use_dynamic_bsz", default=True) pad_mode = tu.get_non_tensor_data(data=data, key="pad_mode", default=DatasetPadMode.NO_PADDING) assert pad_mode == DatasetPadMode.NO_PADDING, "postprocess_batch_func only support NO_PADDING pad_mode" # losses_reduced is a list of dict containing outputs for each micro-batch # reorder entropy and outputs. Return None for other pp ranks # only on last rank. It should be on every tp rank # losses_reduced contains 1. model_output, 2. loss, 3. metrics. # We perform reverse model_output = {} losses = [] aggregated_metrics = {} # model output for o in output_lst: if "model_output" in o: for key, val in o["model_output"].items(): if key not in model_output: model_output[key] = [] model_output[key].append(val) # concat results from micro batches for key, val in model_output.items(): if pad_mode == DatasetPadMode.NO_PADDING: tensors = [tensor for nt in model_output[key] for tensor in nt.unbind()] model_output[key] = torch.nested.as_nested_tensor(tensors, layout=torch.jagged) else: raise NotImplementedError(f"pad_mode {pad_mode} not implemented") # reverse with dynamic bsz if use_dynamic_bsz: model_output[key] = restore_dynamic_batch(model_output[key], indices) # loss for o in output_lst: if "loss" in o: losses.append(o["loss"]) # metrics for o in output_lst: if "metrics" in o: metrics = o["metrics"] append_to_dict(aggregated_metrics, metrics) output = { "model_output": model_output, "loss": losses, "metrics": aggregated_metrics, } return output ================================================ FILE: verl_distillation/verl/workers/fsdp_workers.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" The main entry point to run the PPO algorithm """ import datetime import json import logging import os import warnings from dataclasses import asdict from typing import Any, Optional import numpy as np import psutil import torch import torch.distributed import torch.distributed as dist from codetiming import Timer from omegaconf import DictConfig, OmegaConf, open_dict from peft import LoraConfig, TaskType, get_peft_model from safetensors.torch import save_file from torch.distributed.device_mesh import init_device_mesh from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.api import (FullStateDictConfig, ShardedStateDictConfig, StateDictType) try: # for torch 2.5+ from torch.distributed.tensor import DTensor except ImportError: from torch.distributed._tensor import DTensor import verl.utils.torch_functional as verl_F from verl import DataProto from verl.models.transformers.monkey_patch import apply_monkey_patch from verl.single_controller.base import Worker from verl.single_controller.base.decorator import ( Dispatch, make_nd_compute_dataproto_dispatch_fn, register) from verl.utils import hf_processor, hf_tokenizer from verl.utils.activation_offload import enable_activation_offloading from verl.utils.checkpoint.fsdp_checkpoint_manager import FSDPCheckpointManager from verl.utils.config import omega_conf_to_dataclass from verl.utils.device import (get_device_id, get_device_name, get_nccl_backend, get_torch_device, set_expandable_segments) from verl.utils.flops_counter import FlopsCounter from verl.utils.fs import copy_to_local from verl.utils.fsdp_utils import (CPUOffloadPolicy, MixedPrecisionPolicy, apply_fsdp2, collect_lora_params, fsdp2_load_full_state_dict, fsdp_version, get_fsdp_wrap_policy, get_init_weight_context_manager, get_shard_placement_fn, init_fn, layered_summon_lora_params, load_fsdp_model_to_gpu, load_fsdp_optimizer, offload_fsdp_model_to_cpu, offload_fsdp_optimizer, replace_lora_wrapper) from verl.utils.import_utils import import_external_libs from verl.utils.memory_utils import aggressive_empty_cache from verl.utils.model import compute_position_id_with_mask, convert_weight_keys from verl.utils.profiler import (DistProfiler, DistProfilerExtension, ProfilerConfig, log_gpu_memory_usage, simple_timer) from verl.utils.profiler.performance import (reduce_timing, topk_reduce_ratio_min_max) from verl.utils.py_functional import convert_to_regular_types from verl.utils.ray_utils import get_event_loop from verl.workers.config import (FSDPCriticConfig, FSDPEngineConfig, HFModelConfig, RolloutConfig) from verl.workers.config.optimizer import build_optimizer from verl.workers.rollout import get_rollout_class from verl.workers.sharding_manager.fsdp_ulysses import \ FSDPUlyssesShardingManager logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) device_name = get_device_name() def create_device_mesh(world_size, fsdp_size): if fsdp_size < 0 or fsdp_size >= world_size: device_mesh = init_device_mesh(device_name, mesh_shape=(world_size,), mesh_dim_names=["fsdp"]) else: device_mesh = init_device_mesh( device_name, mesh_shape=(world_size // fsdp_size, fsdp_size), mesh_dim_names=["ddp", "fsdp"] ) return device_mesh def get_sharding_strategy(device_mesh): from torch.distributed.fsdp import ShardingStrategy if device_mesh.ndim == 1: sharding_strategy = ShardingStrategy.FULL_SHARD elif device_mesh.ndim == 2: sharding_strategy = ShardingStrategy.HYBRID_SHARD else: raise NotImplementedError(f"Get device mesh 
ndim={device_mesh.ndim}, but only support 1 or 2") return sharding_strategy def get_vl_model_vision_tower(vl_model_instance): """ Util to extract Vision Tower from a VL model instance """ if hasattr(vl_model_instance, "model") and hasattr(vl_model_instance.model, "visual"): # transformers >= 4.52.0 return vl_model_instance.model.visual elif hasattr(vl_model_instance, "visual"): # transformers < 4.52.0 return vl_model_instance.visual return None class ActorRolloutRefWorker(Worker, DistProfilerExtension): """ This worker can be instantiated as a standalone actor or a standalone rollout or a standalone reference policy or a hybrid engine based on the config.rollout """ def __init__(self, config: DictConfig, role: str, **kwargs): Worker.__init__(self) self.config = config import torch.distributed if not torch.distributed.is_initialized(): rank = int(os.environ.get("RANK", 0)) world_size = int(os.environ.get("WORLD_SIZE", 1)) torch.distributed.init_process_group( backend=f"cpu:gloo,{get_device_name()}:{get_nccl_backend()}", rank=rank, world_size=world_size, timeout=datetime.timedelta(seconds=self.config.get("nccl_timeout", 600)), init_method=os.environ.get("DIST_INIT_METHOD", None), ) # build device mesh for FSDP world_size = torch.distributed.get_world_size() # TODO(sgm): support FSDP hybrid shard for larger model self.device_mesh = create_device_mesh(world_size=world_size, fsdp_size=self.config.actor.fsdp_config.fsdp_size) # build device mesh for Ulysses Sequence Parallel self.ulysses_device_mesh = None self.ulysses_sequence_parallel_size = self.config.actor.get("ulysses_sequence_parallel_size", 1) dp = world_size // self.ulysses_sequence_parallel_size if self.ulysses_sequence_parallel_size > 1: self.ulysses_device_mesh = init_device_mesh( device_name, mesh_shape=(dp, self.ulysses_sequence_parallel_size), mesh_dim_names=["dp", "sp"] ) # create training dispatch if self.ulysses_device_mesh is not None: is_collect = self.ulysses_device_mesh["sp"].get_local_rank() == 0 self._register_dispatch_collect_info( "actor", dp_rank=self.ulysses_device_mesh["dp"].get_local_rank(), is_collect=is_collect ) else: self._register_dispatch_collect_info("actor", dp_rank=self.rank, is_collect=True) self.ulysses_sharding_manager = FSDPUlyssesShardingManager(self.ulysses_device_mesh) self._lora_rank = self.config.model.get("lora_rank", 0) self._is_lora = self.config.model.get("lora_adapter_path") is not None or self._lora_rank > 0 self.role = role assert self.role in ["actor", "rollout", "ref", "actor_rollout", "actor_rollout_ref"] self._is_actor = self.role in ["actor", "actor_rollout", "actor_rollout_ref"] self._is_rollout = self.role in ["rollout", "actor_rollout", "actor_rollout_ref"] self._is_ref = self.role in ["ref", "actor_rollout_ref"] self.use_orig_params = self.config.actor.fsdp_config.get("use_orig_params", False) # TODO(haibin.lin): # As of now the type of config is DictConfig, if we assign config.profiler with ProfilerConfig, # it will actually convert the ProfilerConfig dataclass back to a DictConfig. 
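# (Illustrative: the omega_conf_to_dataclass(omega_profiler_config, dataclass_type=ProfilerConfig) call below performs exactly this DictConfig -> dataclass conversion, with validation in __post_init__.)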
# We can still use ProfilerConfig for testing purposes (tests/utils/test_nvtx_profile.py) # as they provide a DictConfig-like interface # The benefit of creating the dataclass config is to perform validation during __post_init__ if self._is_actor: omega_profiler_config = config.actor.get("profiler", {}) elif self._is_rollout: # NOTE: In colocation mode, rollout config may not take effect (it follows the actor config) # This is for extendability in AsyncRL cases omega_profiler_config = config.rollout.get("profiler", {}) elif self._is_ref: omega_profiler_config = config.ref.get("profiler", {}) else: raise ValueError( f"Invalid role {self.role}, should be one of " "['actor', 'rollout', 'ref', 'actor_rollout', 'actor_rollout_ref']" ) # omega_profiler_config is DictConfig # profiler_config is a ProfilerConfig dataclass profiler_config = omega_conf_to_dataclass(omega_profiler_config, dataclass_type=ProfilerConfig) if omega_profiler_config.get("tool", None) in ["npu", "nsys", "torch", "torch_memory"]: tool_config = omega_conf_to_dataclass( omega_profiler_config.get("tool_config", {}).get(omega_profiler_config.get("tool")) ) else: tool_config = None DistProfilerExtension.__init__( self, DistProfiler(rank=self.rank, config=profiler_config, tool_config=tool_config) ) self._is_offload_param = False self._is_offload_optimizer = False if self._is_actor: self._is_offload_param = self.config.actor.fsdp_config.get("param_offload", False) self._is_offload_optimizer = self.config.actor.fsdp_config.get("optimizer_offload", False) elif self._is_ref: # TODO: it seems that manual offload is slower than FSDP offload self._is_offload_param = self.config.ref.fsdp_config.get("param_offload", False) # normalize config if self._is_actor: self.config.actor.ppo_mini_batch_size *= self.config.rollout.n self.config.actor.ppo_mini_batch_size //= self.device_mesh.size() // self.ulysses_sequence_parallel_size assert self.config.actor.ppo_mini_batch_size > 0, ( f"ppo_mini_batch_size {self.config.actor.ppo_mini_batch_size} should be larger than 0 after " f"normalization" ) # micro bsz if self.config.actor.ppo_micro_batch_size is not None: self.config.actor.ppo_micro_batch_size //= ( self.device_mesh.size() // self.ulysses_sequence_parallel_size ) self.config.actor.ppo_micro_batch_size_per_gpu = self.config.actor.ppo_micro_batch_size if self.config.actor.ppo_micro_batch_size_per_gpu is not None: assert self.config.actor.ppo_mini_batch_size % self.config.actor.ppo_micro_batch_size_per_gpu == 0, ( f"normalized ppo_mini_batch_size {self.config.actor.ppo_mini_batch_size} should be divisible by " f"ppo_micro_batch_size_per_gpu {self.config.actor.ppo_micro_batch_size_per_gpu}" ) assert self.config.actor.ppo_mini_batch_size // self.config.actor.ppo_micro_batch_size_per_gpu > 0, ( f"normalized ppo_mini_batch_size {self.config.actor.ppo_mini_batch_size} should be larger than " f"ppo_micro_batch_size_per_gpu {self.config.actor.ppo_micro_batch_size_per_gpu}" ) # normalize rollout config if self._is_rollout and self.config.rollout.log_prob_micro_batch_size is not None: self.config.rollout.log_prob_micro_batch_size //= ( self.device_mesh.size() // self.ulysses_sequence_parallel_size ) self.config.rollout.log_prob_micro_batch_size_per_gpu = self.config.rollout.log_prob_micro_batch_size # normalize ref config if self._is_ref and self.config.ref.log_prob_micro_batch_size is not None: self.config.ref.log_prob_micro_batch_size //= self.device_mesh.size() // self.ulysses_sequence_parallel_size self.config.ref.log_prob_micro_batch_size_per_gpu =
self.config.ref.log_prob_micro_batch_size def _build_model_optimizer( self, model_path, fsdp_config: FSDPEngineConfig, optim_config, override_model_config, use_remove_padding=False, use_fused_kernels=False, enable_gradient_checkpointing=False, trust_remote_code=False, use_liger=False, role="actor", enable_activation_offload=False, ): from torch.distributed.fsdp import CPUOffload, MixedPrecision from transformers import (AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForImageTextToText, AutoModelForVision2Seq) from verl.utils.model import (get_generation_config, print_model_size, update_model_config) from verl.utils.torch_dtypes import PrecisionType assert role in ["actor", "ref"] log_gpu_memory_usage(f"Before init {role} from HF AutoModel", logger=logger) local_path = model_path # note that we have to create model in fp32. Otherwise, the optimizer is in bf16, which is incorrect # TODO(zhangchi.usc1992): 1. support create from random initialized model. 2. Support init with FSDP directly self.tokenizer = hf_tokenizer(local_path, trust_remote_code=trust_remote_code) self.processor = hf_processor(local_path, trust_remote_code=trust_remote_code) if self.config.model.get("custom_chat_template", None) is not None: if self.processor is not None: self.processor.chat_template = self.config.model.custom_chat_template else: self.tokenizer.chat_template = self.config.model.custom_chat_template torch_dtype = fsdp_config.get("model_dtype", None) if torch_dtype is None: torch_dtype = torch.float32 if self._is_actor else torch.bfloat16 else: torch_dtype = PrecisionType.to_dtype(torch_dtype) # override model kwargs attn_implementation = override_model_config.get("attn_implementation", "flash_attention_2") actor_model_config = AutoConfig.from_pretrained( local_path, trust_remote_code=trust_remote_code, attn_implementation=attn_implementation ) # TODO: VL models use VisionAttention, which directly uses flash_attention in transformers>=4.53 # which will be patched by _ulysses_flash_attention_forward, but errorly misses position_ids # Maybe support Ulysses in VisionAttention in the future and remove this patch if self.ulysses_sequence_parallel_size > 1 and hasattr(actor_model_config, "vision_config"): actor_model_config.vision_config._attn_implementation = "eager" # patch for kimi-vl if getattr(actor_model_config, "model_type", None) == "kimi_vl": actor_model_config.text_config.topk_method = "greedy" self.generation_config = get_generation_config(local_path, trust_remote_code=trust_remote_code) override_config_kwargs = { "bos_token_id": self.tokenizer.bos_token_id, "eos_token_id": self.tokenizer.eos_token_id, "pad_token_id": self.tokenizer.pad_token_id, } override_config_kwargs.update(override_model_config) update_model_config(actor_model_config, override_config_kwargs=override_config_kwargs) if self.rank == 0: print(f"Model config after override: {actor_model_config}") # NOTE(fix me): tie_word_embedding causes meta_tensor init to hang init_context = get_init_weight_context_manager( use_meta_tensor=not actor_model_config.tie_word_embeddings, mesh=self.device_mesh ) with init_context(), warnings.catch_warnings(): warnings.simplefilter("ignore") has_remote_code = hasattr(actor_model_config, "auto_map") and any( actor_model_config.architectures[0] in val for val in actor_model_config.auto_map.values() ) if has_remote_code: auto_class = next( k for k, v in actor_model_config.auto_map.items() if actor_model_config.architectures[0] in v ) match auto_class: case "AutoModelForVision2Seq": actor_module_class = 
AutoModelForVision2Seq case "AutoModelForCausalLM": actor_module_class = AutoModelForCausalLM case "AutoModelForImageTextToText": actor_module_class = AutoModelForImageTextToText case _: actor_module_class = AutoModel else: if type(actor_model_config) in AutoModelForVision2Seq._model_mapping.keys(): actor_module_class = AutoModelForVision2Seq elif type(actor_model_config) in AutoModelForCausalLM._model_mapping.keys(): actor_module_class = AutoModelForCausalLM elif type(actor_model_config) in AutoModelForImageTextToText._model_mapping.keys(): actor_module_class = AutoModelForImageTextToText else: actor_module_class = AutoModel actor_module = actor_module_class.from_pretrained( pretrained_model_name_or_path=local_path, torch_dtype=torch_dtype, config=actor_model_config, trust_remote_code=trust_remote_code, attn_implementation=attn_implementation, ) # Apply Liger kernel to the model if use_liger is set to True if use_liger: from liger_kernel.transformers.monkey_patch import \ _apply_liger_kernel_to_instance _apply_liger_kernel_to_instance(model=actor_module) fused_kernel_options = self.config.model.get("fused_kernel_options", None) fused_kernels_backend = ( fused_kernel_options.get("impl_backend", None) if fused_kernel_options is not None else None ) apply_monkey_patch( model=actor_module, use_remove_padding=use_remove_padding, ulysses_sp_size=self.ulysses_sequence_parallel_size, use_fused_kernels=use_fused_kernels, fused_kernels_backend=fused_kernels_backend, ) # some parameters may not in torch_dtype. TODO(zhangchi.usc1992) remove this after we switch to fsdp2 actor_module.to(torch_dtype) if enable_gradient_checkpointing: actor_module.gradient_checkpointing_enable(gradient_checkpointing_kwargs={"use_reentrant": False}) if self._is_lora: print("Applying LoRA to actor module") actor_module.enable_input_require_grads() lora_adapter_path = self.config.model.get("lora_adapter_path") if lora_adapter_path is not None: from peft import PeftModel print(f"Loading pre-trained LoRA adapter to {role} from: {lora_adapter_path}") # Copy adapter to local if needed local_adapter_path = copy_to_local(lora_adapter_path, use_shm=self.config.model.get("use_shm", False)) actor_module = PeftModel.from_pretrained(actor_module, local_adapter_path, is_trainable=True) peft_config = actor_module.peft_config["default"] # Ensure task_type is TaskType enum, not string if isinstance(peft_config.task_type, str): peft_config.task_type = TaskType.CAUSAL_LM else: # Convert config to regular Python types before creating PEFT model lora_config = { "task_type": TaskType.CAUSAL_LM, "r": self.config.model.lora_rank, "lora_alpha": self.config.model.lora_alpha, "target_modules": convert_to_regular_types(self.config.model.target_modules), "exclude_modules": convert_to_regular_types(self.config.model.exclude_modules), "bias": "none", } actor_module = get_peft_model(actor_module, LoraConfig(**lora_config)) self.use_orig_params = fsdp_config.get("use_orig_params", False) if self.config.actor.get("freeze_vision_tower", False): vision_tower = get_vl_model_vision_tower(actor_module) if vision_tower is not None: vision_tower.requires_grad_(False) self.use_orig_params = True if self.rank == 0: print("[actor model] Vision tower is set to not trainable.") else: if self.rank == 0: print("[actor model] No vision tower found.") torch.distributed.barrier() if self.rank == 0: print_model_size(actor_module) log_gpu_memory_usage(f"After init {role} from HF AutoModel", logger=logger) # We wrap FSDP for rollout as well mixed_precision_config = 
fsdp_config.get("mixed_precision", None) if mixed_precision_config is not None: param_dtype = PrecisionType.to_dtype(mixed_precision_config.get("param_dtype", "bf16")) reduce_dtype = PrecisionType.to_dtype(mixed_precision_config.get("reduce_dtype", "fp32")) buffer_dtype = PrecisionType.to_dtype(mixed_precision_config.get("buffer_dtype", "fp32")) else: param_dtype = torch.bfloat16 reduce_dtype = torch.float32 buffer_dtype = torch.float32 mixed_precision = MixedPrecision(param_dtype=param_dtype, reduce_dtype=reduce_dtype, buffer_dtype=buffer_dtype) auto_wrap_policy = get_fsdp_wrap_policy( module=actor_module, config=fsdp_config.get("wrap_policy", None), is_lora=self._is_lora, ) if self._is_rollout and self.config.rollout.name == "hf": # TODO(zhangchi.usc1992, shengguangming) fix me. Current, auto_wrap_policy causes HFRollout to hang in Gemma auto_wrap_policy = None if self.rank == 0: print(f"wrap_policy: {auto_wrap_policy}") fsdp_mesh = self.device_mesh sharding_strategy = get_sharding_strategy(fsdp_mesh) # TODO: add transformer policy # We force reference policy to use CPUOffload to save memory. # We force turn off CPUOffload for actor because it causes incorrect results when using grad accumulation cpu_offload = None if role == "actor" else CPUOffload(offload_params=True) fsdp_strategy = self.config.actor.strategy if fsdp_strategy == "fsdp": actor_module_fsdp = FSDP( actor_module, cpu_offload=cpu_offload, param_init_fn=init_fn, auto_wrap_policy=auto_wrap_policy, device_id=get_device_id(), sharding_strategy=sharding_strategy, # zero3 mixed_precision=mixed_precision, sync_module_states=True, device_mesh=self.device_mesh, use_orig_params=self.use_orig_params, forward_prefetch=fsdp_config.get("forward_prefetch", False), ) elif fsdp_strategy == "fsdp2": assert CPUOffloadPolicy is not None, "PyTorch version >= 2.4 is required for using fully_shard API (FSDP2)" mp_policy = MixedPrecisionPolicy( param_dtype=param_dtype, reduce_dtype=reduce_dtype, cast_forward_inputs=True ) if role == "actor" and fsdp_config.offload_policy: cpu_offload = CPUOffloadPolicy(pin_memory=True) self._is_offload_param = False self._is_offload_optimizer = False else: cpu_offload = None if role == "actor" else CPUOffloadPolicy(pin_memory=True) fsdp_kwargs = { "mesh": fsdp_mesh, "mp_policy": mp_policy, "offload_policy": cpu_offload, "reshard_after_forward": fsdp_config.reshard_after_forward, "shard_placement_fn": get_shard_placement_fn(fsdp_size=self.device_mesh.shape[-1]), } full_state = actor_module.state_dict() apply_fsdp2(actor_module, fsdp_kwargs, fsdp_config) fsdp2_load_full_state_dict(actor_module, full_state, fsdp_mesh, cpu_offload) actor_module_fsdp = actor_module else: raise NotImplementedError(f"not implement {fsdp_strategy}") if enable_activation_offload: enable_activation_offloading(actor_module_fsdp, fsdp_strategy, enable_gradient_checkpointing) log_gpu_memory_usage(f"After {role} FSDP init", logger=logger) # TODO: add more optimizer args into config if role == "actor" and optim_config is not None: from verl.utils.torch_functional import ( get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup) actor_optimizer = build_optimizer(actor_module_fsdp.parameters(), optim_config) total_steps = optim_config.get("total_training_steps", 0) num_warmup_steps = int(optim_config.get("lr_warmup_steps", -1)) lr_scheduler_type = optim_config.get("lr_scheduler_type", "constant") min_lr_ratio = optim_config.get("min_lr_ratio", 0.0) num_cycles = optim_config.get("num_cycles", 0.5) if num_warmup_steps < 0: 
num_warmup_steps_ratio = optim_config.get("lr_warmup_steps_ratio", 0.0) num_warmup_steps = int(num_warmup_steps_ratio * total_steps) if self.rank == 0: print(f"Total steps: {total_steps}, num_warmup_steps: {num_warmup_steps}") if lr_scheduler_type == "constant": actor_lr_scheduler = get_constant_schedule_with_warmup( optimizer=actor_optimizer, num_warmup_steps=num_warmup_steps ) elif lr_scheduler_type == "cosine": actor_lr_scheduler = get_cosine_schedule_with_warmup( optimizer=actor_optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=total_steps, min_lr_ratio=min_lr_ratio, num_cycles=num_cycles, ) else: raise NotImplementedError(f"LR scheduler type {lr_scheduler_type} is not supported") log_gpu_memory_usage(f"After {role} optimizer init", logger=logger) else: actor_optimizer = None actor_lr_scheduler = None return actor_module_fsdp, actor_optimizer, actor_lr_scheduler, actor_model_config def _build_rollout(self, trust_remote_code=False): from torch.distributed.device_mesh import init_device_mesh # 1. parse rollout and huggingface model config rollout_config: RolloutConfig = omega_conf_to_dataclass(self.config.rollout) model_config: HFModelConfig = omega_conf_to_dataclass(self.config.model, dataclass_type=HFModelConfig) self.model_config = model_config # 2. build rollout device mesh infer_tp = self.config.rollout.tensor_model_parallel_size * self.config.rollout.data_parallel_size infer_pp = self.config.rollout.pipeline_model_parallel_size infer_world_size = infer_tp * infer_pp dp = self.world_size // infer_world_size assert self.world_size % infer_world_size == 0, ( f"rollout world_size: {self.world_size} is not divisible by infer_world_size: {infer_world_size}" ) rollout_device_mesh = init_device_mesh( device_name, mesh_shape=(dp, infer_tp, infer_pp), mesh_dim_names=["dp", "infer_tp", "infer_pp"] ) rollout_name = self.config.rollout.name if rollout_name == "hf": self._register_dispatch_collect_info("rollout", dp_rank=self.rank, is_collect=True) else: is_collect = ( rollout_device_mesh["infer_tp"].get_local_rank() == 0 and rollout_device_mesh["infer_pp"].get_local_rank() == 0 ) self._register_dispatch_collect_info( "rollout", dp_rank=rollout_device_mesh["dp"].get_local_rank(), is_collect=is_collect ) # 3. init trainer and rollout random states self.torch_random_states = get_torch_device().get_rng_state() gen_dp_rank = rollout_device_mesh["dp"].get_local_rank() get_torch_device().manual_seed(gen_dp_rank + 1000) # make sure all tp ranks have the same random states self.gen_random_states = get_torch_device().get_rng_state() get_torch_device().set_rng_state(self.torch_random_states) # 4. 
build rollout model log_gpu_memory_usage(f"Before building {self.config.rollout.name} rollout", logger=logger) self.rollout = get_rollout_class(rollout_config.name, rollout_config.mode)( config=rollout_config, model_config=model_config, device_mesh=rollout_device_mesh ) log_gpu_memory_usage(f"After building {self.config.rollout.name} rollout", logger=logger) # Full params if torch.distributed.get_world_size() == 1 and fsdp_version(self.actor_module_fsdp) == 1: FSDP.set_state_dict_type( self.actor_module_fsdp, state_dict_type=StateDictType.FULL_STATE_DICT, state_dict_config=FullStateDictConfig(), ) elif fsdp_version(self.actor_module_fsdp) == 1: FSDP.set_state_dict_type( self.actor_module_fsdp, state_dict_type=StateDictType.SHARDED_STATE_DICT, state_dict_config=ShardedStateDictConfig(), ) # used for LoRA self.base_sync_done: bool = "dummy" not in self.config.rollout.load_format self.layered_summon = self.config.rollout.get("layered_summon", False) # 5. switch to trainer mode # NOTE: It's critical that hybrid engine in trainer mode initially to load checkpoint. # For sync mode, we directly switch to trainer mode here. # For async mode, we can't call run_until_complete here, so we will switch to trainer mode in AgentLoopManager. if rollout_config.mode == "sync" and self._is_actor: loop = get_event_loop() loop.run_until_complete(self.trainer_mode()) async def rollout_mode(self): """Context switch hybridengine to rollout mode.""" aggressive_empty_cache(force_sync=True) log_gpu_memory_usage("Before load_fsdp_model_to_gpu", logger=logger) if self._is_offload_param: load_fsdp_model_to_gpu(self.actor_module_fsdp) log_gpu_memory_usage("After load_fsdp_model_to_gpu", logger=logger) peft_config = None peft_model = getattr(self.actor_module_fsdp, "_fsdp_wrapped_module", self.actor_module_fsdp) if hasattr(peft_model, "peft_config"): # LoRA peft_config = peft_model.peft_config.get("default", None) params = collect_lora_params( module=self.actor_module_fsdp, layered_summon=self.config.rollout.get("layered_summon", False), base_sync_done=self.base_sync_done, ) if not self.base_sync_done: params = {replace_lora_wrapper(k, peft_config): v for k, v in params.items()} else: params = self.actor_module_fsdp.state_dict() params = convert_weight_keys( params, getattr(self.actor_module_fsdp, "_fsdp_wrapped_module", self.actor_module_fsdp) ) # Special handling for LoRA with sleep_level=2: # When sleep_level=2, base model weights are destroyed during each sleep cycle. # separately collect and update LoRA weights and base model weights through their respective interfaces. # Here: params contains LoRA weights, base_model_params contains base model weights. 
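# Sketch of the two-phase weight sync handled below (sleep_level=2 LoRA case):
# base weights are pushed first via update_weights(..., base_sync_done=False) so
# the rollout engine can rebuild the destroyed base model, and the LoRA weights
# are then pushed with the peft_config and applied on top.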
if peft_config is not None and getattr(self.rollout, "sleep_level", None) == 2: base_model_params = collect_lora_params( module=self.actor_module_fsdp, layered_summon=self.layered_summon, base_sync_done=False, ) base_model_params = {replace_lora_wrapper(k, peft_config): v for k, v in base_model_params.items()} base_model_params = convert_weight_keys( base_model_params, getattr(self.actor_module_fsdp, "_fsdp_wrapped_module", self.actor_module_fsdp) ) log_gpu_memory_usage("Before offload_fsdp_model_to_cpu", logger=logger) if self._is_offload_param: offload_fsdp_model_to_cpu(self.actor_module_fsdp) log_gpu_memory_usage("After offload_fsdp_model_to_cpu", logger=logger) set_expandable_segments(False) if peft_config is not None and self.base_sync_done: per_tensor_param = params.items() if isinstance(params, dict) else params # Fixed: handle dict case else: device = get_device_id() # used when fsdp2 set cpu_offload_policy per_tensor_param = ( (name, param.to(device, non_blocking=True).full_tensor() if isinstance(param, DTensor) else param) for name, param in params.items() ) if self.config.rollout.free_cache_engine: await self.rollout.resume(tags=["weights"]) log_gpu_memory_usage("After resume weights", logger=logger) if peft_config is not None and getattr(self.rollout, "sleep_level", None) == 2: per_tensor_base_params = ( (name, param.to(device, non_blocking=True).full_tensor() if isinstance(param, DTensor) else param) for name, param in base_model_params.items() ) await self.rollout.update_weights(per_tensor_base_params, base_sync_done=False) del base_model_params, per_tensor_base_params await self.rollout.update_weights(per_tensor_param, peft_config=peft_config, base_sync_done=self.base_sync_done) log_gpu_memory_usage("After update_weights", logger=logger) del params, per_tensor_param aggressive_empty_cache(force_sync=True) if self.config.rollout.free_cache_engine: await self.rollout.resume(tags=["kv_cache"]) log_gpu_memory_usage("After resume kv_cache", logger=logger) self.base_sync_done = True # important: need to manually set the random states of each tp to be identical. 
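# The two statements below save the trainer RNG state and restore the generation
# RNG state captured during init, keeping sampling identical across tp ranks.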
self.torch_random_states = get_torch_device().get_rng_state() get_torch_device().set_rng_state(self.gen_random_states) async def trainer_mode(self): """Context switch hybridengine to trainer mode.""" if self.config.rollout.free_cache_engine: log_gpu_memory_usage("Before rollout offload", logger=logger) await self.rollout.release() log_gpu_memory_usage("After rollout offload", logger=logger) self.actor_module_fsdp.train() # add empty cache after each compute aggressive_empty_cache(force_sync=True) set_expandable_segments(True) # restore random states self.gen_random_states = get_torch_device().get_rng_state() get_torch_device().set_rng_state(self.torch_random_states) @register(dispatch_mode=Dispatch.ONE_TO_ALL) def init_model(self): from verl.workers.actor import DataParallelPPOActor # This is used to import external_lib into the huggingface systems import_external_libs(self.config.model.get("external_lib", None)) override_model_config = OmegaConf.to_container(OmegaConf.create(self.config.model.get("override_config", {}))) use_remove_padding = self.config.model.get("use_remove_padding", False) use_shm = self.config.model.get("use_shm", False) use_fused_kernels = self.config.model.get("use_fused_kernels", False) if self._is_actor or self._is_rollout: # we need the model for actor and rollout if self._is_actor: optim_config = self.config.actor.optim fsdp_config = omega_conf_to_dataclass(self.config.actor.fsdp_config) else: optim_config = None fsdp_config = FSDPEngineConfig() local_path = copy_to_local(self.config.model.path, use_shm=use_shm) ( self.actor_module_fsdp, self.actor_optimizer, self.actor_lr_scheduler, self.actor_model_config, ) = self._build_model_optimizer( model_path=local_path, fsdp_config=fsdp_config, optim_config=optim_config, override_model_config=override_model_config, use_remove_padding=use_remove_padding, use_fused_kernels=use_fused_kernels, enable_gradient_checkpointing=self.config.model.get("enable_gradient_checkpointing", False), trust_remote_code=self.config.model.get("trust_remote_code", False), use_liger=self.config.model.get("use_liger", False), role="actor", enable_activation_offload=self.config.model.get("enable_activation_offload", False), ) # get the original unwrapped module if fsdp_version(self.actor_module_fsdp) == 1: self.actor_module = self.actor_module_fsdp._fsdp_wrapped_module if self._is_offload_param: offload_fsdp_model_to_cpu(self.actor_module_fsdp) log_gpu_memory_usage("After offload actor model during init", logger=logger) if self._is_offload_optimizer: offload_fsdp_optimizer(optimizer=self.actor_optimizer) log_gpu_memory_usage("After offload actor optimizer during init", logger=logger) if self._is_actor: actor_cfg = omega_conf_to_dataclass(self.config.actor) self.actor = DataParallelPPOActor( config=actor_cfg, actor_module=self.actor_module_fsdp, actor_optimizer=self.actor_optimizer ) if self._is_rollout: self._build_rollout(trust_remote_code=self.config.model.get("trust_remote_code", False)) if self._is_ref: ref_model_path = self.config.model.path ref_model = self.config.ref.get("model", None) if ref_model is not None: ref_model_path = ref_model.get("path", self.config.model.path) if self.rank == 0: print("reference model:", ref_model_path) local_path = copy_to_local(ref_model_path, use_shm=use_shm) self.ref_module_fsdp = self._build_model_optimizer( model_path=local_path, fsdp_config=omega_conf_to_dataclass(self.config.ref.fsdp_config), optim_config=None, override_model_config=override_model_config, use_remove_padding=use_remove_padding, 
use_fused_kernels=use_fused_kernels, trust_remote_code=self.config.model.get("trust_remote_code", False), use_liger=self.config.model.get("use_liger", False), role="ref", )[0] OmegaConf.set_struct(self.config.ref, True) with open_dict(self.config.ref): self.config.ref.use_remove_padding = use_remove_padding self.config.ref.use_fused_kernels = use_fused_kernels self.ref_policy = DataParallelPPOActor(config=self.config.ref, actor_module=self.ref_module_fsdp) if self._is_actor: self.flops_counter = FlopsCounter(self.actor_model_config) self.checkpoint_manager = FSDPCheckpointManager( model=self.actor_module_fsdp, optimizer=self.actor.actor_optimizer, lr_scheduler=self.actor_lr_scheduler, processing_class=self.processor if self.processor is not None else self.tokenizer, checkpoint_config=self.config.actor.checkpoint, ) if not self._is_actor and self._is_rollout: # If ActorRolloutRefWorker is initialized as a standalone rollout, # create a checkpoint manager for FSDP model to allow loading FSDP checkpoints for rollout. checkpoint_contents = OmegaConf.create({"load_contents": ["model"], "save_contents": []}) self.checkpoint_manager = FSDPCheckpointManager( model=self.actor_module_fsdp, optimizer=None, lr_scheduler=None, processing_class=self.processor if self.processor is not None else self.tokenizer, checkpoint_config=checkpoint_contents, ) @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="actor")) @DistProfiler.annotate(color="red", role="actor_update") def update_actor(self, data: DataProto): assert self._is_actor if self._is_offload_param: load_fsdp_model_to_gpu(self.actor_module_fsdp) if self._is_offload_optimizer: load_fsdp_optimizer(optimizer=self.actor_optimizer, device_id=get_device_id()) with self.ulysses_sharding_manager: data = data.to("cpu") # data will to device with each micro batch on actor.update_policy # perform training with Timer(name="update_policy", logger=None) as timer: metrics = self.actor.update_policy(data=data) delta_time = timer.last global_num_tokens = data.meta_info["global_token_num"] estimated_flops, promised_flops = self.flops_counter.estimate_flops(global_num_tokens, delta_time) metrics["perf/mfu/actor"] = ( estimated_flops * self.config.actor.ppo_epochs / promised_flops / self.world_size ) metrics["perf/max_memory_allocated_gb"] = get_torch_device().max_memory_allocated() / (1024**3) metrics["perf/max_memory_reserved_gb"] = get_torch_device().max_memory_reserved() / (1024**3) metrics["perf/cpu_memory_used_gb"] = psutil.virtual_memory().used / (1024**3) lr = self.actor_lr_scheduler.get_last_lr()[0] metrics["actor/lr"] = lr.item() if torch.is_tensor(lr) else lr self.actor_lr_scheduler.step() # TODO: here, we should return all metrics output = DataProto(meta_info={"metrics": metrics}) output = output.to("cpu") if self._is_offload_param: offload_fsdp_model_to_cpu(self.actor_module_fsdp) log_gpu_memory_usage("After offload actor model during update_actor", logger=logger) if self._is_offload_optimizer: offload_fsdp_optimizer(optimizer=self.actor_optimizer) log_gpu_memory_usage("After offload actor optimizer during update_actor", logger=logger) return output @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="rollout")) @DistProfiler.annotate(color="red", role="rollout_generate") def generate_sequences(self, prompts: DataProto): # Support all hardwares assert self._is_rollout prompts = prompts.to(get_device_id()) meta_info = { "eos_token_id": self.generation_config.eos_token_id if self.generation_config is not None else 
self.tokenizer.eos_token_id, "pad_token_id": self.generation_config.pad_token_id if self.generation_config is not None else self.tokenizer.pad_token_id, } prompts.meta_info.update(meta_info) timing_generate = {} if self._is_actor: # For rollout only, we do not switch context. loop = get_event_loop() loop.run_until_complete(self.rollout_mode()) log_gpu_memory_usage("After switch to rollout mode", logger=logger) with simple_timer("generate_sequences", timing_generate): output = self.rollout.generate_sequences(prompts=prompts) if self._is_actor: loop.run_until_complete(self.trainer_mode()) log_gpu_memory_usage("After switch to trainer mode", logger=logger) # We calculate the average timing across all ranks # to make sure meta_info["timing"] is the same timing_generate_topk_ratio, timing_generate_min, timing_generate_max = topk_reduce_ratio_min_max( timing_generate["generate_sequences"] ) timing_generate = reduce_timing(timing_generate) timing_generate.update( { "generation_timing/max": timing_generate_max, "generation_timing/min": timing_generate_min, "generation_timing/topk_ratio": timing_generate_topk_ratio, } ) output.meta_info["timing"] = timing_generate output = output.to("cpu") # clear kv cache get_torch_device().empty_cache() return output @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="actor")) @DistProfiler.annotate(color="blue", role="actor_compute_log_prob") def compute_log_prob(self, data: DataProto): # when is_lora is True, we use the actor without lora applied to calculate the log_prob # which is mostly used for ref log_prob calculation assert self._is_actor if self._is_offload_param: load_fsdp_model_to_gpu(self.actor_module_fsdp) # Support all hardwares from contextlib import nullcontext is_lora = data.meta_info.pop("is_lora", False) adapter_ctx = self.actor.actor_module.disable_adapter() if is_lora else nullcontext() # we should always recompute old_log_probs when it is HybridEngine data.meta_info["micro_batch_size"] = self.config.rollout.log_prob_micro_batch_size_per_gpu data.meta_info["max_token_len"] = self.config.rollout.log_prob_max_token_len_per_gpu data.meta_info["use_dynamic_bsz"] = self.config.rollout.log_prob_use_dynamic_bsz data.meta_info["temperature"] = self.config.rollout.temperature # perform recompute log_prob with self.ulysses_sharding_manager: with adapter_ctx: output, entropys = self.actor.compute_log_prob(data=data, calculate_entropy=True) output = DataProto.from_dict( tensors={"old_log_probs": output, "entropys": entropys}, meta_info={"temperature": self.config.rollout.temperature}, ) output = output.to("cpu") # https://pytorch.org/docs/stable/notes/fsdp.html#fsdp-notes # unshard the root FSDP module if self.world_size > 1 and fsdp_version(self.actor.actor_module) == 1: self.actor.actor_module._handle.reshard(True) if self._is_offload_param: offload_fsdp_model_to_cpu(self.actor_module_fsdp) log_gpu_memory_usage("After offload actor model during compute_log_prob", logger=logger) return output @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="actor")) @DistProfiler.annotate(color="olive", role="ref_compute_log_prob") def compute_ref_log_prob(self, data: DataProto): if self._is_lora: # if _is_lora, actor without lora applied is the ref data.meta_info["is_lora"] = True data = self.compute_log_prob(data) # this old_log_probs is in fact ref_log_prob data = DataProto.from_dict(tensors={"ref_log_prob": data.batch["old_log_probs"]}) return data assert self._is_ref # else: # otherwise, the class have a standalone ref 
model micro_batch_size = self.config.ref.log_prob_micro_batch_size_per_gpu data.meta_info["micro_batch_size"] = micro_batch_size data.meta_info["temperature"] = self.config.rollout.temperature data.meta_info["max_token_len"] = self.config.ref.log_prob_max_token_len_per_gpu data.meta_info["use_dynamic_bsz"] = self.config.ref.log_prob_use_dynamic_bsz with self.ulysses_sharding_manager: data = data.to("cpu") # data will to device with each micro batch on ref.compute_log_prob output, _ = self.ref_policy.compute_log_prob(data=data, calculate_entropy=False, mask_special_token=True) output = DataProto.from_dict(tensors={"ref_log_prob": output}) output = output.to("cpu") # https://pytorch.org/docs/stable/notes/fsdp.html#fsdp-notes # unshard the root FSDP module if self.world_size > 1: if fsdp_version(self.ref_policy.actor_module) == 1: self.ref_policy.actor_module._handle.reshard(True) elif fsdp_version(self.ref_policy.actor_module) == 2: self.ref_policy.actor_module.reshard() return output @register(dispatch_mode=Dispatch.ONE_TO_ALL) def save_checkpoint(self, local_path, hdfs_path=None, global_step=0, max_ckpt_to_keep=None): from verl.utils.logger import log_with_rank # only support save and load ckpt for actor assert self._is_actor if self._is_offload_param: load_fsdp_model_to_gpu(self.actor_module_fsdp) self.checkpoint_manager.save_checkpoint( local_path=local_path, hdfs_path=hdfs_path, global_step=global_step, max_ckpt_to_keep=max_ckpt_to_keep ) dist.barrier() if self._is_lora and hasattr(getattr(self, "actor_module", self.actor_module_fsdp), "peft_config"): lora_save_path = os.path.join(local_path, "lora_adapter") peft_model = getattr(self, "actor_module", self.actor_module_fsdp) peft_config = {} if dist.get_rank() == 0: os.makedirs(lora_save_path, exist_ok=True) peft_config = asdict(peft_model.peft_config.get("default", {})) peft_config["task_type"] = peft_config["task_type"].value peft_config["peft_type"] = peft_config["peft_type"].value peft_config["target_modules"] = list(peft_config["target_modules"]) try: if fsdp_version(self.actor_module_fsdp) > 0: self.actor_module_fsdp = self.actor_module_fsdp.to(get_device_name()) lora_params = layered_summon_lora_params(self.actor_module_fsdp) if dist.get_rank() == 0: save_file(lora_params, os.path.join(lora_save_path, "adapter_model.safetensors")) with open(os.path.join(lora_save_path, "adapter_config.json"), "w", encoding="utf-8") as f: json.dump(peft_config, f, ensure_ascii=False, indent=4) except Exception as e: log_with_rank( f"Save LoRA Adapter Error ({e})", rank=dist.get_rank(), logger=logger, log_only_rank_0=True ) dist.barrier() log_with_rank( f"[rank-{self.rank}]: Saved LoRA adapter to: {lora_save_path}", rank=dist.get_rank(), logger=logger, log_only_rank_0=True, ) if self._is_offload_param: offload_fsdp_model_to_cpu(self.actor_module_fsdp) @register(dispatch_mode=Dispatch.ONE_TO_ALL) def load_checkpoint(self, local_path, hdfs_path=None, del_local_after_load=False): assert self._is_actor or (not self._is_actor and self._is_rollout), ( f"Checkpoint loading is only supported for Actor or standalone Rollout Workers, but got " f"{self._is_actor} and {self._is_rollout}" ) # No checkpoint to load, just offload the model and optimizer to CPU if local_path is None: if self._is_offload_param: offload_fsdp_model_to_cpu(self.actor_module_fsdp) if self._is_offload_optimizer: offload_fsdp_optimizer(self.actor_optimizer) return if self._is_offload_param: load_fsdp_model_to_gpu(self.actor_module_fsdp) self.checkpoint_manager.load_checkpoint( 
local_path=local_path, hdfs_path=hdfs_path, del_local_after_load=del_local_after_load ) if self._is_offload_param: offload_fsdp_model_to_cpu(self.actor_module_fsdp) if self._is_offload_optimizer: offload_fsdp_optimizer(self.actor_optimizer) @register(dispatch_mode=Dispatch.ONE_TO_ALL) def start_profile(self, **kwargs) -> None: """Start profiling for the current rank in the current training step.""" self.profiler.start(**kwargs) @register(dispatch_mode=Dispatch.ONE_TO_ALL) def stop_profile(self) -> None: """Stop profiling for the current rank in the current training step.""" self.profiler.stop() @register(dispatch_mode=Dispatch.ONE_TO_ALL) def dump_memory_snapshot(self, tag: str = "manual", sub_dir: str = None) -> None: """Manually trigger a CUDA memory snapshot dump on all ranks.""" # Memory snapshot is now handled by the profiler system # This method is kept for backward compatibility but delegates to profiler if hasattr(self, "profiler") and hasattr(self.profiler, "_impl"): try: # Try to use the profiler's memory snapshot functionality if hasattr(self.profiler._impl, "sampler"): out_dir = OmegaConf.select(self.config, "actor.profiler.save_path") or "." self.profiler._impl.sampler.dump_memory_snapshot(out_dir=out_dir, tag=tag, sub_dir=sub_dir) except Exception: # silently ignore if profiler doesn't support memory snapshots pass class CriticWorker(Worker, DistProfilerExtension): def __init__(self, config: FSDPCriticConfig): Worker.__init__(self) omega_profiler_config = config.get("profiler", {}) profiler_config = omega_conf_to_dataclass(omega_profiler_config, dataclass_type=ProfilerConfig) if omega_profiler_config.get("tool", None) in ["npu", "nsys", "torch", "torch_memory"]: tool_config = omega_conf_to_dataclass( omega_profiler_config.get("tool_config", {}).get(omega_profiler_config.get("tool")) ) else: tool_config = None DistProfilerExtension.__init__( self, DistProfiler(rank=self.rank, config=profiler_config, tool_config=tool_config) ) import torch.distributed self.config = config if not torch.distributed.is_initialized(): torch.distributed.init_process_group( backend=get_nccl_backend(), timeout=datetime.timedelta(seconds=self.config.get("nccl_timeout", 600)), init_method=os.environ.get("DIST_INIT_METHOD", None), ) self.config: FSDPCriticConfig = config # build device mesh for Ulysses Sequence Parallel world_size = torch.distributed.get_world_size() from torch.distributed.device_mesh import init_device_mesh fsdp_size = self.config.model.fsdp_config.fsdp_size self.device_mesh = create_device_mesh(world_size=world_size, fsdp_size=fsdp_size) self.ulysses_device_mesh = None self.ulysses_sequence_parallel_size = self.config.get("ulysses_sequence_parallel_size", 1) dp = world_size // self.ulysses_sequence_parallel_size if self.ulysses_sequence_parallel_size > 1: self.ulysses_device_mesh = init_device_mesh( device_name, mesh_shape=(dp, self.ulysses_sequence_parallel_size), mesh_dim_names=["dp", "sp"] ) # create training dispatch if self.ulysses_device_mesh is not None: is_collect = self.ulysses_device_mesh["sp"].get_local_rank() == 0 self._register_dispatch_collect_info( "critic", dp_rank=self.ulysses_device_mesh["dp"].get_local_rank(), is_collect=is_collect ) else: self._register_dispatch_collect_info("critic", dp_rank=self.rank, is_collect=True) self.ulysses_sharding_manager = FSDPUlyssesShardingManager(self.ulysses_device_mesh) # set FSDP offload params self._is_offload_param = self.config.model.fsdp_config.param_offload self._is_offload_optimizer = 
self.config.model.fsdp_config.optimizer_offload # normalize config self.config.ppo_mini_batch_size *= self.config.rollout_n self.config.ppo_mini_batch_size //= torch.distributed.get_world_size() // self.ulysses_sequence_parallel_size if self.config.ppo_micro_batch_size is not None: self.config.ppo_micro_batch_size //= ( torch.distributed.get_world_size() // self.ulysses_sequence_parallel_size ) self.config.forward_micro_batch_size //= ( torch.distributed.get_world_size() // self.ulysses_sequence_parallel_size ) self.config.ppo_micro_batch_size_per_gpu = self.config.ppo_micro_batch_size self.config.forward_micro_batch_size_per_gpu = self.config.forward_micro_batch_size if self.config.ppo_micro_batch_size_per_gpu is not None: assert self.config.ppo_mini_batch_size % self.config.ppo_micro_batch_size_per_gpu == 0, ( f"normalized ppo_mini_batch_size {self.config.ppo_mini_batch_size} should be divisible by " f"ppo_micro_batch_size_per_gpu {self.config.ppo_micro_batch_size_per_gpu}" ) assert self.config.ppo_mini_batch_size // self.config.ppo_micro_batch_size_per_gpu > 0, ( f"normalized ppo_mini_batch_size {self.config.ppo_mini_batch_size} should be larger than " f"ppo_micro_batch_size_per_gpu {self.config.ppo_micro_batch_size_per_gpu}" ) self._is_lora = ( self.config.model.get("lora_adapter_path") is not None or self.config.model.get("lora_rank", 0) > 0 ) self.use_orig_params = self.config.model.fsdp_config.get("use_orig_params", False) def _build_critic_model_optimizer(self, config): # the following line is necessary from torch.distributed.fsdp import MixedPrecision from verl.utils.model import load_valuehead_model, print_model_size from verl.utils.torch_dtypes import PrecisionType use_shm = config.model.get("use_shm", False) local_path = copy_to_local(config.model.path, use_shm=use_shm) # note that the tokenizer between actor and critic may be different. So override tokenizer info with actor info # using random initialized model from any architecture. May not be the same as Actor. 
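# ----------------------------------------------------------------------------
# [Editor's note] Illustrative sketch, not part of the original source: the
# config normalization in CriticWorker.__init__ above rescales the *global*
# PPO batch sizes into per-data-parallel-rank quantities. The same arithmetic
# isolated as a pure function (all example numbers are hypothetical):
def _normalize_ppo_batch_sizes(mini_bsz: int, micro_bsz, rollout_n: int, world_size: int, sp_size: int):
    dp = world_size // sp_size       # ranks in one Ulysses SP group share a DP rank
    mini_bsz = mini_bsz * rollout_n  # every prompt expands into rollout_n samples
    mini_bsz //= dp                  # split the global minibatch across DP ranks
    if micro_bsz is not None:
        micro_bsz //= dp             # micro batch becomes a per-GPU quantity
        assert mini_bsz % micro_bsz == 0, "minibatch must split into whole micro batches"
    return mini_bsz, micro_bsz

# e.g. _normalize_ppo_batch_sizes(256, 16, rollout_n=4, world_size=8, sp_size=2)
# returns (256, 4): each DP rank runs 64 micro batches per PPO minibatch.
# ----------------------------------------------------------------------------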
tokenizer_path = copy_to_local(config.model.tokenizer_path, use_shm=use_shm) self.tokenizer = hf_tokenizer(tokenizer_path, trust_remote_code=config.model.get("trust_remote_code", False)) self.processor = hf_processor(tokenizer_path, trust_remote_code=config.model.get("trust_remote_code", False)) if self.config.model.get("custom_chat_template", None) is not None: if self.processor is not None: self.processor.chat_template = self.config.model.custom_chat_template else: self.tokenizer.chat_template = self.config.model.custom_chat_template override_config = OmegaConf.to_container(OmegaConf.create(self.config.model.get("override_config", {}))) override_config_kwargs = { "bos_token_id": self.tokenizer.bos_token_id, "eos_token_id": self.tokenizer.eos_token_id, "pad_token_id": self.tokenizer.pad_token_id, } override_config_kwargs.update(override_config) if self.rank == 0: print(f"Critic overriding config {override_config_kwargs}") torch_dtype = self.config.model.fsdp_config.get("model_dtype", "fp32") torch_dtype = PrecisionType.to_dtype(torch_dtype) from transformers import AutoConfig # override model kwargs attn_implementation = override_config.get("attn_implementation", "flash_attention_2") critic_model_config = AutoConfig.from_pretrained( local_path, attn_implementation=attn_implementation, trust_remote_code=config.model.get("trust_remote_code", False), ) # TODO: VL models use VisionAttention, which directly uses flash_attention in transformers>=4.53 # which will be patched by _ulysses_flash_attention_forward, but erroneously misses position_ids # Maybe support Ulysses in VisionAttention in the future and remove this patch if self.ulysses_sequence_parallel_size > 1 and hasattr(critic_model_config, "vision_config"): critic_model_config.vision_config._attn_implementation = "eager" critic_model_config.num_labels = 1 # patch for kimi-vl if getattr(critic_model_config, "model_type", None) == "kimi_vl": critic_model_config.text_config.topk_method = "greedy" init_context = get_init_weight_context_manager( use_meta_tensor=not critic_model_config.tie_word_embeddings, mesh=self.device_mesh ) with init_context(), warnings.catch_warnings(): warnings.simplefilter("ignore") critic_model_config.classifier_dropout = 0.0 critic_model_config.hidden_dropout = "0" critic_model_config.summary_dropout_prob = 0.0 critic_module = load_valuehead_model( local_path, torch_dtype, critic_model_config, config.model.get("trust_remote_code", False), ) use_remove_padding = config.model.get("use_remove_padding", False) apply_monkey_patch( model=critic_module, use_remove_padding=use_remove_padding, ulysses_sp_size=self.ulysses_sequence_parallel_size, ) # some parameters may not be in torch_dtype critic_module.to(torch_dtype) if config.model.get("enable_gradient_checkpointing", False): critic_module.gradient_checkpointing_enable(gradient_checkpointing_kwargs={"use_reentrant": False}) if self._is_lora: print("Applying LoRA to critic module") critic_module.enable_input_require_grads() # Check if we should load a pre-trained LoRA adapter lora_adapter_path = self.config.model.get("lora_adapter_path") if lora_adapter_path is not None: from peft import PeftModel print(f"Loading pre-trained LoRA adapter to critic from: {lora_adapter_path}") # Copy adapter to local if needed local_adapter_path = copy_to_local(lora_adapter_path, use_shm=self.config.model.get("use_shm", False)) critic_module = PeftModel.from_pretrained(critic_module, local_adapter_path, is_trainable=True) peft_config = critic_module.peft_config["default"] # Ensure task_type is
TaskType enum, not string if isinstance(peft_config.task_type, str): peft_config.task_type = TaskType.CAUSAL_LM else: # Convert config to regular Python types before creating PEFT model lora_config = { "task_type": TaskType.CAUSAL_LM, "r": self.config.model.lora_rank, "lora_alpha": self.config.model.lora_alpha, "target_modules": convert_to_regular_types(self.config.model.target_modules), "bias": "none", } critic_module = get_peft_model(critic_module, LoraConfig(**lora_config)) if self.rank == 0: print_model_size(critic_module) self.critic_model_config = critic_model_config fsdp_config = self.config.model.fsdp_config mixed_precision_config = fsdp_config.get("mixed_precision", None) if mixed_precision_config is not None: param_dtype = PrecisionType.to_dtype(mixed_precision_config.get("param_dtype", "bf16")) reduce_dtype = PrecisionType.to_dtype(mixed_precision_config.get("reduce_dtype", "fp32")) buffer_dtype = PrecisionType.to_dtype(mixed_precision_config.get("buffer_dtype", "fp32")) else: param_dtype = torch.bfloat16 reduce_dtype = torch.float32 buffer_dtype = torch.float32 mixed_precision = MixedPrecision(param_dtype=param_dtype, reduce_dtype=reduce_dtype, buffer_dtype=buffer_dtype) auto_wrap_policy = get_fsdp_wrap_policy( module=critic_module, config=self.config.model.fsdp_config.wrap_policy, is_lora=self._is_lora, ) log_gpu_memory_usage("Before critic FSDP", logger=None) fsdp_mesh = self.device_mesh sharding_strategy = get_sharding_strategy(fsdp_mesh) self.use_orig_params = fsdp_config.get("use_orig_params", False) if self.config.model.get("freeze_vision_tower", False): vision_tower = get_vl_model_vision_tower(critic_module) if vision_tower is not None: vision_tower.requires_grad_(False) self.use_orig_params = True if self.rank == 0: print("[critic model] Vision tower is set to not trainable.") else: if self.rank == 0: print("[critic model] No vision tower found.") # Note: We force turn off CPUOffload for critic because it causes incorrect results when using grad accumulation if config.strategy == "fsdp": critic_module = FSDP( critic_module, param_init_fn=init_fn, use_orig_params=self.use_orig_params, auto_wrap_policy=auto_wrap_policy, device_id=get_device_id(), sharding_strategy=sharding_strategy, mixed_precision=mixed_precision, sync_module_states=True, forward_prefetch=self.config.model.fsdp_config.forward_prefetch, device_mesh=self.device_mesh, cpu_offload=None, ) elif config.strategy == "fsdp2": assert CPUOffloadPolicy is not None, "PyTorch version >= 2.4 is required for using fully_shard API (FSDP2)" mp_policy = MixedPrecisionPolicy( param_dtype=param_dtype, reduce_dtype=reduce_dtype, cast_forward_inputs=True ) offload_policy = None if fsdp_config.offload_policy: self._is_offload_param = False self._is_offload_optimizer = False offload_policy = CPUOffloadPolicy(pin_memory=True) fsdp_kwargs = { "mesh": fsdp_mesh, "mp_policy": mp_policy, "offload_policy": offload_policy, "reshard_after_forward": fsdp_config.reshard_after_forward, "shard_placement_fn": get_shard_placement_fn(fsdp_size=self.device_mesh.shape[-1]), } full_state = critic_module.state_dict() apply_fsdp2(critic_module, fsdp_kwargs, fsdp_config) fsdp2_load_full_state_dict(critic_module, full_state, fsdp_mesh, offload_policy) else: raise NotImplementedError(f"Unknown strategy {config.strategy}") if config.model.get("enable_activation_offload", False): enable_gradient_checkpointing = config.model.get("enable_gradient_checkpointing", False) enable_activation_offloading(critic_module, config.strategy, 
enable_gradient_checkpointing) log_gpu_memory_usage("After critic FSDP", logger=None) critic_optimizer = build_optimizer(critic_module.parameters(), config.optim) total_steps = config.optim.get("total_training_steps", 0) num_warmup_steps = int(config.optim.get("lr_warmup_steps", -1)) lr_scheduler_type = config.optim.get("lr_scheduler_type", "constant") if num_warmup_steps < 0: num_warmup_steps_ratio = config.optim.get("lr_warmup_steps_ratio", 0.0) num_warmup_steps = int(num_warmup_steps_ratio * total_steps) if self.rank == 0: print(f"Total steps: {total_steps}, num_warmup_steps: {num_warmup_steps}") from verl.utils.torch_functional import ( get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup) if lr_scheduler_type == "constant": critic_lr_scheduler = get_constant_schedule_with_warmup( optimizer=critic_optimizer, num_warmup_steps=num_warmup_steps ) elif lr_scheduler_type == "cosine": min_lr_ratio = config.optim.get("min_lr_ratio", 0.0) num_cycles = config.optim.get("num_cycles", 0.5) critic_lr_scheduler = get_cosine_schedule_with_warmup( optimizer=critic_optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=total_steps, min_lr_ratio=min_lr_ratio, num_cycles=num_cycles, ) else: raise NotImplementedError(f"LR scheduler type {lr_scheduler_type} is not supported") return critic_module, critic_optimizer, critic_lr_scheduler @register(dispatch_mode=Dispatch.ONE_TO_ALL) def init_model(self): # This is used to import external_lib into the huggingface systems import_external_libs(self.config.model.get("external_lib", None)) from verl.workers.critic import DataParallelPPOCritic self.critic_module, self.critic_optimizer, self.critic_lr_scheduler = self._build_critic_model_optimizer( self.config ) if self._is_offload_param: offload_fsdp_model_to_cpu(self.critic_module) log_gpu_memory_usage("After offload critic model during init", logger=logger) if self._is_offload_optimizer: offload_fsdp_optimizer(optimizer=self.critic_optimizer) log_gpu_memory_usage("After offload critic optimizer during init", logger=logger) self.critic = DataParallelPPOCritic( config=self.config, critic_module=self.critic_module, critic_optimizer=self.critic_optimizer ) self.flops_counter = FlopsCounter(self.critic_model_config) self.checkpoint_manager = FSDPCheckpointManager( model=self.critic_module, optimizer=self.critic_optimizer, lr_scheduler=self.critic_lr_scheduler, processing_class=self.processor if self.processor is not None else self.tokenizer, checkpoint_config=self.config.checkpoint, ) @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="critic")) @DistProfiler.annotate(color="cyan") def compute_values(self, data: DataProto): if self._is_offload_param: load_fsdp_model_to_gpu(self.critic_module) micro_batch_size = self.config.forward_micro_batch_size_per_gpu data.meta_info["micro_batch_size"] = micro_batch_size data.meta_info["max_token_len"] = self.config.forward_max_token_len_per_gpu data.meta_info["use_dynamic_bsz"] = self.config.use_dynamic_bsz # perform forward computation with self.ulysses_sharding_manager: data = data.to("cpu") # data will be moved to device with each micro batch on critic.compute_values values = self.critic.compute_values(data=data) output = DataProto.from_dict(tensors={"values": values}) output = output.to("cpu") if self._is_offload_param: offload_fsdp_model_to_cpu(self.critic_module) return output @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="critic")) @DistProfiler.annotate(color="pink") def update_critic(self, data: DataProto): if
self._is_offload_param: load_fsdp_model_to_gpu(self.critic_module) if self._is_offload_optimizer: load_fsdp_optimizer(optimizer=self.critic_optimizer, device_id=get_device_id()) # perform forward computation with self.ulysses_sharding_manager: data = data.to("cpu") # data will be moved to device with each micro batch on critic.update_critic with Timer(name="update_critic", logger=None) as timer: metrics = self.critic.update_critic(data=data) delta_time = timer.last global_num_tokens = data.meta_info["global_token_num"] estimated_flops, promised_flops = self.flops_counter.estimate_flops(global_num_tokens, delta_time) metrics["perf/mfu/critic"] = estimated_flops * self.config.ppo_epochs / promised_flops / self.world_size lr = self.critic_lr_scheduler.get_last_lr()[0] metrics["critic/lr"] = lr self.critic_lr_scheduler.step() output = DataProto(batch=None, meta_info={"metrics": metrics}) if self._is_offload_param: offload_fsdp_model_to_cpu(self.critic_module) if self._is_offload_optimizer: offload_fsdp_optimizer(optimizer=self.critic_optimizer) output = output.to("cpu") return output @register(dispatch_mode=Dispatch.ONE_TO_ALL) def save_checkpoint(self, local_path, hdfs_path=None, global_step=0, max_ckpt_to_keep=None): import torch if self._is_offload_param: load_fsdp_model_to_gpu(self.critic_module) self.checkpoint_manager.save_checkpoint( local_path=local_path, hdfs_path=hdfs_path, global_step=global_step, max_ckpt_to_keep=max_ckpt_to_keep ) torch.distributed.barrier() if self._is_offload_param: offload_fsdp_model_to_cpu(self.critic_module) @register(dispatch_mode=Dispatch.ONE_TO_ALL) def load_checkpoint(self, local_path, hdfs_path=None, del_local_after_load=True): import torch if self._is_offload_param: load_fsdp_model_to_gpu(self.critic_module) self.checkpoint_manager.load_checkpoint( local_path=local_path, hdfs_path=hdfs_path, del_local_after_load=del_local_after_load ) torch.distributed.barrier() if self._is_offload_param: offload_fsdp_model_to_cpu(self.critic_module) if self._is_offload_optimizer: offload_fsdp_optimizer(self.critic_optimizer) # TODO(sgm): we may need to extract it to dp_reward_model.py class RewardModelWorker(Worker, DistProfilerExtension): """ Note that we only implement the reward model that is a subclass of AutoModelForTokenClassification.
""" def __init__(self, config): Worker.__init__(self) omega_profiler_config = config.get("profiler", {}) profiler_config = omega_conf_to_dataclass(omega_profiler_config, dataclass_type=ProfilerConfig) if omega_profiler_config.get("tool", None) in ["npu", "nsys", "torch", "torch_memory"]: tool_config = omega_conf_to_dataclass( omega_profiler_config.get("tool_config", {}).get(omega_profiler_config.get("tool")) ) else: tool_config = None DistProfilerExtension.__init__( self, DistProfiler(rank=self.rank, config=profiler_config, tool_config=tool_config), ) import torch.distributed self.config = config if not torch.distributed.is_initialized(): torch.distributed.init_process_group( backend=get_nccl_backend(), timeout=datetime.timedelta(seconds=self.config.get("nccl_timeout", 600)), init_method=os.environ.get("DIST_INIT_METHOD", None), ) # build device mesh for Ulysses Sequence Parallel world_size = torch.distributed.get_world_size() from torch.distributed.device_mesh import init_device_mesh fsdp_size = self.config.model.fsdp_config.fsdp_size self.device_mesh = create_device_mesh(world_size=world_size, fsdp_size=fsdp_size) self.ulysses_device_mesh = None self.ulysses_sequence_parallel_size = self.config.get("ulysses_sequence_parallel_size", 1) dp = world_size // self.ulysses_sequence_parallel_size if self.ulysses_sequence_parallel_size > 1: self.ulysses_device_mesh = init_device_mesh( device_name, mesh_shape=(dp, self.ulysses_sequence_parallel_size), mesh_dim_names=["dp", "sp"] ) self.ulysses_sharding_manager = FSDPUlyssesShardingManager(self.ulysses_device_mesh) # create training dispatch if self.ulysses_device_mesh is not None: is_collect = self.ulysses_device_mesh["sp"].get_local_rank() == 0 self._register_dispatch_collect_info( "reward", dp_rank=self.ulysses_device_mesh["dp"].get_local_rank(), is_collect=is_collect ) else: self._register_dispatch_collect_info("reward", dp_rank=self.rank, is_collect=True) self.use_remove_padding = self.config.model.get("use_remove_padding", False) # normalize config if self.config.micro_batch_size is not None: self.config.micro_batch_size //= torch.distributed.get_world_size() self.config.micro_batch_size_per_gpu = self.config.micro_batch_size def _build_model(self, config): # the following line is necessary from torch.distributed.fsdp import CPUOffload from transformers import AutoConfig, AutoModelForTokenClassification use_shm = config.model.get("use_shm", False) # download the checkpoint from hdfs local_path = copy_to_local(config.model.path, use_shm=use_shm) if self.config.model.input_tokenizer is None: self._do_switch_chat_template = False else: self._do_switch_chat_template = True input_tokenizer_local_path = copy_to_local(config.model.input_tokenizer, use_shm=use_shm) self.input_tokenizer = hf_tokenizer( input_tokenizer_local_path, trust_remote_code=config.model.get("trust_remote_code", False) ) self.tokenizer = hf_tokenizer(local_path, trust_remote_code=config.model.get("trust_remote_code", False)) trust_remote_code = config.model.get("trust_remote_code", False) model_config = AutoConfig.from_pretrained(local_path, trust_remote_code=trust_remote_code) model_config.num_labels = 1 # note that we have to create model in fp32. 
Otherwise, the optimizer is in bf16, which is incorrect init_context = get_init_weight_context_manager( use_meta_tensor=not model_config.tie_word_embeddings, mesh=self.device_mesh ) with init_context(), warnings.catch_warnings(): warnings.simplefilter("ignore") model_config.classifier_dropout = 0.0 reward_module = AutoModelForTokenClassification.from_pretrained( pretrained_model_name_or_path=local_path, config=model_config, torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2", trust_remote_code=trust_remote_code, ) apply_monkey_patch( model=reward_module, use_remove_padding=config.model.get("use_remove_padding", False), ulysses_sp_size=self.ulysses_sequence_parallel_size, ) reward_module.to(torch.bfloat16) auto_wrap_policy = get_fsdp_wrap_policy(module=reward_module, config=self.config.model.fsdp_config) fsdp_mesh = self.device_mesh sharding_strategy = get_sharding_strategy(fsdp_mesh) if config.strategy == "fsdp": reward_module = FSDP( reward_module, param_init_fn=init_fn, use_orig_params=False, auto_wrap_policy=auto_wrap_policy, device_id=get_device_id(), sharding_strategy=sharding_strategy, # zero3 sync_module_states=True, cpu_offload=CPUOffload(offload_params=True), forward_prefetch=self.config.model.fsdp_config.forward_prefetch, device_mesh=self.device_mesh, ) elif config.strategy == "fsdp2": assert CPUOffloadPolicy is not None, "PyTorch version >= 2.4 is required for using fully_shard API (FSDP2)" cpu_offload = CPUOffloadPolicy(pin_memory=True) fsdp_kwargs = { "mesh": fsdp_mesh, "offload_policy": cpu_offload, "reshard_after_forward": config.model.fsdp_config.reshard_after_forward, "shard_placement_fn": get_shard_placement_fn(fsdp_size=self.device_mesh.shape[-1]), } full_state = reward_module.state_dict() apply_fsdp2(reward_module, fsdp_kwargs, config.model.fsdp_config) fsdp2_load_full_state_dict(reward_module, full_state, fsdp_mesh, cpu_offload) else: raise NotImplementedError(f"Unknown strategy: {config.strategy}") return reward_module @register(dispatch_mode=Dispatch.ONE_TO_ALL) def init_model(self): # This is used to import external_lib into the huggingface systems import_external_libs(self.config.model.get("external_lib", None)) self.reward_module = self._build_model(config=self.config) def _forward_micro_batch(self, micro_batch): from verl.utils.attention_utils import (index_first_axis, pad_input, rearrange, unpad_input) from verl.utils.ulysses import (gather_outputs_and_unpad, ulysses_pad_and_slice_inputs) with torch.no_grad(), torch.autocast(device_type=device_name, dtype=torch.bfloat16): input_ids = micro_batch["input_ids"] batch_size, seqlen = input_ids.shape attention_mask = micro_batch["attention_mask"] position_ids = micro_batch["position_ids"] if position_ids.dim() == 3: # qwen2vl mrope position_ids = position_ids.transpose(0, 1) # (bsz, 3, seqlen) -> (3, bsz, seqlen) if self.use_remove_padding: input_ids_rmpad, indices, *_ = unpad_input( input_ids.unsqueeze(-1), attention_mask ) # input_ids_rmpad (total_nnz, ...) input_ids_rmpad = input_ids_rmpad.transpose(0, 1) # (1, total_nnz) # unpad the position_ids to align the rotary if position_ids.dim() == 3: position_ids_rmpad = ( index_first_axis(rearrange(position_ids, "c b s ... -> (b s) c ..."), indices) .transpose(0, 1) .unsqueeze(1) ) # (3, bsz, seqlen) -> (3, 1, bsz * seqlen) else: position_ids_rmpad = index_first_axis( rearrange(position_ids.unsqueeze(-1), "b s ... 
-> (b s) ..."), indices ).transpose(0, 1) # pad and slice the inputs if sp > 1 if self.ulysses_sequence_parallel_size > 1: input_ids_rmpad, position_ids_rmpad, pad_size = ulysses_pad_and_slice_inputs( input_ids_rmpad, position_ids_rmpad, sp_size=self.ulysses_sequence_parallel_size ) # only pass input_ids and position_ids to enable flash_attn_varlen output = self.reward_module( input_ids=input_ids_rmpad, attention_mask=None, position_ids=position_ids_rmpad, use_cache=False ) reward_rmpad = output.logits reward_rmpad = reward_rmpad.squeeze(0) # (total_nnz) # gather output if sp > 1 if self.ulysses_sequence_parallel_size > 1: reward_rmpad = gather_outputs_and_unpad( reward_rmpad, gather_dim=0, unpad_dim=0, padding_size=pad_size ) # pad it back rm_score = pad_input(reward_rmpad, indices=indices, batch=batch_size, seqlen=seqlen).squeeze(-1) else: output = self.reward_module( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, use_cache=False ) rm_score = output.logits # (batch_size, seq_len, 1) rm_score = rm_score.squeeze(-1) # extract the result of the last valid token eos_mask_idx = torch.argmax(position_ids * attention_mask, dim=-1) # (bsz,) rm_score = rm_score[torch.arange(batch_size), eos_mask_idx] return rm_score def _expand_to_token_level(self, data: DataProto, scores: torch.Tensor): batch_size = data.batch.batch_size[0] # expand as token_level_reward attention_mask = data.batch["attention_mask"] position_ids = data.batch["position_ids"] response_length = data.batch["responses"].shape[-1] if position_ids.dim() == 3: # qwen2vl mrope [bs, 3, seq_len] position_ids = position_ids[:, 0, :] eos_mask_idx = torch.argmax(position_ids * attention_mask, dim=-1) # (bsz,) token_level_scores = torch.zeros_like(attention_mask, dtype=scores.dtype) # (bsz, seqlen) token_level_scores[torch.arange(batch_size), eos_mask_idx] = scores # select the response part token_level_scores = token_level_scores[:, -response_length:] return token_level_scores def _switch_chat_template(self, data: DataProto): src_max_length = data.batch["attention_mask"].shape[-1] src_tokenizer = self.input_tokenizer target_tokenizer = self.tokenizer rm_input_ids = [] rm_attention_mask = [] for i in range(data.batch.batch_size[0]): if not isinstance(data.non_tensor_batch["raw_prompt"][i], list | np.ndarray): raise TypeError( f"raw_prompt must be a list or numpy array, got {type(data.non_tensor_batch['raw_prompt'][i])}" ) # extract raw prompt chat: list = list(data.non_tensor_batch["raw_prompt"][i]) # extract response response_ids = data.batch["responses"][i] response_length = response_ids.shape[-1] valid_response_length = data.batch["attention_mask"][i][-response_length:].sum() valid_response_ids = response_ids[:valid_response_length] # decode response = src_tokenizer.decode(valid_response_ids) # remove bos and eos response = response.replace(src_tokenizer.eos_token, "") chat.append({"role": "assistant", "content": response}) prompt_with_chat_template = target_tokenizer.apply_chat_template( chat, add_generation_prompt=False, tokenize=False ) if self.rank == 0 and i == 0: # for debugging purpose print(f"Switch template. 
chat: {prompt_with_chat_template}") # the maximum length is actually determined by the reward model itself max_length = self.config.get("max_length", src_max_length) if max_length is None: max_length = src_max_length model_inputs = target_tokenizer(prompt_with_chat_template, return_tensors="pt", add_special_tokens=False) input_ids, attention_mask = verl_F.postprocess_data( input_ids=model_inputs["input_ids"], attention_mask=model_inputs["attention_mask"], max_length=max_length, pad_token_id=target_tokenizer.pad_token_id, left_pad=False, # right padding truncation=self.config.get("truncation", "right"), ) # truncate from the right rm_input_ids.append(input_ids) rm_attention_mask.append(attention_mask) rm_input_ids = torch.cat(rm_input_ids, dim=0) rm_attention_mask = torch.cat(rm_attention_mask, dim=0) rm_position_ids = compute_position_id_with_mask(rm_attention_mask) rm_inputs = {"input_ids": rm_input_ids, "attention_mask": rm_attention_mask, "position_ids": rm_position_ids} return DataProto.from_dict(rm_inputs) @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="reward")) @DistProfiler.annotate(color="brown") def compute_rm_score(self, data: DataProto): import itertools from verl.utils.seqlen_balancing import (get_reverse_idx, rearrange_micro_batches) # Support all hardware data = data.to(get_device_id()) if self._do_switch_chat_template: rm_data = self._switch_chat_template(data) else: rm_input_ids = data.batch["input_ids"] rm_attention_mask = data.batch["attention_mask"] rm_position_ids = data.batch["position_ids"] rm_inputs = { "input_ids": rm_input_ids, "attention_mask": rm_attention_mask, "position_ids": rm_position_ids, } rm_data = DataProto.from_dict(rm_inputs) # Support all hardware rm_data = rm_data.to(get_device_id()) # perform forward computation with self.ulysses_sharding_manager: use_dynamic_bsz = self.config.use_dynamic_bsz if use_dynamic_bsz: max_token_len = self.config.forward_max_token_len_per_gpu * self.ulysses_sequence_parallel_size micro_batches, indices = rearrange_micro_batches(batch=rm_data.batch, max_token_len=max_token_len) else: micro_batches = rm_data.batch.split(self.config.micro_batch_size_per_gpu) output = [] for micro_batch in micro_batches: rm_score = self._forward_micro_batch(micro_batch) output.append(rm_score) scores = torch.cat(output, dim=0) # (batch_size) if use_dynamic_bsz: indices = list(itertools.chain.from_iterable(indices)) assert len(indices) == scores.size(0), f"{len(indices)} vs.
{scores.size()}" revert_indices = torch.tensor(get_reverse_idx(indices), dtype=torch.long) scores = scores[revert_indices] token_level_scores = self._expand_to_token_level(data, scores) # Note that this is only the scores, may not be the final rewards used to train RL output = DataProto.from_dict(tensors={"rm_scores": token_level_scores}) # https://pytorch.org/docs/stable/notes/fsdp.html#fsdp-notes # unshard the root FSDP module if self.world_size > 1 and fsdp_version(self.reward_module) == 1: self.reward_module._handle.reshard(True) output = output.to("cpu") return output # ================================= Async related workers ================================= class AsyncActorRolloutRefWorker(ActorRolloutRefWorker): @register(dispatch_mode=Dispatch.DIRECT_ROLLOUT_METHOD) async def wake_up(self): await self.rollout_mode() return True @register(dispatch_mode=Dispatch.DIRECT_ROLLOUT_METHOD) async def sleep(self): await self.trainer_mode() return True # ============================ vLLM related ============================ @register(dispatch_mode=Dispatch.DIRECT_ROLLOUT_METHOD) def get_zeromq_address(self): return self.rollout.get_zeromq_address() # ============================ SGLang related ============================ @register(dispatch_mode=Dispatch.DIRECT_ROLLOUT_METHOD, blocking=False) async def chat_completion(self, json_request): ret = await self.rollout.chat_completion(json_request) return ret @register(dispatch_mode=Dispatch.DIRECT_ROLLOUT_METHOD, blocking=False) async def generate( self, prompt_ids: list[int], sampling_params: dict[str, Any], request_id: str, image_data: Optional[list[Any]] = None, ) -> list[int]: ret = await self.rollout.generate(prompt_ids, sampling_params, request_id, image_data=image_data) return ret ================================================ FILE: verl_distillation/verl/workers/megatron_workers.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
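# ----------------------------------------------------------------------------
# [Editor's note] Standalone sketch, not part of either original file, of the
# dynamic micro-batching used by compute_rm_score in fsdp_workers.py above:
# indices are packed into micro-batches under a token budget (a simplified
# stand-in for verl's rearrange_micro_batches), and the permutation is then
# inverted so the concatenated outputs line up with the input order, which is
# what get_reverse_idx achieves.
import torch

def _forward_in_dynamic_micro_batches(seq_lens, max_tokens, forward_fn):
    order = sorted(range(len(seq_lens)), key=lambda i: seq_lens[i])
    batches, cur, cur_tokens = [], [], 0
    for i in order:  # greedy packing under the per-batch token budget
        if cur and cur_tokens + seq_lens[i] > max_tokens:
            batches.append(cur)
            cur, cur_tokens = [], 0
        cur.append(i)
        cur_tokens += seq_lens[i]
    if cur:
        batches.append(cur)
    flat = [i for b in batches for i in b]  # processing order of the inputs
    out = torch.cat([forward_fn(b) for b in batches])  # one score per index
    revert = torch.empty(len(flat), dtype=torch.long)
    revert[torch.tensor(flat)] = torch.arange(len(flat))
    return out[revert]  # restore the caller's original order

# e.g. with forward_fn=lambda idxs: torch.tensor([float(i) for i in idxs]):
# _forward_in_dynamic_micro_batches([5, 9, 2], 10, ...) -> tensor([0., 1., 2.])
# ----------------------------------------------------------------------------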
""" The main entry point to run the PPO algorithm """ import datetime import logging import os import time from typing import Any, Optional import psutil import torch import torch.distributed from codetiming import Timer from omegaconf import DictConfig, OmegaConf try: from mindspeed.megatron_adaptor import repatch except ImportError: repatch = None from megatron.core import parallel_state as mpu from verl import DataProto from verl.models.mcore import get_mcore_weight_converter from verl.single_controller.base import Worker from verl.single_controller.base.decorator import Dispatch, make_nd_compute_dataproto_dispatch_fn, register from verl.utils import hf_tokenizer from verl.utils.checkpoint.megatron_checkpoint_manager import MegatronCheckpointManager from verl.utils.config import omega_conf_to_dataclass from verl.utils.device import ( get_device_id, get_device_name, get_nccl_backend, get_torch_device, set_expandable_segments, ) from verl.utils.distributed import set_numa_affinity from verl.utils.flops_counter import FlopsCounter from verl.utils.fs import copy_to_local from verl.utils.megatron_utils import ( load_megatron_model_to_gpu, load_megatron_optimizer, offload_megatron_model_to_cpu, offload_megatron_optimizer, per_tensor_generator, ) from verl.utils.memory_utils import aggressive_empty_cache from verl.utils.model import get_hf_model_path, load_mcore_dist_weights, load_megatron_gptmodel_weights from verl.utils.profiler import ( DistProfiler, DistProfilerExtension, GPUMemoryLogger, ProfilerConfig, log_gpu_memory_usage, simple_timer, ) from verl.utils.profiler.performance import reduce_timing, topk_reduce_ratio_min_max from verl.utils.ray_utils import get_event_loop from verl.workers.actor.megatron_actor import MegatronPPOActor from verl.workers.config import HFModelConfig, McoreCriticConfig, RolloutConfig from verl.workers.critic.megatron_critic import MegatronPPOCritic from verl.workers.reward_model.megatron.reward_model import MegatronRewardModel from verl.workers.rollout import get_rollout_class logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) def set_random_seed(seed): import random import numpy as np import torch torch.manual_seed(seed) np.random.seed(seed) random.seed(seed) if get_torch_device().device_count() > 0: from megatron.core import tensor_parallel tensor_parallel.model_parallel_cuda_manual_seed(seed) # FIXME: torch cumsum not support deterministic (used in vllm sampler), # https://github.com/pytorch/pytorch/issues/89492 # torch.use_deterministic_algorithms(True, warn_only=True) # os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8' class MegatronWorker(Worker): def _init_hf_config_and_tf_config( self, model_path, tokenizer_or_path, dtype, override_model_config, override_transformer_config, trust_remote_code=False, use_mbridge=False, ): from transformers import AutoConfig from verl.models.mcore import hf_to_mcore_config from verl.utils import hf_processor, hf_tokenizer from verl.utils.fs import copy_to_local from verl.utils.model import update_model_config # Step 1: initialize the tokenizer self.local_path = copy_to_local(model_path) if tokenizer_or_path is None: self.tokenizer = hf_tokenizer(self.local_path, trust_remote_code=trust_remote_code) self.processor = hf_processor(self.local_path, trust_remote_code=trust_remote_code) elif isinstance(tokenizer_or_path, str): self.tokenizer = hf_tokenizer(copy_to_local(tokenizer_or_path), trust_remote_code=trust_remote_code) self.processor = 
hf_processor(copy_to_local(tokenizer_or_path), trust_remote_code=trust_remote_code) else: self.tokenizer = tokenizer_or_path self.processor = tokenizer_or_path if self.config.model.get("custom_chat_template", None) is not None: if self.processor is not None: self.processor.chat_template = self.config.model.custom_chat_template else: self.tokenizer.chat_template = self.config.model.custom_chat_template # Step 2: get the hf hf_config = AutoConfig.from_pretrained(self.local_path, trust_remote_code=trust_remote_code) # Step 3: override the hf config override_config_kwargs = { "bos_token_id": self.tokenizer.bos_token_id, "eos_token_id": self.tokenizer.eos_token_id, "pad_token_id": self.tokenizer.pad_token_id, } override_config_kwargs.update(override_model_config.get("model_config", {})) self.share_embeddings_and_output_weights = getattr(hf_config, "tie_word_embeddings", False) update_model_config(hf_config, override_config_kwargs=override_config_kwargs) self.architectures = getattr(hf_config, "architectures", None) if self.rank == 0: print(f"Model config after override: {hf_config}") from verl.models.mcore.config_converter import mapping_string_to_attn_backend # todo: remove this line after mcore adopt mbridge 0.15, now for compatibility override_transformer_config = mapping_string_to_attn_backend(override_transformer_config) if use_mbridge: from verl.models.mcore.mbridge import AutoBridge bridge = AutoBridge.from_config(hf_config) bridge.set_extra_args(**override_transformer_config) tf_config = bridge.config self.bridge = bridge else: tf_config = hf_to_mcore_config(hf_config, dtype, **override_transformer_config) self.bridge = None print(f"TF config: {tf_config}") self.hf_config = hf_config self.tf_config = tf_config class ActorRolloutRefWorker(MegatronWorker, DistProfilerExtension): """ This worker can be instantiated as a standalone actor or a standalone rollout or a standalone reference policy or a hybrid engine based on the config.rollout """ def __init__(self, config: DictConfig, role: str, **kwargs): Worker.__init__(self) self.config = config if repatch is not None: # NPU MindSpeed patch, will be refactored with MindSpeedEngine. repatch(self.config.actor.megatron.get("override_transformer_config", {})) # NOTE(sgm): We utilize colocate WorkerGroup by default. # As a result, Workers for different model share the same process. # Therefore, we only require one distribute initialization. # To utilize different parallel strategy in different models: # 1, users should disable WorkerDict; 2.assign different ResourcePool to different models, # 3. 
and apply the following patch in ray==2.10, https://github.com/ray-project/ray/pull/44385 if not torch.distributed.is_initialized(): set_numa_affinity() rank = int(os.environ["LOCAL_RANK"]) torch.distributed.init_process_group( backend=get_nccl_backend(), timeout=datetime.timedelta(seconds=self.config.get("nccl_timeout", 600)), init_method=os.environ.get("DIST_INIT_METHOD", None), ) get_torch_device().set_device(rank) mpu.initialize_model_parallel( tensor_model_parallel_size=self.config.actor.megatron.tensor_model_parallel_size, pipeline_model_parallel_size=self.config.actor.megatron.pipeline_model_parallel_size, virtual_pipeline_model_parallel_size=self.config.actor.megatron.virtual_pipeline_model_parallel_size, use_sharp=False, context_parallel_size=self.config.actor.megatron.context_parallel_size, expert_model_parallel_size=self.config.actor.megatron.expert_model_parallel_size, expert_tensor_parallel_size=self.config.actor.megatron.expert_tensor_parallel_size, nccl_communicator_config_path=None, ) is_collect = ( mpu.get_tensor_model_parallel_rank() == 0 and mpu.get_pipeline_model_parallel_rank() == mpu.get_pipeline_model_parallel_world_size() - 1 and mpu.get_context_parallel_rank() == 0 ) self._register_dispatch_collect_info( mesh_name="actor", dp_rank=mpu.get_data_parallel_rank(), is_collect=is_collect ) set_random_seed(seed=self.config.actor.megatron.seed) self.role = role assert self.role in ["actor", "rollout", "ref", "actor_rollout", "actor_rollout_ref"] self._is_actor = self.role in ["actor", "actor_rollout", "actor_rollout_ref"] self._is_rollout = self.role in ["rollout", "actor_rollout", "actor_rollout_ref"] self._is_ref = self.role in ["ref", "actor_rollout_ref"] if self._is_actor: omega_profiler_config = config.actor.get("profiler", {}) elif self._is_rollout: # NOTE: In colocation mode, rollout config may not take effect (follow the actor config) # This is for extendability in AsyncRL cases omega_profiler_config = config.rollout.get("profiler", {}) elif self._is_ref: omega_profiler_config = config.ref.get("profiler", {}) else: raise ValueError( f"Invalid role {self.role}, should be one of " "['actor', 'rollout', 'ref', 'actor_rollout', 'actor_rollout_ref']" ) # omega_profiler_config is DictConfig # profiler_config is a ProfilerConfig dataclass profiler_config = omega_conf_to_dataclass(omega_profiler_config, dataclass_type=ProfilerConfig) if omega_profiler_config.get("tool", None) in ["npu", "nsys", "torch", "torch_memory"]: tool_config = omega_conf_to_dataclass( omega_profiler_config.get("tool_config", {}).get(omega_profiler_config.get("tool")) ) else: tool_config = None DistProfilerExtension.__init__( self, DistProfiler(rank=self.rank, config=profiler_config, tool_config=tool_config) ) # TODO(sgm): Currently, we only support reference model param offload # will support other offload later self._is_offload_param = False self._is_offload_grad = False self._is_offload_optimizer = False # normalize config if self._is_actor and self._is_rollout: self.config.actor.ppo_mini_batch_size *= self.config.rollout.n self.config.actor.ppo_mini_batch_size //= mpu.get_data_parallel_world_size() if self.config.actor.get("ppo_micro_batch_size", None): self.config.actor.ppo_micro_batch_size //= mpu.get_data_parallel_world_size() self.config.rollout.log_prob_micro_batch_size //= mpu.get_data_parallel_world_size() self.config.actor.ppo_micro_batch_size_per_gpu = self.config.actor.ppo_micro_batch_size self.config.rollout.log_prob_micro_batch_size_per_gpu = 
self.config.rollout.log_prob_micro_batch_size self._is_offload_param = self.config.actor.megatron.get("param_offload", False) self._is_offload_grad = self.config.actor.megatron.get("grad_offload", False) self._is_offload_optimizer = self.config.actor.megatron.get("optimizer_offload", False) elif self._is_ref: if self.config.ref.get("log_prob_micro_batch_size", None): self.config.ref.log_prob_micro_batch_size //= mpu.get_data_parallel_world_size() self.config.ref.log_prob_micro_batch_size_per_gpu = self.config.ref.log_prob_micro_batch_size else: assert self.config.ref.get("log_prob_micro_batch_size_per_gpu", None) is not None, ( "Please note that in the ref policy configuration, `log_prob_micro_batch_size_per_gpu` and " "`log_prob_micro_batch_size` should not be None at the same time." ) self._ref_is_offload_param = self.config.ref.megatron.get("param_offload", False) def _build_model_optimizer( self, model_path, optim_config, override_model_config, override_transformer_config, override_ddp_config=None ): from verl.utils.megatron.optimizer import ( get_megatron_optimizer, get_megatron_optimizer_param_scheduler, init_megatron_optim_config, ) from verl.utils.megatron_utils import McoreModuleWrapperConfig, make_megatron_module from verl.utils.model import get_generation_config, print_model_size self._init_hf_config_and_tf_config( model_path, model_path, self.dtype, override_model_config, override_transformer_config, self.config.model.get("trust_remote_code", False), self.config.actor.megatron.use_mbridge, ) self.generation_config = get_generation_config(self.local_path) if self._is_actor or self._is_rollout: wrap_config = McoreModuleWrapperConfig( is_value_model=False, # actor is not value model share_embeddings_and_output_weights=self.share_embeddings_and_output_weights, wrap_with_ddp=True, use_distributed_optimizer=self.config.actor.megatron.use_distributed_optimizer, ) actor_module = make_megatron_module( wrap_config=wrap_config, tf_config=self.tf_config, hf_config=self.hf_config, bridge=self.bridge, override_model_config=override_model_config, override_ddp_config=override_ddp_config, ) print(f"actor_module: {len(actor_module)}") if self.config.actor.load_weight: if self.config.actor.megatron.use_dist_checkpointing: load_mcore_dist_weights( actor_module, self.config.actor.megatron.dist_checkpointing_path, is_value_model=False ) else: if self.bridge is not None: local_model_path = get_hf_model_path(self.config) self.bridge.load_weights(actor_module, local_model_path) else: load_megatron_gptmodel_weights( self.config, self.hf_config, actor_module, params_dtype=self.dtype, is_value_model=False ) if self.rank == 0: print_model_size(actor_module[0]) log_gpu_memory_usage("After MegatronPPOActor init", logger=logger) elif self._is_ref: wrap_config = McoreModuleWrapperConfig( is_value_model=False, # ref is not value model share_embeddings_and_output_weights=self.share_embeddings_and_output_weights, wrap_with_ddp=False, use_distributed_optimizer=self.config.ref.megatron.use_distributed_optimizer, ) ref_module = make_megatron_module( wrap_config=wrap_config, tf_config=self.tf_config, hf_config=self.hf_config, bridge=self.bridge, override_model_config=override_model_config, ) if self.config.ref.load_weight: # should align with the actor: assert self.config.actor.load_weight == self.config.ref.load_weight print("load ref weight start") if self.config.ref.megatron.use_dist_checkpointing: load_mcore_dist_weights( ref_module, self.config.ref.megatron.dist_checkpointing_path, is_value_model=False ) else: 
if self.bridge is not None: local_model_path = get_hf_model_path(self.config) self.bridge.load_weights(ref_module, local_model_path) else: load_megatron_gptmodel_weights( self.config, self.hf_config, ref_module, params_dtype=self.dtype, is_value_model=False ) log_gpu_memory_usage("After ref module init", logger=logger) return ref_module, self.hf_config # TODO: add more optimizer args into config if self._is_actor: optim_config_megatron = init_megatron_optim_config(optim_config) actor_optimizer = get_megatron_optimizer(model=actor_module, config=optim_config_megatron) actor_optimizer_scheduler = get_megatron_optimizer_param_scheduler( optimizer=actor_optimizer, config=optim_config ) else: optim_config = None actor_optimizer = None actor_optimizer_scheduler = None log_gpu_memory_usage("After actor optimizer init", logger=logger) return actor_module, actor_optimizer, actor_optimizer_scheduler, self.hf_config, optim_config def _build_rollout(self, trust_remote_code=False): from torch.distributed.device_mesh import init_device_mesh # 1. parse rollout and huggingface model config rollout_config: RolloutConfig = omega_conf_to_dataclass(self.config.rollout) model_config: HFModelConfig = omega_conf_to_dataclass(self.config.model, dataclass_type=HFModelConfig) # 2. build rollout device mesh infer_tp = self.config.rollout.tensor_model_parallel_size * self.config.rollout.data_parallel_size infer_pp = self.config.rollout.pipeline_model_parallel_size infer_world_size = infer_tp * infer_pp dp = self.world_size // infer_world_size assert self.world_size % infer_world_size == 0, ( f"rollout world_size: {self.world_size} is not divisible by infer_world_size: {infer_world_size}" ) rollout_device_mesh = init_device_mesh( get_device_name(), mesh_shape=(dp, infer_tp, infer_pp), mesh_dim_names=["dp", "infer_tp", "infer_pp"] ) is_collect = ( rollout_device_mesh["infer_tp"].get_local_rank() == 0 and rollout_device_mesh["infer_pp"].get_local_rank() == 0 ) self._register_dispatch_collect_info( "rollout", dp_rank=rollout_device_mesh["dp"].get_local_rank(), is_collect=is_collect ) # 3. init trainer and rollout random states self.torch_random_states = get_torch_device().get_rng_state() gen_dp_rank = rollout_device_mesh["dp"].get_local_rank() get_torch_device().manual_seed(gen_dp_rank + 1000) # make sure all tp ranks have the same random states self.gen_random_states = get_torch_device().get_rng_state() get_torch_device().set_rng_state(self.torch_random_states) # 4. build rollout model log_gpu_memory_usage(f"Before building {self.config.rollout.name} rollout", logger=logger) self.rollout = get_rollout_class(rollout_config.name, rollout_config.mode)( config=rollout_config, model_config=model_config, device_mesh=rollout_device_mesh ) log_gpu_memory_usage(f"After building {self.config.rollout.name} rollout", logger=logger) # 5. switch to trainer mode # NOTE: It's critical that hybrid engine in trainer mode initially to load checkpoint. # For sync mode, we directly switch to trainer mode here. # For async mode, we can't call run_until_complete here, so we will switch to trainer mode in AgentLoopManager. 
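# ----------------------------------------------------------------------------
# [Editor's note] Sketch, not part of the original source, of the rollout
# device-mesh arithmetic in _build_rollout above: the inference engine spans
# infer_tp x infer_pp ranks, and whatever remains of the training world size
# becomes the replica ("dp") dimension of the mesh.
def _rollout_mesh_shape(world_size: int, tp: int, rollout_dp: int, pp: int):
    infer_tp = tp * rollout_dp  # tensor_model_parallel_size * data_parallel_size
    infer_world_size = infer_tp * pp
    assert world_size % infer_world_size == 0, (
        f"world_size {world_size} not divisible by infer world size {infer_world_size}"
    )
    return (world_size // infer_world_size, infer_tp, pp)  # (dp, infer_tp, infer_pp)

# e.g. _rollout_mesh_shape(16, tp=2, rollout_dp=1, pp=1) -> (8, 2, 1):
# eight independent replicas of a TP-2 inference engine on 16 training ranks.
# ----------------------------------------------------------------------------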
if rollout_config.mode == "sync" and self._is_actor: loop = get_event_loop() loop.run_until_complete(self.trainer_mode()) @register(dispatch_mode=Dispatch.ONE_TO_ALL) def init_model(self): if self.config.model.get("external_lib", None) is not None: # This is used to import external_lib into the huggingface systems import importlib importlib.import_module(self.config.model.external_lib) from verl.utils.torch_dtypes import PrecisionType override_model_config = OmegaConf.to_container(OmegaConf.create(self.config.model.get("override_config", {}))) if self._is_actor: override_transformer_config = OmegaConf.to_container( OmegaConf.create(self.config.actor.megatron.get("override_transformer_config", {})) ) override_ddp_config = OmegaConf.to_container( OmegaConf.create(self.config.actor.megatron.get("override_ddp_config", {})) ) elif self._is_ref: override_transformer_config = OmegaConf.to_container( OmegaConf.create(self.config.ref.megatron.get("override_transformer_config", {})) ) else: override_transformer_config = {} self.param_dtype = torch.bfloat16 log_gpu_memory_usage("Before init actor model and optimizer", logger=logger) self.dtype = PrecisionType.to_dtype(self.param_dtype) if self._is_actor: # we need the model for actor and rollout optim_config = self.config.actor.optim if self._is_actor else None ( self.actor_module, self.actor_optimizer, self.actor_optimizer_scheduler, self.actor_model_config, self.actor_optim_config, ) = self._build_model_optimizer( model_path=self.config.model.path, optim_config=optim_config, override_model_config=override_model_config, override_transformer_config=override_transformer_config, override_ddp_config=override_ddp_config, ) if self._is_offload_param: offload_megatron_model_to_cpu(self.actor_module) log_gpu_memory_usage("After offload actor params and grad during init", logger=logger) if self._is_offload_optimizer: offload_megatron_optimizer(self.actor_optimizer) log_gpu_memory_usage("After offload actor optimizer during init", logger=logger) if self._is_actor: actor_cfg = omega_conf_to_dataclass(self.config.actor) self.actor = MegatronPPOActor( config=actor_cfg, model_config=self.actor_model_config, hf_config=self.hf_config, tf_config=self.tf_config, actor_module=self.actor_module, actor_optimizer=self.actor_optimizer, ) log_gpu_memory_usage("After MegatronPPOActor init", logger=logger) if self._is_rollout: self._build_rollout(trust_remote_code=self.config.model.get("trust_remote_code", False)) log_gpu_memory_usage("After rollout init", logger=logger) if self._is_ref: self.ref_module, self.ref_model_config = self._build_model_optimizer( model_path=self.config.model.path, optim_config=None, override_model_config=override_model_config, override_transformer_config=override_transformer_config, ) log_gpu_memory_usage("After ref model init", logger=logger) self.ref_policy = MegatronPPOActor( config=self.config.ref, model_config=self.ref_model_config, hf_config=self.hf_config, tf_config=self.tf_config, actor_module=self.ref_module, actor_optimizer=None, ) if self._ref_is_offload_param: offload_megatron_model_to_cpu(self.ref_module) log_gpu_memory_usage("After offload ref params during init", logger=logger) if self._is_actor: self.flops_counter = FlopsCounter(self.actor_model_config) self.checkpoint_mananager = MegatronCheckpointManager( config=self.config, checkpoint_config=self.config.actor.checkpoint, model_config=self.actor_model_config, transformer_config=self.tf_config, role="actor", model=self.actor_module, arch=self.architectures[0], 
hf_config=self.hf_config, param_dtype=self.param_dtype, share_embeddings_and_output_weights=self.share_embeddings_and_output_weights, processing_class=self.processor if self.processor is not None else self.tokenizer, optimizer=self.actor_optimizer, optimizer_scheduler=self.actor_optimizer_scheduler, use_distributed_optimizer=self.config.actor.megatron.use_distributed_optimizer, use_checkpoint_opt_param_scheduler=self.config.actor.optim.use_checkpoint_opt_param_scheduler, bridge=self.bridge, use_dist_checkpointing=self.config.actor.megatron.use_dist_checkpointing, ) self.layer_name_mapping = { "qkv_layer_name": "self_attention.linear_qkv.", "gate_proj_layer_name": "linear_fc1.", } self.weight_converter = None if not self.config.actor.megatron.use_mbridge: self.weight_converter = get_mcore_weight_converter(self.actor_model_config, self.dtype) get_torch_device().empty_cache() log_gpu_memory_usage("After init_model finish", logger=logger) async def rollout_mode(self): """Context switch hybridengine to rollout mode.""" aggressive_empty_cache(force_sync=True) if self._is_offload_param: load_megatron_model_to_gpu(self.actor.actor_module, load_grad=False) log_gpu_memory_usage("After load actor params during rollout_mode", logger=logger) if self.bridge is not None: per_tensor_param = self.bridge.export_weights(self.actor.actor_module) else: per_tensor_param = per_tensor_generator( self.actor.actor_module, self.actor_model_config, self.weight_converter, self.tf_config, self.layer_name_mapping, ) set_expandable_segments(False) if self.config.rollout.free_cache_engine: await self.rollout.resume(tags=["weights"]) await self.rollout.update_weights(per_tensor_param) if self._is_offload_param: offload_megatron_model_to_cpu(self.actor.actor_module) aggressive_empty_cache(force_sync=True) if self.config.rollout.free_cache_engine: await self.rollout.resume(tags=["kv_cache"]) # important: need to manually set the random states of each tp to be identical. self.torch_random_states = get_torch_device().get_rng_state() get_torch_device().set_rng_state(self.gen_random_states) async def trainer_mode(self): """Context switch hybridengine to trainer mode.""" if self.config.rollout.free_cache_engine: log_gpu_memory_usage("Before rollout offload", logger=logger) await self.rollout.release() log_gpu_memory_usage("After rollout offload", logger=logger) for model in self.actor.actor_module: model.train() # add empty cache after each compute aggressive_empty_cache(force_sync=True) # FIXME(@wuxibin): megatron+sglang failed with `expandable_segments:True` in ci, # can't reproduce it in dev environment, temporarily disable it.
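# ----------------------------------------------------------------------------
# [Editor's note] Minimal sketch, not part of the original source, of the RNG
# bookkeeping in rollout_mode()/trainer_mode() above: trainer and rollout each
# keep their own torch RNG stream, and the generation stream is seeded the
# same way on every TP rank of a replica so sampling stays in sync.
import torch

class _RngStateSwap:
    def __init__(self, gen_seed: int):
        self.trainer_state = torch.get_rng_state()
        torch.manual_seed(gen_seed)  # e.g. gen_dp_rank + 1000, as in _build_rollout
        self.gen_state = torch.get_rng_state()
        torch.set_rng_state(self.trainer_state)  # start out in trainer mode

    def to_rollout(self):  # mirrors the tail of rollout_mode()
        self.trainer_state = torch.get_rng_state()
        torch.set_rng_state(self.gen_state)

    def to_trainer(self):  # mirrors the tail of trainer_mode()
        self.gen_state = torch.get_rng_state()
        torch.set_rng_state(self.trainer_state)
# ----------------------------------------------------------------------------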
# https://github.com/volcengine/verl/actions/runs/17382936845/job/49344264323?pr=3285 if os.environ.get("MEGATRON_CI_DISABLE_EXPANDABLE_SEGMENTS", "0") == "0": set_expandable_segments(True) # restore random states self.gen_random_states = get_torch_device().get_rng_state() get_torch_device().set_rng_state(self.torch_random_states) @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="actor")) @GPUMemoryLogger(role="update_actor", logger=logger) @DistProfiler.annotate(color="red") def update_actor(self, data: DataProto): assert self._is_actor if self._is_offload_param: load_megatron_model_to_gpu(self.actor_module) log_gpu_memory_usage("After load actor params and grad during update_actor", logger=logger) if self._is_offload_optimizer: load_megatron_optimizer(self.actor_optimizer) log_gpu_memory_usage("After load actor optimizer during update_actor", logger=logger) micro_batch_size = self.config.actor.ppo_micro_batch_size_per_gpu data.meta_info["micro_batch_size"] = micro_batch_size dataloader = self.actor.make_minibatch_iterator(data=data) with Timer(name="update_policy", logger=None) as timer: metrics = self.actor.update_policy(dataloader=dataloader) delta_time = timer.last global_num_tokens = data.meta_info["global_token_num"] estimated_flops, promised_flops = self.flops_counter.estimate_flops(global_num_tokens, delta_time) metrics["perf/mfu/actor"] = estimated_flops * self.config.actor.ppo_epochs / promised_flops / self.world_size metrics["perf/max_memory_allocated_gb"] = get_torch_device().max_memory_allocated() / (1024**3) metrics["perf/max_memory_reserved_gb"] = get_torch_device().max_memory_reserved() / (1024**3) metrics["perf/cpu_memory_used_gb"] = psutil.virtual_memory().used / (1024**3) from verl.utils.megatron.optimizer import get_megatron_last_lr metrics["actor/lr"] = get_megatron_last_lr(self.actor_optimizer) self.actor_optimizer_scheduler.step(1) # TODO: here, we should return all metrics output = DataProto(meta_info={"metrics": metrics}) output = output.to("cpu") if self._is_offload_param: offload_megatron_model_to_cpu(self.actor_module) log_gpu_memory_usage("After offload actor params and grad during update_actor", logger=logger) if self._is_offload_optimizer: offload_megatron_optimizer(self.actor_optimizer) log_gpu_memory_usage("After offload actor optimizer during update_actor", logger=logger) aggressive_empty_cache(force_sync=True) return output @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="rollout")) @GPUMemoryLogger(role="generate_sequences", logger=logger) @DistProfiler.annotate(color="red") def generate_sequences(self, prompts: DataProto): assert self._is_rollout prompts = prompts.to(get_device_name()) meta_info = { "eos_token_id": self.generation_config.eos_token_id if self.generation_config is not None else self.tokenizer.eos_token_id, "pad_token_id": self.generation_config.pad_token_id if self.generation_config is not None else self.tokenizer.pad_token_id, } prompts.meta_info.update(meta_info) if self._is_offload_optimizer: offload_megatron_optimizer(self.actor_optimizer) timing_generate = {} if self._is_actor: # For rollout only, we do not switch context. 
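# ----------------------------------------------------------------------------
# [Editor's note] Sketch, not part of the original source, of the MFU metric
# assembled in update_actor above: achieved model FLOPs, scaled by ppo_epochs
# because update_policy sweeps the minibatches that many times, divided by the
# per-device promised peak summed over all ranks. Numbers below are made up.
def _mfu(estimated_flops: float, promised_flops: float, ppo_epochs: int, world_size: int) -> float:
    return estimated_flops * ppo_epochs / promised_flops / world_size

# e.g. _mfu(2.4e15, 989e12, ppo_epochs=1, world_size=8) ~= 0.30, i.e. roughly
# 30% of peak if a step achieved 2.4e15 FLOPS on 8 devices rated at 989 TFLOPS.
# ----------------------------------------------------------------------------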
loop = get_event_loop() loop.run_until_complete(self.rollout_mode()) log_gpu_memory_usage("After switch to rollout mode", logger=logger) with simple_timer("generate_sequences", timing_generate): output = self.rollout.generate_sequences(prompts=prompts) if self._is_actor: loop.run_until_complete(self.trainer_mode()) log_gpu_memory_usage("After switch to trainer mode", logger=logger) # We calculate the average timing across all ranks # to make sure meta_info["timing"] is the same timing_generate_topk_ratio, timing_generate_min, timing_generate_max = topk_reduce_ratio_min_max( timing_generate["generate_sequences"] ) timing_generate = reduce_timing(timing_generate) timing_generate.update( { "generation_timing/max": timing_generate_max, "generation_timing/min": timing_generate_min, "generation_timing/topk_ratio": timing_generate_topk_ratio, } ) output.meta_info["timing"] = timing_generate output = output.to("cpu") # clear kv cache aggressive_empty_cache(force_sync=True) return output @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="actor")) @GPUMemoryLogger(role="compute_ref_log_prob", logger=logger) @DistProfiler.annotate(color="olive") def compute_ref_log_prob(self, data: DataProto): assert self._is_ref if self._ref_is_offload_param: load_megatron_model_to_gpu(self.ref_module, load_grad=False) log_gpu_memory_usage("After load ref params and grad during compute_ref_log_prob", logger=logger) micro_batch_size = self.config.ref.log_prob_micro_batch_size_per_gpu data.meta_info["micro_batch_size"] = micro_batch_size data.meta_info["max_token_len"] = self.config.ref.log_prob_max_token_len_per_gpu data.meta_info["use_dynamic_bsz"] = self.config.ref.log_prob_use_dynamic_bsz data.meta_info["temperature"] = self.config.rollout.temperature output, _ = self.ref_policy.compute_log_prob(data=data, calculate_entropy=False) output = DataProto.from_dict(tensors={"ref_log_prob": output}) output = output.to("cpu") if self._ref_is_offload_param: offload_megatron_model_to_cpu(self.ref_module) log_gpu_memory_usage("After offload ref params and grad during compute_ref_log_prob", logger=logger) aggressive_empty_cache(force_sync=True) return output @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="actor")) @GPUMemoryLogger(role="compute_log_prob", logger=logger) @DistProfiler.annotate(color="blue") def compute_log_prob(self, data: DataProto): assert self._is_actor if self._is_offload_param: load_megatron_model_to_gpu(self.actor_module, load_grad=False) log_gpu_memory_usage("After load actor params and grad during compute_log_prob", logger=logger) # we should always recompute old_log_probs when it is HybridEngine data.meta_info["micro_batch_size"] = self.config.rollout.log_prob_micro_batch_size_per_gpu data.meta_info["max_token_len"] = self.config.rollout.log_prob_max_token_len_per_gpu data.meta_info["use_dynamic_bsz"] = self.config.rollout.log_prob_use_dynamic_bsz data.meta_info["temperature"] = self.config.rollout.temperature output, entropys = self.actor.compute_log_prob(data=data, calculate_entropy=True) output = DataProto.from_dict( tensors={"old_log_probs": output, "entropys": entropys}, meta_info={"temperature": self.config.rollout.temperature}, ) output = output.to("cpu") # clear kv cache if self._is_offload_param: offload_megatron_model_to_cpu(self.actor_module) log_gpu_memory_usage("After offload actor params and grad during compute_log_prob", logger=logger) aggressive_empty_cache(force_sync=True) return output @register(dispatch_mode=Dispatch.ONE_TO_ALL) def 
    load_checkpoint(self, checkpoint_path, hdfs_path=None, del_local_after_load=True):
        # No checkpoint to load, just offload the model and optimizer to CPU
        if checkpoint_path is None:
            if self._is_offload_param:
                offload_megatron_model_to_cpu(self.actor_module)
            if self._is_offload_optimizer:
                offload_megatron_optimizer(self.actor_optimizer)
            log_gpu_memory_usage("After offload actor params and optimizer during load_checkpoint", logger=logger)
            return

        if self._is_offload_param:
            load_megatron_model_to_gpu(self.actor_module)

        self.checkpoint_mananager.load_checkpoint(
            local_path=checkpoint_path, hdfs_path=hdfs_path, del_local_after_load=del_local_after_load
        )

        if self._is_offload_param:
            offload_megatron_model_to_cpu(self.actor_module)
        if self._is_offload_optimizer:
            offload_megatron_optimizer(self.actor_optimizer)

    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def load_pretrained_model(self, checkpoint_path, del_local_after_load=True):
        pass

    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def save_checkpoint(self, checkpoint_path, hdfs_path=None, global_step=0, max_ckpt_to_keep=None):
        if self._is_offload_param:
            load_megatron_model_to_gpu(self.actor_module)
        self.checkpoint_mananager.save_checkpoint(
            local_path=checkpoint_path, hdfs_path=hdfs_path, global_step=global_step, max_ckpt_to_keep=max_ckpt_to_keep
        )
        torch.distributed.barrier()
        if self._is_offload_param:
            offload_megatron_model_to_cpu(self.actor_module)


class AsyncActorRolloutRefWorker(ActorRolloutRefWorker):
    @register(dispatch_mode=Dispatch.DIRECT_ROLLOUT_METHOD)
    async def wake_up(self):
        await self.rollout_mode()
        return True

    @register(dispatch_mode=Dispatch.DIRECT_ROLLOUT_METHOD)
    async def sleep(self):
        await self.trainer_mode()
        return True

    # ============================ vLLM related ============================

    @register(dispatch_mode=Dispatch.DIRECT_ROLLOUT_METHOD)
    def get_zeromq_address(self):
        return self.rollout.get_zeromq_address()

    # ============================ SGLang related ============================

    @register(dispatch_mode=Dispatch.DIRECT_ROLLOUT_METHOD, blocking=False)
    async def chat_completion(self, json_request):
        ret = await self.rollout.chat_completion(json_request)
        return ret

    @register(dispatch_mode=Dispatch.DIRECT_ROLLOUT_METHOD, blocking=False)
    async def generate(
        self,
        prompt_ids: list[int],
        sampling_params: dict[str, Any],
        request_id: str,
        image_data: Optional[list[Any]] = None,
    ) -> list[int]:
        ret = await self.rollout.generate(prompt_ids, sampling_params, request_id, image_data=image_data)
        return ret


class CriticWorker(MegatronWorker, DistProfilerExtension):
    def __init__(self, config: McoreCriticConfig):
        Worker.__init__(self)

        omega_profiler_config = config.get("profiler", {})
        profiler_config = omega_conf_to_dataclass(omega_profiler_config, dataclass_type=ProfilerConfig)
        if omega_profiler_config.get("tool", None) in ["npu", "nsys", "torch", "torch_memory"]:
            tool_config = omega_conf_to_dataclass(
                omega_profiler_config.get("tool_config", {}).get(omega_profiler_config.get("tool"))
            )
        else:
            tool_config = None
        DistProfilerExtension.__init__(
            self, DistProfiler(rank=self.rank, config=profiler_config, tool_config=tool_config)
        )

        self.config: McoreCriticConfig = config

        # NOTE(sgm): We utilize the colocated WorkerGroup by default.
        # As a result, workers for different models share the same process.
        # Therefore, we only require one distributed initialization.
        # To use a different parallel strategy for different models:
        # 1. users should disable WorkerDict; 2. assign a different ResourcePool to each model;
        # 3. and apply the following patch in ray==2.10, https://github.com/ray-project/ray/pull/44385
        if not torch.distributed.is_initialized():
            set_numa_affinity()
            rank = int(os.environ["LOCAL_RANK"])
            torch.distributed.init_process_group(
                backend=get_nccl_backend(),
                timeout=datetime.timedelta(seconds=self.config.get("nccl_timeout", 600)),
                init_method=os.environ.get("DIST_INIT_METHOD", None),
            )
            get_torch_device().set_device(rank)

            mpu.initialize_model_parallel(
                tensor_model_parallel_size=self.config.megatron.tensor_model_parallel_size,
                pipeline_model_parallel_size=self.config.megatron.pipeline_model_parallel_size,
                virtual_pipeline_model_parallel_size=self.config.megatron.virtual_pipeline_model_parallel_size,
                use_sharp=False,
                context_parallel_size=self.config.megatron.context_parallel_size,
                expert_model_parallel_size=self.config.megatron.expert_model_parallel_size,
                expert_tensor_parallel_size=self.config.megatron.expert_tensor_parallel_size,
                nccl_communicator_config_path=None,
            )

        is_collect = (
            mpu.get_tensor_model_parallel_rank() == 0
            and mpu.get_pipeline_model_parallel_rank() == mpu.get_pipeline_model_parallel_world_size() - 1
            and mpu.get_context_parallel_rank() == 0
        )
        self._register_dispatch_collect_info(
            mesh_name="critic", dp_rank=mpu.get_data_parallel_rank(), is_collect=is_collect
        )

        set_random_seed(seed=self.config.megatron.seed)

        # set Megatron offload params
        self._is_offload_param = self.config.megatron.param_offload
        self._is_offload_optimizer = self.config.megatron.optimizer_offload

        # normalize config: scale the mini-batch by the rollout count, then shard it across data-parallel ranks
        self.config.ppo_mini_batch_size *= self.config.rollout_n
        self.config.ppo_mini_batch_size //= mpu.get_data_parallel_world_size()
        if self.config.get("ppo_micro_batch_size", None):
            self.config.ppo_micro_batch_size //= mpu.get_data_parallel_world_size()
            self.config.ppo_micro_batch_size_per_gpu = self.config.ppo_micro_batch_size

    # TODO(sgm): support critic model offload
    def _build_critic_model_optimizer(
        self, model_path, optim_config, override_model_config, override_transformer_config, override_ddp_config
    ):
        from verl.utils.megatron.optimizer import (
            get_megatron_optimizer,
            get_megatron_optimizer_param_scheduler,
            init_megatron_optim_config,
        )
        from verl.utils.megatron_utils import McoreModuleWrapperConfig, make_megatron_module
        from verl.utils.model import print_model_size

        self._init_hf_config_and_tf_config(
            model_path,
            self.config.model.tokenizer_path,
            self.dtype,
            override_model_config,
            override_transformer_config,
            self.config.model.get("trust_remote_code", False),
            self.config.megatron.use_mbridge,
        )

        wrap_config = McoreModuleWrapperConfig(
            is_value_model=True,  # the critic is a value model
            share_embeddings_and_output_weights=False,
            wrap_with_ddp=True,
            use_distributed_optimizer=self.config.megatron.use_distributed_optimizer,
        )
        critic_module = make_megatron_module(
            wrap_config=wrap_config,
            tf_config=self.tf_config,
            hf_config=self.hf_config,
            bridge=self.bridge,
            override_model_config=override_model_config,
            override_ddp_config=override_ddp_config,
        )
        # note that here critic_module will be a list, to be compatible with the construction of
        # interleaved pp (vpp); but here, we do not use pp (vpp) yet.
For simplicity, we remove the list # critic_module = nn.ModuleList(critic_module) if self.config.load_weight: t0 = time.time() if self.config.megatron.use_dist_checkpointing: load_mcore_dist_weights( critic_module, self.config.megatron.dist_checkpointing_path, is_value_model=True ) else: if self.bridge is not None: local_model_path = get_hf_model_path(self.config) self.bridge.load_weights(critic_module, local_model_path) else: load_megatron_gptmodel_weights( self.config, self.hf_config, critic_module, params_dtype=self.dtype, is_value_model=True ) t1 = time.time() if torch.distributed.get_rank() == 0: print(f"critic load_weight time: {t1 - t0}") if self.rank == 0: print_model_size(critic_module[0]) # TODO: add more optimizer args into config optim_config_megatron = init_megatron_optim_config(optim_config) critic_optimizer = get_megatron_optimizer(model=critic_module, config=optim_config_megatron) critic_optimizer_scheduler = get_megatron_optimizer_param_scheduler( optimizer=critic_optimizer, config=optim_config ) get_torch_device().empty_cache() return critic_module, critic_optimizer, critic_optimizer_scheduler, self.hf_config, optim_config @register(dispatch_mode=Dispatch.ONE_TO_ALL) def init_model(self): # create critic from verl.utils.torch_dtypes import PrecisionType if self.config.model.get("external_lib", None) is not None: # This is used to import external_lib into the huggingface systems import importlib importlib.import_module(self.config.model.external_lib) override_model_config = OmegaConf.to_container(OmegaConf.create(self.config.model.get("override_config", {}))) override_transformer_config = OmegaConf.to_container( OmegaConf.create(self.config.megatron.get("override_transformer_config", {})) ) override_ddp_config = OmegaConf.to_container( OmegaConf.create(self.config.megatron.get("override_ddp_config", {})) ) self.param_dtype = torch.bfloat16 self.dtype = PrecisionType.to_dtype(self.param_dtype) ( self.critic_module, self.critic_optimizer, self.critic_optimizer_scheduler, self.critic_model_config, critic_optimizer_config, ) = self._build_critic_model_optimizer( model_path=self.config.model.path, optim_config=self.config.optim, override_model_config=override_model_config, override_transformer_config=override_transformer_config, override_ddp_config=override_ddp_config, ) if self._is_offload_param: offload_megatron_model_to_cpu(self.critic_module) if self._is_offload_optimizer: offload_megatron_optimizer(self.critic_optimizer) self.critic = MegatronPPOCritic( config=self.config, model_config=self.critic_model_config, hf_config=self.hf_config, tf_config=self.tf_config, critic_module=self.critic_module, critic_optimizer=self.critic_optimizer, critic_optimizer_config=critic_optimizer_config, ) self.flops_counter = FlopsCounter(self.critic_model_config) self.checkpoint_mananager = MegatronCheckpointManager( config=self.config, checkpoint_config=self.config.checkpoint, model_config=self.critic_model_config, transformer_config=self.tf_config, role="critic", model=self.critic_module, arch=self.architectures[0], hf_config=self.hf_config, param_dtype=self.param_dtype, share_embeddings_and_output_weights=False, processing_class=self.processor if self.processor is not None else self.tokenizer, optimizer=self.critic_optimizer, optimizer_scheduler=self.critic_optimizer_scheduler, use_distributed_optimizer=self.config.megatron.use_distributed_optimizer, use_checkpoint_opt_param_scheduler=self.config.optim.use_checkpoint_opt_param_scheduler, bridge=self.bridge, 
            use_dist_checkpointing=self.config.megatron.use_dist_checkpointing,
        )

    @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="critic"))
    @DistProfiler.annotate(color="cyan")
    def compute_values(self, data: DataProto):
        micro_batch_size = self.config.ppo_micro_batch_size_per_gpu
        data.meta_info["micro_batch_size"] = micro_batch_size
        data.meta_info["max_token_len"] = self.config.forward_max_token_len_per_gpu
        data.meta_info["use_dynamic_bsz"] = self.config.use_dynamic_bsz
        data = data.to(get_device_id())
        if self._is_offload_param:
            load_megatron_model_to_gpu(self.critic_module)
        values = self.critic.compute_values(data=data)
        output = DataProto.from_dict(tensors={"values": values})
        output = output.to("cpu")
        if self._is_offload_param:
            offload_megatron_model_to_cpu(self.critic_module)
        return output

    @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="critic"))
    @DistProfiler.annotate(color="pink")
    def update_critic(self, data: DataProto):
        data = data.to(get_device_id())
        if self._is_offload_param:
            load_megatron_model_to_gpu(self.critic_module)
        if self._is_offload_optimizer:
            load_megatron_optimizer(self.critic_optimizer)

        dataloader = self.critic.make_minibatch_iterator(data)
        with Timer(name="update_critic", logger=None) as timer:
            metrics = self.critic.update_critic(dataloader=dataloader)
        delta_time = timer.last
        global_num_tokens = data.meta_info["global_token_num"]
        estimated_flops, promised_flops = self.flops_counter.estimate_flops(global_num_tokens, delta_time)
        metrics["perf/mfu/critic"] = estimated_flops * self.config.ppo_epochs / promised_flops / self.world_size

        from verl.utils.megatron.optimizer import get_megatron_last_lr

        metrics["critic/lr"] = get_megatron_last_lr(self.critic_optimizer)
        self.critic_optimizer_scheduler.step(1)

        output = DataProto(batch=None, meta_info={"metrics": metrics})
        if self._is_offload_param:
            offload_megatron_model_to_cpu(self.critic_module)
        if self._is_offload_optimizer:
            offload_megatron_optimizer(self.critic_optimizer)
        output = output.to("cpu")
        return output

    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def load_checkpoint(self, checkpoint_path, hdfs_path=None, del_local_after_load=True):
        if self._is_offload_param:
            load_megatron_model_to_gpu(self.critic_module)
        self.checkpoint_mananager.load_checkpoint(
            local_path=checkpoint_path, hdfs_path=hdfs_path, del_local_after_load=del_local_after_load
        )
        if self._is_offload_param:
            offload_megatron_model_to_cpu(self.critic_module)
        if self._is_offload_optimizer:
            offload_megatron_optimizer(self.critic_optimizer)

    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def save_checkpoint(self, checkpoint_path, hdfs_path=None, global_steps=0, max_ckpt_to_keep=None):
        if self._is_offload_param:
            load_megatron_model_to_gpu(self.critic_module)
        self.checkpoint_mananager.save_checkpoint(
            local_path=checkpoint_path, hdfs_path=hdfs_path, global_step=global_steps, max_ckpt_to_keep=max_ckpt_to_keep
        )
        if self._is_offload_param:
            offload_megatron_model_to_cpu(self.critic_module)


class RewardModelWorker(MegatronWorker, DistProfilerExtension):
    """
    Note that we only implement the reward model that is a subclass of AutoModelForSequenceClassification.
""" def __init__(self, config): Worker.__init__(self) profiler_config = omega_conf_to_dataclass(config.get("profiler", {}), dataclass_type=ProfilerConfig) omega_profiler_config = config.get("profiler", {}) profiler_config = omega_conf_to_dataclass(omega_profiler_config, dataclass_type=ProfilerConfig) if omega_profiler_config.get("tool", None) in ["npu", "nsys", "torch", "torch_memory"]: tool_config = omega_conf_to_dataclass( omega_profiler_config.get("tool_config", {}).get(omega_profiler_config.get("tool")) ) else: tool_config = None DistProfilerExtension.__init__( self, DistProfiler(rank=self.rank, config=profiler_config, tool_config=tool_config), ) self.config = config # NOTE(sgm): We utilize colocate WorkerGroup by default. # As a result, Workers for different model share the same process. # Therefore, we only require one distribute initialization. # To utilize different parallel strategy in different models: # 1, users should disable WorkerDict; 2.assign different ResourcePool to different models, # 3. and apply the following patch in ray==2.10, https://github.com/ray-project/ray/pull/44385 if not torch.distributed.is_initialized(): set_numa_affinity() rank = int(os.environ["LOCAL_RANK"]) torch.distributed.init_process_group( backend=get_nccl_backend(), timeout=datetime.timedelta(seconds=self.config.get("nccl_timeout", 600)), init_method=os.environ.get("DIST_INIT_METHOD", None), ) get_torch_device().set_device(rank) mpu.initialize_model_parallel( tensor_model_parallel_size=self.config.megatron.tensor_model_parallel_size, pipeline_model_parallel_size=self.config.megatron.pipeline_model_parallel_size, virtual_pipeline_model_parallel_size=self.config.megatron.virtual_pipeline_model_parallel_size, use_sharp=False, context_parallel_size=self.config.megatron.context_parallel_size, expert_model_parallel_size=self.config.megatron.expert_model_parallel_size, expert_tensor_parallel_size=self.config.megatron.expert_tensor_parallel_size, nccl_communicator_config_path=None, ) is_collect = ( mpu.get_tensor_model_parallel_rank() == 0 and mpu.get_pipeline_model_parallel_rank() == mpu.get_pipeline_model_parallel_world_size() - 1 and mpu.get_context_parallel_rank() == 0 ) self._register_dispatch_collect_info( mesh_name="reward", dp_rank=mpu.get_data_parallel_rank(), is_collect=is_collect ) set_random_seed(seed=self.config.megatron.seed) # normalize config if self.config.micro_batch_size is not None: self.config.micro_batch_size //= mpu.get_data_parallel_world_size() self.config.micro_batch_size_per_gpu = self.config.micro_batch_size def _build_rm_model(self, model_path, tokenizer, override_model_config, override_transformer_config): from verl.utils.megatron_utils import McoreModuleWrapperConfig, make_megatron_module self._init_hf_config_and_tf_config( model_path, tokenizer, self.dtype, override_model_config, override_transformer_config, self.config.model.get("trust_remote_code", False), self.config.megatron.use_mbridge, ) wrap_config = McoreModuleWrapperConfig( is_value_model=True, # reward model is value model share_embeddings_and_output_weights=False, wrap_with_ddp=False, use_distributed_optimizer=self.config.megatron.use_distributed_optimizer, ) reward_model = make_megatron_module( wrap_config=wrap_config, tf_config=self.tf_config, hf_config=self.hf_config, bridge=self.bridge, override_model_config=override_model_config, ) if self.config.load_weight: if self.config.megatron.use_dist_checkpointing: load_mcore_dist_weights(reward_model, self.config.megatron.dist_checkpointing_path, is_value_model=True) 
            else:
                if self.bridge is not None:
                    local_model_path = get_hf_model_path(self.config)
                    self.bridge.load_weights(reward_model, local_model_path)
                else:
                    load_megatron_gptmodel_weights(
                        self.config, self.hf_config, reward_model, params_dtype=self.dtype, is_value_model=True
                    )

        get_torch_device().empty_cache()
        return reward_model, self.hf_config

    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def init_model(self):
        # create the reward model
        from verl.utils.torch_dtypes import PrecisionType

        if self.config.model.get("external_lib", None) is not None:
            # This is used to import external_lib into the huggingface systems
            import importlib

            importlib.import_module(self.config.model.external_lib)
        override_model_config = OmegaConf.to_container(OmegaConf.create(self.config.model.get("override_config", {})))
        override_transformer_config = OmegaConf.to_container(
            OmegaConf.create(self.config.megatron.get("override_transformer_config", {}))
        )

        use_shm = self.config.model.get("use_shm", False)
        sft_tokenizer_local_path = copy_to_local(self.config.model.input_tokenizer, use_shm=use_shm)
        sft_tokenizer = hf_tokenizer(sft_tokenizer_local_path)
        rm_tokenizer_path = self.config.model.get("rm_tokenizer", None)
        rm_tokenizer = None
        if rm_tokenizer_path is not None:
            rm_tokenizer_local_path = copy_to_local(rm_tokenizer_path, use_shm=use_shm)
            rm_tokenizer = hf_tokenizer(
                rm_tokenizer_local_path, trust_remote_code=self.config.model.get("trust_remote_code", False)
            )
        self.param_dtype = torch.bfloat16
        self.dtype = PrecisionType.to_dtype(self.param_dtype)

        reward_model_module, reward_model_config = self._build_rm_model(
            model_path=self.config.model.path,
            tokenizer=rm_tokenizer,
            override_model_config=override_model_config,
            override_transformer_config=override_transformer_config,
        )
        # FIXME(sgm): reward model param offload is implemented in MegatronRewardModel;
        # it should be implemented in the workers
        self.rm = MegatronRewardModel(
            config=self.config,
            reward_model_module=reward_model_module,
            model_config=reward_model_config,
            hf_config=self.hf_config,
            tf_config=self.tf_config,
            sft_tokenizer=sft_tokenizer,
            rm_tokenizer=rm_tokenizer,
        )

    # TODO: the reward model should use its own tokenizer instead of the SFT tokenizer;
    # the input_ids, responses, attention_mask and position_ids may be different!
    @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="reward"))
    @DistProfiler.annotate(color="brown")
    def compute_rm_score(self, data: DataProto):
        data.meta_info["micro_batch_size"] = self.config.micro_batch_size_per_gpu
        data.meta_info["max_token_len"] = self.config.forward_max_token_len_per_gpu
        data.meta_info["use_dynamic_bsz"] = self.config.use_dynamic_bsz
        data = data.to(get_device_id())
        output = self.rm.compute_reward(data)
        output = output.to("cpu")
        return output


================================================
FILE: verl_distillation/verl/workers/reward_manager/__init__.py
================================================
# Copyright 2024 PRIME team and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
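# A minimal consumer-side usage sketch of this package. The trainer wiring is
# assumed, and "naive" refers to NaiveRewardManager registered below; only
# `get_reward_manager_cls` and the `__call__(data, return_dict)` contract are
# taken from this package:
#
#     from verl.workers.reward_manager import get_reward_manager_cls
#
#     reward_manager_cls = get_reward_manager_cls("naive")
#     reward_fn = reward_manager_cls(tokenizer=tokenizer, num_examine=1)
#     reward_tensor = reward_fn(data)  # data is a DataProto with "prompts"/"responses"
#     # or, to also collect per-key extras:
#     result = reward_fn(data, return_dict=True)  # {"reward_tensor": ..., "reward_extra_info": ...}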
from .registry import get_reward_manager_cls, register # noqa: I001 from .batch import BatchRewardManager from .dapo import DAPORewardManager from .naive import NaiveRewardManager from .prime import PrimeRewardManager # Note(haibin.lin): no need to include all reward managers here in case of complicated dependencies __all__ = [ "BatchRewardManager", "DAPORewardManager", "NaiveRewardManager", "PrimeRewardManager", "register", "get_reward_manager_cls", ] ================================================ FILE: verl_distillation/verl/workers/reward_manager/abstract.py ================================================ # Copyright 2023-2025 SGLang Team # Copyright Amazon.com, Inc. or its affiliates. # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from abc import ABC, abstractmethod from typing import Any, Callable import torch from verl.protocol import DataProto RawRewardFn = Callable[..., Any] class AbstractRewardManager(ABC): @abstractmethod def __init__( self, tokenizer: Any, num_examine: int, compute_score: RawRewardFn | None, reward_fn_key: str = "data_source", **kwargs: Any, ): pass @abstractmethod def __call__( self, data: DataProto, return_dict: bool = False, ) -> torch.Tensor | dict[str, Any]: pass ================================================ FILE: verl_distillation/verl/workers/reward_manager/batch.py ================================================ # Copyright 2025 Individual Contributor: Mert Unsal # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections import defaultdict from typing import Any import torch from verl import DataProto from verl.workers.reward_manager import register from verl.workers.reward_manager.abstract import AbstractRewardManager, RawRewardFn @register("batch") class BatchRewardManager(AbstractRewardManager): """ A batch reward manager that computes rewards for a batch of data. Args: tokenizer (Tokenizer): The tokenizer to use for decoding the responses. num_examine (int): The number of responses to examine. compute_score (callable): The function to compute the rewards. reward_fn_key (str): The key to use for the reward function. reward_kwargs (dict): The keyword arguments to pass to the reward function. 
""" def __init__( self, tokenizer, num_examine, compute_score: RawRewardFn, reward_fn_key="data_source", **reward_kwargs ): self.tokenizer = tokenizer self.num_examine = num_examine self.compute_score = compute_score self.reward_fn_key = reward_fn_key self.reward_kwargs = reward_kwargs def verify(self, data): prompt_ids = data.batch["prompts"] response_ids = data.batch["responses"] attention_mask = data.batch["attention_mask"] prompt_len = prompt_ids.shape[-1] valid_response_lengths = attention_mask[:, prompt_len:].sum(dim=-1) responses_str = [] for i in range(len(data)): valid_len = valid_response_lengths[i] valid_response_ids = response_ids[i][:valid_len] response_str = self.tokenizer.decode(valid_response_ids, skip_special_tokens=True) responses_str.append(response_str) ground_truths = [item.non_tensor_batch["reward_model"].get("ground_truth", None) for item in data] data_sources = data.non_tensor_batch[self.reward_fn_key] rollout_reward_scores = data.non_tensor_batch.get("reward_scores", [{} for _ in range(len(data))]) extras = data.non_tensor_batch.get("extra_info", [{} for _ in range(len(data))]) for i in range(len(data)): extras[i]["rollout_reward_scores"] = rollout_reward_scores[i] scores = self.compute_score( data_sources=data_sources, solution_strs=responses_str, ground_truths=ground_truths, extra_infos=extras, **self.reward_kwargs, ) return scores def __call__(self, data: DataProto, return_dict: bool = False) -> torch.Tensor | dict[str, Any]: # If there is rm score, we directly return rm score. Otherwise, we compute via rm_score_fn if "rm_scores" in data.batch.keys(): if return_dict: reward_extra_keys = data.meta_info.get("reward_extra_keys", []) reward_extra_info = {key: data.non_tensor_batch[key] for key in reward_extra_keys} return {"reward_tensor": data.batch["rm_scores"], "reward_extra_info": reward_extra_info} else: return data.batch["rm_scores"] reward_tensor = torch.zeros_like(data.batch["responses"], dtype=torch.float32) reward_extra_info = defaultdict(list) prompt_ids = data.batch["prompts"] prompt_len = prompt_ids.shape[-1] attention_mask = data.batch["attention_mask"] valid_response_lengths = attention_mask[:, prompt_len:].sum(dim=-1) data_sources = data.non_tensor_batch[self.reward_fn_key] scores = self.verify(data) rewards = [] already_printed: dict[str, Any] = {} for i in range(len(data)): length = valid_response_lengths[i].item() score = scores[i] if isinstance(score, dict): reward = score["score"] for key, value in score.items(): reward_extra_info[key].append(value) else: reward = score rewards.append(reward) reward_tensor[i, length - 1] = reward data_source = data_sources[i] if already_printed.get(data_source, 0) < self.num_examine: response_str = self.tokenizer.decode(data.batch["responses"][i][:length], skip_special_tokens=True) prompt_str = self.tokenizer.decode(data.batch["prompts"][i], skip_special_tokens=True) ground_truth = data[i].non_tensor_batch["reward_model"].get("ground_truth", None) print("[prompt]", prompt_str) print("[response]", response_str) print("[ground_truth]", ground_truth) print("[score]", scores[i]) already_printed[data_source] = already_printed.get(data_source, 0) + 1 data.batch["acc"] = torch.tensor(rewards, dtype=torch.float32, device=prompt_ids.device) if return_dict: return {"reward_tensor": reward_tensor, "reward_extra_info": reward_extra_info} else: return reward_tensor ================================================ FILE: verl_distillation/verl/workers/reward_manager/dapo.py ================================================ # 
Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections import defaultdict import torch from verl import DataProto from verl.utils.reward_score import default_compute_score from verl.workers.reward_manager import register from verl.workers.reward_manager.abstract import AbstractRewardManager @register("dapo") class DAPORewardManager(AbstractRewardManager): """The reward manager.""" def __init__( self, tokenizer, num_examine, compute_score=None, reward_fn_key="data_source", max_resp_len=None, overlong_buffer_cfg=None, ) -> None: self.tokenizer = tokenizer self.num_examine = num_examine # the number of batches of decoded responses to print to the console self.compute_score = compute_score or default_compute_score self.reward_fn_key = reward_fn_key self.overlong_buffer_cfg = overlong_buffer_cfg self.max_resp_len = max_resp_len if self.overlong_buffer_cfg is not None: assert self.max_resp_len is not None, ( f"max_resp_len must be provided if {overlong_buffer_cfg=}, but got None" ) assert self.max_resp_len >= self.overlong_buffer_cfg.len, ( "max_resp_len must be larger than overlong_buffer.len" ) def __call__(self, data: DataProto, return_dict: bool = False): """We will expand this function gradually based on the available datasets""" # If there is rm score, we directly return rm score. 
        # Otherwise, we compute via rm_score_fn.
        if "rm_scores" in data.batch.keys():
            if return_dict:
                reward_extra_keys = data.meta_info.get("reward_extra_keys", [])
                reward_extra_info = {key: data.non_tensor_batch[key] for key in reward_extra_keys}
                return {"reward_tensor": data.batch["rm_scores"], "reward_extra_info": reward_extra_info}
            else:
                return data.batch["rm_scores"]

        reward_tensor = torch.zeros_like(data.batch["responses"], dtype=torch.float32)
        reward_extra_info = defaultdict(list)

        already_print_data_sources = {}

        for i in range(len(data)):
            data_item = data[i]  # DataProtoItem

            prompt_ids = data_item.batch["prompts"]
            prompt_length = prompt_ids.shape[-1]

            valid_prompt_length = data_item.batch["attention_mask"][:prompt_length].sum()
            valid_prompt_ids = prompt_ids[-valid_prompt_length:]

            response_ids = data_item.batch["responses"]
            valid_response_length = data_item.batch["attention_mask"][prompt_length:].sum()
            valid_response_ids = response_ids[:valid_response_length]

            # decode
            prompt_str = self.tokenizer.decode(valid_prompt_ids, skip_special_tokens=True)
            response_str = self.tokenizer.decode(valid_response_ids, skip_special_tokens=True)
            eos_token = self.tokenizer.eos_token
            if response_str.endswith(eos_token):
                response_str = response_str[: -len(eos_token)]

            ground_truth = data_item.non_tensor_batch["reward_model"]["ground_truth"]
            data_source = data_item.non_tensor_batch[self.reward_fn_key]
            extra_info = data_item.non_tensor_batch.get("extra_info", {})
            rollout_reward_scores = data_item.non_tensor_batch.get("reward_scores", {})
            extra_info["rollout_reward_scores"] = rollout_reward_scores

            result = self.compute_score(
                data_source=data_source,
                solution_str=response_str,
                ground_truth=ground_truth,
                extra_info=extra_info,
            )

            score: float
            if isinstance(result, dict):
                score = result["score"]
                # Store the information including original reward
                for key, value in result.items():
                    reward_extra_info[key].append(value)
            else:
                score = result
                reward_extra_info["acc"].append(score)

            reward = score

            # guard against a missing overlong_buffer_cfg (it may be None, see __init__)
            if self.overlong_buffer_cfg is not None and self.overlong_buffer_cfg.enable:
                overlong_buffer_len = self.overlong_buffer_cfg.len
                expected_len = self.max_resp_len - overlong_buffer_len
                exceed_len = valid_response_length - expected_len
                overlong_penalty_factor = self.overlong_buffer_cfg.penalty_factor
                overlong_reward = min(-exceed_len / overlong_buffer_len * overlong_penalty_factor, 0)
                reward += overlong_reward
                if self.overlong_buffer_cfg.log:
                    reward_extra_info["overlong_reward"].append(overlong_reward)
                    reward_extra_info["overlong"].append(overlong_reward < 0)

            reward_tensor[i, valid_response_length - 1] = reward

            if data_source not in already_print_data_sources:
                already_print_data_sources[data_source] = 0

            if already_print_data_sources[data_source] < self.num_examine:
                already_print_data_sources[data_source] += 1
                print("[prompt]", prompt_str)
                print("[response]", response_str)
                print("[ground_truth]", ground_truth)
                if isinstance(result, dict):
                    for key, value in result.items():
                        print(f"[{key}]", value)
                else:
                    print("[score]", score)

        if return_dict:
            return {
                "reward_tensor": reward_tensor,
                "reward_extra_info": reward_extra_info,
            }
        else:
            return reward_tensor


================================================
FILE: verl_distillation/verl/workers/reward_manager/naive.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections import defaultdict from typing import Any import torch from verl import DataProto from verl.utils.reward_score import default_compute_score from verl.workers.reward_manager import register from verl.workers.reward_manager.abstract import AbstractRewardManager @register("naive") class NaiveRewardManager(AbstractRewardManager): """The reward manager.""" def __init__(self, tokenizer, num_examine, compute_score=None, reward_fn_key="data_source") -> None: """ Initialize the NaiveRewardManager instance. Args: tokenizer: The tokenizer used to decode token IDs into text. num_examine: The number of batches of decoded responses to print to the console for debugging purpose. compute_score: A function to compute the reward score. If None, `default_compute_score` will be used. reward_fn_key: The key used to access the data source in the non-tensor batch data. Defaults to "data_source". """ self.tokenizer = tokenizer # Store the tokenizer for decoding token IDs self.num_examine = num_examine # the number of batches of decoded responses to print to the console self.compute_score = compute_score or default_compute_score self.reward_fn_key = reward_fn_key # Store the key for accessing the data source def __call__(self, data: DataProto, return_dict: bool = False) -> torch.Tensor | dict[str, Any]: """We will expand this function gradually based on the available datasets""" # If there is rm score, we directly return rm score. 
Otherwise, we compute via rm_score_fn if "rm_scores" in data.batch.keys(): if return_dict: reward_extra_keys = data.meta_info.get("reward_extra_keys", []) reward_extra_info = {key: data.non_tensor_batch[key] for key in reward_extra_keys} return {"reward_tensor": data.batch["rm_scores"], "reward_extra_info": reward_extra_info} else: return data.batch["rm_scores"] reward_tensor = torch.zeros_like(data.batch["responses"], dtype=torch.float32) reward_extra_info = defaultdict(list) already_print_data_sources = {} for i in range(len(data)): data_item = data[i] # DataProtoItem prompt_ids = data_item.batch["prompts"] prompt_length = prompt_ids.shape[-1] valid_prompt_length = data_item.batch["attention_mask"][:prompt_length].sum() valid_prompt_ids = prompt_ids[-valid_prompt_length:] response_ids = data_item.batch["responses"] valid_response_length = data_item.batch["attention_mask"][prompt_length:].sum() valid_response_ids = response_ids[:valid_response_length] # decode prompt_str = self.tokenizer.decode(valid_prompt_ids, skip_special_tokens=True) response_str = self.tokenizer.decode(valid_response_ids, skip_special_tokens=True) ground_truth = data_item.non_tensor_batch["reward_model"]["ground_truth"] data_source = data_item.non_tensor_batch[self.reward_fn_key] extra_info = data_item.non_tensor_batch.get("extra_info", {}) num_turns = data_item.non_tensor_batch.get("__num_turns__", None) rollout_reward_scores = data_item.non_tensor_batch.get("reward_scores", {}) extra_info["num_turns"] = num_turns extra_info["rollout_reward_scores"] = rollout_reward_scores score = self.compute_score( data_source=data_source, solution_str=response_str, ground_truth=ground_truth, extra_info=extra_info, ) if isinstance(score, dict): reward = score["score"] # Store the information including original reward for key, value in score.items(): reward_extra_info[key].append(value) else: reward = score reward_tensor[i, valid_response_length - 1] = reward if data_source not in already_print_data_sources: already_print_data_sources[data_source] = 0 if already_print_data_sources[data_source] < self.num_examine: already_print_data_sources[data_source] += 1 print("[prompt]", prompt_str) print("[response]", response_str) print("[ground_truth]", ground_truth) if isinstance(score, dict): for key, value in score.items(): print(f"[{key}]", value) else: print("[score]", score) if return_dict: return { "reward_tensor": reward_tensor, "reward_extra_info": reward_extra_info, } else: return reward_tensor ================================================ FILE: verl_distillation/verl/workers/reward_manager/prime.py ================================================ # Copyright 2024 PRIME team and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
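# The managers above (Naive, DAPO) and the Prime manager below all delegate to a
# `compute_score` callable with a per-sample contract. A minimal sketch of a
# compatible custom scorer; `my_compute_score` is illustrative, not part of verl:
#
#     def my_compute_score(data_source, solution_str, ground_truth, extra_info=None, **kwargs):
#         correct = str(solution_str).strip() == str(ground_truth).strip()
#         # a plain float also works; returning a dict with a "score" key lets the
#         # manager log the other keys under reward_extra_info
#         return {"score": 1.0 if correct else 0.0, "acc": correct}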
import asyncio
from concurrent.futures import ProcessPoolExecutor
from functools import partial
from typing import Any, Callable, Optional

import psutil
import torch
from transformers import PreTrainedTokenizer

from verl import DataProto
from verl.utils.reward_score import default_compute_score
from verl.workers.reward_manager import register
from verl.workers.reward_manager.abstract import AbstractRewardManager


async def single_compute_score(evaluation_func, completion, reference, task, task_extra_info, executor, timeout=300.0):
    loop = asyncio.get_running_loop()
    try:
        # Ensure process_completion is called properly
        future = loop.run_in_executor(executor, partial(evaluation_func, task, completion, reference, task_extra_info))
        return await asyncio.wait_for(future, timeout=timeout)
    except asyncio.TimeoutError:
        print(f"[Timeout] Task timeout: {completion}")
        return None  # Default value for timed-out rows
    except Exception as e:
        print(f"[Error] Task failed: {e}, completion: {completion[:80]}")
        return None  # Default value for failed rows


async def parallel_compute_score_async(
    evaluation_func, completions, references, tasks, extra_info=None, num_processes=64
):
    if extra_info is None:
        extra_info = [None] * len(tasks)
    scores = []
    with ProcessPoolExecutor(max_workers=num_processes) as executor:
        # To prevent the very occasional starvation caused by anomalous programs (e.g. an
        # infinite loop), any exception raised in the async tasks immediately halts the
        # evaluation, and all spawned worker processes are killed in the `finally` block below.
        try:
            # Create tasks for all rows
            tasks_async = [
                single_compute_score(evaluation_func, c, r, t, ei, executor, timeout=300.0)
                for c, r, t, ei in zip(completions, references, tasks, extra_info, strict=True)
            ]
            results = await asyncio.gather(*tasks_async, return_exceptions=False)
        except Exception as e:
            print(f"[Exception] async gather failed: {e}")
            raise
        finally:
            terminated_count = 0
            for pid, proc in executor._processes.items():
                try:
                    p = psutil.Process(pid)
                    p.terminate()
                    try:
                        p.wait(timeout=5)
                    except psutil.TimeoutExpired:
                        p.kill()
                    terminated_count += 1
                except Exception:
                    pass
            print(f"[Shutdown] {terminated_count} subprocess(es) terminated.")

    # Process results
    for result, completion, reference, task in zip(results, completions, references, tasks, strict=True):
        if isinstance(result, Exception) or result is None:
            # Handle failed or timed-out tasks
            scores.append(0.0)
        elif isinstance(result, int | float | bool):
            scores.append(float(result))
        else:
            scores.append(float(result[0]))
    return scores


def run_reward_scoring(evaluation_func, completions, references, tasks, extra_info=None, num_processes=64):
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        return loop.run_until_complete(
            parallel_compute_score_async(evaluation_func, completions, references, tasks, extra_info, num_processes)
        )
    finally:
        loop.close()


@register("prime")
class PrimeRewardManager(AbstractRewardManager):
    """
    The Reward Manager used in https://github.com/PRIME-RL/PRIME
    """

    def __init__(
        self,
        tokenizer: PreTrainedTokenizer,
        num_examine: int,
        compute_score: Optional[Callable] = None,
        reward_fn_key: str = "data_source",
    ) -> None:
        self.tokenizer = tokenizer
        self.num_examine = num_examine  # the number of batches of decoded responses to print to the console
        self.compute_score = compute_score or default_compute_score
        self.reward_fn_key = reward_fn_key

    def verify(self, data):
        """
        verify the batch and save as ``acc`` tensor
        """
        # batched scoring
        prompt_ids = data.batch["prompts"]
        response_ids = data.batch["responses"]
        sequences_str =
self.tokenizer.batch_decode(response_ids, skip_special_tokens=True) ground_truth = [data_item.non_tensor_batch["reward_model"]["ground_truth"] for data_item in data] data_sources = data.non_tensor_batch[self.reward_fn_key] extra_info = data.non_tensor_batch.get("extra_info", None) assert len(sequences_str) == len(ground_truth) == len(data_sources) try: scores = run_reward_scoring( self.compute_score, completions=sequences_str, references=ground_truth, tasks=data_sources, extra_info=extra_info, num_processes=64, ) except asyncio.TimeoutError: print("[Timeout] Global reward scoring timed out. Setting all as 0.") scores = [0.0 for _ in range(len(sequences_str))] except Exception as e: print(f"[Error] Unexpected error during scoring. Setting all as 0. {e}") scores = [0.0 for _ in range(len(sequences_str))] data.batch["acc"] = torch.tensor(scores, dtype=torch.float32, device=prompt_ids.device) return scores def __call__(self, data: DataProto, return_dict: bool = False) -> torch.Tensor | dict[str, Any]: """We will expand this function gradually based on the available datasets""" # If there is rm score, we directly return rm score. Otherwise, we compute via rm_score_fn if "rm_scores" in data.batch.keys(): if return_dict: reward_extra_keys = data.meta_info.get("reward_extra_keys", []) reward_extra_info = {key: data.non_tensor_batch[key] for key in reward_extra_keys} return {"reward_tensor": data.batch["rm_scores"], "reward_extra_info": reward_extra_info} else: return data.batch["rm_scores"] reward_tensor = torch.zeros_like(data.batch["responses"], dtype=torch.float32) already_print_data_sources = {} # batched scoring prompt_ids = data.batch["prompts"] prompt_length = prompt_ids.shape[-1] response_ids = data.batch["responses"] valid_response_length = data.batch["attention_mask"][:, prompt_length:].sum(dim=-1) sequences_str = self.tokenizer.batch_decode(response_ids, skip_special_tokens=True) data_sources = data.non_tensor_batch["data_source"] scores = self.verify(data) for i in range(len(data)): data_source = data_sources[i] reward_tensor[i, valid_response_length[i].item() - 1] = scores[i] if data_source not in already_print_data_sources: already_print_data_sources[data_source] = 0 if already_print_data_sources[data_source] < self.num_examine: already_print_data_sources[data_source] += 1 print(sequences_str) if return_dict: return {"reward_tensor": reward_tensor} else: return reward_tensor ================================================ FILE: verl_distillation/verl/workers/reward_manager/registry.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
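# A minimal producer-side registration sketch for the decorator defined below.
# `MyRewardManager` and its body are hypothetical, shown only to illustrate
# `register` / `get_reward_manager_cls`; duplicate names raise ValueError:
#
#     from verl.workers.reward_manager.abstract import AbstractRewardManager
#     from verl.workers.reward_manager.registry import register, get_reward_manager_cls
#
#     @register("my_manager")
#     class MyRewardManager(AbstractRewardManager):
#         def __init__(self, tokenizer, num_examine, compute_score=None, reward_fn_key="data_source", **kwargs):
#             self.tokenizer, self.num_examine = tokenizer, num_examine
#
#         def __call__(self, data, return_dict=False):
#             ...
#
#     assert get_reward_manager_cls("my_manager") is MyRewardManager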
from typing import Callable from verl.workers.reward_manager.abstract import AbstractRewardManager __all__ = ["register", "get_reward_manager_cls"] REWARD_MANAGER_REGISTRY: dict[str, type[AbstractRewardManager]] = {} def register(name: str) -> Callable[[type[AbstractRewardManager]], type[AbstractRewardManager]]: """Decorator to register a reward manager class with a given name. Args: name: `(str)` The name of the reward manager. """ def decorator(cls: type[AbstractRewardManager]) -> type[AbstractRewardManager]: if name in REWARD_MANAGER_REGISTRY and REWARD_MANAGER_REGISTRY[name] != cls: raise ValueError( f"Reward manager {name} has already been registered: {REWARD_MANAGER_REGISTRY[name]} vs {cls}" ) REWARD_MANAGER_REGISTRY[name] = cls return cls return decorator def get_reward_manager_cls(name: str) -> type[AbstractRewardManager]: """Get the reward manager class with a given name. Args: name: `(str)` The name of the reward manager. Returns: `(type)`: The reward manager class. """ if name not in REWARD_MANAGER_REGISTRY: raise ValueError(f"Unknown reward manager: {name}") return REWARD_MANAGER_REGISTRY[name] ================================================ FILE: verl_distillation/verl/workers/reward_model/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .base import BasePPORewardModel __all__ = ["BasePPORewardModel"] ================================================ FILE: verl_distillation/verl/workers/reward_model/base.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The base class for reward model """ from abc import ABC, abstractmethod from torch.distributed.device_mesh import DeviceMesh from verl import DataProto from verl.workers.config import HFModelConfig, RewardModelConfig __all__ = ["BasePPORewardModel"] class BasePPORewardModel(ABC): """base class for reward model""" def __init__( self, config: RewardModelConfig, model_config: HFModelConfig, device_mesh: DeviceMesh, ): self.config = config self.model_config = model_config self.device_mesh = device_mesh @abstractmethod def compute_reward(self, data: DataProto) -> DataProto: """Computing reward given input_ids. The transformers should output a tensor with shape [batch_size, sequence_length], and the value at [EOS] mask should be gathered. Args: data: must contain keys "input_ids", "attention_mask" and "position_ids". 
            - input_ids: [batch_size, sequence_length]
            - attention_mask: [batch_size, sequence_length]
            - position_ids: [batch_size, sequence_length]

        Returns: a data pass protocol containing "reward". Only the [EOS] position contains the
            reward; all other positions should have zero reward. Note that this may change in the
            future if we use dense rewards, so we leave the interface general.

            - reward: [batch_size, sequence_length].
        """
        pass


================================================
FILE: verl_distillation/verl/workers/reward_model/megatron/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .reward_model import MegatronRewardModel

__all__ = ["MegatronRewardModel"]


================================================
FILE: verl_distillation/verl/workers/reward_model/megatron/reward_model.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Megatron Reward Model.
"""

import itertools

import torch
import torch.distributed
from megatron.core import parallel_state as mpu
from megatron.core.pipeline_parallel import get_forward_backward_func
from tensordict import TensorDict

from verl import DataProto
from verl.utils.device import get_device_id, get_device_name, get_torch_device
from verl.utils.megatron.pipeline_parallel import make_batch_generator
from verl.utils.seqlen_balancing import get_reverse_idx, rearrange_micro_batches
from verl.utils.torch_functional import broadcast_dict_tensor, pad_sequence_to_length
from verl.workers.reward_model import BasePPORewardModel


class MegatronRewardModel(BasePPORewardModel):
    def __init__(
        self,
        config,
        model_config,
        reward_model_module: torch.nn.ModuleList,
        hf_config,
        tf_config,
        sft_tokenizer=None,
        rm_tokenizer=None,
    ):
        self.config = config
        self.reward_model_module = reward_model_module
        self.hf_config = hf_config
        self.tf_config = tf_config
        self.model_config = model_config
        self.device = "cuda"
        self.sft_tokenizer = sft_tokenizer
        self.rm_tokenizer = rm_tokenizer
        self.use_different_tokenizer = rm_tokenizer is not None
        print(f"MegatronRewardModel.config: {self.config}")
        if self.config.megatron.param_offload:
            self.offload_params_to_cpu()

    def re_encode_by_rm_tokenizer(self, data: DataProto) -> DataProto:
        assert self.use_different_tokenizer, "re-encoding requires rm_tokenizer to be set (not None)!"
        # need to use the rm tokenizer to re-generate input_ids, attention_mask and position_ids:
        # 1. remove the pad for each sequence
        # 2. decode by sft_tokenizer, remove sft system prompts
        # 3. encode by rm_tokenizer with rm system prompts, get rm_input_ids
        # 4. generate attention_mask and position_ids
        input_ids = data.batch["input_ids"]  # (bs, seq_len)
        attention_mask = data.batch["attention_mask"]
        position_ids = data.batch["position_ids"]

        ori_values = {"input_ids": input_ids, "attention_mask": attention_mask, "position_ids": position_ids}

        _, ori_seqlen = input_ids.size(0), input_ids.size(1)
        input_ids_for_rm = []
        attention_mask_for_rm = []
        position_ids_for_rm = []
        print_decode = True
        ori_seqlen = ori_seqlen + 128
        for id, mask in zip(input_ids, attention_mask, strict=True):
            # 1. remove pad for each sequence
            non_zero_indices = torch.nonzero(mask).view(-1)
            begin_pos, end_pos = non_zero_indices[0].item(), non_zero_indices[-1].item()
            valid_id = id[begin_pos : end_pos + 1]

            # 2. decode by sft_tokenizer, remove sft system prompts
            decode_result = self.sft_tokenizer.decode(valid_id)
            # workaround
            decode_with_rm_chat = (
                decode_result.replace("<|user|>\n", "[INST] ")
                .replace("\n<|assistant|>\n", " [/INST]")
                .replace(" \n<|assistant|>\n", " [/INST]")
                + ""
            )
            if print_decode and torch.distributed.get_rank() == 0:
                # only print the first decode result
                print(
                    f"device {get_device_id()}: sft decode result:\n{decode_result}\n \
                    \ndevice {get_device_id()}: sft decode result with \
                    rm chat template:\n{decode_with_rm_chat}\n\n"
                )
                print_decode = False

            # 3. encode by rm_tokenizer
            rm_input_ids = self.rm_tokenizer(decode_with_rm_chat, return_tensors="pt")["input_ids"][0].to(
                input_ids.device
            )
            # 4. generate attention_mask and position_ids
            rm_attention_mask = torch.ones_like(rm_input_ids, device=input_ids.device)
            cur_seqlen = rm_input_ids.shape[-1]
            # NOTE(gh): the later reward compute will process the shape (bs, seqlen_pad_128)
            if cur_seqlen > ori_seqlen:
                print(f"warning: rm encode seqlen {cur_seqlen} > sft encode seqlen {ori_seqlen}")
                rm_input_ids = rm_input_ids[:ori_seqlen]
                rm_attention_mask = rm_attention_mask[:ori_seqlen]
            else:
                # right padding
                rm_input_ids = pad_sequence_to_length(rm_input_ids, ori_seqlen, self.rm_tokenizer.pad_token_id)
                rm_attention_mask = pad_sequence_to_length(rm_attention_mask, ori_seqlen, 0)
            rm_position_ids = torch.arange(0, ori_seqlen, device=input_ids.device)
            input_ids_for_rm.append(torch.unsqueeze(rm_input_ids, dim=0))
            attention_mask_for_rm.append(torch.unsqueeze(rm_attention_mask, dim=0))
            position_ids_for_rm.append(torch.unsqueeze(rm_position_ids, dim=0))
        input_ids_for_rm = torch.cat(input_ids_for_rm, dim=0)
        attention_mask_for_rm = torch.cat(attention_mask_for_rm, dim=0)
        position_ids_for_rm = torch.cat(position_ids_for_rm, dim=0)

        # (bs, seqlen) will not change, but input_ids, attention_mask and position_ids will change
        # NOTE(gh): need to replace into origin values after compute reward!
data.batch["input_ids"] = input_ids_for_rm data.batch["attention_mask"] = attention_mask_for_rm data.batch["position_ids"] = position_ids_for_rm return data, ori_values @torch.no_grad() def compute_reward(self, data: DataProto) -> DataProto: if self.config.megatron.param_offload: self.load_params_to_cuda() if self.use_different_tokenizer: data, ori_values = self.re_encode_by_rm_tokenizer(data) input_ids = data.batch["input_ids"] # (bs, seq_len') attention_mask = data.batch["attention_mask"] position_ids = data.batch["position_ids"] use_dynamic_bsz = data.meta_info.get("use_dynamic_bsz", False) micro_batch_size = data.meta_info.get("micro_batch_size", None) max_token_len = data.meta_info.get("max_token_len", None) assert micro_batch_size is not None, "micro batch size is needed for forward compute" if use_dynamic_bsz: assert max_token_len is not None, "use_dynamic_bsz is True, but max_token_len is None!" max_token_len = max_token_len * self.config.megatron.context_parallel_size responses = data.batch["responses"] batch_size = responses.size(0) response_length = responses.size(1) with torch.no_grad(): output = self.forward_batch( data, use_dynamic_bsz=use_dynamic_bsz, micro_batch_size=micro_batch_size, max_token_len=max_token_len ) if mpu.is_pipeline_last_stage(ignore_virtual=True): logits = torch.cat(output["output"], dim=0) if use_dynamic_bsz: indices = output["indices"] indices = list(itertools.chain.from_iterable(indices)) assert len(indices) == logits.size(0), f"{len(indices)} vs. {logits.size()}" revert_indices = torch.tensor(get_reverse_idx(indices), dtype=torch.long) logits = logits[revert_indices] else: logits = torch.empty( (input_ids.shape[0], input_ids.shape[1]), device=input_ids.device, ) logits = logits.to(torch.float32) # broadcast across pp ranks torch.distributed.broadcast( tensor=logits, src=mpu.get_pipeline_model_parallel_last_rank(), group=mpu.get_pipeline_model_parallel_group(), async_op=False, ) # (bs, seqlen', hidden_size) -> (bs, seqlen', 1) -> (bs, seqlen') token_level_rewards = logits # find the last token reward ends = attention_mask.cumsum(dim=-1).argmax(dim=-1).view(-1, 1) # (bs, 1) rewards = torch.gather(token_level_rewards, dim=1, index=ends) # (bs, 1) if self.use_different_tokenizer: data.batch.update(ori_values) input_ids = ori_values["input_ids"] attention_mask = ori_values["attention_mask"] position_ids = ori_values["position_ids"] token_level_rewards = rewards.expand(attention_mask.shape[0], attention_mask.shape[1]) # (bs, ori_seqlen) # assign last valid token reward to ori position if position_ids.dim() == 3: # qwen2vl mrope [bs, 3, seq_len] position_ids = position_ids[:, 0, :] eos_mask_idx = torch.argmax(position_ids * attention_mask, dim=-1) # (bs,) eos_mask = torch.zeros_like(attention_mask) eos_mask[torch.arange(batch_size), eos_mask_idx] = 1.0 token_level_rewards = token_level_rewards * eos_mask token_level_rewards = token_level_rewards[:, -response_length:] if self.config.megatron.param_offload: self.offload_params_to_cpu() else: # add empty cache after each compute get_torch_device().empty_cache() batch = TensorDict({"rm_scores": token_level_rewards}, batch_size=input_ids.shape[0]) return DataProto(batch=batch) def forward_batch(self, data: DataProto, use_dynamic_bsz=False, micro_batch_size=None, max_token_len=None): """ We assume: - The model takes input: (input_ids, attention_mask, position_ids). 
No rmpad for the input - The communication shape is (total_nnz_pad_to_sp // tp_size, 1, hidden_size) if sequence parallel is enabled """ # broadcast from last pp rank to all other pp ranks # TODO: actually, we just need to control the sampling order. mini_batch = data mini_batch.batch = mini_batch.batch.contiguous() broadcast_dict_tensor( mini_batch.batch, src=mpu.get_pipeline_model_parallel_last_rank(), group=mpu.get_pipeline_model_parallel_group(), ) mini_batch.batch["attention_mask"] = mini_batch.batch["attention_mask"].to(bool) self.has_multi_modal_inputs = "multi_modal_inputs" in mini_batch.non_tensor_batch.keys() if self.has_multi_modal_inputs: mini_batch.batch["multi_modal_inputs"] = mini_batch.non_tensor_batch["multi_modal_inputs"] mini_batch.batch["multi_modal_inputs_idx"] = torch.Tensor( list(range(len(mini_batch.non_tensor_batch["multi_modal_inputs"]))) ).to(torch.int64) indices = None if use_dynamic_bsz: assert max_token_len is not None, "max_token_len must be set when use_dynamic_bsz is True" vpp_size = mpu.get_virtual_pipeline_model_parallel_world_size() if vpp_size is not None and vpp_size > 1: microbatch_group_size_per_vp_stage = self.tf_config.microbatch_group_size_per_vp_stage micro_batches, indices = rearrange_micro_batches( batch=mini_batch.batch, num_batches_divided_by=microbatch_group_size_per_vp_stage, max_token_len=max_token_len, ) assert len(micro_batches) % self.tf_config.microbatch_group_size_per_vp_stage == 0, ( f"the number of micro_batches {len(micro_batches)} must be divisible by microbatch_group_size_per_vp_stage " f"{microbatch_group_size_per_vp_stage} for megatron backend" ) else: micro_batches, indices = rearrange_micro_batches(batch=mini_batch.batch, max_token_len=max_token_len) total_seqlen = max_token_len else: assert micro_batch_size is not None, ( "micro_batch_size must be passed in when not using dynamic batch size" ) micro_batches = mini_batch.batch.split(micro_batch_size) seq_len = micro_batches[0]["input_ids"].shape[1] total_seqlen = micro_batch_size * seq_len n_micro_batch = len(micro_batches) # compute input shapes for pp stages forward_backward_func = get_forward_backward_func() def loss_func(output): return torch.tensor(1.0, device=output.device), output def forward_step(batch_iter, model): batch = next(batch_iter) input_ids = batch["input_ids"] attention_mask = batch["attention_mask"] position_ids = batch["position_ids"] from verl.models.mcore import get_mcore_forward_fn forward_fn = get_mcore_forward_fn(self.hf_config) multi_modal_inputs = {} if "multi_modal_inputs" in batch: from verl.utils.model import extract_multi_modal_inputs indices = batch.get("multi_modal_inputs_idx", None) multi_modal_inputs = extract_multi_modal_inputs(batch["multi_modal_inputs"], indices) output = forward_fn( model, input_ids, attention_mask, position_ids, multi_modal_inputs, value_model=True, ) return output, loss_func # batch should be a list of batches inside micro-batches batch_generator = make_batch_generator(micro_batches, vpp_size=len(self.reward_model_module)) # TODO: we may use the new schedule instead # for flash-attn: (seq_len, batch_size, hidden_size) = (mbs*seq_len, 1, hidden_size) if mpu.get_pipeline_model_parallel_world_size() > 1: losses_reduced = forward_backward_func( forward_step_func=forward_step, data_iterator=batch_generator, model=self.reward_model_module, num_microbatches=n_micro_batch, seq_length=total_seqlen, # unused when input_shapes is set micro_batch_size=1, # unused when input_shapes is set forward_only=True, ) else: losses_reduced =
forward_backward_func( forward_step_func=forward_step, data_iterator=batch_generator, model=self.reward_model_module, num_microbatches=n_micro_batch, seq_length=total_seqlen, # in use for pp = 1 micro_batch_size=1, # in use for pp = 1 forward_only=True, ) if self.has_multi_modal_inputs: data.batch.pop("multi_modal_inputs") data.batch.pop("multi_modal_inputs_idx") data.non_tensor_batch.pop("multi_modal_inputs") # losses_reduced contains the stats returned from loss_func losses_reduced = {"output": losses_reduced} if use_dynamic_bsz: losses_reduced["indices"] = indices return losses_reduced def offload_params_to_cpu(self): if self.device in ["cuda", "npu"]: for reward_model_module in self.reward_model_module: for name, param in reward_model_module.named_parameters(): param.data = param.data.to("cpu", non_blocking=True) self.device = "cpu" get_torch_device().empty_cache() def load_params_to_cuda(self): if self.device == "cpu": for reward_model_module in self.reward_model_module: for name, param in reward_model_module.named_parameters(): param.data = param.data.to(get_device_id(), non_blocking=True) self.device = get_device_name() ================================================ FILE: verl_distillation/verl/workers/roles/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .actor import ActorWorker from .critic import CriticWorker try: from .reward_model import RewardModelWorker except ImportError: RewardModelWorker = None __all__ = ["CriticWorker", "ActorWorker"] if RewardModelWorker is not None: __all__.append("RewardModelWorker") ================================================ FILE: verl_distillation/verl/workers/roles/actor.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
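# ActorWorker below binds a backend-agnostic training engine (FSDP or Megatron,
# resolved via EngineRegistry) to the single-controller dispatch system; the
# worker itself only handles dispatch registration, padding conversion, and
# metrics. A rough driver-side sketch, assuming a `worker` handle (method names
# match the @register'd endpoints defined below):
#
#   worker.init_model()                    # build and initialize the engine
#   out = worker.compute_log_prob(batch)   # eval-mode forward -> old_log_probs, entropy
#   metrics = worker.update_actor(batch)   # train-mode PPO update -> metrics
#   worker.save_checkpoint("/path/to/ckpt")  # hypothetical path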
import logging import os from functools import partial import psutil from codetiming import Timer from verl import DataProto from verl.single_controller.base import Worker from verl.single_controller.base.decorator import Dispatch, make_nd_compute_dataproto_dispatch_fn, register from verl.utils.device import ( get_device_id, get_device_name, get_torch_device, ) from verl.utils.distributed import initialize_global_process_group_ray from verl.utils.flops_counter import FlopsCounter from verl.utils.profiler import DistProfiler, DistProfilerExtension from verl.utils.py_functional import append_to_dict from verl.workers.config import ActorConfig from verl.workers.roles.utils.losses import ppo_loss from verl.workers.roles.utils.padding import left_right_2_no_padding, no_padding_2_padding logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) device_name = get_device_name() class ActorWorker(Worker, DistProfilerExtension): """ This worker can be instantiated as a standalone actor or a standalone rollout or a standalone reference policy or a hybrid engine based on the config.rollout """ def __init__(self, config: ActorConfig): self.config = config Worker.__init__(self) self.profiler_config = self.config.profiler tool_config = self.profiler_config.tool_config DistProfilerExtension.__init__( self, DistProfiler(rank=self.rank, config=self.profiler_config, tool_config=tool_config) ) initialize_global_process_group_ray(timeout_second=None) self.loss_fn = partial(ppo_loss, config=self.config) def _build_engine(self): self.model_config = self.config.model_config self.engine_config = self.config.engine self.optimizer_config = self.config.optim self.checkpoint_config = self.config.checkpoint from verl.workers.engine import BaseEngine, EngineRegistry self.engine: BaseEngine = EngineRegistry.new( model_type="language_model", backend=self.config.strategy, model_config=self.model_config, engine_config=self.engine_config, optimizer_config=self.optimizer_config, checkpoint_config=self.checkpoint_config, ) # build dispatch info self._register_dispatch_collect_info( mesh_name="actor", dp_rank=self.engine.get_data_parallel_rank(), is_collect=self.engine.is_mp_src_rank_with_outputs(), ) # aggregate with bon sampling self.ppo_mini_batch_size = self.config.ppo_mini_batch_size * self.config.rollout_n assert self.ppo_mini_batch_size % self.engine.get_data_parallel_size() == 0, ( f"{self.ppo_mini_batch_size=} is not divisible by {self.engine.get_data_parallel_size()=}" ) self.ppo_mini_batch_size_per_dp = self.ppo_mini_batch_size // self.engine.get_data_parallel_size() # setup flops counter self.flops_counter = FlopsCounter(self.model_config.hf_config) @register(dispatch_mode=Dispatch.ONE_TO_ALL) def init_model(self): self._build_engine() self.engine.initialize() @register(dispatch_mode=Dispatch.ONE_TO_ALL) def set_loss_fn(self, loss_fn): self.loss_fn = loss_fn @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="actor")) @DistProfiler.annotate(color="blue", role="actor_compute_log_prob") def compute_log_prob(self, data: DataProto): data.meta_info["use_dynamic_bsz"] = self.config.use_dynamic_bsz data.meta_info["use_fused_kernels"] = self.config.use_fused_kernels data.meta_info["calculate_entropy"] = True if self.config.use_dynamic_bsz: data.meta_info["max_token_len_per_gpu"] = self.config.ppo_infer_max_token_len_per_gpu else: data.meta_info["micro_batch_size_per_gpu"] = self.config.ppo_infer_micro_batch_size_per_gpu with self.engine.eval_mode(): # TODO: make 
worker API to accept TensorDict as well data = data.to_tensordict() data = left_right_2_no_padding(data) output = self.engine.infer_batch(data) if self.engine.is_mp_src_rank_with_outputs(): output = output["model_output"] log_probs = output["log_probs"] log_probs = no_padding_2_padding(log_probs, data) # (bsz, response_length) entropy = output["entropy"] if entropy is not None: entropy = no_padding_2_padding(entropy, data) # (bsz, response_length) # in megatron, only the last pp stage contains valid data, which is returned to the single controller output = DataProto.from_dict( tensors={"old_log_probs": log_probs.float(), "entropy": entropy.float()}, ) output = output.to("cpu") return output @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="actor")) @DistProfiler.annotate(color="red", role="actor_update") def update_actor(self, data: DataProto): data.meta_info["use_dynamic_bsz"] = self.config.use_dynamic_bsz data.meta_info["use_fused_kernels"] = self.config.use_fused_kernels data.meta_info["calculate_entropy"] = self.config.entropy_coeff != 0.0 if self.config.use_dynamic_bsz: data.meta_info["max_token_len_per_gpu"] = self.config.ppo_max_token_len_per_gpu else: data.meta_info["micro_batch_size_per_gpu"] = self.config.ppo_micro_batch_size_per_gpu metrics = {} # Support all hardware data = data.to(get_device_id()) # perform the policy update with self.engine.train_mode(): dataloader = data.make_iterator( mini_batch_size=self.ppo_mini_batch_size_per_dp, epochs=self.config.ppo_epochs, seed=self.config.data_loader_seed + self.engine.get_data_parallel_rank(), dataloader_kwargs={"shuffle": self.config.shuffle}, ) with Timer(name="update_policy", logger=None) as timer: for batch_idx, mini_batch in enumerate(dataloader): mini_batch.meta_info["global_batch_size"] = self.config.ppo_mini_batch_size # TODO: make worker API to accept TensorDict as well mini_batch = mini_batch.to_tensordict() mini_batch = left_right_2_no_padding(mini_batch) output = self.engine.train_batch(mini_batch, self.loss_fn) mini_batch_metrics = output.get("metrics", {}) append_to_dict(metrics, mini_batch_metrics, prefix="actor/") delta_time = timer.last global_num_tokens = data.meta_info["global_token_num"] estimated_flops, promised_flops = self.flops_counter.estimate_flops(global_num_tokens, delta_time) metrics["perf/mfu/actor"] = estimated_flops * self.config.ppo_epochs / promised_flops / self.world_size metrics["perf/max_memory_allocated_gb"] = get_torch_device().max_memory_allocated() / (1024**3) metrics["perf/max_memory_reserved_gb"] = get_torch_device().max_memory_reserved() / (1024**3) metrics["perf/cpu_memory_used_gb"] = psutil.virtual_memory().used / (1024**3) lr = self.engine.lr_scheduler_step() metrics["actor/lr"] = lr output = DataProto(batch=None, meta_info={"metrics": metrics}) return output @register(dispatch_mode=Dispatch.ONE_TO_ALL) def save_checkpoint(self, local_path, hdfs_path=None, global_step=0, max_ckpt_to_keep=None): return self.engine.save_checkpoint(local_path, hdfs_path, global_step, max_ckpt_to_keep) @register(dispatch_mode=Dispatch.ONE_TO_ALL) def load_checkpoint(self, local_path, hdfs_path=None, del_local_after_load=False): return self.engine.load_checkpoint(local_path, hdfs_path, del_local_after_load) ================================================ FILE: verl_distillation/verl/workers/roles/critic.py ================================================ # Copyright 2024 Bytedance Ltd.
and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os import warnings from functools import partial import psutil from codetiming import Timer from verl import DataProto from verl.single_controller.base import Worker from verl.single_controller.base.decorator import Dispatch, make_nd_compute_dataproto_dispatch_fn, register from verl.utils.device import ( get_device_id, get_device_name, get_torch_device, ) from verl.utils.distributed import initialize_global_process_group_ray from verl.utils.flops_counter import FlopsCounter from verl.utils.profiler import DistProfiler, DistProfilerExtension from verl.utils.py_functional import append_to_dict from verl.workers.config import CriticConfig from verl.workers.roles.utils.losses import value_loss from verl.workers.roles.utils.padding import left_right_2_no_padding, no_padding_2_padding logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) device_name = get_device_name() class CriticWorker(Worker, DistProfilerExtension): """ This worker provides the value function (critic) for PPO. It computes token-level values and performs critic updates through a standalone training engine, based on the config. """ def __init__(self, config: CriticConfig): self.config = config Worker.__init__(self) self.profiler_config = self.config.profiler tool_config = self.profiler_config.tool_config DistProfilerExtension.__init__( self, DistProfiler(rank=self.rank, config=self.profiler_config, tool_config=tool_config) ) initialize_global_process_group_ray(timeout_second=None) self.loss_fn = partial(value_loss, config=self.config) def _build_engine(self): from copy import copy, deepcopy self.model_config = copy(self.config.model_config) self.model_config.hf_config = deepcopy(self.config.model_config.hf_config) self.engine_config = self.config.engine self.optimizer_config = self.config.optim self.checkpoint_config = self.config.checkpoint from verl.workers.engine import BaseEngine, EngineRegistry # replace a CausalLM architecture with its TokenClassification counterpart for value prediction hf_config = self.model_config.hf_config arch = hf_config.architectures[0] # This logic assumes the critic is a token classification model. # If the provided model is a CausalLM, we adapt it. if "ForCausalLM" in arch: model_name = arch.split("ForCausalLM")[0] new_arch = f"{model_name}ForTokenClassification" warnings.warn(f"Implicitly changing critic architecture from '{arch}' to '{new_arch}'", stacklevel=2) hf_config.architectures[0] = new_arch elif "ForTokenClassification" not in arch and "ForSequenceClassification" not in arch: raise ValueError( f"Unsupported critic architecture: {arch}. " f"Critic worker expects an architecture suitable for value function estimation, " f"such as '...ForTokenClassification' or '...ForSequenceClassification'."
) # make sure output dropout is 0 hf_config.classifier_dropout = 0 self.engine: BaseEngine = EngineRegistry.new( model_type="value_model", backend=self.config.strategy, model_config=self.model_config, engine_config=self.engine_config, optimizer_config=self.optimizer_config, checkpoint_config=self.checkpoint_config, ) # build dispatch info self._register_dispatch_collect_info( mesh_name="critic", dp_rank=self.engine.get_data_parallel_rank(), is_collect=self.engine.is_mp_src_rank_with_outputs(), ) # aggregate with bon sampling self.ppo_mini_batch_size = self.config.ppo_mini_batch_size * self.config.rollout_n assert self.ppo_mini_batch_size % self.engine.get_data_parallel_size() == 0, ( f"{self.ppo_mini_batch_size=} is not divisible by {self.engine.get_data_parallel_size()=}" ) self.ppo_mini_batch_size_per_dp = self.ppo_mini_batch_size // self.engine.get_data_parallel_size() # setup flops counter self.flops_counter = FlopsCounter(self.model_config.hf_config) @register(dispatch_mode=Dispatch.ONE_TO_ALL) def init_model(self): self._build_engine() self.engine.initialize() @register(dispatch_mode=Dispatch.ONE_TO_ALL) def set_loss_fn(self, loss_fn): self.loss_fn = loss_fn @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="critic")) @DistProfiler.annotate(color="blue", role="critic_compute_values") def compute_values(self, data: DataProto): data.meta_info["use_dynamic_bsz"] = self.config.use_dynamic_bsz if self.config.use_dynamic_bsz: data.meta_info["max_token_len_per_gpu"] = self.config.ppo_infer_max_token_len_per_gpu else: data.meta_info["micro_batch_size_per_gpu"] = self.config.ppo_infer_micro_batch_size_per_gpu with self.engine.eval_mode(): # TODO: make worker API to accept TensorDict as well data = data.to_tensordict() data = left_right_2_no_padding(data) output = self.engine.infer_batch(data) if self.engine.is_mp_src_rank_with_outputs(): # in megatron, only the last pp stage contains valid data, which is returned to the single controller output = output["model_output"] values = output["values"] values = no_padding_2_padding(values, data) # (bsz, response_length) output = DataProto.from_dict( tensors={"values": values.float()}, ) output = output.to("cpu") return output @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="critic")) @DistProfiler.annotate(color="red", role="critic_update") def update_critic(self, data: DataProto): data.meta_info["use_dynamic_bsz"] = self.config.use_dynamic_bsz if self.config.use_dynamic_bsz: data.meta_info["max_token_len_per_gpu"] = self.config.ppo_max_token_len_per_gpu else: data.meta_info["micro_batch_size_per_gpu"] = self.config.ppo_micro_batch_size_per_gpu metrics = {} # Support all hardware data = data.to(get_device_id()) # perform the critic update with self.engine.train_mode(): dataloader = data.make_iterator( mini_batch_size=self.ppo_mini_batch_size_per_dp, epochs=self.config.ppo_epochs, seed=self.config.data_loader_seed + self.engine.get_data_parallel_rank(), dataloader_kwargs={"shuffle": self.config.shuffle}, ) with Timer(name="update_critic", logger=None) as timer: for batch_idx, mini_batch in enumerate(dataloader): mini_batch.meta_info["global_batch_size"] = self.config.ppo_mini_batch_size # TODO: make worker API to accept TensorDict as well mini_batch = mini_batch.to_tensordict() mini_batch = left_right_2_no_padding(mini_batch) output = self.engine.train_batch(mini_batch, self.loss_fn) mini_batch_metrics = output.get("metrics", {}) append_to_dict(metrics, mini_batch_metrics, prefix="critic/") delta_time = timer.last
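# The MFU metric computed below is model FLOPs utilization: FLOPs actually
# achieved (estimated from the global token count and the elapsed update time)
# divided by the hardware's promised peak FLOPs, scaled by ppo_epochs for the
# repeated passes over the mini-batches and averaged over world_size GPUs.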
global_num_tokens = data.meta_info["global_token_num"] estimated_flops, promised_flops = self.flops_counter.estimate_flops(global_num_tokens, delta_time) metrics["perf/mfu/critic"] = estimated_flops * self.config.ppo_epochs / promised_flops / self.world_size metrics["perf/max_memory_allocated_gb"] = get_torch_device().max_memory_allocated() / (1024**3) metrics["perf/max_memory_reserved_gb"] = get_torch_device().max_memory_reserved() / (1024**3) metrics["perf/cpu_memory_used_gb"] = psutil.virtual_memory().used / (1024**3) lr = self.engine.lr_scheduler_step() metrics["critic/lr"] = lr output = DataProto(batch=None, meta_info={"metrics": metrics}) return output @register(dispatch_mode=Dispatch.ONE_TO_ALL) def save_checkpoint(self, local_path, hdfs_path=None, global_step=0, max_ckpt_to_keep=None): return self.engine.save_checkpoint(local_path, hdfs_path, global_step, max_ckpt_to_keep) @register(dispatch_mode=Dispatch.ONE_TO_ALL) def load_checkpoint(self, local_path, hdfs_path=None, del_local_after_load=False): return self.engine.load_checkpoint(local_path, hdfs_path, del_local_after_load) ================================================ FILE: verl_distillation/verl/workers/roles/hybrid_engine.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: verl_distillation/verl/workers/roles/utils/__init__.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: verl_distillation/verl/workers/roles/utils/losses.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
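# Loss functions consumed by the role workers through engine.train_batch(batch, loss_fn).
# Each returns (scalar_loss, metrics_dict). A toy illustration of the token-count
# normalization used by sft_loss below (numbers illustrative, not from the source):
#   log_prob = [-0.5, -1.0, -2.0], loss_mask = [0, 1, 1], batch_num_tokens = 2, dp_size = 1
#   loss = -((-1.0) + (-2.0)) / 2 * 1 = 1.5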
import torch from tensordict import TensorDict from verl.trainer.ppo.core_algos import agg_loss, compute_value_loss, get_policy_loss_fn, kl_penalty from verl.utils import tensordict_utils as tu from verl.utils.dataset.dataset_utils import DatasetPadMode from verl.utils.torch_functional import masked_mean, masked_sum from verl.workers.config import ActorConfig, CriticConfig from verl.workers.roles.utils.padding import no_padding_2_padding def sft_loss(config: ActorConfig, model_output, data: TensorDict, dp_group=None): pad_mode = tu.get_non_tensor_data(data=data, key="pad_mode", default=DatasetPadMode.NO_PADDING) dp_size = data["dp_size"] batch_num_tokens = data["batch_num_tokens"] log_prob = model_output["log_probs"] if pad_mode == DatasetPadMode.NO_PADDING: # log_prob and loss mask are nested tensors of shape [bsz, j1] # for each sample, loss mask shape is [1, prompt_length + response_length] loss_mask = data["loss_mask"] log_prob_flatten = log_prob.values() loss_mask_flatten = loss_mask.values() # left-shift the loss mask by one token to align with log_prob loss_mask_flatten = torch.roll(loss_mask_flatten, shifts=-1, dims=0) # NOTE: loss is averaged over all tokens in the batch across all data parallel groups, # For FSDP backend, the loss is directly used for backward; while for Megatron backend, # the loss should be scaled by `num_microbatches` and `cp_size` for pp schedule. loss = -masked_sum(log_prob_flatten, loss_mask_flatten) / batch_num_tokens * dp_size else: response_mask = data["response_mask"].to(bool) loss = -masked_sum(log_prob, response_mask) / batch_num_tokens * dp_size return loss, {"loss": loss.detach().item()} def ppo_loss(config: ActorConfig, model_output, data: TensorDict, dp_group=None): log_prob = model_output["log_probs"] entropy = model_output.get("entropy", None) log_prob = no_padding_2_padding(log_prob, data) # (bsz, response_length) if entropy is not None: entropy = no_padding_2_padding(entropy, data) # (bsz, response_length) metrics = {} response_mask = data["response_mask"].to(bool) # compute policy loss old_log_prob = data["old_log_probs"] advantages = data["advantages"] loss_agg_mode = config.loss_agg_mode loss_mode = config.policy_loss.get("loss_mode", "vanilla") policy_loss_fn = get_policy_loss_fn(loss_mode) pg_loss, pg_clipfrac, ppo_kl, pg_clipfrac_lower = policy_loss_fn( old_log_prob=old_log_prob, log_prob=log_prob, advantages=advantages, response_mask=response_mask, loss_agg_mode=loss_agg_mode, config=config, ) metrics.update( { "pg_loss": pg_loss.detach().item(), "pg_clipfrac": pg_clipfrac.detach().item(), "ppo_kl": ppo_kl.detach().item(), "pg_clipfrac_lower": pg_clipfrac_lower.detach().item(), } ) policy_loss = pg_loss # add entropy loss if entropy is not None: entropy_loss = agg_loss(loss_mat=entropy, loss_mask=response_mask, loss_agg_mode=loss_agg_mode) entropy_coeff = config.entropy_coeff policy_loss -= entropy_coeff * entropy_loss # add kl loss if config.use_kl_loss: ref_log_prob = data["ref_log_prob"] # compute kl loss kld = kl_penalty(logprob=log_prob, ref_logprob=ref_log_prob, kl_penalty=config.kl_loss_type) kl_loss = agg_loss(loss_mat=kld, loss_mask=response_mask, loss_agg_mode=config.loss_agg_mode) policy_loss += kl_loss * config.kl_loss_coef metrics["kl_loss"] = kl_loss.detach().item() metrics["kl_coef"] = config.kl_loss_coef return policy_loss, metrics def value_loss(config: CriticConfig, model_output, data: TensorDict, dp_group=None): vpreds = model_output["values"] vpreds = no_padding_2_padding(vpreds, data) # (bsz, response_length) values 
= data["values"] returns = data["returns"] response_mask = data["response_mask"].to(bool) vf_loss, vf_clipfrac = compute_value_loss( vpreds=vpreds, values=values, returns=returns, response_mask=response_mask, cliprange_value=config.cliprange_value, loss_agg_mode=config.loss_agg_mode, ) metrics = {} metrics.update( { "critic/vf_loss": vf_loss.detach().item(), "critic/vf_clipfrac": vf_clipfrac.detach().item(), "critic/vpred_mean": masked_mean(vpreds, response_mask).detach().item(), } ) return vf_loss, metrics ================================================ FILE: verl_distillation/verl/workers/roles/utils/padding.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from tensordict import TensorDict from verl.utils import tensordict_utils as tu from verl.utils.device import ( is_cuda_available, is_npu_available, ) if is_cuda_available: from flash_attn.bert_padding import pad_input, unpad_input elif is_npu_available: from transformers.integrations.npu_flash_attention import pad_input, unpad_input def left_right_2_no_padding(data: TensorDict) -> TensorDict: """ Convert TensorDict from left-right padding to no-padding format. Args: data: TensorDict with "input_ids", "attention_mask", "response_mask", "position_ids" Returns: data: TensorDict with - Tensor includes NestedTensors like "input_ids", "loss_mask", "position_ids" - NonTensorData includes "max_seq_len", "max_response_len", "indices" Note: 1. the return input_ids/position_ids/loss_mask are nested tensor. 2. we will remove "attention_mask", "response" in the return data, but "response_mask" is kept. 
""" assert "input_ids" in data, "input_ids is required in left-right padding data" assert "attention_mask" in data, "attention_mask is required in left-right padding data" assert "response_mask" in data, "response_mask is required in left-right padding data" assert "position_ids" in data, "position_ids is required in left-right padding data" input_ids = data.pop("input_ids") attention_mask = data.pop("attention_mask") response_mask = data["response_mask"] if "responses" in data: _ = data.pop("responses") max_seq_len, max_response_len = input_ids.shape[1], response_mask.shape[1] tu.assign_non_tensor_data(data, "max_seq_len", max_seq_len) tu.assign_non_tensor_data(data, "max_response_len", max_response_len) input_ids_rmpad, indices, cu_seqlens, *_ = unpad_input(input_ids.unsqueeze(-1), attention_mask) tu.assign_non_tensor_data(data, "indices", indices) input_ids_nested = torch.nested.nested_tensor_from_jagged(input_ids_rmpad.squeeze(-1), offsets=cu_seqlens) seq_lens = cu_seqlens.diff().tolist() response_lens = response_mask.sum(dim=1).tolist() position_ids_list = [] loss_mask_list = [] for seq_len, response_len in zip(seq_lens, response_lens, strict=False): position_ids_list.append(torch.arange(seq_len, device=input_ids.device)) loss_mask = torch.zeros(seq_len, dtype=torch.bool, device=input_ids.device) assert seq_len >= response_len, f"{seq_len=} is less than {response_len=}" loss_mask[-response_len:] = 1 loss_mask_list.append(loss_mask) position_ids_nested = torch.nested.as_nested_tensor(position_ids_list, layout=torch.jagged) loss_mask_nested = torch.nested.as_nested_tensor(loss_mask_list, layout=torch.jagged) data["input_ids"] = input_ids_nested data["position_ids"] = position_ids_nested data["loss_mask"] = loss_mask_nested return data def no_padding_2_padding(nested_tensor: torch.Tensor, data: TensorDict) -> torch.Tensor: """ Convert NestedTensor from no-padding to right padding format. Args: nested_tensor: NestedTensor with no-padding format data: TensorDict with - Tensor includes NestedTensors like "input_ids", "loss_mask", "position_ids" - NonTensorData includes "max_seq_len", "max_response_len", "indices" Returns: values: regular tensor right padded to max_response_len """ assert "indices" in data, "indices is required in left-right padding data" assert "max_seq_len" in data, "max_seq_len is required in left-right padding data" assert "max_response_len" in data, "max_response_len is required in left-right padding data" indices = tu.get_non_tensor_data(data=data, key="indices", default=None) max_seq_len = tu.get_non_tensor_data(data=data, key="max_seq_len", default=2048) max_response_len = tu.get_non_tensor_data(data=data, key="max_response_len", default=1024) batch_size = nested_tensor.size(0) values = nested_tensor.values() full_values = pad_input( hidden_states=values.unsqueeze(-1), indices=indices, batch=batch_size, seqlen=max_seq_len, ) values = full_values.squeeze(-1)[:, -max_response_len - 1 : -1] # (bsz, response_length) return values ================================================ FILE: verl_distillation/verl/workers/rollout/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .base import BaseRollout, get_rollout_class from .hf_rollout import HFRollout from .naive import NaiveRollout __all__ = ["BaseRollout", "NaiveRollout", "HFRollout", "get_rollout_class"] ================================================ FILE: verl_distillation/verl/workers/rollout/base.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib from abc import ABC, abstractmethod from typing import Generator import torch from torch.distributed.device_mesh import DeviceMesh from verl import DataProto from verl.workers.config import HFModelConfig, RolloutConfig __all__ = ["BaseRollout"] class BaseRollout(ABC): """Base class for rollout.""" def __init__( self, config: RolloutConfig, model_config: HFModelConfig, device_mesh: DeviceMesh, ): self.config = config self.model_config = model_config self.device_mesh = device_mesh @abstractmethod async def resume(self, tags: list[str]): """Resume rollout weights or kv cache in GPU memory. Args: tags: weights or kv_cache. """ pass @abstractmethod async def update_weights( self, weights: Generator[tuple[str, torch.Tensor], None, None], **kwargs, ): """Update the weights of the rollout model. Args: weights: A generator that yields the name of the weight tensor and the tensor itself. """ pass @abstractmethod async def release(self): """Release weights and kv cache in GPU memory.""" pass def generate_sequences(self, prompts: DataProto) -> DataProto: """Batch generate sequences in sync mode. Args: prompts: The input prompts. Returns: The output sequences. """ raise NotImplementedError _ROLLOUT_REGISTRY = { ("vllm", "sync"): "verl.workers.rollout.vllm_rollout.vLLMRollout", ("vllm", "async"): "verl.workers.rollout.vllm_rollout.vLLMAsyncRollout", ("sglang", "sync"): "verl.workers.rollout.sglang_rollout.sglang_rollout.SGLangRollout", ("sglang", "async"): "verl.workers.rollout.sglang_rollout.sglang_rollout.ServerAdapter", } def get_rollout_class(rollout_name: str, mode: str) -> type[BaseRollout]: """Get the rollout class by name. Args: rollout_name: The name of the rollout. mode: The mode of the rollout, sync: spmd mode, async: server mode. Returns: The rollout class. 
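Example: get_rollout_class("vllm", "sync") resolves to vLLMRollout, while get_rollout_class("sglang", "async") resolves to the SGLang ServerAdapter, per the _ROLLOUT_REGISTRY mapping above.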
""" assert (rollout_name, mode) in _ROLLOUT_REGISTRY, f"Rollout {rollout_name} with mode {mode} not found" fqdn = _ROLLOUT_REGISTRY[(rollout_name, mode)] module_name, class_name = fqdn.rsplit(".", 1) rollout_module = importlib.import_module(module_name) return getattr(rollout_module, class_name) ================================================ FILE: verl_distillation/verl/workers/rollout/hf_rollout.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Rollout with huggingface models. TODO: refactor this class. Currently, it will hang when using FSDP HybridShard. We should actually create a single GPU model. Then, get full state_dict and bind the state_dict to the single GPU model. Then, use the single GPU model to perform generation. """ import contextlib import torch import torch.distributed from tensordict import TensorDict from torch import nn from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from transformers import GenerationConfig from verl import DataProto from verl.utils.device import get_device_name, get_torch_device from verl.utils.torch_functional import get_response_mask from .base import BaseRollout __all__ = ["HFRollout"] class HFRollout(BaseRollout): def __init__(self, module: nn.Module, config): super().__init__() self.config = config self.module = module def generate_sequences(self, prompts: DataProto) -> DataProto: batch_size = prompts.batch.batch_size[0] num_chunks = max(batch_size // self.config.get("micro_batch_size", batch_size), 1) batch_prompts = prompts.chunk(chunks=num_chunks) output = [self._generate_minibatch(p) for p in batch_prompts] output = DataProto.concat(output) return output @torch.no_grad() def _generate_minibatch(self, prompts: DataProto) -> DataProto: # make sampling args can be overridden by inputs do_sample = prompts.meta_info.get("do_sample", self.config.do_sample) is_validate = prompts.meta_info.get("validate", False) temperature = prompts.meta_info.get("temperature", self.config.temperature) response_length = prompts.meta_info.get("response_length", self.config.response_length) top_p = prompts.meta_info.get("top_p", self.config.get("top_p", 1.0)) top_k = max(0, prompts.meta_info.get("top_k", self.config.get("top_k", 0))) # to be compatible with vllm if not do_sample: # do_sample==False -> greedy decoding kwargs = { "do_sample": False, "num_beams": 1, } elif is_validate: # do validate and do sample -> use val_kwargs kwargs = { "do_sample": True, "num_beams": 1, "top_k": max(0, self.config.val_kwargs.top_k), # to be compatible with vllm "top_p": self.config.val_kwargs.top_p, "temperature": self.config.val_kwargs.temperature, "num_return_sequences": 1, # if validate, already repeat in ray_trainer } else: # do_sample -> use rollout config kwargs = { "do_sample": True, "num_beams": 1, "top_p": top_p, "top_k": top_k, "temperature": temperature, # already repeat in ray_trainer # 
https://github.com/volcengine/verl/blob/2fdfbdcba6f2e076f64bc47922d8fe6cf7dc7da5/verl/trainer/ppo/ray_trainer.py#L1117 "num_return_sequences": 1, } # make config according to generate mode generation_config = GenerationConfig(**kwargs) idx = prompts.batch["input_ids"] # (bs, prompt_length) prompt_length = idx.size(1) attention_mask = prompts.batch["attention_mask"] # left-padded attention_mask position_ids = prompts.batch["position_ids"] # used to construct attention_mask eos_token_id = prompts.meta_info["eos_token_id"] pad_token_id = prompts.meta_info["pad_token_id"] self.module.eval() param_ctx = contextlib.nullcontext() if isinstance(self.module, FSDP): # recurse needs to be set to False according to https://github.com/pytorch/pytorch/issues/100069 param_ctx = FSDP.summon_full_params(self.module, writeback=False, recurse=False) with param_ctx, torch.autocast(device_type=get_device_name(), dtype=torch.bfloat16): output = self.module.generate( input_ids=idx, attention_mask=attention_mask, position_ids=position_ids, do_sample=do_sample, max_new_tokens=response_length, eos_token_id=eos_token_id, pad_token_id=pad_token_id, generation_config=generation_config, output_scores=False, # this is potentially very large return_dict_in_generate=True, use_cache=True, ) # TODO: filter out the seq with no answers like ds-chat seq = output.sequences generated_batch_size = seq.size(0) # bs * num_return_sequences # huggingface generate will stop generating when all sequences in the batch reach [EOS]. # We have to pad to response_length sequence_length = prompt_length + self.config.response_length delta_length = sequence_length - seq.shape[1] if delta_length > 0: delta_tokens = torch.ones(size=(generated_batch_size, delta_length), device=seq.device, dtype=seq.dtype) delta_tokens = pad_token_id * delta_tokens seq = torch.cat((seq, delta_tokens), dim=1) assert seq.shape[1] == sequence_length # make necessary repetitions if num_return_sequences > 1 num_return_sequences = kwargs.get("num_return_sequences", 1) if num_return_sequences > 1: position_ids = position_ids.repeat_interleave(num_return_sequences, dim=0) attention_mask = attention_mask.repeat_interleave(num_return_sequences, dim=0) prompt = seq[:, :prompt_length] # (generated_batch_size, prompt_length) response = seq[:, prompt_length:] # (generated_batch_size, response_length) response_length = response.size(1) delta_position_id = torch.arange(1, response_length + 1, device=position_ids.device) delta_position_id = delta_position_id.unsqueeze(0).repeat(generated_batch_size, 1) response_position_ids = position_ids[:, -1:] + delta_position_id position_ids = torch.cat([position_ids, response_position_ids], dim=-1) response_attention_mask = get_response_mask( response_id=response, eos_token=eos_token_id, dtype=attention_mask.dtype ) attention_mask = torch.cat((attention_mask, response_attention_mask), dim=-1) batch = TensorDict( { "prompts": prompt, "responses": response, "input_ids": seq, "attention_mask": attention_mask, "position_ids": position_ids, }, batch_size=generated_batch_size, ) # empty cache before compute old_log_prob get_torch_device().empty_cache() self.module.train() return DataProto(batch=batch) ================================================ FILE: verl_distillation/verl/workers/rollout/naive/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .naive_rollout import NaiveRollout __all__ = ["NaiveRollout"] ================================================ FILE: verl_distillation/verl/workers/rollout/naive/naive_rollout.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ In single GPU rollout, the sequences are generated directly by sampling from the model. The output will contain 1. output_ids 2. attention_masks (left padding) 3. eos_masks 4. log_probs """ import torch import torch.nn.functional as F from tensordict import TensorDict from torch import nn from verl import DataProto from verl.utils.torch_functional import logprobs_from_logits from ..base import BaseRollout __all__ = ["NaiveRollout"] class NaiveRollout(BaseRollout): def __init__(self, module: nn.Module, config): """A naive rollout. It requires the module to be compatible with huggingface APIs. That is: The module should define __call__ to receive input_ids, attention_mask and position_ids. It outputs a structure that contains a logits field.
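For example, the contract assumed by generate_sequences below (shapes schematic): output = module(input_ids=idx, attention_mask=mask, position_ids=pos); output.logits has shape (bs, seq_len, vocab_size).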
Args: module: module here follows huggingface APIs config: DictConfig """ super().__init__() self.config = config self.module = module @torch.no_grad() def generate_sequences(self, prompts: DataProto) -> DataProto: """Generate sequences""" idx = prompts.batch["input_ids"] # (bs, prompt_length) attention_mask = prompts.batch["attention_mask"] # left-padded attention_mask position_ids = prompts.batch["position_ids"] # used to construct attention_mask eos_token_id = prompts.meta_info["eos_token_id"] batch_size = idx.size(0) prompt_length = idx.size(1) self.module.eval() prev_attention_mask = torch.ones(size=(batch_size, 1), dtype=attention_mask.dtype, device=attention_mask.device) logits_lst = [] for _ in range(self.config.response_length): # if the sequence context is growing too long we must crop it at block_size # idx_cond = idx if idx.size(1) <= self.config.block_size else idx[:, -self.config.block_size:] idx_cond = idx # forward the model to get the logits for the index in the sequence # we use huggingface APIs here output = self.module(input_ids=idx_cond, attention_mask=attention_mask, position_ids=position_ids) logits = output.logits # pluck the logits at the final step and scale by desired temperature logits = logits[:, -1, :] / self.config.temperature # (bs, vocab_size) # optionally crop the logits to only the top k options if self.config.top_k is not None: v, _ = torch.topk(logits, min(self.config.top_k, logits.size(-1))) logits[logits < v[:, [-1]]] = -float("Inf") # apply softmax to convert logits to (normalized) probabilities probs = F.softmax(logits, dim=-1) # sample from the distribution if self.config.do_sample: idx_next = torch.multinomial(probs, num_samples=1) else: idx_next = torch.argmax(probs, dim=-1, keepdim=True) attention_mask = torch.cat((attention_mask, prev_attention_mask), dim=-1) for token_id in eos_token_id: prev_attention_mask = torch.logical_and(idx_next != token_id, prev_attention_mask.bool()) prev_attention_mask = prev_attention_mask.to(attention_mask.dtype) # cast back so the next torch.cat keeps a consistent dtype position_ids = torch.cat((position_ids, position_ids[:, -1:] + 1), dim=-1) # append sampled index to the running sequence and continue idx = torch.cat((idx, idx_next), dim=1) logits_lst.append(logits) logits = torch.stack(logits_lst, dim=1) # (bs, response_length, vocab_size) prompts = idx[:, :prompt_length] # (bs, prompt_length) response = idx[:, prompt_length:] # (bs, response_length) log_probs = logprobs_from_logits(logits=logits, labels=response) batch = TensorDict( { "input_ids": prompts, "responses": response, "sequences": idx, "old_log_probs": log_probs, "attention_mask": attention_mask, "position_ids": position_ids, }, batch_size=batch_size, ) self.module.train() return DataProto(batch=batch) ================================================ FILE: verl_distillation/verl/workers/rollout/replica.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
import asyncio import logging import os from abc import ABC, abstractmethod from enum import Enum from typing import Callable, Optional from pydantic import BaseModel from ray.actor import ActorHandle from verl.single_controller.ray import RayClassWithInitArgs, RayWorkerGroup from verl.trainer.ppo.ray_trainer import RayResourcePool, ResourcePoolManager from verl.utils.config import omega_conf_to_dataclass from verl.workers.config import HFModelConfig, RolloutConfig logger = logging.getLogger(__file__) class TokenOutput(BaseModel): token_ids: list[int] """response token ids""" log_probs: Optional[list[float]] = None """logprobs of response token ids""" class RolloutMode(Enum): # Rollout engine and training engine(fsdp/megatron) fused in same process # Rollout and trainer share GPUs, switch context with weight synchronization. # Usage scenarios: on-policy training. HYBRID = "hybrid" # Rollout engine colocated with hybrid engine in same ray placement group but in separate process. # Rollout and hybrid processes share GPUs, switch context without weight synchronization. # Usage scenarios: GRM (LLM as a judge). COLOCATED = "colocated" # Standalone rollout server with separate GPU resource, disaggregated architecture. # Usage scenarios: off-policy training. STANDALONE = "standalone" class RolloutReplica(ABC): """Rollout replica is an individual server instance, which may be deployed on single or multiple nodes. It is equivalent to launching the server on each node from the command line: SGLang: ``` python -m sglang.launch_server --node-rank 0 --nnode 2 ... python -m sglang.launch_server --node-rank 1 --nnode 2 ... ``` vLLM: ``` vllm serve --data-parallel-size 16 --data-parallel-size-local 8 --data-parallel-start-rank 0 ... vllm serve --data-parallel-size 16 --data-parallel-size-local 8 --data-parallel-start-rank 8 ... ``` Args: replica_rank: int, rank of this rollout replica. config: RolloutConfig, full config. model_config: HFModelConfig, model config. gpus_per_node: int, number of gpus per node. is_reward_model: bool, whether this replica serves a reward model. """ def __init__( self, replica_rank: int, config: RolloutConfig, model_config: HFModelConfig, gpus_per_node: int = 8, is_reward_model: bool = False, ) -> None: self.replica_rank = replica_rank self.config = omega_conf_to_dataclass(config) self.model_config: HFModelConfig = omega_conf_to_dataclass(model_config, dataclass_type=HFModelConfig) self.world_size = ( self.config.tensor_model_parallel_size * self.config.data_parallel_size * self.config.pipeline_model_parallel_size ) self.gpus_per_node = min(gpus_per_node, self.world_size) assert self.world_size % self.gpus_per_node == 0, ( f"world_size {self.world_size} must be divisible by gpus_per_node {self.gpus_per_node}" ) self.nnodes = self.world_size // self.gpus_per_node self.is_reward_model = is_reward_model self.rollout_mode: RolloutMode = None self.workers: list[ActorHandle] = [] self.resource_pool: RayResourcePool = None self.servers: list[ActorHandle] = [] self._server_address: str = None self._server_handle: ActorHandle = None async def init_hybrid(self, worker_group: RayWorkerGroup): """Init hybrid rollout server, rollout engine and training engine(fsdp/megatron) fused in same process. Args: worker_group: RayWorkerGroup, fused workers where training engine(fsdp/megatron) have been initialized. """ self.rollout_mode = RolloutMode.HYBRID self.workers = worker_group.workers[ self.world_size * self.replica_rank : self.world_size * (self.replica_rank + 1) ] await self.launch_servers() # TODO(@dyy): init with resource_pool?
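# Each replica owns a contiguous world_size-sized slice of the fused worker
# group: e.g., with world_size = 8, replica 0 takes workers[0:8] and replica 1
# takes workers[8:16], matching the slicing used in init_hybrid and
# init_colocated.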
async def init_colocated(self, worker_group: RayWorkerGroup): """Init colocated rollout server, rollout engine and hybrid engine colocated in same ray placement group but in separate processes. Args: worker_group: RayWorkerGroup, worker group whose ray placement group already hosts the hybrid engine processes. """ self.rollout_mode = RolloutMode.COLOCATED self.workers = worker_group.workers[ self.world_size * self.replica_rank : self.world_size * (self.replica_rank + 1) ] await self.launch_servers() async def init_standalone(self): """Init standalone rollout server, create new resource pool for this rollout.""" # create resource pool for this rollout; keep reward-model pools distinct, mirroring the name_prefix logic below self.rollout_mode = RolloutMode.STANDALONE resource_pool_name = ( f"rollout_pool_reward_{self.replica_rank}" if self.is_reward_model else f"rollout_pool_{self.replica_rank}" ) resource_pool_spec = { resource_pool_name: [self.gpus_per_node] * self.nnodes, } resource_pool_manager = ResourcePoolManager(resource_pool_spec=resource_pool_spec, mapping=None) resource_pool_manager.create_resource_pool() self.resource_pool = resource_pool_manager.resource_pool_dict[resource_pool_name] # create worker group for this rollout worker_group = RayWorkerGroup( resource_pool=self.resource_pool, ray_cls_with_init=self.get_ray_class_with_init_args(), bin_pack=False, name_prefix=f"rollout_standalone_{self.replica_rank}" if not self.is_reward_model else f"rollout_reward_standalone_{self.replica_rank}", ) self.workers = worker_group.workers await self.launch_servers() @abstractmethod def get_ray_class_with_init_args(self) -> RayClassWithInitArgs: """Get rollout worker actor class for colocated and standalone mode.""" raise NotImplementedError @abstractmethod async def launch_servers(self): """Launch http server in each node.""" raise NotImplementedError @property def server_address(self) -> str: """Get rollout server address for OpenAI chat completion.""" return self._server_address @property def server_handle(self) -> ActorHandle: """Get rollout server handle for Token-in-token-out generation.""" return self._server_handle async def wake_up(self): """Wake up each rollout server.""" await asyncio.gather(*[server.wake_up.remote() for server in self.servers]) async def sleep(self): """Sleep each rollout server.""" await asyncio.gather(*[server.sleep.remote() for server in self.servers]) class RolloutReplicaRegistry: """Factory for managing rollout replica implementations.""" _registry: dict[str, Callable[[], type[RolloutReplica]]] = {} @classmethod def register(cls, name: str, loader: Callable[[], type[RolloutReplica]]) -> None: """Register a new rollout replica type.""" cls._registry[name] = loader @classmethod def get(cls, name: str) -> type[RolloutReplica]: """Get a rollout replica class by name.""" if name not in cls._registry: raise ValueError(f"Unknown rollout mode: {name}.
Available: {list(cls._registry.keys())}") return cls._registry[name]() # Loader functions for built-in types def _load_vllm(): from verl.workers.rollout.vllm_rollout.vllm_async_server import vLLMReplica return vLLMReplica def _load_sglang(): os.environ["SGLANG_USE_CPU_ENGINE"] = "1" try: import vllm # noqa: F401 except ImportError: import sys from unittest.mock import Mock mock_vllm = Mock() mock_vllm._custom_ops = Mock() mock_vllm._custom_ops.scaled_fp8_quant = Mock() sys.modules["vllm"] = mock_vllm sys.modules["vllm._custom_ops"] = mock_vllm._custom_ops from verl.workers.rollout.sglang_rollout.async_sglang_server import SGLangReplica del os.environ["SGLANG_USE_CPU_ENGINE"] return SGLangReplica # Register built-in types RolloutReplicaRegistry.register("vllm", _load_vllm) RolloutReplicaRegistry.register("sglang", _load_sglang) # Original function for backward compatibility def get_rollout_replica_class(rollout: str) -> type[RolloutReplica]: return RolloutReplicaRegistry.get(rollout) ================================================ FILE: verl_distillation/verl/workers/rollout/schemas.py ================================================ # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
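# Data-flow overview for this module: an AsyncRolloutRequest is seeded from a chat prompt
# (initialize_request), grows incrementally through add_assistant_message /
# add_tool_response_messages / add_user_message, and is closed by finalize(), which can
# compare the incrementally built input_ids against a one-shot apply_chat_template pass
# as a tokenization sanity check.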
import difflib import logging import os from enum import Enum from typing import Any, Optional import torch from pydantic import BaseModel, ConfigDict, model_validator from transformers import PreTrainedTokenizer, PreTrainedTokenizerFast, ProcessorMixin from verl.tools.schemas import OpenAIFunctionToolCall, OpenAIFunctionToolSchema, ToolResponse from verl.utils.model import compute_position_id_with_mask logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) BASE_CHAT_HISTORY = [ {"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "I am a user."}, ] class FinishReasonTypeEnum(str, Enum): """The enum for finish reason type.""" LENGTH = "length" STOP = "stop" TOOL_CALL = "tool_calls" @classmethod def from_str(cls, value: str) -> "FinishReasonTypeEnum": if value == "stop": return cls.STOP elif value == "length": return cls.LENGTH elif value == "tool_calls": return cls.TOOL_CALL else: raise ValueError(f"Unsupported finish reason type: {value}") class Message(BaseModel): role: str content: str | dict[str, Any] | list[dict[str, Any]] | ToolResponse tool_calls: Optional[list[OpenAIFunctionToolCall]] = None class AsyncRolloutRequestStateEnum(str, Enum): """The enum for async rollout request state.""" PENDING = "pending" RUNNING = "running" COMPLETED = "completed" FAILED = "failed" TOOL_CALLING = "tool_calling" INTERACTING = "interacting" class TokenizationSanityCheckModeEnum(str, Enum): """The enum for tokenization sanity check mode.""" DISABLE = "disable" STRICT = "strict" IGNORE_STRIPPABLE = "ignore_strippable" class AsyncRolloutRequest(BaseModel): """The data model for async rollout.""" model_config = ConfigDict(arbitrary_types_allowed=True) batch_data_id: int = 0 rollout_offset: int = 0 request_id: str state: AsyncRolloutRequestStateEnum messages: list[Message] multi_modal_keys: Optional[list[str]] = None multi_modal_data: Optional[dict[str, Any]] = None multi_modal_inputs: Optional[dict[str, torch.Tensor]] = None tool_schemas: Optional[list[OpenAIFunctionToolSchema]] = None tools_kwargs: dict[str, Any] = {} interaction_kwargs: dict[str, Any] = {} input_ids: Optional[torch.Tensor] = None prompt_ids: Optional[torch.Tensor] = None response_ids: Optional[torch.Tensor] = None attention_mask: Optional[torch.Tensor] = None prompt_attention_mask: Optional[torch.Tensor] = None response_attention_mask: Optional[torch.Tensor] = None position_ids: Optional[torch.Tensor] = None prompt_position_ids: Optional[torch.Tensor] = None response_position_ids: Optional[torch.Tensor] = None loss_mask: Optional[torch.Tensor] = None prompt_loss_mask: Optional[torch.Tensor] = None response_loss_mask: Optional[torch.Tensor] = None reward_scores: dict[str, float] max_prompt_len: int max_response_len: int = 8192 max_model_len: int = 32768 metrics: dict[str, list[Any]] = {} output_token_ids: torch.Tensor | None = None rollout_log_probs: torch.Tensor | None = None use_inference_chat_template: bool tokenization_sanity_check_mode: TokenizationSanityCheckModeEnum generation_prompt_ids: Optional[torch.Tensor] = None base_conv_wo_gen_prompt_end_pos: int base_conv_with_gen_prompt_end_pos: int @model_validator(mode="before") @classmethod def initialize_request(cls, values): if not (messages := values.get("messages")): raise ValueError("messages is required for AsyncRolloutRequest initialization") if not (max_prompt_len := values.get("max_prompt_len")): raise ValueError("max_prompt_len is required for AsyncRolloutRequest initialization") if not 
(processing_class := values.pop("processing_class", None)): raise ValueError("processing_class is required for AsyncRolloutRequest initialization") values["messages"] = [Message.model_validate(msg) for msg in messages] # If there is no multi_modal_keys, we assume the multi-modal data is image and video. if not values.get("multi_modal_keys"): values["multi_modal_keys"] = ["image", "video"] if not values.get("multi_modal_data"): values["multi_modal_data"] = {key: [] for key in values["multi_modal_keys"]} else: # check if all multi_modal_keys are in multi_modal_data for key in values["multi_modal_keys"]: if key not in values["multi_modal_data"]: values["multi_modal_data"][key] = [] if not values.get("multi_modal_inputs"): values["multi_modal_inputs"] = {} tools = ( [tool.model_dump() for tool in tool_schemas] if (tool_schemas := values.get("tool_schemas", [])) else None ) multi_modal_data = values["multi_modal_data"] tokens_without_prompt = cls._handle_apply_chat_template( processing_class, messages, multi_modal_data=multi_modal_data, tools=tools, add_generation_prompt=False, tokenize=True, ) if ( values.get("input_ids") is None or values.get("attention_mask") is None or values.get("position_ids") is None ): tokenization_dict_with_prompt = cls._handle_apply_chat_template( processing_class, messages, multi_modal_data=multi_modal_data, tools=tools, add_generation_prompt=True, tokenize=True, return_dict=True, ) values["input_ids"], values["attention_mask"] = ( tokenization_dict_with_prompt["input_ids"], tokenization_dict_with_prompt["attention_mask"], ) if values["input_ids"].shape[-1] > max_prompt_len: # Only log the warning to avoid truncating in the middle of generation prompt. Consider raising an # error for this case in the future. # Ensure batch_data_id exists with default value if not provided if "batch_data_id" not in values: values["batch_data_id"] = cls.model_fields["batch_data_id"].default logger.warning( f"Prompt {values['batch_data_id']} has length {values['input_ids'].shape[-1]} " f"which is greater than max_prompt_len {max_prompt_len} after applied chat template with tools." 
) # Process multi_modal_inputs multi_modal_inputs = tokenization_dict_with_prompt.copy() multi_modal_inputs.pop("input_ids", None) multi_modal_inputs.pop("attention_mask", None) values["multi_modal_inputs"] = multi_modal_inputs values["position_ids"] = values["prompt_position_ids"] = cls._get_position_ids( processing_class, values["input_ids"], values["attention_mask"], multi_modal_inputs ) values["prompt_ids"], values["prompt_attention_mask"] = values["input_ids"], values["attention_mask"] values["loss_mask"] = values["prompt_loss_mask"] = torch.zeros_like(values["input_ids"], dtype=torch.bool) values["generation_prompt_ids"] = values["input_ids"][..., tokens_without_prompt.shape[-1] :] values["base_conv_wo_gen_prompt_end_pos"] = cls._handle_apply_chat_template( processing_class, BASE_CHAT_HISTORY, multi_modal_data=multi_modal_data, tools=tools, add_generation_prompt=False, tokenize=True, ).shape[-1] values["base_conv_with_gen_prompt_end_pos"] = cls._handle_apply_chat_template( processing_class, BASE_CHAT_HISTORY, multi_modal_data=multi_modal_data, tools=tools, add_generation_prompt=True, tokenize=True, ).shape[-1] return values @staticmethod def _handle_apply_chat_template( processing_class: PreTrainedTokenizer | PreTrainedTokenizerFast | ProcessorMixin, messages: list[Message], multi_modal_data: dict[str, Any], tools: Optional[list[OpenAIFunctionToolSchema]] = None, add_generation_prompt: bool = False, tokenize: bool = False, return_dict: bool = False, ): raw_prompt = processing_class.apply_chat_template( messages, tools=tools, add_generation_prompt=add_generation_prompt, tokenize=False ) if not tokenize: return raw_prompt if isinstance(processing_class, PreTrainedTokenizer) or isinstance(processing_class, PreTrainedTokenizerFast): if any(len(values) > 0 for values in multi_modal_data.values()): logger.warning( "There is multi_modal_data but you are not using a processor. Multi-modal data will be ignored." 
) model_inputs = processing_class(text=[raw_prompt], return_tensors="pt") elif isinstance(processing_class, ProcessorMixin): # When we update multi_model_keys, we also need to update this logic images = images if len(images := multi_modal_data.get("image", [])) > 0 else None videos = videos if len(videos := multi_modal_data.get("video", [])) > 0 else None model_inputs = processing_class(text=[raw_prompt], images=images, videos=videos, return_tensors="pt") else: raise ValueError(f"Unsupported processing class type: {type(processing_class)}") model_inputs = dict(model_inputs) if return_dict: return model_inputs else: return model_inputs["input_ids"] @staticmethod def _get_position_ids( processing_class: PreTrainedTokenizer | PreTrainedTokenizerFast | ProcessorMixin, input_ids: torch.Tensor, attention_mask: torch.Tensor, multi_modal_inputs: Optional[dict[str, torch.Tensor]] = None, ) -> torch.Tensor: # special case for qwen2vl is_qwen2vl = ( hasattr(processing_class, "image_processor") and "Qwen2VLImageProcessor" in processing_class.image_processor.__class__.__name__ ) if is_qwen2vl: from verl.models.transformers.qwen2_vl import get_rope_index image_grid_thw = video_grid_thw = second_per_grid_ts = None if multi_modal_inputs: image_grid_thw = multi_modal_inputs.get("image_grid_thw") video_grid_thw = multi_modal_inputs.get("video_grid_thw") second_per_grid_ts = multi_modal_inputs.get("second_per_grid_ts") assert input_ids.dim() == 2 and input_ids.shape[0] == 1, ( f"input_ids should be 2D with batch size 1, but got shape {input_ids.shape}" ) assert attention_mask.dim() == 2 and attention_mask.shape[0] == 1, ( f"attention_mask should be 2D with batch size 1, but got shape {attention_mask.shape}" ) new_position_ids = get_rope_index( processing_class, input_ids=input_ids.squeeze(0), image_grid_thw=image_grid_thw, video_grid_thw=video_grid_thw, second_per_grid_ts=second_per_grid_ts, attention_mask=attention_mask.squeeze(0), ) return new_position_ids # (3, seq_len) else: return compute_position_id_with_mask(attention_mask) # (1, seq_len) def _update_input_ids( self, processing_class: PreTrainedTokenizer | PreTrainedTokenizerFast | ProcessorMixin, new_input_ids: torch.Tensor, attention_mask: bool, loss_mask: bool, new_multi_modal_inputs: Optional[dict[str, torch.Tensor]] = None, ) -> None: """ Update the input_ids, attention_mask, position_ids, and loss_mask of the request in additive manner. 
""" self.input_ids = torch.cat([self.input_ids, new_input_ids], dim=-1) attention_mask = torch.ones_like(new_input_ids) * int(attention_mask) self.attention_mask = torch.cat([self.attention_mask, attention_mask], dim=-1) loss_mask = torch.ones_like(new_input_ids) * int(loss_mask) self.loss_mask = torch.cat([self.loss_mask, loss_mask], dim=-1) if new_multi_modal_inputs: self._update_multi_modal_inputs(new_multi_modal_inputs) new_position_ids = self._get_position_ids( processing_class, new_input_ids, attention_mask, new_multi_modal_inputs ) last_pos = self.position_ids[..., -1:] new_position_ids = new_position_ids + (last_pos + 1) self.position_ids = torch.cat([self.position_ids, new_position_ids], dim=-1) assert ( self.input_ids.shape[-1] == self.attention_mask.shape[-1] == self.position_ids.shape[-1] == self.loss_mask.shape[-1] ), f"""Request {self.request_id} has different length of {self.input_ids.shape[-1]=}, {self.attention_mask.shape[-1]=}, {self.position_ids.shape[-1]=}, {self.loss_mask.shape[-1]=}""" def _update_multi_modal_inputs(self, new_multi_modal_inputs: dict[str, torch.Tensor]) -> None: """ Update the multi_modal_inputs of the request in additive manner. """ for key in new_multi_modal_inputs: input_tensor = new_multi_modal_inputs[key] self.multi_modal_inputs[key] = ( torch.cat([self.multi_modal_inputs[key], input_tensor], dim=0) if key in self.multi_modal_inputs else input_tensor ) def get_generation_prompt_ids( self, processing_class: PreTrainedTokenizer | PreTrainedTokenizerFast | ProcessorMixin ) -> list[int]: """ Get the generation prompt ids for rollout engine. Because rollout engine(SGLang) requires the ids to be a list, we need to convert the tensor to a list. """ generation_prompt_ids = ( None if self.input_ids[..., -self.generation_prompt_ids.shape[-1] :].eq(self.generation_prompt_ids).all() else self.generation_prompt_ids ) if generation_prompt_ids is not None: self._update_input_ids(processing_class, generation_prompt_ids, attention_mask=True, loss_mask=False) if self.use_inference_chat_template: messages = [msg.model_dump() for msg in self.messages] tools = [tool.model_dump() for tool in self.tool_schemas] if self.tool_schemas else None generation_prompt_ids = self._handle_apply_chat_template( processing_class, messages, multi_modal_data=self.multi_modal_data, tools=tools, add_generation_prompt=True, tokenize=True, ) return generation_prompt_ids.squeeze(0).tolist() else: return self.input_ids.squeeze(0).tolist() def add_user_message( self, processing_class: PreTrainedTokenizer | PreTrainedTokenizerFast | ProcessorMixin, content: str, ) -> None: self.messages.append(Message(role="user", content=content)) messages = [*BASE_CHAT_HISTORY, self.messages[-1]] tools = [tool.model_dump() for tool in self.tool_schemas] if self.tool_schemas else None # We don't need to pass multi_modal_data here because we don't have any multi-modal data from Engine # Inference, it is pure text. 
content_ids = self._handle_apply_chat_template( processing_class, messages, multi_modal_data={}, tools=tools, add_generation_prompt=False, tokenize=True )[..., self.base_conv_wo_gen_prompt_end_pos :] self._update_input_ids(processing_class, content_ids, attention_mask=True, loss_mask=False) def add_assistant_message( self, processing_class: PreTrainedTokenizer | PreTrainedTokenizerFast | ProcessorMixin, content: str, content_ids: Optional[torch.Tensor] = None, tool_calls: Optional[list[OpenAIFunctionToolCall]] = None, ) -> None: self.messages.append(Message(role="assistant", content=content, tool_calls=tool_calls)) if content_ids is None: messages = [*BASE_CHAT_HISTORY, self.messages[-1]] tools = [tool.model_dump() for tool in self.tool_schemas] if self.tool_schemas else None # We don't need to pass multi_modal_data here because we don't have any multi-modal data from Engine # Inference, it is pure text. content_ids = self._handle_apply_chat_template( processing_class, messages, multi_modal_data={}, tools=tools, add_generation_prompt=False, tokenize=True )[..., self.base_conv_with_gen_prompt_end_pos :] self._update_input_ids(processing_class, content_ids, attention_mask=True, loss_mask=True) def add_tool_response_messages( self, processing_class: PreTrainedTokenizer | PreTrainedTokenizerFast | ProcessorMixin, contents: list[ToolResponse], ) -> None: if not contents or all(content.is_empty() for content in contents): return # We also handle the case when tool returns image # We require the processing of the image and video to be done at tool.execute() level delta_multi_modal_data = {key: [] for key in self.multi_modal_keys} for content in contents: if content.is_text_only(): self.messages.append(Message(role="tool", content=content.text)) else: content_list = [] # When we update multi_model_keys, we also need to update this logic if content.image: content_list.extend([{"type": "image"} for _ in content.image]) delta_multi_modal_data["image"].extend(content.image) if content.video: content_list.extend([{"type": "video"} for _ in content.video]) delta_multi_modal_data["video"].extend(content.video) if content.text: content_list.append({"type": "text", "text": content.text}) self.messages.append(Message(role="tool", content=content_list)) messages = [*BASE_CHAT_HISTORY, *self.messages[-len(contents) :]] tools = [tool.model_dump() for tool in self.tool_schemas] if self.tool_schemas else None for key in self.multi_modal_keys: if len(delta_multi_modal_data[key]) > 0: self.multi_modal_data[key].extend(delta_multi_modal_data[key]) # We just passed the new multi-modal data to the chat template to update the input_ids. content_info = self._handle_apply_chat_template( processing_class, messages, multi_modal_data=delta_multi_modal_data, tools=tools, add_generation_prompt=False, tokenize=True, return_dict=True, ) content_ids = content_info["input_ids"][..., self.base_conv_wo_gen_prompt_end_pos :] # process multi_modal_inputs multi_modal_inputs = content_info.copy() multi_modal_inputs.pop("input_ids", None) multi_modal_inputs.pop("attention_mask", None) # chat templates include generation prompt tokens (e.g., "assistant\n") # So when tool response is added, we need to explicitly remove these tokens. 
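        # For example, with a Qwen-style template the generation prompt rendered by
        # apply_chat_template(..., add_generation_prompt=True) is the trailing
        # "<|im_start|>assistant\n"; if those ids were still sitting at the end of
        # input_ids, the tool response tokens would be appended after them.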
self._remove_generation_prompt_ids_if_present() self._update_input_ids( processing_class, content_ids, attention_mask=True, loss_mask=False, new_multi_modal_inputs=multi_modal_inputs, ) def update_metrics(self, metrics: Any, tool_id: str) -> None: """ metrics: should be a dict of tools_name -> Any """ if self.metrics.get(tool_id) is None: self.metrics[tool_id] = [] self.metrics[tool_id].append(metrics) def _get_prompt_diffs( self, processing_class: PreTrainedTokenizer | PreTrainedTokenizerFast | ProcessorMixin, full_prompt_ids: torch.Tensor, current_prompt_ids: torch.Tensor, diff_surrounding_chars: int = 10, ) -> list[dict[str, Any]]: """Get differences between full prompt and current prompt with surrounding context. This function helps debug tokenization mismatches by showing the differences between full prompt and current prompt with surrounding context. Instead of just showing the exact diff, it includes additional tokens before and after to help locate the issue in the chat template. For example, if the actual diff is a newline change from "\n\n" to "\n", with diff_surrounding_chars the output might look like: full_prompt_chunk: "<|im_start|>assistant\n\nI think..." current_prompt_chunk: "<|im_start|>assistant\nI think..." This context makes it much easier to identify where in the chat template the mismatch occurs. Args: processing_class: The processing class to use for decoding the token IDs full_prompt_ids: Token IDs from applying chat template to all messages at once current_prompt_ids: Token IDs from incremental chat template application diff_surrounding_chars: Number of surrounding characters to include for context (default: 10) Returns: List of dicts containing the differing chunks with context and their indices """ full_prompt_ids = full_prompt_ids.squeeze(0) current_prompt_ids = current_prompt_ids.squeeze(0) full_prompt = processing_class.decode(full_prompt_ids, skip_special_tokens=False) current_prompt = processing_class.decode(current_prompt_ids, skip_special_tokens=False) s = difflib.SequenceMatcher(None, full_prompt, current_prompt, autojunk=False) diffs = [] for tag, i1, i2, j1, j2 in s.get_opcodes(): if tag == "equal": continue # Get the surrounding context for better readability start_i = max(0, i1 - diff_surrounding_chars) end_i = min(len(full_prompt), i2 + diff_surrounding_chars) start_j = max(0, j1 - diff_surrounding_chars) end_j = min(len(current_prompt), j2 + diff_surrounding_chars) diffs.append( { "full_prompt_chunk": full_prompt[start_i:end_i], "current_prompt_chunk": current_prompt[start_j:end_j], "indices": (start_i, end_i, start_j, end_j), } ) return diffs def _remove_generation_prompt_ids_if_present(self) -> None: """ Remove generation prompt IDs from input tensors if they are present at the end. 
""" if self.input_ids[..., -self.generation_prompt_ids.shape[-1] :].eq(self.generation_prompt_ids).all(): self.input_ids = self.input_ids[..., : -self.generation_prompt_ids.shape[-1]] self.attention_mask = self.attention_mask[..., : -self.generation_prompt_ids.shape[-1]] self.position_ids = self.position_ids[..., : -self.generation_prompt_ids.shape[-1]] self.loss_mask = self.loss_mask[..., : -self.generation_prompt_ids.shape[-1]] def finalize( self, processing_class: PreTrainedTokenizer | PreTrainedTokenizerFast | ProcessorMixin, reward_scores: dict[str, list[float]], finish_reason_type: FinishReasonTypeEnum = FinishReasonTypeEnum.STOP, ) -> None: self.state = AsyncRolloutRequestStateEnum.COMPLETED self.reward_scores = reward_scores # In case we failed to generate the assistant message and the generation prompt ids were already added to # input_ids, remove them from the end of input_ids self._remove_generation_prompt_ids_if_present() self.response_ids = self.input_ids[..., self.prompt_ids.shape[-1] :] if self.tokenization_sanity_check_mode != TokenizationSanityCheckModeEnum.DISABLE: # When there is a diff, we log the diffs with diff_surrounding_chars context diff_surrounding_chars = 10 messages = [msg.model_dump() for msg in self.messages] tools = [tool.model_dump() for tool in self.tool_schemas] if self.tool_schemas else None full_prompt_info = self._handle_apply_chat_template( processing_class, messages, multi_modal_data=self.multi_modal_data, tools=tools, add_generation_prompt=False, tokenize=True, return_dict=True, ) full_prompt_ids = full_prompt_info["input_ids"] # We must use dict(full_prompt_info) to convert BatchFeature values to a new dict # because np.array() only keeps the keys for BatchFeature. full_prompt_multi_modal_inputs = full_prompt_info.copy() full_prompt_multi_modal_inputs.pop("input_ids", None) full_prompt_multi_modal_inputs.pop("attention_mask", None) for multi_modal_inputs_key in self.multi_modal_inputs: if multi_modal_inputs_key in full_prompt_multi_modal_inputs: if ( not self.multi_modal_inputs[multi_modal_inputs_key] .eq(full_prompt_multi_modal_inputs[multi_modal_inputs_key]) .all() ): logger.warning( f"Multi-modal data {multi_modal_inputs_key} is not consistent. " f"This may lead to unexpected behavior during training. " f"Please review your multi_modal_inputs logic." ) else: logger.warning( f"Multi-modal inputs key {multi_modal_inputs_key} is not found in the multi_modal_inputs. " f"This may lead to unexpected behavior during training." f"Please review your multi_modal_inputs logic." ) if diffs := self._get_prompt_diffs( processing_class, full_prompt_ids, self.input_ids, diff_surrounding_chars=diff_surrounding_chars ): log_warning = False if self.tokenization_sanity_check_mode == TokenizationSanityCheckModeEnum.STRICT: log_warning = True elif self.tokenization_sanity_check_mode == TokenizationSanityCheckModeEnum.IGNORE_STRIPPABLE: non_strippable_diffs_exist = any( d["full_prompt_chunk"].strip() or d["current_prompt_chunk"].strip() for d in diffs ) if non_strippable_diffs_exist: log_warning = True if log_warning: mode_str = f" ({self.tokenization_sanity_check_mode.value})" logger.warning( f"Inconsistent training and inference tokenization detected{mode_str}. This may lead to " f"unexpected behavior during training. Please review your chat template to determine if this " f"is intentional. For more information, refer to the multiturn README.md." 
) logger.warning( f"Showing {diff_surrounding_chars} characters before and after the diffs for context and " f"better readability." ) diff_details_list = [] for d in diffs: i1, i2, j1, j2 = d["indices"] diff_details_list.append( f"idx {i1}:{i2} -> {j1}:{j2} | full_prompt_chunk: {repr(d['full_prompt_chunk'])} | " f"current_prompt_chunk: {repr(d['current_prompt_chunk'])}" ) diff_details = "\n".join(diff_details_list) logger.warning(f"Found differences:\n{diff_details}") if finish_reason_type == FinishReasonTypeEnum.STOP: pass elif finish_reason_type == FinishReasonTypeEnum.LENGTH: pass else: raise ValueError(f"Unsupported finalize finish reason type: {finish_reason_type}") self.truncate_output_ids(processing_class) assert ( self.input_ids.shape[-1] == self.attention_mask.shape[-1] == self.position_ids.shape[-1] == self.loss_mask.shape[-1] ), f"""Request {self.request_id} has different length of {self.input_ids.shape[-1]=}, {self.attention_mask.shape[-1]=}, {self.position_ids.shape[-1]=}, {self.loss_mask.shape[-1]=}""" def truncate_output_ids( self, processing_class: PreTrainedTokenizer | PreTrainedTokenizerFast | ProcessorMixin ) -> None: self.input_ids = self.input_ids[..., : self.max_model_len] self.attention_mask = self.attention_mask[..., : self.max_model_len] self.position_ids = self.position_ids[..., : self.max_model_len] self.loss_mask = self.loss_mask[..., : self.max_model_len] self.response_ids = self.input_ids[..., self.prompt_ids.shape[-1] :][..., : self.max_response_len] self.response_attention_mask = self.attention_mask[..., self.prompt_attention_mask.shape[-1] :][ ..., : self.max_response_len ] self.response_position_ids = self.position_ids[..., self.prompt_position_ids.shape[-1] :][ ..., : self.max_response_len ] self.response_loss_mask = self.loss_mask[..., self.prompt_loss_mask.shape[-1] :][..., : self.max_response_len] ================================================ FILE: verl_distillation/verl/workers/rollout/sglang_rollout/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and ================================================ FILE: verl_distillation/verl/workers/rollout/sglang_rollout/async_sglang_server.py ================================================ # Copyright 2023-2024 SGLang Team # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
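# Topology note: each SGLangReplica launches one SGLangHttpServer ray actor per node,
# pinned with NodeAffinitySchedulingStrategy. Node rank 0 owns both the NCCL
# dist_init_addr (see get_master_address) and the public HTTP endpoint; nodes with
# node_rank > 0 join the same engine via dist_init_addr but do not start an HTTP server.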
import asyncio import dataclasses import logging import os from typing import Any, Optional import ray import sglang.srt.entrypoints.engine import torch from ray.actor import ActorHandle from sglang.srt.entrypoints.http_server import ( ServerArgs, _GlobalState, _launch_subprocesses, app, set_global_state, ) from sglang.srt.managers.io_struct import ( GenerateReqInput, ReleaseMemoryOccupationReqInput, ResumeMemoryOccupationReqInput, ) from sglang.srt.managers.tokenizer_manager import ServerStatus from verl.single_controller.ray import RayClassWithInitArgs from verl.utils.config import omega_conf_to_dataclass from verl.workers.config import HFModelConfig, RolloutConfig from verl.workers.rollout.replica import RolloutMode, RolloutReplica, TokenOutput from verl.workers.rollout.sglang_rollout.sglang_rollout import ServerAdapter, _set_envs_and_config from verl.workers.rollout.utils import get_free_port, is_valid_ipv6_address, run_unvicorn logger = logging.getLogger(__file__) logger.setLevel(logging.INFO) @ray.remote(num_cpus=1) class SGLangHttpServer: """SGLang http server in single node, this is equivalent to launch server with command line: ``` python -m sglang.launch_server --node-rank 0 --nnode 1 ... ``` Args: config (DictConfig): full config. rollout_mode (RolloutMode): rollout mode. replica_rank (int): replica rank, a replica may contain multiple nodes. node_rank (int): node rank. nnodes (int): number of nodes. cuda_visible_devices (str): cuda visible devices. """ def __init__( self, config: RolloutConfig, model_config: HFModelConfig, rollout_mode: RolloutMode, workers: list[ActorHandle], replica_rank: int, node_rank: int, nnodes: int, cuda_visible_devices: str, ): print(f"SGLang http server: {rollout_mode=}, {replica_rank=}, {node_rank=}, {nnodes=}, {cuda_visible_devices=}") os.environ["CUDA_VISIBLE_DEVICES"] = cuda_visible_devices assert torch.cuda.is_available(), "SGLang http server should run on GPU node" self.config: RolloutConfig = omega_conf_to_dataclass(config) self.model_config: HFModelConfig = omega_conf_to_dataclass(model_config, dataclass_type=HFModelConfig) self.config.max_model_len = self.config.prompt_length + self.config.response_length self.rollout_mode = rollout_mode self.workers = workers self.replica_rank = replica_rank self.node_rank = node_rank self.nnodes = nnodes if self.rollout_mode != RolloutMode.HYBRID and self.config.load_format == "dummy": logger.warning(f"rollout mode is {self.rollout_mode}, load_format is dummy, set to auto") self.config.load_format = "auto" # used for http server self._server_address = ray.util.get_node_ip_address().strip("[]") self._server_port = None # used for NCCL process group if self.node_rank == 0: self._master_address = self._server_address self._master_port, self._master_sock = get_free_port(self._server_address) logger.info( f"SGLangHttpServer, replica_rank: {self.replica_rank}, " f"master address: {self._master_address}, port: {self._master_port}" ) else: self._master_address = None self._master_port = None def get_master_address(self): """Get master address and port for init NCCL process group.""" return self._master_address, self._master_port def get_server_address(self): """Get http server address and port.""" assert self._server_port is not None, "http server is not launched, port is None" return self._server_address, self._server_port async def launch_server(self, master_address: str = None, master_port: int = None): if self.node_rank != 0: assert master_address and master_port, "non-master node should provide master address 
and port" self._master_address = master_address self._master_port = master_port engine_kwargs = self.config.get("engine_kwargs", {}).get("sglang", {}) or {} attention_backend = engine_kwargs.pop("attention_backend", None) dist_init_addr = ( f"[{self._master_address}]:{self._master_port}" if is_valid_ipv6_address(self._master_address) else f"{self._master_address}:{self._master_port}" ) args = { "model_path": self.model_config.local_path, "dtype": self.config.dtype, "mem_fraction_static": self.config.gpu_memory_utilization, "disable_cuda_graph": self.config.enforce_eager, "enable_memory_saver": True, "base_gpu_id": 0, "gpu_id_step": 1, "tp_size": self.config.tensor_model_parallel_size, "dp_size": self.config.data_parallel_size, "ep_size": self.config.expert_parallel_size, "node_rank": self.node_rank, "load_format": self.config.load_format, "dist_init_addr": dist_init_addr, "nnodes": self.nnodes, "trust_remote_code": self.model_config.trust_remote_code, "max_running_requests": self.config.get("max_num_seqs", None), "log_level": "error", "mm_attention_backend": "fa3", "attention_backend": attention_backend if attention_backend is not None else "fa3", "skip_tokenizer_init": self.config.skip_tokenizer_init, **engine_kwargs, } # enable_weights_cpu_backup is supported in sglang>=0.5.3 if "enable_weights_cpu_backup" in [f.name for f in dataclasses.fields(ServerArgs)]: enable_weights_cpu_backup = True if self.rollout_mode == RolloutMode.COLOCATED else False args["enable_weights_cpu_backup"] = enable_weights_cpu_backup # NOTE: We can't directly call SGLang's launch_server since it's not an async function. # https://github.com/sgl-project/sglang/blob/main/python/sglang/srt/entrypoints/http_server.py sglang.srt.entrypoints.engine._set_envs_and_config = _set_envs_and_config os.environ["SGLANG_BLOCK_NONZERO_RANK_CHILDREN"] = "0" server_args = ServerArgs(**args) self.tokenizer_manager, self.template_manager, self.scheduler_info = _launch_subprocesses( server_args=server_args ) # In multi-node cases, non-zero rank nodes should not launch http server. if self.node_rank > 0: return set_global_state( _GlobalState( tokenizer_manager=self.tokenizer_manager, template_manager=self.template_manager, scheduler_info=self.scheduler_info, ) ) app.is_single_tokenizer_mode = True self._server_port, self._server_task = await run_unvicorn(app, server_args, self._server_address) self.tokenizer_manager.server_status = ServerStatus.Up async def wake_up(self): if self.rollout_mode == RolloutMode.HYBRID: # Call all workers to switch between trainer mode and rollout mode. await asyncio.gather(*[worker.wake_up.remote() for worker in self.workers]) elif self.rollout_mode == RolloutMode.COLOCATED: # Directly call engine to wake up without sync weights. # FIXME(@wuxibin): sglang seems resume with random weights. 
obj = ResumeMemoryOccupationReqInput(tags=["kv_cache", "weights"]) await self.tokenizer_manager.resume_memory_occupation(obj, None) await self.tokenizer_manager.flush_cache() elif self.rollout_mode == RolloutMode.STANDALONE: logger.info("skip wake_up in standalone mode") async def sleep(self): if self.rollout_mode == RolloutMode.HYBRID: await asyncio.gather(*[worker.sleep.remote() for worker in self.workers]) elif self.rollout_mode == RolloutMode.COLOCATED: obj = ReleaseMemoryOccupationReqInput(tags=["kv_cache", "weights"]) await self.tokenizer_manager.release_memory_occupation(obj, None) elif self.rollout_mode == RolloutMode.STANDALONE: logger.info("skip sleep in standalone mode") async def generate( self, prompt_ids: torch.Tensor, sampling_params: dict[str, Any], request_id: str, image_data: Optional[list[Any]] = None, ) -> TokenOutput: """Generate sequence with token-in-token-out.""" # TODO(@wuxibin): switch to `/generate` http endpoint once multi-modal support ready. max_new_tokens = min(self.config.response_length, self.config.max_model_len - len(prompt_ids) - 1) sampling_params["max_new_tokens"] = max_new_tokens return_logprob = sampling_params.pop("logprobs", False) request = GenerateReqInput( rid=request_id, input_ids=prompt_ids, sampling_params=sampling_params, return_logprob=return_logprob, image_data=image_data, ) output = await self.tokenizer_manager.generate_request(request, None).__anext__() if return_logprob: output_token_logprobs = output["meta_info"]["output_token_logprobs"] log_probs, token_ids = zip( *[(log_prob, token_ids) for log_prob, token_ids, _ in output_token_logprobs], strict=True ) else: token_ids = output["output_ids"] log_probs = None return TokenOutput(token_ids=token_ids, log_probs=log_probs) _rollout_worker_actor_cls = ray.remote(ServerAdapter) class SGLangReplica(RolloutReplica): def get_ray_class_with_init_args(self) -> RayClassWithInitArgs: """Get rollout worker actor class for colocated and standalone mode.""" worker_dict_cls = RayClassWithInitArgs( cls=_rollout_worker_actor_cls, config=self.config, model_config=self.model_config, device_mesh=None, ) return worker_dict_cls async def launch_servers(self): """Launch http server in each node.""" assert len(self.workers) == self.world_size, ( f"worker number {len(self.workers)} not equal to world size {self.world_size}" ) # get (node_id, CUDA_VISIBLE_DEVICES) of all workers worker_infos = await asyncio.gather( *[ worker.__ray_call__.remote( lambda self: (ray.get_runtime_context().get_node_id(), os.environ["CUDA_VISIBLE_DEVICES"]) ) for worker in self.workers ] ) worker_cuda_visible_devices = [worker_info[1] for worker_info in worker_infos] worker_node_ids = [worker_info[0] for worker_info in worker_infos] # create server actor in each node with node affinity and cuda visible devices for node_rank in range(self.nnodes): workers = self.workers[node_rank * self.gpus_per_node : (node_rank + 1) * self.gpus_per_node] node_cuda_visible_devices = ",".join( worker_cuda_visible_devices[node_rank * self.gpus_per_node : (node_rank + 1) * self.gpus_per_node] ) node_id = worker_node_ids[node_rank * self.gpus_per_node] name = ( f"sglang_server_{self.replica_rank}_{node_rank}" if not self.is_reward_model else f"sglang_server_reward_{self.replica_rank}_{node_rank}" ) server = SGLangHttpServer.options( scheduling_strategy=ray.util.scheduling_strategies.NodeAffinitySchedulingStrategy( node_id=node_id, soft=False, ), runtime_env={"env_vars": {"RAY_EXPERIMENTAL_NOSET_CUDA_VISIBLE_DEVICES": "1"}}, name=name, ).remote( 
config=self.config, model_config=self.model_config, rollout_mode=self.rollout_mode, workers=workers, replica_rank=self.replica_rank, node_rank=node_rank, nnodes=self.nnodes, cuda_visible_devices=node_cuda_visible_devices, ) self.servers.append(server) # launch http server in each node master_address, master_port = await self.servers[0].get_master_address.remote() await asyncio.gather( *[ server.launch_server.remote(master_address=master_address, master_port=master_port) for server in self.servers ] ) # get http server address from first server server_address, server_port = await self.servers[0].get_server_address.remote() self._server_handle = self.servers[0] self._server_address = ( f"[{server_address}]:{server_port}" if is_valid_ipv6_address(server_address) else f"{server_address}:{server_port}" ) ================================================ FILE: verl_distillation/verl/workers/rollout/sglang_rollout/http_server_engine.py ================================================ # Copyright 2025 z.ai # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # This file is adapted from multiple sources: # 1. THUDM/slime project # Original source: https://github.com/THUDM/slime/blob/main/slime/backends/sglang_utils/http_server_engine.py # Copyright 2025 z.ai # Licensed under the Apache License, Version 2.0 # 2. SGLang project # Original source: https://github.com/sgl-project/sglang/blob/main/python/sglang/srt/entrypoints/http_server_engine.py # Copyright 2023-2024 SGLang Team # Licensed under the Apache License, Version 2.0 # # Modifications made by z.ai and ModelBest Inc. include but are not limited to: # - Enhanced error handling and retry logic # - Added async support with connection pooling # - Extended functionality for distributed weight updates # - Improved logging and monitoring capabilities # - Additional configuration options and optimizations """HTTP Server Engine Adapter for SGLang. This module provides HTTP-based adapters for SGLang engines, allowing communication with SGLang servers through HTTP requests instead of direct engine calls. 
Classes: HttpServerAdapter: Synchronous HTTP adapter for SGLang engines AsyncHttpServerAdapter: Asynchronous HTTP adapter for SGLang engines Functions: launch_server_process: Launch and initialize an SGLang HTTP server process """ import asyncio import logging import multiprocessing import os import time from contextlib import asynccontextmanager from typing import Any, Callable, Optional import aiohttp import requests from sglang.srt.entrypoints.EngineBase import EngineBase from sglang.srt.entrypoints.http_server import launch_server from sglang.srt.managers.io_struct import ( UpdateWeightsFromTensorReqInput, ) from sglang.srt.server_args import ServerArgs from sglang.srt.utils import kill_process_tree # Configure logger logger = logging.getLogger(__name__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) # Default configuration constants DEFAULT_TIMEOUT = 60.0 DEFAULT_MAX_ATTEMPTS = 3 DEFAULT_RETRY_DELAY = 2.0 DEFAULT_MAX_CONNECTIONS = 2000 DEFAULT_MAX_WAIT_TIME = 300.0 def _read_response(response: requests.Response): if response.status_code == 204 or not response.content: return {} try: return response.json() except ValueError: return { "content_type": response.headers.get("Content-Type", ""), "text": response.text, } async def _read_async_response(resp: aiohttp.ClientResponse) -> dict[str, Any]: if resp.status == 204 or (resp.content_length == 0): return {} try: return await resp.json(content_type=None) except Exception: try: text = await resp.text() except Exception: return {} return { "content_type": (resp.headers.get("Content-Type") or ""), "text": text, } def launch_server_process( server_args: ServerArgs, timeout: float = DEFAULT_TIMEOUT, max_wait_time=DEFAULT_MAX_WAIT_TIME, first_rank_in_node=False, ) -> multiprocessing.Process: """Launch an SGLang HTTP server process and wait for it to be ready. This function starts a new process running an SGLang HTTP server, then waits for the server to become ready by polling its health endpoints. It ensures the server is fully operational before returning. Args: server_args (ServerArgs): Server configuration arguments including host, port, and other settings timeout (float, optional): Timeout for individual HTTP requests during health checks. Defaults to DEFAULT_TIMEOUT. Returns: multiprocessing.Process: The launched multiprocessing.Process instance Raises: RuntimeError: If the server process terminates unexpectedly during startup or cache flush TimeoutError: If server fails to become ready within reasonable time (300 seconds) requests.RequestException: If health check requests fail repeatedly Note: This function will return immediately for non-master nodes (node_rank != 0), but the process will still be started and returned. This is for consistency; except for the process obtained by node_rank = 0, other processes have no actual effect. 
""" p = multiprocessing.Process(target=launch_server, args=(server_args,)) if server_args.node_rank != 0 or not first_rank_in_node: logger.info(f"Server process started with PID {p.pid} for node rank {server_args.node_rank}", flush=True) return p p.start() base_url = server_args.url() headers = { "Content-Type": "application/json; charset=utf-8", "Authorization": f"Bearer {server_args.api_key}", } # Health check with overall timeout start_time = time.time() with requests.Session() as session: while time.time() - start_time < max_wait_time: if not p.is_alive(): raise RuntimeError("Server process terminated unexpectedly during startup") try: if server_args.is_embedding: response = session.get(f"{base_url}/health", headers=headers, timeout=timeout) else: response = session.get(f"{base_url}/health_generate", headers=headers, timeout=timeout) if response.status_code == 200: break except requests.RequestException as e: logger.debug(f"Health check failed: {e}") time.sleep(2) else: p.terminate() logger.error(f"Server in {base_url} failed to become healthy within timeout period") raise TimeoutError("Server failed to become healthy within timeout period") # Ensure cache is ready while time.time() - start_time < max_wait_time: if not p.is_alive(): raise RuntimeError("Server process terminated unexpectedly during cache flush") try: response = session.get(f"{base_url}/flush_cache", headers=headers, timeout=timeout) if response.status_code == 200: break except requests.RequestException as e: logger.debug(f"Cache flush check failed: {e}") time.sleep(2) else: p.terminate() raise TimeoutError("Server cache flush failed within timeout period") return p class HttpServerAdapter(EngineBase): """HTTP-based adapter for SGLang engines. This adapter allows interaction with SGLang engines through HTTP requests instead of direct engine calls. It launches an HTTP server process and provides methods to communicate with it via REST API calls. You can use this class to launch a server from a HttpServerAdapter instance. We recommend using this class only when you need to use http server. Otherwise, you can use Engine directly. Attributes: router_ip (Optional[str]): IP address of the router for worker registration router_port (Optional[int]): Port of the router for worker registration server_args (ServerArgs): Server configuration arguments node_rank (int): Rank of this node in distributed setup process (multiprocessing.Process): The launched server process timeout (float): HTTP request timeout in seconds max_attempts (int): Maximum number of attempts for requests retry_delay (float): Base delay between retries in seconds """ def __init__( self, router_ip: Optional[str] = None, router_port: Optional[int] = None, timeout: float = DEFAULT_TIMEOUT, max_attempts: int = DEFAULT_MAX_ATTEMPTS, retry_delay: float = DEFAULT_RETRY_DELAY, first_rank_in_node: bool = False, max_start_wait_time: float = DEFAULT_MAX_WAIT_TIME, launch_server: bool = True, **kwargs: Any, ) -> None: """Initialize the HTTP server engine adapter. Args: router_ip (Optional[str], optional): IP address of router for worker registration. Defaults to None. router_port (Optional[int], optional): Port of router for worker registration. Defaults to None. timeout (float, optional): HTTP request timeout in seconds. Defaults to DEFAULT_TIMEOUT. max_attempts (int, optional): Maximum number of retry attempts for failed requests. Defaults to DEFAULT_MAX_ATTEMPTS. retry_delay (float, optional): Base delay between retries in seconds. Defaults to DEFAULT_RETRY_DELAY. 
launch_server (bool, optional): Whether to launch the server process. Defaults to True. **kwargs (Any): Additional arguments passed to ServerArgs Note: TODO: @ChangyiYang Enable SGLang router for this http server engine If both router_ip and router_port are provided and this is the master node (node_rank == 0), the adapter will automatically register with the router. """ self.router_ip: Optional[str] = router_ip self.router_port: Optional[int] = router_port self.timeout: float = timeout self.max_attempts: int = max_attempts self.retry_delay: float = retry_delay self.server_args: ServerArgs = ServerArgs(**kwargs) self.node_rank: int = self.server_args.node_rank self.max_start_wait_time: float = max_start_wait_time logger.info( f"Launch HttpServerAdapter at: {self.server_args.host}:{self.server_args.port} with {first_rank_in_node}" ) if launch_server: self.process: multiprocessing.Process = launch_server_process( self.server_args, self.timeout, self.max_start_wait_time, first_rank_in_node ) if self.node_rank == 0 and self.router_ip and self.router_port: self._register_with_router() def _register_with_router(self) -> None: """Register worker with router with error handling. This method attempts to register the current worker with a router service. If registration fails, it logs an error but does not raise an exception, allowing the server to continue operating without router integration. Raises: Does not raise exceptions - all errors are logged and handled gracefully. """ try: url = f"http://{self.router_ip}:{self.router_port}/add_worker" params = {"url": f"http://{self.server_args.host}:{self.server_args.port}"} response = requests.post(url, params=params, timeout=self.timeout) response.raise_for_status() logger.info("Successfully registered with router") except Exception as e: logger.error(f"Failed to register with router: {e}") # Don't raise here - server can still work without router def _make_request( self, endpoint: str, payload: Optional[dict[str, Any]] = None, method: str = "POST", timeout: float = DEFAULT_TIMEOUT, only_master: bool = True, ) -> dict[str, Any]: """Make a HTTP request with retry logic and consistent error handling. Args: endpoint (str): The API endpoint to call (without leading slash) payload (Optional[Dict[str, Any]], optional): The JSON payload to send. Defaults to empty dict if None. method (str, optional): HTTP method to use. Defaults to "POST". 
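            timeout (float, optional): Accepted for signature parity with the async adapter;
                the adapter's configured self.timeout is what is applied to each request.
                Defaults to DEFAULT_TIMEOUT.
            only_master (bool, optional): If True, only the master node (node_rank == 0)
                sends the request; other ranks return an empty dict immediately.
                Defaults to True.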
Returns: Dict[str, Any]: The JSON response from the server Raises: requests.HTTPError: If the HTTP request fails with a client/server error RuntimeError: If all retry attempts are exhausted Note: - For non-master nodes (node_rank != 0), returns empty dict immediately - Uses exponential backoff for retries - Logs warnings for timeout and connection errors, errors for HTTP errors """ if only_master and self.node_rank != 0: return {} url = f"http://{self.server_args.host}:{self.server_args.port}/{endpoint}" for attempt in range(self.max_attempts): try: if method.upper() == "GET": response = requests.get(url, timeout=self.timeout) else: response = requests.post(url, json=payload or {}, timeout=self.timeout) response.raise_for_status() return _read_response(response) except requests.exceptions.Timeout: logger.warning(f"Request to {endpoint} timed out (attempt {attempt + 1})") except requests.exceptions.ConnectionError: logger.warning(f"Connection error for {endpoint} (attempt {attempt + 1})") except requests.exceptions.HTTPError as e: logger.error(f"HTTP error for {endpoint}: {e}") raise except Exception as e: logger.error(f"Unexpected error for {endpoint}: {e}") if attempt == self.max_attempts - 1: raise if attempt < self.max_attempts - 1: time.sleep(self.retry_delay * (2**attempt)) raise RuntimeError(f"Failed to complete request to {endpoint} after {self.max_attempts} attempts") def update_weights_from_tensor(self, req: UpdateWeightsFromTensorReqInput) -> dict[str, Any]: """Update model weights from tensor data. The HTTP server will only post meta data, and the real weights will be copied directly from GPUs. Args: serialized_named_tensors (List[str]): List of serialized tensor data load_format (Optional[str], optional): Format specification for loading weights. Defaults to None. flush_cache (bool, optional): Whether to flush cache after updating weights. Defaults to False. Returns: Dict[str, Any]: Server response containing update status Note: The model should be on GPUs rather than CPU for this functionality to work properly. If you encounter issues, ensure your model is loaded on GPU devices rather than CPU. """ import base64 named_tensors = req.serialized_named_tensors load_format = req.load_format flush_cache = req.flush_cache if named_tensors: serialized_named_tensors = [ base64.b64encode(named_tensor).decode("utf-8") for named_tensor in named_tensors ] else: serialized_named_tensors = [] return self._make_request( "update_weights_from_tensor", { "serialized_named_tensors": serialized_named_tensors, "load_format": load_format, "flush_cache": flush_cache, }, ) def shutdown(self) -> None: """Shutdown the HTTP server and clean up resources. This method performs the following cleanup operations: 1. Unregisters the worker from the router (if configured) 2. Terminates the server process tree All operations are performed with error handling to ensure graceful shutdown even if individual steps fail. Note: This method should be called when the adapter is no longer needed to ensure proper cleanup of resources and processes. 
""" # Unregister from router if self.router_ip and self.router_port: try: url = f"http://{self.router_ip}:{self.router_port}/remove_worker" params = {"url": f"http://{self.server_args.host}:{self.server_args.port}"} requests.post(url, params=params, timeout=5.0) # Short timeout for shutdown logger.info("Successfully unregistered from router") except Exception as e: logger.warning(f"Failed to unregister from router: {e}") # Kill server process if hasattr(self, "process") and self.process is not None: try: kill_process_tree(self.process.pid) logger.info("Server process terminated") except Exception as e: logger.error(f"Failed to terminate server process: {e}") def generate( self, prompt: Optional[str] = None, sampling_params: Optional[dict[str, Any]] = None, input_ids: Optional[list[int]] = None, image_data: Optional[Any] = None, return_logprob: bool = False, logprob_start_len: Optional[int] = None, top_logprobs_num: Optional[int] = None, token_ids_logprob: Optional[list[int]] = None, lora_path: Optional[str] = None, custom_logit_processor: Optional[Callable] = None, ) -> dict[str, Any]: """Generate text using the SGLang server. Args: prompt (Optional[str], optional): Text prompt for generation. Defaults to None. sampling_params (Optional[Dict[str, Any]], optional): Parameters controlling text generation sampling. Defaults to None. input_ids (Optional[List[int]], optional): Alternative to prompt, direct token IDs input. Defaults to None. image_data (Optional[Any], optional): Image data for multimodal generation. Defaults to None. return_logprob (bool, optional): Whether to return log probabilities. Defaults to False. logprob_start_len (Optional[int], optional): Starting length for log probability calculation. Defaults to None. top_logprobs_num (Optional[int], optional): Number of top log probabilities to return. Defaults to None. token_ids_logprob (Optional[List[int]], optional): Specific token IDs for log probability calculation. Defaults to None. lora_path (Optional[str], optional): Path to LoRA adapter weights. Defaults to None. custom_logit_processor (Optional[Callable], optional): Custom logit processing function. Defaults to None. Returns: Dict[str, Any]: Generated text and associated metadata from the server Note: Either prompt or input_ids should be provided, but not both. The response format depends on the server configuration and parameters. """ payload = { "text": prompt, "sampling_params": sampling_params, "input_ids": input_ids, "image_data": image_data, "return_logprob": return_logprob, "logprob_start_len": logprob_start_len, "top_logprobs_num": top_logprobs_num, "token_ids_logprob": token_ids_logprob, "lora_path": lora_path, "custom_logit_processor": custom_logit_processor, } # Filter out None values payload = {k: v for k, v in payload.items() if v is not None} return self._make_request("generate", payload, only_master=False) def reward_score( self, prompt: Optional[str] = None, input_ids: Optional[list[int]] = None, image_data: Optional[Any] = None, lora_path: Optional[str] = None, ) -> dict[str, Any]: assert self.server_args.is_embedding, "Score is only supported for embedding models" payload = { "text": prompt, "input_ids": input_ids, "image_data": image_data, "lora_path": lora_path, } # Filter out None values payload = {k: v for k, v in payload.items() if v is not None} return self._make_request("classify", payload, only_master=False) def flush_cache(self) -> dict[str, Any]: """Flush the cache of the server. 
This method repeatedly attempts to flush the server cache until successful. The flush operation will not return status 200 when there are pending requests. Returns: Dict[str, Any]: Server response indicating cache flush status. For non-master nodes, returns empty dict. Note: Uses retry logic with limited attempts (max_attempts * 2) to avoid infinite loops. Each retry includes a delay to allow pending requests to complete. """ if self.node_rank != 0: return {} # Use retry logic with limited attempts to avoid infinite loops for attempt in range(self.max_attempts * 2): # Allow more retries for cache flush try: response = requests.get( f"http://{self.server_args.host}:{self.server_args.port}/flush_cache", timeout=self.timeout ) if response.status_code == 200: return _read_response(response) except Exception as e: logger.warning(f"Error flushing cache (attempt {attempt + 1}): {e}") time.sleep(self.retry_delay) logger.error("Failed to flush cache after maximum attempts") return {} def release_memory_occupation(self, tags: Optional[list[str]] = None) -> dict[str, Any]: """Release GPU memory occupation temporarily. Args: tags (Optional[List[str]], optional): List of tags to specify which memory to release. If None, releases all memory. Defaults to None. ["weights", "kv_cache"] Returns: Dict[str, Any]: Server response indicating memory release status """ return self._make_request("release_memory_occupation", {"tags": tags}) def resume_memory_occupation(self, tags: Optional[list[str]] = None) -> dict[str, Any]: """Resume GPU memory occupation. Args: tags (Optional[List[str]], optional): List of tags to specify which memory to resume. If None, resumes all memory. Defaults to None. ["weights", "kv_cache"] Returns: Dict[str, Any]: Server response indicating memory resume status """ return self._make_request("resume_memory_occupation", {"tags": tags}) def abort_request(self, rid: str = "", abort_all: bool = False) -> dict[str, Any]: """Abort a request. Args: rid (str): The ID of the request to abort abort_all (bool, optional): Whether to abort all requests. Defaults to False. Returns: Dict[str, Any]: Server response indicating abort status """ return self._make_request("abort_request", {"rid": rid, "abort_all": abort_all}) class AsyncHttpServerAdapter(HttpServerAdapter): """Asynchronous HTTP-based adapter for SGLang engines. This class inherits from HttpServerAdapter and adds async capabilities for non-blocking HTTP requests to the SGLang server. It provides the same functionality as the synchronous version but with async/await support. The async adapter is useful when you need to make multiple concurrent requests or integrate with async frameworks. It uses aiohttp for efficient async HTTP communication and maintains connection pooling for better performance. Attributes: max_connections (int): Maximum number of connections in the connection pool """ def __init__( self, router_ip: Optional[str] = None, router_port: Optional[int] = None, timeout: float = DEFAULT_TIMEOUT, max_attempts: int = DEFAULT_MAX_ATTEMPTS, retry_delay: float = DEFAULT_RETRY_DELAY, max_connections: int = DEFAULT_MAX_CONNECTIONS, first_rank_in_node: bool = False, launch_server: bool = True, **kwargs: Any, ) -> None: """Initialize the async HTTP server engine adapter. Args: router_ip (Optional[str], optional): IP address of router for worker registration. Defaults to None. router_port (Optional[int], optional): Port of router for worker registration. Defaults to None. timeout (float, optional): HTTP request timeout in seconds. 
                Defaults to DEFAULT_TIMEOUT.
            max_attempts (int, optional): Maximum number of retry attempts for failed
                requests. Defaults to DEFAULT_MAX_ATTEMPTS.
            retry_delay (float, optional): Base delay between retries in seconds.
                Defaults to DEFAULT_RETRY_DELAY.
            max_connections (int, optional): Maximum number of connections in the
                connection pool. Defaults to DEFAULT_MAX_CONNECTIONS.
            first_rank_in_node (bool, optional): Whether this process is the first
                rank on its node. Defaults to False.
            launch_server (bool, optional): Whether to launch the server process.
                Defaults to True.
            **kwargs (Any): Additional arguments passed to ServerArgs
        """
        super().__init__(
            router_ip,
            router_port,
            timeout,
            max_attempts,
            retry_delay,
            first_rank_in_node,
            launch_server=launch_server,
            **kwargs,
        )
        self.max_connections: int = max_connections

    @asynccontextmanager
    async def _get_session(self):
        """Context manager for safe session access.

        Yields:
            aiohttp.ClientSession: Session instance for making HTTP requests

        Note:
            This method creates a fresh TCP connector and session for each request,
            so concurrent callers never compete for a shared session. Connection
            pooling is therefore scoped to a single request and bounded by
            max_connections.
        """
        # Create a new session for each request to avoid resource competition
        connector = aiohttp.TCPConnector(
            limit=self.max_connections,
            limit_per_host=self.max_connections // 4,
            ttl_dns_cache=300,
            use_dns_cache=True,
        )
        timeout = aiohttp.ClientTimeout(total=self.timeout)
        session = aiohttp.ClientSession(connector=connector, timeout=timeout)
        try:
            yield session
        finally:
            # Always close the session to free up resources
            if not session.closed:
                await session.close()

    async def _make_async_request(
        self,
        endpoint: str,
        payload: Optional[dict[str, Any]] = None,
        method: str = "POST",
        timeout: float = DEFAULT_TIMEOUT,
        only_master: bool = True,
    ) -> dict[str, Any]:
        """Make an async HTTP request with retry logic and consistent error handling.

        Args:
            endpoint (str): The API endpoint to call (without leading slash)
            payload (Optional[Dict[str, Any]], optional): The JSON payload to send.
                Defaults to empty dict if None.
            method (str, optional): HTTP method to use. Defaults to "POST".
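            timeout (float, optional): Per-call timeout in seconds, forwarded to the
                underlying aiohttp request. Defaults to DEFAULT_TIMEOUT.
            only_master (bool, optional): If True, only the master node
                (node_rank == 0) sends the request; other ranks return an empty
                dict immediately. Defaults to True.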
        Returns:
            Dict[str, Any]: The JSON response from the server

        Raises:
            aiohttp.ClientResponseError: If the HTTP request fails with a client/server error
            RuntimeError: If all retry attempts are exhausted

        Note:
            - For non-master nodes (node_rank != 0), returns empty dict immediately
            - Uses exponential backoff for retries
            - Logs warnings for timeout and connection errors, errors for HTTP errors
        """
        if only_master and self.node_rank != 0:
            return {}

        url = f"http://{self.server_args.host}:{self.server_args.port}/{endpoint}"

        for attempt in range(self.max_attempts):
            try:
                async with self._get_session() as session:
                    if method.upper() == "GET":
                        async with session.get(url, timeout=timeout) as response:
                            response.raise_for_status()
                            return await _read_async_response(response)
                    else:
                        async with session.post(url, json=payload or {}, timeout=timeout) as response:
                            response.raise_for_status()
                            return await _read_async_response(response)
            except asyncio.TimeoutError:
                logger.warning(f"Async request to {endpoint} timed out (attempt {attempt + 1})")
            except aiohttp.ClientConnectorError:
                logger.warning(f"Connection error for {endpoint} (attempt {attempt + 1})")
            except aiohttp.ClientResponseError as e:
                logger.error(f"HTTP error for {endpoint}: {e}")
                raise
            except Exception as e:
                logger.error(f"Unexpected error for {endpoint}: {e}")
                if attempt == self.max_attempts - 1:
                    raise

            if attempt < self.max_attempts - 1:
                await asyncio.sleep(self.retry_delay * (2**attempt))

        raise RuntimeError(f"Failed to complete async request to {endpoint} after {self.max_attempts} attempts")

    async def release_memory_occupation(self, tags: Optional[list[str]] = None) -> dict[str, Any]:
        """Release GPU memory occupation temporarily (async version).

        Args:
            tags (Optional[List[str]], optional): List of tags to specify which memory
                to release, e.g. ["weights", "kv_cache"]. If None, releases all memory.
                Defaults to None.

        Returns:
            Dict[str, Any]: Server response indicating memory release status
        """
        return await self._make_async_request("release_memory_occupation", {"tags": tags})

    async def resume_memory_occupation(self, tags: Optional[list[str]] = None) -> dict[str, Any]:
        """Resume GPU memory occupation (async version).

        Args:
            tags (Optional[List[str]], optional): List of tags to specify which memory
                to resume, e.g. ["weights", "kv_cache"]. If None, resumes all memory.
                Defaults to None.

        Returns:
            Dict[str, Any]: Server response indicating memory resume status
        """
        return await self._make_async_request("resume_memory_occupation", {"tags": tags})

    async def update_weights_from_tensor(
        self,
        req: UpdateWeightsFromTensorReqInput,
    ) -> dict[str, Any]:
        """Update model weights from tensor data asynchronously.

        Args:
            req (UpdateWeightsFromTensorReqInput): Request object carrying the
                serialized named tensors (`serialized_named_tensors`), an optional
                load format (`load_format`), and whether to flush the cache after
                updating weights (`flush_cache`).
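
        Example:
            A minimal sketch. `blobs` is a hypothetical list of serialized
            named-tensor byte strings prepared by the trainer, and `adapter`
            is an AsyncHttpServerAdapter instance::

                req = UpdateWeightsFromTensorReqInput(
                    serialized_named_tensors=blobs,
                    load_format=None,
                    flush_cache=True,
                )
                result = await adapter.update_weights_from_tensor(req)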
Returns: Dict[str, Any]: Server response containing update status """ import base64 named_tensors = req.serialized_named_tensors load_format = req.load_format flush_cache = req.flush_cache serialized_named_tensors = [base64.b64encode(named_tensor).decode("utf-8") for named_tensor in named_tensors] return await self._make_async_request( "update_weights_from_tensor", { "serialized_named_tensors": serialized_named_tensors, "load_format": load_format, "flush_cache": flush_cache, }, ) async def flush_cache(self) -> dict[str, Any]: """Flush the cache of the server asynchronously. Similar to the sync version, this method retries until the cache is successfully flushed. It uses async sleep between retries. Returns: Dict[str, Any]: Server response indicating cache flush status. For non-master nodes, returns empty dict. Note: Uses retry logic with limited attempts (max_attempts * 4) to avoid infinite loops. Each retry includes an async delay to allow pending requests to complete. """ if self.node_rank != 0: return {} # Use retry logic with limited attempts to avoid infinite loops for attempt in range(self.max_attempts * 4): # Allow more retries for cache flush try: async with self._get_session() as session: url = f"http://{self.server_args.host}:{self.server_args.port}/flush_cache" async with session.get(url) as response: if response.status == 200: return await _read_async_response(response) except Exception as e: logger.warning(f"Error flushing cache (attempt {attempt + 1}): {e}") await asyncio.sleep(self.retry_delay) logger.error("Failed to flush cache after maximum attempts") return {} async def generate( self, prompt: Optional[str] = None, sampling_params: Optional[dict[str, Any]] = None, input_ids: Optional[list[int]] = None, image_data: Optional[Any] = None, return_logprob: bool = False, logprob_start_len: Optional[int] = None, top_logprobs_num: Optional[int] = None, token_ids_logprob: Optional[list[int]] = None, lora_path: Optional[str] = None, custom_logit_processor: Optional[Callable] = None, ) -> dict[str, Any]: """Generate text using the SGLang server asynchronously.""" logger.info("generate() started") payload = { "text": prompt, "sampling_params": sampling_params, "input_ids": input_ids, "image_data": image_data, "return_logprob": return_logprob, "logprob_start_len": logprob_start_len, "top_logprobs_num": top_logprobs_num, "token_ids_logprob": token_ids_logprob, "lora_path": lora_path, "custom_logit_processor": custom_logit_processor, } # Filter out None values payload = {k: v for k, v in payload.items() if v is not None} # Send request response = await self._make_async_request("generate", payload, timeout=self.timeout, only_master=False) return response async def async_generate( self, prompt: Optional[str] = None, sampling_params: Optional[dict[str, Any]] = None, input_ids: Optional[list[int]] = None, image_data: Optional[Any] = None, return_logprob: bool = False, logprob_start_len: Optional[int] = None, top_logprobs_num: Optional[int] = None, token_ids_logprob: Optional[list[int]] = None, lora_path: Optional[str] = None, custom_logit_processor: Optional[Callable] = None, ) -> dict[str, Any]: """Async generate method that mirrors AsyncEngine.async_generate interface. This method provides compatibility with AsyncEngine's async_generate method by forwarding the call to the generate method. It ensures API consistency between direct engine usage and HTTP-based engine usage. Args: prompt (Optional[str], optional): Text prompt for generation. Defaults to None. 
sampling_params (Optional[Dict[str, Any]], optional): Parameters controlling text generation sampling. Defaults to None. input_ids (Optional[List[int]], optional): Alternative to prompt, direct token IDs input. Defaults to None. image_data (Optional[Any], optional): Image data for multimodal generation. Defaults to None. return_logprob (bool, optional): Whether to return log probabilities. Defaults to False. logprob_start_len (Optional[int], optional): Starting length for log probability calculation. Defaults to None. top_logprobs_num (Optional[int], optional): Number of top log probabilities to return. Defaults to None. token_ids_logprob (Optional[List[int]], optional): Specific token IDs for log probability calculation. Defaults to None. lora_path (Optional[str], optional): Path to LoRA adapter weights. Defaults to None. custom_logit_processor (Optional[Callable], optional): Custom logit processing function. Defaults to None. Returns: Dict[str, Any]: Generated text and associated metadata from the server Note: This method is provided for API compatibility with AsyncEngine. It forwards all calls to the generate method. """ return await self.generate( prompt=prompt, sampling_params=sampling_params, input_ids=input_ids, image_data=image_data, return_logprob=return_logprob, logprob_start_len=logprob_start_len, top_logprobs_num=top_logprobs_num, token_ids_logprob=token_ids_logprob, lora_path=lora_path, custom_logit_processor=custom_logit_processor, ) async def reward_score( self, prompt: Optional[str] = None, input_ids: Optional[list[int]] = None, image_data: Optional[Any] = None, lora_path: Optional[str] = None, ) -> dict[str, Any]: logger.info("reward_score() started") payload = { "text": prompt, "input_ids": input_ids, "image_data": image_data, "lora_path": lora_path, } # Filter out None values payload = {k: v for k, v in payload.items() if v is not None} # Send request response = await self._make_async_request("classify", payload, timeout=self.timeout, only_master=False) return response async def async_reward_score( self, prompt: Optional[str] = None, input_ids: Optional[list[int]] = None, image_data: Optional[Any] = None, lora_path: Optional[str] = None, ) -> dict[str, Any]: return await self.reward_score( prompt=prompt, input_ids=input_ids, image_data=image_data, lora_path=lora_path, ) async def abort_request(self, rid: str = "", abort_all: bool = False) -> dict[str, Any]: """Abort a request asynchronously. Args: rid (str): The ID of the request to abort abort_all (bool, optional): Whether to abort all requests. Defaults to False. Returns: Dict[str, Any]: Server response indicating abort status """ return await self._make_async_request("abort_request", {"rid": rid, "abort_all": abort_all}) ================================================ FILE: verl_distillation/verl/workers/rollout/sglang_rollout/sglang_rollout.py ================================================ # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import asyncio import logging import multiprocessing as mp import os from copy import deepcopy from json import JSONDecodeError from typing import Any, Generator, Optional from uuid import uuid4 import numpy as np import ray import sglang.srt.entrypoints.engine import torch import torch.distributed as dist from sglang.srt.managers.io_struct import ( ReleaseMemoryOccupationReqInput, ResumeMemoryOccupationReqInput, UpdateWeightsFromTensorReqInput, ) from sglang.srt.sampling.sampling_params import SamplingParams from sglang.srt.server_args import ServerArgs from sglang.srt.utils import ( assert_pkg_version, get_open_port, is_cuda, set_prometheus_multiproc_dir, set_ulimit, ) from sglang.srt.weight_sync.utils import update_weights as sgl_update_weights from tensordict import TensorDict from torch.distributed.device_mesh import DeviceMesh, init_device_mesh from torch.nn.utils.rnn import pad_sequence from transformers import PreTrainedTokenizer, PreTrainedTokenizerFast, ProcessorMixin from verl import DataProto from verl.interactions.base import BaseInteraction from verl.interactions.utils.interaction_registry import initialize_interactions_from_config from verl.third_party.sglang import parallel_state as sglang_ps from verl.tools.base_tool import BaseTool from verl.tools.schemas import OpenAIFunctionCallSchema, OpenAIFunctionParsedSchema, OpenAIFunctionToolCall from verl.tools.utils.tool_registry import initialize_tools_from_config from verl.utils.device import get_visible_devices_keyword from verl.utils.net_utils import is_ipv6 from verl.utils.profiler import GPUMemoryLogger from verl.utils.torch_functional import get_response_mask, pad_sequence_to_length from verl.workers.config import HFModelConfig, RolloutConfig from verl.workers.rollout.base import BaseRollout from verl.workers.rollout.schemas import ( AsyncRolloutRequest, AsyncRolloutRequestStateEnum, FinishReasonTypeEnum, ) from verl.workers.rollout.sglang_rollout.http_server_engine import AsyncHttpServerAdapter from verl.workers.rollout.sglang_rollout.utils import broadcast_pyobj, get_named_tensor_buckets from verl.workers.rollout.utils import is_valid_ipv6_address try: from sglang.srt.function_call.function_call_parser import FunctionCallParser except ImportError: from sglang.srt.function_call_parser import FunctionCallParser try: from sglang.srt.entrypoints.openai.protocol import Tool except ImportError: from sglang.srt.openai_api.protocol import Tool # compatible with sglang 0.5.3 try: from sglang.srt.utils import get_ip except ImportError: from sglang.srt.utils import get_local_ip_auto as get_ip logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) # patch to avoid issue https://github.com/sgl-project/sglang/issues/6723 def _set_envs_and_config(server_args: ServerArgs): # Set global environments os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" os.environ["NCCL_CUMEM_ENABLE"] = "0" os.environ["NCCL_NVLS_ENABLE"] = str(int(server_args.enable_nccl_nvls)) os.environ["TORCH_NCCL_AVOID_RECORD_STREAMS"] = "1" os.environ["CUDA_DEVICE_MAX_CONNECTIONS"] = "4" os.environ["CUDA_MODULE_LOADING"] = "AUTO" # Set prometheus env vars if server_args.enable_metrics: set_prometheus_multiproc_dir() # Set ulimit set_ulimit() # Check flashinfer version if server_args.attention_backend == "flashinfer": assert_pkg_version( "flashinfer_python", "0.2.5", "Please uninstall the old version and 
reinstall the latest version by following the instructions at https://docs.flashinfer.ai/installation.html.",
        )
    if is_cuda():
        assert_pkg_version(
            "sgl-kernel",
            "0.1.1",
            "Please reinstall the latest version with `pip install sgl-kernel --force-reinstall`",
        )

    # Set mp start method
    mp.set_start_method("spawn", force=True)


sglang.srt.entrypoints.engine._set_envs_and_config = _set_envs_and_config


# Because chatCompletion is an async method, it makes the whole Ray actor an async actor,
# which cannot call loop.run_until_complete. So we make the engine an async class.
class AsyncEngine(sglang.srt.entrypoints.engine.Engine):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    async def release_memory_occupation(self, tags: Optional[list[str]] = None):
        """Release GPU occupation temporarily."""
        if tags is None:
            obj = ReleaseMemoryOccupationReqInput()
        else:
            obj = ReleaseMemoryOccupationReqInput(tags=tags)
        return await self.tokenizer_manager.release_memory_occupation(obj, None)

    async def resume_memory_occupation(self, tags: Optional[list[str]] = None):
        """Resume GPU occupation."""
        if tags is None:
            obj = ResumeMemoryOccupationReqInput()
        else:
            obj = ResumeMemoryOccupationReqInput(tags=tags)
        return await self.tokenizer_manager.resume_memory_occupation(obj, None)

    async def update_weights_from_tensor(self, update_weights_request: UpdateWeightsFromTensorReqInput):
        return await self.tokenizer_manager.update_weights_from_tensor(update_weights_request, None)

    async def flush_cache(self):
        return await self.tokenizer_manager.flush_cache()

    async def abort_request(self, rid: str = "", abort_all: bool = False):
        """Abort a specific request or all requests.

        Args:
            rid: The request ID to abort. If empty and abort_all is False, no action is taken.
            abort_all: If True, abort all running requests regardless of rid.
        """
        return self.tokenizer_manager.abort_request(rid=rid, abort_all=abort_all)


# NOTE(sgm): add for verl. We can optimize it by making
# the dataloader yield List[int] without padding.
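# For illustration (hypothetical values): with pad_token_id = 0,
# _pre_process_inputs(0, torch.tensor([0, 0, 5, 6, 7])) returns tensor([5, 6, 7]),
# i.e. the left padding is stripped so the engine receives only real prompt tokens.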
def _pre_process_inputs( pad_token_id, prompt_token_ids: torch.Tensor, ) -> torch.Tensor: # remove the left padding in the prompt token_id non_pad_index = torch.nonzero(prompt_token_ids != pad_token_id, as_tuple=False)[0][0] return prompt_token_ids[non_pad_index:] def _extract_logprob_from_output(output): """ extract log_prob from single sglang inference output """ def _map_each_response(resp): input_token_logprobs = resp["meta_info"]["input_token_logprobs"] log_probs, output_token_ids = zip( *[(log_prob, token_ids) for log_prob, token_ids, _ in input_token_logprobs[1:]], strict=False ) return torch.tensor(output_token_ids), torch.tensor(log_probs) output_token_ids, log_probs = _map_each_response(output) return output_token_ids, log_probs # NOTE(linjunrong): adhoc def _post_process_outputs(processing_class, output): try: # This is when processing_class is a processor tokenizer = processing_class.tokenizer except AttributeError: try: # This is when processing_class is a tokenizer tokenizer = processing_class except AttributeError as e: raise ValueError(f"Cannot get tokenizer from processing_class {processing_class}") from e def _map_each_response(resp): output_token_logprobs = resp["meta_info"]["output_token_logprobs"] log_probs, output_token_ids = zip( *[(log_prob, token_ids) for log_prob, token_ids, _ in output_token_logprobs], strict=True ) return torch.tensor(output_token_ids), torch.tensor(log_probs) out_map = map(lambda x: _map_each_response(x), output) batched_output_token_ids = [] batched_logprobs = [] for output_token_ids, log_probs in out_map: batched_output_token_ids.append(output_token_ids) batched_logprobs.append(log_probs) pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id batched_output_token_ids = pad_sequence(batched_output_token_ids, batch_first=True, padding_value=pad_token_id) if len(batched_logprobs) > 0: batched_logprobs = pad_sequence(batched_logprobs, batch_first=True, padding_value=pad_token_id) return batched_output_token_ids, batched_logprobs def get_tool_call_parser_type( processing_class: PreTrainedTokenizer | PreTrainedTokenizerFast | ProcessorMixin, ) -> str: items = FunctionCallParser.ToolCallParserEnum.items() if "gpt-oss" in getattr(processing_class, "name_or_path", "").lower(): logger.debug(f"gpt-oss model detected from name_or_path: {processing_class.name_or_path}") logger.debug("Using 'gpt-oss' tool call parser.") return "gpt-oss" for parser_type, parser_cls in items: parser = parser_cls() try: # This is when processing_class is a tokenizer tokenizer_vocab = processing_class.get_vocab() except AttributeError: try: # This is when processing_class is a processor tokenizer_vocab = processing_class.tokenizer.get_vocab() except AttributeError as e: raise ValueError(f"Cannot get vocab from processing_class {processing_class}") from e if parser.bot_token.strip() in tokenizer_vocab and ( parser.eot_token == "" or parser.eot_token.strip() in tokenizer_vocab ): return parser_type else: raise ValueError(f"No tool call parser found for processing_class {processing_class}") class SGLangRollout(BaseRollout): def __init__( self, config: RolloutConfig, model_config: HFModelConfig, device_mesh: DeviceMesh, ): super().__init__(config, model_config, device_mesh) actor_module = model_config.local_path processing_class = model_config.get_processor() model_hf_config = model_config.hf_config trust_remote_code = model_config.trust_remote_code port = None kwargs = {} 
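        # Initialization order below: tools and interactions first, then the
        # distributed (TP) environment, config verification, the SGLang inference
        # engine, and finally the default sampling parameters.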
os.environ.setdefault("SGL_DISABLE_TP_MEMORY_INBALANCE_CHECK", "true") ( self._tool_schemas, self._tool_map, self._tool_call_parser_type, self._sgl_tools, self._function_call_parser, ) = self._initialize_tools(config, processing_class) self.interaction_map: dict[str, BaseInteraction] = self._initialize_interactions(config) # If turn on `free_cache_engine`, SGLang engine's KV cache # will be freed after each `generate_sequences` call. logger.info( f"tool_schemas: {self._tool_schemas}, tool_map: {self._tool_map}, tool_call_parser_type: " f"{self._tool_call_parser_type}, sgl_tools: {self._sgl_tools}, function_call_parser: " f"{self._function_call_parser}" ) self._init_distributed_env(device_mesh_cpu=None, **kwargs) self._verify_config(model_hf_config=model_hf_config) # initialize the inference engine self._init_inference_engine(trust_remote_code, actor_module, port) self._init_sampling_params(**kwargs) self.processing_class = processing_class try: # This is when processing_class is a tokenizer self.pad_token_id = self.processing_class.pad_token_id except AttributeError: try: # This is when processing_class is a processor self.pad_token_id = self.processing_class.tokenizer.pad_token_id except AttributeError as e: raise ValueError(f"Cannot get pad_token_id from processing_class {self.processing_class}") from e def _init_distributed_env(self, device_mesh_cpu, **kwargs): self._device_mesh_cpu = device_mesh_cpu os.environ.setdefault("SGL_DISABLE_TP_MEMORY_INBALANCE_CHECK", "true") self.tensor_parallel_size = self.config.get("tensor_model_parallel_size", 1) assert self.tensor_parallel_size <= dist.get_world_size(), ( "tensor parallel size should be less than or equal to the world size" ) self.train_tp = kwargs.get("train_tp", None) if self.train_tp is not None: # deployed with megatron os.environ["CUDA_TIMER_STREAM_KAFKA_ENABLE"] = "0" os.environ["MEGATRON_IMPORT_TIMERS"] = "0" train_tp = kwargs.get("train_tp", None) num_tp_per_train_tp = train_tp // self.tensor_parallel_size sglang_ps.initialize_parallel_state( tensor_model_parallel_size=self.tensor_parallel_size, num_tp_per_train_tp=num_tp_per_train_tp, ) tp_size = self.tensor_parallel_size world_size = int(os.getenv("WORLD_SIZE", "-1")) # init device mesh if self._device_mesh_cpu is None: device_mesh_kwargs = dict( mesh_shape=(world_size // tp_size, tp_size, 1), mesh_dim_names=["dp", "tp", "pp"], ) self._device_mesh_cpu = init_device_mesh("cpu", **device_mesh_kwargs) self._rank = self._device_mesh_cpu.get_rank() self._tp_rank = self._device_mesh_cpu["tp"].get_local_rank() self._tp_size = self._device_mesh_cpu["tp"].size() if self._rank == 0: logger.info(f"_init_distributed_env: :tp_world: {self._tp_size}, global_world: {world_size}") # get tp_rank of this process in this tp group visible_devices = [None] * self._device_mesh_cpu.size(1) devices_keyword = get_visible_devices_keyword() torch.distributed.all_gather_object( visible_devices, os.environ[devices_keyword], self._device_mesh_cpu.get_group("tp") ) self.visible_devices_set = set(",".join(visible_devices).split(",")) os.environ[devices_keyword] = ",".join(sorted(list(self.visible_devices_set), key=int)) def _verify_config(self, model_hf_config): if not self.config.get("max_model_len", None): self.config.max_model_len = self.config.prompt_length + self.config.response_length assert ( self.config.max_model_len >= self.config.prompt_length + self.config.response_length ), f"""max_model_len should be greater than total sequence length (prompt_length + response_length): 
            {self.config.max_model_len} >= {self.config.prompt_length} + {self.config.response_length}"""

        max_position_embeddings = None
        if hasattr(model_hf_config, "max_position_embeddings"):
            max_position_embeddings = model_hf_config.max_position_embeddings
        elif hasattr(model_hf_config, "llm_config") and hasattr(model_hf_config.llm_config, "max_position_embeddings"):
            max_position_embeddings = model_hf_config.llm_config.max_position_embeddings
        elif hasattr(model_hf_config, "text_config") and hasattr(
            model_hf_config.text_config, "max_position_embeddings"
        ):
            max_position_embeddings = model_hf_config.text_config.max_position_embeddings
        if max_position_embeddings is None:
            raise ValueError("max_position_embeddings not found in model_hf_config")

        rope_scaling_config = getattr(model_hf_config, "rope_scaling", None)
        if not rope_scaling_config:
            assert max_position_embeddings >= self.config.prompt_length + self.config.response_length, (
                "model context length should be greater than total sequence length"
            )
        else:
            # handle the case where there's a length-extension factor
            # see https://qwen.readthedocs.io/en/latest/deployment/vllm.html#extended-context-support
            # for using yarn as an example
            rope_scaling_factor = rope_scaling_config.get("factor", 1.0)
            assert (
                model_hf_config.max_position_embeddings * rope_scaling_factor
                >= self.config.prompt_length + self.config.response_length
            ), (
                f"model context length should be greater than total sequence length, "
                f"got rope_scaling_factor={rope_scaling_factor} and "
                f"max_position_embeddings={model_hf_config.max_position_embeddings}"
            )

        # currently max_assistant_turns stands for the max number of tool calls
        if self.config.multi_turn.max_assistant_turns is None:
            self.config.multi_turn.max_assistant_turns = self.config.max_model_len // 3
        if self.config.multi_turn.max_user_turns is None:
            self.config.multi_turn.max_user_turns = self.config.max_model_len // 3

    def _init_inference_engine(self, trust_remote_code, actor_module, port):
        # initialize the inference engine
        nnodes = -(-self._tp_size // len(self.visible_devices_set))
        if nnodes > 1:
            ip = get_ip()
            port = get_open_port() if port is None else port
            [ip, port] = broadcast_pyobj(
                [ip, port],
                rank=self._rank,
                dist_group=self._device_mesh_cpu.get_group("tp"),
                src=self._device_mesh_cpu["tp"].mesh[0].item(),
                force_cpu_device=False,
            )
            dist_init_addr = f"[{ip}]:{port}" if is_ipv6(ip) else f"{ip}:{port}"
        else:
            dist_init_addr = None

        load_format = "dummy" if self.config.load_format.startswith("dummy") else self.config.load_format
        tp_size_per_node = self._tp_size // nnodes
        node_rank = self._tp_rank // tp_size_per_node
        first_rank_in_node = self._tp_rank % tp_size_per_node == 0

        engine_kwargs = self.config.get("engine_kwargs", {}).get("sglang", {}) or {}
        engine_kwargs = {key: val for key, val in engine_kwargs.items() if val is not None}
        # attention backend will be changed to fa3 if not specified
        attention_backend = engine_kwargs.pop("attention_backend", None)
        max_running_requests = self.config.get("max_num_seqs", None)

        try:
            is_server_mode = self.config.sglang_rollout_mode == "server"
        except Exception:
            is_server_mode = False
        effective_first = first_rank_in_node or is_server_mode

        if self.config.mode == "async" and not self.config.skip_tokenizer_init:
            raise ValueError("async mode requires skip_tokenizer_init to be True")

        backend = attention_backend if attention_backend is not None else "fa3"
        sglang_port = int(os.getenv("SGLANG_PORT", "30000")) + (dist.get_rank() * 2)
        if effective_first:
            os.environ["SGLANG_BLOCK_NONZERO_RANK_CHILDREN"] = "0"
            args = {
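                # Each rank derives a stride-2 port pair from SGLANG_PORT: `port`
                # serves the HTTP server and `nccl_port` (= port + 1) is used for
                # distributed initialization.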
"model_path": actor_module, "dtype": self.config.dtype, "mem_fraction_static": self.config.gpu_memory_utilization, "enable_memory_saver": True, "base_gpu_id": 0, "gpu_id_step": 1, "tp_size": self._tp_size, "node_rank": node_rank, "load_format": load_format, "dist_init_addr": dist_init_addr, "nnodes": nnodes, "trust_remote_code": trust_remote_code, "max_running_requests": max_running_requests, # NOTE(linjunrong): add rank to prevent SGLang generate same port inside PortArgs.init_new # when random.seed is being set during training "port": sglang_port, "nccl_port": sglang_port + 1, # NOTE(Chenyang): if you want to debug the SGLang engine output # please set the following parameters # Otherwise, it will make the engine run too slow "log_level": "info", # "log_level": "error", # log_requests=True, # log_requests_level=2, # NOTE(Chenyang): turn on max_running_requests to set the max concurrent running requests # max_running_requests=1, "mm_attention_backend": backend, "attention_backend": backend, # In async mode, we want token in token out. "skip_tokenizer_init": self.config.skip_tokenizer_init, "dist_timeout": 1800, } if is_server_mode: # add server specific args args["first_rank_in_node"] = first_rank_in_node args["timeout"] = self.config.server["timeout"] args["max_attempts"] = self.config.server["max_attempts"] args["retry_delay"] = self.config.server["retry_delay"] args["max_connections"] = self.config.server["max_connections"] args["max_start_wait_time"] = self.config.server["max_start_wait_time"] self._engine = AsyncHttpServerAdapter(**args) else: self._engine = AsyncEngine(**args) else: self._engine = None self.sharding_manager = None self.is_sleep = True def _init_sampling_params(self, **kwargs): kwargs = dict( n=1, max_new_tokens=self.config.response_length, presence_penalty=0.0, frequency_penalty=0.0, repetition_penalty=self.config.get("repetition_penalty", 1.0), ) # supporting adding any sampling params from the config file for k in self.config.keys(): if hasattr(SamplingParams(), str(k)) or "stop" in str(k): kwargs[k] = self.config.get(k) kwargs["n"] = 1 # already repeat in ray_trainer self.sampling_params = kwargs def _initialize_tools(self, config, processing_class): """Initialize tools from configuration. Args: config: Configuration object containing tool-related settings, specifically `config.multi_turn.tool_config_path`. tokenizer: The tokenizer instance used for parsing tool calls from the model's generated text. Returns: tuple: A tuple containing: - tool_schemas (list[dict]): OpenAI-formatted JSON schemas defining each tool's capabilities. - tool_map (dict[str, BaseTool]): A dictionary mapping tool names to their executable `BaseTool` objects. - tool_call_parser_type (str): The identifier for the specific parser type (e.g., 'json_mode', 'tool_code') used to extract tool calls. - sgl_tools (list[sglang.srt.openai_api.protocol.Tool]): Tool definitions optimized for SGLang's internal engine. - function_call_parser (sglang.srt.function_call_parser.FunctionCallParser): The active parser instance responsible for extracting structured tool calls from model outputs. 
""" if config.multi_turn.tool_config_path is None: return [], {}, None, [], None tools_config_file = config.multi_turn.tool_config_path tool_list = initialize_tools_from_config(tools_config_file) logger.info(f"Initialize tools from configuration.: tool_list: {tool_list}") tool_schemas = [tool.get_openai_tool_schema().model_dump() for tool in tool_list] tool_map = {tool.name: tool for tool in tool_list} tool_call_parser_type = get_tool_call_parser_type(processing_class) sgl_tools = [Tool.model_validate(tool_schema) for tool_schema in tool_schemas] function_call_parser = FunctionCallParser( sgl_tools, tool_call_parser_type, ) return ( tool_schemas, tool_map, tool_call_parser_type, sgl_tools, function_call_parser, ) def _initialize_interactions(self, config): """Initialize interactions from configuration. Returns: dict[str, BaseInteraction]: A dictionary mapping interaction names to interaction instances. """ if config.multi_turn.interaction_config_path is None: return {} interaction_config_file = config.multi_turn.interaction_config_path interaction_map = initialize_interactions_from_config(interaction_config_file) logger.info(f"Initialize interactions from configuration: interaction_map: {list(interaction_map.keys())}") return interaction_map @GPUMemoryLogger(role="sglang rollout", logger=logger) @torch.no_grad() def generate_sequences(self, prompts: DataProto, **kwargs) -> DataProto: """Generate sequences for a batch of prompts. Args: batch (DataProto): Input batch. Returns: DataProto: Output batch. - prompts: [bsz, prompt_length], prompt token ids from dataset. - responses: [bsz, response_length], output token ids include response tokens from LLM generation and observation tokens from tool_calls. - response_mask: [bsz, response_length], 1 for LLM generated tokens, 0 for observation/padding tokens. - input_ids: [bsz, prompt_length + response_length], whole sequence token ids, including prompt tokens and response tokens. - attention_mask: [bsz, prompt_length + response_length], 0 for padding tokens, 1 for other tokens. - position_ids: [bsz, prompt_length + response_length], incremental position ids. For multi-turn conversations: responses: |<- LLM generation ->|<- tool_calls ->|<- LLM generation ->|<- padding ->| response_mask: | 1, 1, 1, ..., 1, 1 | 0, 0, .., 0, 0 | 1, 1, 1, ..., 1, 1 | 0, 0, ..., 0| """ if self.config.multi_turn.enable: return self._req_level_generate_sequences(prompts, **kwargs) return self._batch_level_generate_sequences(prompts, **kwargs) @GPUMemoryLogger(role="sglang rollout", logger=logger) @torch.no_grad() def _batch_level_generate_sequences(self, prompts: DataProto, **kwargs) -> DataProto: """Generates single-turn sequences for a batch of prompts. For single-turn generation, all prompts are processed in one request. `_batch_level_generate_sequences` involves: 1. Extracting and pre-processing prompt token IDs from the input `prompts`. This includes handling padding and preparing raw token ID lists. 2. Preparing inputs for the SGLang engine, including multi-modal data if present. 3. Invoking the SGLang engine (`self._engine.async_generate`, an async coroutine) with the batch of processed inputs and specified sampling parameters on the master TP rank. 4. Broadcasting the results from the master TP rank to all other TP ranks. 5. Post-processing the engine's output to format the generated token IDs and (if applicable) log probabilities. 6. Constructing the final sequences by concatenating original prompts with the generated responses. 7. 
Updating attention masks and position IDs to reflect the full concatenated sequences. 8. If `self.config.free_cache_engine` is true, the SGLang engine's KV cache is flushed after generation on the master TP rank. Args: prompts: A `DataProto` object containing the batch of input prompts, including tensor data (like `input_ids`, `attention_mask`) and meta-information (like `eos_token_id`, `do_sample`). **kwargs: Additional keyword arguments that can override the default sampling parameters (e.g., `temperature`, `top_p`, `max_new_tokens`). These are temporarily applied using `update_sampling_params`. Returns: DataProto: A `DataProto` object containing the batch of generated sequences. This includes tensors for `prompts` (original input IDs), `responses` (generated token IDs), `input_ids` (concatenated prompt and response), `attention_mask`, and `position_ids` for the full sequences. Note that in GRPO, if the prompts are validated, we repeat the prompts for rollout.n times in ray_trainer. Thus we do not need to repeat the prompts here and set the sampling parameter n to 1. """ # input ids: (bs, prompt_length), left-padded idx = prompts.batch["input_ids"] # attention_mask: (bs, seq_length), left-padded attention_mask = prompts.batch["attention_mask"] position_ids = prompts.batch["position_ids"] # used to generate attention mask for the # response based on EOS token position eos_token_id = prompts.meta_info["eos_token_id"] batch_size = idx.size(0) # Extract non-tensor data non_tensor_batch = prompts.non_tensor_batch if "raw_prompt_ids" not in non_tensor_batch: non_tensor_batch["raw_prompt_ids"] = np.array( [_pre_process_inputs(self.pad_token_id, idx[i]).tolist() for i in range(batch_size)], dtype=object, ) if "multi_modal_data" in non_tensor_batch: sglang_inputs = [] for raw_prompt_ids, multi_modal_data in zip( non_tensor_batch.pop("raw_prompt_ids"), non_tensor_batch.pop("multi_modal_data"), strict=True, ): sglang_inputs.append( { "prompt_token_ids": raw_prompt_ids, "multi_modal_data": multi_modal_data, "image_data": ( multi_modal_data.get("image", None) if isinstance(multi_modal_data, dict) else None ), } ) else: sglang_inputs = [ {"prompt_token_ids": raw_prompt_ids} for raw_prompt_ids in non_tensor_batch.pop("raw_prompt_ids") ] for input_data in sglang_inputs: # Ensure token IDs are lists or numpy arrays if not isinstance(input_data["prompt_token_ids"], list | np.ndarray): raise TypeError( f"prompt_token_ids must be a list or numpy array, got {type(input_data['prompt_token_ids'])}" ) input_data["prompt_token_ids"] = list(input_data["prompt_token_ids"]) # Extract token IDs and image data for SGLang Engine idx_list = [input_data["prompt_token_ids"] for input_data in sglang_inputs] image_list = [input_data.get("image_data", None) for input_data in sglang_inputs] do_sample = prompts.meta_info.get("do_sample", True) is_validate = prompts.meta_info.get("validate", False) # Create request-level sampling parameters request_sampling_params = self.sampling_params.copy() if not do_sample: request_sampling_params.update( { "n": 1, "presence_penalty": 0.0, "frequency_penalty": 0.0, "repetition_penalty": 1.0, "temperature": 0, "top_p": 1, "top_k": -1, "ignore_eos": False, "min_new_tokens": 0, "max_new_tokens": self.config.response_length, "skip_special_tokens": True, "spaces_between_special_tokens": True, } ) elif is_validate: request_sampling_params.update( { "top_k": self.config.val_kwargs.top_k, "top_p": self.config.val_kwargs.top_p, "temperature": self.config.val_kwargs.temperature, "n": 1, # if 
validate, already repeat in ray_trainer } ) # Update with any additional kwargs request_sampling_params.update(kwargs) if self._tp_rank == 0: loop = asyncio.get_event_loop() output = loop.run_until_complete( self._engine.async_generate( prompt=None, # because we have already convert it to prompt token id sampling_params=request_sampling_params, return_logprob=True, input_ids=idx_list, image_data=image_list, ) ) else: output = None # Most naive implementation, can extract tensor and send via gloo if too slow dist.barrier() # Because the logic below requires GPU memory proportional to the batch size, so free cache first to avoid OOM if self._engine is not None and self._tp_rank == 0: loop = asyncio.get_event_loop() loop.run_until_complete(self._engine.flush_cache()) [output] = broadcast_pyobj( data=[output], rank=self._rank, dist_group=self._device_mesh_cpu["tp"].get_group(), src=self._device_mesh_cpu["tp"].mesh[0].item(), force_cpu_device=False, ) out = _post_process_outputs(self.processing_class, output) response = out[0].to(idx.device) rollout_log_probs = None if self.config.calculate_log_probs: rollout_log_probs = out[1].to(idx.device) if response.shape[1] < self.config.response_length: response = pad_sequence_to_length(response, self.config.response_length, self.pad_token_id) if self.config.calculate_log_probs: rollout_log_probs = pad_sequence_to_length( rollout_log_probs, self.config.response_length, self.pad_token_id ) seq = torch.cat([idx, response], dim=-1) response_length = response.size(1) delta_position_id = torch.arange(1, response_length + 1, device=position_ids.device) delta_position_id = delta_position_id.unsqueeze(0).repeat(batch_size, 1) if position_ids.dim() == 3: # qwen2vl mrope (batch size, 4, seq len) delta_position_id = delta_position_id.view(batch_size, 1, -1).expand(batch_size, position_ids.size(1), -1) # TODO(sgm): fix position_ids on right_pad # prompt: left pad + response: right pad # attention_mask: [0,0,0,0,1,1,1,1, | 1,1,1,0,0,0,0,0] # position_ids: [0,0,0,0,0,1,2,3, | 4,5,6,7,8,9,10,11] response_position_ids = position_ids[..., -1:] + delta_position_id position_ids = torch.cat([position_ids, response_position_ids], dim=-1) response_attention_mask = get_response_mask( response_id=response, eos_token=eos_token_id, dtype=attention_mask.dtype ) attention_mask = torch.cat((attention_mask, response_attention_mask), dim=-1) # all the tp ranks should contain the same data here. 
        # data in all ranks are valid
        batch = TensorDict(
            {
                "prompts": idx,
                "responses": response,
                "input_ids": seq,  # here input_ids become the whole sentences
                "attention_mask": attention_mask,
                "position_ids": position_ids,
            },
            batch_size=batch_size,
        )
        if self.config.calculate_log_probs:
            # we will recompute old log prob with actor
            batch["rollout_log_probs"] = rollout_log_probs

        return DataProto(batch=batch, non_tensor_batch=non_tensor_batch)

    async def _async_rollout_a_request(
        self,
        req: AsyncRolloutRequest,
        do_sample: bool = True,
        is_validate: bool = False,
        **kwargs,
    ) -> AsyncRolloutRequest:
        assert self._tp_rank == 0, "only the master process can call this function"
        _req = deepcopy(req)
        finish_reason_type = None
        output = None
        current_turns = 0
        user_turns = 0
        user_turn_rewards = []

        # Create request-level sampling parameters
        request_sampling_params = self.sampling_params.copy()
        if not do_sample:
            request_sampling_params.update(
                {
                    "n": 1,
                    "presence_penalty": 0.0,
                    "frequency_penalty": 0.0,
                    "repetition_penalty": 1.0,
                    "temperature": 0,
                    "top_p": 1,
                    "top_k": -1,
                    "ignore_eos": False,
                    "min_new_tokens": 0,
                    "max_new_tokens": self.config.response_length,
                    "skip_special_tokens": True,
                    "spaces_between_special_tokens": True,
                }
            )
        elif is_validate:
            request_sampling_params.update(
                {
                    "top_k": self.config.val_kwargs.top_k,
                    "top_p": self.config.val_kwargs.top_p,
                    "temperature": self.config.val_kwargs.temperature,
                    "n": 1,  # if validate, already repeat in ray_trainer
                }
            )

        # Update with any additional kwargs
        request_sampling_params.update(kwargs)

        while current_turns < self.config.multi_turn.max_assistant_turns:
            if _req.state == AsyncRolloutRequestStateEnum.PENDING:
                await self._handle_pending_state(_req)
                _req.state = AsyncRolloutRequestStateEnum.RUNNING
            elif _req.state == AsyncRolloutRequestStateEnum.TOOL_CALLING:
                if _req.messages[-1].tool_calls is not None:
                    parsed_tool_calls = _req.messages[-1].tool_calls
                    if self.config.skip_tokenizer_init:
                        _req.messages[-1].tool_calls = None
                    tool_call_results = await asyncio.gather(
                        *[
                            self._tool_map[tool_call.function.name].execute(
                                _req.request_id,
                                tool_call.function.arguments,
                                **_req.tools_kwargs.get(tool_call.function.name, {}).get("execute_kwargs", {}),
                            )
                            for tool_call in parsed_tool_calls
                        ]
                    )
                    _req.add_tool_response_messages(self.processing_class, [resp for resp, _, _ in tool_call_results])
                    for tool_call, (resp, reward, metrics) in zip(parsed_tool_calls, tool_call_results, strict=True):
                        _req.update_metrics(metrics, tool_call.function.name)
                    if _req.input_ids.size(-1) >= self.config.max_model_len:
                        finish_reason_type = FinishReasonTypeEnum.STOP
                        break
                    _req.state = AsyncRolloutRequestStateEnum.RUNNING
                else:
                    raise ValueError(f"Unexpected tool calling last message state: {_req.messages[-1]}")
            elif _req.state == AsyncRolloutRequestStateEnum.RUNNING:
                # Only continue the conversation if the prompt length is not greater than max_model_len - 1,
                # since SGLang raises an error when max_new_tokens + 1 is greater than max_model_len (the extra
                # token accounts for the EOS token).
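                # e.g. (hypothetical numbers) with max_model_len = 4096, a 4095-token
                # prompt leaves no room for one generated token plus the EOS slot, so
                # the request finishes with reason LENGTH below.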
prompt_length = len(_req.get_generation_prompt_ids(self.processing_class)) if prompt_length + 1 >= self.config.max_model_len: finish_reason_type = FinishReasonTypeEnum.LENGTH break # Video support is not implemented yet image_data = ( _req.multi_modal_data["image"] if _req.multi_modal_data and "image" in _req.multi_modal_data else None ) video_data = ( _req.multi_modal_data["video"] if _req.multi_modal_data and "video" in _req.multi_modal_data else None ) if video_data: logger.warning( "video support is not implemented yet, current length of video data is %d", len(video_data) ) output = await self._handle_engine_call(_req, request_sampling_params, image_data=image_data) if self.config.skip_tokenizer_init: content_ids = output["output_ids"] content = self.processing_class.decode(content_ids, skip_special_tokens=True) content_ids = torch.tensor( content_ids, dtype=_req.input_ids.dtype, device=_req.input_ids.device ).unsqueeze(0) else: content_ids = None content = output["text"] finish_reason_type = FinishReasonTypeEnum.from_str(output["meta_info"]["finish_reason"]["type"]) current_turns += 1 if finish_reason_type == FinishReasonTypeEnum.LENGTH: _req.add_assistant_message(self.processing_class, content=content, content_ids=content_ids) break else: if self._function_call_parser and self._function_call_parser.has_tool_call(content): finish_reason_type = FinishReasonTypeEnum.TOOL_CALL _req.state = AsyncRolloutRequestStateEnum.TOOL_CALLING try: normed_content, tool_calls = self._function_call_parser.parse_non_stream(content) except JSONDecodeError: normed_content = content tool_calls = [] except AttributeError: normed_content = content tool_calls = [] parsed_tool_calls = [] for tool_call in tool_calls: function, has_decode_error = OpenAIFunctionCallSchema.from_openai_function_parsed_schema( OpenAIFunctionParsedSchema( name=tool_call.name, arguments=tool_call.parameters, ) ) # Drop the tool call if its arguments has decode error if has_decode_error: continue parsed_tool_calls.append( OpenAIFunctionToolCall( id=str(tool_call.tool_index), function=function, ) ) if len(parsed_tool_calls) > 0: _req.add_assistant_message( # since the content is updated, we just pass the content not content_ids self.processing_class, content=normed_content, tool_calls=parsed_tool_calls, ) else: _req.add_assistant_message(self.processing_class, content=content, content_ids=content_ids) finish_reason_type = FinishReasonTypeEnum.STOP _req.state = AsyncRolloutRequestStateEnum.COMPLETED break else: _req.add_assistant_message( self.processing_class, content=content, content_ids=content_ids, ) if ( _req.interaction_kwargs and self.interaction_map and user_turns < self.config.multi_turn.max_user_turns and current_turns < self.config.multi_turn.max_assistant_turns ): _req.state = AsyncRolloutRequestStateEnum.INTERACTING else: # Add ending condition finish_reason_type = FinishReasonTypeEnum.STOP _req.state = AsyncRolloutRequestStateEnum.COMPLETED break elif _req.state == AsyncRolloutRequestStateEnum.INTERACTING: user_turns += 1 messages = [{"role": x.role, "content": x.content} for x in _req.messages] # Get interaction by name from interaction_kwargs interaction_name = _req.interaction_kwargs.get( "name", "gsm8k" ) # Default to gsm8k for backward compatibility if interaction_name not in self.interaction_map: raise ValueError( f"Interaction '{interaction_name}' not found in interaction_map. 
Available interactions: " f"{list(self.interaction_map.keys())}" ) interaction = self.interaction_map[interaction_name] should_terminate_sequence, content, reward, metrics = await interaction.generate_response( _req.request_id, messages, **_req.interaction_kwargs ) user_turn_rewards.append(reward) # Add turn check if ( should_terminate_sequence or user_turns > self.config.multi_turn.max_user_turns or current_turns > self.config.multi_turn.max_assistant_turns ): finish_reason_type = FinishReasonTypeEnum.STOP _req.state = AsyncRolloutRequestStateEnum.COMPLETED break else: _req.add_user_message(self.processing_class, content) if _req.input_ids.size(-1) >= self.config.max_model_len: finish_reason_type = FinishReasonTypeEnum.STOP break else: _req.state = AsyncRolloutRequestStateEnum.RUNNING if current_turns >= self.config.multi_turn.max_assistant_turns: finish_reason_type = FinishReasonTypeEnum.STOP # Calculate the reward for each tool async def calc_reward_and_release_fn(name: str, tool: BaseTool): reward = await tool.calc_reward(_req.request_id, **_req.tools_kwargs[name].get("calc_reward_kwargs", {})) await tool.release(_req.request_id, **_req.tools_kwargs[name].get("release_kwargs", {})) return name, reward tool_reward_tasks = [] for name in _req.tools_kwargs.keys(): tool = self._tool_map[name] tool_reward_tasks.append(calc_reward_and_release_fn(name, tool)) tool_reward_scores = await asyncio.gather(*tool_reward_tasks) tool_reward_scores = dict(tool_reward_scores) all_rewards = {**tool_reward_scores, **{"user_turn_rewards": user_turn_rewards}} _req.finalize(self.processing_class, all_rewards, finish_reason_type) if self.config.calculate_log_probs: debug_sampling_params = {**self.sampling_params} debug_sampling_params["max_new_tokens"] = 0 output = await self._engine.async_generate( prompt=None, input_ids=_req.input_ids, sampling_params=debug_sampling_params, return_logprob=True, logprob_start_len=0, ) # len(input_token_logprobs) = len(input_tokens)-1,because logprob of 1st token is None _req.output_token_ids, _req.rollout_log_probs = _extract_logprob_from_output(output) return _req async def _handle_engine_call( self, _req: AsyncRolloutRequest, sampling_params: dict, image_data: Optional[list[Any]] = None ) -> dict: generation_prompt_ids = _req.get_generation_prompt_ids(self.processing_class) return await self._handle_engine_generate(generation_prompt_ids, sampling_params, image_data) async def _handle_engine_generate( self, generation_prompt_ids: list[int], sampling_params: dict, image_data: Optional[list[Any]] = None ) -> dict: max_new_tokens = min(self.config.response_length, self.config.max_model_len - len(generation_prompt_ids) - 1) kwargs = sampling_params.copy() kwargs["max_new_tokens"] = max_new_tokens kwargs["n"] = 1 # group size is supported in preprocess return_logprob = kwargs.pop("logprobs", False) output = await self._engine.async_generate( input_ids=generation_prompt_ids, sampling_params=kwargs, return_logprob=return_logprob, image_data=image_data, ) return output async def _handle_pending_state(self, _req: AsyncRolloutRequest) -> AsyncRolloutRequest: if _req.tool_schemas is not None: tool_creation_coroutines = [] for tool_schema in _req.tool_schemas: tool = self._tool_map[tool_schema.function.name] create_kwargs = _req.tools_kwargs[tool.name].get("create_kwargs", {}) tool_creation_coroutines.append(tool.create(_req.request_id, **create_kwargs)) tool_creation_results = await asyncio.gather(*tool_creation_coroutines) _req.add_tool_response_messages( self.processing_class, 
[tool_result for _, tool_result in tool_creation_results] ) if _req.interaction_kwargs and self.interaction_map: interaction_kwargs = _req.interaction_kwargs # Get interaction by name from interaction_kwargs interaction_name = interaction_kwargs.get("name", "gsm8k") # Default to gsm8k for backward compatibility if interaction_name not in self.interaction_map: raise ValueError( f"Interaction '{interaction_name}' not found in interaction_map. Available interactions: " f"{list(self.interaction_map.keys())}" ) interaction = self.interaction_map[interaction_name] await interaction.start_interaction(_req.request_id, **interaction_kwargs) @GPUMemoryLogger(role="sglang rollout", logger=logger) @torch.no_grad() def _req_level_generate_sequences(self, prompts: DataProto, **kwargs) -> DataProto: """Generates multi-turn sequences for a batch of prompts. For multi-turn generation, each prompt is processed separately via `_req_level_generate_sequences` for better tool calling control. Note that in multi-turn generation, we repeat the prompts for rollout.n times in ray_trainer. Thus we do not need to repeat the prompts here and set the sampling parameter n to 1. """ # Async rollout with tools support do_sample = prompts.meta_info.get("do_sample", True) is_validate = prompts.meta_info.get("validate", False) tgt_device = prompts.batch["input_ids"].device if self._tp_rank == 0: req_list = self._preprocess_prompt_to_async_rollout_requests( prompts, ) # distinguish training and validation if is_validate: # Validation mode: process all requests without abort loop = asyncio.get_event_loop() output_req_list = loop.run_until_complete( asyncio.gather( *[self._async_rollout_a_request(req, do_sample, is_validate, **kwargs) for req in req_list], ) ) else: # add progress monitoring and abort function total_requests = len(req_list) target_completion = int(total_requests * (1 - self.config.get("over_sample_rate", 0.0))) # abort when target_completion of requests are completed completed_count = 0 aborted_requests = [] all_tasks = [] async def rollout_a_request_with_cancellation_handler(req): try: result = await self._async_rollout_a_request(req, do_sample, is_validate, **kwargs) return result except asyncio.CancelledError: # request is cancelled, return padding logger.info(f"Request {req.request_id} was cancelled, creating padding") aborted_requests.append(req.request_id) return self._create_padding_request(req) async def run_with_cancellation(): nonlocal all_tasks nonlocal completed_count all_tasks = [ asyncio.create_task(rollout_a_request_with_cancellation_handler(req)) for req in req_list ] # Wait for target_completion tasks to complete try: for completed_task in asyncio.as_completed(all_tasks): await completed_task completed_count += 1 if completed_count >= target_completion: break finally: # Cancel remaining tasks for t in all_tasks: if not t.done(): t.cancel() # Wait for all tasks to finish (including cancelled ones) final_results = await asyncio.gather(*all_tasks, return_exceptions=True) # Abort all requests in SGLang engine await self._engine.abort_request(abort_all=True) return final_results loop = asyncio.get_event_loop() output_req_list = loop.run_until_complete(run_with_cancellation()) sorted_output_req_list = sorted(output_req_list, key=lambda x: (x.batch_data_id, x.rollout_offset)) else: sorted_output_req_list = None dist.barrier() # Because the logic below requires GPU memory proportional to the batch size, so free cache first to avoid OOM if self._engine is not None and self._tp_rank == 0: loop = 
asyncio.get_event_loop() loop.run_until_complete(self._engine.flush_cache()) [sorted_output_req_list] = broadcast_pyobj( data=[sorted_output_req_list], rank=self._rank, dist_group=self._device_mesh_cpu["tp"].get_group(), src=self._device_mesh_cpu["tp"].mesh[0].item(), force_cpu_device=False, ) # Construct the batch data prompt_ids, response_ids = [], [] prompt_attention_mask, response_attention_mask = [], [] prompt_position_ids, response_position_ids = [], [] response_loss_mask = [] messages = [] reward_scores = [] multi_modal_inputs = [] request_ids = [] if self.config.calculate_log_probs: output_logprobs = [] rollout_output_token_ids = [] for req in sorted_output_req_list: assert req.state == AsyncRolloutRequestStateEnum.COMPLETED, f"Request {req.request_id} is not completed" assert ( req.input_ids.shape[-1] == req.attention_mask.shape[-1] == req.position_ids.shape[-1] == req.loss_mask.shape[-1] ), f"""Request {req.request_id} has different length of {req.input_ids.shape[-1]=}, {req.attention_mask.shape[-1]=}, {req.position_ids.shape[-1]=}, {req.loss_mask.shape[-1]=}""" error_message_lines = [ f"""Request {req.request_id} has input_ids length {req.input_ids.shape[-1]} greater than max_model_len {self.config.max_model_len}""", f"Decoded input_ids: {self.processing_class.decode(req.input_ids.squeeze(0))}", f"Decoded prompt_ids: {self.processing_class.decode(req.prompt_ids.squeeze(0))}", f"Decoded response_ids: {self.processing_class.decode(req.response_ids.squeeze(0))}", f"Messages: {req.messages}", f"Max model length: {req.max_model_len}", ] error_message = "\n".join(error_message_lines) assert req.input_ids.shape[-1] <= self.config.max_model_len, error_message prompt_ids.append(req.prompt_ids.to(tgt_device).squeeze(0)) response_ids.append(req.response_ids.to(tgt_device).squeeze(0)) if req.response_ids.shape[-1] > self.config.response_length: logger.warning( f"""{req.request_id=} has response_ids length {req.response_ids.shape[-1]} greater than max_response_len {self.config.response_length},\n{req=}""" ) prompt_attention_mask.append(req.prompt_attention_mask.to(tgt_device).squeeze(0)) response_attention_mask.append(req.response_attention_mask.to(tgt_device).squeeze(0)) prompt_position_ids.append(req.prompt_position_ids.to(tgt_device).squeeze(0)) response_position_ids.append(req.response_position_ids.to(tgt_device).squeeze(0)) response_loss_mask.append(req.response_loss_mask.to(tgt_device).squeeze(0)) messages.append({"messages": req.messages}) reward_scores.append(req.reward_scores) multi_modal_inputs.append(req.multi_modal_inputs) request_ids.append(req.request_id) if self.config.calculate_log_probs: # extract output log_probs output_logprobs.append(req.rollout_log_probs[-len(req.response_ids) :]) rollout_output_token_ids.append(req.output_token_ids[-len(req.response_ids) :]) prompt_ids = pad_sequence( prompt_ids, batch_first=True, padding_value=self.pad_token_id, padding_side="left", ) if prompt_ids.shape[-1] < self.config.prompt_length: prompt_ids = pad_sequence_to_length(prompt_ids, self.config.prompt_length, self.pad_token_id, left_pad=True) response_ids = pad_sequence(response_ids, batch_first=True, padding_value=self.pad_token_id) if response_ids.shape[-1] < self.config.response_length: response_ids = pad_sequence_to_length(response_ids, self.config.response_length, self.pad_token_id) prompt_attention_mask = pad_sequence( prompt_attention_mask, batch_first=True, padding_value=0, padding_side="left", ) if prompt_attention_mask.shape[-1] < self.config.prompt_length: 
prompt_attention_mask = pad_sequence_to_length( prompt_attention_mask, self.config.prompt_length, 0, left_pad=True ) response_attention_mask = pad_sequence(response_attention_mask, batch_first=True, padding_value=0) if response_attention_mask.shape[-1] < self.config.response_length: response_attention_mask = pad_sequence_to_length(response_attention_mask, self.config.response_length, 0) # padding prompt_position_ids if prompt_position_ids[0].dim() == 2: # if prompt_position_ids is a 2D tensor # e.g. from qwen2vl, prompt_position_ids.shape = (3, seq_len) transposed_prompt_position_ids = [p.transpose(0, 1) for p in prompt_position_ids] prompt_position_ids = pad_sequence( transposed_prompt_position_ids, batch_first=True, padding_value=0, padding_side="left" ) prompt_position_ids = prompt_position_ids.transpose(1, 2) else: prompt_position_ids = pad_sequence( prompt_position_ids, batch_first=True, padding_value=0, padding_side="left" ) if prompt_position_ids.shape[-1] < self.config.prompt_length: prompt_position_ids = pad_sequence_to_length( prompt_position_ids, self.config.prompt_length, 0, left_pad=True ) # padding response_position_ids if response_position_ids[0].dim() == 2: # if response_position_ids is a 2D tensor # e.g. from qwen2vl, response_position_ids.shape = (3, seq_len) transposed_response_position_ids = [p.transpose(0, 1) for p in response_position_ids] response_position_ids = pad_sequence( transposed_response_position_ids, batch_first=True, padding_value=0, padding_side="left" ) response_position_ids = response_position_ids.transpose(1, 2) else: response_position_ids = pad_sequence(response_position_ids, batch_first=True, padding_value=0) if response_position_ids.shape[-1] < self.config.response_length: response_position_ids = pad_sequence_to_length(response_position_ids, self.config.response_length, 0) response_loss_mask = pad_sequence(response_loss_mask, batch_first=True, padding_value=0) if response_loss_mask.shape[1] < self.config.response_length: response_loss_mask = pad_sequence_to_length(response_loss_mask, self.config.response_length, 0) if self.config.calculate_log_probs: output_logprobs = pad_sequence(output_logprobs, padding_value=0.0, batch_first=True) output_logprobs = pad_sequence_to_length( output_logprobs, pad_token_id=0.0, max_seq_len=response_ids.shape[-1] ).to(tgt_device) rollout_output_token_ids = pad_sequence( rollout_output_token_ids, padding_value=self.pad_token_id, batch_first=True ) rollout_output_token_ids = pad_sequence_to_length( rollout_output_token_ids, pad_token_id=self.pad_token_id, max_seq_len=response_ids.shape[-1] ).to(tgt_device) input_ids = torch.cat((prompt_ids, response_ids), dim=-1) attention_mask = torch.cat((prompt_attention_mask, response_attention_mask), dim=-1) position_ids = torch.cat((prompt_position_ids, response_position_ids), dim=-1) # Construct the batch data batch = TensorDict( { "prompts": prompt_ids, "responses": response_ids, "response_mask": response_loss_mask, "input_ids": input_ids, # here input_ids become the whole sentences "attention_mask": attention_mask, "position_ids": position_ids, }, batch_size=len(sorted_output_req_list), ) if self.config.calculate_log_probs: batch["rollout_log_probs"] = output_logprobs batch["rollout_output_token_ids"] = rollout_output_token_ids non_tensor_batch = { "messages": np.array(messages), "reward_scores": np.array(reward_scores), "request_id": np.array(request_ids), } is_multimodal = isinstance(self.processing_class, ProcessorMixin) and ( hasattr(self.processing_class, "image_processor") 
or hasattr(self.model_hf_config, "vision_config") ) if is_multimodal: non_tensor_batch["multi_modal_inputs"] = np.array(multi_modal_inputs, dtype=object) return DataProto( batch=batch, non_tensor_batch=non_tensor_batch, ) def _create_padding_request(self, original_req: AsyncRolloutRequest) -> AsyncRolloutRequest: # create a padding request to replace the aborted request # the padding request has the following characteristics: # 1. state is COMPLETED, but contains empty response # 2. response_loss_mask is all 0, ensuring it is ignored in loss calculation # 3. keep the original request structure, but the content is empty # create padding response_ids (all pad_token_id) padding_response_length = self.config.response_length device = original_req.input_ids.device if original_req.input_ids is not None else "cpu" padding_response_ids = torch.full( (1, padding_response_length), self.pad_token_id, dtype=torch.long, device=device, ) # create padding attention_mask (all 0) padding_response_attention_mask = torch.zeros( (1, padding_response_length), dtype=torch.long, device=device, ) # create padding position_ids if original_req.position_ids is not None: first_dim = 1 # if position_ids is a 2D tensor (e.g. qwen2vl) if original_req.position_ids.dim() == 2: first_dim = original_req.position_ids.shape[0] padding_response_position_ids = torch.zeros( (first_dim, padding_response_length), dtype=torch.long, device=device, ) else: padding_response_position_ids = None # create padding prompt_attention_mask (all 0) padding_prompt_attention_mask = torch.zeros( (1, original_req.prompt_attention_mask.shape[-1]), dtype=torch.long, device=device, ) # create padding loss_mask (all 0, ensuring it is ignored) padding_response_loss_mask = torch.zeros( (1, padding_response_length), dtype=torch.long, device=device, ) padding_req = original_req.model_copy(deep=True) padding_req.state = AsyncRolloutRequestStateEnum.COMPLETED padding_req.response_ids = padding_response_ids padding_req.prompt_attention_mask = padding_prompt_attention_mask padding_req.response_attention_mask = padding_response_attention_mask padding_req.response_position_ids = padding_response_position_ids padding_req.response_loss_mask = padding_response_loss_mask padding_req.reward_scores = {} padding_req.metrics = {} padding_req.output_token_ids = None padding_req.rollout_log_probs = None return padding_req def _preprocess_prompt_to_async_rollout_requests(self, prompts: DataProto, n: int = 1) -> list[AsyncRolloutRequest]: assert "raw_prompt" in prompts.non_tensor_batch, ( "need data.return_raw_chat=True, due to no official way do parse_messages" ) logger.info( "n is deprecated for SGLang rollout since ray ppo trainer will repeat the prompts for rollout.n times" ) req_list = [] multi_modal_data_list = prompts.non_tensor_batch.get( "multi_modal_data", [None] * len(prompts.non_tensor_batch["raw_prompt"]) ) for data_idx, (raw_prompt, multi_modal_data) in enumerate( zip(prompts.non_tensor_batch["raw_prompt"], multi_modal_data_list, strict=True) ): if self._tool_schemas: _tools_kwargs = prompts.non_tensor_batch["tools_kwargs"][data_idx] _tool_schemas = [self._tool_map[k].get_openai_tool_schema() for k in _tools_kwargs.keys()] _input_ids = None _attention_mask = None else: _input_ids = _pre_process_inputs(self.pad_token_id, prompts.batch["input_ids"][data_idx]) _attention_mask = _pre_process_inputs(0, prompts.batch["attention_mask"][data_idx]) _tools_kwargs = {} _tool_schemas = None if self.interaction_map: _interaction_kwargs = 
prompts.non_tensor_batch["interaction_kwargs"][data_idx] else: _interaction_kwargs = {} if not isinstance(raw_prompt, list | np.ndarray): raise TypeError(f"raw_prompt must be a list or numpy array, got {type(raw_prompt)}") req = AsyncRolloutRequest( batch_data_id=data_idx, rollout_offset=0, request_id=str(uuid4()), state=AsyncRolloutRequestStateEnum.PENDING, messages=list(raw_prompt), multi_modal_data=multi_modal_data, tool_schemas=_tool_schemas, tools_kwargs=_tools_kwargs, interaction_kwargs=_interaction_kwargs, input_ids=_input_ids, response_ids=None, attention_mask=_attention_mask, response_attention_mask=None, response_position_ids=None, response_loss_mask=None, reward_scores={}, max_prompt_len=self.config.prompt_length, max_response_len=self.config.response_length, max_model_len=min(self.config.max_model_len, self.config.prompt_length + self.config.response_length), use_inference_chat_template=self.config.multi_turn.use_inference_chat_template, tokenization_sanity_check_mode=self.config.multi_turn.tokenization_sanity_check_mode, processing_class=self.processing_class, ) error_message = f"""Request {req.request_id} has mismatched lengths: input_ids={req.input_ids.shape[-1]}, attention_mask={req.attention_mask.shape[-1]}, position_ids={req.position_ids.shape[-1]}, loss_mask={req.loss_mask.shape[-1]}""" assert ( req.input_ids.shape[-1] == req.attention_mask.shape[-1] == req.position_ids.shape[-1] == req.loss_mask.shape[-1] ), error_message req_list.append(req) return req_list async def resume(self, tags: list[str]): """Resume rollout weights or kv cache in GPU memory. Args: tag: weights or kv_cache. """ if self.device_mesh["infer_tp"].get_local_rank() == 0 and self.config.free_cache_engine: await self._engine.resume_memory_occupation(tags=tags) async def release(self): """Release weights and kv cache in GPU memory.""" if self.device_mesh["infer_tp"].get_local_rank() == 0 and self.config.free_cache_engine: await self._engine.release_memory_occupation(tags=["kv_cache", "weights"]) async def update_weights(self, weights: Generator[tuple[str, torch.Tensor], None, None], **kwargs): """ Update model weights using tensor buckets, similar to THUDM/slime's implementation. Notes: - For the best performance of `rebuild_cuda_tensor`, it is recommended to: 1. Enable `RAY_EXPERIMENTAL_NOSET_CUDA_VISIBLE_DEVICES`. 2. Manually set `CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7` when using Tensor Parallelism (TP >= 8). - See reference implementations in SLIME: - Main logic: https://github.com/THUDM/slime/blob/fb7605cc5fb09af0f9369d37f7192f12bddee577/slime/ray/ppo_actor.py#L452 - runtime envs: https://github.com/THUDM/slime/blob/fb7605cc5fb09af0f9369d37f7192f12bddee577/slime/ray/ppo_actor.py#L39 """ update_weights_bucket_bytes = int(self.config.update_weights_bucket_megabytes) << 20 for params_batch in get_named_tensor_buckets(weights, update_weights_bucket_bytes): await sgl_update_weights( engine=self._engine, params_batch=params_batch, device_mesh_key="infer_tp", device_mesh=self.device_mesh, ) if self.device_mesh["infer_tp"].get_local_rank() == 0: await self._engine.flush_cache() class ServerAdapter(BaseRollout): """SGLang server adapter used in native http server mode, serve as http client to request SGLang server to resume/release/update weights and kv_cache. - hybrid mode: reside in each hybrid worker to sync weights between training engine and SGLang server. - standalone/colocated mode: just a dummy placeholder to occupy the GPU to prevent ray scheduling new GPU actor. 
""" def __init__( self, config: RolloutConfig, model_config: HFModelConfig, device_mesh: DeviceMesh, ): super().__init__(config, model_config, device_mesh) self._engine: AsyncHttpServerAdapter = None rank = int(os.environ["RANK"]) local_world_size = int(os.environ["RAY_LOCAL_WORLD_SIZE"]) rollout_world_size = self.config.tensor_model_parallel_size * self.config.data_parallel_size self.replica_rank = rank // rollout_world_size self.rollout_rank = rank % rollout_world_size self.node_rank = self.rollout_rank // local_world_size self.local_rank = self.rollout_rank % local_world_size async def _init_server_adapter(self): if self._engine is not None: return # Lazy init http server adapter because http server is launched after hybrid engine. self.server_actor = ray.get_actor(f"sglang_server_{self.replica_rank}_{self.node_rank}") server_address, server_port = await self.server_actor.get_server_address.remote() logger.debug( f"replica_rank={self.replica_rank} node_rank={self.node_rank}, " f"server address: {server_address}, port: {server_port}" ) host = f"[{server_address}]" if is_valid_ipv6_address(server_address) else server_address self._engine = AsyncHttpServerAdapter( model_path=self.model_config.local_path, host=host, port=server_port, launch_server=False ) async def resume(self, tags: list[str]): """Resume rollout weights or kv cache in GPU memory. Args: tag: weights or kv_cache. """ if self.device_mesh["infer_tp"].get_local_rank() == 0 and self.config.free_cache_engine: await self._init_server_adapter() await self._engine.resume_memory_occupation(tags=tags) async def release(self): """Release weights and kv cache in GPU memory.""" if self.device_mesh["infer_tp"].get_local_rank() == 0 and self.config.free_cache_engine: await self._init_server_adapter() await self._engine.release_memory_occupation(tags=["kv_cache", "weights"]) async def update_weights(self, weights: Generator[tuple[str, torch.Tensor], None, None], **kwargs): """ Update model weights using tensor buckets, similar to THUDM/slime's implementation. Notes: - For the best performance of `rebuild_cuda_tensor`, it is recommended to: 1. Enable `RAY_EXPERIMENTAL_NOSET_CUDA_VISIBLE_DEVICES`. 2. Manually set `CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7` when using Tensor Parallelism (TP >= 8). - See reference implementations in SLIME: - Main logic: https://github.com/THUDM/slime/blob/fb7605cc5fb09af0f9369d37f7192f12bddee577/slime/ray/ppo_actor.py#L452 - runtime envs: https://github.com/THUDM/slime/blob/fb7605cc5fb09af0f9369d37f7192f12bddee577/slime/ray/ppo_actor.py#L39 """ if self.device_mesh["infer_tp"].get_local_rank() == 0: await self._init_server_adapter() update_weights_bucket_bytes = int(self.config.update_weights_bucket_megabytes) << 20 for params_batch in get_named_tensor_buckets(weights, update_weights_bucket_bytes): await sgl_update_weights( engine=self._engine, params_batch=params_batch, device_mesh_key="infer_tp", device_mesh=self.device_mesh, ) if self.device_mesh["infer_tp"].get_local_rank() == 0: await self._engine.flush_cache() ================================================ FILE: verl_distillation/verl/workers/rollout/sglang_rollout/utils.py ================================================ # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import pickle
from typing import Any, Iterator, Optional

import numpy as np
import torch
import torch.distributed as dist

from verl.utils.device import get_device_name


def broadcast_pyobj(
    data: list[Any],
    rank: int,
    dist_group: Optional[torch.distributed.ProcessGroup] = None,
    src: int = 0,
    force_cpu_device: bool = False,
):
    """from https://github.com/sgl-project/sglang/blob/844e2f227ab0cce6ef818a719170ce37b9eb1e1b/python/sglang/srt/utils.py#L905

    Broadcast inputs from the src rank to all other ranks with the torch.dist backend.
    The `rank` here refers to the source rank on the global process group (regardless of the dist_group argument).
    """
    device = torch.device(get_device_name() if not force_cpu_device else "cpu")
    if rank == src:
        if len(data) == 0:
            tensor_size = torch.tensor([0], dtype=torch.long, device=device)
            dist.broadcast(tensor_size, src=src, group=dist_group)
        else:
            serialized_data = pickle.dumps(data)
            size = len(serialized_data)
            tensor_data = torch.ByteTensor(np.frombuffer(serialized_data, dtype=np.uint8)).to(device)
            tensor_size = torch.tensor([size], dtype=torch.long, device=device)
            dist.broadcast(tensor_size, src=src, group=dist_group)
            dist.broadcast(tensor_data, src=src, group=dist_group)
        return data
    else:
        tensor_size = torch.tensor([0], dtype=torch.long, device=device)
        dist.broadcast(tensor_size, src=src, group=dist_group)
        size = tensor_size.item()
        if size == 0:
            return []
        tensor_data = torch.empty(size, dtype=torch.uint8, device=device)
        dist.broadcast(tensor_data, src=src, group=dist_group)
        serialized_data = bytes(tensor_data.cpu().numpy())
        data = pickle.loads(serialized_data)
        return data


def get_named_tensor_buckets(
    iterable: Iterator[tuple[str, torch.Tensor]], bucket_bytes: int
) -> Iterator[list[tuple[str, torch.Tensor]]]:
    """
    Group tensors into buckets based on a specified maximum size in bytes.

    Args:
        iterable: An iterator of tuples containing tensor names and tensors.
        bucket_bytes: The maximum size of each bucket in bytes.

    Yields:
        Lists of tuples, where each tuple contains a tensor name and its corresponding tensor.

    Example:
        >>> tensors = [('tensor1', torch.randn(1000, 1000)), ('tensor2', torch.randn(2000, 2000))]
        >>> for bucket in get_named_tensor_buckets(tensors, bucket_bytes=10 << 20):
        ...     print(bucket)
        [('tensor1', tensor(...))]
        [('tensor2', tensor(...))]
    """
    if bucket_bytes <= 0:
        raise ValueError(f"bucket_bytes must be greater than 0, got {bucket_bytes}")

    current_bucket = []
    current_size = 0
    for name, tensor in iterable:
        tensor_size = tensor.element_size() * tensor.numel()
        if current_size + tensor_size > bucket_bytes:
            if current_bucket:
                yield current_bucket
            current_bucket = [(name, tensor)]
            current_size = tensor_size
        else:
            current_bucket.append((name, tensor))
            current_size += tensor_size
    if current_bucket:
        yield current_bucket


================================================
FILE: verl_distillation/verl/workers/rollout/tokenizer.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The base tokenizer class, required for any hybrid engine based rollout or inference with vLLM. """ from abc import ABC, abstractmethod import numpy as np import torch __all__ = ["HybridEngineBaseTokenizer"] class HybridEngineBaseTokenizer(ABC): """the tokenizer property and function name should align with HF's to meet vllm requirement""" @property @abstractmethod def vocab_size(self): """ `int`: Size of the base vocabulary (without the added tokens). """ pass @property @abstractmethod def pad_token_id(self): """ `Optional[int]`: Id of the padding token in the vocabulary. Returns `None` if the token has not been set. """ pass @property @abstractmethod def eos_token_id(self): """ `Optional[int]`: Id of the end of sentence token in the vocabulary. Returns `None` if the token has not been set. """ pass @property @abstractmethod def all_special_ids(self) -> list[int]: """ `List[int]`: List the ids of the special tokens(`''`, `''`, etc.) mapped to class attributes. """ pass @property @abstractmethod def all_special_tokens(self) -> list[str]: """ `List[str]`: A list of the unique special tokens (`''`, `''`, ..., etc.). Convert tokens of `tokenizers.AddedToken` type to string. """ pass @abstractmethod def encode(self, text): """ Converts a string to a sequence of ids (integer), using the tokenizer and vocabulary. Args: text (`str`, `List[str]` or `List[int]`): The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the `tokenize` method) or a list of integers. text_pair (`str`, `List[str]` or `List[int]`, *optional*): Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using the `tokenize` method) or a list of integers. """ pass @abstractmethod def decode( self, token_ids: int | list[int] | np.ndarray | torch.Tensor, skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, **kwargs, ) -> str: """ Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special tokens and clean up tokenization spaces. Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`. Args: token_ids (`Union[int, List[int], np.ndarray, torch.Tensor]`): List of tokenized input ids. Can be obtained using the `__call__` method. skip_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not to remove special tokens in the decoding. clean_up_tokenization_spaces (`bool`, *optional*): Whether or not to clean up the tokenization spaces. If `None`, will default to `self.clean_up_tokenization_spaces`. kwargs (additional keyword arguments, *optional*): Will be passed to the underlying model specific decode method. Returns: `str`: The decoded sentence. """ pass @abstractmethod def convert_ids_to_tokens(self, ids: int | list[int], skip_special_tokens: bool = False) -> str | list[str]: """ Converts a single index or a sequence of indices in a token or a sequence of tokens, using the vocabulary and added tokens. Args: ids (`int` or `List[int]`): The token id (or token ids) to convert to tokens. 
skip_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not to remove special tokens in the decoding. Returns: `str` or `List[str]`: The decoded token(s). """ pass @abstractmethod def get_added_vocab(self) -> dict[str, int]: """ Returns the added tokens in the vocabulary as a dictionary of token to index. Results might be different from the fast call because for now we always add the tokens even if they are already in the vocabulary. This is something we should change. Returns: `Dict[str, int]`: The added tokens. """ pass @abstractmethod def convert_tokens_to_string(self, tokens: list[str]) -> str: """ Converts a sequence of tokens in a single string. The most simple way to do it is `" ".join(tokens)` but we often want to remove sub-word tokenization artifacts at the same time. Args: tokens (`List[str]`): The token to join in a string. Returns: `str`: The joined tokens. """ pass @property def is_fast(self): return False ================================================ FILE: verl_distillation/verl/workers/rollout/utils.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import asyncio import ipaddress import logging import os import socket import uvicorn from fastapi import FastAPI logger = logging.getLogger(__file__) def is_valid_ipv6_address(address: str) -> bool: try: ipaddress.IPv6Address(address) return True except ValueError: return False def get_free_port(address: str) -> tuple[int, socket.socket]: family = socket.AF_INET if is_valid_ipv6_address(address): family = socket.AF_INET6 sock = socket.socket(family=family, type=socket.SOCK_STREAM) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) sock.bind((address, 0)) port = sock.getsockname()[1] return port, sock async def run_unvicorn(app: FastAPI, server_args, server_address, max_retries=5) -> tuple[int, asyncio.Task]: server_port, server_task = None, None for i in range(max_retries): try: server_port, sock = get_free_port(server_address) app.server_args = server_args config = uvicorn.Config(app, host=server_address, port=server_port, log_level="warning") server = uvicorn.Server(config) server.should_exit = True await server.serve() server_task = asyncio.create_task(server.main_loop()) break except (OSError, SystemExit) as e: logger.error(f"Failed to start HTTP server on port {server_port} at try {i}, error: {e}") else: logger.error(f"Failed to start HTTP server after {max_retries} retries, exiting...") os._exit(-1) logger.info(f"HTTP server started on port {server_port}") return server_port, server_task ================================================ FILE: verl_distillation/verl/workers/rollout/vllm_rollout/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from importlib.metadata import PackageNotFoundError, version from .vllm_rollout_spmd import vLLMAsyncRollout, vLLMRollout # noqa: F401 def get_version(pkg): try: return version(pkg) except PackageNotFoundError: return None vllm_package_name = "vllm" vllm_package_version = get_version(vllm_package_name) if vllm_package_version is None: raise PackageNotFoundError( "To use vllm rollout, please ensure the 'vllm' package is properly installed. See " "https://verl.readthedocs.io/en/latest/start/install.html for more details" ) if "ROCM_PATH" in os.environ: import re match = re.match(r"(\d+\.\d+\.?\d*)", vllm_package_version) if match: vllm_package_version = match.group(1) else: raise ValueError(f"Warning: Could not parse version format: {vllm_package_version}") ================================================ FILE: verl_distillation/verl/workers/rollout/vllm_rollout/utils.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # magic numbers that ensure we are using the same LoRA adapter during the rollout and training process VLLM_LORA_INT_ID = 123 VLLM_LORA_NAME = "123" VLLM_LORA_PATH = "simon_lora_path" def get_vllm_max_lora_rank(lora_rank: int): """ For vLLM, the smallest `max_lora_rank` is 8, and allowed values are (8, 16, 32, 64, 128, 256, 320, 512) This function automatically adjusts the `max_lora_rank` to the nearest allowed value. Reference: https://github.com/vllm-project/vllm/blob/8a297115e2367d463b781adb86b55ac740594cf6/vllm/config/lora.py#L27 """ assert lora_rank > 0, f"lora_rank must be greater than 0 to invoke this function, get {lora_rank}" vllm_max_lora_ranks = [8, 16, 32, 64, 128, 256, 320, 512] for rank in vllm_max_lora_ranks: if lora_rank <= rank: return rank raise ValueError(f"lora_rank must be less than or equal to {vllm_max_lora_ranks[-1]}, but got {lora_rank}") ================================================ FILE: verl_distillation/verl/workers/rollout/vllm_rollout/vllm_async_server.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. import argparse import asyncio import json import logging import os import pickle from pprint import pprint from typing import Any, Callable, Optional import numpy as np import ray import vllm.entrypoints.cli.serve import zmq from ray.actor import ActorHandle from vllm import SamplingParams from vllm.engine.arg_utils import AsyncEngineArgs from vllm.entrypoints.openai.api_server import ( build_app, init_app_state, ) from vllm.inputs import TokensPrompt from vllm.lora.request import LoRARequest from vllm.outputs import RequestOutput from vllm.usage.usage_lib import UsageContext from vllm.utils import FlexibleArgumentParser, get_tcp_uri from vllm.v1.engine.async_llm import AsyncLLM from vllm.v1.engine.core import EngineCoreProc from vllm.v1.engine.utils import CoreEngineProcManager from vllm.v1.executor.abstract import Executor from verl.single_controller.ray import RayClassWithInitArgs from verl.utils.config import omega_conf_to_dataclass from verl.workers.config import HFModelConfig, RewardModelConfig, RolloutConfig from verl.workers.rollout.replica import RolloutMode, RolloutReplica, TokenOutput from verl.workers.rollout.utils import get_free_port, is_valid_ipv6_address, run_unvicorn from verl.workers.rollout.vllm_rollout import vLLMAsyncRollout from verl.workers.rollout.vllm_rollout.utils import ( VLLM_LORA_INT_ID, VLLM_LORA_NAME, VLLM_LORA_PATH, get_vllm_max_lora_rank, ) logger = logging.getLogger(__file__) logger.setLevel(logging.INFO) class ExternalZeroMQDistributedExecutor(Executor): """An executor that engines are launched by external ray actors.""" uses_ray: bool = False def _init_executor(self) -> None: dp_rank_local = self.vllm_config.parallel_config.data_parallel_rank_local tp_size = self.vllm_config.parallel_config.tensor_parallel_size addresses = os.environ["VERL_VLLM_ZMQ_ADDRESSES"].split(",") addresses = addresses[dp_rank_local * tp_size : (dp_rank_local + 1) * tp_size] self.context = zmq.Context() self.sockets = [] for address in addresses: socket = self.context.socket(zmq.REQ) if address.startswith("tcp://["): socket.setsockopt(zmq.IPV6, 1) socket.connect(address) self.sockets.append(socket) kwargs = dict( vllm_config=self.vllm_config, local_rank=None, rank=None, distributed_init_method="env://", is_driver_worker=True, ) self.collective_rpc("init_worker", args=([kwargs],)) self.collective_rpc("init_device") self.collective_rpc("load_model") def collective_rpc( self, method: str | Callable, timeout: Optional[float] = None, args: tuple = (), kwargs: Optional[dict[str, Any]] = None, **kwargs_extra: Any, ) -> list[Any]: if isinstance(method, str): sent_method = method else: sent_method = pickle.dumps(method) del method message = pickle.dumps((sent_method, args, kwargs or {})) for socket in self.sockets: socket.send(message, zmq.DONTWAIT) outputs = [] for socket in self.sockets: outputs.append(pickle.loads(socket.recv())) for output in outputs: if isinstance(output, Exception): raise output return outputs def check_health(self): return class vLLMHttpServerBase: """vLLM http server in single node, this is equivalent to launch server with command line: ``` vllm serve --tensor-parallel-size=8 ... ``` """ def __init__( self, config: RolloutConfig, model_config: HFModelConfig, rollout_mode: RolloutMode, workers: list[ActorHandle], replica_rank: int, node_rank: int, gpus_per_node: int, nnodes: int, ): """ Args: config (RolloutConfig): full config. 
model_config (HFModelConfig): model config. rollout_mode (RolloutMode): rollout mode. replica_rank (int): replica rank, a replica may contain multiple nodes. node_rank (int): node rank. gpus_per_node (int): number of gpus per node. nnodes (int): number of nodes. """ super().__init__() self.config: RolloutConfig = omega_conf_to_dataclass(config) self.model_config: HFModelConfig = omega_conf_to_dataclass(model_config, dataclass_type=HFModelConfig) self.config.max_model_len = self.config.prompt_length + self.config.response_length self.rollout_mode = rollout_mode self.workers = workers self.replica_rank = replica_rank self.node_rank = node_rank self.gpus_per_node = gpus_per_node self.nnodes = nnodes if self.rollout_mode != RolloutMode.HYBRID and self.config.load_format == "dummy": logger.warning(f"rollout mode is {self.rollout_mode}, load_format is dummy, set to auto") self.config.load_format = "auto" # used for http server self._server_address = ray.util.get_node_ip_address().strip("[]") self._server_port = None # used for data parallel: --data-parallel-address, --data-parallel-rpc-port if self.node_rank == 0: self._master_address = self._server_address self._master_port, self._master_sock = get_free_port(self._server_address) self._dp_master_port, self._dp_master_sock = get_free_port(self._server_address) logger.info( f"vLLMHttpServer, replica_rank: {self.replica_rank}, master address: {self._master_address}, " f"master port: {self._master_port}, data parallel master port: {self._dp_master_port}" ) else: self._master_address = None self._master_port = None def get_master_address(self): """Get master address and port for data parallel.""" return self._master_address, self._master_port def get_server_address(self): """Get http server address and port.""" assert self._server_port is not None, "http server is not launched, port is None" return self._server_address, self._server_port async def launch_server(self, master_address: str = None, master_port: int = None): if self.node_rank != 0: assert master_address and master_port, "non-master node should provide master address and port" self._master_address = master_address self._master_port = master_port # 1. setup vllm serve cli args engine_kwargs = self.config.get("engine_kwargs", {}).get("vllm", {}) or {} engine_kwargs = {key: val for key, val in engine_kwargs.items() if val is not None} if self.config.get("limit_images", None): # support for multi-image data engine_kwargs["limit_mm_per_prompt"] = {"image": self.config.get("limit_images")} if self.config.cudagraph_capture_sizes: engine_kwargs["cuda_graph_sizes"] = self.config.cudagraph_capture_sizes # Override default generation config from hugging face model config, # user can still override them by passing kwargs in each request. 
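        # Annotation (illustrative, not part of this file): the dict built below is JSON-encoded and passed
        # to the server via --override_generation_config, so the engine-side generation defaults match the
        # rollout config. The concrete values here are hypothetical:
        #
        #     {"temperature": 1.0, "top_k": -1, "top_p": 1.0, "repetition_penalty": 1.0, "max_new_tokens": 4096}
        #
        # Per-request sampling kwargs can still override these defaults, as noted above.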
        override_generation_config = dict(
            temperature=self.config.temperature,
            top_k=self.config.top_k,
            top_p=self.config.top_p,
            repetition_penalty=1.0,
            max_new_tokens=self.config.response_length,
        )
        logger.info(f"override_generation_config: {override_generation_config}")

        args = {
            "dtype": self.config.dtype,
            "load_format": self.config.load_format,
            "skip_tokenizer_init": False,
            # "trust_remote_code": True,
            "max_model_len": self.config.max_model_len,
            "max_num_seqs": self.config.max_num_seqs,
            "enable_chunked_prefill": self.config.enable_chunked_prefill,
            "max_num_batched_tokens": self.config.max_num_batched_tokens,
            "enable_prefix_caching": self.config.enable_prefix_caching,
            "enable_sleep_mode": True,
            "disable_custom_all_reduce": True,
            "enforce_eager": self.config.enforce_eager,
            "gpu_memory_utilization": self.config.gpu_memory_utilization,
            "disable_log_stats": self.config.disable_log_stats,
            "tensor_parallel_size": self.config.tensor_model_parallel_size,
            "seed": self.config.get("seed", 0),
            "override_generation_config": json.dumps(override_generation_config),
            **engine_kwargs,
        }
        if self.config.expert_parallel_size > 1:
            assert self.gpus_per_node % self.config.tensor_model_parallel_size == 0, (
                "gpus_per_node should be divisible by tensor_model_parallel_size"
            )
            data_parallel_size_local = self.gpus_per_node // self.config.tensor_model_parallel_size
            assert len(self.workers) == data_parallel_size_local * self.config.tensor_model_parallel_size, (
                f"num workers ({len(self.workers)}) should be equal to dp_size_local "
                f"({data_parallel_size_local}) * tp_size ({self.config.tensor_model_parallel_size})"
            )
            args.update(
                {
                    "enable_expert_parallel": self.config.expert_parallel_size > 1,
                    "data_parallel_size": self.config.data_parallel_size,
                    "data_parallel_size_local": data_parallel_size_local,
                    "data_parallel_start_rank": self.node_rank * data_parallel_size_local,
                    "data_parallel_address": self._master_address,
                    "data_parallel_rpc_port": self._master_port,
                }
            )

        # update lora-related args
        if self.model_config.lora_rank > 0:
            args.update(
                {
                    "enable_lora": True,
                    "max_loras": 1,
                    "max_lora_rank": get_vllm_max_lora_rank(self.model_config.lora_rank),
                }
            )

        server_args = ["serve", self.model_config.local_path]
        for k, v in args.items():
            if isinstance(v, bool):
                if v:
                    server_args.append(f"--{k}")
            else:
                server_args.append(f"--{k}")
                server_args.append(str(v))
        if self.replica_rank == 0:
            pprint(server_args)

        CMD_MODULES = [vllm.entrypoints.cli.serve]
        parser = FlexibleArgumentParser(description="vLLM CLI")
        subparsers = parser.add_subparsers(required=False, dest="subparser")
        cmds = {}
        for cmd_module in CMD_MODULES:
            new_cmds = cmd_module.cmd_init()
            for cmd in new_cmds:
                cmd.subparser_init(subparsers).set_defaults(dispatch_function=cmd.cmd)
                cmds[cmd.name] = cmd
        server_args = parser.parse_args(args=server_args)
        server_args.model = server_args.model_tag
        if server_args.subparser in cmds:
            cmds[server_args.subparser].validate(server_args)

        # 2. setup distributed executor backend
        distributed_executor_backend = ExternalZeroMQDistributedExecutor if len(self.workers) > 0 else None
        server_args.distributed_executor_backend = distributed_executor_backend
        zmq_addresses = ray.get([worker.get_zeromq_address.remote() for worker in self.workers])
        logger.info(
            f"replica_rank={self.replica_rank}, node_rank={self.node_rank}, nnodes={self.nnodes}, "
            f"get worker zmq addresses: {zmq_addresses}"
        )
        os.environ["VERL_VLLM_ZMQ_ADDRESSES"] = ",".join(zmq_addresses)

        # 3. launch server
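        # Annotation (illustrative only; paths, addresses, and values are hypothetical): the flag list built
        # above is roughly equivalent to the following shell invocation, matching the `vllm serve` examples
        # in the class docstrings. Boolean args appear as bare flags, everything else as `--key value`:
        #
        #     vllm serve /path/to/model \
        #         --tensor_parallel_size 4 --data_parallel_size 2 --data_parallel_size_local 1 \
        #         --data_parallel_address 10.0.0.1 --data_parallel_rpc_port 13345 \
        #         --enable_sleep_mode --disable_custom_all_reduce
        #
        # Node rank 0 then runs the OpenAI-compatible API server; the remaining nodes join headless.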
        if self.node_rank == 0:
            await self.run_server(server_args)
        else:
            await self.run_headless(server_args)

    async def run_server(self, args: argparse.Namespace):
        engine_args = AsyncEngineArgs.from_cli_args(args)
        usage_context = UsageContext.OPENAI_API_SERVER
        vllm_config = engine_args.create_engine_config(usage_context=usage_context)
        vllm_config.parallel_config.data_parallel_master_port = self._dp_master_port
        engine_client = AsyncLLM.from_vllm_config(
            vllm_config=vllm_config,
            usage_context=usage_context,
            disable_log_requests=engine_args.disable_log_requests,
            disable_log_stats=engine_args.disable_log_stats,
        )
        # Don't keep the dummy data in memory
        await engine_client.reset_mm_cache()
        app = build_app(args)
        await init_app_state(engine_client, vllm_config, app.state, args)
        if self.replica_rank == 0 and self.node_rank == 0:
            logger.info(f"Initializing a V1 LLM engine with config: {vllm_config}")
        self.engine = engine_client
        self._server_port, self._server_task = await run_unvicorn(app, args, self._server_address)

    async def run_headless(self, args: argparse.Namespace):
        # Create the EngineConfig.
        engine_args = vllm.AsyncEngineArgs.from_cli_args(args)
        usage_context = UsageContext.OPENAI_API_SERVER
        vllm_config = engine_args.create_engine_config(usage_context=usage_context, headless=True)

        parallel_config = vllm_config.parallel_config
        local_engine_count = parallel_config.data_parallel_size_local
        host = parallel_config.data_parallel_master_ip
        port = engine_args.data_parallel_rpc_port  # add to config too
        handshake_address = get_tcp_uri(host, port)

        # Create the engines.
        self.engine_manager = CoreEngineProcManager(
            target_fn=EngineCoreProc.run_engine_core,
            local_engine_count=local_engine_count,
            start_index=vllm_config.parallel_config.data_parallel_rank,
            local_start_index=0,
            vllm_config=vllm_config,
            local_client=False,
            handshake_address=handshake_address,
            executor_class=Executor.get_class(vllm_config),
            log_stats=not engine_args.disable_log_stats,
        )

    async def generate(
        self,
        prompt_ids: list[int],
        sampling_params: dict[str, Any],
        request_id: str,
        image_data: Optional[list[Any]] = None,
    ) -> TokenOutput:
        """Generate sequence with token-in-token-out."""
        # TODO(@wuxibin): switch to `/generate` http endpoint once multi-modal support is ready.
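        # Annotation (illustrative sketch, not part of this file): the response budget below is whatever
        # remains of the model context after the prompt, and vLLM's SamplingParams uses `logprobs=0` to
        # request the logprob of the sampled token only (None disables logprobs), hence the mapping below.
        #
        #     prompt_ids = list(range(1000))                 # hypothetical prompt of 1000 tokens
        #     max_model_len = 4096
        #     max_tokens = max_model_len - len(prompt_ids)   # -> at most 3096 response tokens
        #     sampling_params = {"logprobs": True}
        #     sampling_params["logprobs"] = 0 if sampling_params.pop("logprobs", False) else None  # -> 0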
max_tokens = self.config.max_model_len - len(prompt_ids) sampling_params["logprobs"] = 0 if sampling_params.pop("logprobs", False) else None sampling_params.setdefault("repetition_penalty", self.config.get("repetition_penalty", 1.0)) sampling_params = SamplingParams(max_tokens=max_tokens, **sampling_params) prompt_ids = _qwen2_5_vl_dedup_image_tokens(prompt_ids, self.model_config.processor) prompt = TokensPrompt( prompt_token_ids=prompt_ids, multi_modal_data={"image": image_data} if image_data else None ) # Add lora request lora_request = None if self.model_config.lora_rank > 0: # Make sure we also check that the lora is already loaded in the engine lora_loaded = VLLM_LORA_INT_ID in await self.engine.list_loras() if lora_loaded: lora_request = LoRARequest( lora_name=VLLM_LORA_NAME, lora_int_id=VLLM_LORA_INT_ID, lora_path=VLLM_LORA_PATH ) generator = self.engine.generate( prompt=prompt, sampling_params=sampling_params, request_id=request_id, lora_request=lora_request ) # Get final response final_res: Optional[RequestOutput] = None async for output in generator: final_res = output assert final_res is not None token_ids = final_res.outputs[0].token_ids log_probs = None if sampling_params.logprobs is not None: log_probs = [logprobs[token_ids[i]].logprob for i, logprobs in enumerate(final_res.outputs[0].logprobs)] return TokenOutput(token_ids=token_ids, log_probs=log_probs) async def wake_up(self): if self.rollout_mode == RolloutMode.HYBRID: # Call all workers to switch between trainer mode and rollout mode. await asyncio.gather(*[worker.wake_up.remote() for worker in self.workers]) elif self.rollout_mode == RolloutMode.COLOCATED: # Directly call engine to wake up without sync weights. if self.node_rank == 0: await self.engine.wake_up(tags=["kv_cache", "weights"]) elif self.rollout_mode == RolloutMode.STANDALONE: logger.info("skip wake_up in standalone mode") async def sleep(self): if self.rollout_mode == RolloutMode.HYBRID: if self.node_rank == 0: await self.engine.reset_prefix_cache() await asyncio.gather(*[worker.sleep.remote() for worker in self.workers]) elif self.rollout_mode == RolloutMode.COLOCATED: if self.node_rank == 0: await self.engine.reset_prefix_cache() await self.engine.sleep(level=1) elif self.rollout_mode == RolloutMode.STANDALONE: logger.info("skip sleep in standalone mode") async def wait_for_requests_to_drain(self): await self.engine.wait_for_requests_to_drain() @ray.remote(num_cpus=1) class vLLMHttpServer(vLLMHttpServerBase): """vLLM http server in single node, this is equivalent to launch server with command line: ``` vllm serve --tensor-parallel-size=8 ... 
``` """ def __init__( self, config: RolloutConfig | RewardModelConfig, model_config: HFModelConfig, rollout_mode: RolloutMode, workers: list[ActorHandle], replica_rank: int, node_rank: int, gpus_per_node: int, nnodes: int, ): super().__init__(config, model_config, rollout_mode, workers, replica_rank, node_rank, gpus_per_node, nnodes) _rollout_worker_actor_cls = ray.remote(vLLMAsyncRollout) class vLLMReplica(RolloutReplica): def __init__( self, replica_rank: int, config: RolloutConfig | RewardModelConfig, model_config: HFModelConfig, gpus_per_node: int = 8, is_reward_model: bool = False, ): super().__init__(replica_rank, config, model_config, gpus_per_node, is_reward_model) self.server_class = vLLMHttpServer def get_ray_class_with_init_args(self) -> RayClassWithInitArgs: """Get rollout worker actor class for colocated and standalone mode.""" worker_dict_cls = RayClassWithInitArgs( cls=_rollout_worker_actor_cls, config=self.config, model_config=self.model_config, device_mesh=None, ) return worker_dict_cls async def launch_servers(self): """Launch http server in each node.""" assert len(self.workers) == self.world_size, ( f"worker number {len(self.workers)} not equal to world size {self.world_size}" ) # get node_id of all workers worker_node_ids = await asyncio.gather( *[ worker.__ray_call__.remote(lambda self: ray.get_runtime_context().get_node_id()) for worker in self.workers ] ) # For non-data parallel case, there's only one server whether it's single or multi nodes. nnodes, gpus_per_node = self.nnodes, self.gpus_per_node if self.config.data_parallel_size == 1: nnodes = 1 gpus_per_node = self.world_size # create server actor in each node with node affinity for node_rank in range(nnodes): workers = self.workers[node_rank * gpus_per_node : (node_rank + 1) * gpus_per_node] node_id = worker_node_ids[node_rank * gpus_per_node] name = ( f"vllm_server_{self.replica_rank}_{node_rank}" if not self.is_reward_model else f"vllm_server_reward_{self.replica_rank}_{node_rank}" ) server = self.server_class.options( scheduling_strategy=ray.util.scheduling_strategies.NodeAffinitySchedulingStrategy( node_id=node_id, soft=False, ), name=name, ).remote( config=self.config, model_config=self.model_config, rollout_mode=self.rollout_mode, workers=workers, replica_rank=self.replica_rank, node_rank=node_rank, gpus_per_node=gpus_per_node, nnodes=nnodes, ) self.servers.append(server) # launch http server in each node master_address, master_port = await self.servers[0].get_master_address.remote() await asyncio.gather( *[ server.launch_server.remote(master_address=master_address, master_port=master_port) for server in self.servers ] ) # get http server address from first server server_address, server_port = await self.servers[0].get_server_address.remote() self._server_handle = self.servers[0] self._server_address = ( f"[{server_address}]:{server_port}" if is_valid_ipv6_address(server_address) else f"{server_address}:{server_port}" ) async def sleep(self): """Sleep each rollout server.""" # Drain DP engines for safe sleep. await self.servers[0].wait_for_requests_to_drain.remote() await asyncio.gather(*[server.sleep.remote() for server in self.servers]) def _qwen2_5_vl_dedup_image_tokens(prompt_ids: list[int], processor): """Deduplicate consecutive image tokens in prompt_ids for Qwen2.5-VL, since vLLM will replicate the <|image_pad|> token by image_data. 
    For example,
    ```
    <|vision_start|><|image_pad|><|image_pad|>...<|image_pad|><|vision_end|>
    => <|vision_start|><|image_pad|><|vision_end|>
    ```
    """
    if processor is not None and "Qwen2VLImageProcessor" in processor.image_processor.__class__.__name__:
        prompt_ids = np.array(prompt_ids)

        # Create a mask where True indicates elements to keep
        mask = np.ones(len(prompt_ids), dtype=bool)

        # Find where the array equals the value
        is_value = prompt_ids == processor.image_token_id

        # Find consecutive duplicates by checking if the previous element is also the value
        mask[1:] &= ~(is_value[1:] & is_value[:-1])
        return prompt_ids[mask].tolist()
    else:
        return prompt_ids


================================================
FILE: verl_distillation/verl/workers/rollout/vllm_rollout/vllm_rollout_spmd.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The vllm_rollout that can be applied in different backends.

When working with FSDP:
- Use the DTensor weight loader (recommended) or the HF weight loader.
- Utilize the state_dict from FSDP to synchronize the weights among tp ranks in vLLM.

When working with Megatron:
- Use the Megatron weight loader.
- During training, only the current pp stage holds the parameters.
- Before inference, broadcast the parameters of the current pp rank to all other pp ranks
  (so that all pp ranks hold all the parameters).
- Bind the parameters to the inference engine.
- Do inference in tp. pp is treated as additional dp.
- After inference, all the parameters that don't belong to this pp rank are freed.
"""

import asyncio
import getpass
import inspect
import logging
import os
import pickle
import time
from contextlib import contextmanager
from dataclasses import asdict
from types import MethodType
from typing import Any, Generator

import numpy as np
import ray
import torch
import torch.distributed
import zmq
import zmq.asyncio
from filelock import FileLock
from omegaconf import ListConfig
from tensordict import TensorDict
from torch.distributed.device_mesh import DeviceMesh
from vllm import LLM, SamplingParams
from vllm.config import CompilationConfig, LoRAConfig
from vllm.lora.request import LoRARequest

try:
    # https://github.com/vllm-project/vllm/commit/96b9aa5aa076e64c68765232aec343e4d0006e2a
    from vllm.config import CompilationMode

    _use_compilation_mode = True
except ImportError:
    from vllm.config import CompilationLevel

    _use_compilation_mode = False

try:
    from vllm.worker.worker_base import WorkerWrapperBase
except ModuleNotFoundError:
    # https://github.com/vllm-project/vllm/commit/6a113d9aed8221a9c234535958e70e34ab6cac5b
    from vllm.v1.worker.worker_base import WorkerWrapperBase

from verl import DataProto
from verl.third_party.vllm import VLLM_SLEEP_LEVEL
from verl.utils.device import is_npu_available
from verl.utils.distributed import initialize_global_process_group_ray
from verl.utils.model import get_lora_rank_from_adapter
from verl.utils.profiler import GPUMemoryLogger
from verl.utils.ray_utils import ray_noset_visible_devices
from verl.utils.torch_functional import get_response_mask, pad_2d_list_to_length
from verl.utils.vllm import TensorLoRARequest, VLLMHijack, is_version_ge
from verl.workers.config import HFModelConfig, RolloutConfig
from verl.workers.rollout.base import BaseRollout
from verl.workers.rollout.utils import get_free_port, is_valid_ipv6_address
from verl.workers.rollout.vllm_rollout.utils import (
    VLLM_LORA_INT_ID,
    VLLM_LORA_NAME,
    VLLM_LORA_PATH,
    get_vllm_max_lora_rank,
)

logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))

# TODO
# 1. support pp in vllm
# 2. passing the tokenizer may not be necessary, since no encoding/decoding is happening here
# 3. simplify the init logic

# NOTE(sgm): added for verl. We can optimize it by making the dataloader yield List[int] without padding.
def _pre_process_inputs(pad_token_id, prompt_token_ids: torch.Tensor) -> list[int]: # remove the left padding in the prompt token_id # pad_token_id = self.llm_engine.tokenizer.pad_token_id if self.llm_engine.tokenizer.pad_token_id # is not None else self.llm_engine.tokenizer.eos_token_id non_pad_index = torch.nonzero(prompt_token_ids != pad_token_id, as_tuple=False)[0][0] token_ids = prompt_token_ids[non_pad_index:].tolist() return token_ids if is_version_ge(pkg="vllm", minver="0.7.3"): VLLMHijack.hijack() class vLLMRollout(BaseRollout): def __init__( self, config: RolloutConfig, model_config: HFModelConfig, device_mesh: DeviceMesh, ): super().__init__(config, model_config, device_mesh) if config.layered_summon: self.sleep_level = 1 else: self.sleep_level = VLLM_SLEEP_LEVEL model_path = model_config.local_path tokenizer = model_config.tokenizer model_hf_config = model_config.hf_config trust_remote_code = model_config.trust_remote_code lora_adapter_path = getattr(model_config, "lora_adapter_path", None) if lora_adapter_path is not None: lora_rank = get_lora_rank_from_adapter(lora_adapter_path) else: lora_rank = model_config.lora_rank self.lora_kwargs = ( {"enable_lora": True, "max_loras": 1, "max_lora_rank": get_vllm_max_lora_rank(lora_rank)} if model_config.lora_rank > 0 else {} ) tensor_parallel_size = self.config.get("tensor_model_parallel_size", 1) assert tensor_parallel_size <= torch.distributed.get_world_size(), ( "tensor parallel size should be less than or equal to the world size" ) max_num_batched_tokens = self.config.get("max_num_batched_tokens", 8192) rope_scaling_config = getattr(model_hf_config, "rope_scaling", None) if not rope_scaling_config: max_position_embeddings = None if hasattr(model_hf_config, "max_position_embeddings"): max_position_embeddings = model_hf_config.max_position_embeddings elif hasattr(model_hf_config, "llm_config") and hasattr( model_hf_config.llm_config, "max_position_embeddings" ): max_position_embeddings = model_hf_config.llm_config.max_position_embeddings elif hasattr(model_hf_config, "text_config") and hasattr( model_hf_config.text_config, "max_position_embeddings" ): max_position_embeddings = model_hf_config.text_config.max_position_embeddings if max_position_embeddings is None: raise ValueError("max_position_embeddings not found in model_hf_config") assert max_position_embeddings >= config.prompt_length + config.response_length, ( "model context length should be greater than total sequence length" ) else: # handle type where there's a length extend factor # see https://qwen.readthedocs.io/en/latest/deployment/vllm.html#extended-context-support # for using yarn as an example rope_scaling_factor = rope_scaling_config.get("factor", 1.0) assert ( model_hf_config.max_position_embeddings * rope_scaling_factor >= config.prompt_length + config.response_length ), ( "model context length should be greater than total sequence length, " + f"got rope_scaling_factor={rope_scaling_factor} and " + f"max_position_embeddings={model_hf_config.max_position_embeddings}" ) max_model_len = int(config.max_model_len or config.prompt_length + config.response_length) if max_num_batched_tokens < max_model_len and self.config.enable_chunked_prefill: raise ValueError( "Enable chunked prefill, max_num_batched_tokens is smaller than max_model_len, \ please increase max_num_batched_tokens or disable chunked prefill" ) load_format = "dummy" if config.load_format.startswith("dummy") else config.load_format # copy it to avoid secretly modifying the engine config engine_kwargs = 
config.get("engine_kwargs", {}).get("vllm", {}) or {} # For each vLLM engine parameter, # - `None` means not setting it, so we pop it, and leave it to vLLM default value # (which can vary across different vLLM versions); # - Otherwise it's the desired value we want to explicitly set. engine_kwargs = {key: val for key, val in engine_kwargs.items() if val is not None} if config.get("limit_images", None): # support for multi-image data engine_kwargs["limit_mm_per_prompt"] = {"image": config.get("limit_images")} compilation_config = {} cudagraph_capture_sizes = config.get("cudagraph_capture_sizes") # enforce_eager must be False to use cudagraph if not config.enforce_eager and cudagraph_capture_sizes: if isinstance(cudagraph_capture_sizes, ListConfig): compilation_args = {"cudagraph_capture_sizes": cudagraph_capture_sizes} if _use_compilation_mode: compilation_args["mode"] = CompilationMode.VLLM_COMPILE else: compilation_args["level"] = CompilationLevel.PIECEWISE compilation_config["compilation_config"] = CompilationConfig(**compilation_args) else: logger.warning(f"cudagraph_capture_sizes must be a list, but got {cudagraph_capture_sizes}") self.inference_engine = LLM( model=model_path, enable_sleep_mode=config.free_cache_engine, tensor_parallel_size=tensor_parallel_size, distributed_executor_backend="external_launcher", dtype=config.dtype, enforce_eager=config.enforce_eager, gpu_memory_utilization=config.gpu_memory_utilization, disable_custom_all_reduce=True, skip_tokenizer_init=False, max_model_len=max_model_len, max_num_seqs=config.max_num_seqs, load_format=load_format, disable_log_stats=config.disable_log_stats, max_num_batched_tokens=max_num_batched_tokens, enable_chunked_prefill=config.enable_chunked_prefill, enable_prefix_caching=config.enable_prefix_caching, trust_remote_code=trust_remote_code, seed=config.get("seed", 0), **compilation_config, **self.lora_kwargs, **engine_kwargs, ) kwargs = dict( n=1, logprobs=0, # can be set to 0 and let actor to recompute max_tokens=config.response_length, repetition_penalty=config.get("repetition_penalty", 1.0), ) kwargs["detokenize"] = False # supporting adding any sampling params from the config file for k in config.keys(): if hasattr(SamplingParams(), str(k)) and k != "seed": kwargs[k] = config.get(k) kwargs["n"] = 1 # already repeat in ray_trainer print(f"kwargs: {kwargs}") self.sampling_params = SamplingParams(**kwargs) self.pad_token_id = tokenizer.pad_token_id @contextmanager def update_sampling_params(self, **kwargs): # update sampling params old_sampling_params_args = {} if kwargs: for key, value in kwargs.items(): if hasattr(self.sampling_params, key): old_value = getattr(self.sampling_params, key) old_sampling_params_args[key] = old_value setattr(self.sampling_params, key, value) yield # roll back to previous sampling params # if len(old_sampling_params_args): for key, value in old_sampling_params_args.items(): setattr(self.sampling_params, key, value) @GPUMemoryLogger(role="vllm rollout spmd", logger=logger) @torch.no_grad() def generate_sequences(self, prompts: DataProto, **kwargs) -> DataProto: """Generate sequences for a batch of prompts. Args: batch (DataProto): Input batch. Returns: DataProto: Output batch. - prompts: [bsz, prompt_length], prompt token ids from dataset. - responses: [bsz, response_length], output token ids include response tokens from LLM generation and observation tokens from tool_calls. - response_mask: [bsz, response_length], 1 for LLM generated tokens, 0 for observation/padding tokens. 
- input_ids: [bsz, prompt_length + response_length], whole sequence token ids, including prompt tokens and response tokens. - attention_mask: [bsz, prompt_length + response_length], 0 for padding tokens, 1 for other tokens. - position_ids: [bsz, prompt_length + response_length], incremental position ids. For multi-turn conversations: responses: |<- LLM generation ->|<- tool_calls ->|<- LLM generation ->|<- padding ->| response_mask: | 1, 1, 1, ..., 1, 1 | 0, 0, .., 0, 0 | 1, 1, 1, ..., 1, 1 | 0, 0, ..., 0| """ idx = prompts.batch["input_ids"] # (bs, prompt_length) # left-padded attention_mask attention_mask = prompts.batch["attention_mask"] position_ids = prompts.batch["position_ids"] # used to construct attention_mask eos_token_id = prompts.meta_info["eos_token_id"] batch_size = idx.size(0) non_tensor_batch = prompts.non_tensor_batch if "raw_prompt_ids" not in non_tensor_batch: non_tensor_batch["raw_prompt_ids"] = np.array( [_pre_process_inputs(self.pad_token_id, idx[i]) for i in range(batch_size)], dtype=object ) if batch_size != len(non_tensor_batch["raw_prompt_ids"]): raise RuntimeError("vllm sharding manager is not working properly.") if "multi_modal_data" in non_tensor_batch: vllm_inputs = [] for raw_prompt_ids, multi_modal_data in zip( non_tensor_batch.pop("raw_prompt_ids"), non_tensor_batch.pop("multi_modal_data"), strict=True ): vllm_inputs.append({"prompt_token_ids": raw_prompt_ids, "multi_modal_data": multi_modal_data}) else: vllm_inputs = [ {"prompt_token_ids": raw_prompt_ids} for raw_prompt_ids in non_tensor_batch.pop("raw_prompt_ids") ] for input_data in vllm_inputs: # Ensure token IDs are lists or numpy arrays if not isinstance(input_data["prompt_token_ids"], list | np.ndarray): raise TypeError( f"prompt_token_ids must be a list or numpy array, got {type(input_data['prompt_token_ids'])}" ) input_data["prompt_token_ids"] = list(input_data["prompt_token_ids"]) do_sample = prompts.meta_info.get("do_sample", True) is_validate = prompts.meta_info.get("validate", False) if not do_sample: kwargs = { "best_of": 1, "top_p": 1.0, "top_k": -1, "min_p": 0.0, "temperature": 0, "n": 1, # if greedy, only 1 response } elif is_validate: # TODO: try ** kwargs = { "top_k": self.config.val_kwargs.top_k, "top_p": self.config.val_kwargs.top_p, "temperature": self.config.val_kwargs.temperature, "n": 1, # if validating, already repeated in ray_trainer } lora_requests = None if self.lora_kwargs: lora_int_ids = list(self.inference_engine.llm_engine.list_loras()) if len(lora_int_ids) > 0: lora_int_id = lora_int_ids[0] lora_requests = [ LoRARequest(lora_name=f"{lora_int_id}", lora_int_id=lora_int_id, lora_path="/simon-stub-path") ] * batch_size # users can customize different sampling_params for different runs with self.update_sampling_params(**kwargs): outputs = self.inference_engine.generate( prompts=vllm_inputs, # because we have already converted it to prompt token ids sampling_params=self.sampling_params, lora_request=lora_requests, use_tqdm=False, ) # TODO(sgm): disable logprob when recompute_log_prob is enabled # if n = 1: (bs, response_length) ; if n > 1: (bs * n, response_length) response = [] rollout_log_probs = [] for output in outputs: for sample_id in range(len(output.outputs)): response_ids = output.outputs[sample_id].token_ids response.append(response_ids) if self.config.calculate_log_probs: curr_log_prob = [] for i, logprob in enumerate(output.outputs[sample_id].logprobs): curr_log_prob.append(logprob[response_ids[i]].logprob) rollout_log_probs.append(curr_log_prob) response = 
pad_2d_list_to_length(response, self.pad_token_id, max_length=self.config.response_length).to( idx.device ) if self.config.calculate_log_probs: rollout_log_probs = pad_2d_list_to_length( rollout_log_probs, -1, max_length=self.config.response_length ).to(idx.device) rollout_log_probs = rollout_log_probs.to(torch.float32) seq = torch.cat([idx, response], dim=-1) response_length = response.size(1) delta_position_id = torch.arange(1, response_length + 1, device=position_ids.device) delta_position_id = delta_position_id.unsqueeze(0).expand(batch_size, -1) if position_ids.dim() == 3: # qwen2vl mrope (batch size, 4, seq len) delta_position_id = delta_position_id.view(batch_size, 1, -1).expand(batch_size, position_ids.size(1), -1) # TODO(sgm): fix position_ids on right_pad # prompt: left pad + response: right pad # attention_mask: [0,0,0,0,1,1,1,1, | 1,1,1,0,0,0,0,0] # position_ids: [0,0,0,0,0,1,2,3, | 4,5,6,7,8,9,10,11] response_position_ids = position_ids[..., -1:] + delta_position_id position_ids = torch.cat([position_ids, response_position_ids], dim=-1) response_attention_mask = get_response_mask( response_id=response, eos_token=eos_token_id, dtype=attention_mask.dtype ) attention_mask = torch.cat((attention_mask, response_attention_mask), dim=-1) # all the tp ranks should contain the same data here. data in all ranks are valid batch = TensorDict( { "prompts": idx, "responses": response, "input_ids": seq, # here input_ids become the whole sentences "attention_mask": attention_mask, "position_ids": position_ids, }, batch_size=batch_size, ) if self.config.calculate_log_probs: # we will recompute old log prob with actor batch["rollout_log_probs"] = rollout_log_probs return DataProto(batch=batch, non_tensor_batch=non_tensor_batch) async def resume(self, tags: list[str]): """Resume rollout weights or kv cache in GPU memory. Args: tags: weights or kv_cache. """ if not self.config.free_cache_engine: return if "tags" in inspect.signature(self.inference_engine.wake_up).parameters: self.inference_engine.wake_up(tags=tags) else: self.inference_engine.wake_up() async def release(self): """Release weights and kv cache in GPU memory.""" self.inference_engine.reset_prefix_cache() if not self.config.free_cache_engine: return self.inference_engine.sleep(level=self.sleep_level) async def update_weights(self, weights: Generator[tuple[str, torch.Tensor], None, None], **kwargs): """Update the weights of the rollout model. Args: weights: A generator that yields the name of the weight tensor and the tensor itself. 
""" peft_config, base_sync_done = kwargs.get("peft_config", None), kwargs.get("base_sync_done", False) if peft_config and base_sync_done: lora_int_id = int(time.time_ns() % 0x7FFFFFFF) lora_reqest = TensorLoRARequest( lora_name=f"{lora_int_id}", lora_int_id=lora_int_id, lora_path="simon_lora_path", peft_config=asdict(peft_config), lora_tensors=dict(weights), ) self.inference_engine.llm_engine.add_lora(lora_reqest) logger.info(f"vLLM load weights, loaded_params: {len(weights)}") else: from verl.utils.vllm.patch import patch_vllm_moe_model_weight_loader model = self.inference_engine.llm_engine.model_executor.driver_worker.worker.model_runner.model patch_vllm_moe_model_weight_loader(model) model.load_weights(weights) # https://github.com/vllm-project/vllm/issues/13175 def _monkey_patch_compute_logits(model, vocab_size: int): original_compute_logits = model.compute_logits def compute_logits( self, *args, **kwargs, ) -> torch.Tensor: logits = original_compute_logits(*args, **kwargs) logits[..., vocab_size:] = float("-inf") return logits model.compute_logits = MethodType(compute_logits, model) class vLLMAsyncRollout(BaseRollout): """vLLMAsyncRollout is a thin wrapper of WorkerWrapperBase, which is engine in single worker process.""" def __init__( self, config: RolloutConfig, model_config: HFModelConfig, device_mesh: DeviceMesh, ): super().__init__(config, model_config, device_mesh) self.tokenizer = model_config.tokenizer self.inference_engine: WorkerWrapperBase = None self.address = self._init_zeromq() self.lora_config = ( {"max_loras": 1, "max_lora_rank": get_vllm_max_lora_rank(model_config.lora_rank)} if model_config.lora_rank > 0 else {} ) # https://github.com/vllm-project/vllm/issues/25171 if config.layered_summon or config.expert_parallel_size > 1: self.sleep_level = 1 else: self.sleep_level = VLLM_SLEEP_LEVEL def _init_zeromq(self) -> str: tensor_parallel_size = self.config.tensor_model_parallel_size # single node: ipc, multi nodes: tcp local_world_size = int(os.environ["RAY_LOCAL_WORLD_SIZE"]) socket_type = "ipc" if tensor_parallel_size <= local_world_size else "tcp" # File lock to prevent multiple workers listen to same port with FileLock(f"/tmp/verl_vllm_zmq_{getpass.getuser()}.lock"): context = zmq.asyncio.Context() self.socket = context.socket(zmq.REP) if socket_type == "ipc": pid = os.getpid() address = f"ipc:///tmp/verl_vllm_zmq_{pid}_{getpass.getuser()}.ipc" else: ip = ray.util.get_node_ip_address().strip("[]") port, sock = get_free_port(ip) if is_valid_ipv6_address(ip): address = f"tcp://[{ip}]:{port}" self.socket.setsockopt(zmq.IPV6, 1) else: address = f"tcp://{ip}:{port}" self.socket.bind(address) loop = asyncio.get_running_loop() self.zmq_loop_task = loop.create_task(self._loop_forever()) return address async def _loop_forever(self): while True: try: message = await self.socket.recv() method, args, kwargs = pickle.loads(message) result = await self._execute_method(method, *args, **kwargs) await self.socket.send(pickle.dumps(result)) except Exception as e: logger.exception(f"vLLMAsyncRollout _loop_forever error: {e}") await self.socket.send(pickle.dumps(e)) break def _init_worker(self, all_kwargs: list[dict[str, Any]]): """Initialize worker engine.""" if not torch.distributed.is_initialized(): initialize_global_process_group_ray() all_kwargs[0]["rank"] = int(os.environ["RANK"]) device_name = "NPU" if is_npu_available else "GPU" all_kwargs[0]["local_rank"] = ( 0 if not ray_noset_visible_devices() else int(ray.get_runtime_context().get_accelerator_ids()[device_name][0]) ) 
self.vllm_config = all_kwargs[0]["vllm_config"] if self.lora_config: lora_dtype = getattr(torch, self.config.dtype) self.vllm_config.lora_config = LoRAConfig(lora_dtype=lora_dtype, **self.lora_config) self.inference_engine = WorkerWrapperBase(vllm_config=self.vllm_config) self.inference_engine.init_worker(all_kwargs) def _load_model(self, *args, **kwargs): self.inference_engine.load_model(*args, **kwargs) _monkey_patch_compute_logits(self.inference_engine.worker.model_runner.model, len(self.tokenizer)) async def _execute_method(self, method: str | bytes, *args, **kwargs): if method == "init_worker": return self._init_worker(*args, **kwargs) elif method == "load_model": return self._load_model(*args, **kwargs) elif method == "sleep" or method == "wake_up": raise ValueError("wake_up and sleep should not be called through ZeroMQ") else: return self.inference_engine.execute_method(method, *args, **kwargs) async def resume(self, tags: list[str]): """Resume rollout weights or kv cache in GPU memory. Args: tags: weights or kv_cache. """ if self.config.free_cache_engine: self.inference_engine.wake_up(tags=tags) async def release(self): """Release weights and kv cache in GPU memory.""" if self.config.free_cache_engine: self.inference_engine.sleep(level=self.sleep_level) async def update_weights(self, weights: Generator[tuple[str, torch.Tensor], None, None], **kwargs): """Update the weights of the rollout model. Args: weights: A generator that yields the name of the weight tensor and the tensor itself. """ peft_config, base_sync_done = kwargs.get("peft_config", None), kwargs.get("base_sync_done", False) if peft_config and base_sync_done: # In async mode, make sure the old lora is removed before adding the new one self.inference_engine.worker.remove_lora(VLLM_LORA_INT_ID) lora_tensors = dict(weights) # materialize the generator once so it can be sized and loaded lora_request = TensorLoRARequest( lora_name=VLLM_LORA_NAME, lora_int_id=VLLM_LORA_INT_ID, lora_path=VLLM_LORA_PATH, peft_config=asdict(peft_config), lora_tensors=lora_tensors, ) self.inference_engine.worker.add_lora(lora_request) logger.info(f"vLLM load weights, loaded_params: {len(lora_tensors)}") else: from verl.utils.vllm.patch import patch_vllm_moe_model_weight_loader model = self.inference_engine.worker.model_runner.model patch_vllm_moe_model_weight_loader(model) model.load_weights(weights) def generate_sequences(self, prompts: DataProto) -> DataProto: """Batch generate sequences in sync mode.""" raise NotImplementedError # ==================== server mode public methods ==================== def get_zeromq_address(self): return self.address ================================================ FILE: verl_distillation/verl/workers/sharding_manager/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: verl_distillation/verl/workers/sharding_manager/base.py ================================================ # Copyright 2024 Bytedance Ltd. 
and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Sharding manager to implement HybridEngine """ from verl import DataProto class BaseShardingManager: def __init__(self): self.timing = {} def __enter__(self): pass def __exit__(self, exc_type, exc_value, traceback): pass def preprocess_data(self, data: DataProto) -> DataProto: return data def postprocess_data(self, data: DataProto) -> DataProto: return data ================================================ FILE: verl_distillation/verl/workers/sharding_manager/fsdp_sglang.py ================================================ # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
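# ---------------------------------------------------------------------------
# Editor's note (illustrative sketch, not part of the original source): the
# update_weights() method in this file streams FSDP parameters to SGLang in
# size-bounded buckets of `update_weights_bucket_megabytes`; the byte budget
# is computed with `int(megabytes) << 20`, since 1 MiB == 2**20 bytes. The
# real bucketing lives in
# verl.workers.rollout.sglang_rollout.utils.get_named_tensor_buckets; a
# simplified stand-alone version (assuming torch.Tensor inputs with the usual
# element_size()/numel() API) might look like:

def _demo_bucket_named_tensors(named_tensors, bucket_megabytes):
    """Yield lists of (name, tensor) pairs whose total byte size stays under budget."""
    bucket_bytes = int(bucket_megabytes) << 20  # MiB -> bytes
    bucket, used = [], 0
    for name, tensor in named_tensors:
        nbytes = tensor.element_size() * tensor.numel()
        if bucket and used + nbytes > bucket_bytes:
            # current bucket is full: flush it and start a new one
            yield bucket
            bucket, used = [], 0
        bucket.append((name, tensor))
        used += nbytes
    if bucket:
        yield bucket
# ---------------------------------------------------------------------------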
import asyncio import logging import os from sglang.srt.entrypoints.engine import Engine from sglang.srt.weight_sync.utils import update_weights as sgl_update_weights from torch.distributed.device_mesh import DeviceMesh from torch.distributed.fsdp.api import FullStateDictConfig, ShardedStateDictConfig, StateDictType from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from verl import DataProto from verl.protocol import all_gather_data_proto from verl.utils.device import get_device_id, get_torch_device, set_expandable_segments from verl.utils.fsdp_utils import fsdp_version, load_fsdp_model_to_gpu, offload_fsdp_model_to_cpu from verl.utils.import_utils import deprecated from verl.utils.memory_utils import aggressive_empty_cache from verl.utils.model import convert_weight_keys from verl.utils.profiler import GPUMemoryLogger, log_gpu_memory_usage, simple_timer from verl.utils.torch_functional import check_device_is_available from verl.workers.rollout.sglang_rollout.utils import get_named_tensor_buckets from .base import BaseShardingManager # from vllm.distributed import parallel_state as sglang_ps logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) @deprecated() class FSDPSGLangShardingManager(BaseShardingManager): @check_device_is_available() def __init__( self, module: FSDP, inference_engine: Engine, model_config, rollout_config, full_params: bool = False, device_mesh: DeviceMesh = None, offload_param: bool = False, multi_stage_wake_up: bool = False, ): self.module = module self.inference_engine = inference_engine self.model_config = model_config self.rollout_config = rollout_config self.device_mesh = device_mesh self.offload_param = offload_param self.multi_stage_wake_up = multi_stage_wake_up # Full params self.full_params = full_params if full_params and fsdp_version(self.module) == 1: FSDP.set_state_dict_type( self.module, state_dict_type=StateDictType.FULL_STATE_DICT, state_dict_config=FullStateDictConfig() ) elif fsdp_version(self.module) == 1: FSDP.set_state_dict_type( self.module, state_dict_type=StateDictType.SHARDED_STATE_DICT, state_dict_config=ShardedStateDictConfig(), ) self.tp_size = self.device_mesh["infer_tp"].size() self.tp_rank = self.device_mesh["infer_tp"].get_local_rank() # Note that torch_random_states may be different on each dp rank self.torch_random_states = get_torch_device().get_rng_state() # get a random rng states if self.device_mesh is not None: gen_dp_rank = self.device_mesh["dp"].get_local_rank() get_torch_device().manual_seed(gen_dp_rank + 1000) # make sure all tp ranks have the same random states self.gen_random_states = get_torch_device().get_rng_state() get_torch_device().set_rng_state(self.torch_random_states) else: self.gen_random_states = None @GPUMemoryLogger(role="FSDPSGLangShardingManager enter", logger=logger) def __enter__(self): self.timing = {} with simple_timer("reshard", self.timing): loop = asyncio.get_event_loop() loop.run_until_complete(self.wake_up()) @GPUMemoryLogger(role="FSDPSGLangShardingManager exit", logger=logger) def __exit__(self, exc_type, exc_value, traceback): loop = asyncio.get_event_loop() loop.run_until_complete(self.sleep()) async def update_weights(self, params): named_tensors = [(k, v) for k, v in params.items()] update_weights_bucket_bytes = int(self.rollout_config.update_weights_bucket_megabytes) << 20 for params_batch in get_named_tensor_buckets(named_tensors, update_weights_bucket_bytes): await sgl_update_weights( 
engine=self.inference_engine, params_batch=params_batch, device_mesh_key="infer_tp", device_mesh=self.device_mesh, ) if self.device_mesh["infer_tp"].get_local_rank() == 0: await self.inference_engine.flush_cache() async def release_memory(self): if self.device_mesh["infer_tp"].get_local_rank() == 0 and self.rollout_config.free_cache_engine: if self.multi_stage_wake_up: await self.inference_engine.release_memory_occupation(tags=["kv_cache", "weights"]) else: await self.inference_engine.release_memory_occupation() log_gpu_memory_usage("After release memory occupation in sharding manager", logger=logger) @GPUMemoryLogger(role="FSDPSGLangShardingManager enter", logger=logger) async def wake_up(self): aggressive_empty_cache(force_sync=True) log_gpu_memory_usage("Before state_dict() in sharding manager memory", logger=logger) if self.offload_param: load_fsdp_model_to_gpu(self.module) params = self.module.state_dict() log_gpu_memory_usage("After state_dict() in sharding manager memory", logger=logger) device = get_device_id() # used when fsdp2 set cpu_offload_policy params = { k: v.to(device, non_blocking=True) if fsdp_version(self.module) == 2 else v for k, v in params.items() } # convert weight keys to match the model config params = convert_weight_keys(params, getattr(self.module, "_fsdp_wrapped_module", self.module)) if self.offload_param: offload_fsdp_model_to_cpu(self.module) log_gpu_memory_usage("After offload_param in sharding manager memory", logger=logger) # sglang needs to set _set_allocator_settings to False logger.debug("fsdp sglang sharding_manager _set_allocator_settings to False") # Note(chenyang): SGLang is using torch memory pool to manage memory # which is incompatible with expandable segments set_expandable_segments(False) if self.device_mesh["infer_tp"].get_local_rank() == 0 and self.rollout_config.free_cache_engine: if self.multi_stage_wake_up: await self.inference_engine.resume_memory_occupation(tags=["weights"]) log_gpu_memory_usage("Before resume SGLang weights in sharding manager", logger=logger) else: await self.inference_engine.resume_memory_occupation() log_gpu_memory_usage("Before resume SGLang weights + kv_cache in sharding manager", logger=logger) # Copy, not share memory await self.update_weights(params) log_gpu_memory_usage("After sync model weights in sharding manager", logger=logger) del params aggressive_empty_cache(force_sync=True) log_gpu_memory_usage("After del state_dict and empty_cache in sharding manager", logger=logger) if ( self.multi_stage_wake_up and self.rollout_config.free_cache_engine and self.device_mesh["infer_tp"].get_local_rank() == 0 ): await self.inference_engine.resume_memory_occupation(tags=["kv_cache"]) log_gpu_memory_usage("After resume SGLang kv_cache in sharding manager", logger=logger) # important: need to manually set the random states of each tp to be identical. 
if self.device_mesh is not None: self.torch_random_states = get_torch_device().get_rng_state() get_torch_device().set_rng_state(self.gen_random_states) @GPUMemoryLogger(role="FSDPSGLangShardingManager exit", logger=logger) async def sleep(self): if self.rollout_config.free_cache_engine: log_gpu_memory_usage("Before SGLang offload in sharding manager", logger=logger) await self.release_memory() log_gpu_memory_usage("After SGLang offload in sharding manager", logger=logger) self.module.train() # add empty cache after each compute aggressive_empty_cache(force_sync=True) # always set _set_allocator_settings to True when using sglang # it is required by fsdp2 to avoid oom logger.debug("fsdp sglang sharding_manager _set_allocator_settings to True") set_expandable_segments(True) # restore random states if self.device_mesh is not None: self.gen_random_states = get_torch_device().get_rng_state() get_torch_device().set_rng_state(self.torch_random_states) def preprocess_data(self, data: DataProto) -> DataProto: """All-gather across the tp group so that each rank has identical input.""" if self.tp_size == 1: return data # TODO: Current impl doesn't consider FSDP with torch micro-dp group = self.device_mesh["infer_tp"].get_group() all_gather_data_proto(data=data, process_group=group) return data def postprocess_data(self, data: DataProto) -> DataProto: """Get the chunk of data for this tp rank since we do all-gather in preprocess.""" if self.tp_size == 1: return data return data.chunk(chunks=self.tp_size)[self.tp_rank] ================================================ FILE: verl_distillation/verl/workers/sharding_manager/fsdp_ulysses.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
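# Editor's note (illustrative, not part of the original source): the manager
# below all-gathers a batch across the Ulysses sequence-parallel (SP) group in
# preprocess_data and takes this rank's chunk back in postprocess_data. The
# round-trip invariant, sketched with plain lists (chunk() splits contiguously;
# in reality each rank contributes its own distinct shard to the gather):
#
#     sp_size, sp_rank = 4, 2                       # hypothetical SP group of 4, rank 2
#     local = ["a", "b"]                            # this rank's shard before the gather
#     gathered = local * sp_size                    # stand-in for all_gather_data_proto
#     n = len(gathered) // sp_size
#     chunks = [gathered[i * n:(i + 1) * n] for i in range(sp_size)]  # stand-in for data.chunk
#     assert chunks[sp_rank] == local               # each rank recovers a shard of equal size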
""" Contains a resharding manager that binds weights from FSDP zero3 to XPerfGPT """ from torch.distributed.device_mesh import DeviceMesh from verl import DataProto from verl.protocol import all_gather_data_proto from verl.utils.ulysses import get_ulysses_sequence_parallel_group, set_ulysses_sequence_parallel_group from .base import BaseShardingManager class FSDPUlyssesShardingManager(BaseShardingManager): """ Sharding manager to support data resharding when using FSDP + Ulysses """ def __init__(self, device_mesh: DeviceMesh): super().__init__() self.device_mesh = device_mesh self.seed_offset = 12345 def __enter__(self): if self.device_mesh is not None: # We have a global SP group # so we have to change to use model-specific sp group self.prev_sp_group = get_ulysses_sequence_parallel_group() set_ulysses_sequence_parallel_group(self.device_mesh["sp"].get_group()) # TODO: check how to set seed for each model def __exit__(self, exc_type, exc_value, traceback): # restore random states if self.device_mesh is not None: # revert to previous sp group set_ulysses_sequence_parallel_group(self.prev_sp_group) # TODO: check how to set seed for each model def preprocess_data(self, data: DataProto) -> DataProto: """ AllGather data from sp region This is because the data is first sharded along the FSDP dimension as we utilize the DP_COMPUTE In Ulysses, we need to make sure the same data is used across a SP group """ if self.device_mesh is not None: group = self.device_mesh["sp"].get_group() all_gather_data_proto(data=data, process_group=group) return data def postprocess_data(self, data: DataProto) -> DataProto: """ Split the data to follow FSDP partition """ if self.device_mesh is not None: sp_size = self.device_mesh["sp"].size() sp_rank = self.device_mesh["sp"].get_local_rank() data = data.chunk(chunks=sp_size)[sp_rank] return data ================================================ FILE: verl_distillation/verl/workers/sharding_manager/fsdp_vllm.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import inspect import logging import os import time from collections import OrderedDict from torch.distributed.device_mesh import DeviceMesh from torch.distributed.fsdp.api import FullStateDictConfig, ShardedStateDictConfig, StateDictType from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP try: # for torch 2.5+ from torch.distributed.tensor import DTensor except ImportError: from torch.distributed._tensor import DTensor from dataclasses import asdict from verl import DataProto from verl.protocol import all_gather_data_proto from verl.third_party.vllm import LLM, VLLM_SLEEP_LEVEL from verl.third_party.vllm import parallel_state as vllm_ps from verl.utils.device import get_device_id, get_device_name, get_torch_device, set_expandable_segments from verl.utils.fsdp_utils import ( fsdp_version, layered_summon_lora_params, load_fsdp_model_to_gpu, offload_fsdp_model_to_cpu, ) from verl.utils.import_utils import deprecated from verl.utils.model import check_exclude_modules, check_target_modules, convert_weight_keys from verl.utils.profiler import GPUMemoryLogger, log_gpu_memory_usage, simple_timer from verl.utils.torch_functional import check_device_is_available from verl.utils.vllm import TensorLoRARequest, VLLMHijack, is_version_ge from .base import BaseShardingManager logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) @deprecated() class FSDPVLLMShardingManager(BaseShardingManager): """Sharding manager for FSDP models with vLLM inference engine integration. Manages parameter synchronization between FSDP training models and vLLM inference engines, handling both full parameters and LoRA adapters with efficient memory management and device placement. """ @check_device_is_available() def __init__( self, module: FSDP, inference_engine: LLM, model_config, rollout_config, full_params: bool = False, device_mesh: DeviceMesh = None, offload_param: bool = False, load_format: str = "dummy_hf", layered_summon: bool = True, ): self.module = module # For AsyncLLM, inference_engine and model_runner are defer initialized in vLLMAsyncRollout.load_model self.inference_engine = inference_engine # self.model_runner = inference_engine.llm_engine.model_executor.driver_worker.worker.model_runner if # inference_engine else None self.model_runner = ( self.inference_engine.llm_engine.model_executor.driver_worker.worker.model_runner if self.inference_engine else None ) self.model_config = model_config self.rollout_config = rollout_config self.device_mesh = device_mesh self.offload_param = offload_param self.load_format = load_format self.layered_summon = layered_summon # Full params self.full_params = full_params if full_params and fsdp_version(self.module) == 1: FSDP.set_state_dict_type( self.module, state_dict_type=StateDictType.FULL_STATE_DICT, state_dict_config=FullStateDictConfig() ) elif fsdp_version(self.module) == 1: FSDP.set_state_dict_type( self.module, state_dict_type=StateDictType.SHARDED_STATE_DICT, state_dict_config=ShardedStateDictConfig(), ) self.tp_size = self.device_mesh["infer_tp"].size() self.tp_rank = self.device_mesh["infer_tp"].get_local_rank() # Note that torch_random_states may be different on each dp rank self.torch_random_states = get_torch_device().get_rng_state() # get a random rng states if self.device_mesh is not None: gen_dp_rank = self.device_mesh["dp"].get_local_rank() get_torch_device().manual_seed(gen_dp_rank + 1000) # make sure all tp ranks have the same random states self.gen_random_states = 
get_torch_device().get_rng_state() get_torch_device().set_rng_state(self.torch_random_states) else: self.gen_random_states = None self.base_sync_done: bool = "dummy" not in load_format if is_version_ge(pkg="vllm", minver="0.7.3"): VLLMHijack.hijack() @GPUMemoryLogger(role="fsdp vllm sharding_manager", logger=logger) def __enter__(self): def __collect_lora_params() -> OrderedDict: """ Collect lora params, or full params if the base model is not yet ready in vllm. Only used when isinstance(self.module._fsdp_wrapped_module, PeftModel). """ from peft.utils.save_and_load import get_peft_model_state_dict lora_params = OrderedDict() peft_model = getattr(self.module, "_fsdp_wrapped_module", self.module) if fsdp_version(self.module) > 0: if self.layered_summon: if not self.base_sync_done: raise ValueError( "To use layered_summon, you must make sure the base model is preloaded in vllm, e.g. set " "rollout.load_format=safetensors" ) lora_params = layered_summon_lora_params(self.module) else: with FSDP.summon_full_params(self.module, writeback=False): if self.base_sync_done: lora_params = get_peft_model_state_dict(peft_model) lora_params = { name: param.full_tensor().detach().cpu() if hasattr(param, "full_tensor") else param.detach().cpu() for name, param in lora_params.items() } else: model = peft_model.base_model.model orig_dev = "cpu" if "cpu" in str(next(model.parameters()).device) else get_device_name() model = model.to("cpu") for name, param in model.state_dict().items(): if any(x in name for x in ["_flat_param", "lora_"]): continue name = name.replace("_fsdp_wrapped_module.", "").replace(".base_layer", "") lora_params[name] = ( param.full_tensor().detach().cpu() if hasattr(param, "full_tensor") else param.detach().cpu() ) model = model.to(orig_dev) get_torch_device().empty_cache() else: if self.base_sync_done: lora_params = get_peft_model_state_dict(peft_model) else: model = peft_model.base_model.model orig_dev = "cpu" if "cpu" in str(next(model.parameters()).device) else get_device_name() model = model.to("cpu") for name, param in model.state_dict().items(): if any(x in name for x in ["_flat_param", "lora_"]): continue name = name.replace("_fsdp_wrapped_module.", "").replace(".base_layer", "") lora_params[name] = param.detach().cpu() model = model.to(orig_dev) return lora_params # NOTE: Basically, we only need `get_torch_device().empty_cache()` before vllm wake_up and # after vllm sleep, since vllm has its own caching memory allocator CuMemAllocator. # Outside the vllm scope, we should avoid emptying the cache and let pytorch use its caching memory # to speed up memory allocations. 
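# (Editor's sketch, not part of the original source: with free_cache_engine
# enabled, the enter/exit protocol around vLLM's CuMemAllocator in this file
# amounts to
#
#     engine.wake_up(tags=["weights"])        # restore weight buffers only
#     ...sync the FSDP state dict into the engine via update_params...
#     engine.wake_up(tags=["kv_cache"])       # then restore the KV cache
#     ...generate rollouts...
#     engine.sleep(level=VLLM_SLEEP_LEVEL)    # release memory on exit
#
# where the tags= form is only used when the installed vLLM supports it, as
# checked via inspect.signature below.) References for the allocator interplay: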
# # pytorch: https://pytorch.org/docs/stable/notes/cuda.html#memory-management # vllm: https://github.com/vllm-project/vllm/blob/v0.7.3/vllm/device_allocator/cumem.py#L103 self.timing = {} with simple_timer("reshard", self.timing): get_torch_device().empty_cache() log_gpu_memory_usage("Before state_dict() in sharding manager memory", logger=logger) if self.offload_param: load_fsdp_model_to_gpu(self.module) peft_config = None peft_model = getattr(self.module, "_fsdp_wrapped_module", self.module) if hasattr(peft_model, "peft_config"): peft_config = peft_model.peft_config.get("default", None) params = __collect_lora_params() else: params = self.module.state_dict() params = convert_weight_keys(params, getattr(self.module, "_fsdp_wrapped_module", self.module)) if self.offload_param: offload_fsdp_model_to_cpu(self.module) log_gpu_memory_usage("After state_dict() in sharding manager memory", logger=logger) # vllm needs to set _set_allocator_settings to False logger.debug("fsdp vllm sharding_manager _set_allocator_settings to False") set_expandable_segments(False) if self.rollout_config.free_cache_engine: if "tags" in inspect.signature(self.inference_engine.wake_up).parameters: self.inference_engine.wake_up(tags=["weights"]) else: self.inference_engine.wake_up() # update model params self.update_params(params, peft_config=peft_config) log_gpu_memory_usage("After sync model weights in sharding manager", logger=logger) del params get_torch_device().empty_cache() if ( self.rollout_config.free_cache_engine and "tags" in inspect.signature(self.inference_engine.wake_up).parameters ): self.inference_engine.wake_up(tags=["kv_cache"]) log_gpu_memory_usage("After del state_dict and empty_cache in sharding manager", logger=logger) # important: need to manually set the random states of each tp to be identical. if self.device_mesh is not None: self.torch_random_states = get_torch_device().get_rng_state() get_torch_device().set_rng_state(self.gen_random_states) @GPUMemoryLogger(role="fsdp vllm sharding_manager", logger=logger) def __exit__(self, exc_type, exc_value, traceback): if self.rollout_config.free_cache_engine: self.inference_engine.sleep(level=VLLM_SLEEP_LEVEL) self.module.train() # add empty cache after each compute get_torch_device().empty_cache() # _set_allocator_settings to True is required by fsdp2 to avoid oom logger.debug("fsdp vllm sharding_manager _set_allocator_settings to True") set_expandable_segments(True) # restore random states if self.device_mesh is not None: self.gen_random_states = get_torch_device().get_rng_state() get_torch_device().set_rng_state(self.torch_random_states) @GPUMemoryLogger(role="fsdp vllm sharding_manager", logger=logger) def preprocess_data(self, data: DataProto) -> DataProto: """All-gather across the tp group so that each rank has identical input.""" if self.tp_size == 1: return data # TODO: Current impl doesn't consider FSDP with torch micro-dp group = vllm_ps.get_tensor_model_parallel_group().device_group all_gather_data_proto(data=data, process_group=group) return data @GPUMemoryLogger(role="fsdp vllm sharding_manager", logger=logger) def postprocess_data(self, data: DataProto) -> DataProto: """Get the chunk of data for this tp rank since we do all-gather in preprocess.""" if self.tp_size == 1: return data return data.chunk(chunks=self.tp_size)[self.tp_rank] def update_params(self, updated_params, peft_config=None): """Update model parameters in the vLLM inference engine. 
Synchronizes parameters from the FSDP training model to the vLLM inference engine, handling both full model parameters and LoRA adapters with proper device placement and memory management. Args: updated_params (dict): Dictionary of parameter names to tensor values. peft_config (optional): PEFT configuration for LoRA adapters. """ model = self.model_runner.model if peft_config: if self.base_sync_done: lora_int_id = int(time.time_ns() % 0x7FFFFFFF) lora_request = TensorLoRARequest( lora_name=f"{lora_int_id}", lora_int_id=lora_int_id, lora_path="simon_lora_path", peft_config=asdict(peft_config), lora_tensors=updated_params, ) self.inference_engine.llm_engine.add_lora(lora_request) logger.info(f"vLLM load weights, loaded_params: {len(updated_params)}") return else: def replace_lora_wrapper(k): """Replace LoRA parameter keys with base layer equivalents. Transforms LoRA parameter names to their corresponding base layer names for proper weight loading in vLLM when base model sync is not done. Args: k (str): Original parameter key name. Returns: str: Transformed parameter key for base layer. """ stacked_params = ["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"] if k.endswith(".weight"): module_k = k[: -len(".weight")] if check_exclude_modules(peft_config, module_k): return k elif any([module_k.endswith(s) for s in stacked_params]) or check_target_modules( peft_config, module_k ): return f"{module_k}.base_layer.weight" if k.endswith(".bias"): module_k = k[: -len(".bias")] if check_exclude_modules(peft_config, module_k): return k elif any([module_k.endswith(s) for s in stacked_params]) or check_target_modules( peft_config, module_k ): return f"{module_k}.base_layer.bias" return k updated_params = {replace_lora_wrapper(k): v for k, v in updated_params.items()} from verl.utils.vllm.patch import patch_vllm_moe_model_weight_loader patch_vllm_moe_model_weight_loader(model) device = get_device_id() # used when fsdp2 set cpu_offload_policy loaded_params = model.load_weights( ( (name, param.to(device, non_blocking=True).full_tensor() if isinstance(param, DTensor) else param) for name, param in updated_params.items() ) ) self.base_sync_done = True logger.info(f"vLLM load weights, loaded_params: {len(loaded_params) if loaded_params else -1}") ================================================ FILE: verl_distillation/verl/workers/sharding_manager/megatron_sglang.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This file contains a Megatron style Hybrid Engine that shares the weights of the actor with the inference engine. 
""" import asyncio import logging import os from omegaconf import DictConfig from sglang.srt.entrypoints.engine import Engine from sglang.srt.weight_sync.utils import update_weights as sgl_update_weights from torch import nn from torch.distributed.device_mesh import DeviceMesh from verl.protocol import DataProto, all_gather_data_proto from verl.utils.device import get_torch_device, set_expandable_segments from verl.utils.import_utils import deprecated from verl.utils.megatron_utils import ( load_megatron_model_to_gpu, offload_megatron_model_to_cpu, per_tensor_generator, ) from verl.utils.memory_utils import aggressive_empty_cache from verl.utils.profiler import GPUMemoryLogger, log_gpu_memory_usage, simple_timer from verl.workers.rollout.sglang_rollout.utils import get_named_tensor_buckets from .base import BaseShardingManager logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_PPO_LOGGING_LEVEL", "WARN")) """ Megatron Hybrid Engine: - During training, only the current pp stage holds the parameters - Before inference, broadcast the parameters of the current pp rank to all other pp ranks (all pp ranks holds all the parameters) - Bind the parameters to the inference engine - Do inference in tp. pp is treated as additional dp - After inference, all the parameters that doesn't belong to this pp rank is freed. """ @deprecated() class MegatronSGLangShardingManager(BaseShardingManager): """A sharding manager for Megatron-style training & inference with SGLang. This class manages the sharding of model parameters between training and inference phases in a Megatron-style parallel setup. It handles: - Loading/offloading parameters between CPU/GPU - Updating inference engine weights - Managing random states for reproducibility - Data preprocessing for distributed inference Args: actor_module (nn.ModuleList): The actor model modules inference_engine (Engine): The SGLang inference engine model_config: Configuration for the actor's model rollout_config: Configuration for rollout generation transformer_config: Transformer-specific configuration layer_name_mapping: Mapping between layer names and parameters weight_converter: Utility for converting weights between formats device_mesh (DeviceMesh | None): PyTorch device mesh for distributed training offload_param (bool): Whether to offload parameters to CPU when not in use """ def __init__( self, actor_module: nn.ModuleList, inference_engine: Engine, model_config: DictConfig, rollout_config: DictConfig, transformer_config, layer_name_mapping, weight_converter, device_mesh: DeviceMesh | None = None, offload_param: bool = False, bridge=None, ): self.actor_module = actor_module self.inference_engine = inference_engine self.model_config = model_config self.rollout_config = rollout_config self.transformer_config = transformer_config self.layer_name_mapping = layer_name_mapping self.weight_converter = weight_converter self.device_mesh = device_mesh self.bridge = bridge self.offload_param = offload_param if self.device_mesh is not None: self.infer_tp_size = self.device_mesh["infer_tp"].mesh.size()[0] else: self.infer_tp_size = self.inference_engine._tp_size # Note that torch_random_states may be different on each dp rank self.torch_random_states = get_torch_device().get_rng_state() # get a random rng states if self.device_mesh is not None: gen_dp_rank = self.device_mesh["dp"].get_local_rank() get_torch_device().manual_seed(gen_dp_rank + 1000) # make sure all tp ranks have the same random states self.gen_random_states = 
get_torch_device().get_rng_state() get_torch_device().set_rng_state(self.torch_random_states) else: self.gen_random_states = None @GPUMemoryLogger(role="MegatronSGLangShardingManager enter", logger=logger) def __enter__(self): self.timing = {} with simple_timer("reshard", self.timing): loop = asyncio.get_event_loop() loop.run_until_complete(self.wake_up()) @GPUMemoryLogger(role="MegatronSGLangShardingManager exit", logger=logger) def __exit__(self, exc_type, exc_value, traceback): loop = asyncio.get_event_loop() loop.run_until_complete(self.sleep()) async def update_weights(self, params): """ Update model weights using tensor buckets, similar to THUDM/slime's implementation. Notes: - For the best performance of `rebuild_cuda_tensor`, it is recommended to: 1. Enable `RAY_EXPERIMENTAL_NOSET_CUDA_VISIBLE_DEVICES`. 2. Manually set `CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7` when using Tensor Parallelism (TP >= 8). - See reference implementations in SLIME: - Main logic: https://github.com/THUDM/slime/blob/fb7605cc5fb09af0f9369d37f7192f12bddee577/slime/ray/ppo_actor.py#L452 - runtime envs: https://github.com/THUDM/slime/blob/fb7605cc5fb09af0f9369d37f7192f12bddee577/slime/ray/ppo_actor.py#L39 """ if self.device_mesh["infer_tp"].get_local_rank() == 0 and self.rollout_config.free_cache_engine: await self.inference_engine.resume_memory_occupation() named_tensors = params update_weights_bucket_bytes = int(self.rollout_config.update_weights_bucket_megabytes) << 20 for params_batch in get_named_tensor_buckets(named_tensors, update_weights_bucket_bytes): await sgl_update_weights( engine=self.inference_engine, params_batch=params_batch, device_mesh_key="infer_tp", device_mesh=self.device_mesh, ) if self.device_mesh["infer_tp"].get_local_rank() == 0: await self.inference_engine.flush_cache() async def release_memory(self): if self.device_mesh["infer_tp"].get_local_rank() == 0 and self.rollout_config.free_cache_engine: await self.inference_engine.release_memory_occupation() @GPUMemoryLogger(role="MegatronSGLangShardingManager enter", logger=logger) async def wake_up(self): aggressive_empty_cache(force_sync=True) if self.offload_param: load_megatron_model_to_gpu(self.actor_module, load_grad=False) if self.bridge is not None: per_tensor_param = self.bridge.export_weights(self.actor_module) else: per_tensor_param = per_tensor_generator( self.actor_module, self.model_config, self.weight_converter, self.transformer_config, self.layer_name_mapping, ) set_expandable_segments(False) await self.update_weights(per_tensor_param) if self.offload_param: offload_megatron_model_to_cpu(self.actor_module) aggressive_empty_cache(force_sync=True) # important: need to manually set the random states of each tp to be identical. 
if self.device_mesh is not None: self.torch_random_states = get_torch_device().get_rng_state() get_torch_device().set_rng_state(self.gen_random_states) @GPUMemoryLogger(role="MegatronSGLangShardingManager exit", logger=logger) async def sleep(self): if self.rollout_config.free_cache_engine: log_gpu_memory_usage("Before SGLang offload in sharding manager", logger=logger) await self.release_memory() log_gpu_memory_usage("After SGLang offload in sharding manager", logger=logger) for model in self.actor_module: model.train() # add empty cache after each compute aggressive_empty_cache(force_sync=True) set_expandable_segments(True) # restore random states if self.device_mesh is not None: self.gen_random_states = get_torch_device().get_rng_state() get_torch_device().set_rng_state(self.torch_random_states) @GPUMemoryLogger(role="megatron sglang sharding_manager", logger=logger) def preprocess_data(self, data: DataProto) -> DataProto: # DP_COMPUTE_PROTO: all training ranks are dp, the same as fsdp if self.infer_tp_size == 1: return data all_gather_data_proto(data, self.device_mesh["infer_tp"].get_group()) return data @GPUMemoryLogger(role="megatron sglang sharding_manager", logger=logger) def postprocess_data(self, data: DataProto) -> DataProto: # DP_COMPUTE_PROTO: all training ranks are dp, the same as fsdp if self.infer_tp_size == 1: return data return data.chunk(chunks=self.infer_tp_size)[self.device_mesh["infer_tp"].get_local_rank()] ================================================ FILE: verl_distillation/verl/workers/sharding_manager/megatron_vllm.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This file contains a Megatron style Hybrid Engine that shares the weights of the actor with the inference engine. 
""" import inspect import logging import os import torch import torch.distributed from megatron.core import parallel_state as mpu from omegaconf import DictConfig from torch import nn from verl import DataProto from verl.models.mcore.weight_converter import McoreToHFWeightConverterBase from verl.protocol import all_gather_data_proto from verl.third_party.vllm import LLM, VLLM_SLEEP_LEVEL from verl.third_party.vllm import parallel_state as vllm_ps from verl.utils.device import get_torch_device, set_expandable_segments from verl.utils.import_utils import deprecated from verl.utils.megatron_utils import load_megatron_model_to_gpu, offload_megatron_model_to_cpu, per_tensor_generator from verl.utils.memory_utils import aggressive_empty_cache from verl.utils.profiler import GPUMemoryLogger, log_gpu_memory_usage from verl.utils.profiler.performance import simple_timer from verl.utils.torch_functional import check_device_is_available from .base import BaseShardingManager logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) """ Megatron Hybrid Engine: - During training, only the current pp stage holds the parameters - Before inference, broadcast the parameters of the current pp rank to all other pp ranks (all pp ranks holds all the parameters) - Bind the parameters to the inference engine - Do inference in tp. pp is treated as additional dp - After inference, all the parameters that doesn't belong to this pp rank is freed. """ @deprecated() class MegatronVLLMShardingManager(BaseShardingManager): """A sharding manager that bridges Megatron-LM training with vLLM inference. This class handles the parameter sharding and communication between: - Megatron-LM's tensor/expert parallel training setup - vLLM's tensor parallel inference setup Key responsibilities: - Manages parameter broadcasting between training and inference configurations - Handles weight conversion between Megatron and HuggingFace formats - Coordinates memory management between training and inference phases - Maintains random state consistency across different parallel groups Args: actor_module (nn.ModuleList): The Megatron-LM model being trained inference_engine (LLM): The vLLM inference engine model_config: Configuration for the actor's model transformer_config: Transformer-specific configuration for the model rollout_config: Configuration for rollout layer_name_mapping: Mapping between Megatron and HF layer names weight_converter (McoreToHFWeightConverterBase): Converts weights between formats device_mesh: Device mesh for parallel operations offload_param (bool): Whether to offload parameters when not in use """ @check_device_is_available() def __init__( self, actor_module: nn.ModuleList, inference_engine: LLM, model_config: DictConfig, transformer_config, rollout_config: DictConfig, layer_name_mapping, weight_converter: McoreToHFWeightConverterBase, device_mesh, offload_param: bool = True, bridge=None, ): self.actor_module = actor_module self.inference_engine = inference_engine self.offload_param = offload_param # For AsyncLLM, inference_engine and model_runner are defer initialized in vLLMAsyncRollout.load_model self.model_runner = ( self.inference_engine.llm_engine.model_executor.driver_worker.worker.model_runner if self.inference_engine else None ) self.model_config = model_config self.transformer_config = transformer_config self.rollout_config = rollout_config self.layer_name_mapping = layer_name_mapping self.weight_converter = weight_converter self.bridge = bridge # initialize groups for 
vllm inference self.rank = torch.distributed.get_rank() self.world_size = torch.distributed.get_world_size() self.device_mesh = device_mesh self.infer_tp_size = self.device_mesh["infer_tp"].size() self.infer_tp_rank = self.device_mesh["infer_tp"].get_local_rank() self.train_tp_size = mpu.get_tensor_model_parallel_world_size() self.train_tp_rank = mpu.get_tensor_model_parallel_rank() self.train_tp_group = mpu.get_tensor_model_parallel_group() self.train_ep_size = mpu.get_expert_model_parallel_world_size() self.train_ep_rank = mpu.get_expert_model_parallel_rank() self.train_ep_group = mpu.get_expert_model_parallel_group() self.train_etp_size = mpu.get_expert_tensor_parallel_world_size() self.train_etp_rank = mpu.get_expert_tensor_parallel_rank() self.train_etp_group = mpu.get_expert_tensor_parallel_group() self.need_tp_reshard = self.train_tp_size != self.infer_tp_size self.train_tp_larger = self.train_tp_size > self.infer_tp_size self.torch_random_states = get_torch_device().get_rng_state() if self.device_mesh is not None: gen_dp_rank = self.device_mesh["dp"].get_local_rank() get_torch_device().manual_seed(gen_dp_rank + 1000) # make sure all tp ranks have the same random states self.gen_random_states = get_torch_device().get_rng_state() get_torch_device().set_rng_state(self.torch_random_states) else: self.gen_random_states = None @GPUMemoryLogger(role="megatron vllm sharding_manager", logger=logger) def __enter__(self): self.timing = {} with simple_timer("reshard", self.timing): aggressive_empty_cache(force_sync=True) log_gpu_memory_usage("Before state_dict() in sharding manager memory", logger=logger) if self.offload_param: load_megatron_model_to_gpu(self.actor_module, load_grad=False) set_expandable_segments(False) if self.rollout_config.free_cache_engine: if "tags" in inspect.signature(self.inference_engine.wake_up).parameters: self.inference_engine.wake_up(tags=["weights"]) else: self.inference_engine.wake_up() if self.bridge is not None: per_tensor_param = self.bridge.export_weights(self.actor_module) else: per_tensor_param = per_tensor_generator( self.actor_module, self.model_config, self.weight_converter, self.transformer_config, self.layer_name_mapping, ) model = self.model_runner.model from verl.utils.vllm.patch import patch_vllm_moe_model_weight_loader patch_vllm_moe_model_weight_loader(model) loaded_params = model.load_weights(per_tensor_param) info = f"vLLM load weights, loaded_params: {len(loaded_params)}" logger.info(info) if self.offload_param: offload_megatron_model_to_cpu(self.actor_module) aggressive_empty_cache(force_sync=True) if ( self.rollout_config.free_cache_engine and "tags" in inspect.signature(self.inference_engine.wake_up).parameters ): self.inference_engine.wake_up(tags=["kv_cache"]) # important: need to manually set the random states of each tp to be identical. 
if self.device_mesh is not None: self.torch_random_states = get_torch_device().get_rng_state() get_torch_device().set_rng_state(self.gen_random_states) @GPUMemoryLogger(role="megatron vllm sharding_manager", logger=logger) def __exit__(self, exc_type, exc_value, traceback): if self.rollout_config.free_cache_engine: self.inference_engine.sleep(level=VLLM_SLEEP_LEVEL) for model in self.actor_module: model.train() aggressive_empty_cache(force_sync=True) set_expandable_segments(True) # restore random states if self.device_mesh is not None: self.gen_random_states = get_torch_device().get_rng_state() get_torch_device().set_rng_state(self.torch_random_states) @GPUMemoryLogger(role="megatron vllm sharding_manager", logger=logger) def preprocess_data(self, data: DataProto) -> DataProto: # DP_COMPUTE_PROTO: all training ranks are dp, the same as fsdp if self.infer_tp_size == 1: return data # TODO: Current impl doesn't consider FSDP with torch micro-dp group = vllm_ps.get_tensor_model_parallel_group().device_group all_gather_data_proto(data=data, process_group=group) return data @GPUMemoryLogger(role="megatron vllm sharding_manager", logger=logger) def postprocess_data(self, data: DataProto) -> DataProto: # DP_COMPUTE_PROTO: all training ranks are dp, the same as fsdp if self.infer_tp_size == 1: return data return data.chunk(chunks=self.infer_tp_size)[self.infer_tp_rank] ================================================ FILE: verl_rl/CONTRIBUTING.md ================================================ # Contributing to verl Thank you for considering a contribution to verl! We welcome contributions of any kind - bug fixes, enhancements, documentation improvements, or even just feedback. Whether you're an experienced developer or this is your first open-source project, your help is invaluable. Your support can take many forms: - Report issues or unexpected behaviors. - Suggest or implement new features. - Improve or expand documentation. - Review pull requests and assist other contributors. - Spread the word: share verl in blog posts, social media, or give the repo a ⭐. ## Finding Issues to Contribute Looking for ways to dive in? Check out these issues: - [Good first issues](https://github.com/volcengine/verl/issues?q=is%3Aissue%20state%3Aopen%20label%3A%22good%20first%20issue%22) - [Call for contribution](https://github.com/volcengine/verl/issues?q=is%3Aissue%20state%3Aopen%20label%3A%22call%20for%20contribution%22) Furthermore, you can learn the development plan and roadmap via [RFC](https://github.com/volcengine/verl/issues?q=is%3Aissue%20state%3Aopen%20label%3ARFC) and [Roadmap](https://github.com/volcengine/verl/issues?q=state%3Aopen%20label%3A%22roadmap%22). ## Developing - **Python-only**: install verl via `pip install -e .[test,vllm]` or `pip install -e .[test,sglang]` and iterate quickly. For full dependency setup, check out the verl [installation doc](https://verl.readthedocs.io/en/latest/start/install.html). ## Code Linting and Formatting We rely on pre-commit to keep our code consistent. To set it up: ```bash pip install pre-commit pre-commit install # for staged changes pre-commit run # for all files in the repo pre-commit run --all-files # run a specific hook with pre-commit # pre-commit run --all-files --show-diff-on-failure --color=always pre-commit run --all-files --show-diff-on-failure --color=always ruff pre-commit run --all-files --show-diff-on-failure --color=always autogen-trainer-cfg ``` ## Testing Our test suites run on GitHub Actions. 
================================================
FILE: verl_rl/CONTRIBUTING.md
================================================
# Contributing to verl

Thank you for considering a contribution to verl! We welcome contributions of any kind - bug fixes, enhancements, documentation improvements, or even just feedback. Whether you're an experienced developer or this is your first open-source project, your help is invaluable.

Your support can take many forms:
- Report issues or unexpected behaviors.
- Suggest or implement new features.
- Improve or expand documentation.
- Review pull requests and assist other contributors.
- Spread the word: share verl in blog posts, social media, or give the repo a ⭐.

## Finding Issues to Contribute

Looking for ways to dive in? Check out these issues:
- [Good first issues](https://github.com/volcengine/verl/issues?q=is%3Aissue%20state%3Aopen%20label%3A%22good%20first%20issue%22)
- [Call for contribution](https://github.com/volcengine/verl/issues?q=is%3Aissue%20state%3Aopen%20label%3A%22call%20for%20contribution%22)

Furthermore, you can learn the development plan and roadmap via [RFC](https://github.com/volcengine/verl/issues?q=is%3Aissue%20state%3Aopen%20label%3ARFC) and [Roadmap](https://github.com/volcengine/verl/issues?q=state%3Aopen%20label%3A%22roadmap%22).

## Developing

- **Python-only**: install verl via `pip install -e .[test,vllm]` or `pip install -e .[test,sglang]` and iterate quickly. For full dependency setup, check out the verl [installation doc](https://verl.readthedocs.io/en/latest/start/install.html).

## Code Linting and Formatting

We rely on pre-commit to keep our code consistent. To set it up:

```bash
pip install pre-commit
pre-commit install

# for staged changes
pre-commit run

# for all files in the repo
pre-commit run --all-files

# run a specific hook with pre-commit
# pre-commit run --all-files --show-diff-on-failure --color=always
pre-commit run --all-files --show-diff-on-failure --color=always ruff
pre-commit run --all-files --show-diff-on-failure --color=always autogen-trainer-cfg
```

## Testing

Our test suites run on GitHub Actions. Check these workflows for details:
- [GPU unit tests](https://github.com/volcengine/verl/blob/main/.github/workflows/gpu_unit_tests.yml)
- [CPU unit tests](https://github.com/volcengine/verl/blob/main/.github/workflows/cpu_unit_tests.yml)
- [vLLM tests](https://github.com/volcengine/verl/blob/main/.github/workflows/vllm.yml)
- [SGLang tests](https://github.com/volcengine/verl/blob/main/.github/workflows/sgl.yml)

### Adding CI tests

If possible, please add CI test(s) for your new feature:

1. Find the most relevant workflow yml file, which usually corresponds to a `hydra` default config (e.g. `ppo_trainer`, `ppo_megatron_trainer`, `sft_trainer`, etc).
2. Add related path patterns to the `paths` section if not already included.
3. Minimize the workload of the test script(s) (see existing scripts for examples).

## Building the Docs

```
# Ensure verl is on your PYTHONPATH, e.g.:
pip install -e .[test]

# Install documentation dependencies
pip install -r requirements-docs.txt

# Generate HTML docs
make clean
make html

# Preview locally
python -m http.server -d _build/html/
```

Open your browser at http://localhost:8000 to explore the docs.

## Pull Requests & Code Reviews

Thanks for submitting a PR! To streamline reviews:
- Follow our Pull Request Template for title format and checklist.
- Adhere to our pre-commit lint rules and ensure all checks pass.
- Update docs for any user-facing changes.
- Add or update tests in the CI workflows, or explain why tests aren't applicable.

## License

See the [LICENSE](https://github.com/volcengine/verl/blob/main/LICENSE) file for full details.

## Thank You

We appreciate your contributions to verl. Your efforts help make the project stronger and more user-friendly. Happy coding!

================================================
FILE: verl_rl/LICENSE
================================================
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.

"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:

(a) You must give any other recipients of the Work or Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.

You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.

8. Limitation of Liability.
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

================================================
FILE: verl_rl/README.md
================================================
# OneRec RL Training

Reinforcement learning training for the OneRec recommendation model, based on the verl framework.

## Installation

### 1. Configure hostfile (multi-node)

```bash
cat > /etc/mpi/hostfile << EOF
192.168.1.100 slots=8
192.168.1.101 slots=8
192.168.1.102 slots=8
EOF
```

Note: `slots=N` specifies the number of GPUs available on each node.

### 2. Install dependencies

```bash
# Single node
bash deploy_env.sh

# Multi-node
bash deploy_env.sh --all-nodes
```

### 3. Start Ray cluster

```bash
bash init_ray_cluster.sh
```
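As a quick sanity check of the hostfile above, here is a small sketch (illustrative only, not shipped with this repo) that sums the `slots=N` entries to get the cluster's total GPU count:

```python
# Illustrative helper (not part of verl_rl): total GPUs declared in an MPI hostfile.
def total_gpus(hostfile_path="/etc/mpi/hostfile"):
    total = 0
    with open(hostfile_path) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith("#"):
                continue  # skip blanks and comments
            for field in line.split()[1:]:  # first field is the host address
                if field.startswith("slots="):
                    total += int(field.split("=", 1)[1])
    return total

# For the three-node example above, total_gpus() returns 24.
```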
## Quick Start

### Data Format

We use SFT data from five `*_rec` tasks: `video_rec`, `interactive_rec`, `label_cond_rec`, `ad_rec`, `product_rec`. See [data/README.md](../data/README.md) for the detailed data format specification.

### Start Training

```bash
cd verl_rl
export BASE_MODEL="/path/to/your/model"
bash recipe/onerec/run_grpo.sh 2>&1 | tee logs/train_$(date +%Y%m%d_%H%M%S).log
```

## Configuration

### Model

| Parameter | Default | Description |
|-----------|---------|-------------|
| `BASE_MODEL` | - | Model path |
| `ROLLOUT_TP_SIZE` | 1 | Tensor parallel size |

### Training

| Parameter | Default | Description |
|-----------|---------|-------------|
| `LEARNING_RATE` | 2e-6 | Learning rate |
| `KL_LOSS_COEF` | 0.001 | KL loss coefficient |
| `TEMPERATURE` | 1 | Sampling temperature |

### Rollout

| Parameter | Default | Description |
|-----------|---------|-------------|
| `ROLLOUT_N` | 1 | Samples per prompt |
| `STAGE2_BEAM_SIZE` | 32 | Beam search width |
| `RESPONSE_LENGTH` | 2048 | Max response length |
| `STAGE1_MAX_TOKENS` | 1024 | Stage 1 max tokens |
| `STAGE2_NUM_TOKENS` | 3 | Stage 2 tokens |

### Think Mode

| Parameter | Default | Description |
|-----------|---------|-------------|
| `ENABLE_THINK` | False | Enable think mode |
| `ENABLE_NONTHINK` | False | Enable non-think mode |
| `USE_FORCE_PREFIX` | False | Force prefix |

## Directory Structure

```
verl_rl/
├── deploy_env.sh          # Environment deployment
├── init_ray.sh            # Single node Ray init
├── init_ray_cluster.sh    # Multi-node Ray cluster
├── requirements.txt       # Dependencies
├── recipe/
│   └── onerec/
│       ├── run_grpo.sh    # Training script
│       └── onerec_recipe.py
└── verl/                  # verl core code
```
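The parameters in the tables above are ordinary environment variables, as the Quick Start's `export BASE_MODEL` suggests. A minimal launch sketch (values are illustrative; it assumes `run_grpo.sh` reads these names from the environment):

```python
# Illustrative launcher (not part of the repo): override a few knobs, then run the script.
import os
import subprocess

env = dict(
    os.environ,
    BASE_MODEL="/path/to/your/model",  # model path (required)
    ROLLOUT_TP_SIZE="2",               # tensor parallel size
    LEARNING_RATE="1e-6",
    ROLLOUT_N="4",                     # samples per prompt
)
subprocess.run(["bash", "recipe/onerec/run_grpo.sh"], env=env, check=True)
```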
================================================
FILE: verl_rl/README_ORIGINAL.md
================================================

👋 Hi, everyone! verl is an RL training library initiated by the ByteDance Seed team and maintained by the verl community.

[![GitHub Repo stars](https://img.shields.io/github/stars/volcengine/verl)](https://github.com/volcengine/verl/stargazers) [![Twitter](https://img.shields.io/twitter/follow/verl_project)](https://twitter.com/verl_project) [![Documentation](https://img.shields.io/badge/documentation-blue)](https://verl.readthedocs.io/en/latest/)
![seed logo](https://github.com/user-attachments/assets/c42e675e-497c-4508-8bb9-093ad4d1f216)

# verl: Volcano Engine Reinforcement Learning for LLMs

verl is a flexible, efficient and production-ready RL training library for large language models (LLMs). verl is the open-source version of the **[HybridFlow: A Flexible and Efficient RLHF Framework](https://arxiv.org/abs/2409.19256v2)** paper.

verl is flexible and easy to use with:

- **Easy extension of diverse RL algorithms**: The hybrid-controller programming model enables flexible representation and efficient execution of complex post-training dataflows. Build RL dataflows such as GRPO and PPO in a few lines of code; a framework-free sketch of the GRPO advantage follows this list.
- **Seamless integration of existing LLM infra with modular APIs**: Decouples computation and data dependencies, enabling seamless integration with existing LLM frameworks such as FSDP, Megatron-LM, vLLM, and SGLang.
- **Flexible device mapping**: Supports various placement of models onto different sets of GPUs for efficient resource utilization and scalability across different cluster sizes.
- Ready integration with popular HuggingFace models.

verl is fast with:

- **State-of-the-art throughput**: SOTA LLM training and inference engine integrations and SOTA RL throughput.
- **Efficient actor model resharding with 3D-HybridEngine**: Eliminates memory redundancy and significantly reduces communication overhead during transitions between training and generation phases.
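As a concrete illustration of one such dataflow, here is a minimal, framework-free sketch (not verl's implementation) of the group-relative advantage at the heart of GRPO: sample several responses per prompt, then normalize each response's reward against its own group:

```python
# Minimal sketch of GRPO-style group-relative advantages (not verl's API).
import numpy as np

def grpo_advantages(rewards, eps=1e-6):
    """rewards: one scalar per sampled response to the same prompt."""
    r = np.asarray(rewards, dtype=np.float64)
    # better-than-group-average responses get positive advantage
    return (r - r.mean()) / (r.std() + eps)

print(grpo_advantages([1.0, 0.0, 0.0, 1.0]))  # approximately [ 1. -1. -1.  1.]
```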

## News

- [2025/07] The first verl meetup will be held at ICML Vancouver on July 16th! Please [join us](https://lu.ma/0ek2nyao) if you are at ICML! (onsite only)
- [2025/07] verl keynote at [AWS AI Hours Singapore](https://pages.awscloud.com/aws-ai-hours-sg.html#agenda) on 7/8, verl & verl-agent project updates at [Agent for SWE meetup](https://lu.ma/e498qhsi) by LF AI & Data Singapore on 7/11.
- [2025/06] verl with Megatron backend enables large MoE models such as [DeepSeek-671b and Qwen3-236b](https://verl.readthedocs.io/en/latest/perf/dpsk.html).
- [2025/06] verl team will provide latest project updates at [PyTorch Day China](https://www.lfasiallc.com/pytorch-day-china/) on June 7th. Meet our dev team in Beijing!
- [2025/04] [Seed-Thinking-v1.5](https://github.com/ByteDance-Seed/Seed-Thinking-v1.5/blob/main/seed-thinking-v1.5.pdf) tech report is released! Trained with verl, Seed-Thinking-v1.5 achieves 86.7 on AIME 2024, 55.0 on Codeforces and 77.3 on GPQA, demonstrating excellent reasoning abilities in STEM and coding. Beyond reasoning tasks, the method demonstrates notable generalization across diverse domains.
- [2025/03] [DAPO](https://dapo-sia.github.io/) is the open-sourced SOTA RL algorithm that achieves 50 points on AIME 2024 based on the Qwen2.5-32B pre-trained model, surpassing the previous SOTA achieved by DeepSeek's GRPO (DeepSeek-R1-Zero-Qwen-32B). DAPO's training is fully powered by verl and the reproduction code is available in `recipe/dapo` now.
more...
- [2025/04] [VAPO](https://arxiv.org/pdf/2504.05118) (value-based augmented PPO) paper covers our latest RL method for reasoning models. Trained from the Qwen-32B-base model, VAPO achieves 60.4 on AIME 2024, outperforming DAPO-32B.
- [2025/05] [PF-PPO](https://arxiv.org/abs/2409.06957), accepted to ICML 2025, is now supported in verl! PF-PPO enhances policy learning efficiency and robustness by filtering potentially noisy reward signals and reusing high-quality experiences via a replay buffer.
- [2025/04] We will give a tutorial about latest post-training techniques and programming guide for verl at [ICLR 2025 Expo](https://iclr.cc/virtual/2025/calendar?filter_events=Expo+Talk+Panel&filter_rooms=), [SCI-FM workshop](https://open-foundation-model.github.io/) and [LMSys afterparty](https://lu.ma/d23nyynm). Talk materials available [here](https://github.com/eric-haibin-lin/verl-community/tree/main/iclr25).
- [2025/03] verl v0.3.0.post1 is released! See [release note](https://github.com/volcengine/verl/releases/) for details. It achieves [~1.4x speedup](https://tongyx361.github.io/blogs/posts/verl-intro/#/verl-flexible-and-efficient-rl-for-llms) compared to prev versions.
- [2025/05] verl will be presented at [A2M Shanghai](https://a2m.msup.com.cn/home/?aid=4488&city=shanghai) on 5/16 - 5/17.
- [2025/05] verl will be presented at [GOSIM x PyTorch Day 2025](https://paris2025.gosim.org/). See you in Paris!
- [2025/03] We introduced the programming model of verl at the [vLLM Beijing Meetup](https://mp.weixin.qq.com/s/n77GibL2corAtQHtVEAzfg) and [verl intro and updates](https://github.com/eric-haibin-lin/verl-community/blob/main/slides/verl-lmsys-meetup.pdf) at the [SGLang-LMSYS Org Meetup](https://lu.ma/ntjrr7ig) in Sunnyvale mid-March.
- [2025/03] We will present verl(HybridFlow) at EuroSys 2025. See you in Rotterdam!
- [2025/02] verl v0.2.0.post2 is released!
- [2025/02] We presented verl in the Bytedance/NVIDIA/Anyscale Ray Meetup. See you in San Jose!
- [2025/01] [Doubao-1.5-pro](https://team.doubao.com/zh/special/doubao_1_5_pro) is released with SOTA-level performance on LLM & VLM. The RL scaling preview model is trained using verl, reaching OpenAI O1-level performance on math benchmarks (70.0 pass@1 on AIME).
- [2024/12] verl is presented at Ray Forward 2024. Slides available here
- [2024/12] The team presented Post-training LLMs: From Algorithms to Infrastructure at NeurIPS 2024. Slides and video available.
- [2024/10] verl is presented at Ray Summit. Youtube video available.
- [2024/08] HybridFlow (verl) is accepted to EuroSys 2025.
## Key Features

- **FSDP**, **FSDP2** and **Megatron-LM** for training.
- **vLLM**, **SGLang** and **HF Transformers** for rollout generation.
- Compatible with Hugging Face Transformers and Modelscope Hub: [Qwen-3](https://github.com/volcengine/verl/blob/main/examples/grpo_trainer/run_qwen3-8b.sh), Qwen-2.5, Llama3.1, Gemma2, DeepSeek-LLM, etc.
- Supervised fine-tuning.
- Reinforcement learning with [PPO](examples/ppo_trainer/), [GRPO](examples/grpo_trainer/), [ReMax](examples/remax_trainer/), [REINFORCE++](https://verl.readthedocs.io/en/latest/examples/config.html#algorithm), [RLOO](examples/rloo_trainer/), [PRIME](recipe/prime/), [DAPO](recipe/dapo/), [DrGRPO](recipe/drgrpo), [KL_Cov & Clip_Cov](recipe/entropy), etc.
- Support for model-based reward and function-based reward (verifiable reward) for math, [coding](https://github.com/volcengine/verl/tree/main/recipe/dapo), etc.
- Support for vision-language models (VLMs) and [multi-modal RL](examples/grpo_trainer/run_qwen2_5_vl-7b.sh) with Qwen2.5-vl, Kimi-VL.
- [Multi-turn with tool calling](https://github.com/volcengine/verl/tree/main/examples/sglang_multiturn)
- LLM alignment recipes such as [Self-play preference optimization (SPPO)](https://github.com/volcengine/verl/tree/main/recipe/sppo)
- Flash attention 2, [sequence packing](examples/ppo_trainer/run_qwen2-7b_seq_balance.sh), [sequence parallelism](examples/ppo_trainer/run_deepseek7b_llm_sp2.sh) support via DeepSpeed Ulysses, [LoRA](examples/sft/gsm8k/run_qwen_05_peft.sh), [Liger-kernel](examples/sft/gsm8k/run_qwen_05_sp2_liger.sh).
- Scales up to 671B models and hundreds of GPUs with [expert parallelism](https://github.com/volcengine/verl/pull/1467).
- Multi-GPU [LoRA RL](https://verl.readthedocs.io/en/latest/advance/ppo_lora.html) support to save memory.
- Experiment tracking with wandb, swanlab, mlflow and tensorboard.
## Upcoming Features and Changes

- Q3 Roadmap https://github.com/volcengine/verl/issues/2388
- DeepSeek 671b optimizations with Megatron https://github.com/volcengine/verl/issues/1033
- Multi-turn rollout and tool-use optimizations https://github.com/volcengine/verl/issues/1882
- [Agent integration](https://github.com/volcengine/verl/tree/main/verl/experimental/agent_loop)
- Async and off-policy architecture https://github.com/volcengine/verl/pull/2231
- List of breaking changes since v0.4 https://github.com/volcengine/verl/discussions/2270

## Getting Started

**Documentation**

**Quickstart:**
- [Installation](https://verl.readthedocs.io/en/latest/start/install.html)
- [Quickstart](https://verl.readthedocs.io/en/latest/start/quickstart.html)
- [Programming Guide](https://verl.readthedocs.io/en/latest/hybrid_flow.html) & [Tech Talk](https://hcqnc.xetlk.com/sl/3vACOK) (in Chinese)
- [PPO in verl](https://verl.readthedocs.io/en/latest/algo/ppo.html)
- [GRPO in verl](https://verl.readthedocs.io/en/latest/algo/grpo.html)

**Running a PPO example step-by-step:**
- [Prepare Data for Post-Training](https://verl.readthedocs.io/en/latest/preparation/prepare_data.html)
- [Implement Reward Function for Dataset](https://verl.readthedocs.io/en/latest/preparation/reward_function.html)
- [PPO Example Architecture](https://verl.readthedocs.io/en/latest/examples/ppo_code_architecture.html)
- [Config Explanation](https://verl.readthedocs.io/en/latest/examples/config.html)

**Reproducible algorithm baselines:**
- [RL performance on coding, math](https://verl.readthedocs.io/en/latest/algo/baseline.html)

**For code explanation and advanced usage (extension):**
- PPO Trainer and Workers
  - [PPO Ray Trainer](https://verl.readthedocs.io/en/latest/workers/ray_trainer.html)
  - [PyTorch FSDP Backend](https://verl.readthedocs.io/en/latest/workers/fsdp_workers.html)
  - [Megatron-LM Backend](https://verl.readthedocs.io/en/latest/index.html)
- Advanced Usage and Extension
  - [Add Models with the FSDP Backend](https://verl.readthedocs.io/en/latest/advance/fsdp_extension.html)
  - [Add Models with the Megatron-LM Backend](https://verl.readthedocs.io/en/latest/advance/megatron_extension.html)
  - [Multi-turn Rollout Support](https://verl.readthedocs.io/en/latest/sglang_multiturn/multiturn.html)
  - [Search Tool Integration](https://verl.readthedocs.io/en/latest/sglang_multiturn/search_tool_example.html)
  - [Sandbox Fusion Integration](https://verl.readthedocs.io/en/latest/examples/sandbox_fusion_example.html)
  - [Deployment using Separate GPU Resources](https://github.com/volcengine/verl/tree/main/examples/split_placement)
  - [Extend to Other RL(HF) algorithms](https://verl.readthedocs.io/en/latest/advance/dpo_extension.html)
  - [Ray API design tutorial](https://verl.readthedocs.io/en/latest/advance/placement.html)

**Blogs from the community**
- [When Reasoning Models Break Tokenization: The Hidden Complexity of Multiturn Training](https://github.com/zhaochenyang20/Awesome-ML-SYS-Tutorial/blob/main/rlhf/verl/multi-turn/fast_tokenization/multiturn_tokenization_and_masking.md)
- [verl deployment on AWS SageMaker](https://medium.com/@kaige.yang0110/run-verl-on-sagemaker-using-4x8-l40s-gpus-8e6d5c3c61d3)
- [verl x SGLang Multi-turn Code Walkthrough](https://github.com/zhaochenyang20/Awesome-ML-SYS-Tutorial/blob/main/rlhf/verl/multi-turn/code-walk-through/readme_EN.md)
- [Optimizing SGLang Memory Usage in verl](https://hebiao064.github.io/rl-memory-management)
- [SGLang, verl, OpenBMB and Tsinghua University: Pioneering End-to-End Multi-Turn RLHF](https://github.com/zhaochenyang20/Awesome-ML-SYS-Tutorial/blob/main/rlhf/verl/multi-turn/verl-multiturn-rollout-Release.md)
- [Reinforcement Learning from Human Feedback on AMD GPUs with verl and ROCm Integration](https://rocm.blogs.amd.com/artificial-intelligence/verl-large-scale/README.html)
- [veMLP x verl: Mastering RL Training (in Chinese)](https://mp.weixin.qq.com/s/7nbqxk4knMGd-hQE9ls2tA)
- [Best Practices for Distributed GRPO RL Training with verl (in Chinese)](https://www.volcengine.com/docs/6459/1463942)
- [A Brief Analysis of the HybridFlow (verl) Paper (in Chinese)](https://github.com/zhaochenyang20/Awesome-ML-SYS-Tutorial/blob/main/rlhf/verl/readme.md)
- [Up to 20x Higher Throughput! The Doubao LLM Team Open-Sources Its New RLHF Framework (in Chinese)](https://team.doubao.com/en/blog/%E6%9C%80%E9%AB%98%E6%8F%90%E5%8D%8720%E5%80%8D%E5%90%9E%E5%90%90%E9%87%8F-%E8%B1%86%E5%8C%85%E5%A4%A7%E6%A8%A1%E5%9E%8B%E5%9B%A2%E9%98%9F%E5%8F%91%E5%B8%83%E5%85%A8%E6%96%B0-rlhf-%E6%A1%86%E6%9E%B6-%E7%8E%B0%E5%B7%B2%E5%BC%80%E6%BA%90)

## Performance Tuning Guide

Performance is essential for on-policy RL algorithms. We have written a detailed [performance tuning guide](https://verl.readthedocs.io/en/latest/perf/perf_tuning.html) to help you optimize performance.

## Upgrade to vLLM >= v0.8.2

verl now supports vLLM>=0.8.2 when using FSDP as the training backend. Please refer to [this document](https://github.com/volcengine/verl/blob/main/docs/README_vllm0.8.md) for the installation guide and more information. Please avoid vllm 0.7.x, which contains bugs that may lead to OOMs and unexpected errors.

## Use Latest SGLang

SGLang is fully supported with verl, and the SGLang RL Group is working extensively on building unique features, including multi-turn agentic RL, VLM RLHF, server-based RL, and partial rollout. Please refer to [this document](https://verl.readthedocs.io/en/latest/workers/sglang_worker.html) for the installation guide and more information.

## Upgrade to FSDP2

verl is fully embracing FSDP2! FSDP2 is recommended by the torch distributed team, providing better throughput and memory usage, and is composable with other features (e.g. torch.compile). To enable FSDP2, simply use verl main and set the following options:

```
actor_rollout_ref.ref.strategy=fsdp2
actor_rollout_ref.actor.strategy=fsdp2
critic.strategy=fsdp2
reward_model.strategy=fsdp2
```

Furthermore, FSDP2 CPU offloading is compatible with gradient accumulation. You can turn it on to save memory with `actor_rollout_ref.actor.fsdp_config.offload_policy=True`. For more details, see https://github.com/volcengine/verl/pull/1026

## AMD Support (ROCm Kernel)

verl now supports FSDP as the training engine (Megatron support coming soon) and integrates with both vLLM and SGLang as inference engines. Please refer to [this document](https://github.com/volcengine/verl/blob/main/docs/amd_tutorial/amd_build_dockerfile_page.rst) for the installation guide and more information, and [this document](https://github.com/volcengine/verl/blob/main/docs/amd_tutorial/amd_vllm_page.rst) for vLLM performance tuning on ROCm.
## Citation and acknowledgement

If you find the project helpful, please cite:
- [HybridFlow: A Flexible and Efficient RLHF Framework](https://arxiv.org/abs/2409.19256v2)
- [A Framework for Training Large Language Models for Code Generation via Proximal Policy Optimization](https://i.cs.hku.hk/~cwu/papers/gmsheng-NL2Code24.pdf)

```bibtex
@article{sheng2024hybridflow,
  title   = {HybridFlow: A Flexible and Efficient RLHF Framework},
  author  = {Guangming Sheng and Chi Zhang and Zilingfeng Ye and Xibin Wu and Wang Zhang and Ru Zhang and Yanghua Peng and Haibin Lin and Chuan Wu},
  year    = {2024},
  journal = {arXiv preprint arXiv: 2409.19256}
}
```

verl is inspired by the design of Nemo-Aligner, Deepspeed-chat and OpenRLHF. The project is adopted and contributed to by Bytedance, Anyscale, LMSys.org, [Alibaba Qwen team](https://github.com/QwenLM/), Shanghai AI Lab, Tsinghua University, UC Berkeley, UCLA, UIUC, University of Hong Kong, ke.com, [All Hands AI](https://www.all-hands.dev/), [ModelBest](http://modelbest.cn/), JD AI Lab, Microsoft Research, [StepFun](https://www.stepfun.com/), Amazon, LinkedIn, Meituan, [Camel-AI](https://www.camel-ai.org/), [OpenManus](https://github.com/OpenManus), Xiaomi, NVIDIA research, [Baichuan](https://www.baichuan-ai.com/home), [RedNote](https://www.xiaohongshu.com/), [SwissAI](https://www.swiss-ai.org/), [Moonshot AI (Kimi)](https://www.moonshot-ai.com/), Baidu, Snowflake, Skywork.ai, JetBrains, [IceSword Lab](https://www.iceswordlab.com), and many more.

## Awesome work using verl

- [TinyZero](https://github.com/Jiayi-Pan/TinyZero): a reproduction of the **DeepSeek R1 Zero** recipe for reasoning tasks ![GitHub Repo stars](https://img.shields.io/github/stars/Jiayi-Pan/TinyZero)
- [SkyThought](https://github.com/NovaSky-AI/SkyThought): RL training for Sky-T1-7B by the NovaSky AI team ![GitHub Repo stars](https://img.shields.io/github/stars/NovaSky-AI/SkyThought)
- [simpleRL-reason](https://github.com/hkust-nlp/simpleRL-reason): SimpleRL-Zoo: Investigating and Taming Zero Reinforcement Learning for Open Base Models in the Wild ![GitHub Repo stars](https://img.shields.io/github/stars/hkust-nlp/simpleRL-reason)
- [Easy-R1](https://github.com/hiyouga/EasyR1): **Multi-modal** RL training framework ![GitHub Repo stars](https://img.shields.io/github/stars/hiyouga/EasyR1)
- [OpenManus-RL](https://github.com/OpenManus/OpenManus-RL): LLM agents RL tuning framework for multiple agent environments ![GitHub Repo stars](https://img.shields.io/github/stars/OpenManus/OpenManus-RL)
- [rllm](https://github.com/agentica-project/rllm): async RL training with [verl-pipeline](https://github.com/agentica-project/verl-pipeline) ![GitHub Repo stars](https://img.shields.io/github/stars/agentica-project/rllm)
- [RAGEN](https://github.com/ZihanWang314/ragen): a general-purpose reasoning **agent** training framework ![GitHub Repo stars](https://img.shields.io/github/stars/ZihanWang314/ragen)
- [Search-R1](https://github.com/PeterGriffinJin/Search-R1): RL with reasoning and **searching (tool-call)** interleaved LLMs ![GitHub Repo stars](https://img.shields.io/github/stars/PeterGriffinJin/Search-R1)
- [ReSearch](https://github.com/Agent-RL/ReSearch): Learning to **Re**ason with **Search** for LLMs via Reinforcement Learning ![GitHub Repo stars](https://img.shields.io/github/stars/Agent-RL/ReSearch)
- [Skywork-OR1](https://github.com/SkyworkAI/Skywork-OR1): Skywork open reasoner series ![GitHub Repo stars](https://img.shields.io/github/stars/SkyworkAI/Skywork-OR1)
- [ToRL](https://github.com/GAIR-NLP/ToRL): Scaling tool-integrated RL ![GitHub Repo stars](https://img.shields.io/github/stars/GAIR-NLP/ToRL)
- [Absolute Zero Reasoner](https://github.com/LeapLabTHU/Absolute-Zero-Reasoner): [A self-play framework for reasoning with no human-curated data](https://arxiv.org/abs/2505.03335) ![GitHub Repo stars](https://img.shields.io/github/stars/LeapLabTHU/Absolute-Zero-Reasoner)
- [verl-agent](https://github.com/langfengQ/verl-agent): A scalable training framework for **long-horizon LLM/VLM agents**, along with a new algorithm **GiGPO** ![GitHub Repo stars](https://img.shields.io/github/stars/langfengQ/verl-agent)
- [RL-Factory](https://github.com/Simple-Efficient/RL-Factory): An easy and efficient RL post-training framework for agentic learning ![GitHub Repo stars](https://img.shields.io/github/stars/Simple-Efficient/RL-Factory)
- [ReTool](https://retool-rl.github.io/): ReTool: reinforcement learning for strategic tool use in LLMs. Code release is in progress...
- [verl-tool](https://github.com/TIGER-AI-Lab/verl-tool): A unified and easy-to-extend tool-agent training framework based on verl ![GitHub Repo stars](https://img.shields.io/github/stars/TIGER-AI-Lab/verl-tool)
- [PRIME](https://github.com/PRIME-RL/PRIME): Process reinforcement through implicit rewards ![GitHub Repo stars](https://img.shields.io/github/stars/PRIME-RL/PRIME)
- [MemAgent](https://github.com/BytedTsinghua-SIA/MemAgent): MemAgent: Reshaping Long-Context LLM with Multi-Conv RL based Memory Agent ![GitHub Repo stars](https://img.shields.io/github/stars/BytedTsinghua-SIA/MemAgent)
- [POLARIS](https://github.com/ChenxinAn-fdu/POLARIS): A post-training recipe for scaling RL on advanced reasoning models ![GitHub Repo stars](https://img.shields.io/github/stars/ChenxinAn-fdu/POLARIS)
- [GUI-R1](https://github.com/ritzz-ai/GUI-R1): **GUI-R1**: A Generalist R1-style Vision-Language Action Model For **GUI Agents** ![GitHub Repo stars](https://img.shields.io/github/stars/ritzz-ai/GUI-R1)
- [DeepRetrieval](https://github.com/pat-jj/DeepRetrieval): RL Training of **Search Agent** with **Search/Retrieval Outcome** ![GitHub Repo stars](https://img.shields.io/github/stars/pat-jj/DeepRetrieval)
- [Code-R1](https://github.com/ganler/code-r1): Reproducing R1 for **Code** with Reliable Rewards ![GitHub Repo stars](https://img.shields.io/github/stars/ganler/code-r1)
- [DeepResearcher](https://github.com/GAIR-NLP/DeepResearcher): Scaling deep research via reinforcement learning in real-world environments ![GitHub Repo stars](https://img.shields.io/github/stars/GAIR-NLP/DeepResearcher)
- [VAGEN](https://github.com/RAGEN-AI/VAGEN): Training VLM agents with multi-turn reinforcement learning ![GitHub Repo stars](https://img.shields.io/github/stars/RAGEN-AI/VAGEN)
- [RM-R1](https://arxiv.org/abs/2505.02387): RL training of reasoning reward models ![GitHub Repo stars](https://img.shields.io/github/stars/RM-R1-UIUC/RM-R1)
- [LUFFY](https://arxiv.org/pdf/2504.14945): Learning to Reason under Off-Policy Guidance ![GitHub Repo stars](https://img.shields.io/github/stars/ElliottYan/LUFFY)
- [DeepMath](https://github.com/zwhe99/DeepMath): DeepMath-103K data and series models for math reasoning ![GitHub Repo stars](https://img.shields.io/github/stars/zwhe99/DeepMath)
- [Entropy Mechanism of RL](https://github.com/PRIME-RL/Entropy-Mechanism-of-RL): The Entropy Mechanism of Reinforcement Learning for Large Language Model Reasoning ![GitHub Repo stars](https://img.shields.io/github/stars/PRIME-RL/Entropy-Mechanism-of-RL)
- [LLaSA-TTS-GRPO](https://github.com/channel-io/ch-tts-llasa-rl-grpo): TTS fine-tuning with GRPO optimization based on LLASA models ![GitHub Repo stars](https://img.shields.io/github/stars/channel-io/ch-tts-llasa-rl-grpo)
- [PF-PPO](https://arxiv.org/abs/2409.06957): Policy Filtration for PPO based on the reliability of reward signals for more efficient and robust RLHF.
- [RACRO](https://github.com/gyhdog99/RACRO2): Build multi-modal reasoning models by decoupling them into query-conditioned captioning and text-only reasoning ![GitHub Repo stars](https://img.shields.io/github/stars/gyhdog99/RACRO2)

and many more awesome projects listed in [recipe](recipe/README.md).

## Contribution Guide

See the [contribution guide](CONTRIBUTING.md).

## About [ByteDance Seed Team](https://team.doubao.com/)

Founded in 2023, ByteDance Seed Team is dedicated to crafting the industry's most advanced AI foundation models.
The team aspires to become a world-class research team and make significant contributions to the advancement of science and society. You can get to know ByteDance Seed better through the following channels👇

---

We are HIRING! Send us an [email](mailto:haibin.lin@bytedance.com) if you are interested in internship/FTE opportunities in RL for agents.

================================================
FILE: verl_rl/deploy_env.sh
================================================
#!/bin/bash
# Multi-node Environment Deployment Script
# Usage: bash deploy_env.sh [--all-nodes]

set -e

SCRIPT_DIR=$(cd $(dirname $0); pwd)
PROJECT_DIR=${SCRIPT_DIR}

# Configuration
CONDA_ENV_NAME=${CONDA_ENV_NAME:-"verl"}
PYTHON_VERSION=${PYTHON_VERSION:-"3.10"}
HOSTFILE=${HOSTFILE:-"/etc/mpi/hostfile"}

# Colors
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m'

log_info() { echo -e "${GREEN}[INFO]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }

# Initialize conda
init_conda() {
    for conda_sh in /root/anaconda3/etc/profile.d/conda.sh \
                    /root/miniconda3/etc/profile.d/conda.sh \
                    $HOME/anaconda3/etc/profile.d/conda.sh \
                    $HOME/miniconda3/etc/profile.d/conda.sh \
                    /opt/conda/etc/profile.d/conda.sh; do
        [ -f "$conda_sh" ] && source "$conda_sh" && return 0
    done
    command -v conda &>/dev/null
}

# Setup proxy
setup_proxy() {
    log_info "Setting up proxy..."
    unset -v http_proxy https_proxy no_proxy
    export http_proxy=http://oversea-squid2.ko.txyun:11080
    export https_proxy=http://oversea-squid2.ko.txyun:11080
    export no_proxy=localhost,127.0.0.1,localaddress,localdomain.com,internal,corp.kuaishou.com,test.gifshow.com,staging.kuaishou.com
}

# Install on local node
install_local() {
    log_info "Installing environment..."

    # Setup proxy first
    setup_proxy

    if ! init_conda; then
        log_error "Conda not found."
        exit 1
    fi

    # Configure conda for stability
    conda config --set remote_read_timeout_secs 600
    conda config --set remote_connect_timeout_secs 60
    conda config --set remote_max_retries 10
    conda config --set show_channel_urls yes

    # Accept TOS for Anaconda channels
    conda tos accept --override-channels --channel https://repo.anaconda.com/pkgs/main 2>/dev/null || true
    conda tos accept --override-channels --channel https://repo.anaconda.com/pkgs/r 2>/dev/null || true

    # Create or activate conda env
    if conda env list | grep -q "^${CONDA_ENV_NAME} "; then
        log_warn "Environment '${CONDA_ENV_NAME}' exists, activating..."
    else
        log_info "Creating environment '${CONDA_ENV_NAME}'..."
        conda create -n ${CONDA_ENV_NAME} python=${PYTHON_VERSION} -y
    fi
    source $(conda info --base)/etc/profile.d/conda.sh
    conda activate ${CONDA_ENV_NAME}

    log_info "Installing torch..."
    pip install torch==2.6.0

    # Install requirements
    log_info "Installing requirements.txt..."
    pip install -r ${PROJECT_DIR}/requirements.txt

    # Install flash-attn separately
    log_info "Installing flash-attn..."
    pip install flash-attn==2.7.4.post1 --no-build-isolation

    # Install verl package
    log_info "Installing verl package..."
    cd ${PROJECT_DIR}
    pip install -e .

    log_info "Done!"
}

# Deploy to all nodes
deploy_all_nodes() {
    [ ! -f "${HOSTFILE}" ] && log_error "Hostfile not found: ${HOSTFILE}" && exit 1
    ALL_NODES=$(awk '!a[$1]++ {print $1}' ${HOSTFILE})
    log_info "Deploying to: ${ALL_NODES}"
    mkdir -p ./logs/deploy
    for node in ${ALL_NODES}; do
        log_info "Deploying to ${node}..."
        ssh -n ${node} "CONDA_ENV_NAME=${CONDA_ENV_NAME} bash ${SCRIPT_DIR}/deploy_env.sh" \
            > "./logs/deploy/deploy_${node}.log" 2>&1 &
    done
    wait
    log_info "Deployment completed! Check logs in ./logs/deploy/"
}

# Main
case "${1}" in
    --all-nodes)
        deploy_all_nodes
        ;;
    *)
        install_local
        ;;
esac

================================================
FILE: verl_rl/docker/Apptainerfile.rocm
================================================
Bootstrap: docker

# Support - Training: fsdp; Inference: vllm
# FROM: rocm/vllm:rocm6.2_mi300_ubuntu20.04_py3.9_vllm_0.6.4

# Support - Training: fsdp; Inference: vllm, sglang
FROM lmsysorg/sglang:v0.4.5-rocm630

%environment
    export PYTORCH_ROCM_ARCH="gfx90a;gfx942"
    export HIPCC_COMPILE_FLAGS_APPEND="--amdgpu-target=gfx90a;gfx942 -D__HIP_PLATFORM_AMD__"
    export CFLAGS="-D__HIP_PLATFORM_AMD__"
    export CXXFLAGS="-D__HIP_PLATFORM_AMD__"

%post
    # Create source directory
    mkdir -p /opt/src

    # Uninstall and reinstall vllm
    pip uninstall -y vllm
    cd /opt/src
    git clone -b v0.6.3 https://github.com/vllm-project/vllm.git
    cd vllm
    MAX_JOBS=$(nproc) python3 setup.py install
    cd /opt
    rm -rf /opt/src/vllm

    # Install dependencies
    pip install "tensordict<0.6" --no-deps
    pip install accelerate \
        codetiming \
        datasets \
        dill \
        hydra-core \
        liger-kernel \
        numpy \
        pandas \
        peft \
        "pyarrow>=15.0.0" \
        pylatexenc \
        "ray[data,train,tune,serve]" \
        torchdata \
        transformers \
        wandb \
        orjson \
        pybind11

    # Clone and install verl from GitHub
    cd /opt
    git clone https://github.com/volcengine/verl.git
    cd verl
    # Uncomment to use a specific version
    # git checkout v0.3.0.post0
    pip install -e . --no-deps

    # Install torch_memory_saver
    pip install git+https://github.com/ExtremeViscent/torch_memory_saver.git --no-deps

================================================
FILE: verl_rl/docker/Dockerfile.extention.awsefa
================================================
# Base image supporting AWS EFA
# Build images with frameworks based on this
FROM verlai/verl:app-verl0.5-sglang0.4.6.post5-mcore0.12.2

# For aws instances with EFA net interface (Sagemaker AI Pod)
# install EFA driver:
######## AWS EFA ############
ENV NCCL_VERSION=2.25.1-1
ENV DEBIAN_FRONTEND=noninteractive
ENV EFA_INSTALLER_VERSION=1.40.0
ENV AWS_OFI_NCCL_VERSION=1.14.2
ENV FI_EFA_SET_CUDA_SYNC_MEMOPS=0
ENV FI_PROVIDER=efa

RUN apt update && apt install -y linux-image-generic libhwloc-dev

RUN cd /tmp && \
    curl -O https://efa-installer.amazonaws.com/aws-efa-installer-${EFA_INSTALLER_VERSION}.tar.gz && \
    tar -xf aws-efa-installer-${EFA_INSTALLER_VERSION}.tar.gz && \
    cd aws-efa-installer && \
    ./efa_installer.sh -y -g --skip-kmod --skip-limit-conf --no-verify && \
    ldconfig && \
    rm -rf /tmp/aws-efa-installer /var/lib/apt/lists/*

# NCCL EFA Plugin
RUN cd /tmp && \
    curl -LO https://github.com/aws/aws-ofi-nccl/archive/refs/tags/v${AWS_OFI_NCCL_VERSION}.tar.gz && \
    tar -xzf /tmp/v${AWS_OFI_NCCL_VERSION}.tar.gz && \
    rm /tmp/v${AWS_OFI_NCCL_VERSION}.tar.gz && \
    mv aws-ofi-nccl-${AWS_OFI_NCCL_VERSION} aws-ofi-nccl && \
    cd /tmp/aws-ofi-nccl && \
    ./autogen.sh && \
    ./configure --prefix=/opt/amazon/efa \
        --with-libfabric=/opt/amazon/efa \
        --with-cuda=/usr/local/cuda \
        --enable-platform-aws \
        --with-mpi=/opt/amazon/openmpi && \
    make -j$(nproc) install && \
    rm -rf /tmp/aws-ofi/nccl

# NCCL
RUN echo "/usr/local/lib" >> /etc/ld.so.conf.d/local.conf && \
    echo "/opt/amazon/openmpi/lib" >> /etc/ld.so.conf.d/efa.conf && \
    ldconfig

ENV OMPI_MCA_pml=^cm,ucx \
    OMPI_MCA_btl=tcp,self \
    OMPI_MCA_btl_tcp_if_exclude=lo,docker0,veth_def_agent \
    OPAL_PREFIX=/opt/amazon/openmpi \
NCCL_SOCKET_IFNAME=^docker,lo,veth_def_agent \ FI_EFA_USE_HUGE_PAGE=0 # docker build -t verl:awsefa --label "commit=$(git rev-parse --short HEAD)" . # on aws: # docker run --ipc=host --privileged --name verldev --gpus all --network=host --shm-size=1800gb -itd verl:awsefa ================================================ FILE: verl_rl/docker/Dockerfile.ngc.vllm ================================================ # docker buildx build --platform linux/x86_64 -t "verlai/verl:ngc-th2.4.0-cu124-vllm0.6.3-ray2.4-te1.7-v0.0.6" -f docker/Dockerfile.ngc.vllm . --builder cloud-verlai-verl-builder --progress=plain --push FROM nvcr.io/nvidia/pytorch:24.05-py3 # uninstall nv-pytorch fork RUN pip3 uninstall pytorch-quantization \ pytorch-triton \ torch \ torch-tensorrt \ torchvision \ xgboost transformer_engine flash_attn \ apex megatron-core -y RUN pip3 install torch==2.4.0 torchvision==0.19.0 torchaudio==2.4.0 --index-url https://download.pytorch.org/whl/cu124 # =============== Megatron dependencies (optional) ================= # install apex, set MAX_JOBS to avoid OOMs RUN MAX_JOBS=4 pip3 install -v --disable-pip-version-check --no-cache-dir --no-build-isolation \ --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" \ git+https://github.com/NVIDIA/apex # =============== End of Megatron dependencies (optional) ================= RUN pip3 install --no-cache-dir \ accelerate \ codetiming \ datasets \ dill \ hydra-core \ numpy \ 'pandas' \ 'peft' \ 'pyarrow>=15.0.0' \ 'pybind11' \ 'pylatexenc' \ 'ray>=2.10' \ 'tensordict<0.6' \ 'transformers' \ 'vllm==0.6.3.post1' \ 'wandb' # full dependencies RUN pip3 install pytest pre-commit py-spy pyext liger-kernel # =============== Megatron dependencies (optional) ================= # install Transformer Engine, which requires FA 2.5.8. 
Do it in a separate step for docker cache RUN MAX_JOBS=4 NINJA_FLAGS="-j4" pip3 install flash-attn==2.5.8 --no-cache-dir --no-build-isolation RUN MAX_JOBS=1 NINJA_FLAGS="-j1" TE_BUILD_WITH_NINJA=0 pip3 install git+https://github.com/eric-haibin-lin/TransformerEngine.git@v1.7.0 # =============== End of Megatron dependencies (optional) ================= ================================================ FILE: verl_rl/docker/Dockerfile.ngc.vllm0.8 ================================================ # Start from the NVIDIA official image (ubuntu-22.04 + cuda-12.6 + python-3.10) # https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-24-08.html FROM nvcr.io/nvidia/pytorch:24.08-py3 # Define environments ENV MAX_JOBS=32 ENV VLLM_WORKER_MULTIPROC_METHOD=spawn ENV DEBIAN_FRONTEND=noninteractive ENV NODE_OPTIONS="" ENV PIP_ROOT_USER_ACTION=ignore ENV HF_HUB_ENABLE_HF_TRANSFER="1" # Define installation arguments ARG APT_SOURCE=https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ ARG PIP_INDEX=https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple # Set apt source RUN cp /etc/apt/sources.list /etc/apt/sources.list.bak && \ { \ echo "deb ${APT_SOURCE} jammy main restricted universe multiverse"; \ echo "deb ${APT_SOURCE} jammy-updates main restricted universe multiverse"; \ echo "deb ${APT_SOURCE} jammy-backports main restricted universe multiverse"; \ echo "deb ${APT_SOURCE} jammy-security main restricted universe multiverse"; \ } > /etc/apt/sources.list # Install systemctl RUN apt-get update && \ apt-get install -y -o Dpkg::Options::="--force-confdef" systemd && \ apt-get clean # Install tini RUN apt-get update && \ apt-get install -y tini && \ apt-get clean # Change pip source RUN pip config set global.index-url "${PIP_INDEX}" && \ pip config set global.extra-index-url "${PIP_INDEX}" && \ python -m pip install --upgrade pip # Uninstall nv-pytorch fork RUN pip uninstall -y torch torchvision torchaudio \ pytorch-quantization pytorch-triton torch-tensorrt \ xgboost transformer_engine flash_attn apex megatron-core grpcio # Install torch-2.6.0+cu124 + vllm-0.8.3 # torch-2.6.0+cu124: cxx11abi=False # torch-2.6.0+cu126: cxx11abi=True # see https://github.com/flashinfer-ai/flashinfer/issues/911 RUN pip install --no-cache-dir "vllm==0.8.3" "torch==2.6.0" "torchvision==0.21.0" "torchaudio==2.6.0" "tensordict==0.6.2" torchdata \ "transformers[hf_xet]>=4.51.0" accelerate datasets peft hf-transfer \ "numpy<2.0.0" "pyarrow>=15.0.0" pandas \ ray[default] codetiming hydra-core pylatexenc qwen-vl-utils wandb dill pybind11 liger-kernel mathruler \ pytest py-spy pyext pre-commit ruff # Install flash-attn-2.7.4.post1 (cxx11abi=False) RUN wget -nv https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.4.post1/flash_attn-2.7.4.post1+cu12torch2.6cxx11abiFALSE-cp310-cp310-linux_x86_64.whl && \ pip install --no-cache-dir flash_attn-2.7.4.post1+cu12torch2.6cxx11abiFALSE-cp310-cp310-linux_x86_64.whl # Install flashinfer-0.2.2.post1+cu124 (cxx11abi=False) # vllm-0.8.3 does not support flashinfer>=0.2.3 # see https://github.com/vllm-project/vllm/pull/15777 RUN wget -nv https://github.com/flashinfer-ai/flashinfer/releases/download/v0.2.2.post1/flashinfer_python-0.2.2.post1+cu124torch2.6-cp38-abi3-linux_x86_64.whl && \ pip install --no-cache-dir flashinfer_python-0.2.2.post1+cu124torch2.6-cp38-abi3-linux_x86_64.whl # Fix packages RUN pip uninstall -y pynvml nvidia-ml-py && \ pip install --no-cache-dir --upgrade "nvidia-ml-py>=12.560.30" "fastapi[standard]>=0.115.0" "optree>=0.13.0" "pydantic>=2.9" 
"grpcio>=1.62.1" # Install verl RUN pip install --no-cache-dir verl[vllm] -U # Reset pip config RUN pip config unset global.index-url && \ pip config unset global.extra-index-url ================================================ FILE: verl_rl/docker/Dockerfile.ngc.vllm0.8.sagemaker ================================================ # Using a pre-built image from AWS DLC which contains the current version of python (3.10) and supported cuda version (12.1) FROM 763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:2.1.0-transformers4.36.0-gpu-py310-cu121-ubuntu20.04 # uninstall nv-pytorch fork RUN pip3 uninstall -y pytorch-quantization \ pytorch-triton torch torch-tensorrt torchvision \ xgboost transformer_engine flash_attn apex megatron-core # Define environments ENV MAX_JOBS=32 ENV VLLM_WORKER_MULTIPROC_METHOD=spawn ENV DEBIAN_FRONTEND=noninteractive ENV NODE_OPTIONS="" ENV HF_HUB_ENABLE_HF_TRANSFER="1" # Install systemctl RUN apt-get update && \ apt-get install -y -o Dpkg::Options::="--force-confdef" systemd && \ apt-get clean # Install tini RUN apt-get update && \ apt-get install -y tini && \ apt-get clean # Install torch-2.6.0 + vllm-0.8.2 RUN pip install --no-cache-dir vllm==0.8.2 torch==2.6.0 torchvision==0.21.0 torchaudio==2.6.0 tensordict torchdata==0.11.0 \ transformers>=4.49.0 accelerate datasets peft hf-transfer \ ray[default] codetiming hydra-core pandas pyarrow>=15.0.0 pylatexenc qwen-vl-utils wandb dill pybind11 liger-kernel mathruler \ pytest pre-commit py-spy pyext ruff # Install flash_attn-2.7.4.post1 RUN pip uninstall -y transformer-engine flash-attn && \ pip install flash-attn==2.7.4.post1 --no-build-isolation # Fix cv2 RUN pip uninstall -y pynvml nvidia-ml-py && \ pip install --no-cache-dir nvidia-ml-py>=12.560.30 opencv-python-headless==4.8.0.74 fastapi==0.115.6 && \ pip install --no-cache-dir --upgrade optree>=0.13.0 # Install verl RUN pip install --no-cache-dir verl[vllm] -U # Reset pip config RUN pip config unset global.index-url && \ pip config unset global.extra-index-url ================================================ FILE: verl_rl/docker/Dockerfile.rocm ================================================ # FROM "compute-artifactory.amd.com:5000/rocm-plus-docker/framework/compute-rocm-rel-6.4:94_ubuntu22.04_py3.10_pytorch_release-2.7_575e247" FROM "rlfoundation.azurecr.io/rocm6.3.4:vllm-0.8.5-numa-patch-ubuntu-22.04" SHELL ["/bin/bash", "-ceuxo", "pipefail"] ENV MAX_JOBS=512 ENV PATH="/usr/local/python3.12/bin:$PATH" RUN ln -sf /usr/bin/python3.12 /usr/bin/python && \ ln -sf /usr/bin/pip3.12 /usr/bin/pip ############################################ ############################################ RUN apt-get update RUN apt-get install -y pkg-config liblzma-dev ############################################ ############################################ ########################################### ##########Install TransformerEngine######## ########################################### WORKDIR /workspace/ # transformer-engine install # https://github.com/ROCm/TransformerEngine RUN rm -rf TransformerEngine RUN git clone --recursive https://github.com/ROCm/TransformerEngine.git WORKDIR /workspace/TransformerEngine RUN git checkout 236178e5 # git checkout bb061ade # git checkout 864405c ENV NVTE_FRAMEWORK=pytorch ENV NVTE_ROCM_ARCH=gfx942 ENV NVTE_USE_HIPBLASLT=1 ENV NVTE_USE_ROCM=1 # export CMAKE_PREFIX_PATH="/opt/rocm:/opt/rocm/hip:/usr/local:/usr:${CMAKE_PREFIX_PATH:-}" ENV CMAKE_PREFIX_PATH="/opt/rocm:/opt/rocm/hip:/usr/local:/usr" # ENV 
NVTE_BUILD_MAX_JOBS=$(MAX_JOBS)
RUN MAX_JOBS=$(MAX_JOBS) pip install . -vvv
WORKDIR /workspace/
###########################################
###########################################
###########################################

####################################################################################
################Install vllm - sglang require vllm 0.6.7 dependency#################
####################################################################################
#### Require vllm 0.6.7 - checkout 113274a0
WORKDIR /workspace/
RUN rm -rf vllm
RUN pip uninstall -y vllm
# Refer to here (down-grade vllm to 0.6.3): https://docs.vllm.ai/en/v0.6.3/getting_started/amd-installation.html
RUN git clone https://github.com/ROCm/vllm.git
# git clone https://github.com/vllm-project/vllm.git
WORKDIR /workspace/vllm
RUN git checkout 113274a0
ENV PYTORCH_ROCM_ARCH="gfx90a;gfx942"
#ENV MAX_JOBS=512
ENV MAX_JOBS=${MAX_JOBS}
RUN pip install "boto3>=1.26.0"
RUN pip install setuptools_scm
# will add src into py. You can delete the repo
RUN python3 setup.py install
WORKDIR /workspace/
####################################################################################
####################################################################################
####################################################################################

###########################################
############For hack docker################
###########################################
RUN pip install setuptools==75.8.0
###########################################
###########################################
###########################################

###########################################
############build sglang###################
###########################################
# Set environment variables
ENV BASE_DIR=/sgl-workspace
ENV BUILD_TYPE=all
ENV SGL_REPO=https://github.com/sgl-project/sglang
ENV SGL_BRANCH=v0.4.6.post5
ENV TRITON_REPO=https://github.com/ROCm/triton.git
ENV TRITON_COMMIT=improve_fa_decode_3.0.0
ENV AITER_REPO=https://github.com/ROCm/aiter.git
ENV AITER_COMMIT=v0.1.2
# v0.1.2 version - commit id: 9d11f47
# ENV AITER_COMMIT=9d11f47
ENV HIP_FORCE_DEV_KERNARG=1
ENV HSA_NO_SCRATCH_RECLAIM=1
ENV SGLANG_SET_CPU_AFFINITY=1
ENV SGLANG_ALLOW_OVERWRITE_LONGER_CONTEXT_LEN=1
ENV NCCL_MIN_NCHANNELS=112
ENV MOE_PADDING=1
ENV VLLM_FP8_PADDING=1
ENV VLLM_FP8_ACT_PADDING=1
ENV VLLM_FP8_WEIGHT_PADDING=1
ENV VLLM_FP8_REDUCE_CONV=1
ENV TORCHINDUCTOR_MAX_AUTOTUNE=1
ENV TORCHINDUCTOR_MAX_AUTOTUNE_POINTWISE=1
ENV HIPCC_COMPILE_FLAGS_APPEND="--offload-arch=gfx942"
ENV AMDGPU_TARGETS=gfx942
ENV ROCM_ARCH=gfx942
ENV PYTORCH_ROCM_ARCH="gfx90a;gfx942"

# Switch to working directory
WORKDIR /sgl-workspace

# Clean and create directory
RUN rm -rf /sgl-workspace && mkdir -p /sgl-workspace

# Clone and build sglang
RUN git clone ${SGL_REPO} \
    && cd sglang \
    && git checkout ${SGL_BRANCH} || echo "Using default branch" \
    && cd sgl-kernel \
    && rm -f pyproject.toml \
    && mv pyproject_rocm.toml pyproject.toml \
    && python setup_rocm.py install \
    && cd ..
\ && if [ "$BUILD_TYPE" = "srt" ]; then \ python -m pip --no-cache-dir install -e "python[srt_hip]"; \ else \ python -m pip --no-cache-dir install -e "python[all_hip]"; \ fi \ && cd /sgl-workspace \ && cp -r /sgl-workspace/sglang /sglang \ && python -m pip cache purge # Install common Python packages RUN pip install IPython orjson python-multipart torchao pybind11 # Rebuild Triton RUN pip uninstall -y triton || true \ && git clone ${TRITON_REPO} \ && cd triton \ && git checkout ${TRITON_COMMIT} \ && cd python \ && python3 setup.py install \ && cd /sgl-workspace # ENV HIPCC_COMPILE_FLAGS_APPEND="--offload-arch=gfx942 --amdgpu-lower-module-lds-strategy=1" # ENV HIPCC_COMPILE_FLAGS_APPEND="--offload-arch=gfx942" # Build aiter #version: Commit 9d11f47 # && git checkout ${AITER_COMMIT} \ RUN pip uninstall -y aiter || true RUN git clone ${AITER_REPO} \ && cd aiter \ && git checkout ${AITER_COMMIT} \ && git submodule sync \ && git submodule update --init --recursive \ && PREBUILD_KERNELS=1 GPU_ARCHS=gfx942 python3 setup.py install \ && cd /sgl-workspace # && PREBUILD_KERNELS=1 GPU_ARCHS=gfx942 python3 setup.py develop \ # && PREBUILD_KERNELS=1 GPU_ARCHS=gfx942 python3 setup.py develop \ # Copy MI300X config RUN find /sgl-workspace/sglang/python/sglang/srt/layers/quantization/configs/ \ /sgl-workspace/sglang/python/sglang/srt/layers/moe/fused_moe_triton/configs/ \ -type f -name '*MI300X*' | \ xargs -I {} sh -c 'vf_config=$(echo "$1" | sed "s/MI300X/MI300X_VF/"); cp "$1" "$vf_config"' -- {} # Environment setup complete. RUN echo "Environment setup complete." WORKDIR /workspace/ ########################################### ########################################### ########################################### ########################################### ###############vllm v0.8.5################# ########################################### # ENV GITHUB_USERNAME=yushengsu-thu # ENV GITHUB_MAIL=yushengsu@gmail.com # RUN git config --global user.name "${GITHUB_USERNAME}" \ # && git config --global user.email "${GITHUB_MAIL}" WORKDIR /workspace/ ENV VLLM_TARGET_DEVICE=rocm ENV ROCM_PATH=/opt/rocm ENV SETUPTOOLS_SCM_PRETEND_VERSION=0.8.5.dev # Find the repo path in: DockerFile/Dockerfile.rocm_yang # RUN git clone https://github.com/RLFoundation/vllm-patch.git RUN pip uninstall -y vllm || true RUN rm -rf vllm-patch RUN git clone https://github.com/RLFoundation/vllm-patch.git \ && cd vllm-patch \ && git checkout v0.8.5-sleep-numa \ && rm -rf build/ dist/ *.egg-info \ && ln -sf /opt/rocm/lib/libamdhip64.so /usr/lib/libamdhip64.so \ && SETUPTOOLS_SCM_PRETEND_VERSION=0.8.5.dev PYTORCH_ROCM_ARCH="gfx90a;gfx942" MAX_JOBS=${MAX_JOBS} python3 setup.py install # RUN SETUPTOOLS_SCM_PRETEND_VERSION=0.8.5.dev PYTORCH_ROCM_ARCH="gfx90a;gfx942" MAX_JOBS=${MAX_JOBS} python3 setup.py develop WORKDIR /workspace/ ########################################### ########################################### ########################################### ######################################### #### Install megatron-core############### ######################################### RUN pip uninstall -y megatron-core && \ git clone https://github.com/yushengsu-thu/Megatron-LM-amd_version.git && \ cd Megatron-LM-amd_version && \ pip install -vvv -e . 
&& \ cd /workspace/ ######################################### ######################################### ######################################### ####################################### ################apex################### ####################################### WORKDIR /workspace/ RUN pip uninstall -y apex && \ git clone https://github.com/ROCm/apex.git && \ cd apex && \ python setup.py install && \ cd /workspace/ ####################################### ####################################### ####################################### ################################################################################ ###########################Add torch_memory_saver############################### ################################################################################ # Set environment variables ENV HIPCC_COMPILE_FLAGS_APPEND="--amdgpu-target=gfx90a;gfx942 -D__HIP_PLATFORM_AMD__" ENV CFLAGS="-D__HIP_PLATFORM_AMD__" ENV CXXFLAGS="-D__HIP_PLATFORM_AMD__" RUN pip install "git+https://github.com/YangWang92/torch_memory_saver_numa.git@numa" ################################################################################ ################################################################################ ################################################################################ ######################################## ######Install ray####################### ######################################## # need to add this patch: https://github.com/ray-project/ray/pull/53531/files RUN pip uninstall ray -y RUN pip install "ray[data,train,tune,serve]>=2.47.0" ######################################## ######################################## ######################################## ########################################## #######Install other dependencies######### ########################################## RUN pip install "tensordict==0.6.2" --no-deps && \ pip install accelerate \ codetiming \ datasets \ dill \ hydra-core \ liger-kernel \ numpy \ pandas \ peft \ "pyarrow>=15.0.0" \ pylatexenc \ torchdata \ wandb \ orjson \ pybind11 WORKDIR /workspace/ RUN git clone https://github.com/volcengine/verl.git && \ cd verl && \ pip install -e . ########################################## ########################################## ########################################## WORKDIR /workspace/ CMD ["/usr/bin/bash"] ================================================ FILE: verl_rl/docker/Dockerfile.rocm_verl-0.3.0.post1 ================================================ # Build the docker in the repo dir: # docker build -f docker/Dockerfile.rocm -t verl-rocm:03.04.2015 . # docker images # you can find your built docker # Support - Traing: fsdp; Inference: vllm # FROM rocm/vllm:rocm6.2_mi300_ubuntu20.04_py3.9_vllm_0.6.4 # Support - Traing: fsdp; Inference: vllm, sglang FROM lmsysorg/sglang:v0.4.6.post5-rocm630 # Set working directory # WORKDIR $PWD/app # Set environment variables ENV PYTORCH_ROCM_ARCH="gfx90a;gfx942" ENV HIPCC_COMPILE_FLAGS_APPEND="--amdgpu-target=gfx90a;gfx942 -D__HIP_PLATFORM_AMD__" ENV CFLAGS="-D__HIP_PLATFORM_AMD__" ENV CXXFLAGS="-D__HIP_PLATFORM_AMD__" # Install vllm RUN pip uninstall -y vllm && \ rm -rf vllm && \ git clone -b v0.6.3 https://github.com/vllm-project/vllm.git && \ cd vllm && \ MAX_JOBS=$(nproc) python3 setup.py install && \ cd .. && \ rm -rf vllm # Copy the entire project directory COPY . . 
# Install dependencies
RUN pip install "tensordict==0.6.2" --no-deps && \
    pip install accelerate \
    codetiming \
    datasets \
    dill \
    hydra-core \
    liger-kernel \
    numpy \
    pandas \
    peft \
    "pyarrow>=15.0.0" \
    pylatexenc \
    "ray[data,train,tune,serve]<2.45.0" \
    torchdata \
    transformers \
    wandb \
    orjson \
    pybind11

RUN git clone https://github.com/volcengine/verl.git && \
    cd verl && \
    pip install -e .

# Install torch_memory_saver
RUN pip install git+https://github.com/ExtremeViscent/torch_memory_saver.git --no-deps

================================================
FILE: verl_rl/docker/Dockerfile.rocm_verl-0.4.1
================================================
# FROM "compute-artifactory.amd.com:5000/rocm-plus-docker/framework/compute-rocm-rel-6.4:94_ubuntu22.04_py3.10_pytorch_release-2.7_575e247"
FROM "rlfoundation.azurecr.io/rocm6.3.4:vllm-0.8.5-numa-patch-ubuntu-22.04"

SHELL ["/bin/bash", "-ceuxo", "pipefail"]

ENV MAX_JOBS=512

ENV PATH="/usr/local/python3.12/bin:$PATH"
RUN ln -sf /usr/bin/python3.12 /usr/bin/python && \
    ln -sf /usr/bin/pip3.12 /usr/bin/pip

############################################
############################################
RUN apt-get update
RUN apt-get install -y pkg-config liblzma-dev
############################################
############################################

###########################################
##########Install TransformerEngine########
###########################################
WORKDIR /workspace/
# transformer-engine install
# https://github.com/ROCm/TransformerEngine
RUN rm -rf TransformerEngine
RUN git clone --recursive https://github.com/ROCm/TransformerEngine.git
WORKDIR /workspace/TransformerEngine
RUN git checkout 236178e5
# git checkout bb061ade
# git checkout 864405c
ENV NVTE_FRAMEWORK=pytorch
ENV NVTE_ROCM_ARCH=gfx942
ENV NVTE_USE_HIPBLASLT=1
ENV NVTE_USE_ROCM=1
# export CMAKE_PREFIX_PATH="/opt/rocm:/opt/rocm/hip:/usr/local:/usr:${CMAKE_PREFIX_PATH:-}"
ENV CMAKE_PREFIX_PATH="/opt/rocm:/opt/rocm/hip:/usr/local:/usr"
# ENV NVTE_BUILD_MAX_JOBS=$(MAX_JOBS)
RUN MAX_JOBS=${MAX_JOBS} pip install . -vvv

WORKDIR /workspace/
###########################################
###########################################
###########################################

####################################################################################
###########Install vllm - sglang requires vllm 0.6.7 as a dependency###############
####################################################################################
#### Require vllm 0.6.7 - checkout 113274a0
WORKDIR /workspace/
RUN rm -rf vllm
RUN pip uninstall -y vllm
# Refer to here (down-grade vllm to 0.6.3): https://docs.vllm.ai/en/v0.6.3/getting_started/amd-installation.html
RUN git clone https://github.com/ROCm/vllm.git
# git clone https://github.com/vllm-project/vllm.git
WORKDIR /workspace/vllm
RUN git checkout 113274a0
ENV PYTORCH_ROCM_ARCH="gfx90a;gfx942"
#ENV MAX_JOBS=512
ENV MAX_JOBS=${MAX_JOBS}
RUN pip install "boto3>=1.26.0"
RUN pip install setuptools_scm
# setup.py install copies the sources into site-packages; the cloned repo can be deleted afterwards
RUN python3 setup.py install

WORKDIR /workspace/
####################################################################################
####################################################################################
####################################################################################

###########################################
############For hack docker################
###########################################
RUN pip install setuptools==75.8.0
###########################################
###########################################
###########################################

###########################################
############build sglang###################
###########################################
# Set environment variables
ENV BASE_DIR=/sgl-workspace
ENV BUILD_TYPE=all
ENV SGL_REPO=https://github.com/sgl-project/sglang
ENV SGL_BRANCH=v0.4.6.post5
ENV TRITON_REPO=https://github.com/ROCm/triton.git
ENV TRITON_COMMIT=improve_fa_decode_3.0.0
ENV AITER_REPO=https://github.com/ROCm/aiter.git
ENV AITER_COMMIT=v0.1.2
# v0.1.2 version - commit id: 9d11f47
# ENV AITER_COMMIT=9d11f47
ENV HIP_FORCE_DEV_KERNARG=1
ENV HSA_NO_SCRATCH_RECLAIM=1
ENV SGLANG_SET_CPU_AFFINITY=1
ENV SGLANG_ALLOW_OVERWRITE_LONGER_CONTEXT_LEN=1
ENV NCCL_MIN_NCHANNELS=112
ENV MOE_PADDING=1
ENV VLLM_FP8_PADDING=1
ENV VLLM_FP8_ACT_PADDING=1
ENV VLLM_FP8_WEIGHT_PADDING=1
ENV VLLM_FP8_REDUCE_CONV=1
ENV TORCHINDUCTOR_MAX_AUTOTUNE=1
ENV TORCHINDUCTOR_MAX_AUTOTUNE_POINTWISE=1
ENV HIPCC_COMPILE_FLAGS_APPEND="--offload-arch=gfx942"
ENV AMDGPU_TARGETS=gfx942
ENV ROCM_ARCH=gfx942
ENV PYTORCH_ROCM_ARCH="gfx90a;gfx942"

# Switch to working directory
WORKDIR /sgl-workspace

# Clean and create directory
RUN rm -rf /sgl-workspace && mkdir -p /sgl-workspace

# Clone and build sglang
RUN git clone ${SGL_REPO} \
    && cd sglang \
    && git checkout ${SGL_BRANCH} || echo "Using default branch" \
    && cd sgl-kernel \
    && rm -f pyproject.toml \
    && mv pyproject_rocm.toml pyproject.toml \
    && python setup_rocm.py install \
    && cd ..
\ && if [ "$BUILD_TYPE" = "srt" ]; then \ python -m pip --no-cache-dir install -e "python[srt_hip]"; \ else \ python -m pip --no-cache-dir install -e "python[all_hip]"; \ fi \ && cd /sgl-workspace \ && cp -r /sgl-workspace/sglang /sglang \ && python -m pip cache purge # Install common Python packages RUN pip install IPython orjson python-multipart torchao pybind11 # Rebuild Triton RUN pip uninstall -y triton || true \ && git clone ${TRITON_REPO} \ && cd triton \ && git checkout ${TRITON_COMMIT} \ && cd python \ && python3 setup.py install \ && cd /sgl-workspace # ENV HIPCC_COMPILE_FLAGS_APPEND="--offload-arch=gfx942 --amdgpu-lower-module-lds-strategy=1" # ENV HIPCC_COMPILE_FLAGS_APPEND="--offload-arch=gfx942" # Build aiter #version: Commit 9d11f47 # && git checkout ${AITER_COMMIT} \ RUN pip uninstall -y aiter || true RUN git clone ${AITER_REPO} \ && cd aiter \ && git checkout ${AITER_COMMIT} \ && git submodule sync \ && git submodule update --init --recursive \ && PREBUILD_KERNELS=1 GPU_ARCHS=gfx942 python3 setup.py install \ && cd /sgl-workspace # && PREBUILD_KERNELS=1 GPU_ARCHS=gfx942 python3 setup.py develop \ # && PREBUILD_KERNELS=1 GPU_ARCHS=gfx942 python3 setup.py develop \ # Copy MI300X config RUN find /sgl-workspace/sglang/python/sglang/srt/layers/quantization/configs/ \ /sgl-workspace/sglang/python/sglang/srt/layers/moe/fused_moe_triton/configs/ \ -type f -name '*MI300X*' | \ xargs -I {} sh -c 'vf_config=$(echo "$1" | sed "s/MI300X/MI300X_VF/"); cp "$1" "$vf_config"' -- {} # Environment setup complete. RUN echo "Environment setup complete." WORKDIR /workspace/ ########################################### ########################################### ########################################### ########################################### ###############vllm v0.8.5################# ########################################### # ENV GITHUB_USERNAME=yushengsu-thu # ENV GITHUB_MAIL=yushengsu@gmail.com # RUN git config --global user.name "${GITHUB_USERNAME}" \ # && git config --global user.email "${GITHUB_MAIL}" WORKDIR /workspace/ ENV VLLM_TARGET_DEVICE=rocm ENV ROCM_PATH=/opt/rocm ENV SETUPTOOLS_SCM_PRETEND_VERSION=0.8.5.dev # Find the repo path in: DockerFile/Dockerfile.rocm_yang # RUN git clone https://github.com/RLFoundation/vllm-patch.git RUN pip uninstall -y vllm || true RUN rm -rf vllm-patch RUN git clone https://github.com/RLFoundation/vllm-patch.git \ && cd vllm-patch \ && git checkout v0.8.5-sleep-numa \ && rm -rf build/ dist/ *.egg-info \ && ln -sf /opt/rocm/lib/libamdhip64.so /usr/lib/libamdhip64.so \ && SETUPTOOLS_SCM_PRETEND_VERSION=0.8.5.dev PYTORCH_ROCM_ARCH="gfx90a;gfx942" MAX_JOBS=${MAX_JOBS} python3 setup.py install # RUN SETUPTOOLS_SCM_PRETEND_VERSION=0.8.5.dev PYTORCH_ROCM_ARCH="gfx90a;gfx942" MAX_JOBS=${MAX_JOBS} python3 setup.py develop WORKDIR /workspace/ ########################################### ########################################### ########################################### ######################################### #### Install megatron-core############### ######################################### RUN pip uninstall -y megatron-core && \ git clone https://github.com/yushengsu-thu/Megatron-LM-amd_version.git && \ cd Megatron-LM-amd_version && \ pip install -vvv -e . 
&& \ cd /workspace/ ######################################### ######################################### ######################################### ####################################### ################apex################### ####################################### WORKDIR /workspace/ RUN pip uninstall -y apex && \ git clone https://github.com/ROCm/apex.git && \ cd apex && \ python setup.py install && \ cd /workspace/ ####################################### ####################################### ####################################### ################################################################################ ###########################Add torch_memory_saver############################### ################################################################################ # Set environment variables ENV HIPCC_COMPILE_FLAGS_APPEND="--amdgpu-target=gfx90a;gfx942 -D__HIP_PLATFORM_AMD__" ENV CFLAGS="-D__HIP_PLATFORM_AMD__" ENV CXXFLAGS="-D__HIP_PLATFORM_AMD__" RUN pip install "git+https://github.com/YangWang92/torch_memory_saver_numa.git@numa" ################################################################################ ################################################################################ ################################################################################ ######################################## ######Install ray####################### ######################################## # need to add this patch: https://github.com/ray-project/ray/pull/53531/files RUN pip uninstall ray -y RUN pip install "ray[data,train,tune,serve]>=2.47.0" ######################################## ######################################## ######################################## ########################################## #######Install other dependencies######### ########################################## RUN pip install "tensordict==0.6.2" --no-deps && \ pip install accelerate \ codetiming \ datasets \ dill \ hydra-core \ liger-kernel \ numpy \ pandas \ peft \ "pyarrow>=15.0.0" \ pylatexenc \ torchdata \ wandb \ orjson \ pybind11 WORKDIR /workspace/ RUN git clone https://github.com/volcengine/verl.git && \ cd verl && \ pip install -e . 
##########################################
##########################################
##########################################
WORKDIR /workspace/
CMD ["/usr/bin/bash"]

================================================
FILE: verl_rl/docker/Dockerfile.sglang
================================================
# Start from the NVIDIA official image (ubuntu-22.04 + python-3.10)
# https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-24-08.html
FROM nvcr.io/nvidia/pytorch:24.08-py3

# Define environments
ENV MAX_JOBS=32
ENV DEBIAN_FRONTEND=noninteractive
ENV NODE_OPTIONS=""

# Define installation arguments
ARG APT_SOURCE=https://mirrors.ustc.edu.cn/ubuntu/

# Set apt source
RUN cp /etc/apt/sources.list /etc/apt/sources.list.bak && \
    { \
    echo "deb ${APT_SOURCE} jammy main restricted universe multiverse"; \
    echo "deb ${APT_SOURCE} jammy-updates main restricted universe multiverse"; \
    echo "deb ${APT_SOURCE} jammy-backports main restricted universe multiverse"; \
    echo "deb ${APT_SOURCE} jammy-security main restricted universe multiverse"; \
    } > /etc/apt/sources.list

# Install systemctl
RUN apt-get update && \
    apt-get install -y -o Dpkg::Options::="--force-confdef" systemd && \
    apt-get clean

# Install tini
RUN apt-get update && \
    apt-get install -y tini && \
    apt-get clean

# Change pip source
ARG PIP_INDEX=https://mirrors.aliyun.com/pypi/simple/
RUN pip config set global.index-url "${PIP_INDEX}" && \
    pip config set global.extra-index-url "${PIP_INDEX}" && \
    python -m pip install --upgrade pip

# Install sglang-0.4.6.post5 and torch-memory-saver
RUN pip uninstall -y cuda-python && pip install "sglang[all]==0.4.6.post5" --no-cache-dir --find-links https://flashinfer.ai/whl/cu124/torch2.6/flashinfer-python && pip install torch-memory-saver --no-cache-dir

# Install torch-2.6.0 (version specifiers quoted so the shell does not treat >= as a redirect)
RUN pip install --no-cache-dir torch==2.6.0 torchvision==0.21.0 torchaudio==2.6.0 tensordict torchdata \
    "transformers>=4.49.0" accelerate datasets peft hf_transfer \
    ray[default] codetiming hydra-core pandas "pyarrow>=15.0.0" pylatexenc qwen-vl-utils wandb liger-kernel \
    pytest pre-commit py-spy pyext

# Install flash_attn-2.7.4.post1
RUN pip uninstall -y transformer-engine flash-attn && \
    wget -v https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.4.post1/flash_attn-2.7.4.post1+cu12torch2.6cxx11abiFALSE-cp310-cp310-linux_x86_64.whl && \
    pip install --no-cache-dir flash_attn-2.7.4.post1+cu12torch2.6cxx11abiFALSE-cp310-cp310-linux_x86_64.whl

# Fix cv2
RUN pip uninstall -y pynvml nvidia-ml-py && \
    pip install --no-cache-dir "nvidia-ml-py>=12.560.30" opencv-python-headless==4.8.0.74 fastapi==0.115.6

================================================
FILE: verl_rl/docker/Dockerfile.vemlp.vllm.te
================================================
# docker buildx build --platform linux/x86_64 -t "verlai/verl:$TAG" -f docker/$FILE .
# the image on docker.io is an alias for the one in the veturbo registry
# FROM vemlp-cn-beijing.cr.volces.com/veturbo/pytorch:2.4-cu124
FROM docker.io/haibinlin/verl:v0.0.5-th2.4.0-cu124-base

# only config pip index with https://pypi.tuna.tsinghua.edu.cn/simple if needed
# unset for now
RUN pip3 config unset global.index-url

# transformers 4.47.0 contains the following bug:
# AttributeError: 'Gemma2Attention' object has no attribute '_flash_attn_uses_top_left_mask'
RUN pip3 install --no-cache-dir \
    torch==2.4.0 \
    accelerate \
    codetiming \
    dill \
    hydra-core \
    numpy \
    pybind11 \
    tensordict \
    "transformers <= 4.46.0"

RUN pip3 install --no-cache-dir flash-attn==2.7.0.post2 --no-build-isolation

# vllm depends on ray
RUN pip3 install --no-cache-dir vllm==0.6.3 ray==2.10

# install apex
RUN MAX_JOBS=4 pip3 install -v --disable-pip-version-check --no-cache-dir --no-build-isolation \
    --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" \
    git+https://github.com/NVIDIA/apex

# install Transformer Engine
# - flash-attn pinned to 2.5.3 by TransformerEngine, switch to eric-haibin-lin/TransformerEngine.git@v1.7.0 to relax version req
# - install with: MAX_JOBS=1 NINJA_FLAGS="-j1" TE_BUILD_WITH_NINJA=0 to avoid OOM
# - cudnn is required by TransformerEngine
# RUN CUDNN_PATH=/opt/conda/lib/python3.11/site-packages/nvidia/cudnn \
#     pip3 install git+https://github.com/eric-haibin-lin/TransformerEngine.git@v1.7.0
RUN MAX_JOBS=1 NINJA_FLAGS="-j1" pip3 install flash-attn==2.5.3 --no-cache-dir --no-build-isolation
RUN MAX_JOBS=1 NINJA_FLAGS="-j1" pip3 install git+https://github.com/NVIDIA/TransformerEngine.git@v1.7

================================================
FILE: verl_rl/docker/Dockerfile.vllm.sglang.megatron.deepseek
================================================
# Start from the NVIDIA official image (ubuntu-22.04 + cuda-12.6 + python-3.10)
# https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-24-08.html
FROM nvcr.io/nvidia/pytorch:24.08-py3

# Define environments
ENV MAX_JOBS=32
ENV VLLM_WORKER_MULTIPROC_METHOD=spawn
ENV DEBIAN_FRONTEND=noninteractive
ENV NODE_OPTIONS=""
ENV PIP_ROOT_USER_ACTION=ignore
ENV HF_HUB_ENABLE_HF_TRANSFER="1"

# Define installation arguments
ARG APT_SOURCE=https://mirrors.tuna.tsinghua.edu.cn/ubuntu/
ARG PIP_INDEX=https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple

# Set apt source
RUN cp /etc/apt/sources.list /etc/apt/sources.list.bak && \
    { \
    echo "deb ${APT_SOURCE} jammy main restricted universe multiverse"; \
    echo "deb ${APT_SOURCE} jammy-updates main restricted universe multiverse"; \
    echo "deb ${APT_SOURCE} jammy-backports main restricted universe multiverse"; \
    echo "deb ${APT_SOURCE} jammy-security main restricted universe multiverse"; \
    } > /etc/apt/sources.list

# Install systemctl
RUN apt-get update && \
    apt-get install -y -o Dpkg::Options::="--force-confdef" systemd && \
    apt-get clean

# Install tini
RUN apt-get update && \
    apt-get install -y tini aria2 && \
    apt-get clean

# Change pip source
RUN pip config set global.index-url "${PIP_INDEX}" && \
    pip config set global.extra-index-url "${PIP_INDEX}" && \
    python -m pip install --upgrade pip

# Uninstall nv-pytorch fork
RUN pip uninstall -y torch torchvision torchaudio \
    pytorch-quantization pytorch-triton torch-tensorrt \
    xgboost transformer_engine flash_attn apex megatron-core grpcio

# Reinstall CUDA 12.4
RUN aria2c https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-ubuntu2204.pin && \
    mv cuda-ubuntu2204.pin /etc/apt/preferences.d/cuda-repository-pin-600
RUN aria2c --always-resume=true --max-tries=99999 https://developer.download.nvidia.com/compute/cuda/12.4.1/local_installers/cuda-repo-ubuntu2204-12-4-local_12.4.1-550.54.15-1_amd64.deb && \
    dpkg -i cuda-repo-ubuntu2204-12-4-local_12.4.1-550.54.15-1_amd64.deb && \
    cp /var/cuda-repo-ubuntu2204-12-4-local/cuda-*-keyring.gpg /usr/share/keyrings/ && \
    apt-get update && \
    apt-get -y install cuda-toolkit-12-4 && \
    rm cuda-repo-ubuntu2204-12-4-local_12.4.1-550.54.15-1_amd64.deb && \
    update-alternatives --set cuda /usr/local/cuda-12.4 && \
    rm -rf /usr/local/cuda-12.6

# Install torch-2.6.0+cu124 + vllm-0.8.5.post1 + sglang-0.4.6.post5
# torch-2.6.0+cu124: cxx11abi=False
# torch-2.6.0+cu126: cxx11abi=True
# see https://github.com/flashinfer-ai/flashinfer/issues/911
# Install sglang-0.4.6.post5 and torch-memory-saver
RUN pip install --resume-retries 999 "sglang[all]==0.4.6.post5" --no-cache-dir --find-links https://flashinfer.ai/whl/cu124/torch2.6/flashinfer-python && pip install --resume-retries 999 torch-memory-saver --no-cache-dir
RUN pip install --resume-retries 999 --no-cache-dir "vllm==0.8.5.post1" "torch==2.6.0" "torchvision==0.21.0" "torchaudio==2.6.0" "tensordict==0.6.2" torchdata
RUN pip install --resume-retries 999 --no-cache-dir "transformers[hf_xet]>=4.51.0" accelerate datasets peft hf-transfer \
    "numpy<2.0.0" "pyarrow>=15.0.0" pandas \
    ray[default] codetiming hydra-core pylatexenc qwen-vl-utils wandb dill pybind11 liger-kernel mathruler blobfile \
    pytest py-spy pyext pre-commit ruff

# Install flash-attn-2.7.4.post1 (cxx11abi=False)
RUN wget -nv https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.4.post1/flash_attn-2.7.4.post1+cu12torch2.6cxx11abiFALSE-cp310-cp310-linux_x86_64.whl && \
    pip install --no-cache-dir flash_attn-2.7.4.post1+cu12torch2.6cxx11abiFALSE-cp310-cp310-linux_x86_64.whl

# Fix packages
RUN pip uninstall -y pynvml nvidia-ml-py && \
    pip install --resume-retries 999 --no-cache-dir --upgrade "nvidia-ml-py>=12.560.30" "fastapi[standard]>=0.115.0" "optree>=0.13.0" "pydantic>=2.9" "grpcio>=1.62.1"

# Install cudnn
RUN aria2c --max-tries=9999 https://developer.download.nvidia.com/compute/cudnn/9.8.0/local_installers/cudnn-local-repo-ubuntu2204-9.8.0_1.0-1_amd64.deb && \
    dpkg -i cudnn-local-repo-ubuntu2204-9.8.0_1.0-1_amd64.deb && \
    cp /var/cudnn-local-repo-ubuntu2204-9.8.0/cudnn-*-keyring.gpg /usr/share/keyrings/ && \
    apt-get update && \
    apt-get -y install cudnn-cuda-12 && \
    rm cudnn-local-repo-ubuntu2204-9.8.0_1.0-1_amd64.deb
RUN pip install --resume-retries 999 --no-cache-dir nvidia-cudnn-cu12==9.8.0.87

# Install Apex
RUN git clone https://github.com/NVIDIA/apex.git && \
    cd apex && \
    pip install -v --disable-pip-version-check --no-cache-dir --no-build-isolation --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" ./

# Install TransformerEngine
RUN export NVTE_FRAMEWORK=pytorch && pip3 install --no-deps --no-cache-dir git+https://github.com/NVIDIA/TransformerEngine.git@v2.3

# Install Megatron-LM
RUN pip3 install --no-deps --no-cache-dir git+https://github.com/NVIDIA/Megatron-LM.git@core_v0.12.2

# Fix opencv
RUN pip install opencv-python
RUN pip install opencv-fixer && \
    python -c "from opencv_fixer import AutoFix; AutoFix()"

# Install verl
# Reset pip config
RUN pip config unset global.index-url && \
    pip config unset global.extra-index-url

RUN apt-get update && \
    apt-get install -y aria2 libfreeimage3 libfreeimage-dev zlib1g
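The cxx11abi notes in the Dockerfile above matter when picking prebuilt flash-attn and flashinfer wheels: the wheel's ABI tag has to match the torch build inside the image. A quick way to check which ABI a given torch build uses (the same probe the torch-2.7 base Dockerfile further below relies on):

```sh
# Print the C++11 ABI flag of the installed torch build; prebuilt wheels tagged
# cxx11abiTRUE / cxx11abiFALSE must match this value.
python -c "import torch; print('TRUE' if torch._C._GLIBCXX_USE_CXX11_ABI else 'FALSE')"
```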
================================================
FILE: verl_rl/docker/README.md
================================================
# Dockerfiles of verl

We provide pre-built Docker images for quick setup. Starting from this version, we use a new image release hierarchy for productivity and stability.

The images are divided into three categories:

- **Base Image**: No inference or training frameworks; only basic dependencies are installed. vLLM or SGLang can be installed directly on top of it, without reinstalling torch or CUDA.
- **Application Image**: Stable version with inference and training frameworks installed.
- **Preview Image**: Unstable version with the latest frameworks and features.

The first two image types are hosted in the Docker Hub [verlai/verl](https://hub.docker.com/r/verlai/verl) repository, while the preview images are hosted in community repositories.

> Image versions map to verl releases; for example, an image tagged ``verl0.4`` is built for verl release ``v0.4.x``.

## Base Image

The stable base image is ``verlai/verl:base-verl0.4-cu124-cudnn9.8-torch2.6-fa2.7.4``. The installed package versions can be read from the tag, and the Dockerfile can be found in ``verl[version]-[packages]/Dockerfile.base``.

The base images for preview are ``verlai/verl:base-verl0.5-cu126-cudnn9.8-torch2.7.1-fa2.8.0`` and ``verlai/verl:base-verl0.5-preview-cu128-cudnn9.8-torch2.7.1-fa2.8.0``, with different CUDA versions.

The base image is updated infrequently, and app images can be built on top of it without reinstalling the base packages.

## Application Image

Starting from this version, we provide separate images for vLLM and SGLang, since their dependencies (such as FlashInfer) have diverged. There are four types of application images available:

- **vLLM with FSDP and Megatron**: ``verlai/verl:app-verl0.4-vllm0.8.5-mcore0.12.2-te2.2``, with Deep-EP support: ``verlai/verl:app-verl0.4-vllm0.8.5-mcore0.12.2-te2.2-deepep``.
- **SGLang with FSDP and Megatron**: ``verlai/verl:app-verl0.4-sglang0.4.6.post5-vllm0.8.5-mcore0.12.2-te2.2`` (needs vLLM support, which can cause some package conflicts), with Deep-EP support: ``verlai/verl:app-verl0.4-sglang0.4.6.post5-vllm0.8.5-mcore0.12.2-te2.2-deepep``.
- **Preview version of SGLang with FSDP and Megatron, CUDA 12.6**: ``verlai/verl:app-verl0.5-sglang0.4.8-mcore0.12.2-te2.2``
- **Preview version of SGLang with FSDP and Megatron, CUDA 12.8**: ``verlai/verl:app-preview-verl0.5-sglang0.4.8-mcore0.12.2-te2.2``

For Megatron 0.13.0, we offer preview images; to use the latest code, replace ``mcore0.12.2`` with ``mcore0.13.0-preview`` in the image tags above. The latest vLLM support is coming soon.

Docker images with Megatron backends can run post-training for large language models such as ``Qwen/Qwen3-235B-A22B`` and ``deepseek-ai/DeepSeek-V3-0324``. Refer to the :doc:`Large Language Model Post-Training documentation<../perf/dpsk>` for more details.

Application images are updated frequently, and the Dockerfiles can be found in ``docker/verl[version]-[packages]/Dockerfile.app.[frameworks]``. Based on the base image, it is easy to build your own application image with the desired inference and training frameworks, as in the sketch below.
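As a minimal sketch (not an official recipe): a custom application image can pin its own inference and training stack on top of the stable base image. The tag and version pins below simply mirror the verl0.4 app Dockerfiles in this directory; adjust them to your target stack.

```dockerfile
# Hypothetical custom app image: verl base + vLLM + TransformerEngine/Megatron.
FROM verlai/verl:base-verl0.4-cu124-cudnn9.8-torch2.6-fa2.7.4

# Inference framework (same pin as the verl0.4 app images)
RUN pip install --no-cache-dir vllm==0.8.5.post1

# Training backends (same pins as the mcore0.12 app images)
RUN export NVTE_FRAMEWORK=pytorch && \
    pip3 install --no-deps --no-cache-dir --no-build-isolation \
    git+https://github.com/NVIDIA/TransformerEngine.git@v2.2.1
RUN pip3 install --no-deps --no-cache-dir --no-build-isolation \
    git+https://github.com/NVIDIA/Megatron-LM.git@core_v0.12.2
```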
## Community Image

For vLLM with FSDP, please refer to the [hiyouga/verl](https://hub.docker.com/r/hiyouga/verl) repository; the latest version is ``hiyouga/verl:ngc-th2.6.0-cu126-vllm0.8.4-flashinfer0.2.2-cxx11abi0``.

For SGLang with FSDP, please refer to the [ocss884/verl-sglang](https://hub.docker.com/r/ocss884/verl-sglang) repository; the latest version is ``ocss884/verl-sglang:ngc-th2.6.0-cu126-sglang0.4.6.post5``, which is provided by the SGLang RL Group.

See the files under ``docker/`` for the NGC-based images, or if you want to build your own.

Note that for AWS instances with an EFA network interface (SageMaker AI Pod), you need to install the EFA driver as shown in ``docker/Dockerfile.extenstion.awsefa``.

## Installation from Docker

After pulling the desired Docker image and installing the desired inference and training frameworks, you can run it with the following steps:

1. Launch the desired Docker image and attach to it (``<image:tag>`` is a placeholder for the image you pulled):

```sh
docker create --runtime=nvidia --gpus all --net=host --shm-size="10g" --cap-add=SYS_ADMIN -v .:/workspace/verl --name verl <image:tag> sleep infinity
docker start verl
docker exec -it verl bash
```

2. If you use the images provided, you only need to install verl itself, without dependencies:

```sh
# install the nightly version (recommended)
git clone https://github.com/volcengine/verl && cd verl
pip3 install --no-deps -e .
```

[Optional] If you want to switch between different frameworks, you can install verl with the corresponding extras:

```sh
# install the nightly version (recommended)
git clone https://github.com/volcengine/verl && cd verl
pip3 install -e .[vllm]
pip3 install -e .[sglang]
```

================================================
FILE: verl_rl/docker/verl0.4-cu124-torch2.6-fa2.7.4/Dockerfile.app.sglang.vllm.mcore0.12
================================================
# Start from the verl base image
# Dockerfile.base
FROM verlai/verl:base-verl0.4-cu124-cudnn9.8-torch2.6-fa2.7.4

# Define environments
ENV MAX_JOBS=32
ENV VLLM_WORKER_MULTIPROC_METHOD=spawn
ENV DEBIAN_FRONTEND=noninteractive
ENV NODE_OPTIONS=""
ENV PIP_ROOT_USER_ACTION=ignore
ENV HF_HUB_ENABLE_HF_TRANSFER="1"

# Install sglang-0.4.6.post5 and torch-memory-saver
RUN pip install --resume-retries 999 "sglang[all]==0.4.6.post5" --no-cache-dir --find-links https://flashinfer.ai/whl/cu124/torch2.6/flashinfer-python && pip install torch-memory-saver --no-cache-dir

# Some sglang operations in 0.4.6.post5 require vllm
# [Warning] vllm can have some packages not compatible with sglang, for example, flashinfer
RUN pip install --resume-retries 999 --no-cache-dir vllm==0.8.5.post1

# Fix packages
RUN pip install --no-cache-dir "tensordict==0.6.2" "transformers[hf_xet]>=4.51.0" accelerate datasets peft hf-transfer \
    "numpy<2.0.0" "pyarrow>=19.0.1" pandas \
    ray[default] codetiming hydra-core pylatexenc qwen-vl-utils wandb dill pybind11 liger-kernel mathruler blobfile xgrammar \
    pytest py-spy pyext pre-commit ruff

RUN pip uninstall -y pynvml nvidia-ml-py && \
    pip install --resume-retries 999 --no-cache-dir --upgrade "nvidia-ml-py>=12.560.30" "fastapi[standard]>=0.115.0" "optree>=0.13.0" "pydantic>=2.9" "grpcio>=1.62.1"

RUN pip install --resume-retries 999 --no-cache-dir nvidia-cudnn-cu12==9.8.0.87

# Install TransformerEngine
RUN export NVTE_FRAMEWORK=pytorch && pip3 install --resume-retries 999 --no-deps --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/TransformerEngine.git@v2.2.1

# Install Megatron-LM
RUN pip3 install --no-deps --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/Megatron-LM.git@core_v0.12.2

# Fix for transformers 4.53.0
RUN pip3 install --no-cache-dir "transformers[hf_xet]<4.52.0"

# Install mbridge
RUN pip3 install --no-cache-dir mbridge

================================================
FILE:
verl_rl/docker/verl0.4-cu124-torch2.6-fa2.7.4/Dockerfile.app.sglang.vllm.mcore0.12.deepep ================================================ # Start from the verl base image # Dockerfile.base FROM verlai/verl:base-verl0.4-cu124-cudnn9.8-torch2.6-fa2.7.4 # Define environments ENV MAX_JOBS=32 ENV VLLM_WORKER_MULTIPROC_METHOD=spawn ENV DEBIAN_FRONTEND=noninteractive ENV NODE_OPTIONS="" ENV PIP_ROOT_USER_ACTION=ignore ENV HF_HUB_ENABLE_HF_TRANSFER="1" # Install sglang-0.4.6.post5 and torch-memory-saver RUN pip install --resume-retries 999 "sglang[all]==0.4.6.post5" --no-cache-dir --find-links https://flashinfer.ai/whl/cu124/torch2.6/flashinfer-python && pip install torch-memory-saver --no-cache-dir # Some sglang operations in 0.4.6.post5 require vllm # [Warning] vllm can have some packages not compatible with sglang, for example, flashinfer RUN pip install --resume-retries 999 --no-cache-dir vllm==0.8.5.post1 # Fix packages RUN pip install --no-cache-dir "tensordict==0.6.2" "transformers[hf_xet]>=4.51.0" accelerate datasets peft hf-transfer \ "numpy<2.0.0" "pyarrow>=19.0.1" pandas \ ray[default] codetiming hydra-core pylatexenc qwen-vl-utils wandb dill pybind11 liger-kernel mathruler blobfile xgrammar \ pytest py-spy pyext pre-commit ruff RUN pip uninstall -y pynvml nvidia-ml-py && \ pip install --resume-retries 999 --no-cache-dir --upgrade "nvidia-ml-py>=12.560.30" "fastapi[standard]>=0.115.0" "optree>=0.13.0" "pydantic>=2.9" "grpcio>=1.62.1" RUN pip install --resume-retries 999 --no-cache-dir nvidia-cudnn-cu12==9.8.0.87 # Install TransformerEngine RUN export NVTE_FRAMEWORK=pytorch && pip3 install --resume-retries 999 --no-deps --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/TransformerEngine.git@v2.2.1 # Install Megatron-LM RUN pip3 install --no-deps --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/Megatron-LM.git@core_v0.12.2 # Fix for transformers 4.53.0 RUN pip3 install --no-cache-dir "transformers[hf_xet]<4.52.0" # Install mbridge RUN pip3 install --no-cache-dir mbridge # Install DeepEP ## the dependency of IBGDA RUN ln -s /usr/lib/x86_64-linux-gnu/libmlx5.so.1 /usr/lib/x86_64-linux-gnu/libmlx5.so ## Clone and build deepep and deepep-nvshmem RUN git clone -b v2.3.1 https://github.com/NVIDIA/gdrcopy.git && \ git clone https://github.com/deepseek-ai/DeepEP.git && \ cd DeepEP && git checkout a84a248 # Prepare nvshmem RUN wget https://developer.nvidia.com/downloads/assets/secure/nvshmem/nvshmem_src_3.2.5-1.txz && \ tar -xvf nvshmem_src_3.2.5-1.txz && mv nvshmem_src deepep-nvshmem && \ cd deepep-nvshmem && git apply ../DeepEP/third-party/nvshmem.patch ENV CUDA_HOME=/usr/local/cuda ### Set MPI environment variables. Having errors when not set. ENV CPATH=/usr/local/mpi/include:$CPATH ENV LD_LIBRARY_PATH=/usr/local/mpi/lib:$LD_LIBRARY_PATH ENV LD_LIBRARY_PATH=/usr/local/x86_64-linux-gnu:$LD_LIBRARY_PATH ENV GDRCOPY_HOME=/workspace/gdrcopy ## Build deepep-nvshmem RUN cd deepep-nvshmem && \ NVSHMEM_SHMEM_SUPPORT=0 \ NVSHMEM_UCX_SUPPORT=0 \ NVSHMEM_USE_NCCL=0 \ NVSHMEM_MPI_SUPPORT=0 \ NVSHMEM_IBGDA_SUPPORT=1 \ NVSHMEM_PMIX_SUPPORT=0 \ NVSHMEM_TIMEOUT_DEVICE_POLLING=0 \ NVSHMEM_USE_GDRCOPY=1 \ cmake -G Ninja -S . 
-B build/ -DCMAKE_INSTALL_PREFIX=/workspace/deepep-nvshmem/install && cmake --build build/ --target install ENV NVSHMEM_DIR=/workspace/deepep-nvshmem/install ENV LD_LIBRARY_PATH=$NVSHMEM_DIR/lib:$LD_LIBRARY_PATH ENV PATH=$NVSHMEM_DIR/bin:$PATH ## Build deepep RUN cd DeepEP && \ python setup.py install ================================================ FILE: verl_rl/docker/verl0.4-cu124-torch2.6-fa2.7.4/Dockerfile.app.sglang.vllm.mcore0.13.preview ================================================ # Start from the verl base image # Dockerfile.base FROM verlai/verl:base-verl0.4-cu124-cudnn9.8-torch2.6-fa2.7.4 # Define environments ENV MAX_JOBS=32 ENV VLLM_WORKER_MULTIPROC_METHOD=spawn ENV DEBIAN_FRONTEND=noninteractive ENV NODE_OPTIONS="" ENV PIP_ROOT_USER_ACTION=ignore ENV HF_HUB_ENABLE_HF_TRANSFER="1" # Install sglang-0.4.6.post5 and torch-memory-saver RUN pip install --resume-retries 999 "sglang[all]==0.4.6.post5" --no-cache-dir --find-links https://flashinfer.ai/whl/cu124/torch2.6/flashinfer-python && pip install torch-memory-saver --no-cache-dir # Some sglang operations in 0.4.6.post5 require vllm # [Warning] vllm can have some packages not compatible with sglang, for example, flashinfer RUN pip install --resume-retries 999 --no-cache-dir vllm==0.8.5.post1 # Fix packages RUN pip install --no-cache-dir "tensordict==0.6.2" "transformers[hf_xet]>=4.51.0" accelerate datasets peft hf-transfer \ "numpy<2.0.0" "pyarrow>=19.0.1" pandas \ ray[default] codetiming hydra-core pylatexenc qwen-vl-utils wandb dill pybind11 liger-kernel mathruler blobfile xgrammar \ pytest py-spy pyext pre-commit ruff RUN pip uninstall -y pynvml nvidia-ml-py && \ pip install --resume-retries 999 --no-cache-dir --upgrade "nvidia-ml-py>=12.560.30" "fastapi[standard]>=0.115.0" "optree>=0.13.0" "pydantic>=2.9" "grpcio>=1.62.1" RUN pip install --resume-retries 999 --no-cache-dir nvidia-cudnn-cu12==9.8.0.87 # Install TransformerEngine RUN export NVTE_FRAMEWORK=pytorch && pip3 install --resume-retries 999 --no-deps --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/TransformerEngine.git@release_v2.5 # Install Megatron-LM RUN pip3 install --no-deps --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/Megatron-LM.git@core_r0.13.0 # Fix for transformers 4.53.0 RUN pip3 install --no-cache-dir "transformers[hf_xet]<4.52.0" # Install mbridge RUN pip3 install --no-cache-dir mbridge # Install DeepEP ## the dependency of IBGDA RUN ln -s /usr/lib/x86_64-linux-gnu/libmlx5.so.1 /usr/lib/x86_64-linux-gnu/libmlx5.so ## Clone and build deepep and deepep-nvshmem RUN git clone -b v2.3.1 https://github.com/NVIDIA/gdrcopy.git && \ git clone https://github.com/deepseek-ai/DeepEP.git && \ cd DeepEP && git checkout a84a248 # Prepare nvshmem RUN wget https://developer.nvidia.com/downloads/assets/secure/nvshmem/nvshmem_src_3.2.5-1.txz && \ tar -xvf nvshmem_src_3.2.5-1.txz && mv nvshmem_src deepep-nvshmem && \ cd deepep-nvshmem && git apply ../DeepEP/third-party/nvshmem.patch ENV CUDA_HOME=/usr/local/cuda ### Set MPI environment variables. Having errors when not set. 
ENV CPATH=/usr/local/mpi/include:$CPATH ENV LD_LIBRARY_PATH=/usr/local/mpi/lib:$LD_LIBRARY_PATH ENV LD_LIBRARY_PATH=/usr/local/x86_64-linux-gnu:$LD_LIBRARY_PATH ENV GDRCOPY_HOME=/workspace/gdrcopy ## Build deepep-nvshmem RUN cd deepep-nvshmem && \ NVSHMEM_SHMEM_SUPPORT=0 \ NVSHMEM_UCX_SUPPORT=0 \ NVSHMEM_USE_NCCL=0 \ NVSHMEM_MPI_SUPPORT=0 \ NVSHMEM_IBGDA_SUPPORT=1 \ NVSHMEM_PMIX_SUPPORT=0 \ NVSHMEM_TIMEOUT_DEVICE_POLLING=0 \ NVSHMEM_USE_GDRCOPY=1 \ cmake -G Ninja -S . -B build/ -DCMAKE_INSTALL_PREFIX=/workspace/deepep-nvshmem/install && cmake --build build/ --target install ENV NVSHMEM_DIR=/workspace/deepep-nvshmem/install ENV LD_LIBRARY_PATH=$NVSHMEM_DIR/lib:$LD_LIBRARY_PATH ENV PATH=$NVSHMEM_DIR/bin:$PATH ## Build deepep RUN cd DeepEP && \ python setup.py install ================================================ FILE: verl_rl/docker/verl0.4-cu124-torch2.6-fa2.7.4/Dockerfile.app.vllm.mcore0.12 ================================================ # Start from the verl base image # Dockerfile.base FROM verlai/verl:base-verl0.4-cu124-cudnn9.8-torch2.6-fa2.7.4 # Define environments ENV MAX_JOBS=32 ENV VLLM_WORKER_MULTIPROC_METHOD=spawn ENV DEBIAN_FRONTEND=noninteractive ENV NODE_OPTIONS="" ENV PIP_ROOT_USER_ACTION=ignore ENV HF_HUB_ENABLE_HF_TRANSFER="1" # Install torch-2.6.0+cu124 + vllm-0.8.5.post1 # torch-2.6.0+cu124: cxx11abi=False # torch-2.6.0+cu126: cxx11abi=True # see https://github.com/flashinfer-ai/flashinfer/issues/911 RUN pip install --resume-retries 999 --no-cache-dir vllm==0.8.5.post1 # Install flashinfer-0.2.2.post1+cu126 (cxx11abi=True) # vllm-0.8.3 does not support flashinfer>=0.2.3 # see https://github.com/vllm-project/vllm/pull/15777 RUN aria2c --max-tries=9999 https://github.com/flashinfer-ai/flashinfer/releases/download/v0.2.2.post1/flashinfer_python-0.2.2.post1+cu124torch2.6-cp38-abi3-linux_x86_64.whl && \ pip install --no-cache-dir flashinfer_python-0.2.2.post1+cu124torch2.6-cp38-abi3-linux_x86_64.whl && \ rm flashinfer_python-0.2.2.post1+cu124torch2.6-cp38-abi3-linux_x86_64.whl # Fix packages RUN pip install --no-cache-dir "tensordict==0.6.2" "transformers[hf_xet]>=4.51.0" accelerate datasets peft hf-transfer \ "numpy<2.0.0" "pyarrow>=19.0.1" pandas \ ray[default] codetiming hydra-core pylatexenc qwen-vl-utils wandb dill pybind11 liger-kernel mathruler blobfile xgrammar \ pytest py-spy pyext pre-commit ruff RUN pip uninstall -y pynvml nvidia-ml-py && \ pip install --resume-retries 999 --no-cache-dir --upgrade "nvidia-ml-py>=12.560.30" "fastapi[standard]>=0.115.0" "optree>=0.13.0" "pydantic>=2.9" "grpcio>=1.62.1" RUN pip install --resume-retries 999 --no-cache-dir nvidia-cudnn-cu12==9.8.0.87 # Install TransformerEngine RUN export NVTE_FRAMEWORK=pytorch && pip3 install --resume-retries 999 --no-deps --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/TransformerEngine.git@v2.2.1 # Install Megatron-LM RUN pip3 install --no-deps --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/Megatron-LM.git@core_v0.12.2 # Fix for transformers 4.53.0 RUN pip3 install --no-cache-dir "transformers[hf_xet]<4.52.0" # Install mbridge RUN pip3 install --no-cache-dir mbridge ================================================ FILE: verl_rl/docker/verl0.4-cu124-torch2.6-fa2.7.4/Dockerfile.app.vllm.mcore0.12.deepep ================================================ # Start from the verl base image # Dockerfile.base FROM verlai/verl:base-verl0.4-cu124-cudnn9.8-torch2.6-fa2.7.4 # Define environments ENV MAX_JOBS=32 ENV VLLM_WORKER_MULTIPROC_METHOD=spawn ENV 
DEBIAN_FRONTEND=noninteractive ENV NODE_OPTIONS="" ENV PIP_ROOT_USER_ACTION=ignore ENV HF_HUB_ENABLE_HF_TRANSFER="1" # Install torch-2.6.0+cu124 + vllm-0.8.5.post1 # torch-2.6.0+cu124: cxx11abi=False # torch-2.6.0+cu126: cxx11abi=True # see https://github.com/flashinfer-ai/flashinfer/issues/911 RUN pip install --resume-retries 999 --no-cache-dir vllm==0.8.5.post1 # Install flashinfer-0.2.2.post1+cu126 (cxx11abi=True) # vllm-0.8.3 does not support flashinfer>=0.2.3 # see https://github.com/vllm-project/vllm/pull/15777 RUN aria2c --max-tries=9999 https://github.com/flashinfer-ai/flashinfer/releases/download/v0.2.2.post1/flashinfer_python-0.2.2.post1+cu124torch2.6-cp38-abi3-linux_x86_64.whl && \ pip install --no-cache-dir flashinfer_python-0.2.2.post1+cu124torch2.6-cp38-abi3-linux_x86_64.whl && \ rm flashinfer_python-0.2.2.post1+cu124torch2.6-cp38-abi3-linux_x86_64.whl # Fix packages RUN pip install --no-cache-dir "tensordict==0.6.2" "transformers[hf_xet]>=4.51.0" accelerate datasets peft hf-transfer \ "numpy<2.0.0" "pyarrow>=19.0.1" pandas \ ray[default] codetiming hydra-core pylatexenc qwen-vl-utils wandb dill pybind11 liger-kernel mathruler blobfile xgrammar \ pytest py-spy pyext pre-commit ruff RUN pip uninstall -y pynvml nvidia-ml-py && \ pip install --resume-retries 999 --no-cache-dir --upgrade "nvidia-ml-py>=12.560.30" "fastapi[standard]>=0.115.0" "optree>=0.13.0" "pydantic>=2.9" "grpcio>=1.62.1" RUN pip install --resume-retries 999 --no-cache-dir nvidia-cudnn-cu12==9.8.0.87 # Install TransformerEngine RUN export NVTE_FRAMEWORK=pytorch && pip3 install --resume-retries 999 --no-deps --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/TransformerEngine.git@v2.2.1 # Install Megatron-LM RUN pip3 install --no-deps --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/Megatron-LM.git@core_v0.12.2 # Fix for transformers 4.53.0 RUN pip3 install --no-cache-dir "transformers[hf_xet]<4.52.0" # Install mbridge RUN pip3 install --no-cache-dir mbridge # Install DeepEP ## the dependency of IBGDA RUN ln -s /usr/lib/x86_64-linux-gnu/libmlx5.so.1 /usr/lib/x86_64-linux-gnu/libmlx5.so ## Clone and build deepep and deepep-nvshmem RUN git clone -b v2.3.1 https://github.com/NVIDIA/gdrcopy.git && \ git clone https://github.com/deepseek-ai/DeepEP.git && \ cd DeepEP && git checkout a84a248 # Prepare nvshmem RUN wget https://developer.nvidia.com/downloads/assets/secure/nvshmem/nvshmem_src_3.2.5-1.txz && \ tar -xvf nvshmem_src_3.2.5-1.txz && mv nvshmem_src deepep-nvshmem && \ cd deepep-nvshmem && git apply ../DeepEP/third-party/nvshmem.patch ENV CUDA_HOME=/usr/local/cuda ### Set MPI environment variables. Having errors when not set. ENV CPATH=/usr/local/mpi/include:$CPATH ENV LD_LIBRARY_PATH=/usr/local/mpi/lib:$LD_LIBRARY_PATH ENV LD_LIBRARY_PATH=/usr/local/x86_64-linux-gnu:$LD_LIBRARY_PATH ENV GDRCOPY_HOME=/workspace/gdrcopy ## Build deepep-nvshmem RUN cd deepep-nvshmem && \ NVSHMEM_SHMEM_SUPPORT=0 \ NVSHMEM_UCX_SUPPORT=0 \ NVSHMEM_USE_NCCL=0 \ NVSHMEM_MPI_SUPPORT=0 \ NVSHMEM_IBGDA_SUPPORT=1 \ NVSHMEM_PMIX_SUPPORT=0 \ NVSHMEM_TIMEOUT_DEVICE_POLLING=0 \ NVSHMEM_USE_GDRCOPY=1 \ cmake -G Ninja -S . 
-B build/ -DCMAKE_INSTALL_PREFIX=/workspace/deepep-nvshmem/install && cmake --build build/ --target install ENV NVSHMEM_DIR=/workspace/deepep-nvshmem/install ENV LD_LIBRARY_PATH=$NVSHMEM_DIR/lib:$LD_LIBRARY_PATH ENV PATH=$NVSHMEM_DIR/bin:$PATH ## Build deepep RUN cd DeepEP && \ python setup.py install ================================================ FILE: verl_rl/docker/verl0.4-cu124-torch2.6-fa2.7.4/Dockerfile.app.vllm.mcore0.13.preview ================================================ # Start from the verl base image # Dockerfile.base FROM verlai/verl:base-verl0.4-cu124-cudnn9.8-torch2.6-fa2.7.4 # Define environments ENV MAX_JOBS=32 ENV VLLM_WORKER_MULTIPROC_METHOD=spawn ENV DEBIAN_FRONTEND=noninteractive ENV NODE_OPTIONS="" ENV PIP_ROOT_USER_ACTION=ignore ENV HF_HUB_ENABLE_HF_TRANSFER="1" # Install torch-2.6.0+cu124 + vllm-0.8.5.post1 # torch-2.6.0+cu124: cxx11abi=False # torch-2.6.0+cu126: cxx11abi=True # see https://github.com/flashinfer-ai/flashinfer/issues/911 RUN pip install --resume-retries 999 --no-cache-dir vllm==0.8.5.post1 # Install flashinfer-0.2.2.post1+cu126 (cxx11abi=True) # vllm-0.8.3 does not support flashinfer>=0.2.3 # see https://github.com/vllm-project/vllm/pull/15777 RUN aria2c --max-tries=9999 https://github.com/flashinfer-ai/flashinfer/releases/download/v0.2.2.post1/flashinfer_python-0.2.2.post1+cu124torch2.6-cp38-abi3-linux_x86_64.whl && \ pip install --no-cache-dir flashinfer_python-0.2.2.post1+cu124torch2.6-cp38-abi3-linux_x86_64.whl && \ rm flashinfer_python-0.2.2.post1+cu124torch2.6-cp38-abi3-linux_x86_64.whl # Fix packages RUN pip install --no-cache-dir "tensordict==0.6.2" "transformers[hf_xet]>=4.51.0" accelerate datasets peft hf-transfer \ "numpy<2.0.0" "pyarrow>=19.0.1" pandas \ ray[default] codetiming hydra-core pylatexenc qwen-vl-utils wandb dill pybind11 liger-kernel mathruler blobfile xgrammar \ pytest py-spy pyext pre-commit ruff RUN pip uninstall -y pynvml nvidia-ml-py && \ pip install --resume-retries 999 --no-cache-dir --upgrade "nvidia-ml-py>=12.560.30" "fastapi[standard]>=0.115.0" "optree>=0.13.0" "pydantic>=2.9" "grpcio>=1.62.1" RUN pip install --resume-retries 999 --no-cache-dir nvidia-cudnn-cu12==9.8.0.87 # Install TransformerEngine RUN export NVTE_FRAMEWORK=pytorch && pip3 install --resume-retries 999 --no-deps --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/TransformerEngine.git@release_v2.5 # Install Megatron-LM RUN pip3 install --no-deps --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/Megatron-LM.git@core_v0.12.2 # Install mbridge RUN pip3 install --no-cache-dir mbridge # Install DeepEP ## the dependency of IBGDA RUN ln -s /usr/lib/x86_64-linux-gnu/libmlx5.so.1 /usr/lib/x86_64-linux-gnu/libmlx5.so ## Clone and build deepep and deepep-nvshmem RUN git clone -b v2.3.1 https://github.com/NVIDIA/gdrcopy.git && \ git clone https://github.com/deepseek-ai/DeepEP.git && \ cd DeepEP && git checkout a84a248 # Prepare nvshmem RUN wget https://developer.nvidia.com/downloads/assets/secure/nvshmem/nvshmem_src_3.2.5-1.txz && \ tar -xvf nvshmem_src_3.2.5-1.txz && mv nvshmem_src deepep-nvshmem && \ cd deepep-nvshmem && git apply ../DeepEP/third-party/nvshmem.patch ENV CUDA_HOME=/usr/local/cuda ### Set MPI environment variables. Having errors when not set. 
ENV CPATH=/usr/local/mpi/include:$CPATH ENV LD_LIBRARY_PATH=/usr/local/mpi/lib:$LD_LIBRARY_PATH ENV LD_LIBRARY_PATH=/usr/local/x86_64-linux-gnu:$LD_LIBRARY_PATH ENV GDRCOPY_HOME=/workspace/gdrcopy ## Build deepep-nvshmem RUN cd deepep-nvshmem && \ NVSHMEM_SHMEM_SUPPORT=0 \ NVSHMEM_UCX_SUPPORT=0 \ NVSHMEM_USE_NCCL=0 \ NVSHMEM_MPI_SUPPORT=0 \ NVSHMEM_IBGDA_SUPPORT=1 \ NVSHMEM_PMIX_SUPPORT=0 \ NVSHMEM_TIMEOUT_DEVICE_POLLING=0 \ NVSHMEM_USE_GDRCOPY=1 \ cmake -G Ninja -S . -B build/ -DCMAKE_INSTALL_PREFIX=/workspace/deepep-nvshmem/install && cmake --build build/ --target install ENV NVSHMEM_DIR=/workspace/deepep-nvshmem/install ENV LD_LIBRARY_PATH=$NVSHMEM_DIR/lib:$LD_LIBRARY_PATH ENV PATH=$NVSHMEM_DIR/bin:$PATH ## Build deepep RUN cd DeepEP && \ python setup.py install ================================================ FILE: verl_rl/docker/verl0.4-cu124-torch2.6-fa2.7.4/Dockerfile.base ================================================ # Base Docker Image of verl, with CUDA/Torch/FlashAttn/Apex/TransformerEngine, without other frameworks # Target: verlai/verl:base-v2-cu124-cudnn9.8-torch2.6-fa2.8.0-te2.3 # Start from the NVIDIA official image (ubuntu-22.04 + cuda-12.6 + python-3.10) # https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-24-08.html FROM nvcr.io/nvidia/pytorch:24.08-py3 # Define environments ENV MAX_JOBS=16 ENV VLLM_WORKER_MULTIPROC_METHOD=spawn ENV DEBIAN_FRONTEND=noninteractive ENV NODE_OPTIONS="" ENV PIP_ROOT_USER_ACTION=ignore ENV HF_HUB_ENABLE_HF_TRANSFER="1" # Define installation arguments ARG APT_SOURCE=https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ ARG PIP_INDEX=https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple # Set apt source RUN cp /etc/apt/sources.list /etc/apt/sources.list.bak && \ { \ echo "deb ${APT_SOURCE} jammy main restricted universe multiverse"; \ echo "deb ${APT_SOURCE} jammy-updates main restricted universe multiverse"; \ echo "deb ${APT_SOURCE} jammy-backports main restricted universe multiverse"; \ echo "deb ${APT_SOURCE} jammy-security main restricted universe multiverse"; \ } > /etc/apt/sources.list # Install systemctl RUN apt-get update && \ apt-get install -y -o Dpkg::Options::="--force-confdef" systemd && \ apt-get clean # Install tini RUN apt-get update && \ apt-get install -y tini aria2 && \ apt-get clean # Change pip source RUN pip config set global.index-url "${PIP_INDEX}" && \ pip config set global.extra-index-url "${PIP_INDEX}" && \ python -m pip install --upgrade pip # Uninstall nv-pytorch fork RUN pip uninstall -y torch torchvision torchaudio \ pytorch-quantization pytorch-triton torch-tensorrt \ xgboost transformer_engine flash_attn apex megatron-core grpcio # Reinstall CUDA 12.4 RUN aria2c https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-ubuntu2204.pin && \ mv cuda-ubuntu2204.pin /etc/apt/preferences.d/cuda-repository-pin-600 RUN aria2c --always-resume=true --max-tries=99999 https://developer.download.nvidia.com/compute/cuda/12.4.1/local_installers/cuda-repo-ubuntu2204-12-4-local_12.4.1-550.54.15-1_amd64.deb && \ dpkg -i cuda-repo-ubuntu2204-12-4-local_12.4.1-550.54.15-1_amd64.deb && \ cp /var/cuda-repo-ubuntu2204-12-4-local/cuda-*-keyring.gpg /usr/share/keyrings/ && \ apt-get update && \ apt-get -y install cuda-toolkit-12-4 && \ rm cuda-repo-ubuntu2204-12-4-local_12.4.1-550.54.15-1_amd64.deb && \ update-alternatives --set cuda /usr/local/cuda-12.4 && \ rm -rf /usr/local/cuda-12.6 RUN pip install --resume-retries 999 --no-cache-dir torch==2.6.0 torchvision==0.21.0 torchaudio==2.6.0 RUN pip 
install --resume-retries 999 --no-cache-dir "tensordict==0.6.2" torchdata "transformers[hf_xet]>=4.51.0" accelerate datasets peft hf-transfer \
    "numpy<2.0.0" "pyarrow>=19.0.1" pandas \
    ray[default] codetiming hydra-core pylatexenc qwen-vl-utils wandb dill pybind11 liger-kernel mathruler blobfile xgrammar \
    pytest py-spy pyext pre-commit ruff

# Install flash-attn-2.7.4.post1 (cxx11abi=False)
RUN wget -nv https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.4.post1/flash_attn-2.7.4.post1+cu12torch2.6cxx11abiFALSE-cp310-cp310-linux_x86_64.whl && \
    pip install --no-cache-dir flash_attn-2.7.4.post1+cu12torch2.6cxx11abiFALSE-cp310-cp310-linux_x86_64.whl

# Fix packages
RUN pip uninstall -y pynvml nvidia-ml-py && \
    pip install --no-cache-dir --upgrade "nvidia-ml-py>=12.560.30" "fastapi[standard]>=0.115.0" "optree>=0.13.0" "pydantic>=2.9" "grpcio>=1.62.1"

# Install cudnn
RUN aria2c --max-tries=9999 https://developer.download.nvidia.com/compute/cudnn/9.8.0/local_installers/cudnn-local-repo-ubuntu2204-9.8.0_1.0-1_amd64.deb && \
    dpkg -i cudnn-local-repo-ubuntu2204-9.8.0_1.0-1_amd64.deb && \
    cp /var/cudnn-local-repo-ubuntu2204-9.8.0/cudnn-*-keyring.gpg /usr/share/keyrings/ && \
    apt-get update && \
    apt-get -y install cudnn-cuda-12 && \
    rm cudnn-local-repo-ubuntu2204-9.8.0_1.0-1_amd64.deb

# Install Apex
RUN git clone https://github.com/NVIDIA/apex.git && \
    cd apex && \
    pip install -v --disable-pip-version-check --no-cache-dir --no-build-isolation --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" ./

# Profiling tools
RUN aria2c --always-resume=true --max-tries=99999 https://developer.nvidia.com/downloads/assets/tools/secure/nsight-systems/2025_3/nsight-systems-2025.3.1_2025.3.1.90-1_amd64.deb && \
    apt-get update && apt-get install -y libxcb-cursor0 && \
    dpkg -i ./nsight-systems-2025.3.1_2025.3.1.90-1_amd64.deb && \
    rm -rf /usr/local/cuda/bin/nsys && \
    ln -s /opt/nvidia/nsight-systems/2025.3.1/target-linux-x64/nsys /usr/local/cuda/bin/nsys && \
    rm -rf /usr/local/cuda/bin/nsys-ui && \
    ln -s /opt/nvidia/nsight-systems/2025.3.1/target-linux-x64/nsys-ui /usr/local/cuda/bin/nsys-ui && \
    rm nsight-systems-2025.3.1_2025.3.1.90-1_amd64.deb

# Fix opencv
RUN pip install --resume-retries 999 --no-cache-dir opencv-python
RUN pip install --resume-retries 999 --no-cache-dir opencv-fixer && \
    python -c "from opencv_fixer import AutoFix; AutoFix()"

RUN pip install --resume-retries 999 --no-cache-dir cuda-bindings

# Reset pip config
RUN pip config unset global.index-url && \
    pip config unset global.extra-index-url

RUN apt-get update && \
    apt-get install -y libfreeimage3 libfreeimage-dev zlib1g htop

================================================
FILE: verl_rl/docker/verl0.4-cu124-torch2.6-fa2.7.4/README.md
================================================
# verl image with verl v0.4.x

## Important package versions

```txt
cuda==12.4
cudnn==9.8.0
torch==2.6.0
flash_attn==2.7.4
sglang==0.4.6.post5
vllm==0.8.5.post1
nvidia-cudnn-cu12==9.8.0.87
transformer_engine==2.3
megatron.core==core_v0.12.2

# Preview
transformer_engine==2.5
megatron.core==core_r0.13.0
```
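To sanity-check these pins inside a pulled image, one option (illustrative; substitute whichever app image tag from the Target list below you actually use):

```sh
docker run --rm verlai/verl:app-verl0.4-vllm0.8.5-mcore0.12.2-te2.2 \
  python -c "import torch, vllm; print(torch.__version__, vllm.__version__)"
```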
## Target

- Base image:
  - `verlai/verl:base-verl0.4-cu124-cudnn9.8-torch2.6-fa2.7.4`
- App image:
  - `verlai/verl:app-verl0.4-sglang0.4.6.post5-vllm0.8.5-mcore0.12.2-te2.2`: SGLang 0.4.6.post5 requires vLLM, which can have some package conflicts with SGLang
  - `verlai/verl:app-verl0.4-sglang0.4.6.post5-vllm0.8.5-mcore0.12.2-te2.2-deepep`: Built with deepep
  - `verlai/verl:app-verl0.4-vllm0.8.5-mcore0.12.2-te2.2`
  - `verlai/verl:app-verl0.4-vllm0.8.5-mcore0.12.2-te2.2-deepep`: Built with deepep
- Preview image:
  - `verlai/verl:app-verl0.4-sglang0.4.6.post5-vllm0.8.5-mcore0.13.0-te2.2-preview`
  - `verlai/verl:app-verl0.4-vllm0.8.5-mcore0.13.0-te2.2-preview`

================================================
FILE: verl_rl/docker/verl0.5-cu126-torch2.7-fa2.7.4/Dockerfile.app.sglang.mcore0.12
================================================
# Start from the verl base image
# Dockerfile.base
FROM verlai/verl:base-verl0.5-cu126-cudnn9.8-torch2.7.1-fa2.7.4

# Define environments
ENV MAX_JOBS=8
ENV VLLM_WORKER_MULTIPROC_METHOD=spawn
ENV DEBIAN_FRONTEND=noninteractive
ENV NODE_OPTIONS=""
ENV PIP_ROOT_USER_ACTION=ignore
ENV HF_HUB_ENABLE_HF_TRANSFER="1"

# Install sglang-0.4.8 and torch-memory-saver
# Install FlashInfer Python package
RUN pip install --upgrade pip setuptools packaging
RUN pip install --resume-retries 999 --no-cache-dir --no-build-isolation flashinfer-python==0.2.6.post1
RUN pip install --resume-retries 999 --no-cache-dir "sglang[all]==0.4.8" && pip install torch-memory-saver --no-cache-dir

# Fix packages
RUN pip install --no-cache-dir "tensordict==0.6.2" "transformers[hf_xet]>=4.52.3" accelerate datasets peft hf-transfer \
    "numpy<2.0.0" "pyarrow>=19.0.1" pandas \
    ray[default] codetiming hydra-core pylatexenc qwen-vl-utils wandb dill pybind11 liger-kernel mathruler blobfile xgrammar \
    pytest py-spy pyext pre-commit ruff

RUN pip uninstall -y pynvml nvidia-ml-py && \
    pip install --resume-retries 999 --no-cache-dir --upgrade "nvidia-ml-py>=12.560.30" "fastapi[standard]>=0.115.0" "optree>=0.13.0" "pydantic>=2.9" "grpcio>=1.62.1"

RUN pip install --resume-retries 999 --no-cache-dir nvidia-cudnn-cu12==9.8.0.87

# Install TransformerEngine
RUN export NVTE_FRAMEWORK=pytorch && pip3 install --resume-retries 999 --no-deps --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/TransformerEngine.git@v2.2.1

# Install Megatron-LM
RUN pip3 install --no-deps --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/Megatron-LM.git@core_v0.12.2

# Install mbridge
RUN pip3 install --no-cache-dir mbridge

================================================
FILE: verl_rl/docker/verl0.5-cu126-torch2.7-fa2.7.4/Dockerfile.app.vllm.mcore0.12
================================================
# Start from the verl base image
# Dockerfile.base
FROM verlai/verl:base-verl0.5-cu126-cudnn9.8-torch2.7.0-fa2.7.4

# Define environments
ENV MAX_JOBS=32
ENV VLLM_WORKER_MULTIPROC_METHOD=spawn
ENV DEBIAN_FRONTEND=noninteractive
ENV NODE_OPTIONS=""
ENV PIP_ROOT_USER_ACTION=ignore
ENV HF_HUB_ENABLE_HF_TRANSFER="1"

# Install torch-2.7.0+cu126 + vllm-0.9.1
RUN pip install --resume-retries 999 --no-cache-dir vllm==0.9.1

# Fix packages
RUN pip install --no-cache-dir "tensordict==0.6.2" "transformers[hf_xet]>=4.51.0" accelerate datasets peft hf-transfer \
    "numpy<2.0.0" "pyarrow>=19.0.1" pandas \
    ray[default] codetiming hydra-core pylatexenc qwen-vl-utils wandb dill pybind11 liger-kernel mathruler blobfile xgrammar \
    pytest py-spy pyext pre-commit ruff

RUN pip uninstall -y pynvml nvidia-ml-py && \
    pip install --resume-retries 999 --no-cache-dir --upgrade "nvidia-ml-py>=12.560.30" "fastapi[standard]>=0.115.0" "optree>=0.13.0" "pydantic>=2.9" "grpcio>=1.62.1"

RUN pip install --resume-retries 999 --no-cache-dir nvidia-cudnn-cu12==9.8.0.87

# Install TransformerEngine
RUN export NVTE_FRAMEWORK=pytorch && pip3 install --resume-retries 999 --no-deps --no-cache-dir --no-build-isolation \
git+https://github.com/NVIDIA/TransformerEngine.git@v2.2.1 # Install Megatron-LM RUN pip3 install --no-deps --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/Megatron-LM.git@core_v0.12.2 # Install mbridge RUN pip3 install --no-cache-dir mbridge ================================================ FILE: verl_rl/docker/verl0.5-cu126-torch2.7-fa2.7.4/Dockerfile.base.torch2.7.0 ================================================ # Base Docker Image of verl, with CUDA/Torch/FlashAttn/Apex/TransformerEngine, without other frameworks # Target: verlai/verl:base-verl0.5-cu126-cudnn9.8-torch2.7.1-fa2.8.0-fi0.2.6 # Start from the NVIDIA official image (ubuntu-22.04 + cuda-12.6 + python-3.10) # https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-24-08.html FROM nvcr.io/nvidia/pytorch:24.08-py3 # Define environments ENV MAX_JOBS=16 ENV VLLM_WORKER_MULTIPROC_METHOD=spawn ENV DEBIAN_FRONTEND=noninteractive ENV NODE_OPTIONS="" ENV PIP_ROOT_USER_ACTION=ignore ENV HF_HUB_ENABLE_HF_TRANSFER="1" # Define installation arguments ARG APT_SOURCE=https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ ARG PIP_INDEX=https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple # Set apt source RUN cp /etc/apt/sources.list /etc/apt/sources.list.bak && \ { \ echo "deb ${APT_SOURCE} jammy main restricted universe multiverse"; \ echo "deb ${APT_SOURCE} jammy-updates main restricted universe multiverse"; \ echo "deb ${APT_SOURCE} jammy-backports main restricted universe multiverse"; \ echo "deb ${APT_SOURCE} jammy-security main restricted universe multiverse"; \ } > /etc/apt/sources.list # Install systemctl RUN apt-get update && \ apt-get install -y -o Dpkg::Options::="--force-confdef" systemd && \ apt-get clean # Install tini RUN apt-get update && \ apt-get install -y tini aria2 libfreeimage3 libfreeimage-dev zlib1g htop && \ apt-get clean # Change pip source RUN pip config set global.index-url "${PIP_INDEX}" && \ pip config set global.extra-index-url "${PIP_INDEX}" && \ python -m pip install --upgrade pip # Uninstall nv-pytorch fork RUN pip uninstall -y torch torchvision torchaudio \ pytorch-quantization pytorch-triton torch-tensorrt \ xgboost transformer_engine flash_attn apex megatron-core grpcio RUN pip install --resume-retries 999 --no-cache-dir torch==2.7.0 torchvision==0.22.0 torchaudio==2.7.0 # Install flash-attn-2.7.4.post1, although built with torch2.6, it is compatible with torch2.7 # https://github.com/Dao-AILab/flash-attention/issues/1644#issuecomment-2899396361 RUN ABI_FLAG=$(python -c "import torch; print('TRUE' if torch._C._GLIBCXX_USE_CXX11_ABI else 'FALSE')") && \ URL="https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.4.post1/flash_attn-2.7.4.post1+cu12torch2.6cxx11abi${ABI_FLAG}-cp310-cp310-linux_x86_64.whl" && \ FILE="flash_attn-2.7.4.post1+cu12torch2.6cxx11abi${ABI_FLAG}-cp310-cp310-linux_x86_64.whl" && \ wget -nv "${URL}" && \ pip install --no-cache-dir "${FILE}" # Fix packages RUN pip uninstall -y pynvml nvidia-ml-py && \ pip install --no-cache-dir --upgrade "nvidia-ml-py>=12.560.30" "fastapi[standard]>=0.115.0" "optree>=0.13.0" "pydantic>=2.9" "grpcio>=1.62.1" # Install cudnn RUN aria2c --max-tries=9999 https://developer.download.nvidia.com/compute/cudnn/9.8.0/local_installers/cudnn-local-repo-ubuntu2204-9.8.0_1.0-1_amd64.deb && \ dpkg -i cudnn-local-repo-ubuntu2204-9.8.0_1.0-1_amd64.deb && \ cp /var/cudnn-local-repo-ubuntu2204-9.8.0/cudnn-*-keyring.gpg /usr/share/keyrings/ && \ apt-get update && \ apt-get -y install cudnn-cuda-12 && \ rm 
cudnn-local-repo-ubuntu2204-9.8.0_1.0-1_amd64.deb # Install Apex RUN pip install -v --disable-pip-version-check --no-cache-dir --no-build-isolation --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" --resume-retries 999 git+https://github.com/NVIDIA/apex.git # Profiling tools RUN aria2c --always-resume=true --max-tries=99999 https://developer.nvidia.com/downloads/assets/tools/secure/nsight-systems/2025_3/nsight-systems-2025.3.1_2025.3.1.90-1_amd64.deb && \ apt-get update && apt-get install -y libxcb-cursor0 RUN apt-get install -y ./nsight-systems-2025.3.1_2025.3.1.90-1_amd64.deb && \ rm -rf /usr/local/cuda/bin/nsys && \ ln -s /opt/nvidia/nsight-systems/2025.3.1/target-linux-x64/nsys /usr/local/cuda/bin/nsys && \ rm -rf /usr/local/cuda/bin/nsys-ui && \ ln -s /opt/nvidia/nsight-systems/2025.3.1/target-linux-x64/nsys-ui /usr/local/cuda/bin/nsys-ui && \ rm nsight-systems-2025.3.1_2025.3.1.90-1_amd64.deb RUN pip install --resume-retries 999 --no-cache-dir "tensordict==0.6.2" torchdata "transformers[hf_xet]>=4.51.0" accelerate datasets peft hf-transfer \ "numpy<2.0.0" "pyarrow>=19.0.1" pandas cuda-bindings \ ray[default] codetiming hydra-core pylatexenc qwen-vl-utils wandb dill pybind11 liger-kernel mathruler blobfile xgrammar \ pytest py-spy pyext pre-commit ruff # Install DeepEP ## the dependency of IBGDA RUN ln -s /usr/lib/x86_64-linux-gnu/libmlx5.so.1 /usr/lib/x86_64-linux-gnu/libmlx5.so ## Clone and build deepep and deepep-nvshmem RUN git clone -b v2.3.1 https://github.com/NVIDIA/gdrcopy.git && \ git clone https://github.com/deepseek-ai/DeepEP.git && \ cd DeepEP && git checkout a84a248 # Prepare nvshmem RUN wget https://developer.nvidia.com/downloads/assets/secure/nvshmem/nvshmem_src_3.2.5-1.txz && \ tar -xvf nvshmem_src_3.2.5-1.txz && mv nvshmem_src deepep-nvshmem && \ cd deepep-nvshmem && git apply ../DeepEP/third-party/nvshmem.patch ENV CUDA_HOME=/usr/local/cuda ### Set MPI environment variables. Having errors when not set. ENV CPATH=/usr/local/mpi/include:$CPATH ENV LD_LIBRARY_PATH=/usr/local/mpi/lib:$LD_LIBRARY_PATH ENV LD_LIBRARY_PATH=/usr/local/x86_64-linux-gnu:$LD_LIBRARY_PATH ENV GDRCOPY_HOME=/workspace/gdrcopy ## Build deepep-nvshmem RUN cd deepep-nvshmem && \ NVSHMEM_SHMEM_SUPPORT=0 \ NVSHMEM_UCX_SUPPORT=0 \ NVSHMEM_USE_NCCL=0 \ NVSHMEM_MPI_SUPPORT=0 \ NVSHMEM_IBGDA_SUPPORT=1 \ NVSHMEM_PMIX_SUPPORT=0 \ NVSHMEM_TIMEOUT_DEVICE_POLLING=0 \ NVSHMEM_USE_GDRCOPY=1 \ cmake -G Ninja -S . 
-B build/ -DCMAKE_INSTALL_PREFIX=/workspace/deepep-nvshmem/install && cmake --build build/ --target install ENV NVSHMEM_DIR=/workspace/deepep-nvshmem/install ENV LD_LIBRARY_PATH=$NVSHMEM_DIR/lib:$LD_LIBRARY_PATH ENV PATH=$NVSHMEM_DIR/bin:$PATH ## Build deepep RUN cd DeepEP && \ python setup.py install # Reset pip config RUN pip config unset global.index-url && \ pip config unset global.extra-index-url ================================================ FILE: verl_rl/docker/verl0.5-cu126-torch2.7-fa2.7.4/Dockerfile.base.torch2.7.1 ================================================ # Base Docker Image of verl, with CUDA/Torch/FlashAttn/Apex/TransformerEngine, without other frameworks # Target: verlai/verl:base-verl0.5-cu126-cudnn9.8-torch2.7.1-fa2.8.0-fi0.2.6 # Start from the NVIDIA official image (ubuntu-22.04 + cuda-12.6 + python-3.10) # https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-24-08.html FROM nvcr.io/nvidia/pytorch:24.08-py3 # Define environments ENV MAX_JOBS=16 ENV VLLM_WORKER_MULTIPROC_METHOD=spawn ENV DEBIAN_FRONTEND=noninteractive ENV NODE_OPTIONS="" ENV PIP_ROOT_USER_ACTION=ignore ENV HF_HUB_ENABLE_HF_TRANSFER="1" # Define installation arguments ARG APT_SOURCE=https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ ARG PIP_INDEX=https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple # Set apt source RUN cp /etc/apt/sources.list /etc/apt/sources.list.bak && \ { \ echo "deb ${APT_SOURCE} jammy main restricted universe multiverse"; \ echo "deb ${APT_SOURCE} jammy-updates main restricted universe multiverse"; \ echo "deb ${APT_SOURCE} jammy-backports main restricted universe multiverse"; \ echo "deb ${APT_SOURCE} jammy-security main restricted universe multiverse"; \ } > /etc/apt/sources.list # Install systemctl RUN apt-get update && \ apt-get install -y -o Dpkg::Options::="--force-confdef" systemd && \ apt-get clean # Install tini RUN apt-get update && \ apt-get install -y tini aria2 libfreeimage3 libfreeimage-dev zlib1g htop && \ apt-get clean # Change pip source RUN pip config set global.index-url "${PIP_INDEX}" && \ pip config set global.extra-index-url "${PIP_INDEX}" && \ python -m pip install --upgrade pip # Uninstall nv-pytorch fork RUN pip uninstall -y torch torchvision torchaudio \ pytorch-quantization pytorch-triton torch-tensorrt \ xgboost transformer_engine flash_attn apex megatron-core grpcio RUN pip install --resume-retries 999 --no-cache-dir torch==2.7.1 torchvision==0.22.1 torchaudio==2.7.1 # Install flash-attn-2.7.4.post1, although built with torch2.6, it is compatible with torch2.7 # https://github.com/Dao-AILab/flash-attention/issues/1644#issuecomment-2899396361 RUN ABI_FLAG=$(python -c "import torch; print('TRUE' if torch._C._GLIBCXX_USE_CXX11_ABI else 'FALSE')") && \ URL="https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.4.post1/flash_attn-2.7.4.post1+cu12torch2.6cxx11abi${ABI_FLAG}-cp310-cp310-linux_x86_64.whl" && \ FILE="flash_attn-2.7.4.post1+cu12torch2.6cxx11abi${ABI_FLAG}-cp310-cp310-linux_x86_64.whl" && \ wget -nv "${URL}" && \ pip install --no-cache-dir "${FILE}" # Fix packages RUN pip uninstall -y pynvml nvidia-ml-py && \ pip install --no-cache-dir --upgrade "nvidia-ml-py>=12.560.30" "fastapi[standard]>=0.115.0" "optree>=0.13.0" "pydantic>=2.9" "grpcio>=1.62.1" # Install cudnn RUN aria2c --max-tries=9999 https://developer.download.nvidia.com/compute/cudnn/9.8.0/local_installers/cudnn-local-repo-ubuntu2204-9.8.0_1.0-1_amd64.deb && \ dpkg -i cudnn-local-repo-ubuntu2204-9.8.0_1.0-1_amd64.deb && \ cp 
/var/cudnn-local-repo-ubuntu2204-9.8.0/cudnn-*-keyring.gpg /usr/share/keyrings/ && \ apt-get update && \ apt-get -y install cudnn-cuda-12 && \ rm cudnn-local-repo-ubuntu2204-9.8.0_1.0-1_amd64.deb # Install Apex RUN pip install -v --disable-pip-version-check --no-cache-dir --no-build-isolation --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" --resume-retries 999 git+https://github.com/NVIDIA/apex.git # Profiling tools RUN aria2c --always-resume=true --max-tries=99999 https://developer.nvidia.com/downloads/assets/tools/secure/nsight-systems/2025_3/nsight-systems-2025.3.1_2025.3.1.90-1_amd64.deb && \ apt-get update && apt-get install -y libxcb-cursor0 RUN apt-get install -y ./nsight-systems-2025.3.1_2025.3.1.90-1_amd64.deb && \ rm -rf /usr/local/cuda/bin/nsys && \ ln -s /opt/nvidia/nsight-systems/2025.3.1/target-linux-x64/nsys /usr/local/cuda/bin/nsys && \ rm -rf /usr/local/cuda/bin/nsys-ui && \ ln -s /opt/nvidia/nsight-systems/2025.3.1/target-linux-x64/nsys-ui /usr/local/cuda/bin/nsys-ui && \ rm nsight-systems-2025.3.1_2025.3.1.90-1_amd64.deb RUN pip install --resume-retries 999 --no-cache-dir "tensordict==0.6.2" torchdata "transformers[hf_xet]>=4.52.3" accelerate datasets peft hf-transfer \ "numpy<2.0.0" "pyarrow>=19.0.1" pandas cuda-bindings \ ray[default] codetiming hydra-core pylatexenc qwen-vl-utils wandb dill pybind11 liger-kernel mathruler blobfile xgrammar \ pytest py-spy pyext pre-commit ruff # Install DeepEP ## the dependency of IBGDA RUN ln -s /usr/lib/x86_64-linux-gnu/libmlx5.so.1 /usr/lib/x86_64-linux-gnu/libmlx5.so ## Clone and build deepep and deepep-nvshmem RUN git clone -b v2.3.1 https://github.com/NVIDIA/gdrcopy.git && \ git clone https://github.com/deepseek-ai/DeepEP.git && \ cd DeepEP && git checkout a84a248 # Prepare nvshmem RUN wget https://developer.nvidia.com/downloads/assets/secure/nvshmem/nvshmem_src_3.2.5-1.txz && \ tar -xvf nvshmem_src_3.2.5-1.txz && mv nvshmem_src deepep-nvshmem && \ cd deepep-nvshmem && git apply ../DeepEP/third-party/nvshmem.patch ENV CUDA_HOME=/usr/local/cuda ### Set MPI environment variables. Having errors when not set. ENV CPATH=/usr/local/mpi/include:$CPATH ENV LD_LIBRARY_PATH=/usr/local/mpi/lib:$LD_LIBRARY_PATH ENV LD_LIBRARY_PATH=/usr/local/x86_64-linux-gnu:$LD_LIBRARY_PATH ENV GDRCOPY_HOME=/workspace/gdrcopy ## Build deepep-nvshmem RUN cd deepep-nvshmem && \ NVSHMEM_SHMEM_SUPPORT=0 \ NVSHMEM_UCX_SUPPORT=0 \ NVSHMEM_USE_NCCL=0 \ NVSHMEM_MPI_SUPPORT=0 \ NVSHMEM_IBGDA_SUPPORT=1 \ NVSHMEM_PMIX_SUPPORT=0 \ NVSHMEM_TIMEOUT_DEVICE_POLLING=0 \ NVSHMEM_USE_GDRCOPY=1 \ cmake -G Ninja -S . 
-B build/ -DCMAKE_INSTALL_PREFIX=/workspace/deepep-nvshmem/install && cmake --build build/ --target install ENV NVSHMEM_DIR=/workspace/deepep-nvshmem/install ENV LD_LIBRARY_PATH=$NVSHMEM_DIR/lib:$LD_LIBRARY_PATH ENV PATH=$NVSHMEM_DIR/bin:$PATH ## Build deepep RUN cd DeepEP && \ python setup.py install # Reset pip config RUN pip config unset global.index-url && \ pip config unset global.extra-index-url ================================================ FILE: verl_rl/docker/verl0.5-cu126-torch2.7-fa2.7.4/README.md ================================================ # verl image with verl v0.5 ## Important packages version ```txt cuda==12.6 cudnn==9.8.0 torch==2.7.1 flash_attn==2.7.4 ## sglang==0.4.8 vllm==0.8.5.post1 nvidia-cudnn-cu12==9.8.0.87 transformer_engine==2.3 megatron.core==core_v0.12.2 # Preview transformer_engine==2.5 megatron.core==core_r0.13.0 ``` ## Target - Base image: - `verlai/verl:base-verl0.5-cu126-cudnn9.8-torch2.7.0-fa2.7.4`: We offer a base image with DeepEP built in, for vLLM - `verlai/verl:base-verl0.5-cu126-cudnn9.8-torch2.7.1-fa2.7.4`: We offer a base image with DeepEP built in, for SGLang - App image: - `verlai/verl:app-verl0.5-vllm0.9.1-mcore0.12.2-te2.2` - `verlai/verl:app-verl0.5-sglang0.4.8-mcore0.12.2-te2.2` ================================================ FILE: verl_rl/docker/verl0.5-cu126-torch2.7.1-fa2.8.0/Dockerfile.app.sglang.mcore0.12 ================================================ # Start from the verl base image # Dockerfile.base FROM verlai/verl:base-verl0.5-cu126-cudnn9.8-torch2.7.1-fa2.8.0 # Define environments ENV MAX_JOBS=8 ENV VLLM_WORKER_MULTIPROC_METHOD=spawn ENV DEBIAN_FRONTEND=noninteractive
ENV NODE_OPTIONS="" ENV PIP_ROOT_USER_ACTION=ignore ENV HF_HUB_ENABLE_HF_TRANSFER="1" # Install sglang-0.4.8 and torch-memory-saver # Install FlashInfer Python package RUN pip install --upgrade pip setuptools packaging RUN pip install --resume-retries 999 --no-cache-dir --no-build-isolation flashinfer-python==0.2.6.post1 RUN pip install --resume-retries 999 --no-cache-dir "sglang[all]==0.4.8" && pip install torch-memory-saver --no-cache-dir # Fix packages RUN pip install --no-cache-dir "tensordict==0.6.2" "transformers[hf_xet]>=4.51.0" accelerate datasets peft hf-transfer \ "numpy<2.0.0" "pyarrow>=19.0.1" pandas \ ray[default] codetiming hydra-core pylatexenc qwen-vl-utils wandb dill pybind11 liger-kernel mathruler blobfile xgrammar \ pytest py-spy pyext pre-commit ruff RUN pip uninstall -y pynvml nvidia-ml-py && \ pip install --resume-retries 999 --no-cache-dir --upgrade "nvidia-ml-py>=12.560.30" "fastapi[standard]>=0.115.0" "optree>=0.13.0" "pydantic>=2.9" "grpcio>=1.62.1" RUN pip install --resume-retries 999 --no-cache-dir nvidia-cudnn-cu12==9.8.0.87 # Install TransformerEngine RUN export NVTE_FRAMEWORK=pytorch && pip3 install --resume-retries 999 --no-deps --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/TransformerEngine.git@release_v2.5 # Install Megatron-LM RUN pip3 install --no-deps --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/Megatron-LM.git@core_v0.12.2 # Install mbridge RUN pip3 install --no-cache-dir mbridge ================================================ FILE: verl_rl/docker/verl0.5-cu126-torch2.7.1-fa2.8.0/Dockerfile.base ================================================ # Base Docker Image of verl, with CUDA/Torch/FlashAttn/Apex/TransformerEngine, without other frameworks # Target: verlai/verl:base-verl0.5-cu126-cudnn9.8-torch2.7.1-fa2.8.0-fi0.2.6 # Start from the NVIDIA official image (ubuntu-22.04 + cuda-12.6 + python-3.10) # https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-24-08.html FROM nvcr.io/nvidia/pytorch:24.08-py3 # Define environments ENV MAX_JOBS=16 ENV VLLM_WORKER_MULTIPROC_METHOD=spawn ENV DEBIAN_FRONTEND=noninteractive ENV NODE_OPTIONS="" ENV PIP_ROOT_USER_ACTION=ignore ENV HF_HUB_ENABLE_HF_TRANSFER="1" # Define installation arguments ARG APT_SOURCE=https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ ARG PIP_INDEX=https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple # Set apt source RUN cp /etc/apt/sources.list /etc/apt/sources.list.bak && \ { \ echo "deb ${APT_SOURCE} jammy main restricted universe multiverse"; \ echo "deb ${APT_SOURCE} jammy-updates main restricted universe multiverse"; \ echo "deb ${APT_SOURCE} jammy-backports main restricted universe multiverse"; \ echo "deb ${APT_SOURCE} jammy-security main restricted universe multiverse"; \ } > /etc/apt/sources.list # Install systemctl RUN apt-get update && \ apt-get install -y -o Dpkg::Options::="--force-confdef" systemd && \ apt-get clean # Install tini RUN apt-get update && \ apt-get install -y tini aria2 libfreeimage3 libfreeimage-dev zlib1g htop && \ apt-get clean # Change pip source RUN pip config set global.index-url "${PIP_INDEX}" && \ pip config set global.extra-index-url "${PIP_INDEX}" && \ python -m pip install --upgrade pip # Uninstall nv-pytorch fork RUN pip uninstall -y torch torchvision torchaudio \ pytorch-quantization pytorch-triton torch-tensorrt \ xgboost transformer_engine flash_attn apex megatron-core grpcio RUN pip install --resume-retries 999 --no-cache-dir torch==2.7.1 torchvision==0.22.1 torchaudio==2.7.1 # Install 
flash-attn-2.8.0.post2 (cxx11abi=True) RUN ABI_FLAG=$(python -c "import torch; print('TRUE' if torch._C._GLIBCXX_USE_CXX11_ABI else 'FALSE')") && \ URL="https://github.com/Dao-AILab/flash-attention/releases/download/v2.8.0.post2/flash_attn-2.8.0.post2+cu12torch2.7cxx11abi${ABI_FLAG}-cp310-cp310-linux_x86_64.whl" && \ FILE="flash_attn-2.8.0.post2+cu12torch2.7cxx11abi${ABI_FLAG}-cp310-cp310-linux_x86_64.whl" && \ wget -nv "${URL}" && \ pip install --no-cache-dir "${FILE}" # Fix packages RUN pip uninstall -y pynvml nvidia-ml-py && \ pip install --no-cache-dir --upgrade "nvidia-ml-py>=12.560.30" "fastapi[standard]>=0.115.0" "optree>=0.13.0" "pydantic>=2.9" "grpcio>=1.62.1" # Install cudnn RUN aria2c --max-tries=9999 https://developer.download.nvidia.com/compute/cudnn/9.8.0/local_installers/cudnn-local-repo-ubuntu2204-9.8.0_1.0-1_amd64.deb && \ dpkg -i cudnn-local-repo-ubuntu2204-9.8.0_1.0-1_amd64.deb && \ cp /var/cudnn-local-repo-ubuntu2204-9.8.0/cudnn-*-keyring.gpg /usr/share/keyrings/ && \ apt-get update && \ apt-get -y install cudnn-cuda-12 && \ rm cudnn-local-repo-ubuntu2204-9.8.0_1.0-1_amd64.deb # Install Apex RUN pip install -v --disable-pip-version-check --no-cache-dir --no-build-isolation --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" --resume-retries 999 git+https://github.com/NVIDIA/apex.git # Profiling tools RUN aria2c --always-resume=true --max-tries=99999 https://developer.nvidia.com/downloads/assets/tools/secure/nsight-systems/2025_3/nsight-systems-2025.3.1_2025.3.1.90-1_amd64.deb && \ apt-get update && apt-get install -y libxcb-cursor0 RUN apt-get install -y ./nsight-systems-2025.3.1_2025.3.1.90-1_amd64.deb && \ rm -rf /usr/local/cuda/bin/nsys && \ ln -s /opt/nvidia/nsight-systems/2025.3.1/target-linux-x64/nsys /usr/local/cuda/bin/nsys && \ rm -rf /usr/local/cuda/bin/nsys-ui && \ ln -s /opt/nvidia/nsight-systems/2025.3.1/target-linux-x64/nsys-ui /usr/local/cuda/bin/nsys-ui && \ rm nsight-systems-2025.3.1_2025.3.1.90-1_amd64.deb RUN pip install --resume-retries 999 --no-cache-dir "tensordict==0.6.2" torchdata "transformers[hf_xet]>=4.53" accelerate datasets peft hf-transfer \ "numpy<2.0.0" "pyarrow>=19.0.1" pandas cuda-bindings \ ray[default] codetiming hydra-core pylatexenc qwen-vl-utils wandb dill pybind11 liger-kernel mathruler blobfile xgrammar \ pytest py-spy pyext pre-commit ruff # Install DeepEP ## the dependency of IBGDA RUN ln -s /usr/lib/x86_64-linux-gnu/libmlx5.so.1 /usr/lib/x86_64-linux-gnu/libmlx5.so ## Clone and build deepep and deepep-nvshmem RUN git clone -b v2.3.1 https://github.com/NVIDIA/gdrcopy.git && \ git clone https://github.com/deepseek-ai/DeepEP.git && \ cd DeepEP && git checkout a84a248 # Prepare nvshmem RUN wget https://developer.nvidia.com/downloads/assets/secure/nvshmem/nvshmem_src_3.2.5-1.txz && \ tar -xvf nvshmem_src_3.2.5-1.txz && mv nvshmem_src deepep-nvshmem && \ cd deepep-nvshmem && git apply ../DeepEP/third-party/nvshmem.patch ENV CUDA_HOME=/usr/local/cuda ### Set MPI environment variables. Having errors when not set. ENV CPATH=/usr/local/mpi/include:$CPATH ENV LD_LIBRARY_PATH=/usr/local/mpi/lib:$LD_LIBRARY_PATH ENV LD_LIBRARY_PATH=/usr/local/x86_64-linux-gnu:$LD_LIBRARY_PATH ENV GDRCOPY_HOME=/workspace/gdrcopy ## Build deepep-nvshmem RUN cd deepep-nvshmem && \ NVSHMEM_SHMEM_SUPPORT=0 \ NVSHMEM_UCX_SUPPORT=0 \ NVSHMEM_USE_NCCL=0 \ NVSHMEM_MPI_SUPPORT=0 \ NVSHMEM_IBGDA_SUPPORT=1 \ NVSHMEM_PMIX_SUPPORT=0 \ NVSHMEM_TIMEOUT_DEVICE_POLLING=0 \ NVSHMEM_USE_GDRCOPY=1 \ cmake -G Ninja -S . 
-B build/ -DCMAKE_INSTALL_PREFIX=/workspace/deepep-nvshmem/install && cmake --build build/ --target install ENV NVSHMEM_DIR=/workspace/deepep-nvshmem/install ENV LD_LIBRARY_PATH=$NVSHMEM_DIR/lib:$LD_LIBRARY_PATH ENV PATH=$NVSHMEM_DIR/bin:$PATH ## Build deepep RUN cd DeepEP && \ python setup.py install # Reset pip config RUN pip config unset global.index-url && \ pip config unset global.extra-index-url ================================================ FILE: verl_rl/docker/verl0.5-cu126-torch2.7.1-fa2.8.0/README.md ================================================ # verl image with verl v0.5 ## Important packages version ```txt cuda==12.6 cudnn==9.8.0 torch==2.7.1 flash_attn==2.8.0 ## sglang==0.4.8 vllm==0.8.5.post1 nvidia-cudnn-cu12==9.8.0.87 transformer_engine==2.3 megatron.core==core_v0.12.2 # Preview transformer_engine==2.5 megatron.core==core_r0.13.0 ``` ## Target - Base image: - `verlai/verl:base-verl0.5-cu126-cudnn9.8-torch2.7.1-fa2.8.0`: We offer a base image with DeepEP built in - App image: - `verlai/verl:app-verl0.5-sglang0.4.9-mcore0.12.2` - `verlai/verl:app-verl0.5-sglang0.4.9-mcore0.13.0-preview` - vLLM does not yet support the latest version ================================================ FILE: verl_rl/docker/verl0.5-preview-cu128-torch2.7.1-fa2.8.0/Dockerfile.app.sglang.megatron ================================================ # Start from the verl base image # Dockerfile.base FROM verlai/verl:base-verl0.5-preview-cu128-cudnn9.8-torch2.7.1-fa2.8.0-fi0.2.6 # Define environments ENV MAX_JOBS=8 ENV VLLM_WORKER_MULTIPROC_METHOD=spawn ENV DEBIAN_FRONTEND=noninteractive ENV NODE_OPTIONS="" ENV PIP_ROOT_USER_ACTION=ignore ENV HF_HUB_ENABLE_HF_TRANSFER="1" # Install sglang-0.4.8 and torch-memory-saver # Install FlashInfer Python package RUN pip install --resume-retries 999 --no-cache-dir --no-build-isolation flashinfer-python==0.2.6.post1 RUN pip install --resume-retries 999 --no-cache-dir "sglang[all]==0.4.8" && pip install torch-memory-saver --no-cache-dir # Fix packages RUN pip install --no-cache-dir "tensordict==0.6.2" "transformers[hf_xet]>=4.51.0" accelerate datasets peft hf-transfer \ "numpy<2.0.0" "pyarrow>=19.0.1" pandas \ ray[default] codetiming hydra-core pylatexenc qwen-vl-utils wandb dill pybind11 liger-kernel mathruler blobfile xgrammar \ pytest py-spy pre-commit ruff RUN pip uninstall -y pynvml nvidia-ml-py && \ pip install --resume-retries 999 --no-cache-dir --upgrade "nvidia-ml-py>=12.560.30" "fastapi[standard]>=0.115.0" "optree>=0.13.0" "pydantic>=2.9" "grpcio>=1.62.1" RUN pip install --resume-retries 999 --no-cache-dir nvidia-cudnn-cu12==9.8.0.87 # Install TransformerEngine RUN export NVTE_FRAMEWORK=pytorch && pip3 install --resume-retries 999 --no-deps --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/TransformerEngine.git@release_v2.5 # Install Megatron-LM RUN pip3 install --no-deps --no-cache-dir --no-build-isolation git+https://github.com/NVIDIA/Megatron-LM.git@core_r0.13.0 # Install mbridge RUN pip3 install --no-cache-dir mbridge ================================================ FILE: verl_rl/docker/verl0.5-preview-cu128-torch2.7.1-fa2.8.0/Dockerfile.base ================================================ # Base Docker Image of verl, with CUDA/Torch/FlashAttn/Apex/TransformerEngine, without other frameworks # Target: verlai/verl:base-verl0.5-preview-cu128-cudnn9.8-torch2.7.1-fa2.8.0-fi0.2.6 # Start from the NVIDIA official image (ubuntu-22.04 + cuda-12.8 + python-3.12) # 
https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-24-08.html FROM nvcr.io/nvidia/pytorch:25.02-py3 # Define environments ENV MAX_JOBS=16 ENV VLLM_WORKER_MULTIPROC_METHOD=spawn ENV DEBIAN_FRONTEND=noninteractive ENV NODE_OPTIONS="" ENV PIP_ROOT_USER_ACTION=ignore ENV HF_HUB_ENABLE_HF_TRANSFER="1" # Define installation arguments ARG APT_SOURCE=https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ ARG PIP_INDEX=https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple # Set apt source RUN cp /etc/apt/sources.list /etc/apt/sources.list.bak && \ { \ echo "deb ${APT_SOURCE} jammy main restricted universe multiverse"; \ echo "deb ${APT_SOURCE} jammy-updates main restricted universe multiverse"; \ echo "deb ${APT_SOURCE} jammy-backports main restricted universe multiverse"; \ echo "deb ${APT_SOURCE} jammy-security main restricted universe multiverse"; \ } > /etc/apt/sources.list # Install systemctl RUN apt-get update && \ apt-get install -y -o Dpkg::Options::="--force-confdef" systemd && \ apt-get clean # Install tini RUN apt-get update && \ apt-get install -y tini aria2 libfreeimage3 libfreeimage-dev zlib1g htop && \ apt-get clean # Change pip source RUN pip config set global.index-url "${PIP_INDEX}" && \ pip config set global.extra-index-url "${PIP_INDEX}" && \ python -m pip install --upgrade pip # Uninstall nv-pytorch fork RUN pip uninstall -y torch torchvision torchaudio \ pytorch-quantization pytorch-triton torch-tensorrt \ xgboost transformer_engine flash_attn apex megatron-core grpcio RUN pip install --resume-retries 999 --no-cache-dir torch==2.7.1 torchvision==0.22.1 torchaudio==2.7.1 --index-url https://download.pytorch.org/whl/cu128 # Install flash-attn-2.8.0.post2 (cxx11abi=True) RUN ABI_FLAG=$(python -c "import torch; print('TRUE' if torch._C._GLIBCXX_USE_CXX11_ABI else 'FALSE')") && \ URL="https://github.com/Dao-AILab/flash-attention/releases/download/v2.8.0.post2/flash_attn-2.8.0.post2+cu12torch2.7cxx11abi${ABI_FLAG}-cp312-cp312-linux_x86_64.whl" && \ FILE="flash_attn-2.8.0.post2+cu12torch2.7cxx11abi${ABI_FLAG}-cp312-cp312-linux_x86_64.whl" && \ wget -nv "${URL}" && \ pip install --no-cache-dir "${FILE}" # Fix packages RUN pip uninstall -y pynvml nvidia-ml-py && \ pip install --no-cache-dir --upgrade "nvidia-ml-py>=12.560.30" "fastapi[standard]>=0.115.0" "optree>=0.13.0" "pydantic>=2.9" "grpcio>=1.62.1" # Install cudnn RUN aria2c --max-tries=9999 https://developer.download.nvidia.com/compute/cudnn/9.8.0/local_installers/cudnn-local-repo-ubuntu2204-9.8.0_1.0-1_amd64.deb && \ dpkg -i cudnn-local-repo-ubuntu2204-9.8.0_1.0-1_amd64.deb && \ cp /var/cudnn-local-repo-ubuntu2204-9.8.0/cudnn-*-keyring.gpg /usr/share/keyrings/ && \ apt-get update && \ apt-get -y install cudnn-cuda-12 && \ rm cudnn-local-repo-ubuntu2204-9.8.0_1.0-1_amd64.deb # Install Apex RUN pip install -v --disable-pip-version-check --no-cache-dir --no-build-isolation --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" --resume-retries 999 git+https://github.com/NVIDIA/apex.git # Profiling tools RUN aria2c --always-resume=true --max-tries=99999 https://developer.nvidia.com/downloads/assets/tools/secure/nsight-systems/2025_3/nsight-systems-2025.3.1_2025.3.1.90-1_amd64.deb && \ apt-get update && apt-get install -y libxcb-cursor0 RUN apt-get install -y ./nsight-systems-2025.3.1_2025.3.1.90-1_amd64.deb && \ rm -rf /usr/local/cuda/bin/nsys && \ ln -s /opt/nvidia/nsight-systems/2025.3.1/target-linux-x64/nsys /usr/local/cuda/bin/nsys && \ rm -rf /usr/local/cuda/bin/nsys-ui && \ ln -s 
/opt/nvidia/nsight-systems/2025.3.1/target-linux-x64/nsys-ui /usr/local/cuda/bin/nsys-ui && \ rm nsight-systems-2025.3.1_2025.3.1.90-1_amd64.deb RUN pip install --resume-retries 999 --no-cache-dir "tensordict==0.6.2" torchdata "transformers[hf_xet]>=4.51.0" accelerate datasets peft hf-transfer \ "numpy<2.0.0" "pyarrow>=19.0.1" pandas cuda-bindings \ ray[default] codetiming hydra-core pylatexenc qwen-vl-utils wandb dill pybind11 liger-kernel mathruler blobfile xgrammar \ pytest py-spy pre-commit ruff # Reset pip config RUN pip config unset global.index-url && \ pip config unset global.extra-index-url ================================================ FILE: verl_rl/docker/verl0.5-preview-cu128-torch2.7.1-fa2.8.0/README.md ================================================ # verl image with verl v0.5 ## Important packages version ```txt cuda==12.8 cudnn==9.8.0 torch==2.7.1 flash_attn==2.8.0 ## sglang==0.4.8 transformer_engine==2.5 megatron.core==core_r0.13.0 nvidia-cudnn-cu12==9.8.0.87 ``` ## Target - Base image: - `verlai/verl:base-verl0.5-preview-cu128-cudnn9.8-torch2.7.1-fa2.8.0`: We offer a base image with FlashInfer 0.2.6.post1 built in - App image: - `verlai/verl:app-verl0.5-preview-sglang0.4.8-mcore0.13.0-preview` - vLLM does not yet support the latest version ## !!!Notice!!! - pyext lacks maintenance and cannot work with Python 3.12; consider using a replacement and deprecating this package. ================================================ FILE: verl_rl/docs/Makefile ================================================ # Minimal makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build SPHINXPROJ = verl SOURCEDIR = . BUILDDIR = _build # Put it first so that "make" without argument is like "make help". help: @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) .PHONY: help Makefile # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). %: Makefile @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) ================================================ FILE: verl_rl/docs/README.md ================================================ # verl documentation ## Build the docs ```bash # If you want to view the auto-generated API docstrings, please make sure verl is available in the python path. For instance, install verl via: # pip install -e ..[test] # Install dependencies needed for building docs. pip install -r requirements-docs.txt # Build the docs. make clean make html ``` ## Open the docs with your browser ```bash python -m http.server -d _build/html/ ``` Launch your browser and navigate to http://localhost:8000 to view the documentation. Alternatively, you can drag the file `_build/html/index.html` into your local browser and view it directly. ================================================ FILE: verl_rl/docs/README_vllm0.7.md ================================================ # Upgrading to vllm >= 0.7 Note: verl+vllm 0.8.3 is now stable. Please see ``docs/README_vllm0.8.md`` for the upgrade guide. ## Installation Note: At the time of writing, verl+vllm 0.7.x supports **FSDP** for training and **vLLM** for rollout. ``` # Create the conda environment conda create -n verl python==3.10 conda activate verl # Install verl git clone https://github.com/volcengine/verl.git cd verl pip3 install -e .
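# (Optional) Sanity-check the editable install before continuing.
# This step is illustrative and assumes verl exposes `__version__`; adjust if it does not.
python3 -c "import verl; print(verl.__version__)"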
# Install the latest stable version of vLLM pip3 install vllm==0.7.3 # Install flash-attn pip3 install flash-attn --no-build-isolation ``` Note that if you are installing lower versions of vLLM (0.7.0, 0.7.1, 0.7.2), you need to make some tiny patches manually on vllm (/path/to/site-packages/vllm after installation) after the above steps: - vllm/distributed/parallel_state.py: Remove the assertion below: ``` if (world_size != tensor_model_parallel_size * pipeline_model_parallel_size): raise RuntimeError( f"world_size ({world_size}) is not equal to " f"tensor_model_parallel_size ({tensor_model_parallel_size}) x " f"pipeline_model_parallel_size ({pipeline_model_parallel_size})") ``` - vllm/executor/uniproc_executor.py: change `local_rank = rank` to `local_rank = int(os.environ["LOCAL_RANK"])` - vllm/model_executor/model_loader/weight_utils.py: remove the `torch.cuda.empty_cache()` in `pt_weights_iterator` ## Features ### Use cuda graph After installation, examples using FSDP as the training backend can be used. By default, `enforce_eager` is set to True, which disables the cuda graph. To enjoy cuda graphs and the sleep mode of vLLM>=0.7, add the following lines to the bash script: ``` actor_rollout_ref.rollout.enforce_eager=False \ actor_rollout_ref.rollout.free_cache_engine=True \ ``` For a typical job like examples/ppo_trainer/run_qwen2-7b_seq_balance.sh, the rollout generation time is 85 seconds with vLLM 0.7.0. By enabling the cuda graph, the generation duration is further reduced to 62 seconds. **Note:** Currently, if `n` is greater than 1 in `SamplingParams` in vLLM>=0.7, there is a potential stability issue in rollout generation time (some iterations may see generation-time bursts) when using vLLM's V0 engine. ### Use vLLM V1 Engine Using the vLLM V1 engine can avoid instability issues and achieve additional performance improvements. To use the V1 engine, you can first uninstall the previously installed vLLM and then follow the steps below to install the newer version. ``` git clone https://github.com/vllm-project/vllm.git cd vllm git checkout 2275784 sed -i "903a\ data_parallel_size = world_size // pipeline_model_parallel_size // tensor_model_parallel_size" ./vllm/distributed/parallel_state.py VLLM_USE_PRECOMPILED=1 pip install --editable . ``` Then you can enable the V1 engine by setting `export VLLM_USE_V1=1`. In some benchmark tests, the V1 engine demonstrates a 1.5x speed improvement over the vLLM V0 engine. Stable support for the vLLM V1 engine is available on verl main. ================================================ FILE: verl_rl/docs/README_vllm0.8.md ================================================ # Upgrading to vLLM >= 0.8 Last updated: 05/04/2025. ## Installation Note: This version of verl+vLLM 0.8+ supports **FSDP** for training and **vLLM** for rollout. ```bash # Create the conda environment conda create -n verl python==3.10 conda activate verl # Install verl git clone https://github.com/volcengine/verl.git cd verl pip3 install -e . # Install the latest stable version of vLLM pip3 install vllm==0.8.3 # Install flash-attn pip3 install flash-attn --no-build-isolation ``` We have a pre-built docker image for verl+vLLM 0.8.3. You can pull it directly with the following command: ```bash docker pull hiyouga/verl:ngc-th2.6.0-cu126-vllm0.8.3-flashinfer0.2.2-cxx11abi0 ``` ## Features vLLM 0.8+ supports cuda graph and the V1 engine by default in verl.
To enable these features, remember to add the following lines to the bash script: ```bash actor_rollout_ref.rollout.enforce_eager=False \ actor_rollout_ref.rollout.free_cache_engine=True \ ``` and also **remove** the environment variable if it exists: ## Notes When you directly upgrade to vllm>=0.8, some dependency packages may undergo version changes. If you encounter the following problem: ```bash in from torch.multiprocessing.reductions import ForkingPickler ImportError: cannot import name 'ForkingPickler' from 'torch.multiprocessing.reductions' (/opt/conda/lib/python3.11/site-packages/torch/multiprocessing/reductions.py) ``` You need to upgrade `tensordict` to version 0.6.2 using the command `pip install tensordict==0.6.2`. ================================================ FILE: verl_rl/docs/_static/js/runllm-widget.js ================================================ document.addEventListener("DOMContentLoaded", function () { var script = document.createElement("script"); script.type = "module"; script.id = "runllm-widget-script"; script.src = "https://widget.runllm.com"; script.setAttribute("version", "stable"); script.setAttribute("crossorigin", "true"); script.setAttribute("runllm-keyboard-shortcut", "Mod+j"); script.setAttribute("runllm-name", "verl Chatbot"); script.setAttribute("runllm-position", "TOP_RIGHT"); script.setAttribute("runllm-assistant-id", "679"); script.async = true; document.head.appendChild(script); }); ================================================ FILE: verl_rl/docs/advance/agent_loop.rst ================================================ Agent Loop ========== Last updated: 07/17/2025. .. versionadded:: 0.4.2 [status: alpha] .. warning:: Agent Loop is ready for use, but the API may change in future releases. Agent Loop is designed as a general interface for multi-turn rollout and agentic reinforcement learning. **Design goals**: - Pluggable user-defined agent loops - Provide a standard request-generation API across different inference frameworks - Provide request-level load balancing between multiple inference servers **Non-goals**: - How tools are defined and how to call them At a high level, the agent loop is given a prompt and runs a user-defined loop: call the LLM generate API, call tools, ..., and return the final output. A reward is then computed on the final output, which is used as a trajectory for RL training. .. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/agent_loop_overview.svg?raw=true API Design ---------- The ``AgentLoopBase`` class is the abstraction of the agent loop, and the ``run`` method is the only interface that users need to implement. Given prompt messages in the format [{"role": "user", "content": "..."}] and additional sampling params, the run method can do whatever the user wants, such as: - call the LLM generate API - call tools: web search, database query, code sandbox, ... - environment interaction - reflection - ... .. code:: python class AgentLoopBase(ABC): @abstractmethod async def run(self, messages: list[dict[str, Any]], sampling_params: dict[str, Any]) -> AgentLoopOutput: """Run agent loop to interact with LLM server and environment. Args: messages (List[Dict[str, Any]]): Input messages. sampling_params (Dict[str, Any]): LLM sampling params. Returns: AgentLoopOutput: Agent loop output. """ raise NotImplementedError After running the user-defined loop, the run method should return an ``AgentLoopOutput``, including prompt token ids, response token ids, and a response mask. .. 
code:: python class AgentLoopOutput(BaseModel): """Agent loop output.""" prompt_ids: list[int] """Prompt token ids.""" response_ids: list[int] """Response token ids including LLM generated tokens and tool response tokens.""" response_mask: list[int] """Response mask, 1 for LLM generated token, 0 for tool response token.""" .. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/agent_loop_output.svg?raw=true .. note:: AgentLoopOutput only outputs one trajectory for a given prompt; multi-trajectory output is still under discussion. Architecture Design ------------------- .. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/agent_loop_architecture.png?raw=true A single PPO step contains two phases: rollout and train. In the rollout phase: 1. PPOTrainer samples a batch from the dataset and calls ``AgentLoopManager.generate_sequences``. 2. AgentLoopManager calls ``wake_up`` on all async LLM server instances, which syncs weights between the inference engine (vLLM/SGLang) and the training engine (FSDP/Megatron-LM). 3. AgentLoopManager splits the batch into chunks and sends each chunk to an ``AgentLoopWorker``. 4. AgentLoopWorker receives a chunk and, for each prompt, spawns a user-defined ``AgentLoopBase`` instance, runs the ``run`` coroutine to completion, and gets the ``AgentLoopOutput``. .. tip:: AgentLoopWorker schedules multiple coroutines concurrently. If the number of AgentLoopWorkers equals batch_size, then each worker is responsible for one prompt. Inside the agent loop, when the user needs the LLM to generate a response: 5. Call ``AsyncLLMServerManager.generate`` with prompt_ids. 6. AsyncLLMServerManager selects the server instance with the fewest requests on the first turn and sends the request to it. (In following turns, the request is sent to the same server instance.) 7. AsyncLLMServer receives the request, issues ipc/rpc to the model_runner, and generates the response. (There are slight differences between vLLM and SGLang; see below.) When all prompts in all AgentLoopWorkers finish, AgentLoopManager gathers the results and returns them to PPOTrainer. 8. AgentLoopManager puts all server instances to ``sleep``, which frees the kv cache and offloads weights to CPU memory. AsyncLLMServer ~~~~~~~~~~~~~~ AsyncLLMServer is the abstraction of an LLM server with two types of generation API: - `OpenAI chat completion `_: generates a response for the given chat conversation. - Token in token out: generates response ids for the given token ids. We officially support vLLM and SGLang AsyncLLMServers; both implement the two APIs and are well tested. Other inference engines should be easy to plug in by implementing the ``AsyncServerBase`` class. .. code:: python class AsyncServerBase(ABC): @abstractmethod async def chat_completion(self, raw_request: Request) -> JSONResponse: """OpenAI chat completion API. Args: raw_request (Request): raw json request Returns: JSONResponse: json response API reference: https://platform.openai.com/docs/api-reference/chat/create """ raise NotImplementedError @abstractmethod async def generate(self, prompt_ids: list[int], sampling_params: dict[str, Any], request_id: str) -> list[int]: """Generate response ids given prompt ids. Args: prompt_ids (List[int]): prompt ids sampling_params (Dict[str, Any]): sampling params request_id (str): request id Returns: List[int]: response ids """ raise NotImplementedError Chat completion vs Token in token out ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. warning:: The following conclusion is based on our recent experience and is still open to investigation and discussion.
Almost all agent frameworks (LangGraph, CrewAI, LlamaIndex, etc.) call the LLM with the OpenAI chat completion API and keep the chat history as messages. So users may expect that we should use the chat completion API in multi-turn rollout. But based on our recent experience with single-turn training on DAPO and multi-turn training on `retool `_, we found that the token_ids obtained by applying the chat template to the final messages may not equal the token_ids obtained by concatenating the prompt_ids and response_ids of each turn. .. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/multi_turn.png?raw=true **Where does this inconsistency happen?** First, the tool parser may alter the content. For example: .. code:: json {"role": "assistant", "content": "Let me call a ... and get the result"} After tool_calls extraction, the message looks like this: .. code:: json {"role": "assistant", "content": "Let me call a and get the result", "tool_calls": [{"name": "foo", "arguments": "{}"}]} Re-encoding the extracted message does not reproduce the original LLM-generated response_ids. Second, the `decode-encode` round trip may also lead to inconsistency: `Agent-R1 issue#30 `_. **What is the impact of this inconsistency?** This inconsistency is not a big problem for serving/agent systems, but it is critical for RL training: it causes the trajectory to deviate from the policy model distribution. We have observed that applying apply_chat_template to the final chat history messages makes PPO training fail to converge, even in the single-turn setting. vLLM ^^^^ .. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/async_vllm.png?raw=true For vLLM, the Async LLM Engine runs in the same process as the server, and the ModelRunner runs in the same process as the FSDP/Megatron-LM workers. The Async LLM Engine communicates with the ModelRunner through ZeroMQ. When the server receives a request, it directly calls the engine to generate response_ids. SGLang ^^^^^^ .. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/async_sglang.png?raw=true For SGLang, the Async LLM Engine runs in the same process as FSDP/Megatron-LM worker-0, and it spawns multiple subprocesses as ModelRunners. The Async LLM Engine likewise communicates with the ModelRunner through ZeroMQ. When the server receives a request, it remotely calls worker-0 to get response_ids. AsyncLLMServerManager ~~~~~~~~~~~~~~~~~~~~~ AsyncLLMServerManager serves as a proxy to multiple AsyncLLMServer instances and provides: - load balancing: selects the server instance with the fewest requests on the first turn and sends the request to it. - sticky sessions: binds a request_id to a server instance, so that the same request_id is sent to the same server instance in following turns. AsyncLLMServerManager is passed to ``AgentLoopBase.__init__``; whenever users want to interact with the LLM in the agent loop, they can call ``AsyncLLMServerManager.generate`` to generate response_ids. .. code:: python class AsyncLLMServerManager: async def generate( self, request_id, *, prompt_ids: list[int], sampling_params: dict[str, Any], ) -> list[int]: """Generate tokens from prompt ids. Args: request_id (str): request id for sticky session. prompt_ids (List[int]): List of prompt token ids. sampling_params (Dict[str, Any]): Sampling parameters for the chat completion. Returns: List[int]: List of generated token ids. """ ... Next ---- - :doc:`Agentic RL Training<../start/agentic_rl>`: Quick-start agentic RL training with the gsm8k dataset. - `LangGraph MathExpression `_: Demonstrates how to use LangGraph to build an agent loop. - `Retool `_: End-to-end retool paper reproduction using a tool agent.
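As a closing example, here is a minimal sketch of a user-defined agent loop built on the APIs above. It is illustrative only: ``self.tokenizer``, ``self.server_manager``, and ``call_tool`` are assumed attributes/helpers rather than part of the documented interface, and the loop keeps everything token-in-token-out as recommended above.

.. code:: python

    import uuid
    from typing import Any

    class ToolAgentLoop(AgentLoopBase):  # AgentLoopBase / AgentLoopOutput as documented above
        """Sketch: alternate LLM turns and tool turns, tracking the response mask."""

        async def run(self, messages: list[dict[str, Any]], sampling_params: dict[str, Any]) -> AgentLoopOutput:
            request_id = uuid.uuid4().hex  # sticky session: reuse the same id across turns
            # Tokenize the prompt once; afterwards only token ids are appended,
            # so decode-encode inconsistencies cannot occur.
            prompt_ids = self.tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=True)
            response_ids: list[int] = []
            response_mask: list[int] = []
            for _ in range(2):  # at most two LLM turns in this sketch
                llm_ids = await self.server_manager.generate(
                    request_id, prompt_ids=prompt_ids + response_ids, sampling_params=sampling_params
                )
                response_ids += llm_ids
                response_mask += [1] * len(llm_ids)   # 1: LLM-generated tokens
                tool_ids = await self.call_tool(llm_ids)  # assumed helper: parse, execute, tokenize the tool result
                if not tool_ids:  # no tool call parsed: the loop is done
                    break
                response_ids += tool_ids
                response_mask += [0] * len(tool_ids)  # 0: tool response tokens
            return AgentLoopOutput(prompt_ids=prompt_ids, response_ids=response_ids, response_mask=response_mask)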
================================================ FILE: verl_rl/docs/advance/checkpoint.rst ================================================ .. _checkpoint-page: Using Checkpoints to Support Fault Tolerance Training ===================================================== Last updated: 06/25/2025. Training errors or machine failures can occur during the RLHF training process, so it is recommended to enable checkpointing to minimize your losses. The API interface has already been listed in :ref:`config-explain-page`, and we will not repeat it here. But there are still some technical details we hope to clarify. .. note:: Notice that the ``checkpoint.contents`` field has no effect on FSDP checkpoints except for ``hf_model``; the other 3 fields are bound together for saving and loading. We recommend including all of ``model``, ``optimizer`` and ``extra``. Checkpoint Saving Directory Structure ------------------------------------- Commonly, we use the ``default_local_dir`` declared in ``ppo_trainer.yaml`` or ``ppo_megatron_trainer.yml`` as the prefix when saving checkpoints, which is ``checkpoints/${trainer.project_name}/${trainer.experiment_name}``. The inner checkpoint structure of **FSDP** looks like: .. code:: checkpoints/${trainer.project_name}/${trainer.experiment_name} ├── global_steps_${i} │ ├── actor │ │ ├── huggingface # default save config and tokenizer, save huggingface model if include ``hf_model`` in checkpoint.contents │ │ └── fsdp_config.json # FSDP config file, including world_size and fsdp version │ │ ├── model_world_size_{self.world_size}_rank_{self.rank}.pt │ │ ├── optim_world_size_{self.world_size}_rank_{self.rank}.pt │ │ └── extra_state_world_size_{self.world_size}_rank_{self.rank}.pt │ ├── critic │ │ ├── huggingface │ │ └── fsdp_config.json │ │ ├── model_world_size_{self.world_size}_rank_{self.rank}.pt │ │ ├── optim_world_size_{self.world_size}_rank_{self.rank}.pt │ │ └── extra_state_world_size_{self.world_size}_rank_{self.rank}.pt └── latest_checkpointed_iteration.txt All model shards, optimizers and extra states are stored together, in a sharded and distributed way. The current **Megatron** checkpoint structure is: .. code:: checkpoints/${trainer.project_name}/${trainer.experiment_name} ├── global_steps_${i} │ ├── actor │ │ ├── huggingface # default save config and tokenizer, save huggingface model if include ``hf_model`` in checkpoint.contents │ │ └── dist_ckpt # save sharded model/optimizer/rng_states, naming the same as Megatron │ └── critic │ │ ├── huggingface │ │ └── dist_ckpt └── latest_checkpointed_iteration.txt Convert FSDP and Megatron Checkpoints to HuggingFace Format Model ----------------------------------------------------------------- We provide a tool to convert FSDP and Megatron checkpoints to a HuggingFace-format model. The tool is located in ``verl/model_merger``. For older versions of verl that don't include fsdp_config.json in checkpoints, you can use the legacy model merger located at ``verl/scripts/legacy_model_merger.py``. The script supports two main sub-commands: `merge` (to convert and save checkpoints) and `test` (to validate merged checkpoints against a reference model). The arguments for the `merge` sub-command are as follows: .. 
code:: bash usage: python -m verl.model_merger merge [-h] --backend {fsdp,megatron} [--local_dir LOCAL_DIR] [--tie-word-embedding] [--is-value-model] [--use_cpu_initialization] [--target_dir TARGET_DIR] [--hf_upload_path HF_UPLOAD_PATH] [--private] options: -h, --help show this help message and exit --backend {fsdp,megatron} The backend of the model --local_dir LOCAL_DIR Path to the saved model checkpoints --tie-word-embedding Whether to tie word embedding weights (currently only Megatron supported) --is-value-model Whether the model is a value model (currently only Megatron supported) --use_cpu_initialization Whether to use CPU initialization for the model. This is useful for large models that cannot fit into GPU memory during initialization. --target_dir TARGET_DIR Directory to save the merged huggingface model --hf_upload_path HF_UPLOAD_PATH Hugging Face repository ID to upload the model --private Whether to upload the model to a private Hugging Face repository Example usage for merging Megatron checkpoints: .. code:: bash python -m verl.model_merger merge \ --backend megatron \ --tie-word-embedding \ --local_dir checkpoints/verl_megatron_gsm8k_examples/qwen2_5_0b5_megatron_saveload/global_step_1/actor \ --target_dir /path/to/merged_hf_model Example usage for distributed merging of Megatron checkpoints: .. code:: bash torchrun --nproc_per_node 1 --nnodes 8 --node_rank ${RANK} -m verl.model_merger merge \ --backend megatron \ --tie-word-embedding \ --local_dir checkpoints/verl_megatron_gsm8k_examples/qwen2_5_0b5_megatron_saveload/global_step_1/actor \ --target_dir /path/to/merged_hf_model Example usage for merging FSDP checkpoints: .. code:: bash python -m verl.model_merger merge \ --backend fsdp \ --local_dir checkpoints/verl_fsdp_gsm8k_examples/qwen2_5_0b5_fsdp_saveload/global_step_1/actor \ --target_dir /path/to/merged_hf_model Megatron Merger details ----------------------- The current implementation of decoder layers uses ``nn.ModuleList`` to store the layers, so the model layers on every PP rank and VPP rank start their indices from 0. There are 3 ways to correct this behavior: 1. Modify the decoder layer's state_dict, adding an ``offset`` to each layer's index, which rewrites the ``nn.ModuleList`` implementation. 2. Modify the layer indices when saving the checkpoint and recover them when loading. 3. Let the checkpoint merger do this work, calculating the actual ``offset`` from the ``state_dict`` alone, which is a little complex. The current implementation uses solution 2. HuggingFace to Megatron DistCheckpoint details ---------------------------------------------- If your model is huge, we recommend using the Megatron dist-checkpoint to load it. The Megatron dist-checkpoint supports loading with different kinds of model parallelism, and it is much faster than the original checkpoint loading. To convert the original HuggingFace model to a Megatron dist-checkpoint, you can use the ``scripts/converter_hf_to_mcore.py`` script. Large MoE models are temporarily supported with CPU initialization, which is a little slower, while we work on a better solution for large models. An example command to convert the model is as follows: .. code:: bash python scripts/converter_hf_to_mcore.py \ --hf_model_path Qwen/Qwen1.5-MoE-A2.7B-Chat \ --output_path /mnt/disk/Qwen/Qwen1.5-MoE-A2.7B-Chat \ --use_cpu_initialization # Only works for MoE models An example command for distributed conversion of a huge model like DeepSeek-V3 671B is as follows: .. 
code:: bash torchrun --nproc_per_node 1 --nnodes 8 --node_rank ${RANK} scripts/converter_hf_to_mcore.py \ --hf_model_path deepseek-ai/DeepSeek-V3 \ --output_path /mnt/disk/deepseek-ai/DeepSeek-V3 \ --use_cpu_initialization # Only works for MoE models Original Checkpoint Utils ------------------------- Original Checkpoint Utils refers to the original checkpoint implementation in ``verl/models/[model]/megatron/checkpoint_utils``. We now only need ``[model]_loader.py`` from the original checkpoint utils, since we no longer store ``hf_model`` every time (which is not recommended for large-model training; try saving only sharded models if you can). .. note:: Note that ``[model]_loader`` only supports environments where **storage clusters can connect to every compute node**, because it utilizes a **sharded loading scheme to minimize checkpoint-loading overhead**. Every rank loads its own data from the ``state_dict``, which is accessible to all of them. There is also no need to broadcast among DP ranks, since the saved state_dict is produced only by DP rank 0. For users who can **only place the huggingface model on one device**, we keep the original costly implementation in ``[model]_loader_deprecated``. In this implementation, rank 0 broadcasts all weights to each tp and pp rank, and then dp rank 0 broadcasts to all dp ranks. There may be a risk of OOM. To use the deprecated loader, change the import package of ``load_state_dict_to_megatron_llama``. ================================================ FILE: verl_rl/docs/advance/dpo_extension.rst ================================================ Extend to other RL(HF) algorithms ================================= Last updated: 02/25/2025. We have already implemented the complete training pipeline of the PPO algorithm. To extend to other algorithms, we analyze the high-level principles of using verl and provide a tutorial for implementing the DPO algorithm. Users can follow a similar paradigm to extend to other RL algorithms. .. note:: **Key ideas**: A single process drives multi-process computation and data communication. Overall Approach ---------------- Step 1: Consider what multi-machine multi-GPU computations are needed for each model, such as ``generate_sequence``, ``compute_log_prob`` and ``update_policy`` in the actor_rollout model. Implement distributed single-process-multiple-data (SPMD) computation and encapsulate them into APIs. Step 2: Based on different distributed scenarios, including FSDP and 3D parallelism in Megatron-LM, implement single-process control of data interaction among multi-process computations. Step 3: Utilize the encapsulated APIs to implement the control flow. Example: Online DPO ------------------- We use verl to implement a simple online DPO algorithm. The algorithm flow of online DPO is as follows: 1. There is a prompt (rollout) generator which has the same weights as the actor model. After a batch of prompts is fed into the generator, it generates N responses for each prompt. 2. Send all the prompts + responses to a verifier for scoring, which can be a reward model or a rule-based function. Then sort them in pairs to form a training batch. 3. Use this training batch to train the actor model using DPO. During the process, a reference policy is needed. Step 1: What are the multi-machine multi-GPU computations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **Sample Generator** Implementation details: .. 
.. code:: python from verl.single_controller.base import Worker from verl.single_controller.ray import RayWorkerGroup, RayClassWithInitArgs, RayResourcePool import ray @ray.remote class SampleGenerator(Worker): def __init__(self, config): super().__init__() self.config = config def generate_sequences(self, data): pass Here, ``SampleGenerator`` can be viewed as a group of multiple processes pulled up by ``torchrun``, with each process running the same code (SPMD). ``SampleGenerator`` needs to implement a ``generate_sequences`` API for the control flow to call. The implementation details inside can use any inference engine, including vllm, sglang and huggingface. Users can largely reuse the code in verl/verl/workers/rollout/vllm_rollout/vllm_rollout.py and we won't go into details here. **ReferencePolicy inference** API: compute reference log probability .. code:: python from verl.single_controller.base import Worker import ray @ray.remote class ReferencePolicy(Worker): def __init__(self): super().__init__() self.model = Model() def infer(self, data): return self.model(data) **Actor update** API: Update actor model parameters .. code:: python from verl.single_controller.base import Worker import ray @ray.remote class DPOActor(Worker): def __init__(self): super().__init__() self.model = Model() self.model = FSDP(self.model) # or other distributed strategy self.optimizer = optim.Adam(self.model.parameters(), lr=1e-3) self.loss_fn = xxx def update(self, data): self.optimizer.zero_grad() logits = self.model(data) loss = self.loss_fn(logits) loss.backward() self.optimizer.step() **Notes: How to distinguish between control processes and distributed computation processes** ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - Control processes are generally functions directly decorated with ``@ray.remote`` - Computation processes are all wrapped into a ``RayWorkerGroup``. Users can reuse most of the distributed computation logic implemented in the PPO algorithm, including the FSDP and Megatron-LM backends in verl/verl/trainer/ppo. Step 2: Based on different distributed scenarios, implement single-process control of multi-process data interaction ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **The core problem to solve here is how a single process sends data to multiple processes, drives multi-process computation, and how the control process obtains the results of multi-process computation.** First, we initialize the multi-process ``WorkerGroup`` in the control process. .. code:: python @ray.remote(num_cpus=1) def main_task(config): # construct SampleGenerator resource_pool = RayResourcePool(process_on_nodes=[8] * 2) # 16 GPUs ray_cls = RayClassWithInitArgs(SampleGenerator, config=config) # put SampleGenerator onto resource pool worker_group = RayWorkerGroup(resource_pool, ray_cls) # construct reference policy As we can see, in the control process, multiple processes are wrapped into a ``RayWorkerGroup``. Inside this ``WorkerGroup``, there is a ``self._workers`` member, where each worker is a RayActor (https://docs.ray.io/en/latest/ray-core/actors.html) of SampleGenerator. ray_trainer.md also provides an implementation of ``MegatronRayWorkerGroup``. Assuming the model is distributed using FSDP, and there is a batch of data on the control process, for data parallelism, the underlying calling process is:
.. code:: python data = xxx data_list = data.chunk(dp_size) output = [] for i, d in enumerate(data_list): # worker_group._workers[i] is a SampleGenerator output.append(worker_group._workers[i].generate_sequences.remote(d)) output = ray.get(output) output = torch.cat(output) A single process calling multiple processes involves the following 3 steps: 1. Split the data into DP parts on the control process. 2. Send the data to remote, call the remote computation through RPC, and utilize multi-process computation. 3. Obtain the computation results of each worker on the control process and merge them. Frequently calling these 3 steps on the controller process greatly hurts code readability. **In verl, we have abstracted and encapsulated these 3 steps, so that the worker's method + dispatch + collect can be registered into the worker_group** .. code:: python from verl.single_controller.base.decorator import register def dispatch_data(worker_group, data): return data.chunk(worker_group.world_size) def collect_data(worker_group, data): return torch.cat(data) dispatch_mode = { 'dispatch_fn': dispatch_data, 'collect_fn': collect_data } @register(dispatch_mode=dispatch_mode) def generate_sequences(self, data): pass In this way, we can directly call the method inside the worker through the ``worker_group`` on the control (driver) process (which is a single process): .. code:: python output = worker_group.generate_sequences(data) This single line includes data splitting, data distribution and computation, and data collection. Furthermore, the model parallelism size of each model is usually fixed, including dp, tp, pp. So for these common distributed scenarios, we have pre-implemented specific dispatch and collect methods in `decorator.py `_, which can be directly used to wrap the computations. .. code:: python from verl.single_controller.base.decorator import register, Dispatch @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO) def generate_sequences(self, data: DataProto) -> DataProto: pass Here it requires the data interface to be ``DataProto``. The definition of ``DataProto`` is in `protocol.py `_. Step 3: Main training loop ~~~~~~~~~~~~~~~~~~~~~~~~~~ With the above building blocks, we can implement the algorithm's control flow. It is recommended that ``main_task`` is also a ray remote process. .. code:: python @ray.remote(num_cpus=1) def main_task(config): # construct SampleGenerator resource_pool = RayResourcePool(process_on_nodes=[8] * 2) # 16 GPUs ray_cls = RayClassWithInitArgs(SampleGenerator, config=config) # put SampleGenerator onto resource pool sample_gen = RayWorkerGroup(resource_pool, ray_cls) # construct reference policy ray_cls = RayClassWithInitArgs(ReferencePolicy) ref_policy = RayWorkerGroup(resource_pool, ray_cls) # construct actor ray_cls = RayClassWithInitArgs(DPOActor) dpo_policy = RayWorkerGroup(resource_pool, ray_cls) dataloader = DataLoader() for data in dataloader: # generate data data = sample_gen.generate_sequences(data) # generate scores for each data data = generate_scores(data) # generate pairwise data using scores data = generate_pairwise_data(data) # generate ref_log_prob data.batch['ref_log_prob'] = ref_policy.infer(data) # update using dpo dpo_policy.update(data) # logging Here, different ``WorkerGroups`` can be placed in the same resource pool or in different resource pools using ``create_colocated_worker_cls``, similar to `ray_trainer.py `_.
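To make the DPO update concrete, below is a minimal, hypothetical sketch of the pairwise loss that ``DPOActor.update`` could compute once the pairwise batch carries chosen/rejected log-probabilities; the argument names and the ``beta`` hyperparameter are illustrative assumptions, not verl APIs:

.. code:: python

    import torch
    import torch.nn.functional as F

    def dpo_loss(policy_chosen_logps: torch.Tensor,
                 policy_rejected_logps: torch.Tensor,
                 ref_chosen_logps: torch.Tensor,
                 ref_rejected_logps: torch.Tensor,
                 beta: float = 0.1) -> torch.Tensor:
        """Standard DPO objective over a batch of (chosen, rejected) response pairs."""
        policy_margin = policy_chosen_logps - policy_rejected_logps
        ref_margin = ref_chosen_logps - ref_rejected_logps
        # Push the policy's preference margin above the reference margin, scaled by beta.
        return -F.logsigmoid(beta * (policy_margin - ref_margin)).mean()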
================================================ FILE: verl_rl/docs/advance/fsdp_extension.rst ================================================ Add models with the FSDP backend ================================== Last updated: 02/09/2025. Model -------------------------- In principle, our FSDP backend can support any HF model, and we can synchronize the actor model weights with vLLM using `hf_weight_loader.py` under `third_party/vllm`. However, ``hf_weight_loader`` will gather the full state_dict of a model during synchronization, which may cause OOM. We suggest using ``dtensor_weight_loader``, which gathers the full model parameters layer by layer to reduce peak memory usage. We already support the dtensor weight loader for the models below in `dtensor_weight_loader.py` under `third_party/vllm`: - ``GPT2LMHeadModel`` - ``LlamaForCausalLM`` - ``LLaMAForCausalLM`` - ``MistralForCausalLM`` - ``InternLMForCausalLM`` - ``AquilaModel`` - ``AquilaForCausalLM`` - ``Phi3ForCausalLM`` - ``GemmaForCausalLM`` - ``Gemma2ForCausalLM`` - ``GPTBigCodeForCausalLM`` - ``Starcoder2ForCausalLM`` - ``Qwen2ForCausalLM`` - ``DeepseekV2ForCausalLM`` To implement ``dtensor_weight_loader`` for a model that's supported in vLLM, follow the guide for the gemma model below: 1. Copy the ``load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]])`` from the vllm model class to ``dtensor_weight_loaders.py`` 2. Modify the arguments to ``(actor_weights: Dict, vllm_model: nn.Module)`` 3. Replace ``self`` with ``vllm_model`` 4. Add the ``local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)`` before each ``param = params_dict[name]`` and modify the following weight loading to use ``local_loaded_weight``. 5. Register the implemented dtensor weight loader to ``__MODEL_DTENSOR_WEIGHT_LOADER_REGISTRY__``. .. code-block:: diff - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def gemma_dtensor_weight_loader(actor_weights: Dict, vllm_model: nn.Module) -> nn.Module: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), ("qkv_proj", "k_proj", "k"), ("qkv_proj", "v_proj", "v"), ("gate_up_proj", "gate_proj", 0), ("gate_up_proj", "up_proj", 1), ] - params_dict = dict(self.named_parameters()) + params_dict = dict(vllm_model.named_parameters()) loaded_params = set() - for name, loaded_weight in weights: + for name, loaded_weight in actor_weights.items(): for (param_name, shard_name, shard_id) in stacked_params_mapping: if shard_name not in name: continue name = name.replace(shard_name, param_name) # Skip loading extra bias for GPTQ models. if name.endswith(".bias") and name not in params_dict: continue + local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight) param = params_dict[name] weight_loader = param.weight_loader - weight_loader(param, loaded_weight, shard_id) + weight_loader(param, local_loaded_weight.to(dtype=param.dtype), shard_id) break else: # lm_head is not used in vllm as it is tied with embed_token. # To prevent errors, skip loading lm_head.weight. if "lm_head.weight" in name: continue # Skip loading extra bias for GPTQ models.
if name.endswith(".bias") and name not in params_dict: continue + local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight) param = params_dict[name] weight_loader = getattr(param, "weight_loader", default_weight_loader) - weight_loader(param, loaded_weight) + weight_loader(param, local_loaded_weight.to(dtype=param.dtype)) loaded_params.add(name) unloaded_params = params_dict.keys() - loaded_params if unloaded_params: raise RuntimeError( "Some weights are not initialized from checkpoints: " f"{unloaded_params}") ================================================ FILE: verl_rl/docs/advance/megatron_extension.rst ================================================ Add models with the Megatron-LM backend ========================================= Last updated: 04/25/2025. Model ----------- If you use the latest verl, we have direct support of ``GPTModel`` for the Megatron backend. You can pretrain custom models in much the same way as with Megatron itself. We list the steps here: 1. Find `model_initializer.py `_ 2. If your model is configurable by ``TransformerLayerSpec``, you can directly use ``GPTModel``. Otherwise, please implement a new ``ModelLayerSpec`` and ``ModelLayer`` here. 3. Use the right ``LayerSpec``, ``TransformerConfig`` and ``HuggingfaceConfig`` as arguments to initialize the GPTModel. 4. Return the model at the end. ================================================ FILE: verl_rl/docs/advance/one_step_off.md ================================================ # Recipe: One Step Off Policy Async Trainer **Author:** `https://github.com/meituan-search` Last updated: 07/17/2025. ## Introduction ### Background The current reinforcement learning training process implemented by verl is synchronous, adhering to the algorithmic workflows of established methods like PPO, GRPO, and DAPO. In each step, training samples are generated by the latest model, and the model is updated after training completes. While this approach aligns with on-policy reinforcement learning and stabilizes RL training, it suffers from severe efficiency issues: model updates must wait for the longest output in the generation phase to complete. During the generation of long-tail samples, GPUs remain idle, resulting in significant underutilization. The more severe the long-tail problem in sample generation, the lower the overall training efficiency. For example, in DAPO 32B training, the Rollout phase accounts for approximately 70% of the total time, and increasing resources does not reduce the Rollout duration. ![DAPO 32B Math Performance]( https://raw.githubusercontent.com/eric-haibin-lin/verl-community/refs/heads/main/docs/dapo_32b_math.png) > source data: https://wandb.ai/verl-org/DAPO%20Reproduction%20on%20verl/workspace?nw=nwusertongyuxuan361 ### Solution We have implemented the **One Step Off Async Trainer** to help alleviate this issue. This approach parallelizes the generation and training processes, utilizing samples generated in the previous step for the current training step. It also involves appropriately partitioning resources, allocating dedicated resources for generation while automatically assigning the remainder to training. By reducing the resources allocated to the generation phase, we mitigate GPU idle time during long-tail sample generation. Throughout this process, the generation and training parameters remain one step off-policy from each other.
![One Step Off Policy Diagram]( https://raw.githubusercontent.com/eric-haibin-lin/verl-community/refs/heads/main/docs/one_step_off_policy.png) > reference: [AReaL: A Large-Scale Asynchronous Reinforcement Learning System for Language Reasoning]( > https://arxiv.org/abs/2505.24298) Our core contributions include: 1. **Parallel Generation and Training**: Samples for the next batch are asynchronously generated while the current batch is being trained. 2. **Resource Isolation**: Unlike `hybrid_engine`, this method requires explicit resource allocation for rollout, with the remaining resources automatically assigned to training. 3. **NCCL Parameter Synchronization**: Employs NCCL communication primitives for seamless parameter transfer between the generation and training modules. ### Experimental Results - **Machine Configuration**: 2 nodes with 16 H20 GPUs in total - Generation: 4 GPUs - Training: 12 GPUs - **Model**: Qwen2.5-Math-7B - **Rollout Configuration**: - **Max Response Length**: FSDP2: 20,480 tokens; Megatron: 8,192 tokens - **Algorithm**: DAPO - **Rollout Engine**: vLLM | training mode | engine | step | gen | wait_prev_gen | generate_sequences | old_log_prob | update_actor | total time | acc/best@32/mean | acc/maj@32/mean | |------------------------|---------------|------|-----|---------------|--------------------|--------------|--------------|---------------|------------------|-----------------| | colocate sync | VLLM+FSDP2 | 749 | 321 | - | 247 | 88 | 286 | 19h18m | 0.5948 | 0.417 | | one-step-overlap async | VLLM+FSDP2 | 520 | - | 45 | 458 | 108 | 337 | 15h34m(+23%) | 0.6165 | 0.494 | | colocate sync | VLLM+Megatron | 699 | 207 | - | 162 | 119 | 344 | 18h21m | 0.605 | 0.4217 | | one-step-overlap async | VLLM+Megatron | 566 | - | 59 | 501 | 120 | 347 | 13h06m (+40%) | 0.6569 | 0.4038 | * colocate sync: step ≈ gen + old_log_prob + update_actor * one-step-overlap async: step ≈ wait_prev_gen + old_log_prob + update_actor ![One Step Off Megatron Performance]( https://raw.githubusercontent.com/eric-haibin-lin/verl-community/refs/heads/main/docs/one_step_off_megatron.png) > source data: https://wandb.ai/hou-zg-meituan/one-step-off-policy?nw=nwuserhouzg ## Implementation ### One Step Off Policy Async Pipeline Our implemented **One Step Off Policy Async Pipeline** integrates seamlessly into the existing training logic at minimal cost, eliminating the need for additional sample storage management. The core mechanism uses `async_gen_next_batch` for asynchronous rollout generation while maintaining continuous operation during epoch transitions via `create_continuous_iterator`.
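Before the pipeline skeleton below, here is a minimal sketch of the future wrapper it relies on (illustrative only; the actual `GenerationBatchFuture` in the recipe may carry more state):

```python
import ray

class GenerationBatchFuture:
    """Pairs a training batch with the in-flight rollout that will complete it."""

    def __init__(self, epoch, batch, gen_batch_output):
        self.epoch = epoch
        self.batch = batch                         # the raw batch, already read
        self.gen_batch_output = gen_batch_output   # Ray future of the async rollout

    def get(self):
        # Block only here, i.e. when the trainer actually needs the sequences.
        gen_batch_output = ray.get(self.gen_batch_output)
        # Merge the rollout output into the batch (verl's DataProto supports union).
        return self.batch.union(gen_batch_output)
```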
```python # iterator generator, to simplify one-step integration with the training process def _create_continuous_iterator(self): for epoch in range(self.config.trainer.total_epochs): iterator = iter(self.train_dataloader) for batch_dict in iterator: yield epoch, batch_dict # read the next batch of samples, sync parameters and launch the async gen_seq def _async_gen_next_batch(self, continuous_iterator): # read train_data try: epoch, batch_dict = next(continuous_iterator) except StopIteration: return None batch = DataProto.from_single_dict(batch_dict) gen_batch = batch_process(batch) # sync weights from actor to rollout self.sync_rollout_weights() # async generation gen_batch_output = self.rollout_wg.async_generate_sequences(gen_batch) # wrap the result in a future return GenerationBatchFuture(epoch, batch, gen_batch_output) continuous_iterator = self._create_continuous_iterator() # run rollout first to achieve one-step-off batch_data_future = self._async_gen_next_batch(continuous_iterator) while batch_data_future is not None: # wait for the gen_seq result from the previous step batch = batch_data_future.get() # launch the next async call to generate sequences batch_data_future = self._async_gen_next_batch(continuous_iterator) # compute advantages batch = critic.compute_values(batch) batch = reference.compute_log_prob(batch) batch = reward.compute_reward(batch) batch = compute_advantages(batch) # model update critic_metrics = critic.update_critic(batch) actor_metrics = actor.update_actor(batch) ``` ### Parameter Synchronization A highlight is that our NCCL-based weight updating for the rollout model performs very well: most of the time the latency is under 300 ms, which is negligible for RLHF. > **sync_rollout_weights**: The time for synchronizing parameters from actor to rollout is extremely fast and can almost > be ignored because it is implemented with NCCL. ```python class ActorRolloutRefWorker: # actor acquires the meta-info of model parameters for parameter sync @register(dispatch_mode=Dispatch.ONE_TO_ALL) def get_actor_weights_info(self): params = self._get_actor_params() ret = [] for key, tensor in params.items(): ret.append((key, tensor.size(), tensor.dtype)) self._weights_info = ret return ret # rollout sets the meta-info of model parameters for parameter sync @register(dispatch_mode=Dispatch.ONE_TO_ALL) def set_actor_weights_info(self, weights_info): self._weights_info = weights_info class AsyncRayPPOTrainer(RayPPOTrainer): def init_workers(self): ...
# rollout obtains the meta-info of model parameters from the actor for parameter sync weights_info = self.actor_wg.get_actor_weights_info()[0] self.rollout_wg.set_actor_weights_info(weights_info) # Create an actor-rollout communication group for parameter sync actor_rollout_workers = self.actor_wg.workers + self.rollout_wg.workers collective.create_collective_group( actor_rollout_workers, len(actor_rollout_workers), list(range(0, len(actor_rollout_workers))), backend="nccl", group_name="actor_rollout" ) ``` ```python # the driver process calls the actor and rollout respectively to sync parameters via nccl def sync_rollout_weights(self): self.actor_wg.sync_rollout_weights() ray.get(self.rollout_wg.sync_rollout_weights()) # fsdp model parameter sync @register(dispatch_mode=Dispatch.ONE_TO_ALL, blocking=False) def sync_rollout_weights(self): params = self._get_actor_params() if self._is_actor else None if self._is_rollout: inference_model = ( self.rollout.inference_engine.llm_engine.model_executor.driver_worker.worker.model_runner.model ) patch_vllm_moe_model_weight_loader(inference_model) # Model parameters are broadcast tensor-by-tensor from actor to rollout for key, shape, dtype in self._weights_info: tensor = torch.empty(shape, dtype=dtype, device=get_torch_device().current_device()) if self._is_actor: assert key in params origin_data = params[key] if hasattr(origin_data, "full_tensor"): origin_data = origin_data.full_tensor() if torch.distributed.get_rank() == 0: tensor.copy_(origin_data) from ray.util.collective import collective collective.broadcast(tensor, src_rank=0, group_name="actor_rollout") if self._is_rollout: inference_model.load_weights([(key, tensor)]) ``` ## Usage ### FSDP2 Configuration Example ```shell python3 -m recipe.one_step_off_policy.async_main_ppo \ --config-path=config \ --config-name='one_step_off_ppo_trainer.yaml' \ actor_rollout_ref.actor.strategy=fsdp2 \ # actor and rollout are placed separately actor_rollout_ref.hybrid_engine=False \ # actor and rollout resource trainer.nnodes=1 \ trainer.n_gpus_per_node=6 \ rollout.nnodes=1 \ rollout.n_gpus_per_node=2 ``` ### Megatron Configuration Example ```shell python3 -m recipe.one_step_off_policy.async_main_ppo \ --config-path=config \ --config-name='one_step_off_ppo_megatron_trainer.yaml' \ actor_rollout_ref.actor.strategy=megatron \ # actor and rollout are placed separately actor_rollout_ref.hybrid_engine=False \ # actor and rollout resource trainer.nnodes=1 \ trainer.n_gpus_per_node=6 \ rollout.nnodes=1 \ rollout.n_gpus_per_node=2 ``` ### Configuration Guidelines 1. **GPU Count Relationships** Maintain either of these relationships for optimal batch distribution (see the sanity-check sketch after these guidelines): - `actor_rollout_ref.rollout.n` should be an integer divisor of: `trainer.n_gpus_per_node * trainer.nnodes` - `actor_rollout_ref.rollout.n * data.train_batch_size` should be evenly divisible by: `trainer.n_gpus_per_node * trainer.nnodes` > Rationale: Ensures training samples can be evenly distributed across training GPUs when using partial resources for generation.
2. **Dynamic Resource Tuning** Adjust `trainer.nnodes`, `trainer.n_gpus_per_node`, `rollout.nnodes` and `rollout.n_gpus_per_node` based on phase durations: - **Ideal state**: Rollout and training phases have comparable durations - **Diagnostic metrics**: - Monitor `wait_prev_gen` duration - Analyze `sequence_length` distribution - **Adjustment strategy**: - High `wait_prev_gen` + uniform sequence lengths → Increase rollout resources - High `wait_prev_gen` + long-tail sequences → Optimize stopping criteria (resource increase won't help) > **wait_prev_gen**: The time consumed waiting for the previous rollout to end (the part that is not fully overlapped). **Resource Configuration Strategies:** - **Resource-constrained scenario**: Optimize resource utilization by adjusting GPU allocation ratios, keeping the number of nodes equal to allow training and rollout to share nodes. - Configure `trainer.nnodes = rollout.nnodes` with `trainer.n_gpus_per_node + rollout.n_gpus_per_node = physical_gpus_per_node`. Control rollout resource allocation by adjusting `n_gpus_per_node`. - **Resource-abundant scenario**: Optimize performance by adjusting the number of nodes, keeping the number of GPUs per node equal to enable independent scaling of training and rollout parallelism. - Configure `trainer.n_gpus_per_node = rollout.n_gpus_per_node` and control rollout resource allocation by adjusting `trainer.nnodes` and `rollout.nnodes` to achieve optimal performance. > **Note**: The total number of nodes required by the system is not simply `trainer.nnodes + rollout.nnodes`. The > actual calculation depends on GPU capacity: > - When `trainer.n_gpus_per_node + rollout.n_gpus_per_node <= physical_gpus_per_node`, > the required node count is `max(trainer.nnodes, rollout.nnodes)` > - When `trainer.n_gpus_per_node + rollout.n_gpus_per_node > physical_gpus_per_node`, > the required node count is `trainer.nnodes + rollout.nnodes`
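As a quick sanity check of the relationships in guideline 1 and the node-count rule above, with hypothetical configuration values:

```python
# Hypothetical configuration values; substitute your own.
trainer_nnodes, trainer_n_gpus_per_node = 1, 6
rollout_nnodes, rollout_n_gpus_per_node = 1, 2
rollout_n, train_batch_size = 3, 512
physical_gpus_per_node = 8

train_gpus = trainer_nnodes * trainer_n_gpus_per_node
# Guideline 1: rollout.n divides the training GPU count, or
# rollout.n * train_batch_size is divisible by the training GPU count.
assert train_gpus % rollout_n == 0 or (rollout_n * train_batch_size) % train_gpus == 0, \
    "training samples cannot be evenly distributed across the training GPUs"

# Required node count depends on whether trainer and rollout fit on the same machines.
if trainer_n_gpus_per_node + rollout_n_gpus_per_node <= physical_gpus_per_node:
    required_nodes = max(trainer_nnodes, rollout_nnodes)
else:
    required_nodes = trainer_nnodes + rollout_nnodes
print(f"required nodes: {required_nodes}")  # -> 1 for this example
```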
## Functional Support | Category | Support Situation | |--------------------|------------------------------------------------------------------------------------------------------| | train engine | FSDP2<br/>Megatron | | rollout engine | vLLM | | AdvantageEstimator | GRPO<br/>GRPO_PASSK<br/>REINFORCE_PLUS_PLUS<br/>RLOO<br/>OPO<br/>REINFORCE_PLUS_PLUS_BASELINE<br/>GPG | | Reward | all | ================================================ FILE: verl_rl/docs/advance/placement.rst ================================================ Ray API Design Tutorial ======================================= Last updated: 10/30/2024. We provide a tutorial for our Ray API design, including: - Ray basic concepts - Resource Pool and RayWorkerGroup - Data Dispatch, Execution and Collection - Initialize the RayWorkerGroup and execute the distributed computation in the given Resource Pool See details in `tutorial.ipynb `_. ================================================ FILE: verl_rl/docs/advance/ppo_lora.rst ================================================ RL(HF) algorithms with LoRA Support =========================================== Last updated: 06/05/2025. We support LoRA (Low-Rank Adaptation) for reinforcement learning algorithms such as PPO, GRPO, and others. LoRA is a parameter-efficient fine-tuning technique that injects trainable low-rank matrices into pre-trained weights (typically linear layers). This reduces the memory footprint and compute cost, making it possible to fine-tune large models with limited hardware. The benefits this brings include: - reinforcement learning with very large models (e.g. 70B+) on modest hardware (e.g. 8x80G GPUs), - larger batch sizes due to reduced memory usage, - easier model transfer and deployment, as only the LoRA adapters need to be saved, - combination with techniques like `SLoRA `_ or `CCoE `_ to serve multiple LoRA adapters efficiently. This guide explains how to enable LoRA in RL training and configure the related parameters. Usage Guide ------------------------ 1. LoRA is available in the `verl.trainer.ppo.ray_trainer.RayPPOTrainer`. Examples are provided via the `verl.trainer.main_ppo` entry point. 2. Currently, LoRA is supported via huggingface peft, only with the fsdp/fsdp2 and vllm backends (sglang support coming soon). - `strategy=fsdp` or `strategy=fsdp2` - `rollout.name=vllm` 3. Required configurations for LoRA: - `actor_rollout_ref.model.lora_rank`: int, set to a reasonable value greater than 0 (e.g., 8, 16, 32, 64) - `actor_rollout_ref.model.lora_alpha`: float, the alpha term in LoRA - `actor_rollout_ref.rollout.load_format="safetensors"`: required. This enables vLLM to load the base model. - `actor_rollout_ref.model.target_modules`: the target modules for LoRA. Typically set to "all-linear". 4. Recommended options: - `actor_rollout_ref.model.use_shm=True`: preload the model into `/dev/shm` to improve model loading speed. - `actor_rollout_ref.rollout.layered_summon=True`: this enables the actor model to gather the FSDP shards per layer when synchronizing the LoRA adapter to vLLM, thereby reducing peak GPU memory. Recommended if the model is very large (70B+) or GPU memory is limited (< 48GB). Best Practices and Notes ------------------------- 1. **Learning rate**: it is recommended to increase the learning rate by an order of magnitude. 2. **LoRA Rank**: - Too small a rank can hurt convergence. - LoRA rank recommendation from @thelongestusernameofall: - A very small lora_rank can lead to slower convergence or worse training performance. It is recommended to set lora_rank to be >= 32. Tests have shown that for a 0.5B model, with lora_rank=32, the training convergence speed and final performance are almost identical to non-LoRA training. - For a 32B model, with lora_rank=128, the training convergence speed and final performance are also almost identical to non-LoRA training.
- More comprehensive reference results are coming soon. .. image:: https://github.com/eric-haibin-lin/verl-community/blob/f2b80b8b26829124dd393b7a795a0640eff11644/docs/lora.jpg?raw=true 3. Reference configuration for RL training with the Qwen2.5-72B model using 8 x 80GB GPUs (increase lora_rank if needed): .. code-block:: data.train_batch_size=64 \ actor_rollout_ref.model.use_shm=True \ actor_rollout_ref.model.lora_rank=32 \ actor_rollout_ref.model.lora_alpha=32 \ actor_rollout_ref.model.target_modules=all-linear \ actor_rollout_ref.actor.optim.lr=3e-5 \ actor_rollout_ref.actor.fsdp_config.fsdp_size=8 \ actor_rollout_ref.actor.fsdp_config.param_offload=True \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=True \ actor_rollout_ref.rollout.tensor_model_parallel_size=8 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.rollout.max_num_seqs=64 \ actor_rollout_ref.rollout.max_model_len=1536 \ actor_rollout_ref.rollout.max_num_batched_tokens=1536 \ actor_rollout_ref.rollout.load_format=safetensors \ actor_rollout_ref.rollout.layered_summon=True \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=1 \ Example Script ------------------- For an end-to-end example, refer to the script below: examples/grpo_trainer/run_qwen2_5-3b_gsm8k_grpo_lora.sh ================================================ FILE: verl_rl/docs/advance/rollout_trace.rst ================================================ Trace Function Usage Instructions ======================================== Last updated: 07/10/2025. Applicable Scenarios -------------------- Agentic RL involves multiple turns of conversation, tool invocations, and user interactions during the rollout process. During model training, it is necessary to track function calls, inputs, and outputs to understand how data flows through the application. By recording the inputs, outputs, and corresponding timestamps of functions, the Trace feature lets you inspect, in complex multi-turn conversations, how data is transformed at each interaction and the entire process leading to the final output, which helps in understanding the details of how the model processes data and in optimizing training results. The Trace feature integrates commonly used agent trace tools; wandb weave and mlflow are already supported. Users can choose the appropriate trace tool according to their own needs and preferences. Here, we introduce the usage of each tool. Trace Parameter Configuration ----------------------------- - ``actor_rollout_ref.rollout.trace.backend=mlflow|weave`` # the trace backend type - ``actor_rollout_ref.rollout.trace.token2text=True`` # to show decoded text in the trace view Glossary -------- +----------------+------------------------------------------------------------------------------------------------------+ | Object | Explanation | +================+======================================================================================================+ | trajectory | A complete multi-turn conversation includes: | | | 1. LLM output at least once | | | 2.
Tool Call | +----------------+------------------------------------------------------------------------------------------------------+ | step | The training step; corresponds to the global_steps variable in the trainer | +----------------+------------------------------------------------------------------------------------------------------+ | sample_index | The identifier of the sample, defined in the extra_info.index of the dataset. It is usually a number,| | | but may also be a uuid in some cases. | +----------------+------------------------------------------------------------------------------------------------------+ | rollout_n | In the GRPO algorithm, each sample is rolled out n times. rollout_n represents the serial number of | | | the rollout. | +----------------+------------------------------------------------------------------------------------------------------+ | validate | Whether the test dataset is being used for evaluation | +----------------+------------------------------------------------------------------------------------------------------+ Rollout trace functions ----------------------- There are 2 functions used for tracing: 1. ``rollout_trace_op``: This is a decorator used to mark the functions to trace. By default, only a few methods have it; you can add it to more functions to trace more information. 2. ``rollout_trace_attr``: This function is used to mark the entry of a trajectory and attach some info to the trace. If you add a new type of agent, you may need to add it to enable tracing. Usage of wandb weave -------------------- 1.1 Basic Configuration ~~~~~~~~~~~~~~~~~~~~~~~ 1. Set the ``WANDB_API_KEY`` environment variable 2. Configuration Parameters 1. ``actor_rollout_ref.rollout.trace.backend=weave`` 2. ``trainer.logger=['console', 'wandb']``: This item is optional. Trace and logger are independent functions. When using Weave, it is recommended to also enable the wandb logger to implement both functions in one system. 3. ``trainer.project_name=$project_name`` 4. ``trainer.experiment_name=$experiment_name`` 5. ``actor_rollout_ref.rollout.mode=async``: Since trace is mainly used for agentic RL, you need to enable the agent loop using async mode for either vllm or sglang. Note: The Weave Free Plan comes with a default monthly network traffic allowance of 1GB. During the training process, the amount of trace data generated is substantial, reaching dozens of gigabytes per day, so it is necessary to select an appropriate wandb plan. 1.2 View Trace Logs ~~~~~~~~~~~~~~~~~~~ After executing the training, on the project page, you can see the WEAVE sidebar. Click Traces to view it. Each trace entry corresponds to a trajectory. You can filter and select the trajectories you need to view by step, sample_index, rollout_n, and experiment_name. After enabling token2text, prompt_text and response_text will be automatically added to the output of ToolAgentLoop.run, making it convenient to view the input and output content. .. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/weave_trace_list.png?raw=true 1.3 Compare Trace Logs ~~~~~~~~~~~~~~~~~~~~~~ Weave can select multiple trace items and then compare the differences among them. .. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/weave_trace_compare.png?raw=true Usage of mlflow --------------- 1. Basic Configuration ~~~~~~~~~~~~~~~~~~~~~~ 1. Set the ``MLFLOW_TRACKING_URI`` environment variable, which can be: 1. HTTP and HTTPS URLs corresponding to online services 2.
Local files or directories, e.g. ``sqlite:////tmp/mlruns.db``, meaning the data is stored in ``/tmp/mlruns.db``. When using local files, it is necessary to initialize the file first (e.g., start the UI: ``mlflow ui --backend-store-uri sqlite:////tmp/mlruns.db``) to avoid conflicts when multiple workers create files simultaneously. 2. Configuration Parameters 1. ``actor_rollout_ref.rollout.trace.backend=mlflow`` 2. ``trainer.logger=['console', 'mlflow']``. This item is optional. Trace and logger are independent functions. When using mlflow, it is recommended to also enable the mlflow logger to implement both functions in one system. 3. ``trainer.project_name=$project_name`` 4. ``trainer.experiment_name=$experiment_name`` 2. View Log ~~~~~~~~~~~ Since ``trainer.project_name`` corresponds to Experiments in mlflow, in the mlflow view, you need to select the corresponding project name, then click the "Traces" tab to view traces. Among them, ``trainer.experiment_name`` corresponds to the experiment_name of tags, and the tags corresponding to step, sample_index, rollout_n, etc., are used for filtering and viewing. For example, searching for ``"tags.step = '1'"`` can display all trajectories of step 1. .. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/mlflow_trace_list.png?raw=true Opening one of the trajectories allows you to view each function call process within it. After enabling token2text, prompt_text and response_text will be automatically added to the output of ToolAgentLoop.run, making it convenient to view the content. .. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/mlflow_trace_view.png?raw=true Note: 1. mlflow does not support comparing multiple traces. 2. rollout_trace cannot associate the mlflow trace with the run, so the trace content cannot be seen in the mlflow run logs. ================================================ FILE: verl_rl/docs/advance/rope.rst ================================================ RoPE Scaling override ======================================= Last updated: 05/14/2025. Some models such as `Qwen/Qwen2.5-7B-Instruct `_ support RoPE Scaling but don't have it defined in their config.json file. For example, this model supports this configuration: .. code:: python { ..., "rope_scaling": { "factor": 4.0, "original_max_position_embeddings": 32768, "type": "yarn" } } In order to support a longer context for such models, you must override the model configs when starting the trainer. PPO example: .. code:: bash +actor_rollout_ref.model.override_config.rope_scaling.type=yarn \ +actor_rollout_ref.model.override_config.rope_scaling.factor=4.0 \ +actor_rollout_ref.model.override_config.rope_scaling.original_max_position_embeddings=32768 \ And for the critic model: .. code:: bash +critic.model.override_config.rope_scaling.type=yarn \ +critic.model.override_config.rope_scaling.factor=4.0 \ +critic.model.override_config.rope_scaling.original_max_position_embeddings=32768 \ ================================================ FILE: verl_rl/docs/algo/baseline.md ================================================ # Algorithm Baselines Last updated: 06/18/2025. ## Math related datasets ### GSM8k Assuming the GSM8k/math dataset is preprocessed via: ```bash python3 examples/data_preprocess/*.py ``` Refer to the table below to reproduce RL training from different pre-trained checkpoints. Below is the performance on the GSM8k dataset if not specified otherwise. More comprehensive benchmark results are available in the recipe folder.
| Hardware | Model | Method | Test score | Details | |-------------|----------------------------------|-------------------|--------------|---------| | NVIDIA GPU | google/gemma-2-2b-it | hf checkpoint | 23.9 | [Huggingface](https://huggingface.co/google/gemma-2-2b-it#benchmark-results) | | NVIDIA GPU | google/gemma-2-2b-it | SFT | 52.06 | [command and logs](https://github.com/eric-haibin-lin/verl-data/blob/experiments/gsm8k/gemma-2-2b-it-sft-0.411.log) | | NVIDIA GPU | google/gemma-2-2b-it | SFT + PPO | 64.02 | [command and logs](https://github.com/eric-haibin-lin/verl-data/blob/experiments/gsm8k/gemma-2-2b-it-ppo-bsz512_4-prompt1024-resp-512-0.640.log), [wandb](https://api.wandb.ai/links/verl-team/h7ux8602) | | NVIDIA GPU | Qwen/Qwen2.5-0.5B-Instruct | hf checkpoint | 36.4 | [Qwen blog](https://qwenlm.github.io/blog/qwen2.5-llm/) | | NVIDIA GPU | Qwen/Qwen2.5-0.5B-Instruct | PPO | 56.7 | [command and log](https://github.com/eric-haibin-lin/verl-data/blob/experiments/gsm8k/Qwen2.5-0.5B-bsz256_2-prompt1024-resp512-0.567.log) | | NVIDIA GPU | Qwen/Qwen2.5-0.5B-Instruct | PRIME | 58.7 | [script](https://github.com/volcengine/verl/blob/main/recipe/prime/run_prime_qwen.sh), [wandb](https://api.wandb.ai/links/zefan-wang-thu-tsinghua-university/rxd1btvb) | | NVIDIA GPU | Qwen/Qwen2.5-0.5B-Instruct | GRPO-LoRA | 54.3 | [command and logs](https://github.com/eric-haibin-lin/verl-data/blob/experiments/gsm8k/Qwen2.5-0.5B-bsz64_2-prompt512-resp1024-lorarank32-score0.543.log)| | NVIDIA GPU | Qwen/Qwen2.5-1.5B-Instruct | GRPO-LoRA | 77.9 | [command and logs](https://github.com/eric-haibin-lin/verl-data/blob/experiments/gsm8k/Qwen2.5-1.5B-bsz64_2-prompt512-resp1024-lorarank32-score0.779.log)| | NVIDIA GPU | Qwen/Qwen2.5-3B-Instruct | GRPO-LoRA | 86.1 | [command and logs](https://github.com/eric-haibin-lin/verl-data/blob/experiments/gsm8k/Qwen2.5-3B-bsz64_2-prompt512-resp1024-lorarank32-score0.861.log)| | NVIDIA GPU | deepseek-ai/deepseek-llm-7b-chat | PPO (Megatron) | 69.5 [1] | [log](https://github.com/eric-haibin-lin/verl-data/blob/experiments/gsm8k/deepseek-llm-7b-chat-megatron-bsz256_4-prompt512-resp512-0.695.log), [wandb](https://wandb.ai/verl-team/verl_megatron_gsm8k_examples/runs/10fetyr3) | | NVIDIA GPU | Qwen/Qwen2-7B-Instruct | GRPO | 89 | [script](https://github.com/volcengine/verl/blob/a65c9157bc0b85b64cd753de19f94e80a11bd871/examples/grpo_trainer/run_qwen2-7b_seq_balance.sh) | | NVIDIA GPU | Qwen/Qwen2-7B-Instruct | GRPO (FSDP2) | 89.8 | [log](https://github.com/eric-haibin-lin/verl-data/blob/experiments/gsm8k/qwen2-7b-fsdp2.log) | | NVIDIA GPU | Qwen/Qwen2-7B-Instruct | GRPO (Megatron) | 89.6 | [log](https://github.com/eric-haibin-lin/verl-data/blob/experiments/gsm8k/qwen2-7b_math_megatron.log) | | NVIDIA GPU | Qwen/Qwen2.5-7B-Instruct | ReMax | 97 | [script](https://github.com/eric-haibin-lin/verl/blob/main/examples/remax_trainer/run_qwen2.5-3b_seq_balance.sh), [wandb](https://wandb.ai/liziniu1997/verl_remax_example_gsm8k/runs/vxl10pln) | | NVIDIA GPU | Qwen/Qwen2.5-7B-Instruct | SPPO | 65.6 (MATH) | [SPPO script](https://github.com/volcengine/verl/tree/main/recipe/sppo/README.md) | | NVIDIA GPU | Qwen/Qwen2.5-7B-Instruct | GRPO-LoRA | 93.4 | [command and logs](https://github.com/eric-haibin-lin/verl-data/blob/experiments/gsm8k/Qwen2.5-7B-bsz64_8-prompt512-resp1024-lorarank32-score0.934.log)| | NVIDIA GPU | Mixtral-8x22B-Instruct-v0.1 | Instruct model | 83.7 | [Qwen Blog](https://qwenlm.github.io/blog/qwen2.5-llm/) | | NVIDIA GPU | Mixtral-8x22B-Instruct-v0.1 | RLOO (Megatron) | 92.3 | 
[wandb](https://api.wandb.ai/links/ppo_dev/sbuiuf2d) | | NVIDIA GPU | Qwen/Qwen2.5-7B-Instruct | SPIN | 92 | [script](https://github.com/volcengine/verl/tree/main/recipe/spin/README.md) | | NVIDIA GPU | Qwen/Qwen2-7B-Instruct | GPG | 88 | [log](https://github.com/diqiuzhuanzhuan/verldata/blob/main/run_logs/qwen2-7b_math.log), [wandb](https://wandb.ai/diqiuzhuanzhuan/verl_gpg_example_gsm8k_math/runs/ab86c4va) | | NVIDIA GPU | Qwen/Qwen2-7B-Instruct | GPG (Megatron) | 88 | [log](https://github.com/diqiuzhuanzhuan/verldata/blob/main/run_logs/qwen2-7b_math_megatron.log), [wandb](https://wandb.ai/diqiuzhuanzhuan/verl_gpg_example_gsm8k_math/runs/yy8bheu8) | | NVIDIA GPU | Qwen/Qwen2.5-VL-7B-Instruct | GRPO (Megatron) | 65.4 (GEO3k) | [script](https://github.com/volcengine/verl/blob/main/examples/grpo_trainer/run_qwen2_5_vl-7b-megatron.sh), [wandb](https://api.wandb.ai/links/megatron-core-moe-dev/1yngvkek) | | AMD MI300 | deepseek-ai/deepseek-llm-7b-chat | PPO | 70.5 [1] | [log](https://github.com/yushengsu-thu/verl_training_log/blob/main/gsm8k/ppo_run_deepseek7b_llm.log) | | AMD MI300 | deepseek-ai/deepseek-llm-7b-chat | GRPO | 71.4 [1] | [log](https://github.com/yushengsu-thu/verl_training_log/blob/main/gsm8k/grpo_run_deepseek7b_llm.log) | | NVIDIA GPU | Qwen/Qwen2.5-14B-Instruct | GRPO-LoRA | 94.6 | [command and logs](https://github.com/eric-haibin-lin/verl-data/blob/experiments/gsm8k/Qwen2.5-14B-bsz64_8-prompt512-resp1024-lorarank32-score0.946.log)| | NVIDIA GPU | Qwen/Qwen2.5-32B-Instruct | GRPO-LoRA | 95.8 | [command and logs](https://github.com/eric-haibin-lin/verl-data/blob/experiments/gsm8k/Qwen2.5-32B-bsz64_8-prompt512-resp1024-lorarank32-score0.958.log)| | NVIDIA GPU | Qwen/Qwen2.5-72B-Instruct | GRPO-LoRA | 96.0 | [command and logs](https://github.com/eric-haibin-lin/verl-data/blob/experiments/gsm8k/Qwen2.5-72B-bs64_8-prompt512-resp1024-lorarank32-score0.960.log)| ### DAPO math-17k - Training DAPO math-17k dataset: https://huggingface.co/datasets/BytedTsinghua-SIA/DAPO-Math-17k - Testing: AIME'24: https://huggingface.co/datasets/BytedTsinghua-SIA/AIME-2024 Note: - For Qwen/Qwen2.5-Math-7B, we directly modify the max_position_embeddings to 32768 without observing performance degradation, in order to train with longer response lengths. | Hardware | Model | Method | Test score | Details | |-------------|----------------------------------|-------------------|--------------|---------| | NVIDIA GPU | Qwen/Qwen2.5-Math-7B (32k) | DAPO | 36.3 | [command](https://github.com/volcengine/verl/blob/main/recipe/dapo/test_dapo_7b_math.sh), [logs](https://wandb.ai/verl-org/DAPO%20Reproduction%20on%20verl/runs/ow47vvon?nw=nwusertongyuxuan361)| ## Coding related datasets Below is the result on leetcode if not specified otherwise. | Hardware | Model | Method | Test score | Details | |-------------|----------------------------------|-------------------|--------------|---------| | NVIDIA GPU | PRIME-RL/Eurus-2-7B-SFT | PRIME | 36.1 | [script](https://github.com/volcengine/verl/blob/main/recipe/prime/run_prime_qwen_code.sh), [swanlab](https://swanlab.cn/@wangzefan/prime_example/runs/7f541qhspgmy8nmhdlx35/chart) | ### Notes [1] During evaluation, we have only extracted answers following the format `"####"` (see the sketch below). A more flexible answer extraction, a longer response length, and better prompt engineering may lead to a higher score. [2] The default value of `actor_rollout_ref.actor.entropy_coeff` is set to `0.0` since verl 0.3.x on 2025-05-30, which is different from previous versions.
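For note [1], here is a minimal sketch of this kind of `"####"` extraction (illustrative only; the actual evaluation code may be stricter or more lenient):

```python
import re

def extract_gsm8k_answer(response: str) -> str | None:
    """Grab the number following '####', normalizing commas and a trailing period."""
    match = re.search(r"####\s*(-?[\d,\.]+)", response)
    if match is None:
        return None
    return match.group(1).replace(",", "").rstrip(".")

assert extract_gsm8k_answer("... so the total is #### 1,234.") == "1234"
```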
================================================ FILE: verl_rl/docs/algo/dapo.md ================================================ # Recipe: Decoupled Clip and Dynamic Sampling Policy Optimization (DAPO) Last updated: 06/19/2025. > Open-Source Algorithm Implementation & Experiment Running: [Yuxuan Tong](https://tongyx361.github.io/), [Guangming Sheng](https://hk.linkedin.com/in/guangming-sheng-b50640211) 🏠 [Homepage](https://dapo-sia.github.io/) | 📝 [Paper@arXiv](https://arxiv.org/abs/2503.14476) | 🤗 [Datasets&Models@HF](https://huggingface.co/collections/BytedTsinghua-SIA/dapo-67d7f1517ee33c8aed059da0) | 🐱 [Code@GitHub](https://github.com/volcengine/verl/tree/recipe/dapo/recipe/dapo) | 🐱 [Repo@GitHub](https://github.com/BytedTsinghua-SIA/DAPO) > We propose the **D**ecoupled Clip and Dynamic s**A**mpling **P**olicy **O**ptimization (DAPO) algorithm. By making our work publicly available, we provide the broader research community and society with practical access to scalable reinforcement learning, enabling all to benefit from these advancements. Our system is based on the awesome [verl](https://github.com/volcengine/verl) framework. Thanks for their great work! Applying DAPO training to the Qwen2.5-32B base model outperforms the previous state-of-the-art DeepSeek-R1-Zero-Qwen-32B on AIME 2024, achieving **50%** accuracy with **50%** fewer training steps. > > ![dapo-main-result](https://dapo-sia.github.io/static/images/score.png) ## Quickstart 1. Prepare the datasets **on the Ray cluster**: ```bash bash prepare_dapo_data.sh # This downloads the datasets to ${HOME}/verl/data by default ``` 2. Submit the job to the Ray cluster **from any machine**: ```bash cd verl # Repo root export RAY_ADDRESS="http://${RAY_IP:-localhost}:8265" # The Ray cluster address to connect to export WORKING_DIR="${PWD}" # The local directory to package to the Ray cluster # Set the runtime environment like env vars and pip packages for the Ray cluster in yaml export RUNTIME_ENV="./recipe/dapo/runtime_env.yaml" # This sets environment variables for the Ray cluster bash recipe/dapo/run_dapo_qwen2.5_32b.sh # or other scripts ``` ## Reproduction Runs | Setup | AIME 2024 Acc.
| Hardware | Image | Commit | Environment Variables | Training Script | Training Record | | -------------------------------------------- | -------------- | --------- | -------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------- | | DAPO | 52% | 16x8xH800 | `hiyouga/verl:ngc-th2.6.0-cu126-vllm0.8.3-flashinfer0.2.2-cxx11abi0` | [`4f80e4`](https://github.com/volcengine/verl/tree/4f80e465c2ec79ab9c3c30ec74b9745de61d0490) | [runtime_env.yaml](https://github.com/volcengine/verl/blob/4f80e465c2ec79ab9c3c30ec74b9745de61d0490/recipe/dapo/runtime_env.yaml) | [run_dapo_qwen2.5_32b.sh](https://github.com/volcengine/verl/blob/4f80e465c2ec79ab9c3c30ec74b9745de61d0490/recipe/dapo/run_dapo_qwen2.5_32b.sh) | [W&B](https://wandb.ai/verl-org/DAPO%20Reproduction%20on%20verl/workspace?nw=wmb4qxfht0n) | | DAPO w/o Dynamic Sampling | 50% | 16x8xH800 | `hiyouga/verl:ngc-th2.6.0-cu126-vllm0.8.3-flashinfer0.2.2-cxx11abi0` | [`4f80e4`](https://github.com/volcengine/verl/tree/4f80e465c2ec79ab9c3c30ec74b9745de61d0490) | [runtime_env.yaml](https://github.com/volcengine/verl/blob/4f80e465c2ec79ab9c3c30ec74b9745de61d0490/recipe/dapo/runtime_env.yaml) | [run_dapo_wo_ds_qwen2.5_32b.sh](https://github.com/volcengine/verl/blob/4f80e465c2ec79ab9c3c30ec74b9745de61d0490/recipe/dapo/run_dapo_wo_ds_qwen2.5_32b.sh) | [W&B](https://wandb.ai/verl-org/DAPO%20Reproduction%20on%20verl/workspace?nw=wmb4qxfht0n) | | DAPO w/o Token-level Loss & Dynamic Sampling | 44% | 16x8xH20 | `hiyouga/verl:ngc-th2.5.1-cu120-vllm0.7.4-hotfix` | [`4f80e4`](https://github.com/volcengine/verl/tree/4f80e465c2ec79ab9c3c30ec74b9745de61d0490) | [runtime_env.yaml](https://github.com/volcengine/verl/blob/4f80e465c2ec79ab9c3c30ec74b9745de61d0490/recipe/dapo/runtime_env.yaml) | [run_dapo_early_qwen2.5_32b.sh](https://github.com/volcengine/verl/blob/4f80e465c2ec79ab9c3c30ec74b9745de61d0490/recipe/dapo/run_dapo_early_qwen2.5_32b.sh) | [W&B](https://wandb.ai/verl-org/DAPO%20Reproduction%20on%20verl/workspace?nw=wmb4qxfht0n) | > [!IMPORTANT] > > **📢 Call for Contribution!** > > Welcome to submit your reproduction runs and setups! ## Configuration ### Separated Clip Epsilons (-> Clip-Higher) An example configuration: ```yaml actor_rollout_ref: actor: clip_ratio_low: 0.2 clip_ratio_high: 0.28 ``` `clip_ratio_low` and `clip_ratio_high` specify the $\varepsilon_{\text {low }}$ and $\varepsilon_{\text {high }}$ in the DAPO objective. Core relevant code: ```python pg_losses1 = -advantages * ratio pg_losses2 = -advantages * torch.clamp(ratio, 1 - cliprange_low, 1 + cliprange_high) pg_losses = torch.maximum(pg_losses1, pg_losses2) ``` ### Dynamic Sampling (with Group Filtering) An example configuration: ```yaml data: gen_batch_size: 1536 train_batch_size: 512 algorithm: filter_groups: enable: True metric: acc # score / seq_reward / seq_final_reward / ... max_num_gen_batches: 10 # Non-positive values mean no upper limit ``` Setting `filter_groups.enable` to `True` will filter out groups whose outputs' `metric` are all the same, e.g., for `acc`, groups whose outputs' accuracies are all 1 or 0. 
The trainer will repeat sampling with `gen_batch_size` until there are enough qualified groups for `train_batch_size`, or until the upper limit specified by `max_num_gen_batches` is reached. Core relevant code: ```python prompt_bsz = self.config.data.train_batch_size if num_prompt_in_batch < prompt_bsz: print(f'{num_prompt_in_batch=} < {prompt_bsz=}') num_gen_batches += 1 max_num_gen_batches = self.config.algorithm.filter_groups.max_num_gen_batches if max_num_gen_batches <= 0 or num_gen_batches < max_num_gen_batches: print(f'{num_gen_batches=} < {max_num_gen_batches=}. Keep generating...') continue else: raise ValueError( f'{num_gen_batches=} >= {max_num_gen_batches=}. Generated too many. Please check your data.' ) else: # Align the batch traj_bsz = self.config.data.train_batch_size * self.config.actor_rollout_ref.rollout.n batch = batch[:traj_bsz] ``` ### Flexible Loss Aggregation Mode (-> Token-level Loss) An example configuration: ```yaml actor_rollout_ref: actor: loss_agg_mode: "token-mean" # / "seq-mean-token-sum" / "seq-mean-token-mean" # NOTE: "token-mean" is the default behavior ``` Setting `loss_agg_mode` to `token-mean` will average the (policy gradient) loss across all the tokens in all the sequences in a mini-batch. Core relevant code: ```python if loss_agg_mode == "token-mean": loss = verl_F.masked_mean(loss_mat, loss_mask) elif loss_agg_mode == "seq-mean-token-sum": seq_losses = torch.sum(loss_mat * loss_mask, dim=-1) # token-sum loss = torch.mean(seq_losses) # seq-mean elif loss_agg_mode == "seq-mean-token-mean": seq_losses = torch.sum(loss_mat * loss_mask, dim=-1) / torch.sum(loss_mask, dim=-1) # token-mean loss = torch.mean(seq_losses) # seq-mean else: raise ValueError(f"Invalid loss_agg_mode: {loss_agg_mode}") ``` ### Overlong Reward Shaping An example configuration: ```yaml data: max_response_length: 20480 # 16384 + 4096 reward_model: overlong_buffer: enable: True len: 4096 penalty_factor: 1.0 ``` Setting `overlong_buffer.enable` to `True` will penalize outputs whose lengths are overlong but still within the hard context limit. Specifically, the penalty increases linearly from `0` to `overlong_buffer.penalty_factor` as the output length exceeds the expected length (`max_response_length - overlong_buffer.len`) by `0` to `overlong_buffer.len` tokens. Core relevant code: ```python if self.overlong_buffer_cfg.enable: overlong_buffer_len = self.overlong_buffer_cfg.len expected_len = self.max_resp_len - overlong_buffer_len exceed_len = valid_response_length - expected_len overlong_penalty_factor = self.overlong_buffer_cfg.penalty_factor overlong_reward = min(-exceed_len / overlong_buffer_len * overlong_penalty_factor, 0) reward += overlong_reward ``` ## FAQ ### Where is the "Overlong Filtering" in the paper? Most experiments in the paper, including the best-performing one, are run without Overlong Filtering because it somewhat overlaps with Overlong Reward Shaping in terms of properly learning from the longest outputs. So we don't implement it here. ### What's the difference between [the `recipe/dapo` directory in the `main` branch](https://github.com/volcengine/verl/tree/main/recipe/dapo) and the [`recipe/dapo` branch](https://github.com/volcengine/verl/tree/recipe/dapo/recipe/dapo)? [The `recipe/dapo` branch](https://github.com/volcengine/verl/tree/recipe/dapo/recipe/dapo) is for **as-is reproduction** and thus won't be updated with new features.
[The `recipe/dapo` directory in the `main` branch](https://github.com/volcengine/verl/tree/main/recipe/dapo) works as an example of how to extend the latest `verl` to implement an algorithm recipe, and it will be maintained with new features. ### Why can't I produce similar results after modifications? RL infrastructure nowadays still has inherent non-robustness, which we are working hard to improve. We strongly recommend modifying only one thing at a time. We also list some known problems here: 1. Enabling CUDA graph (`enforce_eager=False`) might cause model performance degradation; the cause is still under investigation. ================================================ FILE: verl_rl/docs/algo/entropy.md ================================================ # Recipe: Entropy Mechanism Last updated: 06/27/2025.
The Entropy Mechanism of Reinforcement Learning for Large Language Model Reasoning. [![Paper](https://img.shields.io/badge/paper-A42C25?style=for-the-badge&logo=arxiv&logoColor=white)](https://arxiv.org/pdf/2505.22617) [![Github](https://img.shields.io/badge/PRIME-000000?style=for-the-badge&logo=github&logoColor=000&logoColor=white)](https://github.com/PRIME-RL/Entropy-Mechanism-of-RL) [![alphaXiv](https://img.shields.io/badge/discussion-A42C25?style=for-the-badge&logo=arxiv&logoColor=white&color=blue )](https://www.alphaxiv.org/abs/2505.22617) [![Twitter](https://img.shields.io/badge/Twitter-%23000000.svg?style=for-the-badge&logo=twitter&logoColor=white)](https://x.com/stingning/status/1928088554166505667) [![Twitter](https://img.shields.io/badge/Twitter-%23000000.svg?style=for-the-badge&logo=twitter&logoColor=white)](https://x.com/charlesfornlp/status/1928089451080585283) [![Twitter-ak](https://img.shields.io/badge/Twitter-%23000000.svg?style=for-the-badge&logo=twitter&logoColor=white)](https://x.com/_akhaliq/status/1928077929105268861)
## 🎉News - **[2025/05/29]** 🎉 Ranked **#1** of the day on [Huggingface Daily Papers](https://huggingface.co/papers?date=2025-05-29). - **[2025/05/29]** Released our paper on arXiv. See [here](https://arxiv.org/pdf/2505.22617). We provide insights into the entropy mechanism of RL for LLMs and propose two simple yet effective strategies to alleviate the entropy collapse. ## ✨Getting started After preparing the training data, for training Qwen2.5-7B on a single node, taking the KL-Cov approach as an example, you can simply run: ``` cd verl conda activate your_env bash recipe/dapo/7b_kl_cov.sh ``` While for training Qwen2.5-32B on multiple nodes, you can run the following commands: ``` cd verl conda activate your_env bash recipe/dapo/32b_kl_cov.sh ``` ## 📖Introduction
This paper addresses the entropy collapse issue in scaling reinforcement learning (RL) for large language models (LLMs), where policy entropy drops sharply during training, leading to overconfidence and performance saturation. We empirically establish a relationship between entropy ($H$) and performance ($R$): $R = -a\exp(H) + b$, showing that performance is bottlenecked by entropy exhaustion.
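Taking the fitted relation at face value, $\exp(H) \to 1$ as the policy entropy is fully consumed, so the achievable performance is capped at

$$\lim_{H \to 0} R = -a + b,$$

the predicted ceiling absent any entropy intervention.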
Theoretically, we find that entropy changes are driven by the covariance between the action probability and the logit update, which correlates with the advantage in policy gradient methods. High-probability, high-advantage actions reduce entropy, while rare, high-advantage actions increase it. Empirically, the covariance term remains positive, explaining entropy's monotonic decline. To mitigate this, we propose Clip-Cov and KL-Cov, which restrict updates for high-covariance tokens. These methods effectively prevent entropy collapse and improve performance. ## 📃Evaluation
## 📃Evaluation
Our method maintains a considerably higher level of entropy throughout training. For example, when the baseline's entropy has plateaued and can no longer be consumed, the KL-Cov method still sustains an entropy level over 10 times higher. Meanwhile, the response length of the policy model steadily increases, and its performance on the test set consistently surpasses that of the baseline. This indicates that our model explores more freely during training, learning a better policy through RL.

| **Method** | **AIME24** | **AIME25** | **AMC** | **MATH-500** | **OMNI-MATH** | **OlympiadBench** | **Minerva** | **Avg.** |
| ----------------- | ---------: | ---------: | -------: | -----------: | ------------: | ----------------: | ----------: | -------: |
| *Qwen2.5-7B* | | | | | | | | |
| GRPO | 21.2 | 9.6 | 58.7 | 78.8 | 27.9 | 40.7 | 36.7 | 38.6 |
| w. Clip-higher | 18.1 | 11.5 | 56.6 | 79.2 | 29.8 | 43.3 | 40.4 | 38.8 |
| w. **`CLIP-Cov`** | 22.1 | **15.8** | 58.2 | 80.4 | **30.5** | **44.1** | **41.1** | 40.4 |
| w. **`KL-Cov`** | **22.6** | 12.9 | **61.4** | **80.8** | 29.1 | 42.6 | 38.2 | **40.6** |
| *Qwen2.5-32B* | | | | | | | | |
| GRPO | 21.8 | 16.2 | 69.7 | 84.2 | 35.2 | 43.6 | 45.5 | 45.8 |
| w. Clip-higher | 35.6 | 22.3 | 69.5 | 77.2 | 35.1 | 42.5 | 43.0 | 47.2 |
| w. **`CLIP-Cov`** | 32.3 | 22.7 | 67.2 | **87.0** | **42.0** | **57.2** | 46.0 | 50.3 |
| w. **`KL-Cov`** | **36.8** | **30.8** | **74.5** | 84.6 | 39.1 | 49.0 | **46.3** | **52.2** |

Both of our approaches achieve non-trivial improvements across all benchmarks. Compared to GRPO, our method outperforms it by 2.0% on average for the 7B model and by 6.4% for the 32B model. Moreover, our method yields more substantial gains on the larger Qwen2.5-32B. Specifically, it achieves improvements of 15.0% and 14.6% over GRPO on the most challenging benchmarks, AIME24 and AIME25, respectively.

## 🎈Citation

If you find this paper or repo helpful, please cite us.

```bibtex
@article{cui2025entropy,
  title={The Entropy Mechanism of Reinforcement Learning for Reasoning Language Models},
  author={Cui, Ganqu and Zhang, Yuchen and Chen, Jiacheng and Yuan, Lifan and Wang, Zhi and Zuo, Yuxin and Li, Haozhan and Fan, Yuchen and Chen, Huayu and Chen, Weize and others},
  journal={arXiv preprint arXiv:2505.22617},
  year={2025}
}
```

## 🌻Acknowledgement

We implement our reinforcement learning algorithm extending from [verl](https://github.com/volcengine/verl). We utilize [vLLM](https://github.com/vllm-project/vllm) for inference. Our models are trained primarily on the [Qwen2.5 family](https://github.com/QwenLM/Qwen2.5). Our training data is built from [DAPO-MATH](https://huggingface.co/datasets/BytedTsinghua-SIA/DAPO-Math-17k). Thanks for their great contributions!

## 📬 Contact

For questions, discussion, or collaboration opportunities, feel free to contact:

- Ganqu Cui: cuiganqu@pjlab.org.cn
- Yuchen Zhang: yuchen.zhang2003@gmail.com
- Jiacheng Chen: jackchan9345@gmail.com
- Ning Ding: ningding.cs@gmail.com

================================================
FILE: verl_rl/docs/algo/gpg.md
================================================
# GPG: Group Policy Gradient

Last updated: 07/03/2025.

Group Policy Gradient (GPG) is a minimalist reinforcement learning (RL) method that enhances the reasoning ability of large language models without relying on supervised fine-tuning or complex tricks.
GPG revisits traditional policy gradients and directly optimizes the RL objective: no surrogate losses, no KL penalties, no critic, and no reference model. Compared to GRPO, GPG is simpler, more efficient, and achieves better results on many tasks. For more details, please refer to the original paper [GPG: A Simple and Strong Reinforcement Learning Baseline for Model Reasoning](https://arxiv.org/abs/2504.02546).

## Key Components

- Uses a corrected advantage function to improve policy-gradient accuracy and training efficiency.
- Eliminates the critic and reference models and avoids KL divergence constraints, significantly simplifying the training process compared to Group Relative Policy Optimization (GRPO).

## Configuration

To configure GPG within the framework, use the following YAML settings.

```yaml
algorithm:
  adv_estimator: gpg
actor_rollout_ref:
  actor:
    policy_loss:
      loss_mode: "gpg"
```

## Advanced Extensions

GPG is a simple and strong baseline for model reasoning. Although it avoids a KL loss in its original form, you can still add one to further improve performance.

```yaml
algorithm:
  adv_estimator: gpg
actor_rollout_ref:
  actor:
    use_kl_loss: True # enable kl regularization
    kl_loss_coef: 0.01
    policy_loss:
      loss_mode: "gpg"
```

================================================
FILE: verl_rl/docs/algo/grpo.md
================================================
# Group Relative Policy Optimization (GRPO)

Last updated: 05/31/2025.

In reinforcement learning, classic algorithms like PPO rely on a "critic" model to estimate the value of actions, guiding the learning process. However, training this critic model can be resource-intensive. GRPO simplifies this process by eliminating the need for a separate critic model. Instead, it operates as follows:

- Group Sampling: For a given problem, the model generates multiple possible solutions, forming a "group" of outputs.
- Reward Assignment: Each solution is evaluated and assigned a reward based on its correctness or quality.
- Baseline Calculation: The average reward of the group serves as a baseline.
- Policy Update: The model updates its parameters by comparing each solution's reward to the group baseline, reinforcing better-than-average solutions and discouraging worse-than-average ones.

This approach reduces computational overhead by avoiding the training of a separate value estimation model, making the learning process more efficient. For more details, refer to the original paper [DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models](https://arxiv.org/pdf/2402.03300).

## Key Components

- No Value Function (Critic-less): unlike PPO, GRPO does not train a separate value network (critic).
- Group Sampling (Grouped Rollouts): instead of evaluating one rollout per input, GRPO generates multiple completions (responses) from the current policy for each prompt. This set of completions is referred to as a group.
- Relative Rewards: within each group, completions are scored (e.g., based on correctness), and rewards are normalized relative to the group.

## Configuration

Note that all configs containing `micro_batch_size` are used to configure the maximum sample or token count per forward or backward pass to avoid GPU OOMs; their values should not change algorithmic/convergence behavior.

Despite many configurations starting with the `ppo_` prefix, they work across different RL algorithms in verl, as the GRPO training loop is similar to that of PPO (without critic). A consolidated example follows; each option is then described in detail.
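As a quick orientation, a minimal GRPO setup might look like the following (a sketch assembled from the options described below; values are illustrative, not tuned recommendations):

```yaml
algorithm:
  adv_estimator: grpo        # replace the default gae estimator
data:
  train_batch_size: 1024     # global prompt batch per rollout step
actor_rollout_ref:
  rollout:
    n: 5                     # >1 samples per prompt for group sampling
  actor:
    ppo_mini_batch_size: 256 # global mini-batch size for actor updates
    clip_ratio: 0.2
    loss_agg_mode: token-mean
    use_kl_loss: True        # KL regularization in the loss, not the reward
    kl_loss_coef: 0.001
    kl_loss_type: low_var_kl
```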
![image](https://github.com/user-attachments/assets/16aebad1-0da6-4eb3-806d-54a74e712c2d)

- `actor_rollout_ref.rollout.n`: For each prompt, sample n times. Defaults to 1. For GRPO, please set it to a value larger than 1 for group sampling.
- `data.train_batch_size`: The global batch size of prompts used to generate a set of sampled trajectories/rollouts. The number of responses/trajectories is `data.train_batch_size * actor_rollout_ref.rollout.n`.
- `actor_rollout_ref.actor.ppo_mini_batch_size`: The set of sampled trajectories is split into multiple mini-batches with batch_size=ppo_mini_batch_size for PPO actor updates. The ppo_mini_batch_size is a global size across all workers.
- `actor_rollout_ref.actor.ppo_epochs`: Number of epochs for GRPO updates on one set of sampled trajectories for the actor.
- `actor_rollout_ref.actor.clip_ratio`: The GRPO clip range. Defaults to 0.2.
- `algorithm.adv_estimator`: Defaults to gae. Please set it to grpo instead.
- `actor_rollout_ref.actor.loss_agg_mode`: Defaults to "token-mean". Options include "token-mean", "seq-mean-token-sum", "seq-mean-token-mean". The original GRPO paper takes the sample-level loss (seq-mean-token-mean), which may be unstable in long-CoT scenarios. All GRPO example scripts provided in verl use the default "token-mean" loss aggregation instead.

Instead of adding a KL penalty to the reward, GRPO regularizes by directly adding the KL divergence between the trained policy and the reference policy to the loss:

- `actor_rollout_ref.actor.use_kl_loss`: Whether to use a KL loss in the actor. When used, KL is not applied in the reward function. Defaults to False. Please set it to True for GRPO.
- `actor_rollout_ref.actor.kl_loss_coef`: The coefficient of the KL loss. Defaults to 0.001.
- `actor_rollout_ref.actor.kl_loss_type`: Supports kl(k1), abs, mse(k2), low_var_kl(k3) and full. How to calculate the KL divergence between actor and reference policy. See this blog post for a detailed analysis: http://joschu.net/blog/kl-approx.html

## Advanced Extensions

### DrGRPO

[Understanding R1-Zero-Like Training: A Critical Perspective](https://arxiv.org/pdf/2503.20783) claims there is an optimization bias in GRPO that leads to artificially longer responses, especially for incorrect outputs. This inefficiency stems from the way GRPO calculates advantages using group-based reward normalization. Instead, DrGRPO aggregates token-level losses by normalizing with a global constant to eliminate length bias. Configure the following to enable DrGRPO, with all other parameters the same as GRPO's:

- `actor_rollout_ref.actor.loss_agg_mode`: "seq-mean-token-sum-norm", which turns off seq-dim averaging
- `actor_rollout_ref.actor.use_kl_loss`: Please set it to False for DrGRPO
- `algorithm.norm_adv_by_std_in_grpo`: False, which turns off standard-deviation normalization

## Reference Example

Qwen2.5 GRPO training log and commands: [link](https://github.com/eric-haibin-lin/verl-data/blob/experiments/gsm8k/qwen2-7b-fsdp2.log)

```bash
bash examples/grpo_trainer/run_qwen3-8b.sh
```

For more reference performance, please see https://verl.readthedocs.io/en/latest/algo/baseline.html

================================================
FILE: verl_rl/docs/algo/opo.md
================================================
# On-Policy RL with Optimal Reward Baseline (OPO)

Last updated: 06/02/2025.

Loose on-policy constraints and suboptimal baselines in reinforcement learning often lead to training instability such as large policy shifts and entropy collapse. OPO addresses these challenges by using exact on-policy training with the theoretically optimal reward baseline for advantage estimation. It achieves lower policy shifts and higher output entropy, encouraging more diverse and less repetitive responses.

OPO uses group sampling to generate multiple outputs for each input, like GRPO. Unlike group-based algorithms that typically use the mean reward of a group as the baseline, OPO employs a theoretically optimal baseline: the length-weighted reward of the group. It also omits standard-deviation normalization. By adopting these two key components, OPO enables training a single policy model with the objective of maximizing only the expected reward. For more details, refer to the original paper [On-Policy RL with Optimal Reward Baseline](https://arxiv.org/pdf/2505.23585).
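Reading "length-weighted reward of the group" concretely (a sketch of the idea; see the paper for the exact derivation and notation): for a group of $G$ responses with lengths $l_i$ and rewards $R_i$, the baseline and advantages would be

$$b^{*} = \frac{\sum_{i=1}^{G} l_i R_i}{\sum_{i=1}^{G} l_i}, \qquad A_i = R_i - b^{*},$$

with no division by the group's reward standard deviation.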
## Key Components

- Exact On-Policy Training: always generates responses from the current policy, without using any pre-generated or off-policy data.
- Optimal Reward Baseline: uses the length-weighted reward of the group as the baseline for normalizing rewards.

## Configuration

To configure OPO within the framework, use the following YAML settings. These parameters are crucial for enabling exact on-policy training and activating the optimal reward baseline.

```yaml
algorithm:
  adv_estimator: opo # Use OPO for optimal reward baseline
data:
  train_batch_size: 1024
actor_rollout_ref:
  actor:
    ppo_mini_batch_size: 1024 # ppo_mini_batch_size should equal train_batch_size to enable exact on-policy training
    entropy_coeff: 0 # disable entropy regularization
    use_kl_loss: False # disable kl regularization
    kl_loss_coef: 0
```

## Advanced Extensions

OPO can also be extended to other algorithms like RLOO and Reinforce++: it only requires adjusting their configurations to enable exact on-policy training and incorporating the optimal length-weighted reward baseline, with minimal modifications to their advantage estimation functions.

================================================
FILE: verl_rl/docs/algo/ppo.md
================================================
# Proximal Policy Optimization (PPO)

Last updated: 06/19/2025.

Proximal Policy Optimization (PPO) is a family of policy gradient methods for reinforcement learning, proposed by OpenAI in 2017. PPO strikes a balance between simplicity, stability, and performance, making it one of the most widely used algorithms in modern RL applications, including large-scale language model fine-tuning.

Traditional policy gradient methods like REINFORCE or Vanilla Policy Gradient suffer from:

- High variance and sample inefficiency.
- Instability due to large policy updates.

PPO addresses these problems using a clipped surrogate objective that avoids overly large updates without requiring second-order derivatives.

For more technical details regarding PPO, we suggest reading the introduction in the [OpenAI Spinning Up tutorial](https://spinningup.openai.com/en/latest/algorithms/ppo.html), and the paper [Proximal Policy Optimization Algorithms](https://arxiv.org/abs/1707.06347).

## Key Components

- Actor-Critic Architecture: PPO requires both an actor model (policy) and a critic model (value function). This differs from other algorithms like GRPO and RLOO that don't require a critic model.
- Generalized Advantage Estimation (GAE): PPO uses GAE to compute advantage values, which helps reduce variance in policy gradient estimates while maintaining low bias.
- Clipped Surrogate Objective: The core of PPO is implemented through the clipped surrogate objective function that limits policy updates.
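For reference, the standard clipped surrogate objective (the textbook form from the PPO paper, reproduced here for convenience), with probability ratio $r_t(\theta)$ and advantage estimate $\hat{A}_t$:

$$L^{\mathrm{CLIP}}(\theta) = \mathbb{E}_t\left[\min\left(r_t(\theta)\,\hat{A}_t,\ \mathrm{clip}\big(r_t(\theta),\,1-\epsilon,\,1+\epsilon\big)\,\hat{A}_t\right)\right], \qquad r_t(\theta) = \frac{\pi_\theta(a_t \mid s_t)}{\pi_{\theta_{\mathrm{old}}}(a_t \mid s_t)},$$

where $\epsilon$ corresponds to the `clip_ratio` configuration described below.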
## Configuration

Note that all configs containing `micro_batch_size` are used to configure the maximum sample or token count per forward or backward pass to avoid GPU OOMs; their values should not change algorithmic/convergence behavior.

Most critic configs are similar to those of the actor. Note that the critic model is omitted from the figure below.

![image](https://github.com/user-attachments/assets/16aebad1-0da6-4eb3-806d-54a74e712c2d)

- `data.train_batch_size`: The global batch size of prompts used to generate a set of sampled trajectories/rollouts. The number of responses/trajectories is `data.train_batch_size * actor_rollout_ref.rollout.n`.
- `actor_rollout_ref.actor.ppo_mini_batch_size`: The set of sampled trajectories is split into multiple mini-batches with batch_size=ppo_mini_batch_size for PPO actor updates. The ppo_mini_batch_size is a global size across all workers.
- `critic.ppo_mini_batch_size`: The set of sampled trajectories is split into multiple mini-batches with batch_size=ppo_mini_batch_size for PPO critic updates. The ppo_mini_batch_size is a global size across all workers.
- `actor_rollout_ref.actor.clip_ratio`: The PPO clip range. Defaults to 0.2.
- `actor_rollout_ref.actor.ppo_epochs`: Number of epochs for PPO updates on one set of sampled trajectories for the actor.
- `critic.ppo_epochs`: Number of epochs for PPO updates on one set of sampled trajectories for the critic. Defaults to `actor_rollout_ref.actor.ppo_epochs`.
- `algorithm.gamma`: The discount factor.
- `algorithm.lam`: The lambda term that trades off between bias and variance in the GAE estimator.
- `algorithm.adv_estimator`: Supports gae, grpo, reinforce_plus_plus, reinforce_plus_plus_baseline, rloo.

## Advanced Extensions

### KL Divergence Control

Options to prevent the policy from diverging too far from a reference policy. Two mechanisms are available: the KL reward penalty and the KL loss. For more technical details, see [Training language models to follow instructions with human feedback](https://arxiv.org/abs/2203.02155).

Options to use a KL loss for KL divergence control:

- `actor_rollout_ref.actor.use_kl_loss`: Whether to use a KL loss in the actor. When used, KL is not applied in the reward function. Defaults to False.
- `actor_rollout_ref.actor.kl_loss_coef`: The coefficient of the KL loss. Defaults to 0.001.
- `actor_rollout_ref.actor.kl_loss_type`: Supports kl(k1), abs, mse(k2), low_var_kl(k3) and full. How to calculate the KL divergence between actor and reference policy. See this blog post for a detailed analysis: http://joschu.net/blog/kl-approx.html

Options to use a KL penalty in the reward:

- `algorithm.use_kl_in_reward`: Whether to enable the in-reward KL penalty. Defaults to False.
- `algorithm.kl_penalty`: Supports kl(k1), abs, mse(k2), low_var_kl(k3) and full. This defines the way to calculate the KL divergence between actor and reference policy. For specific options, refer to `kl_penalty` in core_algos.py. See this blog post for a detailed analysis: http://joschu.net/blog/kl-approx.html
- `algorithm.kl_ctrl.kl_coef`: The (initial) coefficient of the in-reward KL penalty. Defaults to 0.001.
- `algorithm.kl_ctrl.type`: 'fixed' for FixedKLController and 'adaptive' for AdaptiveKLController.
- `algorithm.kl_ctrl.horizon`: See the source code of AdaptiveKLController for details.
- `algorithm.kl_ctrl.target_kl`: See the source code of AdaptiveKLController for details.
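A consolidated sketch of the two mechanisms above (illustrative values assembled from the options just listed; enable one mechanism or the other, not both):

```yaml
# Option A: KL divergence as a loss term in the actor
actor_rollout_ref:
  actor:
    use_kl_loss: True
    kl_loss_coef: 0.001
    kl_loss_type: low_var_kl

# Option B: KL divergence as an in-reward penalty
algorithm:
  use_kl_in_reward: True
  kl_penalty: kl
  kl_ctrl:
    type: fixed
    kl_coef: 0.001
```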
### Dual-clip PPO

Dual-clip PPO extends the clipped objective by applying an additional bound when the advantage is less than zero: the per-token objective is floored at a constant `c` times the advantage, so that multiplying a negative advantage by a very large policy ratio cannot produce an arbitrarily large update.

![image](https://github.com/user-attachments/assets/fc232181-d8b0-4307-8dd2-4dc0a4c1c139)

- `actor_rollout_ref.actor.clip_ratio_c`: the lower-bound constant `c` for dual-clip PPO; defaults to 3.0.

## Reference Example

Qwen2.5 training log and commands: [link](https://github.com/eric-haibin-lin/verl-data/blob/experiments/gsm8k/Qwen2.5-0.5B-bsz256_2-prompt1024-resp512-0.567.log)

```bash
bash run_gemma.sh trainer.n_gpus_per_node=1 \
  actor_rollout_ref.rollout.tensor_model_parallel_size=1 \
  trainer.logger=console \
  critic.model.path=Qwen/Qwen2.5-0.5B-Instruct \
  actor_rollout_ref.model.path=Qwen/Qwen2.5-0.5B-Instruct \
  data.train_batch_size=256 \
  actor_rollout_ref.actor.ppo_mini_batch_size=64 \
  actor_rollout_ref.actor.ppo_micro_batch_size=2 \
  critic.ppo_micro_batch_size=2
```

Reference performance with verl v0.2:

| Model | Method | Score | Link |
|----------------------------|------------------|-------|------|
| Qwen/Qwen2.5-0.5B-Instruct | pretrained model | 36.4 | [Qwen Blog](https://qwenlm.github.io/blog/qwen2.5-llm/) |
| Qwen/Qwen2.5-0.5B-Instruct | PPO | 56.7 | [PPO Command and Logs](https://github.com/eric-haibin-lin/verl-data/blob/experiments/gsm8k/Qwen2.5-0.5B-bsz256_2-prompt1024-resp512-0.567.log) |

================================================
FILE: verl_rl/docs/algo/spin.md
================================================
# Recipe: Self-Play Fine-Tuning (SPIN)

Last updated: 05/31/2025.

`verl` provides a recipe inspired by the paper **"Self-Play Fine-Tuning Converts Weak Language Models to Strong Language Models"** (SPIN). SPIN is a language model fine-tuning algorithm that enables iterative self-improvement through a self-play mechanism inspired by game theory.

**Core Idea:** Models learn by playing against themselves, reducing reliance on external preference datasets or stronger teacher models:

1. **Synthetic Data Generation:** The current model generates responses, creating its own training data from previous iterations.
2. **Two-Player Game Setup:** A game involving two players played by a single LLM.
3. **Iterative Training:** The model progressively improves by refining its policy, with each iteration's model becoming the opponent for the next iteration.

Paper Authors: [Zixiang Chen](https://github.com/uclaml/SPIN)\*, [Yihe Deng](https://github.com/uclaml/SPIN)\*, [Huizhuo Yuan](https://scholar.google.com/citations?user=8foZzX4AAAAJ)\*, [Kaixuan Ji](https://scholar.google.com/citations?user=FOoKDukAAAAJ), [Quanquan Gu](https://web.cs.ucla.edu/~qgu/)

[[Webpage](https://uclaml.github.io/SPIN/)] [[Huggingface](https://huggingface.co/papers/2401.01335)] [[Paper](https://arxiv.org/abs/2401.01335)] [[Original Implementation](https://github.com/uclaml/SPIN)]

verl Implementation Authors: [Chendong Wang](https://cdwang96.github.io/), [Chenyang Zhao](https://github.com/zhaochenyang20)

---

## Key Function (compute_online_dpo_loss) and Related Works

SPIN (Chen et al., 2024) proposes an iterative self-play mechanism to fine-tune language models. In each iteration, SPIN's training objective, when using a logistic loss function, is equivalent to the Direct Preference Optimization (DPO) loss (Rafailov et al., 2023).
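For reference, the standard DPO objective (the form from Rafailov et al., 2023) that each iteration optimizes, where $\pi_\theta$ is the current policy, $\pi_{\mathrm{ref}}$ the reference policy, and $(y_w, y_l)$ the chosen/rejected responses for prompt $x$:

$$\mathcal{L}_{\mathrm{DPO}}(\theta) = -\,\mathbb{E}_{(x,\,y_w,\,y_l)}\left[\log \sigma\left(\beta \log \frac{\pi_\theta(y_w \mid x)}{\pi_{\mathrm{ref}}(y_w \mid x)} - \beta \log \frac{\pi_\theta(y_l \mid x)}{\pi_{\mathrm{ref}}(y_l \mid x)}\right)\right]$$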
This `verl` recipe realizes SPIN's core concept by using the DPO loss iteratively (Xu et al., 2023; Xiong et al., 2023; Snorkel AI, 2024): in each iteration, we fine-tune the LLM with the DPO loss for preference optimization. Notably, Xu et al. (2023) explored iterative preference optimization with pairwise cringe loss, while Xiong et al. (2023) discussed how to bridge theory and practice for RLHF under KL constraints using iterative training. The concept of iterative preference learning was also explored in online DPO (Guo et al., 2024), which focuses on direct alignment from online AI feedback. In online DPO, preference data is dynamically updated during training, allowing the model to learn from its own generated data.

Specifically, we developed the **`compute_online_dpo_loss`** function and built this SPIN recipe on top of it. By incorporating online preference generation, this approach enables continuously refining language models without relying on fixed external preference datasets.

**Reference Papers:**

* [Self-Play Fine-Tuning Converts Weak Language Models to Strong Language Models](https://arxiv.org/abs/2401.01335) (Chen et al., 2024)
* [Direct Preference Optimization: Your Language Model is Secretly a Reward Model](https://arxiv.org/abs/2305.18290) (Rafailov et al., 2023)
* [Some things are more cringe than others: Preference optimization with the pairwise cringe loss](https://arxiv.org/abs/2312.16682) (Xu et al., 2023)
* [Iterative preference learning from human feedback: Bridging theory and practice for RLHF under KL-constraint](https://arxiv.org/abs/2312.11456) (Xiong et al., 2023)
* [Snorkel-Mistral-PairRM-DPO](https://huggingface.co/snorkelai/Snorkel-Mistral-PairRM-DPO) (Snorkel AI, 2024)
* [Direct language model alignment from online AI feedback](https://arxiv.org/abs/2402.04792) (Guo et al., 2024)

## Our Online DPO Implementation

Our `compute_online_dpo_loss` function adapts `verl`'s existing PPO infrastructure (based on `verl` v0.3.0.post1) for this iterative online DPO. Key aspects of our implementation include:

* **No Critic:** Unlike PPO, we omit the value function critic.
* **Dynamic Reference Model:** An explicit reference policy (`ref_policy_wg`) is used for the DPO loss. This reference model's weights can be periodically updated from the actor (`ref_update_freq`), providing a dynamic baseline.
* **Online Preference Generation:** The `compute_onlineDPO_pref` function (in `core_algos.py`) dynamically creates chosen/rejected pairs based on a reward source (e.g., rule-based ranking for math problems).
* **DPO Loss Integration:** We replace PPO's policy loss with our `compute_online_dpo_loss` (in `core_algos.py`) within the actor update (`dp_actor.py`), directly optimizing the policy using the generated preferences.
* **Iterative Training Orchestration:** The `SpinTrainer` (in `spin_trainer.py`) manages the entire self-play loop: generation, preference labeling, optional reference model updates, and policy updates, enabling continuous self-improvement aligned with SPIN's principles.

---

## Algorithm

This recipe implements an online algorithm adapted to the `verl` reinforcement learning framework, providing an alternative to PPO for fine-tuning language models.

**Online Loop:** Instead of maximizing a scalar reward signal as in PPO, this approach directly optimizes the policy model to align with preference data generated *online* during training:

1. **Generation:** The current model generates multiple responses for each prompt in a batch.
2. **Preference Labeling:** A function evaluates the generated responses to determine which one is preferred (chosen) and which is dispreferred (rejected). This can be done with a reward function or implicit ranking based on specific rules (in this recipe, rule-based ranking on math problems).
3. **Update:** The preference tuple (`prompt`, `chosen_response`, `rejected_response`) is used to update the actor model via `compute_online_dpo_loss`, comparing against a reference model.

**Connection with SPIN:** Instead of only using a fixed target data distribution, the online generation loop in step 2 dynamically changes the target data distribution through the chosen preference-labeling method (here, rule-based ranking on math problems, selecting the better response). This explores the direction mentioned in Section 7 of the SPIN paper about "dynamically changing target data distribution" to potentially lift LLM performance beyond the fixed human-annotated data ceiling.

---

## Reproduce the Experiment (Example Setup)

The following steps outline how to set up the environment and run the SPIN recipe, based on the provided test log using GSM8K and Qwen2.5-3B-Instruct.

1. **Setup Environment (Example using Docker):**

```bash
# Start a container with GPU access and shared memory
docker run -it --name spin_test --gpus all \
    --shm-size=32g \
    --ipc=host \
    -v /path/to/host/.cache:/root/.cache \
    -e HF_TOKEN= \
    lmsysorg/sglang:latest \
    /bin/bash

# Inside the container or on your host machine:
# Ensure /tmp is writable
mkdir -p /tmp
chmod 1777 /tmp

# Install Python 3.10 (if not present) and venv
sudo apt update
sudo apt install -y python3.10 python3.10-venv tmux
python3 -m ensurepip --upgrade

# Create and activate a virtual environment
python3 -m venv ~/.python/spin_env
source ~/.python/spin_env/bin/activate

# Install uv (fast package installer)
python3 -m pip install uv
```

2. **Install verl and Dependencies:**

```bash
# Clone the verl repository and checkout the spin branch
cd ~
git clone git@github.com:volcengine/verl.git && cd verl

# Install flash-attn (handle potential build issues)
python3 -m uv pip install wheel packaging
python3 -m uv pip install flash-attn --no-build-isolation --no-deps

# Install verl with sglang extras
python3 -m uv pip install -e ".[sglang]"
```

*Note: If `flash-attn` installation fails, try the manual steps again or consult its documentation.*

3. **Login & Download Data/Model:**

```bash
# Login to Weights & Biases (optional, for logging)
export WANDB_API_KEY=
# wandb login

# Download the GSM8K dataset
python3 examples/data_preprocess/gsm8k.py --local_dir ~/data/gsm8k # Adjusted path

# Download the base model (Example: Qwen2.5-3B-Instruct)
huggingface-cli download Qwen/Qwen2.5-3B-Instruct --local-dir $HOME/models/Qwen2.5-3B-Instruct
```

4. **Configure:**
   * Modify the configuration file (e.g., `config/spin_trainer.yaml` or the one specified in the run script) with correct paths to your downloaded model, data, desired hyperparameters (`dpo_beta`, learning rate, etc.), and distributed training settings (nodes, GPUs per node).
   * Pay attention to `actor_rollout_ref.model_path`, `data` paths, the `reward_model` config (if using one), and `trainer.ref_update_freq`.
5. **Run Training:**

```bash
# Set CUDA visible devices (adjust based on your hardware and config)
export CUDA_VISIBLE_DEVICES=0,1,2,3

# Launch the training script (e.g., test.sh or a custom script)
# Ensure test.sh points to the correct config and main script
bash recipe/spin/run_spin.sh
```

---

## Configuration

* The primary configuration is typically managed through a YAML file specified in the launch script (e.g., `config/spin_trainer.yaml`).
* Key configuration sections:
  * `data`: Paths to training/validation prompt files, batch sizes, sequence lengths.
  * `actor_rollout_ref`: Paths to the base model (used for actor and initial reference), FSDP settings, optimization parameters (learning rate, scheduler).
  * `reward_model`: Configuration for the reward model used for online preference labeling (path, batch size, etc.). Can be omitted if using a simpler reward function.
  * `algorithm`: DPO-specific hyperparameters like `dpo_beta`, `dpo_loss_type`.
  * `trainer`: Distributed training settings (nodes, GPUs per node), logging (WandB), checkpointing frequency, and `ref_update_freq` (set > 0 to enable periodic reference model updates from the actor).

---

## Key Files

* `main_spin.py`: Main entry point using Hydra to load the config and launch the `SpinTrainer`.
* `spin_trainer.py`: Defines the `SpinTrainer` class, orchestrating the Online DPO training loop.
* `fsdp_workers.py`: Implements Ray workers (Actor, Reference), potentially using FSDP.
* `dp_actor.py`: Contains the actor class, including the DPO policy update logic.
* `core_algos.py`: Includes helper functions for `compute_online_dpo_loss` and `compute_onlineDPO_pref`.
* `config/spin_trainer.yaml` (or similar): Main Hydra configuration file for the recipe.
* `run_spin.sh` (or similar): Example bash script for launching a training run.
* `README.md`: This file.

---

## Acknowledgement

We sincerely thank the contribution and guidance from the `verl` community and advisors, including (adapted from SPPO):

* [Zixiang Chen](https://sites.google.com/view/zxchen)
* [Yuhao Yang](https://github.com/yhyang201)
* [Yifan Zhang](https://github.com/yifanzhang-pro)
* [Yongan Xiang](https://github.com/BearBiscuit05)
* [Junrong Lin](https://github.com/ocss884)
* [Yuxuan Tong](https://github.com/tongyx361)
* [Guangming Shen](https://github.com/PeterSH6)
* [Biao He](https://www.linkedin.com/in/biao-he/)
* [Qingquan Song](https://qingquansong.github.io/)
* [Chenyang Zhao](https://zhaochenyang20.github.io/Chayenne/)
* [Quanquan Gu](https://web.cs.ucla.edu/~qgu/)

================================================
FILE: verl_rl/docs/algo/sppo.md
================================================
# Recipe: Self-Play Preference Optimization (SPPO)

Last updated: 05/28/2025.

verl provides a community recipe implementation for the paper [Self-Play Preference Optimization for Language Model Alignment](https://arxiv.org/abs/2405.00675). SPPO can significantly enhance the performance of an LLM without strong external signals such as responses or preferences from GPT-4. It can outperform models trained with iterative direct preference optimization (DPO), among other methods. SPPO is theoretically grounded, ensuring that the LLM can converge to the von Neumann winner (i.e., Nash equilibrium) under general, potentially intransitive preferences, and it is empirically validated through extensive evaluations on multiple datasets.
Paper Authors: [Yue Wu](https://yuewu.us/)\*, [Zhiqing Sun](https://www.cs.cmu.edu/~zhiqings/)\*, [Huizhuo Yuan](https://scholar.google.com/citations?user=8foZzX4AAAAJ)\*, [Kaixuan Ji](https://scholar.google.com/citations?user=FOoKDukAAAAJ), [Yiming Yang](https://www.cs.cmu.edu/~yiming/), [Quanquan Gu](https://web.cs.ucla.edu/~qgu/)

verl Implementation Authors: [Yuhao Yang](https://github.com/yhyang201), [Chenyang Zhao](https://github.com/zhaochenyang20)

[[Webpage](https://uclaml.github.io/SPPO/)] [[Huggingface](https://huggingface.co/papers/2405.00675)] [[Paper](https://arxiv.org/abs/2405.00675)] [[Original Implementation](https://github.com/uclaml/SPPO)]

## Reproduce the Experiment

We evaluate the performance of SPPO on the MATH dataset. Starting from an initial score of 46.6 with Qwen2.5-7B-Instruct, we achieve a score of 65.6 after 20 epochs of training, placing our model approximately in the top 20 on the [MATH leaderboard](https://paperswithcode.com/sota/math-word-problem-solving-on-math). It's important to note that verl's internal evaluation metrics may not perfectly align with the official evaluation methodology for Qwen2.5-7B-Instruct. Therefore, for consistency and fair comparison, we report only the results based on verl's evaluation framework.

```
git clone git@github.com:volcengine/verl.git
cd verl
python3 -m uv pip install -e ".[sglang]"
export WANDB_API_KEY=
python3 examples/data_preprocess/math_dataset.py --local_dir ~/data/math
huggingface-cli download Qwen/Qwen2.5-7B-Instruct --local-dir $HOME/models/Qwen2.5-7B-Instruct
export CUDA_VISIBLE_DEVICES=0,1,2,3
bash recipe/sppo/run_qwen2.5-7b_rm.sh
```

Note that the installation may occasionally fail to install flash-attn. If this happens, you can install it manually by running:

```bash
python3 -m uv pip install wheel
python3 -m uv pip install packaging
python3 -m uv pip install flash-attn --no-build-isolation --no-deps
```

## Acknowledgement

We sincerely thank the contribution and guidance from:

- [Yue Wu](https://yuewu.us/)
- [Chendong Wang](https://cdwang96.github.io/)
- [Yifan Zhang](https://github.com/yifanzhang-pro)
- [Yongan Xiang](https://github.com/BearBiscuit05)
- [Junrong Lin](https://github.com/ocss884)
- [Yuxuan Tong](https://github.com/tongyx361)
- [Guangming Shen](https://github.com/PeterSH6)
- [Biao He](https://www.linkedin.com/in/biao-he/)
- [Qingquan Song](https://qingquansong.github.io/)
- [Quanquan Gu](https://web.cs.ucla.edu/~qgu/)

================================================
FILE: verl_rl/docs/amd_tutorial/amd_build_dockerfile_page.rst
================================================
Getting started with AMD (ROCM Kernel)
=====================================================

Last updated: 07/06/2025.

Author: `Yusheng Su `_

Setup
-----

If you run on AMD GPUs (MI300) with the ROCm platform, you cannot use the previous quickstart to run verl. Follow the steps below to build a Docker image, and set ``RAY_EXPERIMENTAL_NOSET_ROCR_VISIBLE_DEVICES`` or ``RAY_EXPERIMENTAL_NOSET_HIP_VISIBLE_DEVICES`` when starting Ray in verl's RLHF training.

docker/Dockerfile.rocm
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: bash

    FROM "rlfoundation.azurecr.io/rocm6.3.4:vllm-0.8.5-numa-patch-ubuntu-22.04"

    SHELL ["/bin/bash", "-ceuxo", "pipefail"]

    ENV MAX_JOBS=512
    ENV PATH="/usr/local/python3.12/bin:$PATH"

    RUN ln -sf /usr/bin/python3.12 /usr/bin/python && \
        ln -sf /usr/bin/pip3.12 /usr/bin/pip

    ############################################
    RUN apt-get update
    RUN apt-get install -y pkg-config liblzma-dev
    ############################################

    ###########################################
    ##########Install TransformerEngine########
    ###########################################
    WORKDIR /workspace/
    # transformer-engine install
    # https://github.com/ROCm/TransformerEngine
    RUN rm -rf TransformerEngine
    RUN git clone --recursive https://github.com/ROCm/TransformerEngine.git
    WORKDIR /workspace/TransformerEngine
    RUN git checkout 236178e5
    # git checkout bb061ade
    # git checkout 864405c
    ENV NVTE_FRAMEWORK=pytorch
    ENV NVTE_ROCM_ARCH=gfx942
    ENV NVTE_USE_HIPBLASLT=1
    ENV NVTE_USE_ROCM=1
    # export CMAKE_PREFIX_PATH="/opt/rocm:/opt/rocm/hip:/usr/local:/usr:${CMAKE_PREFIX_PATH:-}"
    ENV CMAKE_PREFIX_PATH="/opt/rocm:/opt/rocm/hip:/usr/local:/usr"
    RUN MAX_JOBS=${MAX_JOBS} pip install . -vvv
    WORKDIR /workspace/
    ###########################################
    ###########################################
    ###########################################

    ####################################################################################
    ################Install vllm - sglang require vllm 0.6.7 dependency#################
    ####################################################################################
    #### Require vllm 0.6.7 - checkout 113274a0
    WORKDIR /workspace/
    RUN rm -rf vllm
    RUN pip uninstall -y vllm
    # Refer to here (down-grade vllm to 0.6.3): https://docs.vllm.ai/en/v0.6.3/getting_started/amd-installation.html
    RUN git clone https://github.com/ROCm/vllm.git
    # git clone https://github.com/vllm-project/vllm.git
    WORKDIR /workspace/vllm
    RUN git checkout 113274a0
    ENV PYTORCH_ROCM_ARCH="gfx90a;gfx942"
    #ENV MAX_JOBS=512
    ENV MAX_JOBS=${MAX_JOBS}
    RUN pip install "boto3>=1.26.0"
    RUN pip install setuptools_scm
    # will add src into py. You can delete the repo
    RUN python3 setup.py install
    WORKDIR /workspace/
    ####################################################################################
    ####################################################################################
    ####################################################################################

    ###########################################
    ############For hack docker################
    ###########################################
    RUN pip install setuptools==75.8.0
    ###########################################
    ###########################################
    ###########################################

    ###########################################
    ############build sglang###################
    ###########################################
    # Set environment variables
    ENV BASE_DIR=/sgl-workspace
    ENV BUILD_TYPE=all
    ENV SGL_REPO=https://github.com/sgl-project/sglang
    ENV SGL_BRANCH=v0.4.6.post5
    ENV TRITON_REPO=https://github.com/ROCm/triton.git
    ENV TRITON_COMMIT=improve_fa_decode_3.0.0
    ENV AITER_REPO=https://github.com/ROCm/aiter.git
    ENV AITER_COMMIT=v0.1.2
    # v0.1.2 version - commit id: 9d11f47
    # ENV AITER_COMMIT=9d11f47
    ENV HIP_FORCE_DEV_KERNARG=1
    ENV HSA_NO_SCRATCH_RECLAIM=1
    ENV SGLANG_SET_CPU_AFFINITY=1
    ENV SGLANG_ALLOW_OVERWRITE_LONGER_CONTEXT_LEN=1
    ENV NCCL_MIN_NCHANNELS=112
    ENV MOE_PADDING=1
    ENV VLLM_FP8_PADDING=1
    ENV VLLM_FP8_ACT_PADDING=1
    ENV VLLM_FP8_WEIGHT_PADDING=1
    ENV VLLM_FP8_REDUCE_CONV=1
    ENV TORCHINDUCTOR_MAX_AUTOTUNE=1
    ENV TORCHINDUCTOR_MAX_AUTOTUNE_POINTWISE=1
    ENV HIPCC_COMPILE_FLAGS_APPEND="--offload-arch=gfx942"
    ENV AMDGPU_TARGETS=gfx942
    ENV ROCM_ARCH=gfx942
    ENV PYTORCH_ROCM_ARCH="gfx90a;gfx942"

    # Switch to working directory
    WORKDIR /sgl-workspace

    # Clean and create directory
    RUN rm -rf /sgl-workspace && mkdir -p /sgl-workspace

    # Clone and build sglang
    RUN git clone ${SGL_REPO} \
        && cd sglang \
        && git checkout ${SGL_BRANCH} || echo "Using default branch" \
        && cd sgl-kernel \
        && rm -f pyproject.toml \
        && mv pyproject_rocm.toml pyproject.toml \
        && python setup_rocm.py install \
        && cd .. \
        && if [ "$BUILD_TYPE" = "srt" ]; then \
               python -m pip --no-cache-dir install -e "python[srt_hip]"; \
           else \
               python -m pip --no-cache-dir install -e "python[all_hip]"; \
           fi \
        && cd /sgl-workspace \
        && cp -r /sgl-workspace/sglang /sglang \
        && python -m pip cache purge

    # Install common Python packages
    RUN pip install IPython orjson python-multipart torchao pybind11

    # Rebuild Triton
    RUN pip uninstall -y triton || true \
        && git clone ${TRITON_REPO} \
        && cd triton \
        && git checkout ${TRITON_COMMIT} \
        && cd python \
        && python3 setup.py install \
        && cd /sgl-workspace

    # ENV HIPCC_COMPILE_FLAGS_APPEND="--offload-arch=gfx942 --amdgpu-lower-module-lds-strategy=1"
    # ENV HIPCC_COMPILE_FLAGS_APPEND="--offload-arch=gfx942"

    # Build aiter
    # version: Commit 9d11f47
    # && git checkout ${AITER_COMMIT} \
    RUN pip uninstall -y aiter || true
    RUN git clone ${AITER_REPO} \
        && cd aiter \
        && git checkout ${AITER_COMMIT} \
        && git submodule sync \
        && git submodule update --init --recursive \
        && PREBUILD_KERNELS=1 GPU_ARCHS=gfx942 python3 setup.py install \
        && cd /sgl-workspace

    # Copy MI300X config
    RUN find /sgl-workspace/sglang/python/sglang/srt/layers/quantization/configs/ \
         /sgl-workspace/sglang/python/sglang/srt/layers/moe/fused_moe_triton/configs/ \
         -type f -name '*MI300X*' | \
         xargs -I {} sh -c 'vf_config=$(echo "$1" | sed "s/MI300X/MI300X_VF/"); cp "$1" "$vf_config"' -- {}

    # Environment setup complete.
    RUN echo "Environment setup complete."

    WORKDIR /workspace/
    ###########################################
    ###########################################
    ###########################################

    ###########################################
    ###############vllm v0.8.5#################
    ###########################################
    WORKDIR /workspace/
    ENV VLLM_TARGET_DEVICE=rocm
    ENV ROCM_PATH=/opt/rocm
    ENV SETUPTOOLS_SCM_PRETEND_VERSION=0.8.5.dev
    # Find the repo path in: DockerFile/Dockerfile.rocm_yang
    # RUN git clone https://github.com/RLFoundation/vllm-patch.git
    RUN pip uninstall -y vllm || true
    RUN rm -rf vllm-patch
    RUN git clone https://github.com/RLFoundation/vllm-patch.git \
        && cd vllm-patch \
        && git checkout v0.8.5-sleep-numa \
        && rm -rf build/ dist/ *.egg-info \
        && ln -sf /opt/rocm/lib/libamdhip64.so /usr/lib/libamdhip64.so \
        && SETUPTOOLS_SCM_PRETEND_VERSION=0.8.5.dev PYTORCH_ROCM_ARCH="gfx90a;gfx942" MAX_JOBS=${MAX_JOBS} python3 setup.py install
    # RUN SETUPTOOLS_SCM_PRETEND_VERSION=0.8.5.dev PYTORCH_ROCM_ARCH="gfx90a;gfx942" MAX_JOBS=${MAX_JOBS} python3 setup.py develop
    WORKDIR /workspace/
    ###########################################
    ###########################################
    ###########################################

    #########################################
    #### Install megatron-core##############
    #########################################
    RUN pip uninstall -y megatron-core && \
        git clone https://github.com/yushengsu-thu/Megatron-LM-amd_version.git && \
        cd Megatron-LM-amd_version && \
        pip install -vvv -e . && \
        cd /workspace/
    #########################################
    #########################################
    #########################################

    #######################################
    ################apex###################
    #######################################
    WORKDIR /workspace/
    RUN pip uninstall -y apex && \
        git clone https://github.com/ROCm/apex.git && \
        cd apex && \
        python setup.py install && \
        cd /workspace/
    #######################################
    #######################################
    #######################################

    ################################################################################
    ###########################Add torch_memory_saver##############################
    ################################################################################
    # Set environment variables
    ENV HIPCC_COMPILE_FLAGS_APPEND="--amdgpu-target=gfx90a;gfx942 -D__HIP_PLATFORM_AMD__"
    ENV CFLAGS="-D__HIP_PLATFORM_AMD__"
    ENV CXXFLAGS="-D__HIP_PLATFORM_AMD__"
    RUN pip install "git+https://github.com/YangWang92/torch_memory_saver_numa.git@numa"
    ################################################################################
    ################################################################################
    ################################################################################

    ########################################
    ######Install ray#######################
    ########################################
    # need to add this patch: https://github.com/ray-project/ray/pull/53531/files
    RUN pip uninstall ray -y
    RUN pip install "ray[data,train,tune,serve]>=2.47.0"
    ########################################
    ########################################
    ########################################

    ##########################################
    #######Install other dependencies#########
    ##########################################
    RUN pip install "tensordict==0.6.2" --no-deps && \
        pip install accelerate \
        codetiming \
        datasets \
        dill \
        hydra-core \
        liger-kernel \
        numpy \
        pandas \
        peft \
        "pyarrow>=15.0.0" \
        pylatexenc \
        torchdata \
        wandb \
        orjson \
        pybind11
    WORKDIR /workspace/
    RUN git clone https://github.com/volcengine/verl.git && \
        cd verl && \
        pip install -e .
    ##########################################
    ##########################################
    ##########################################

    WORKDIR /workspace/

    CMD ["/usr/bin/bash"]

Build the image:
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. code-block:: bash

    docker build -f docker/Dockerfile.rocm -t verl-rocm .

Run the container
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Note: You can pull the docker image from this DockerHub: [RLSys Foundation](https://hub.docker.com/u/yushengsuthu)

Pull the image:
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. code-block:: bash

    docker pull yushengsuthu/verl:verl-0.4.1_ubuntu-22.04_rocm6.3.4-numa-patch_vllm0.8.5_sglang0.4.6.post4
    docker tag yushengsuthu/verl:verl-0.4.1_ubuntu-22.04_rocm6.3.4-numa-patch_vllm0.8.5_sglang0.4.6.post4 verl-rocm:latest

Run the container
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Optional: Running without root and with user permissions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. code-block:: bash

    docker run --rm -it \
      --device /dev/dri \
      --device /dev/kfd \
      -p 8265:8265 \
      --group-add video \
      --cap-add SYS_PTRACE \
      --security-opt seccomp=unconfined \
      --privileged \
      -v $HOME/.ssh:/root/.ssh \
      -v $HOME:$HOME \
      --shm-size 128G \
      -w $PWD \
      verl-rocm \
      /bin/bash

(Optional) If you do not want to run as root and want to run as your own user, add ``-e HOST_UID=$(id -u)`` and ``-e HOST_GID=$(id -g)`` to the above docker launch script.

Example
-------

Due to special settings in AMD (ROCm) torch:

1. If your ``ray>=2.45.0`` (the default), you need to set ``RAY_EXPERIMENTAL_NOSET_HIP_VISIBLE_DEVICES`` when starting Ray in verl's RLHF training and add this [patch](https://github.com/ray-project/ray/pull/53531/files).
2. If your ``ray<2.45.0``, you need to set ``RAY_EXPERIMENTAL_NOSET_ROCR_VISIBLE_DEVICES`` when starting Ray in verl's RLHF training.

The inference engine ``$ENGINE`` can be ``vllm`` or ``sglang``. We use ``vllm`` as the default in the following examples.

PPO
~~~
.. code-block:: bash

    YOUR_PROJECT_NAME=r1-verl-ppo-upstream
    YOUR_RUN_NAME=r1-training_ppo-upstream
    # export HYDRA_FULL_ERROR=1
    export HIP_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
    # [ray] < 2.45.0
    #export RAY_EXPERIMENTAL_NOSET_ROCR_VISIBLE_DEVICES=1
    # [ray] >= 2.45.0
    export RAY_EXPERIMENTAL_NOSET_HIP_VISIBLE_DEVICES=1 # Patch with https://github.com/ray-project/ray/pull/52794

    GPUS_PER_NODE=8
    MODEL_PATH=Qwen/Qwen2.5-0.5B-Instruct
    python3 examples/data_preprocess/gsm8k.py --local_dir data/gsm8k
    python3 -c "import transformers; transformers.pipeline('text-generation', model='$MODEL_PATH')"
    ENGINE=vllm #sglang

    PYTHONUNBUFFERED=1 python3 -m verl.trainer.main_ppo \
        data.train_files=data/gsm8k/train.parquet \
        data.val_files=data/gsm8k/test.parquet \
        data.train_batch_size=256 \
        data.val_batch_size=1312 \
        data.max_prompt_length=512 \
        data.max_response_length=256 \
        actor_rollout_ref.model.path=$MODEL_PATH \
        actor_rollout_ref.actor.optim.lr=1e-6 \
        actor_rollout_ref.actor.ppo_mini_batch_size=64 \
        actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \
        actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=8 \
        actor_rollout_ref.rollout.tensor_model_parallel_size=1 \
        actor_rollout_ref.rollout.name=$ENGINE \
        actor_rollout_ref.rollout.gpu_memory_utilization=0.8 \
        actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \
        critic.optim.lr=1e-5 \
        critic.model.path=$MODEL_PATH \
        critic.ppo_micro_batch_size_per_gpu=4 \
        algorithm.kl_ctrl.kl_coef=0.001 \
        trainer.logger=console \
        trainer.project_name=$YOUR_PROJECT_NAME \
        trainer.experiment_name=$YOUR_RUN_NAME \
        trainer.val_before_train=False \
        trainer.n_gpus_per_node=$GPUS_PER_NODE \
        trainer.nnodes=1 \
        trainer.save_freq=10 \
        trainer.test_freq=10 \
        trainer.total_epochs=15 #2>&1 | tee verl_demo.log

GRPO
~~~~

.. code-block:: bash

    YOUR_PROJECT_NAME=r1-verl-grpo-upstream
    YOUR_RUN_NAME=r1-training_grpo-upstream
    # export HYDRA_FULL_ERROR=1
    # export FSDP_VERBOSE=1
    #export HIP_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
    # [ray] < 2.45.0
    #export RAY_EXPERIMENTAL_NOSET_ROCR_VISIBLE_DEVICES=1
    # [ray] >= 2.45.0
    export RAY_EXPERIMENTAL_NOSET_HIP_VISIBLE_DEVICES=1 # Patch with https://github.com/ray-project/ray/pull/52794

    GPUS_PER_NODE=8
    MODEL_PATH=Qwen/Qwen2.5-0.5B-Instruct
    # MODEL_PATH=Qwen/Qwen2-7B-Instruct
    python3 examples/data_preprocess/gsm8k.py --local_dir data/gsm8k
    python3 -c "import transformers; transformers.pipeline('text-generation', model='$MODEL_PATH')"
    ENGINE=vllm #sglang

    python3 -m verl.trainer.main_ppo \
        algorithm.adv_estimator=grpo \
        data.train_files=data/gsm8k/train.parquet \
        data.val_files=data/gsm8k/test.parquet \
        data.train_batch_size=1024 \
        data.val_batch_size=1312 \
        data.max_prompt_length=512 \
        data.max_response_length=1024 \
        actor_rollout_ref.model.path=$MODEL_PATH \
        actor_rollout_ref.actor.optim.lr=1e-6 \
        actor_rollout_ref.model.use_remove_padding=True \
        actor_rollout_ref.actor.ppo_mini_batch_size=256 \
        actor_rollout_ref.actor.use_dynamic_bsz=True \
        actor_rollout_ref.actor.ppo_max_token_len_per_gpu=24000 \
        actor_rollout_ref.actor.use_kl_loss=True \
        actor_rollout_ref.actor.kl_loss_coef=0.001 \
        actor_rollout_ref.actor.kl_loss_type=low_var_kl \
        actor_rollout_ref.model.enable_gradient_checkpointing=False \
        actor_rollout_ref.actor.fsdp_config.param_offload=False \
        actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
        actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
        actor_rollout_ref.rollout.name=$ENGINE \
        actor_rollout_ref.rollout.gpu_memory_utilization=0.8 \
        actor_rollout_ref.rollout.n=5 \
        actor_rollout_ref.ref.fsdp_config.param_offload=False \
        algorithm.kl_ctrl.kl_coef=0.001 \
        trainer.critic_warmup=0 \
        trainer.logger=console \
        trainer.project_name=$YOUR_PROJECT_NAME \
        trainer.experiment_name=$YOUR_RUN_NAME \
        trainer.n_gpus_per_node=$GPUS_PER_NODE \
        trainer.val_before_train=False \
        trainer.nnodes=1 \
        trainer.save_freq=-1 \
        trainer.test_freq=10 \
        trainer.total_epochs=15

Multi-node training: slurm with Docker/Podman container
---------------------------------------------------------------------------------------

If you want to run multi-node training with slurm, you can use the following script.

.. note::

    1. You need to use ``podman`` or ``docker`` in the following script. We will release the apptainer script later.
    2. If you want to use ``podman``, just replace ``docker`` with ``podman`` in the script.

The script includes the following steps:

1. SLURM Configuration
2. Environment Setup
3. Docker/Podman Container Setup
4. Ray Cluster Initialization
5. Data Preprocessing
6. Model Setup
7. Training Launch

slurm_script.sh
~~~~~~~~~~~~~~~~~~~~

.. code-block:: bash

    #!/bin/bash
    #SBATCH --job-name=verl-ray-on-slurm
    #SBATCH --nodes=2
    #SBATCH --ntasks-per-node=2
    #SBATCH --mem=200G
    #SBATCH --time=30-00:00:00
    #SBATCH --gpus-per-node=8
    #SBATCH --cpus-per-task=28
    #SBATCH --output=../verl_log/slurm-%j.out
    #SBATCH --error=../verl_log/slurm-%j.err
    #SBATCH --nodelist=gpu-[0,1]

    # load necessary modules
    ### Run this setup
    # [Cluster]: Use docker
    # docker pull docker.io/rocm/vllm:rocm6.2_mi300_ubuntu20.04_py3.9_vllm_0.6.4

    ##########################################################################
    ###The following setting should be set in different project and cluster###
    ##########################################################################
    ### Project
    CONTAINER_NAME="multinode_verl_training"
    IMG="verl.rocm"
    DOCKERFILE="docker/Dockerfile.rocm"
    # echo $PWD
    verl_workdir="${HOME}/projects/verl_upstream"
    export TRANSFORMERS_CACHE="${HOME}/.cache/huggingface"
    export HF_HOME=$TRANSFORMERS_CACHE

    ### Cluster Network Setting
    export NCCL_DEBUG=TRACE
    export GPU_MAX_HW_QUEUES=2
    export TORCH_NCCL_HIGH_PRIORITY=1
    export NCCL_CHECKS_DISABLE=1
    # export NCCL_IB_HCA=rdma0,rdma1,rdma2,rdma3,rdma4,rdma5,rdma6,rdma7
    export NCCL_IB_HCA=mlx5_0,mlx5_1,mlx5_2,mlx5_3,mlx5_4,mlx5_5,mlx5_8,mlx5_9
    export NCCL_IB_GID_INDEX=3
    export NCCL_CROSS_NIC=0
    export CUDA_DEVICE_MAX_CONNECTIONS=1
    export NCCL_PROTO=Simple
    export RCCL_MSCCL_ENABLE=0
    export TOKENIZERS_PARALLELISM=false
    export HSA_NO_SCRATCH_RECLAIM=1
    ##########################################################################

    ## Assign using GPUs
    export HIP_VISIBLE_DEVICES=0,1,2,3,4,5,6,7

    ### For rocm and training script
    # [ray] < 2.45.0
    #export RAY_EXPERIMENTAL_NOSET_ROCR_VISIBLE_DEVICES=1
    # [ray] >= 2.45.0
    export RAY_EXPERIMENTAL_NOSET_HIP_VISIBLE_DEVICES=1 # Patch with https://github.com/ray-project/ray/pull/52794

    # Build and launch the Docker container
    srun bash -c "
        # Exit on any error
        set -e

        # Clean up dangling images (images with tag)
        docker image prune -f

        # Need to pull the docker first
        docker pull docker.io/rocm/vllm:rocm6.2_mi300_ubuntu20.04_py3.9_vllm_0.6.4

        if ! docker images --format "{{.Repository}}:{{.Tag}}" | grep -q "${IMG}"; then
            echo \"Building ${IMG} image...\"
            docker build -f \"${DOCKERFILE}\" -t \"${IMG}\" .
        else
            echo \"${IMG} image already exists, skipping build\"
        fi

        # Removing old container if it exists
        docker rm \"${CONTAINER_NAME}\" 2>/dev/null || true

        # Checking network devices
        ibdev2netdev

        # Launch the docker
        docker run --rm -d \
            -e HYDRA_FULL_ERROR=1 \
            -e RAY_EXPERIMENTAL_NOSET_ROCR_VISIBLE_DEVICES=1 \
            -e RAY_EXPERIMENTAL_NOSET_HIP_VISIBLE_DEVICES=1 \
            -e NCCL_DEBUG=${NCCL_DEBUG} \
            -e GPU_MAX_HW_QUEUES=${GPU_MAX_HW_QUEUES} \
            -e TORCH_NCCL_HIGH_PRIORITY=${TORCH_NCCL_HIGH_PRIORITY} \
            -e NCCL_CHECKS_DISABLE=${NCCL_CHECKS_DISABLE} \
            -e NCCL_IB_HCA=${NCCL_IB_HCA} \
            -e NCCL_IB_GID_INDEX=${NCCL_IB_GID_INDEX} \
            -e NCCL_CROSS_NIC=${NCCL_CROSS_NIC} \
            -e CUDA_DEVICE_MAX_CONNECTIONS=${CUDA_DEVICE_MAX_CONNECTIONS} \
            -e NCCL_PROTO=${NCCL_PROTO} \
            -e RCCL_MSCCL_ENABLE=${RCCL_MSCCL_ENABLE} \
            -e TOKENIZERS_PARALLELISM=${TOKENIZERS_PARALLELISM} \
            -e HSA_NO_SCRATCH_RECLAIM=${HSA_NO_SCRATCH_RECLAIM} \
            -e TRANSFORMERS_CACHE=${TRANSFORMERS_CACHE} \
            -e HF_HOME=${HF_HOME} \
            --network host \
            --device /dev/dri \
            --device /dev/kfd \
            --device /dev/infiniband \
            --group-add video \
            --cap-add SYS_PTRACE \
            --security-opt seccomp=unconfined \
            --privileged \
            -v \${HOME}:\${HOME} \
            -v \${HOME}/.ssh:/root/.ssh \
            -w "${verl_workdir}" \
            --shm-size 128G \
            --name \"${CONTAINER_NAME}\" \
            \"${IMG}\" \
            tail -f /dev/null

        echo \"Container setup completed\"
    "

    # (Optional): If you do not want to run as root and want to assign yourself as the user,
    # please add `-e HOST_UID=$(id -u)` and `-e HOST_GID=$(id -g)` into the above docker launch script.

    ### Ray launch the nodes before training
    # Getting the node names
    nodes_array=($(scontrol show hostnames "$SLURM_JOB_NODELIST" | tr '\n' ' '))
    head_node=${nodes_array[0]}
    head_node_ip=$(srun --nodes=1 --ntasks=1 -w "$head_node" hostname --ip-address)

    # if we detect a space character in the head node IP, we'll
    # convert it to an ipv4 address. This step is optional.
    if [[ "$head_node_ip" == *" "* ]]; then
        IFS=' ' read -ra ADDR <<<"$head_node_ip"
        if [[ ${#ADDR[0]} -gt 16 ]]; then
            head_node_ip=${ADDR[1]}
        else
            head_node_ip=${ADDR[0]}
        fi
        echo "IPV6 address detected. We split the IPV4 address as $head_node_ip"
    fi

    port=6379
    ip_head=$head_node_ip:$port
    export ip_head
    echo "IP Head: $ip_head"

    # make sure we set environment variables before Ray initialization
    # Print out all env variables
    printenv

    echo "Starting HEAD at $head_node"
    srun --nodes=1 --ntasks=1 -w "$head_node" \
        docker exec "${CONTAINER_NAME}" \
        ray start --head --node-ip-address="$head_node_ip" --port=$port \
        --dashboard-port=8266 \
        --num-cpus "${SLURM_CPUS_PER_TASK}" --num-gpus "${SLURM_GPUS_PER_NODE}" --block &

    # optional, though may be useful in certain versions of Ray < 1.0.
    sleep 10

    # number of nodes other than the head node
    worker_num=$((SLURM_JOB_NUM_NODES - 1))

    for ((i = 1; i <= worker_num; i++)); do
        node_i=${nodes_array[$i]}
        echo "Debug: Starting worker on node_i = ${node_i}"
        if [ -z "$node_i" ]; then
            echo "Error: Empty node name for worker $i"
            continue
        fi
        echo "Starting WORKER $i at $node_i"
        srun --nodes=1 --ntasks=1 -w "$node_i" \
            docker exec "${CONTAINER_NAME}" \
            ray start --address "$ip_head" --num-cpus "${SLURM_CPUS_PER_TASK}" --num-gpus "${SLURM_GPUS_PER_NODE}" --block &
        sleep 5
    done

    # Ray initialization test (see whether any errors occurred in the above execution)
    echo "Testing Ray initialization in the slurm nodes..."
    docker exec "${CONTAINER_NAME}" python3 -c '
    import ray
    try:
        ray.init(address="auto")
        print("\n=== Ray Cluster Status ===")
        print(f"Number of nodes: {len(ray.nodes())}")
        for node in ray.nodes():
            print("Node: {}, Status: {}".format(node["NodeManagerHostname"], node["Alive"]))
            # print(f"Node: {node}")
        ray.shutdown()
        print("Ray initialization successful!")
    except Exception as e:
        print(f"Ray initialization failed: {str(e)}")
    '
    echo "=== Ray test completed ==="

    ######
    # Run data preprocessing
    echo "Starting data preprocessing..."
    docker exec "${CONTAINER_NAME}" \
        python3 "examples/data_preprocess/gsm8k.py" "--local_dir" "../data/gsm8k"

    echo "Starting data preprocessing..."
    docker exec "${CONTAINER_NAME}" \
        python3 "examples/data_preprocess/math_dataset.py" "--local_dir" "../data/math"

    train_files="../data/gsm8k/train.parquet"
    val_files="../data/gsm8k/test.parquet"

    # Download and test model
    echo "Loading model..."
    docker exec "${CONTAINER_NAME}" \
        python3 -c "import transformers; transformers.pipeline('text-generation', model='Qwen/Qwen2-7B-Instruct')"
    MODEL_PATH="Qwen/Qwen2-7B-Instruct"

    # Set model path after pipeline test
    MODEL_PATH="Qwen/Qwen2.5-0.5B-Instruct"

    echo "== Data and model loading Done =="
    echo "Start to train..."

    docker exec "${CONTAINER_NAME}" \
        python3 -c "import transformers; transformers.pipeline('text-generation', model='Qwen/Qwen2-7B-Instruct')"
    MODEL_PATH="Qwen/Qwen2-7B-Instruct"

    PYTHONUNBUFFERED=1 srun --overlap --nodes=${SLURM_NNODES} --ntasks=1 -w "$head_node" \
        docker exec "${CONTAINER_NAME}" \
        python3 -m verl.trainer.main_ppo \
        data.train_files=$train_files \
        data.val_files=$val_files \
        data.train_batch_size=1024 \
        data.max_prompt_length=1024 \
        data.max_response_length=1024 \
        actor_rollout_ref.model.path=$MODEL_PATH \
        actor_rollout_ref.model.enable_gradient_checkpointing=False \
        actor_rollout_ref.actor.optim.lr=1e-6 \
        actor_rollout_ref.model.use_remove_padding=True \
        actor_rollout_ref.actor.ppo_mini_batch_size=256 \
        actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=8 \
        actor_rollout_ref.model.enable_gradient_checkpointing=True \
        actor_rollout_ref.actor.fsdp_config.param_offload=False \
        actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
        actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \
        actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
        actor_rollout_ref.rollout.name=vllm \
        actor_rollout_ref.rollout.gpu_memory_utilization=0.9 \
        actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=16 \
        actor_rollout_ref.ref.fsdp_config.param_offload=True \
        critic.optim.lr=1e-5 \
        critic.model.use_remove_padding=True \
        critic.model.path=$MODEL_PATH \
        critic.model.enable_gradient_checkpointing=False \
        critic.ppo_micro_batch_size_per_gpu=8 \
        critic.model.fsdp_config.param_offload=False \
        critic.model.fsdp_config.optimizer_offload=False \
        algorithm.kl_ctrl.kl_coef=0.0001 \
        trainer.critic_warmup=0 \
        trainer.logger='["console","wandb"]' \
        trainer.project_name='verl_example' \
        trainer.experiment_name='Qwen2.5-32B-Instruct_function_rm' \
        trainer.n_gpus_per_node=${SLURM_GPUS_PER_NODE} \
        trainer.val_before_train=False \
        trainer.nnodes=${SLURM_NNODES} \
        trainer.save_freq=-1 \
        trainer.test_freq=10 \
        trainer.total_epochs=15

Run slurm_script.sh
~~~~~~~~~~~~~~~~~~~~

Just sbatch your slurm_script.sh:
================================================ FILE: verl_rl/docs/amd_tutorial/amd_vllm_page.rst ================================================ verl performance tuning for AMD (ROCm Kernel) ===================================================== Last updated: 04/25/2025. Author: `Yang Wang `_ Patch vLLM to Enable Sleep Mode for AMD GPUs -------------------------------------------------------------- By default, verl requires vLLM to enable sleep mode, which allows vLLM to offload GPU memory to CPU memory after rollout. However, this feature is still under review by the vLLM community. To enable vLLM's sleep mode, you can first use the community-patched code (from `this pull request `_) to build vLLM from the source code of the corresponding pull request. After the patch is merged into the vLLM main branch, you can install vLLM directly from the latest version. 1. Clone the vLLM repository and build it with the following commands: .. code-block:: bash git clone -b sleep_amd https://github.com/HollowMan6/vllm.git cd vllm sudo ln -sf /opt/rocm/lib/libamdhip64.so /usr/lib/libamdhip64.so VLLM_TARGET_DEVICE=rocm ROCM_PATH=/opt/rocm/ VLLM_GPU_LANG=HIP SETUPTOOLS_SCM_PRETEND_VERSION=0.8.4.dev python3 setup.py develop 2. Additionally, make sure the ROCm version in your Docker image is greater than or equal to ROCm 6.3.4; we recommend ROCm 6.4.0 for better performance (see `this comment `_). After the upgrade, you can verify whether sleep mode is enabled by running the following test code (from `this comment `_). .. code-block:: python import torch from vllm import LLM llm = LLM(model="meta-llama/Llama-3.1-8B-Instruct", enable_sleep_mode=True) def run_inference(prompt): outputs = llm.generate(prompt) for output in outputs: prompt = output.prompt generated_text = output.outputs[0].text print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") print("CUDA Memory Usage (after inference):") torch.cuda.empty_cache() print(f"{torch.cuda.memory_allocated()=}") run_inference("San Francisco is") llm.sleep() print("CUDA Memory Usage (after sleep):") torch.cuda.empty_cache() print(f"{torch.cuda.memory_allocated()=}") llm.wake_up() print("CUDA Memory Usage (after wakeup):") torch.cuda.empty_cache() print(f"{torch.cuda.memory_allocated()=}") run_inference("Paris is") If sleep mode is enabled, you should see the memory usage drop after sleep. After applying the vLLM patch and completing the installation, you can enable sleep mode in verl to reduce memory overhead. This allows verl to offload unused GPU memory during rollout, significantly lowering the memory footprint during long-context training or multi-node reinforcement learning. Enable CUDA Graph and Bypass ROCm-related issues -------------------------------------------------------------- Due to potential issues with CUDA graph capture in ROCm, we’ve found that vLLM’s CUDA graph feature cannot be enabled on multiple nodes in verl on AMD platforms with vLLM V1 mode. This leads to significantly slower rollout performance. Our investigation shows that ROCm may trigger an unexpected crash when attempting to capture large batches with CUDA graph. One workaround is to patch the LLM configuration (from `this commit `_). ..
code-block:: python self.inference_engine = LLM( model=model_path, enable_sleep_mode=True, tensor_parallel_size=tensor_parallel_size, distributed_executor_backend="external_launcher", dtype=config.dtype, enforce_eager=config.enforce_eager, gpu_memory_utilization=config.gpu_memory_utilization, disable_custom_all_reduce=True, disable_mm_preprocessor_cache=True, limit_mm_per_prompt=limit_mm_per_prompt, skip_tokenizer_init=False, max_model_len=max_model_len, load_format=load_format, disable_log_stats=config.disable_log_stats, max_num_batched_tokens=max_num_batched_tokens, enable_chunked_prefill=config.enable_chunked_prefill, enable_prefix_caching=True, trust_remote_code=trust_remote_code, # set the compilation config to bypass OOM on ROCm; # adjust the sizes depending on your GPU memory compilation_config={"cudagraph_capture_sizes": [1, 2, 4, 8, 16, 32, 64]}, seed=config.get('seed', 0), ) Then, you can enable CUDA graph by setting the following configuration override (see `this page `_): .. code-block:: bash actor_rollout_ref.rollout.enforce_eager=False \ ================================================ FILE: verl_rl/docs/api/data.rst ================================================ Data interface ========================= Last updated: 05/19/2025 (API docstrings are auto-generated). DataProto is the interface for data exchange. The :class:`verl.DataProto` class contains two key members: - batch: a :class:`tensordict.TensorDict` object for the actual data - meta_info: a :class:`Dict` with additional meta information TensorDict ~~~~~~~~~~~~ :attr:`DataProto.batch` is built on top of :class:`tensordict`, a project in the PyTorch ecosystem. A TensorDict is a dict-like container for tensors. To instantiate a TensorDict, you must specify key-value pairs as well as the batch size. .. code-block:: python >>> import torch >>> from tensordict import TensorDict >>> tensordict = TensorDict({"zeros": torch.zeros(2, 3, 4), "ones": torch.ones(2, 3, 5)}, batch_size=[2,]) >>> tensordict["twos"] = 2 * torch.ones(2, 5, 6) >>> zeros = tensordict["zeros"] >>> tensordict TensorDict( fields={ ones: Tensor(shape=torch.Size([2, 3, 5]), device=cpu, dtype=torch.float32, is_shared=False), twos: Tensor(shape=torch.Size([2, 5, 6]), device=cpu, dtype=torch.float32, is_shared=False), zeros: Tensor(shape=torch.Size([2, 3, 4]), device=cpu, dtype=torch.float32, is_shared=False)}, batch_size=torch.Size([2]), device=None, is_shared=False) One can also index a tensordict along its batch_size. The contents of the TensorDict can be manipulated collectively as well. .. code-block:: python >>> tensordict[..., :1] TensorDict( fields={ ones: Tensor(shape=torch.Size([1, 3, 5]), device=cpu, dtype=torch.float32, is_shared=False), twos: Tensor(shape=torch.Size([1, 5, 6]), device=cpu, dtype=torch.float32, is_shared=False), zeros: Tensor(shape=torch.Size([1, 3, 4]), device=cpu, dtype=torch.float32, is_shared=False)}, batch_size=torch.Size([1]), device=None, is_shared=False) >>> tensordict = tensordict.to("cuda:0") >>> tensordict = tensordict.reshape(6) For more about :class:`tensordict.TensorDict` usage, see the official tensordict_ documentation. .. _tensordict: https://pytorch.org/tensordict/overview.html Core APIs ~~~~~~~~~~~~~~~~~ .. autoclass:: verl.DataProto :members: to, select, union, make_iterator, concat
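As a usage sketch of the members listed above (assuming ``DataProto`` can be constructed directly from its ``batch`` and ``meta_info`` fields, as the class description suggests):

.. code-block:: python

    import torch
    from tensordict import TensorDict
    from verl import DataProto

    # Build the underlying TensorDict with batch dimension 4
    batch = TensorDict(
        {"input_ids": torch.randint(0, 100, (4, 8)),
         "attention_mask": torch.ones(4, 8, dtype=torch.long)},
        batch_size=[4],
    )
    data = DataProto(batch=batch, meta_info={"temperature": 1.0})

    # select() keeps a subset of tensor keys; to() moves tensors across devices
    subset = data.select(batch_keys=["input_ids"])
    print(subset.batch["input_ids"].shape)  # torch.Size([4, 8])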
================================================ FILE: verl_rl/docs/api/single_controller.rst ================================================ Single Controller interface ============================ Last updated: 05/27/2025 (API docstrings are auto-generated). The Single Controller provides a unified interface for managing distributed workers using Ray or other backends and executing functions across them. It simplifies the process of dispatching tasks and collecting results, particularly when dealing with data parallelism or model parallelism. Core APIs ~~~~~~~~~~~~~~~~~ .. autoclass:: verl.single_controller.Worker :members: __init__, __new__, get_master_addr_port, get_cuda_visible_devices, world_size, rank .. autoclass:: verl.single_controller.WorkerGroup :members: __init__, world_size .. autoclass:: verl.single_controller.ClassWithInitArgs :members: __init__, __call__ .. autoclass:: verl.single_controller.ResourcePool :members: __init__, world_size, local_world_size_list, local_rank_list .. autoclass:: verl.single_controller.ray.RayWorkerGroup :members: __init__ .. autofunction:: verl.single_controller.ray.create_colocated_worker_cls ================================================ FILE: verl_rl/docs/api/trainer.rst ================================================ Trainer Interface ================================ Last updated: 06/08/2025 (API docstrings are auto-generated). Trainers drive the training loop. Introducing new trainer classes for new training paradigms is encouraged. .. autosummary:: :nosignatures: verl.trainer.ppo.ray_trainer.RayPPOTrainer Core APIs ~~~~~~~~~~~~~~~~~ .. autoclass:: verl.trainer.ppo.ray_trainer.RayPPOTrainer :members: __init__, init_workers, fit .. automodule:: verl.utils.tokenizer :members: hf_tokenizer .. automodule:: verl.trainer.ppo.core_algos :members: agg_loss, kl_penalty, compute_policy_loss .. automodule:: verl.trainer.ppo.reward :members: load_reward_manager, compute_reward, compute_reward_async .. autoclass:: verl.workers.reward_manager.NaiveRewardManager .. autoclass:: verl.workers.reward_manager.DAPORewardManager ================================================ FILE: verl_rl/docs/api/utils.rst ================================================ Utilities ============ Last updated: 05/19/2025 (API docstrings are auto-generated). This section documents the utility functions and classes in the VERL library. Python Functional Utilities ------------------------------ .. automodule:: verl.utils.py_functional :members: append_to_dict File System Utilities ------------------------ .. automodule:: verl.utils.fs :members: copy_to_local Tracking Utilities --------------------- .. automodule:: verl.utils.tracking :members: Tracking Metrics Utilities --------------------- .. automodule:: verl.utils.metric :members: reduce_metrics Checkpoint Management ------------------------ .. automodule:: verl.utils.checkpoint.checkpoint_manager :members: find_latest_ckpt_path .. automodule:: verl.utils.checkpoint.fsdp_checkpoint_manager :members: FSDPCheckpointManager Dataset Utilities --------------------- .. automodule:: verl.utils.dataset.rl_dataset :members: RLHFDataset, collate_fn Torch Functional Utilities ----------------------------- .. automodule:: verl.utils.torch_functional :members: get_constant_schedule_with_warmup, masked_whiten, masked_mean, logprobs_from_logits Sequence Length Balancing ---------------------------- .. automodule:: verl.utils.seqlen_balancing :members: get_reverse_idx, rearrange_micro_batches Ulysses Utilities -------------------- .. automodule:: verl.utils.ulysses :members: gather_outputs_and_unpad, ulysses_pad_and_slice_inputs FSDP Utilities ------------------ .. automodule:: verl.utils.fsdp_utils :members: get_fsdp_wrap_policy, get_init_weight_context_manager, init_fn, load_fsdp_model_to_gpu, load_fsdp_optimizer, offload_fsdp_model_to_cpu, offload_fsdp_optimizer Debug Utilities ------------------- .. automodule:: verl.utils.profiler :members: log_gpu_memory_usage, GPUMemoryLogger
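To illustrate the masked reductions listed under Torch Functional Utilities, here is a self-contained sketch of the conventional ``masked_mean`` semantics (an illustration of the idea, not verl's exact implementation):

.. code-block:: python

    import torch

    def masked_mean(values: torch.Tensor, mask: torch.Tensor, eps: float = 1e-8) -> torch.Tensor:
        # Average only over positions where mask == 1 (e.g. non-padding response tokens).
        return (values * mask).sum() / (mask.sum() + eps)

    scores = torch.tensor([1.0, 2.0, 3.0, 4.0])
    mask = torch.tensor([1.0, 1.0, 0.0, 0.0])  # last two positions are padding
    print(masked_mean(scores, mask))  # tensor(1.5000)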
================================================ FILE: verl_rl/docs/ascend_tutorial/ascend_profiling.rst ================================================ Data collection with the FSDP backend on Ascend devices ==================================== Last updated: 07/14/2025. This is a tutorial on collecting profiling data with the GRPO or DAPO algorithm on Ascend devices using the FSDP backend. Configuration ---- The configuration items in verl/trainer/config/ppo_trainer.yaml are reused to control the collection mode and steps; parameters such as the collection level are controlled via verl/trainer/config/npu_profile/npu_profile.yaml. Global collection control ~~~~~~~~~~~~ Use the parameters in ppo_trainer.yaml to control the collection steps and mode: - trainer.profile_steps: can be set to a list of the steps to collect, e.g. [2, 4] collects steps 2 and 4. If set to null, no collection is performed - actor_rollout_ref.profiler: controls the collected ranks and the mode - all_ranks: set to True to collect from all ranks - ranks: when all_ranks is not True, the ranks to collect are given as a list, e.g. [0, 1] - discrete: controls the collection mode. When False, end-to-end data is collected; when True, data is collected in discrete mode per training phase Use the parameters in npu_profile.yaml to control the concrete collection behavior: - save_path: storage path for the collected data - level: collection level; options are level_none, level0, level1 and level2 - level_none: collects none of the level-controlled data, i.e. turns profiler_level off - level0: collects high-level application data, low-level NPU data, and information on the operators executed on the NPU - level1: on top of level0, additionally collects CANN-layer AscendCL data and AI Core performance metrics on the NPU - level2: on top of level1, additionally collects CANN-layer Runtime data and AI CPU data - record_shapes: whether to record tensor shapes - with_memory: whether to enable memory analysis - with_npu: whether to collect device-side performance data - with_cpu: whether to collect host-side performance data - with_module: whether to record framework-layer Python call-stack information - with_stack: whether to record operator call-stack information - analysis: whether to parse the data automatically Examples ---- Disabling collection ~~~~~~~~ .. code:: yaml trainer: profile_steps: null # disable profile End-to-end collection ~~~~~~~~~~ .. code:: yaml trainer: profile_steps: [1, 2, 5] actor_rollout_ref: profiler: discrete: False all_ranks: True Discrete-mode collection ~~~~~~~~~~~~ .. code:: yaml trainer: profile_steps: [1, 2, 5] actor_rollout_ref: profiler: discrete: True all_ranks: False ranks: [0, 1] Visualization ------ The collected data is stored under the user-defined save_path and can be visualized with the `MindStudio Insight `_ tool. If the analysis parameter is set to False, offline parsing is required after collection: .. code:: python import torch_npu # Set profiler_path to the parent directory of the "localhost.localdomain___ascend_pt" folder torch_npu.profiler.profiler.analyse(profiler_path=profiler_path) ================================================ FILE: verl_rl/docs/ascend_tutorial/ascend_profiling_en.rst ================================================ Data collection based on FSDP (Fully Sharded Data Parallel) backend on Ascend devices(NPU) ========================================================================================== Last updated: 07/14/2025. This is a tutorial for data collection using the GRPO or DAPO algorithm based on FSDP on Ascend devices. Configuration ------------- Reuse the configuration items in verl/trainer/config/ppo_trainer.yaml to control the collection mode and steps; you can also manage collection behaviors such as the collection level via verl/trainer/config/npu_profile/npu_profile.yaml. Global collection control ~~~~~~~~~~~~~~~~~~~~~~~~~ Use parameters in ppo_trainer.yaml to control the collection mode and steps. - trainer.profile_steps: This parameter can be set as a list that has collection steps, such as [2, 4], which means it will collect steps 2 and 4. If set to null, no collection occurs.
- actor_rollout_ref.profiler: Controls the ranks and mode of profiling - all_ranks: Collects data from all ranks when set to True. - ranks: This parameter specifies which ranks to collect (e.g., [0, 1]) when all_ranks is False. - discrete: Controls the collection mode. If False, end-to-end data is collected; if True, data is collected in discrete phases during training. Use parameters in npu_profile.yaml to control collection behavior: - save_path: Storage path for collected data. - level: Collection level; options are level_none, level0, level1, and level2. - level_none: Disables all level-based data collection (turns off profiler_level). - level0: Collects high-level application data, underlying NPU data, and operator execution details on the NPU. - level1: Extends level0 by adding CANN-layer AscendCL data and AI Core performance metrics on the NPU. - level2: Extends level1 by adding CANN-layer Runtime data and AI CPU metrics. - record_shapes: Whether to record tensor shapes. - with_memory: Whether to enable memory analysis. - with_npu: Whether to collect device-side performance data. - with_cpu: Whether to collect host-side performance data. - with_module: Whether to record framework-layer Python call stack information. - with_stack: Whether to record operator call stack information. - analysis: Whether to parse the data automatically. Examples -------- Disabling collection ~~~~~~~~~~~~~~~~~~~~ .. code:: yaml trainer: profile_steps: null # disable profile End-to-End collection ~~~~~~~~~~~~~~~~~~~~~ .. code:: yaml trainer: profile_steps: [1, 2, 5] actor_rollout_ref: profiler: discrete: False all_ranks: True Discrete Mode Collection ~~~~~~~~~~~~~~~~~~~~~~~~ .. code:: yaml trainer: profile_steps: [1, 2, 5] actor_rollout_ref: profiler: discrete: True all_ranks: False ranks: [0, 1] Visualization ------------- Collected data is stored in the user-defined save_path and can be visualized by using the `MindStudio Insight `_ tool. If the analysis parameter is set to False, offline parsing is required after data collection: .. code:: python import torch_npu # Set profiler_path to the parent directory of the "localhost.localdomain___ascend_pt" folder torch_npu.profiler.profiler.analyse(profiler_path=profiler_path) ================================================ FILE: verl_rl/docs/ascend_tutorial/ascend_quick_start.rst ================================================ verl x Ascend =================================== Last updated: 06/17/2025. We add support for Huawei Ascend devices to verl. Hardware support ----------------------------------- Atlas 200T A2 Box16 Atlas 900 A2 PODc Installation ----------------------------------- Base environment ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +-----------+-------------+ | software | version | +-----------+-------------+ | Python | == 3.10 | +-----------+-------------+ | CANN | == 8.1.RC1 | +-----------+-------------+ | torch | == 2.5.1 | +-----------+-------------+ | torch_npu | == 2.5.1.RC1| +-----------+-------------+ vllm & vllm-ascend ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ To use vllm properly within verl, build and install vllm and vllm-ascend from source with the following commands. Note that the installation differs by machine type. .. code-block:: bash # vllm git clone -b v0.7.3 --depth 1 https://github.com/vllm-project/vllm.git cd vllm pip install -r requirements-build.txt # for Atlas 200T A2 Box16 VLLM_TARGET_DEVICE=empty pip install -e . --extra-index-url https://download.pytorch.org/whl/cpu/ # for Atlas 900 A2 PODc VLLM_TARGET_DEVICE=empty pip install -e . ..
code-block:: bash # vllm-ascend git clone -b v0.7.3.post1 --depth 1 https://github.com/vllm-project/vllm-ascend.git cd vllm-ascend export COMPILE_CUSTOM_KERNELS=1 python setup.py install Install verl ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. code-block:: bash git clone https://github.com/volcengine/verl.git cd verl pip install -r requirements-npu.txt pip install -e . Notes on other third-party libraries ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +--------------+---------------+ | software | description | +--------------+---------------+ | transformers | v4.52.4 | +--------------+---------------+ | flash_attn | not supported | +--------------+---------------+ | liger-kernel | not supported | +--------------+---------------+ | tensordict | 0.8.3 (ARM) | +--------------+---------------+ 1. --flash_attention_2 can be enabled through transformers; transformers >= 4.52.0 is required. 2. Enabling flash-attention acceleration via flash_attn is not supported. 3. Enabling liger-kernel is not supported. 4. On ARM servers, tensordict 0.8.3 is required; you can install tensordict manually after the other dependencies are installed. 5. On x86 servers, the CPU build of torchvision must be installed. .. code-block:: bash pip install torchvision==0.20.1+cpu --index-url https://download.pytorch.org/whl/cpu Quick start ----------------------------------- Before formal use, we recommend verifying the environment preparation and installation with a GRPO training run of Qwen2.5-0.5B. 1. Download the dataset and preprocess it into parquet format so that it contains the fields needed to compute RL rewards .. code-block:: bash python3 examples/data_preprocess/gsm8k.py --local_dir ~/data/gsm8k 2. Run the training .. code-block:: bash set -x export VLLM_ATTENTION_BACKEND=XFORMERS python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=128 \ data.max_prompt_length=512 \ data.max_response_length=128 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2.5-0.5B-Instruct \ actor_rollout_ref.actor.optim.lr=5e-7 \ actor_rollout_ref.model.use_remove_padding=False \ actor_rollout_ref.actor.entropy_coeff=0.001 \ actor_rollout_ref.actor.ppo_mini_batch_size=64 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=20 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=40 \ actor_rollout_ref.rollout.enable_chunked_prefill=False \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=40 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.kl_ctrl.kl_coef=0.001 \ trainer.critic_warmup=0 \ trainer.logger=console \ trainer.project_name='verl_grpo_example_gsm8k' \ trainer.experiment_name='qwen2_7b_function_rm' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=5 \ trainer.total_epochs=1 \ trainer.device=npu $@ Support status ----------------------------------- +-----------+-------------------------+-------------+-------------------+----------------------+ | algorithm | model | rewards mae | throughput ratio | hardware | +-----------+-------------------------+-------------+-------------------+----------------------+ | GRPO | Qwen2.5-7B-instruct | 0.38% | 0.588 | Atlas 200T A2 Box16 | +-----------+-------------------------+-------------+-------------------+----------------------+ | GRPO |
Qwen2.5-32B-instruct | 0.30% | 0.685 | Atlas 200T A2 Box16 | +-----------+-------------------------+-------------+-------------------+----------------------+ | GRPO | Qwen2.5-VL-3B-instruct | 3.14% | 0.470 | Atlas 200T A2 Box16 | +-----------+-------------------------+-------------+-------------------+----------------------+ | GRPO | Qwen2.5-VL-7B-instruct | 3.30% | 0.380 | Atlas 200T A2 Box16 | +-----------+-------------------------+-------------+-------------------+----------------------+ | GRPO | Qwen2.5-VL-32B-instruct | 0.79% | 0.568 | Atlas 200T A2 Box16 | +-----------+-------------------------+-------------+-------------------+----------------------+ | DAPO | Qwen2.5-7B-instruct | 3.83% | pending | Atlas 200T A2 Box16 | +-----------+-------------------------+-------------+-------------------+----------------------+ | SFT-PEFT | Qwen2.5-0.5B-instruct | 0.06% | 0.305 | Atlas 900 A2 PODc | +-----------+-------------------------+-------------+-------------------+----------------------+ Notes on accuracy comparison ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ For SFT-style algorithms, we expect the mean absolute error of the loss between Huawei Ascend devices and the A100 under the same configuration to be <= 2%. The computation is illustrated in the figure below. For more information, see the `accuracy computation notes `_. .. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/loss_comparison.png?raw=true :alt: loss_comparison Empirically, for RL-style algorithms such as GRPO, we expect the mean absolute error of the rewards between Huawei Ascend devices and the A100 under the same configuration to be <= 4%, computed as in the figure above. Notes on throughput comparison ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ For the Ascend NPU and the A100, take the average of the "perf/throughput" values of the first 4 steps in the logs; throughput ratio = NPU average / A100 average (a short sketch of this computation appears at the end of this page). Roadmap ----------------------------------- See the `roadmap `_ for the support progress of more features. Disclaimer ----------------------------------- The Ascend support code provided in verl consists of reference examples; for commercial use, please communicate through official channels. Thank you.
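The throughput-ratio computation above is a simple ratio of per-device averages; the numbers below are hypothetical placeholders:

.. code-block:: python

    # "perf/throughput" from the first 4 steps of each run (hypothetical values)
    npu_tps = [1520.0, 1585.0, 1601.0, 1594.0]
    a100_tps = [2610.0, 2655.0, 2640.0, 2662.0]

    ratio = (sum(npu_tps) / len(npu_tps)) / (sum(a100_tps) / len(a100_tps))
    print(f"throughput ratio = {ratio:.3f}")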
================================================ FILE: verl_rl/docs/conf.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # # import os # import sys # sys.path.insert(0, os.path.abspath('.')) # -- Project information ----------------------------------------------------- project = "verl" copyright = "2024 ByteDance Seed Foundation MLSys Team" author = "Guangming Sheng, Chi Zhang, Yanghua Peng, Haibin Lin" # -- General configuration --------------------------------------------------- # The master toctree document. master_doc = "index" # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ "myst_parser", "sphinx.ext.autodoc", "sphinx.ext.autosummary", "sphinx.ext.autosectionlabel", "sphinx.ext.napoleon", "sphinx.ext.viewcode", ] # Use Google style docstrings instead of NumPy docstrings. napoleon_google_docstring = True napoleon_numpy_docstring = False # The suffix(es) of source filenames. # You can specify multiple suffixes as a list of strings: source_suffix = { ".rst": "restructuredtext", ".md": "markdown", } # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = "en" # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = "sphinx_rtd_theme" # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["_static"] # Add the JavaScript file html_js_files = [ "js/runllm-widget.js", ] exclude_patterns += ["README.md", "README_vllm0.7.md"] suppress_warnings = ["ref.duplicate", "ref.myst"] ================================================ FILE: verl_rl/docs/examples/config.rst ================================================ .. _config-explain-page: Config Explanation =================== Last updated: 06/18/2025. ppo_trainer.yaml for RL FSDP Backend ------------------------------------- Data ~~~~ .. code:: yaml data: tokenizer: null train_files: ~/data/rlhf/gsm8k/train.parquet val_files: ~/data/rlhf/gsm8k/test.parquet prompt_key: prompt max_prompt_length: 512 max_response_length: 512 train_batch_size: 1024 return_raw_input_ids: False # This should be set to true when the tokenizer between policy and rm differs return_raw_chat: False return_full_prompt: False shuffle: True filter_overlong_prompts: False filter_overlong_prompts_workers: 1 truncation: error image_key: images trust_remote_code: True custom_cls: path: null name: null - ``data.train_files``: Training set parquet. Can be a list or a single file. The program will read all files into memory, so they can't be too large (< 100GB). The path can be either a local path or an HDFS path. For HDFS paths, we provide utils to download the data to DRAM and convert the HDFS path to a local path. - ``data.val_files``: Validation parquet. Can be a list or a single file. - ``data.prompt_key``: The field in the dataset where the prompt is located. Default is 'prompt'. - ``data.max_prompt_length``: Maximum prompt length. All prompts will be left-padded to this length. An error is reported if the length is exceeded. - ``data.max_response_length``: Maximum response length. Rollout in RL algorithms (e.g. PPO) generates up to this length. - ``data.train_batch_size``: Batch size sampled for one training iteration of different RL algorithms. - ``data.return_raw_input_ids``: Whether to return the original input_ids without adding the chat template.
This is mainly used to accommodate situations where the reward model's chat template differs from the policy's: the prompt needs to be decoded first, and then the RM's chat template applied. If using a model-based RM with different chat templates for the policy and the RM, this flag needs to be set. - ``data.return_raw_chat``: Whether to return the original chat (prompt) without applying the chat template. - ``data.return_full_prompt``: Whether to return the full prompt with the chat template. - ``data.shuffle``: Whether to shuffle the data in the dataloader. - ``data.filter_overlong_prompts``: Whether to filter out overlong prompts. Disabled by default. - ``data.filter_overlong_prompts_workers``: For large-scale datasets, filtering overlong prompts can be time-consuming. You can set ``filter_overlong_prompts_workers`` to use multiprocessing to speed it up. Defaults to 1. - ``data.truncation``: Truncate the input_ids or prompt if they exceed max_prompt_length. Default is 'error', which does not allow exceeding max_prompt_length; users should increase max_prompt_length if this error is thrown. You can also set ``left``, ``right`` and ``middle``. When ``middle`` is selected, the logic splits the allowed max length roughly in half and keeps the head and tail of the sequence, effectively discarding the middle section (see the sketch after this list). - ``data.image_key``: The field in the multi-modal dataset where the image is located. Default is 'images'. - ``data.trust_remote_code``: If the remote tokenizer ships Python files, this field allows using the remote tokenizer, for example: moonshotai/Moonlight-16B-A3B-Instruct.
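The ``middle`` truncation mode described above can be summarized in a short sketch (an illustration of the documented behavior, not verl's exact code):

.. code-block:: python

    def truncate_middle(token_ids: list, max_len: int) -> list:
        # Keep the head and tail halves of the allowed budget; drop the middle.
        if len(token_ids) <= max_len:
            return token_ids
        left = max_len // 2
        right = max_len - left
        return token_ids[:left] + token_ids[len(token_ids) - right:]

    print(truncate_middle(list(range(10)), 4))  # [0, 1, 8, 9]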
Customized Dataset ~~~~~~~~~~~~~~~~~~~~~~~~~~ Customized dataset extension is implemented for the SFT trainer and can be extended to other trainers with similar changes. .. code:: yaml custom_cls: path: null name: null - ``data.custom_cls.path``: The path to the file containing your customized dataset class. If not specified, the pre-implemented dataset will be used. - ``data.custom_cls.name``: The name of the dataset class within the specified file. Actor/Rollout/Reference Policy ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. code:: yaml actor_rollout_ref: hybrid_engine: True model: path: ~/models/deepseek-llm-7b-chat external_lib: null override_config: model_config: {} moe_config: # Megatron only, can adjust moe configuration freeze_moe_router: False # Megatron only, can freeze moe router (no grad) enable_gradient_checkpointing: False enable_activation_offload: False trust_remote_code: False use_remove_padding: False actor: strategy: fsdp # This is for backward-compatibility ppo_mini_batch_size: 256 ppo_micro_batch_size: null # will be deprecated, use ppo_micro_batch_size_per_gpu ppo_micro_batch_size_per_gpu: 8 use_dynamic_bsz: False ppo_max_token_len_per_gpu: 16384 # n * ${data.max_prompt_length} + ${data.max_response_length} grad_clip: 1.0 clip_ratio: 0.2 entropy_coeff: 0.0 use_kl_loss: False # True for GRPO use_torch_compile: True # False to disable torch compile kl_loss_coef: 0.001 # for grpo kl_loss_type: low_var_kl # for grpo ppo_epochs: 1 data_loader_seed: null shuffle: False ulysses_sequence_parallel_size: 1 # sp size optim: lr: 1e-6 lr_warmup_steps: -1 # Prioritized. Negative values mean delegating to lr_warmup_steps_ratio. lr_warmup_steps_ratio: 0. # the total steps will be injected during runtime min_lr_ratio: 0.0 # only used with cosine lr scheduler, default to 0.0 num_cycles: 0.5 # only used with cosine lr scheduler, default to 0.5 warmup_style: constant # select from constant/cosine total_training_steps: -1 # must be overridden by the program fsdp_config: wrap_policy: # transformer_layer_cls_to_wrap: None min_num_params: 0 param_offload: False optimizer_offload: False fsdp_size: -1 checkpoint: # What to include in saved checkpoints # with 'hf_model' you can save the whole model in hf format; by default only the sharded model checkpoint is used, to save space save_contents: ['model', 'optimizer', 'extra'] # For more flexibility, you can specify the contents to load from the checkpoint. load_contents: ${actor_rollout_ref.actor.checkpoint.save_contents} ref: fsdp_config: param_offload: False wrap_policy: # transformer_layer_cls_to_wrap: None min_num_params: 0 log_prob_micro_batch_size: null # will be deprecated, use log_prob_micro_batch_size_per_gpu log_prob_micro_batch_size_per_gpu: 16 log_prob_use_dynamic_bsz: ${actor_rollout_ref.actor.use_dynamic_bsz} log_prob_max_token_len_per_gpu: ${actor_rollout_ref.actor.ppo_max_token_len_per_gpu} ulysses_sequence_parallel_size: ${actor_rollout_ref.actor.ulysses_sequence_parallel_size} # sp size rollout: name: vllm temperature: 1.0 top_k: -1 # 0 for hf rollout, -1 for vllm rollout top_p: 1 prompt_length: ${data.max_prompt_length} # not used for open-source models response_length: ${data.max_response_length} # for vllm rollout dtype: bfloat16 # should align with FSDP gpu_memory_utilization: 0.5 ignore_eos: False enforce_eager: True free_cache_engine: True load_format: dummy_dtensor tensor_model_parallel_size: 2 max_num_batched_tokens: 8192 max_num_seqs: 1024 log_prob_micro_batch_size: null # will be deprecated, use log_prob_micro_batch_size_per_gpu log_prob_micro_batch_size_per_gpu: 16 log_prob_use_dynamic_bsz: ${actor_rollout_ref.actor.use_dynamic_bsz} log_prob_max_token_len_per_gpu: ${actor_rollout_ref.actor.ppo_max_token_len_per_gpu} # for hf rollout do_sample: True engine_kwargs: # inference engine parameters vllm: swap_space: null # null means "use the engine default value" (usually 4 GB); setting it to, e.g., 32 means 32 GB disable_mm_preprocessor_cache: False # disable preprocessor cache for multimodal models sglang: attention_backend: null # null means use the engine default value, available options: flashinfer, triton, flashmla n: 1 # for each prompt, sample n responses (i.e. num sample times). set it to values > 1 for grpo, rloo val_kwargs: # sampling parameters for validation top_k: -1 # 0 for hf rollout, -1 for vllm rollout top_p: 1.0 temperature: 0 n: 1 do_sample: False # default greedy for validation agent: custom_async_server: # Use custom async server implementation for rollout path: null name: null **Common config for actor, rollout and reference model** - ``actor_rollout_ref.hybrid_engine``: Whether it's a hybrid engine; currently only the hybrid engine is supported. - ``actor_rollout_ref.model.path``: Huggingface model path. This can be either a local path or an HDFS path. For HDFS paths, we provide utils to download the model to DRAM and convert the HDFS path to a local path. - ``actor_rollout_ref.model.external_lib``: Additional Python packages that need to be imported. Used to register models or tokenizers into the Huggingface system.
- ``actor_rollout_ref.model.override_config``: Used to override some of the model's original configurations, mainly dropout. - ``actor_rollout_ref.model.enable_gradient_checkpointing``: FSDP only; decides whether to enable gradient checkpointing for the actor. Megatron uses recompute options in ``override_transformer_config`` to set this. - ``actor_rollout_ref.model.enable_activation_offload``: Whether to enable activation offloading for the actor. - ``actor_rollout_ref.model.trust_remote_code``: Whether to enable loading a remote-code model. - ``actor_rollout_ref.model.use_fused_kernels``: Whether to use fused kernels in the model. If set to True, the following parameters will be used. - ``actor_rollout_ref.model.fused_kernel_options.impl_backend``: The implementation backend for fused kernels. Options: "triton" or "torch". Default is "torch". In Megatron, only "triton" is supported as the implementation backend, so this option is not needed there. - ``actor_rollout_ref.model.use_remove_padding``: Whether to remove padding in the model. If set to True, the model will remove padding tokens from the input_ids and response_ids. This helps a lot in improving model running efficiency. **Actor model** - ``actor_rollout_ref.actor.strategy``: fsdp or megatron. In this example, we use the fsdp backend. - ``actor_rollout_ref.actor.ppo_mini_batch_size``: One sample is split into multiple sub-batches with batch_size=ppo_mini_batch_size for PPO updates. The ppo_mini_batch_size is a global number across all workers/GPUs. - ``actor_rollout_ref.actor.ppo_micro_batch_size``: [Will be deprecated, use ppo_micro_batch_size_per_gpu] Similar to gradient accumulation, the micro_batch_size_per_gpu for one forward pass, trading speed for GPU memory. The value represents the global view. - ``actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu``: Similar to gradient accumulation, the micro_batch_size_per_gpu for one forward pass, trading speed for GPU memory. The value represents the local number per GPU. - ``actor_rollout_ref.actor.grad_clip``: Gradient clipping for actor updates. - ``actor_rollout_ref.actor.use_kl_loss``: Whether to use KL loss in the actor. When used, KL is not applied in the reward function. Default is False. - ``actor_rollout_ref.actor.clip_ratio``: PPO clip ratio. - ``actor_rollout_ref.actor.use_torch_compile``: Whether to use torch compile in the actor. - ``actor_rollout_ref.actor.entropy_coeff``: The weight of entropy when calculating the PPO loss. The default value has been 0.0 since v0.3.x. - ``actor_rollout_ref.actor.ppo_epochs``: Number of epochs for PPO updates on one set of sampled data. - ``actor_rollout_ref.actor.data_loader_seed``: Since torch 2.6.0, the Megatron backend can get a wrong seed generated by PyTorch across cp ranks, causing data misalignment between these ranks, so we manually set the seed to avoid hangs. If ``actor_rollout_ref.actor.shuffle`` is not null, this must be set. - ``actor_rollout_ref.actor.shuffle``: Whether to shuffle data when there are multiple epochs. - ``actor_rollout_ref.actor.optim``: Actor's optimizer parameters. - ``actor_rollout_ref.actor.fsdp_config``: FSDP config for actor training. - ``wrap_policy``: FSDP wrap policy. By default, it uses Huggingface's wrap policy, i.e., wrapping by DecoderLayer. - There is no need to set transformer_layer_cls_to_wrap, so we comment it out. - ``*_offload``: Whether to enable parameter, gradient and optimizer offload, trading speed for GPU memory.
- ``actor_rollout_ref.actor.kl_loss_coef``: The coefficient of the KL loss. Default is 0.001. - ``actor_rollout_ref.actor.kl_loss_type``: Supports ``kl`` (``k1``), ``abs``, ``mse`` (``k2``), ``low_var_kl`` (``k3``) and ``full``. How to calculate the KL divergence between the actor and the reference policy. For specific options, refer to `kl_penalty()` in `core_algos.py `_. See this blog post for a detailed analysis: http://joschu.net/blog/kl-approx.html (a numeric sketch of these estimators follows this list). - ``actor_rollout_ref.actor.checkpoint``: The configuration of the checkpoint function in the actor. - ``save_contents``: The contents to save in the checkpoint. By default, we save the model, optimizer and extra information in the checkpoint. The extra information currently includes RNG states and, for FSDP, the lr_scheduler; Megatron opt_param_scheduler support is coming soon. We do not store hf_model in the checkpoint by default, but we provide a tool in ``scripts/model_merge.py`` to convert the checkpoint format to hf format. - ``load_contents``: The contents to load from the checkpoint; you can specify different checkpoint loading contents. By default, it is the same as ``save_contents``.
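As a quick numeric illustration of the ``kl_loss_type`` estimators named above (following the k1/k2/k3 definitions from the linked blog post; an independent sketch, not verl's implementation):

.. code-block:: python

    import torch

    # log_ratio = log(pi_ref(x) / pi_actor(x)), with x sampled from the actor
    log_ratio = 0.1 * torch.randn(5)

    k1 = -log_ratio                           # "kl": unbiased, can be negative
    k2 = 0.5 * log_ratio.pow(2)               # "mse": always non-negative, biased
    k3 = torch.expm1(log_ratio) - log_ratio   # "low_var_kl": non-negative, low variance

    print(k1.mean(), k2.mean(), k3.mean())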
**Reference Model** The reference model is enabled when ``actor.use_kl_loss`` and/or ``algorithm.use_kl_in_reward`` is True. - ``actor_rollout_ref.ref``: FSDP config, same as the actor. **For models larger than 7B, it's recommended to turn on offload for ref by default.** - ``actor_rollout_ref.ref.log_prob_micro_batch_size``: [Will be deprecated, use log_prob_micro_batch_size_per_gpu] The batch size for one forward pass in the computation of ``ref_log_prob``. The value represents the global number. - ``actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu``: The batch size for one forward pass in the computation of ``ref_log_prob``. The value represents the local number per GPU. **Rollout Model** - ``actor_rollout_ref.rollout.name``: hf/vllm/sglang. - Rollout (auto-regressive) parameters. The key should be equal to the property name in vLLM's ``SamplingParams``. - ``temperature``, ``top_k``, ``top_p`` and others: Sampling parameters in ``SamplingParams``. - ``actor_rollout_ref.rollout.dtype``: Rollout model parameter type. This should be aligned with the actor model parameter type in the FSDP/Megatron backend. - ``actor_rollout_ref.rollout.gpu_memory_utilization``: - For vLLM v0.7.0 and later: The fraction of **total** GPU memory to be used for the vLLM instance. - For SGLang: Corresponding to ``mem_fraction_static``, the fraction of the free GPU memory used for **static** memory like model weights and KV cache. - ``actor_rollout_ref.rollout.tensor_model_parallel_size``: TP size for rollout. Only effective for vllm. - ``actor_rollout_ref.rollout.log_prob_micro_batch_size``: [Will be deprecated, use log_prob_micro_batch_size_per_gpu] The batch size for one forward pass in the computation of ``log_prob``. The value represents the global number. - ``actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu``: Micro batch size per GPU (the batch size for one forward pass) for recalculating ``log_prob``. The value represents the local number per GPU. - ``actor_rollout_ref.rollout.do_sample``: Whether to sample during the training rollout. If set to False, the rollout model will perform greedy sampling. - ``actor_rollout_ref.rollout.val_kwargs``: Sampling parameters used specifically during validation. - ``top_k``: Top-k sampling parameter. Default to -1 for vLLM rollout or 0 for HF rollout. - ``top_p``: Top-p sampling parameter. Default is 1.0 (disabled). - ``temperature``: Sampling temperature. Default is 0 (deterministic greedy). - ``n``: Number of responses to generate during validation. Default is 1. - ``do_sample``: Whether to use sampling during validation. Default is False for deterministic outputs. When set to True, the rollout will use the ``actor_rollout_ref.rollout.val_kwargs`` parameters (top_k, top_p, temperature) to control the sampling behavior. - ``actor_rollout_ref.rollout.engine_kwargs.vllm``: extra vllm engine args - ``swap_space``: swap space in GB used by the inference engine. Positive integer, e.g., ``32`` means 32 GB. ``null`` means not setting it and using the engine default value (usually 4 GB for vLLM). - ``disable_mm_preprocessor_cache``: Whether to disable the preprocessor cache for multimodal models. - ``actor_rollout_ref.rollout.engine_kwargs.sglang``: extra sglang engine args - ``attention_backend``: The attention backend to use for the inference engine. - ``null``: means not setting it and using the engine default value (usually ``fa3`` for SGLang). - ``flashinfer``: Use the flashinfer attention backend. - ``triton``: Use the triton attention backend. - ``flashmla``: Use the flashmla attention backend. - ``actor_rollout_ref.rollout.ignore_eos``: Whether to ignore the EOS token and continue generating tokens after the EOS token is generated. - ``actor_rollout_ref.rollout.free_cache_engine``: Offload the KVCache after the rollout generation stage. Default is True. When set to True, for vllm v0.5.4 and v0.6.3 we need to disable the usage of CUDAGraph (set ``enforce_eager`` to True). - ``actor_rollout_ref.rollout.enforce_eager``: Whether to use CUDAGraph in vLLM generation. Default is True, which disables CUDAGraph. - ``actor_rollout_ref.rollout.load_format``: Which weight loader to use to load the actor model weights into the rollout model. - ``auto``: Use the Megatron weight loader. - ``megatron``: Use the Megatron weight loader. Deployed with the Megatron backend. The input model ``state_dict()`` is already partitioned along the TP dimension and already gathered along the PP dimension. This weight loader requires that the rollout model and actor model's parameter shapes and names are identical. - ``dtensor``: Default solution when using the Huggingface weight loader. Deployed with the FSDP backend when the state_dict_type is ``StateDictType.SHARDED_STATE_DICT``. We recommend this weight loader. - ``hf``: Use the Huggingface weight loader. Deployed with the FSDP backend when the state_dict_type is ``StateDictType.FULL_STATE_DICT``. This solution doesn't need to rewrite the weight loader for each model implemented in vLLM, but it results in larger peak memory usage. - ``dummy_hf``, ``dummy_megatron``, ``dummy_dtensor``: Random initialization. .. note:: In this config field, users only need to select from ``dummy_megatron``, ``dummy_dtensor``, ``dummy_hf`` for rollout initialization, and our hybrid engine will select the corresponding weight loader (i.e., ``megatron``, ``dtensor``, ``hf``) during actor/rollout weight synchronization. Megatron Optimizer and Optimizer Parameter Scheduler ____________________________________________________ .. code:: yaml optim: optimizer: adam lr: 1e-6 clip_grad: 1.0 total_training_steps: -1 # must be overridden by the program lr_warmup_init: 0.0 # initial learning rate for warmup, default to 0.0 lr_warmup_steps: -1 # Prioritized. Negative values mean delegating to lr_warmup_steps_ratio. lr_warmup_steps_ratio: 0.
# the total steps will be injected during runtime lr_decay_steps: null lr_decay_style: constant # select from constant/linear/cosine/inverse_square_root min_lr: 0.0 # minimum learning rate, default to 0.0 weight_decay: 0.01 weight_decay_incr_style: constant # select from constant/linear/cosine lr_wsd_decay_style: exponential # select from constant/exponential/cosine lr_wsd_decay_steps: null use_checkpoint_opt_param_scheduler: False # use checkpoint optimizer parameter scheduler Notice that there are some differences in the APIs between the Megatron optimizer and the FSDP optimizer. - The Megatron optimizer scheduler names the period after lr_warmup as lr_decay_steps, so ``warmup_style`` actually means the style of the lr decay after warmup. - The Megatron optimizer also supports scheduling the weight decay itself (see ``weight_decay_incr_style``). - ``use_checkpoint_opt_param_scheduler`` determines whether to use the checkpoint optimizer parameter scheduler. If set to True, the optimizer parameter scheduler will be saved in the checkpoint and loaded from the checkpoint when resuming training. For learning rate decay, the original Megatron pretraining default for ``lr_decay_style`` is ``linear``, meaning that the learning rate is linearly decayed from the initial learning rate to ``min_lr`` within ``lr_decay_steps``. However, in verl, to align with FSDP's default behavior, we set the default ``lr_decay_style`` to ``constant``, meaning that the learning rate is kept constant after the warmup stage. Critic Model ~~~~~~~~~~~~ Most parameters for the critic are similar to those of the actor model. Reward Model ~~~~~~~~~~~~ .. code:: yaml reward_model: enable: False model: input_tokenizer: ${actor_rollout_ref.model.path} # set this to null if the chat template is identical path: ~/models/Anomy-RM-v0.1 external_lib: ${actor_rollout_ref.model.external_lib} trust_remote_code: False fsdp_config: min_num_params: 0 param_offload: False micro_batch_size_per_gpu: 16 max_length: null reward_manager: naive - ``reward_model.enable``: Whether to enable the reward model. If False, we compute the reward only with the user-defined reward functions. In the GSM8K and MATH examples, we disable the reward model. For the RLHF alignment example using full_hh_rlhf, we utilize the reward model to assess the responses. If False, the following parameters are not effective. - ``reward_model.model`` - ``input_tokenizer``: Input tokenizer. If the reward model's chat template is inconsistent with the policy's, we need to first decode to plaintext, then apply the RM's chat template, and then score with the RM. If the chat_templates are consistent, it can be set to null. - ``path``: RM's HDFS path or local path. Note that the RM only supports AutoModelForSequenceClassification. Other model types need to define their own RewardModelWorker and pass it in from the code. - ``trust_remote_code``: Whether to enable loading a remote-code model, default to False. - ``reward_model.reward_manager``: Reward Manager. This defines the mechanism for computing rule-based rewards and handling different reward sources. Default is ``naive``. If all verification functions are multiprocessing-safe, the reward manager can be set to ``prime`` for parallel verification. Customized Reward Function ~~~~~~~~~~~~~~~~~~~~~~~~~~ .. code:: yaml custom_reward_function: path: null name: compute_score - ``custom_reward_function.path``: The path to the file containing your customized reward function. If not specified, pre-implemented reward functions will be used.
- ``custom_reward_function.name`` (optional): The name of the reward function within the specified file. Default is 'compute_score'. A sketch of such a file follows the Trainer section below. Algorithm ~~~~~~~~~ .. code:: yaml algorithm: gamma: 1.0 lam: 1.0 adv_estimator: gae use_kl_in_reward: False kl_penalty: kl # how to estimate kl divergence kl_ctrl: type: fixed kl_coef: 0.005 horizon: 10000 target_kl: 0.1 - ``gamma``: Discount factor. - ``lam``: Trade-off between bias and variance in the GAE estimator. - ``adv_estimator``: Supports ``gae``, ``grpo``, ``reinforce_plus_plus``, ``reinforce_plus_plus_baseline``, ``rloo``. - ``use_kl_in_reward``: Whether to enable the in-reward KL penalty. Default is False. - ``kl_penalty``: Supports ``kl``, ``abs``, ``mse``, ``low_var_kl`` and ``full``. How to calculate the KL divergence between the actor and the reference policy. For specific options, refer to `kl_penalty()` in `core_algos.py `_. - ``kl_ctrl``: Config for the in-reward kl_penalty controller. - ``kl_coef``: The (initial) coefficient of the in-reward kl_penalty. Default is 0.001. - ``type``: 'fixed' for FixedKLController and 'adaptive' for AdaptiveKLController. - ``horizon`` and ``target_kl``: See the source code of AdaptiveKLController for details. Trainer ~~~~~~~ .. code:: yaml trainer: total_epochs: 30 project_name: verl_examples experiment_name: gsm8k logger: ['console', 'wandb'] log_val_generations: 0 nnodes: 1 n_gpus_per_node: 8 save_freq: -1 val_before_train: True test_freq: 2 critic_warmup: 0 default_hdfs_dir: null # hdfs checkpoint path default_local_dir: checkpoints/${trainer.project_name}/${trainer.experiment_name} # local checkpoint path resume_mode: auto # or disable or resume_path if resume_from_path is set resume_from_path: null remove_previous_ckpt_in_save: False del_local_ckpt_after_load: False ray_wait_register_center_timeout: 300 - ``trainer.total_epochs``: Number of epochs in training. - ``trainer.project_name``: For wandb, swanlab, mlflow. - ``trainer.experiment_name``: For wandb, swanlab, mlflow. - ``trainer.logger``: Supports console and wandb, swanlab, mlflow, tensorboard. - ``trainer.log_val_generations``: The number of generations logged during validation (default ``0``). - ``trainer.nnodes``: Number of nodes used in the training. - ``trainer.n_gpus_per_node``: Number of GPUs per node. - ``trainer.save_freq``: The frequency (by iteration) at which to save checkpoints of the actor and critic models. - ``trainer.val_before_train``: Whether to run validation before training. - ``trainer.test_freq``: The validation frequency (by iteration). - ``trainer.critic_warmup``: The number of iterations to train the critic model before actual policy learning. - ``trainer.resume_mode``: The mode for resuming training. Supports ``disable``, ``auto`` and ``resume_path``. If set to ``auto`` (the default), the program will automatically resume from the latest checkpoint in ``default_local_dir``. If set to ``resume_path``, the program will resume from the path specified in ``resume_from_path``. - ``trainer.resume_from_path``: The path to resume training from. Only effective when ``resume_mode`` is set to ``resume_path``. - ``trainer.remove_previous_ckpt_in_save``: Whether to remove previous checkpoints in the save directory. Default is False. - ``trainer.del_local_ckpt_after_load``: Whether to delete local checkpoints after loading them. Default is False. - ``trainer.ray_wait_register_center_timeout``: The timeout for waiting for the ray register center to be ready. Default is 300 seconds. This figure illustrates how the configurations affect the training. https://excalidraw.com/#json=pfhkRmiLm1jnnRli9VFhb,Ut4E8peALlgAUpr7E5pPCA .. image:: https://github.com/user-attachments/assets/16aebad1-0da6-4eb3-806d-54a74e712c2d
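To make the ``custom_reward_function`` plumbing concrete, a user-supplied file might look like the sketch below. The argument names shown (``data_source``, ``solution_str``, ``ground_truth``, ``extra_info``) are an assumption for illustration; check verl's reward-loading code for the exact signature it expects.

.. code-block:: python

    # my_reward.py -- referenced via custom_reward_function.path; the function
    # name matches custom_reward_function.name (default: compute_score).
    def compute_score(data_source, solution_str, ground_truth, extra_info=None):
        """Toy rule-based reward: 1.0 on exact string match, else 0.0."""
        return 1.0 if solution_str.strip() == str(ground_truth).strip() else 0.0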
evaluation.yaml --------------- Data ~~~~ .. code:: yaml data: path: /tmp/math_Qwen2-7B-Instruct.parquet prompt_key: prompt response_key: responses data_source_key: data_source reward_model_key: reward_model - ``data.path``: Path to the dataset file (Parquet format). - ``data.prompt_key``: The field in the dataset where the prompt is located. Default is 'prompt'. - ``data.response_key``: The key that holds the generated responses. This should be a list of strings representing the responses. Default is 'responses'. - ``data.data_source_key``: This is used to separate metric calculations for different data sources, ensuring that metrics are calculated independently for each source. - ``data.reward_model_key``: The key that holds the reference answers. These reference answers typically serve as the ground truth or test cases for the task. Customized Reward Function ~~~~~~~~~~~~~~~~~~~~~~~~~~ .. code:: yaml custom_reward_function: path: null name: compute_score - ``custom_reward_function.path``: The path to the file containing your customized reward function. If not specified, pre-implemented reward functions will be used. - ``custom_reward_function.name`` (optional): The name of the reward function within the specified file. Default is 'compute_score'. sft_trainer.yaml for SFT FSDP Backend -------------------------------------- Optim ~~~~~~~ .. code:: yaml optim: lr: 1e-5 weight_decay: 0.01 warmup_steps_ratio: 0.1 clip_grad: 1.0 lr_scheduler: cosine - ``optim.lr``: Learning rate for the optimizer. - ``optim.weight_decay``: Weight decay for the optimizer. - ``optim.warmup_steps_ratio``: Ratio of warmup steps to total training steps. - ``optim.clip_grad``: Gradient clipping value. - ``optim.lr_scheduler``: Learning rate scheduler type. Options: - ``cosine``: Cosine learning rate scheduler with warmup (default); see the sketch at the end of this section. - ``wsd``: Warmup-Stable-Decay scheduler that provides a stable learning rate phase between the warmup and decay phases. Model ~~~~~~~~~~~~ Most parameters for Model are similar to those of the Reward Model. .. code:: yaml model: partial_pretrain: ~/models/gemma-1.1-7b-it fsdp_config: model_dtype: fp32 wrap_policy: min_num_params: 0 cpu_offload: False offload_params: False external_lib: null enable_gradient_checkpointing: False trust_remote_code: False lora_rank: 0 lora_alpha: 16 target_modules: all-linear use_liger: False - ``partial_pretrain``: HDFS path or local path to the pretrained model. - ``fsdp_config`` - ``model_dtype``: Model parameter type, defaults to ``fp32``. Supports ``bf16``, ``fp16``, ``fp32``. - ``cpu_offload``: Whether to enable CPU offloading for FSDP. If True, ``offload_params`` will be used as an argument. - ``offload_params``: Whether to offload parameters to CPU when not involved in computation. If True, this offloads gradients to CPU as well, meaning that the optimizer step runs on CPU. - ``lora_rank``: The rank of the LoRA model, defaults to 0. If ``lora_rank`` > 0, we train LoRA modules instead of tuning the full model. - ``lora_alpha``: The alpha parameter for LoRA scaling, defaults to 16. - ``target_modules``: The names of the modules to apply the adapter to, defaults to ``all-linear``. See the `peft docs `_ for details. - ``use_liger``: Whether to enable the Liger kernel, defaults to False. If True, we apply the Liger kernel to the model (depends on `liger-kernel`).
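For intuition, the curve produced by ``lr_scheduler: cosine`` with ``warmup_steps_ratio`` can be sketched as follows (a generic warmup-plus-cosine formula, not verl's exact scheduler code; ``min_lr_ratio`` is an extra illustrative knob, 0.0 here):

.. code-block:: python

    import math

    def lr_at(step, total_steps, base_lr=1e-5, warmup_steps_ratio=0.1, min_lr_ratio=0.0):
        # Linear warmup to base_lr, then cosine decay toward base_lr * min_lr_ratio.
        warmup = max(1, int(total_steps * warmup_steps_ratio))
        if step < warmup:
            return base_lr * step / warmup
        progress = (step - warmup) / max(1, total_steps - warmup)
        coef = 0.5 * (1.0 + math.cos(math.pi * progress))
        return base_lr * (min_lr_ratio + (1.0 - min_lr_ratio) * coef)

    print(lr_at(0, 1000), lr_at(100, 1000), lr_at(1000, 1000))  # 0.0, 1e-05, ~0.0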
================================================ FILE: verl_rl/docs/examples/gsm8k_example.rst ================================================ GSM8K Example ============= Last updated: 03/25/2025. Introduction ------------ In this example, we train an LLM to tackle the GSM8k task. Paper: https://arxiv.org/pdf/2110.14168 Dataset: https://huggingface.co/datasets/gsm8k Note that the original paper mainly focuses on training a verifier (a reward model) to solve math problems via Best-of-N sampling. In this example, we train an RLHF agent using a rule-based reward model. Dataset Introduction -------------------- GSM8k is a math problem dataset. Each prompt is an elementary school problem, and the LLM is required to answer it. The training set contains 7473 samples and the test set contains 1319 samples. **An example** Prompt Katy makes coffee using teaspoons of sugar and cups of water in the ratio of 7:13. If she used a total of 120 teaspoons of sugar and cups of water, calculate the number of teaspoonfuls of sugar she used. Solution The total ratio representing the ingredients she used to make the coffee is 7+13 = <<7+13=20>>20 Since the fraction representing the number of teaspoons she used is 7/20, she used 7/20\ *120 = <<7/20*\ 120=42>>42 #### 42 Step 1: Prepare dataset ----------------------- .. code:: bash cd examples/data_preprocess python3 gsm8k.py --local_dir ~/data/gsm8k Step 2: Download Model ---------------------- There are three ways to prepare the model checkpoints for post-training: - Download the required models from huggingface or modelscope: .. code:: bash huggingface-cli download deepseek-ai/deepseek-math-7b-instruct --local-dir ~/models/deepseek-math-7b-instruct --local-dir-use-symlinks False # or modelscope download --model deepseek-ai/deepseek-math-7b-instruct --local_dir ~/models/deepseek-math-7b-instruct - Store your model in a local directory or HDFS path ahead of time. - Alternatively, directly use the model name from huggingface (e.g., deepseek-ai/deepseek-math-7b-instruct) in the ``actor_rollout_ref.model.path`` and ``critic.model.path`` fields of the run script. You can also download models from modelscope by setting the environment variable ``VERL_USE_MODELSCOPE=True``. See examples/ppo_trainer/run_deepseek7b_llm_modelscope.sh for an example. Note that users should prepare checkpoints for the actor, critic and reward model. [Optional] Step 3: SFT your Model --------------------------------- We provide an SFT trainer using PyTorch FSDP in `fsdp_sft_trainer.py `_. Users can customize their own SFT script using our FSDP SFT Trainer. We also provide various training scripts for SFT on the GSM8K dataset in the `gsm8k sft directory `_. .. code:: shell set -x torchrun -m verl.trainer.fsdp_sft_trainer \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.prompt_key=question \ data.response_key=answer \ data.micro_batch_size_per_gpu=8 \ model.partial_pretrain=deepseek-ai/deepseek-coder-6.7b-instruct \ trainer.project_name=gsm8k-sft \ trainer.experiment_name=gsm8k-sft-deepseek-coder-6.7b-instruct \ trainer.total_epochs=4 \ trainer.logger='["console","wandb"]' If you use AMD GPUs (ROCm kernel), you need to add the following environment variables to the run script: ..
**Training Script** Example training scripts for the FSDP and Megatron-LM backends are stored in the examples/ppo_trainer directory. .. code:: bash cd ../ppo_trainer bash run_deepseek7b_llm.sh The content of run_deepseek7b_llm.sh: .. code:: bash set -x python3 -m verl.trainer.main_ppo \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=1024 \ data.max_prompt_length=512 \ data.max_response_length=512 \ actor_rollout_ref.model.path=deepseek-ai/deepseek-llm-7b-chat \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=16 \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.rollout.tensor_model_parallel_size=4 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ critic.optim.lr=1e-5 \ critic.model.use_remove_padding=True \ critic.model.path=deepseek-ai/deepseek-llm-7b-chat \ critic.model.enable_gradient_checkpointing=True \ critic.ppo_micro_batch_size_per_gpu=32 \ critic.model.fsdp_config.param_offload=False \ critic.model.fsdp_config.optimizer_offload=False \ algorithm.kl_ctrl.kl_coef=0.001 \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_example_gsm8k' \ trainer.experiment_name='deepseek_llm_7b_function_rm' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=1 \ trainer.total_epochs=15 $@ If you use AMD GPUs (ROCm kernel), you need to add the following environment variables into the run script: .. code-block:: bash export HIP_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 export ROCR_VISIBLE_DEVICES=$HIP_VISIBLE_DEVICES export CUDA_VISIBLE_DEVICES=$HIP_VISIBLE_DEVICES If you encounter any issues using AMD GPUs to run verl, feel free to contact `Yusheng Su `_. ================================================ FILE: verl_rl/docs/examples/multi_modal_example.rst ================================================ Multi-Modal Example Architecture ================================= Last updated: 04/28/2025. Introduction ------------ verl now supports multi-modal training.
You can use FSDP and vLLM/SGLang to start a multi-modal RL task. Megatron support is also on the way. Follow the steps below to quickly start a multi-modal RL task. Step 1: Prepare dataset ----------------------- .. code:: python # it will be saved in the $HOME/data/geo3k folder python examples/data_preprocess/geo3k.py Step 2: Download Model ---------------------- .. code:: bash # download the model from huggingface python3 -c "import transformers; transformers.pipeline(model='Qwen/Qwen2.5-VL-7B-Instruct')" Step 3: Perform GRPO training with multi-modal model on Geo3K Dataset --------------------------------------------------------------------- .. code:: bash # run the task bash examples/grpo_trainer/run_qwen2_5_vl-7b.sh ================================================ FILE: verl_rl/docs/examples/ppo_code_architecture.rst ================================================ PPO Example Architecture ======================== Last updated: 02/17/2025. Let's start with the Proximal Policy Optimization algorithm, which is the most widely used algorithm in LLM post-training. The main entry point of the PPO algorithm example is: `main_ppo.py `_. In this tutorial, we will go through the code architecture in `main_ppo.py `_. Define the data --------------- Users need to preprocess and store the dataset in parquet files. We implement `RLHFDataset` to load and tokenize the parquet files. For ``RLHFDataset`` (Default), at least one field is required: - ``prompt``: Contains the string prompt We already provide some examples of processing the datasets to parquet files in the `data_preprocess directory `_. Currently, we support preprocessing of the GSM8k, MATH, Hellaswag and Full_hh_rlhf datasets. See :doc:`../preparation/prepare_data` for more information. Define the reward functions for different datasets -------------------------------------------------- In this main entry point, users only need to define their own reward function based on the datasets (or applications) utilized in PPO training. For example, we already provide reward functions for the `GSM8k `_ and `MATH `_ datasets in ``_select_rm_score_fn``. In the ``RewardManager``, we compute the reward score based on the data_source, which selects the corresponding reward function. For some RLHF datasets (e.g., full_hh_rlhf), the reward model is utilized to assess the responses without any reward functions. In this case, the ``RewardManager`` will return the ``rm_score`` computed by the reward model directly. See `reward functions `_ for detailed implementation. Define worker classes --------------------- ..
code:: python if config.actor_rollout_ref.actor.strategy in {"fsdp", "fsdp2"}: # for FSDP backend assert config.critic.strategy in {"fsdp", "fsdp2"} from verl.workers.fsdp_workers import ActorRolloutRefWorker, CriticWorker from verl.single_controller.ray import RayWorkerGroup ray_worker_group_cls = RayWorkerGroup elif config.actor_rollout_ref.actor.strategy == 'megatron': # for Megatron backend assert config.actor_rollout_ref.actor.strategy == config.critic.strategy from verl.workers.megatron_workers import ActorRolloutRefWorker, CriticWorker from verl.single_controller.ray.megatron import NVMegatronRayWorkerGroup ray_worker_group_cls = NVMegatronRayWorkerGroup # Ray worker class for Megatron-LM else: raise NotImplementedError from verl.trainer.ppo.ray_trainer import ResourcePoolManager, Role role_worker_mapping = { Role.ActorRollout: ActorRolloutRefWorker, Role.Critic: CriticWorker, Role.RefPolicy: ActorRolloutRefWorker } global_pool_id = 'global_pool' resource_pool_spec = { global_pool_id: [config.trainer.n_gpus_per_node] * config.trainer.nnodes, } mapping = { Role.ActorRollout: global_pool_id, Role.Critic: global_pool_id, Role.RefPolicy: global_pool_id, } Step 1: Construct the mapping between roles and workers ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A role represents a group of workers in the same process. We have pre-defined several roles in `ray_trainer.py `_. .. code:: python class Role(Enum): """ To create more roles dynamically, you can subclass Role and add new members """ Actor = 0 # This worker only has Actor Rollout = 1 # This worker only has Rollout ActorRollout = 2 # This worker has both actor and rollout, it's a HybridEngine Critic = 3 # This worker only has critic RefPolicy = 4 # This worker only has reference policy RewardModel = 5 # This worker only has reward model ActorRolloutRef = 6 # This worker contains actor, rollout and reference policy simultaneously Step 2: Define the worker class corresponding to this role ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - We have pre-implemented the ``ActorRolloutRefWorker``. Through different configs, it can be a standalone actor, a standalone rollout, an ActorRollout HybridEngine, or an ActorRolloutRef HybridEngine - We also pre-implemented workers for ``Actor``, ``Rollout``, ``Critic``, ``Reward Model`` and ``Reference model`` on two different backends: PyTorch FSDP and Megatron-LM. See `FSDP Workers `_ and `Megatron-LM Workers `_ for more information. Step 3: Define resource pool id and resource pool spec ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - Resource pool is a division of global GPU resources; ``resource_pool_spec`` is a dict, mapping from id to # of GPUs - In the above example, we defined a global resource pool: global_pool_id, and then put all roles on this one resource pool with all the GPUs in this post-training task. This refers to *co-located* placement, where all the models share the same set of GPUs. - See resource pool and placement for advanced usage.
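For instance, beyond the co-located placement above, roles can be mapped to disjoint pools. The snippet below is an illustrative sketch only (the pool names and GPU counts are made up), reusing the ``Role`` and ``resource_pool_spec`` structures shown above.

.. code:: python

   from verl.trainer.ppo.ray_trainer import Role

   # Hypothetical non-colocated placement on two 8-GPU nodes:
   # actor/rollout/ref on one node, critic on the other.
   resource_pool_spec = {
       "actor_pool": [8],   # 8 GPUs on node 0
       "critic_pool": [8],  # 8 GPUs on node 1
   }
   mapping = {
       Role.ActorRollout: "actor_pool",
       Role.RefPolicy: "actor_pool",
       Role.Critic: "critic_pool",
   }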
Defining reward model/function ------------------------------ .. code:: python # we should adopt a multi-source reward function here # - for rule-based rm, we directly call a reward score # - for model-based rm, we call a model # - for code related prompt, we send to a sandbox if there are test cases # - finally, we combine all the rewards together # - The reward type depends on the tag of the data if config.reward_model.enable: from verl.workers.fsdp_workers import RewardModelWorker role_worker_mapping[Role.RewardModel] = RewardModelWorker mapping[Role.RewardModel] = global_pool_id reward_fn = RewardManager(tokenizer=tokenizer, num_examine=0) # Note that we always use function-based RM for validation val_reward_fn = RewardManager(tokenizer=tokenizer, num_examine=1) resource_pool_manager = ResourcePoolManager(resource_pool_spec=resource_pool_spec, mapping=mapping) Since not all tasks use a model-based RM, users need to define here whether it's a model-based RM or a function-based RM. - If it's a model-based RM, directly add the ``RewardModel`` role in the resource mapping and add it to the resource pool mapping. - Note that the pre-defined ``RewardModelWorker`` only supports models with the structure of huggingface ``AutoModelForSequenceClassification``. If your model is not of this type, you need to define your own RewardModelWorker in `FSDP Workers `_ and `Megatron-LM Workers `_. - If it's a function-based RM, users are required to specify the reward function for each dataset. .. code:: python def _select_rm_score_fn(data_source): if data_source == 'openai/gsm8k': return gsm8k.compute_score elif data_source == 'lighteval/MATH': return math.compute_score else: raise NotImplementedError See the reward functions implemented in this `directory `_ for more information. Define, init and run the PPO Trainer ------------------------------------ .. code:: python trainer = RayPPOTrainer(config=config, tokenizer=tokenizer, role_worker_mapping=role_worker_mapping, resource_pool_manager=resource_pool_manager, ray_worker_group_cls=ray_worker_group_cls, reward_fn=reward_fn, val_reward_fn=val_reward_fn) trainer.init_workers() trainer.fit() - We first initialize the ``RayPPOTrainer`` with the user config, tokenizer and all the above worker mappings, resource pools, worker group classes and reward functions - We then call ``trainer.init_workers()`` to initialize the models on the allocated GPUs (in the resource pool) - The actual PPO training will be executed in ``trainer.fit()`` verl can be easily extended to other RL algorithms by reusing the Ray model workers, resource pool and reward functions. See :doc:`extension<../advance/dpo_extension>` for more information. Details of the ``RayPPOTrainer`` are discussed in :doc:`Ray Trainer<../workers/ray_trainer>`. ================================================ FILE: verl_rl/docs/examples/sandbox_fusion_example.rst ================================================ Sandbox Fusion Example ============================ Last updated: 06/27/2025. Introduction ------------ Sandbox Fusion is a remote code sandbox service that provides a secure environment for running and evaluating code generated by Large Language Models (LLMs). This example demonstrates how to train an LLM and use Sandbox Fusion to verify generated code, enhancing both security and performance. By leveraging a remote code sandbox service with greater CPU resources for concurrent code verification, you can reduce the reward stage time by 10-30%, depending on the quality of the generated code.
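Since the service is ultimately just an HTTP endpoint, a quick smoke test (once you have completed the setup steps below) can confirm connectivity before wiring it into training. The payload fields shown here are an assumption based on the Sandbox Fusion documentation, and the URL is a placeholder; consult the documentation for the authoritative request schema.

.. code:: python

   import requests

   # Hypothetical endpoint; replace with your deployed Sandbox Fusion URL.
   resp = requests.post(
       "https://<your-endpoint>/run_code",
       json={"code": "print(1 + 1)", "language": "python"},
       timeout=30,
   )
   print(resp.status_code, resp.json())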
Step 1: Prepare the Dataset --------------------------- We use the Eurus-2-RL-Data dataset for training. This dataset combines math and code questions, making it suitable for LLM training tasks. You can download it from HuggingFace: `Eurus-2-RL-Data Dataset `_. Step 2: Set Up the Sandbox Fusion Service ----------------------------------------- Sandbox Fusion is a remote code sandbox service designed to securely run and evaluate LLM-generated code. To use it: 1. **Access Full Documentation**: For detailed setup instructions, refer to the `Sandbox Fusion Documentation `_. 2. **Deploy the Service**: Choose one of the following deployment methods: - **Local Deployment**: Follow the guide `here `_. - **FaaS Instance (Volcengine)**: Create an instance using the `Volcengine Documentation `_. After deployment, you will receive an API endpoint in the format: ``https:///run_code``. Step 3: Configure the Training Script ------------------------------------- To integrate Sandbox Fusion into your training script, configure the following parameters: **Key Settings for Sandbox Fusion** - ``reward_model.sandbox_fusion.url=''``: Enable Sandbox Fusion by specifying the API endpoint (must end with ``/run_code``). - ``reward_model.sandbox_fusion.max_concurrent=256``: Set the maximum number of concurrent API requests to the Sandbox Fusion service. - ``reward_model.sandbox_fusion.memory_limit_mb=1024``: Set the memory limit (in MB) for each sandbox instance. Defaults to 1024MB if not specified. **Additional Optimization** To further reduce code verification time, enable parallel processing with: - ``reward_model.reward_manager=prime``: The Prime reward manager verifies code across multiple subprocesses concurrently. **Example Script** For a practical implementation, refer to the example script: ``examples/ppo_trainer/run_deepseek7b_llm_sandbox_fusion.sh`` Once you’ve set your API endpoint in the script, you can start the training job. ================================================ FILE: verl_rl/docs/faq/faq.rst ================================================ Frequently Asked Questions ==================================== Last updated: 06/25/2025. Ray related ------------ How to add breakpoint for debugging with distributed Ray? ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Please check out the official debugging guide from Ray: https://docs.ray.io/en/latest/ray-observability/ray-distributed-debugger.html "Unable to register worker with raylet" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ This issue is caused by system settings, e.g., SLURM adding constraints on how the CPUs are shared on a node. While `ray.init()` tries to launch as many worker processes as the number of CPU cores of the machine, these SLURM constraints prevent the `core-workers` from seeing the `raylet` process, leading to the problem. To fix this issue, you can set the config term ``ray_init.num_cpus`` to a number allowed by your system. Distributed training ------------------------ How to run multi-node post-training with Ray?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ You can start a ray cluster and submit a ray job, following the official guide from Ray: https://docs.ray.io/en/latest/ray-core/starting-ray.html Then, in the configuration, set the ``trainer.nnodes`` config to the number of machines for your job. How to use verl on a Slurm-managed cluster? ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Ray provides users with `this `_ official tutorial to start a Ray cluster on top of Slurm. We have verified the :doc:`GSM8K example<../examples/gsm8k_example>` on a Slurm cluster under a multi-node setting with the following steps. 1. [Optional] If your cluster supports `Apptainer or Singularity `_ and you wish to use it, convert verl's Docker image to an Apptainer image. Alternatively, set up the environment with the package manager available on your cluster or use other container runtimes (e.g. through `Slurm's OCI support `_) available to you. .. code:: bash apptainer pull /your/dest/dir/vemlp-th2.4.0-cu124-vllm0.6.3-ray2.10-te1.7-v0.0.3.sif docker://verlai/verl:vemlp-th2.4.0-cu124-vllm0.6.3-ray2.10-te1.7-v0.0.3 2. Follow the :doc:`GSM8K example<../examples/gsm8k_example>` to prepare the dataset and model checkpoints. 3. Modify `examples/slurm/ray_on_slurm.slurm `_ with your cluster's own information. 4. Submit the job script to the Slurm cluster with `sbatch`. Please note that Slurm cluster setups may vary. If you encounter any issues, please refer to Ray's `Slurm user guide `_ for common caveats. If you changed Slurm resource specifications, please make sure to update the environment variables in the job script if necessary. Install related ------------------------ NotImplementedError: TensorDict does not support membership checks with the `in` keyword. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Detailed error information: .. code:: bash NotImplementedError: TensorDict does not support membership checks with the `in` keyword. If you want to check if a particular key is in your TensorDict, please use `key in tensordict.keys()` instead. Cause of the problem: there is no suitable version of the tensordict package for the linux-arm64 platform. You can confirm this as follows: .. code:: bash pip install tensordict==0.6.2 Output example: .. code:: bash ERROR: Could not find a version that satisfies the requirement tensordict==0.6.2 (from versions: 0.0.1a0, 0.0.1b0, 0.0.1rc0, 0.0.2a0, 0.0.2b0, 0.0.3, 0.1.0, 0.1.1, 0.1.2, 0.8.0, 0.8.1, 0.8.2, 0.8.3) ERROR: No matching distribution found for tensordict==0.6.2 Solution 1: Install tensordict from source: .. code:: bash pip uninstall tensordict git clone https://github.com/pytorch/tensordict.git cd tensordict/ git checkout v0.6.2 python setup.py develop pip install -v -e . Solution 2: Temporarily modify the code where the error occurs: tensordict_var -> tensordict_var.keys() Illegal memory access --------------------------------- If you encounter an error message like ``CUDA error: an illegal memory access was encountered`` during rollout, please check the vLLM documentation for troubleshooting steps specific to your vLLM version. Checkpoints ------------------------ If you want to convert the model checkpoint into the huggingface safetensors format, please refer to ``verl/model_merger``.
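After conversion, the output is a standard HuggingFace model directory, so a quick sanity check is to load it back with transformers (the path below is a placeholder):

.. code:: python

   from transformers import AutoModelForCausalLM, AutoTokenizer

   ckpt_dir = "/path/to/merged_hf_model"  # hypothetical output directory
   model = AutoModelForCausalLM.from_pretrained(ckpt_dir)
   tokenizer = AutoTokenizer.from_pretrained(ckpt_dir)
   print(model.config.architectures)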
Triton ``compile_module_from_src`` error ------------------------------------------------ If you encounter a triton compilation error similar to the stacktrace below, please set the ``use_torch_compile`` flag according to https://verl.readthedocs.io/en/latest/examples/config.html to disable just-in-time compilation for fused kernels. .. code:: bash File "/data/lbh/conda_envs/verl/lib/python3.10/site-packages/triton/runtime/jit.py", line 345, in return lambda *args, **kwargs: self.run(grid=grid, warmup=False, *args, **kwargs) File "/data/lbh/conda_envs/verl/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 338, in run return self.fn.run(*args, **kwargs) File "/data/lbh/conda_envs/verl/lib/python3.10/site-packages/triton/runtime/jit.py", line 607, in run device = driver.active.get_current_device() File "/data/lbh/conda_envs/verl/lib/python3.10/site-packages/triton/runtime/driver.py", line 23, in __getattr__ self._initialize_obj() File "/data/lbh/conda_envs/verl/lib/python3.10/site-packages/triton/runtime/driver.py", line 20, in _initialize_obj self._obj = self._init_fn() File "/data/lbh/conda_envs/verl/lib/python3.10/site-packages/triton/runtime/driver.py", line 9, in _create_driver return actives[0]() File "/data/lbh/conda_envs/verl/lib/python3.10/site-packages/triton/backends/nvidia/driver.py", line 371, in __init__ self.utils = CudaUtils() # TODO: make static File "/data/lbh/conda_envs/verl/lib/python3.10/site-packages/triton/backends/nvidia/driver.py", line 80, in __init__ mod = compile_module_from_src(Path(os.path.join(dirname, "driver.c")).read_text(), "cuda_utils") File "/data/lbh/conda_envs/verl/lib/python3.10/site-packages/triton/backends/nvidia/driver.py", line 57, in compile_module_from_src so = _build(name, src_path, tmpdir, library_dirs(), include_dir, libraries) File "/data/lbh/conda_envs/verl/lib/python3.10/site-packages/triton/runtime/build.py", line 48, in _build ret = subprocess.check_call(cc_cmd) File "/data/lbh/conda_envs/verl/lib/python3.10/subprocess.py", line 369, in check_call raise CalledProcessError(retcode, cmd) What is the meaning of train batch size, mini batch size, and micro batch size? ------------------------------------------------------------------------------------------ This figure illustrates the relationship between different batch size configurations. https://excalidraw.com/#json=pfhkRmiLm1jnnRli9VFhb,Ut4E8peALlgAUpr7E5pPCA .. image:: https://github.com/user-attachments/assets/16aebad1-0da6-4eb3-806d-54a74e712c2d How to generate a ray timeline to analyse the performance of a training job? ------------------------------------------------------------------------------------------ To generate the ray timeline file, you can set the config term ``ray_init.timeline_file`` to a json file path. For example: .. code:: bash ray_init.timeline_file=/tmp/ray_timeline.json The file will be generated in the specified path at the end of a training job. You can use tools like chrome://tracing or the Perfetto UI to view the ray timeline file. This figure shows the ray timeline file generated from a training job on 1 node with 4 GPUs. .. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/ray_timeline.png?raw=true How to set a proxy only for wandb? ------------------------------------------------------------------------------------------ If you need a proxy to access wandb, you can add the config below to your training job script.
Compared to using the global https_proxy environment variable, this approach won't mess up other HTTP requests, such as the ChatCompletionScheduler. .. code:: bash +trainer.wandb_proxy=http:// ================================================ FILE: verl_rl/docs/hybrid_flow.rst ================================================ ========================================================= HybridFlow Programming Guide ========================================================= Last updated: 06/02/2025. .. _vermouth: https://github.com/vermouth1992 Author: `Chi Zhang `_ verl is an open source implementation of the paper `HybridFlow `_ [1]_. In this section, we will introduce the basic concepts of HybridFlow, the motivation, and how to program with verl APIs. Motivation and Design ------------------------ We use dataflow to represent RL systems [4]_. DataFlow ~~~~~~~~~~~~~~~~~~~~ Dataflow is an abstraction of computations. Neural network training is a typical dataflow, which can be represented by a computational graph. .. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/dataflow.jpeg?raw=true :alt: The dataflow graph from CS231n 2024 lecture 4 This figure [2]_ represents the computation graph of a polynomial function followed by a sigmoid function. In the dataflow of neural network computation, each node represents an operator, and each edge represents the direction of forward/backward propagation. The computation graph determines the architecture of the neural network. RL as a dataflow problem ++++++++++++++++++++++++++++++++++++++++++++++ Reinforcement learning (RL) training can also be represented as a dataflow. Below is the dataflow graph that represents the PPO algorithm used in RLHF [3]_: .. image:: https://picx.zhimg.com/70/v2-cb8ab5ee946a105aab6a563e92682ffa_1440w.avis?source=172ae18b&biz_tag=Post :alt: PPO dataflow graph, credit to Zhihu 低级炼丹师 However, the dataflow of RL has fundamental differences compared with the dataflow of neural network training, as follows: +--------------------------+--------------------------------------------------+---------------------+ | Workload | Node | Edge | +--------------------------+--------------------------------------------------+---------------------+ | Neural Network Training | Operator (+/-/matmul/softmax) | Tensor movement | +--------------------------+--------------------------------------------------+---------------------+ | Reinforcement Learning | High-level operators (rollout/model forward) | Data Movement | +--------------------------+--------------------------------------------------+---------------------+ In the case of tabular reinforcement learning, each operator is a simple scalar math operation (e.g., Bellman update). In deep reinforcement learning (DRL), each operator is a high-level neural network computation such as model inference/update. This makes RL a two-level dataflow problem: - Control flow: defines how the high-level operators are executed (e.g., in PPO, we first perform rollout; then, we perform advantage computation; finally, we perform training). It expresses the **core logic of RL algorithms**. - Computation flow: defines the dataflow of **neural network computation** (e.g., model forward/backward/optimizer). Design Choices ~~~~~~~~~~~~~~~~~~~~ The model size used in DRL before the LLM era was typically small. Thus, the high-level neural network computation could be done in a single process. This enables embedding the computation flow inside the control flow as a single process.
However, in the LLM era, the computation flow (e.g., training a neural network) becomes a multi-process program. This naturally leads to two design choices: 1. Convert the control flow into a multi-process program as well, then colocate it with the computation flow (unified multi-controller) - Advantages: - Achieves the **optimal performance** under fixed computation flow and control flow, as the communication overhead in both training and data transfer is minimized. - Disadvantages: - The computation and/or control flow is **hard to reuse** from a software perspective, as computation code is coupled with specific controller code. For example, the training loop of PPO is generic. Say we have a PPO training flow implemented with a specific computation flow such as FSDP. Neither the control flow nor the computation flow can be reused if we want to switch the computation flow from FSDP to Megatron, due to the coupling of control and computation flows. - Requires more effort from the user under flexible and dynamic control flows, due to the multi-process nature of the program. 2. Separate the flows: single process for the control flow and multi-process for the computation flow - Advantages: - The computation flow defined elsewhere can be **easily reused** after the decoupling. - The controller runs on a single process. Implementing a new RL algorithm with a **different control flow is simple and easy**. - Disadvantages: - Additional **data communication overhead** each time the controller process and computation processes interact. The data has to be sent back and forth. In verl, the latter strategy with separate control flow and computation flow is adopted. verl is designed to decouple the control flow of RL algorithms and the implementation of computation engines. Overall Execution Diagram ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Below is a simplified diagram denoting the execution of a reinforcement learning job. In the diagram, the controller runs on a single process, while the generator/actor workers and critic workers run on multiple processes, placed on specific resource groups. For rollout, the controller passes the data to the generator to perform sample generation. When the rollout is done, the data is passed back to the controller for the next step of the algorithm. Similar execution is done for the other workers. With the hybrid controller design, the dataflow and computation are decoupled, to provide both efficiency in computation and flexibility in defining algorithm training loops. .. figure:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/driver_worker.png?raw=true :alt: The execution diagram Codebase walkthrough (PPO) ------------------------------------------------ Entry function ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Code: https://github.com/volcengine/verl/blob/main/verl/trainer/main_ppo.py In this file, we define a remote function `main_task` that serves as the controller (driver) process, as shown in the figure above. We also define a ``RewardManager``, where users can customize their reward function based on the data source in the dataset. Note that `RewardManager` should return the final token-level reward that is optimized by RL algorithms, and that users can combine model-based rewards and rule-based rewards. The ``main_task`` constructs a RayPPOTrainer instance and launches the fit function. Note that ``main_task`` **runs as a single process**.
We highly recommend that the ``main_task`` NOT be scheduled on the head of the ray cluster, because ``main_task`` will consume a lot of memory while the head usually has very few resources. Ray trainer ~~~~~~~~~~~~~~~~~~~~ Code: https://github.com/volcengine/verl/blob/main/verl/trainer/ppo/ray_trainer.py The RayPPOTrainer manages - Worker and WorkerGroup construction - Running the main loop of the PPO algorithm Note that the fit function of RayPPOTrainer **runs as a single process**. Worker and WorkerGroup construction ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Each WorkerGroup manages a list of workers that run remotely. Note that the worker group runs in the process of its constructor. Each worker inside the WorkerGroup runs on a GPU. The worker group serves as a proxy for the controller process to interact with a list of workers, in order to perform certain computations. **In order to do so, we have to bind the methods of the worker into the methods of the WorkerGroup and define the data dispatch and data collection**. This is done via simple decoration that will be introduced in the Worker definition section. For example, in PPO, we define 3 worker groups: - ActorRolloutRef: manages actor, rollout and reference policy. ActorRolloutRefWorker can be instantiated as a single actor, a single rollout, a single reference policy, a combined actor/rollout or a combined actor/rollout/ref. This design aims at maximum code reuse in various scenarios. The reason for colocating actor and rollout is fast weight transfer using nccl. The reason for colocating actor and reference is to implement an efficient LoRA PPO, as the reference policy is simply the base model of PPO in LoRA. The colocation is done via ``verl.single_controller.ray.base.create_colocated_worker_cls``, which creates a single ray remote class exposing all class methods from these roles. - Critic: manages the critic model - Reward: manages the reward model The worker group will be constructed on the resource pool it designates. The resource pool is a set of GPUs in the ray cluster. Worker definition ~~~~~~~~~~~~~~~~~~~~ .. _ActorRolloutRefWorker: https://github.com/volcengine/verl/blob/main/verl/workers/fsdp_workers.py We take `ActorRolloutRefWorker `_ as an example. The APIs it should expose to the controller process are: - init_model: build the underlying model - generate_sequences: given prompts, generate responses - compute_log_prob: compute the log-probability of a generated sequence using the actor - compute_ref_log_prob: compute the log-probability of a generated sequence using the reference policy - save_checkpoint: save the checkpoint Note that these methods are defined in the worker and can only be invoked via remote calls. For example, if the controller process wants to initialize the model, it has to call .. code-block:: python for worker in actor_rollout_ref_wg: worker.init_model.remote() If the controller process wants to generate sequences, it has to call ..
code-block:: python data = xxx # split the data into dp chunks data_dp_lst = data.split(dp_size) output_dp_lst = [] for i, worker in enumerate(actor_rollout_ref_wg): output_future = worker.generate_sequences.remote(data_dp_lst[i]) output_dp_lst.append(output_future) output = torch.cat(ray.get(output_dp_lst), dim=0) We observe that a controller process calling worker group methods can, in general, be divided into 3 parts: - Split the data into data-parallel chunks - Dispatch the corresponding data to each worker - Collect and concatenate the data when the computation finishes In verl, we designed syntactic sugar to encapsulate these 3 steps into a single call from the controller process. .. code-block:: python @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO) def generate_sequences(data): ... # on the driver output = actor_rollout_ref_wg.generate_sequences(data) We decorate the method of the worker with ``register``, which explicitly defines how the input data should be split and dispatched to each worker, and how the output data should be collected and concatenated by the controller. For example, ``Dispatch.DP_COMPUTE_PROTO`` splits the input data into dp chunks, dispatches each chunk to a worker, then collects the outputs and concatenates the results. Note that this function requires the input and output to be a DataProto defined here (https://github.com/volcengine/verl/blob/main/verl/protocol.py). PPO main loop ~~~~~~~~~~~~~~~~~~~~ With the aforementioned APIs, we can implement the main loop of PPO as if it were a single-process program .. code-block:: python for prompt in dataloader: output = actor_rollout_ref_wg.generate_sequences(prompt) old_log_prob = actor_rollout_ref_wg.compute_log_prob(output) ref_log_prob = actor_rollout_ref_wg.compute_ref_log_prob(output) values = critic_wg.compute_values(output) rewards = reward_wg.compute_scores(output) # compute_advantages is running directly on the control process advantages = compute_advantages(values, rewards) output = output.union(old_log_prob) output = output.union(ref_log_prob) output = output.union(values) output = output.union(rewards) output = output.union(advantages) # update actor actor_rollout_ref_wg.update_actor(output) # update critic critic_wg.update_critic(output) Takeaways ~~~~~~~~~~~~~~~~~~~~ - This programming paradigm enables users to use different computation backends without modification of the control process. - This programming paradigm enables flexible placement (by changing the mapping of WorkerGroup and ResourcePool) without modification of the control process. Repository organization ------------------------------------------------ Important code files in the repository are organized as below: ..
code-block:: bash verl # the verl package trainer main_ppo.py # the entrypoint for RL training ppo ray_trainer.py # the training loop for RL algorithms such as PPO fsdp_sft_trainer.py # the SFT trainer with FSDP backend config generation.yaml # configuration template for rollout ppo_trainer.yaml # configuration template for the RL trainer workers protocol.py # the interface of DataProto fsdp_workers.py # the FSDP worker interfaces: ActorRolloutRefWorker, CriticWorker, RewardModelWorker megatron_workers.py # the Megatron worker interfaces: ActorRolloutRefWorker, CriticWorker, RewardModelWorker actor dp_actor.py # data parallel actor with FSDP backend megatron_actor.py # nD parallel actor with Megatron backend critic dp_critic.py # data parallel critic with FSDP backend megatron_critic.py # nD parallel critic with Megatron backend reward_model megatron reward_model.py # reward model with Megatron backend rollout vllm vllm_rollout.py # rollout with vllm backend hf_rollout.py # rollout with huggingface TGI backend sharding_manager fsdp_ulysses.py # data and model resharding when using FSDP + ulysses fsdp_vllm.py # data and model resharding when using FSDP + ulysses + vllm megatron_vllm.py # data and model resharding when using Megatron + vllm utils dataset # datasets for SFT/RM/RL reward_score # function based reward gsm8k.py # reward function for gsm8k dataset math.py # reward function for math dataset seqlen_balancing.py # the sequence balance optimization models llama # Megatron implementation for llama, deepseek, mistral, etc transformers # ulysses integration with transformer models such as llama, qwen, etc weight_loader_registry.py # registry of weight loaders for loading hf ckpt into Megatron third_party vllm # adaptor for vllm's usage in RL vllm_spmd # vllm >= v0.7 adaptor examples # example scripts tests # integration and unit tests .github # the configuration of continuous integration tests .. [1] HybridFlow: A Flexible and Efficient RLHF Framework: https://arxiv.org/abs/2409.19256v2 .. [2] Data flow graph credit to CS231n 2024 lecture 4: https://cs231n.stanford.edu/slides/2024/lecture_4.pdf .. [3] PPO dataflow graph credit to 低级炼丹师 from Zhihu: https://zhuanlan.zhihu.com/p/635757674 .. [4] RLFlow ================================================ FILE: verl_rl/docs/index.rst ================================================ Welcome to verl's documentation! ================================================ verl is a flexible, efficient and production-ready RL training framework designed for large language models (LLMs) post-training. It is an open source implementation of the `HybridFlow `_ paper. verl is flexible and easy to use with: - **Easy extension of diverse RL algorithms**: The hybrid programming model combines the strengths of single-controller and multi-controller paradigms to enable flexible representation and efficient execution of complex post-training dataflows, allowing users to build RL dataflows in a few lines of code. - **Seamless integration of existing LLM infra with modular APIs**: Decouples computation and data dependencies, enabling seamless integration with existing LLM frameworks, such as PyTorch FSDP, Megatron-LM, vLLM and SGLang. Moreover, users can easily extend to other LLM training and inference frameworks. - **Flexible device mapping and parallelism**: Supports various placement of models onto different sets of GPUs for efficient resource utilization and scalability across different cluster sizes.
- Ready integration with popular HuggingFace models verl is fast with: - **State-of-the-art throughput**: By seamlessly integrating existing SOTA LLM training and inference frameworks, verl achieves high generation and training throughput. - **Efficient actor model resharding with 3D-HybridEngine**: Eliminates memory redundancy and significantly reduces communication overhead during transitions between training and generation phases. -------------------------------------------- .. _Contents: .. toctree:: :maxdepth: 2 :caption: Quickstart start/install start/quickstart start/multinode start/ray_debug_tutorial start/more_resources start/agentic_rl .. toctree:: :maxdepth: 2 :caption: Programming guide hybrid_flow single_controller .. toctree:: :maxdepth: 1 :caption: Data Preparation preparation/prepare_data preparation/reward_function .. toctree:: :maxdepth: 2 :caption: Configurations examples/config .. toctree:: :maxdepth: 1 :caption: PPO Example examples/ppo_code_architecture examples/gsm8k_example examples/multi_modal_example .. toctree:: :maxdepth: 1 :caption: Algorithms algo/ppo.md algo/grpo.md algo/dapo.md algo/spin.md algo/sppo.md algo/entropy.md algo/opo.md algo/baseline.md algo/gpg.md .. toctree:: :maxdepth: 1 :caption: PPO Trainer and Workers workers/ray_trainer workers/fsdp_workers workers/megatron_workers workers/sglang_worker .. toctree:: :maxdepth: 1 :caption: Performance Tuning Guide perf/dpsk.md perf/perf_tuning README_vllm0.8.md perf/device_tuning perf/nsight_profiling.md .. toctree:: :maxdepth: 1 :caption: Adding new models advance/fsdp_extension advance/megatron_extension .. toctree:: :maxdepth: 1 :caption: Advanced Features advance/checkpoint advance/rope advance/ppo_lora.rst sglang_multiturn/multiturn.rst sglang_multiturn/interaction_system.rst advance/placement advance/dpo_extension examples/sandbox_fusion_example advance/rollout_trace.rst advance/one_step_off advance/agent_loop .. toctree:: :maxdepth: 1 :caption: Hardware Support amd_tutorial/amd_build_dockerfile_page.rst amd_tutorial/amd_vllm_page.rst ascend_tutorial/ascend_quick_start.rst ascend_tutorial/ascend_profiling.rst ascend_tutorial/ascend_profiling_en.rst .. toctree:: :maxdepth: 1 :caption: API References api/data api/single_controller.rst api/trainer.rst api/utils.rst .. toctree:: :maxdepth: 2 :caption: FAQ faq/faq .. toctree:: :maxdepth: 1 :caption: Development Notes sglang_multiturn/sandbox_fusion.rst Contribution ------------- verl is free software; you can redistribute it and/or modify it under the terms of the Apache License 2.0. We welcome contributions. Join us on `GitHub `_, `Slack `_ and `Wechat `_ for discussions. Contributions from the community are welcome! Please check out our `project roadmap `_ and `good first issues `_ to see where you can contribute. Code Linting and Formatting ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ We use pre-commit to help improve code quality. To initialize pre-commit, run: .. code-block:: bash pip install pre-commit pre-commit install To resolve CI errors locally, you can also manually run pre-commit by: .. code-block:: bash pre-commit run Adding CI tests ^^^^^^^^^^^^^^^^^^^^^^^^ If possible, please add CI test(s) for your new feature: 1. Find the most relevant workflow yml file, which usually corresponds to a ``hydra`` default config (e.g. ``ppo_trainer``, ``ppo_megatron_trainer``, ``sft_trainer``, etc). 2. Add related path patterns to the ``paths`` section if not already included. 3. Minimize the workload of the test script(s) (see existing scripts for examples). We are HIRING! 
Send us an `email `_ if you are interested in internship/FTE opportunities in MLSys/LLM reasoning/multimodal alignment. ================================================ FILE: verl_rl/docs/perf/device_tuning.rst ================================================ Hardware Resource Needed for RL =============================== Last updated: 06/25/2025. Since RL requires more resources compared to regular training, determining how many resources are needed to successfully run it before training is a relatively difficult task. To provide more people with reference points for resource selection when dealing with different models and tasks, this section is mainly dedicated to introducing the environmental requirements based on experiments we have conducted. However, due to limited staff and equipment resources, we also hope for more contributions from the open-source community. When submitting a PR, it is necessary to provide a script to be added to the examples/tuning scripts. We need two types of scripts: one is the configuration that can run with the **minimum resources (min)**, and the other is the configuration that runs with **recommended resources (recommended)**. The former can be understood as a script that can run after applying all memory optimization techniques (e.g., offload, gradient checkpointing). The latter can be understood as a script that can run while avoiding operations that incur additional time overhead as much as possible (targeting best throughput). When defining script names, please follow this format: ``[model]_[task]_[gpunums]_[device]_[train]_[infer].sh``. This will effectively improve the script's recognizability. You can place the script under the ``examples/tuning/`` directory. If you happen to have a configuration that has already been tested, we welcome you to submit a PR and include a screenshot from Wandb or other verifiable evidence. ---------------------------------------- 0.5B ~~~ .. list-table:: :widths: auto :header-rows: 1 * - Tag - Model - Task - Resource - MaxBatch - Train - Infer - Link - Contributor * - MIN - Qwen2.5-0.5B - GRPO-LoRA - 1*H100 - 116 - fsdp - vllm0.8.3 - `qwen2-0.5b_grpo-lora_1_h100_fsdp_vllm.sh `_ - `SimonHuang `_ 1.5B ~~~ .. list-table:: :widths: auto :header-rows: 1 * - Tag - Model - Task - Resource - MaxBatch - Train - Infer - Link - Contributor * - MIN - Qwen2.5-1.5B - GRPO-LoRA - 1*H100 - 128 - fsdp - vllm0.8.3 - `qwen2-1.5b_grpo-lora_1_h100_fsdp_vllm.sh `_ - `SimonHuang `_ 3B ~~~ .. list-table:: :widths: auto :header-rows: 1 * - Tag - Model - Task - Resource - MaxBatch - Train - Infer - Link - Contributor * - MIN - Qwen2.5-3B - GRPO-LoRA - 1*H100 - 62 - fsdp - vllm0.8.3 - `qwen2-3b_grpo-lora_1_h100_fsdp_vllm.sh `_ - `SimonHuang `_ 7B ~~~ .. list-table:: :widths: auto :header-rows: 1 * - Tag - Model - Task - Resource - MaxBatch - Train - Infer - Link - Contributor * - MIN - Qwen2-7B - GRPO - 2*H800 - \ - fsdp - vllm0.8.2 - `qwen2-7b_grpo_2_h800_fsdp_vllm `_ - `Xiangyongan `_ * - MIN - Qwen2.5-7B - GRPO-LoRA - 1*H100 - 16 - fsdp - vllm0.8.3 - `qwen2-7b_grpo-lora_1_h100_fsdp_vllm.sh `_ - `SimonHuang `_ 14B ~~~ .. list-table:: :widths: auto :header-rows: 1 * - Tag - Model - Task - Resource - MaxBatch - Train - Infer - Link - Contributor * - MIN - Qwen2-14B - GRPO - 4*H800 - \ - fsdp - vllm0.8.2 - `qwen2-14b_grpo_4_h800_fsdp_vllm `_ - `Xiangyongan `_ * - MIN - Qwen2.5-14B - GRPO-LoRA - 2*H100 - 116 - fsdp - vllm0.8.3 - `qwen2-14b_grpo-lora_2_h100_fsdp_vllm.sh `_ - `SimonHuang `_ 32B ~~~ ..
list-table:: :widths: auto :header-rows: 1 * - Tag - Model - Task - Resource - MaxBatch - Train - Infer - Link - Contributor * - MIN - Qwen2-32B - GRPO - 8*H20 - \ - megatron - vllm0.8.2 - `qwen2-32b_grpo_8_h20_megatron_vllm `_ - `Xiangyongan `_ * - MIN - Qwen2.5-32B - GRPO-LoRA - 4*H100 - 180 - fsdp - vllm0.8.3 - `qwen2-32b_grpo-lora_4_h100_fsdp_vllm.sh `_ - `SimonHuang `_ 70B ~~~ .. list-table:: :widths: auto :header-rows: 1 * - Tag - Model - Task - Resource - MaxBatch - Train - Infer - Link - Contributor * - MIN - Qwen2-70B - GRPO - 32*H20 - \ - fsdp - vllm0.8.2 - `qwen2-70b_grpo_32_h20_fsdp_vllm `_ - `Xiangyongan `_ * - MIN - Qwen2-70B - GRPO - 32*H800 - \ - fsdp - vllm0.8.3 - `qwen2-70b_grpo_32_h800_fsdp_vllm `_ - `Xiangyongan `_ * - MIN - Qwen2.5-72B - GRPO-LoRA - 8*H100 - 176 - fsdp - vllm0.8.3 - `qwen2-72b_grpo-lora_8_h100_fsdp_vllm.sh `_ - `SimonHuang `_ 405B ~~~~ .. table:: :widths: auto ====== ====== ====== ======== ======== ====== ====== ====== tag model task resource MaxBatch train infer link ====== ====== ====== ======== ======== ====== ====== ====== \ \ \ \ \ \ \ ====== ====== ====== ======== ======== ====== ====== ====== 671B ~~~~ .. table:: :widths: auto ====== ====== ====== ======== ======== ====== ====== ====== tag model task resource MaxBatch train infer link ====== ====== ====== ======== ======== ====== ====== ====== \ \ \ \ \ \ \ ====== ====== ====== ======== ======== ====== ====== ====== ================================================ FILE: verl_rl/docs/perf/dpsk.md ================================================ # Training DeepSeek 671b Last updated: 06/13/2025. verl integrates Megatron to support large MoE models such as `Qwen3-235B-A22B` and `deepseek-ai/DeepSeek-V3`. This is an ongoing community effort. In the journey the community added the following features and optimizations that enable verl with larger models: - per tensor weight resharding between rollout and training - context parallelism and expert parallelism enabled via megatron - dynamic batch size (sequence balance) for megatron - reduced ray-related serialization overhead - optimizer offloading, recomputation, and efficient kernels - various debugging metrics and utils and the megatron backend now has a wider list of models supported: - DeepSeek-V3 - Moonlight - Qwen3 - Qwen2.5-VL (to be merged soon) - Qwen2 - Mixtral ## Getting Started ### DeepSeek 671b The recommended image with pre-built megatron dependency is `whatcanyousee/verl:ngc-cu124-vllm0.8.5-sglang0.4.6.post5-mcore0.12.2-te2.3-deepseekv3`, built with the Dockerfile in [docker/Dockerfile.vllm.sglang.megatron.deepseek](https://github.com/volcengine/verl/blob/main/docker/Dockerfile.vllm.sglang.megatron.deepseek). For checkpoint loading, we rely on megatron dist-ckpt for resharding. A converted dist-ckpt for DeepSeek-V3 is available from [huggingface BearBiscuit05/dpsk-v3-671B-BF16-dist_ckpt](https://huggingface.co/BearBiscuit05/dpsk-v3-671B-BF16-dist_ckpt/tree/main). To run end-to-end training on the DAPO dataset, run [recipe/dapo/test_dapo_dspk_671b_megatron.sh](https://github.com/volcengine/verl/blob/main/recipe/dapo/test_dapo_dspk_671b_megatron.sh). It runs on 512 H20(96GB) GPUs with the following setup: - vllm rollout with TP=32, bfloat16 - megatron training with attention DP, MoE EP=32, PP=16, bfloat16 MTP is disabled during RL training. 
### Qwen3 236b For Qwen3-236b, please refer to [examples/grpo_trainer/run_qwen3-236b_megatron.sh](https://github.com/volcengine/verl/blob/main/examples/grpo_trainer/run_qwen3-236b_megatron.sh), which runs on 128 H20(96GB) GPUs. ## Upcoming Optimizations The community continues to optimize large MoE models further; ongoing efforts include: - further optimizing memory consumption, and providing recommended/tuned configurations for various machine types - optimizing long context RL training performance - performance improvement with SGLang x Megatron We invite the community to try and improve verl together. Get connected with us on [slack](https://join.slack.com/t/verlgroup/shared_invite/zt-2w5p9o4c3-yy0x2Q56s_VlGLsJ93A6vA)/[wechat](https://raw.githubusercontent.com/eric-haibin-lin/verl-community/refs/heads/main/WeChat.JPG)/[Github issues](https://github.com/volcengine/verl/issues/708)! ## Acknowledgement @vermouth1992 @ISEEKYAN @ETOgaosion @yzlnew @ShareLer @BearBiscuit05 @ccclyu @ann-qin-lu @SwordFaith @zzong2006 @zhaochenyang20 @ocss884 @eric-haibin-lin ================================================ FILE: verl_rl/docs/perf/nsight_profiling.md ================================================ # NVIDIA Nsight Systems profiling in verl Last updated: 06/20/2025. This guide explains how to use NVIDIA Nsight Systems for profiling verl training runs. ## Configuration Profiling in verl can be configured through several parameters in the trainer configuration file (ppo_trainer.yaml or other files like dapo_trainer.yaml): ### Prerequisites The Nsight Systems version is important; please reference `docker/Dockerfile.vllm.sglang.megatron` for the version we used. ### Global profiling control verl has one single controller process and multiple worker processes. Both the controller and worker processes can be profiled. Since the controller process can be executed on any node in the cluster, a message is printed in the log to indicate the controller process's node hostname and process id. In `trainer`, three new config entries control the profiler behaviors: * **`trainer.profile_steps`**. List of step numbers at which profiling should be performed. For example: [1, 2, 5] will profile steps 1, 2, and 5. ``null`` means no profiling. * **`controller_nsight_options`**. This config group is for the single controller. All fields in this config group will be passed to Nsight Systems when Ray starts the controller process. `ppo_trainer.yaml` provides a workable example. Users can reference the [Nsight Systems manual](https://docs.nvidia.com/nsight-systems/UserGuide/index.html) and the [Ray user guide](https://docs.ray.io/en/latest/ray-observability/user-guides/profiling.html) for more details. * **`worker_nsight_options`**. This config group is for the worker processes. Similarly, all fields in this config group will be passed to Nsight Systems when Ray starts the worker processes. The capture range is used to control when the profiler starts and stops, so `capture-range: "cudaProfilerApi"` is fixed; do not change it. Users can change `capture-range-end` with some accurate calculation, or just leave it `null`. ### Worker process profiling verl manages multiple RL roles, _Actor_, _Ref_, _Rollout_, _Critic_, _Reward_, which are implemented in different Worker classes. These workers can be combined into one Ray Actor, running in a process group. Each RL role has its own profiling config group, `profiler`, which consists of three fields: * **`all_ranks` and `ranks`**.
When `all_ranks` is set to `True`, all ranks will be profiled; when set to `False`, the ranks listed in `ranks` will be profiled. By default, verl profiles the whole training process in a series of `worker_process_<PID>.<RID>.nsys-rep` files, one for each process rank. PID is the process ID; RID is the capture range ID. * **`discrete`**. When set to `False`, all the roles' actions in one training step will be dumped into one database. When set to `True`, the actions annotated by `DistProfiler.annotate` will be dumped into a discrete database. In this case, each role's action occupies one `<RID>`. * **`actor_rollout_ref`**. This Worker can be configured to contain at most 3 roles, which execute together. So `actor_rollout_ref` has a `profiler` config and all the inside roles inherit it. * **verl collocate mode**. verl can combine two Worker subclasses into one Worker Actor. In this case, the user should take care that the combined Workers have a consistent `discrete` setting. The Nsight Systems profiler uses a `torch.cuda.profiler.start()` and `stop()` pair to dump a `<RID>` database either way. ### Where to find the profiling data By default the `*.nsys-rep` files are saved in the directory `/tmp/ray/session_latest/logs/nsight/` at each node. According to the Ray manual, this default directory is not changeable: ["however, Ray preserves the `--output` option of the default config"](https://docs.ray.io/en/latest/ray-observability/user-guides/profiling.html). Some users may find this inconvenient, but it is understandable: Ray may start hundreds of processes, and saving the files in one central place would put heavy pressure on the network file system. ## Usage Example To enable profiling for specific components and steps, modify your ppo_trainer.yaml like this: ### Disable profiler ```yaml trainer: profile_steps: null # disable profile ```
Utilize Ulysses Sequence Parallel for Long Context Training 6. LigerKernel for SFT performance optimization 7. Forward prefetch in FSDP training backend 8. Memory optimization for entropy calculation from logits Rollout Generation Tuning -------------------------- verl currently supports two rollout backends: vLLM and TGI (with SGLang support coming soon). Below are key factors for tuning vLLM-based rollout. Before tuning, we recommend setting ``actor_rollout_ref.rollout.disable_log_stats=False`` so that rollout statistics are logged. - Increase ``gpu_memory_utilization``. - For vLLM v0.7.0 and later, the vLLM instance will only use gpu_memory_utilization of the **total** memory. - For SGLang, it's the fraction of the free GPU memory used for **static** memory like model weights and KV cache. However, the remaining (1-gpu_memory_utilization) will also be used during inference. However, if model parameters and optimizer states are not offloaded, using too high a fraction can lead to OOM. A value between 0.5 and 0.7 often strikes a good balance between high throughput and avoiding OOM. Note: since the definition of ``gpu_memory_utilization`` varies across inference engines, a value that works well for one engine may cause OOM for another. - Adjust ``max_num_seqs`` or ``max_num_batched_tokens``. If the GPU cache utilization is relatively low in the log, increasing ``max_num_seqs`` or ``max_num_batched_tokens`` can enlarge the effective batch size in the decoding stage, allowing more concurrent requests per batch. We recommend setting ``max_num_batched_tokens > 2048`` for higher throughput. - Use a smaller ``tensor_parallel_size``. When GPU resources allow, a smaller tensor parallel size spawns more vLLM replicas. Data parallelism (DP) can yield higher throughput than tensor parallelism (TP), but it also increases KVCache consumption. Carefully balance the trade-off between more replicas and higher memory usage. Our experiments in Sec. 8.4 of the `HybridFlow paper `_ evaluate this trade-off. More tuning details, such as dealing with preemption and chunked-prefill, can be found in the `vLLM official tuning guide `_. For optimal performance, we recommend using vLLM v0.8.3 or later. See https://github.com/volcengine/verl/blob/main/docs/README_vllm0.8.md for details. Enable remove padding (sequence packing) ----------------------------------------- Currently, for llama, mistral, gemma1 and qwen based models, users can enable `use_remove_padding=True` to utilize the sequence packing implementation provided by the transformers library. For other models, the transformers library may also support it, but we haven't tested it yet. Users can add the desired model config to the `test_transformer.py `_ file and test its functionality by running the following command: .. code-block:: bash pytest -s tests/models/test_transformer.py If the test passes, you can add your desired model into the model `registry.py `_ file. Then, you can enjoy the performance boost of sequence packing, and you are welcome to PR your tested model to verl! Batch Size Tuning ----------------- To achieve higher throughput in experience preparation (i.e., model fwd) and model update (i.e., actor/critic fwd/bwd), users may need to tune the ``*micro_batch_size_per_gpu`` for different computations. In verl, the core principle for setting batch sizes is: - **Algorithmic metrics** (train batch size, PPO mini-batch size) are *global* (from a single-controller perspective), normalized in each worker. See the `normalization code `_. - **Performance-related parameters** (micro batch size, max token length for dynamic batch size) are *local* parameters that define the per-GPU data allocations. See the `normalization code `_. .. note:: In your training script, please use ``*micro_batch_size_per_gpu`` instead of ``*micro_batch_size``, so that you don't need to consider the normalization of ``micro_batch_size``; ``micro_batch_size`` will be deprecated.
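To make the global-vs-local distinction concrete, the arithmetic below walks through assumed example values (all numbers are illustrative, not recommendations):

.. code:: python

   # Global settings (single-controller view):
   n_gpus = 8                          # trainer.n_gpus_per_node * trainer.nnodes
   ppo_mini_batch_size = 256           # global; normalized across workers
   # Local (per-GPU) setting:
   ppo_micro_batch_size_per_gpu = 16

   per_gpu_mini_batch = ppo_mini_batch_size // n_gpus                      # 32 samples
   grad_accum_steps = per_gpu_mini_batch // ppo_micro_batch_size_per_gpu   # 2 steps
   print(per_gpu_mini_batch, grad_accum_steps)

Under these assumed values, each PPO update processes 32 samples per GPU in 2 gradient-accumulation micro-steps.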
Batch Size Tuning
-----------------

To achieve higher throughput in experience preparation (i.e., model fwd) and model update (i.e., actor/critic fwd/bwd), users may need to tune ``*micro_batch_size_per_gpu`` for the different computations.

In verl, the core principle for setting batch sizes is:

- **Algorithmic metrics** (train batch size, PPO mini-batch size) are *global* (from a single-controller perspective) and are normalized in each worker. See the `normalization code `_.

- **Performance-related parameters** (micro batch size, max token length for dynamic batch size) are *local* parameters that define the per-GPU data allocation. See the `normalization code `_.

.. note:: In your training script, please use ``*micro_batch_size_per_gpu`` instead of ``*micro_batch_size``. This way you don't need to handle the normalization of ``micro_batch_size``, which will be deprecated.
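To make the global/local distinction concrete, here is a small arithmetic sketch with made-up numbers (hypothetical values for illustration, not verl defaults):

.. code-block:: python

    # Hypothetical numbers for illustration only.
    world_size = 8

    # Global (algorithmic) values, as seen from the single controller:
    train_batch_size = 1024      # prompts sampled per PPO iteration
    ppo_mini_batch_size = 256    # prompts per gradient update

    # Global values are normalized per worker:
    per_gpu_mini_batch = ppo_mini_batch_size // world_size  # 32 samples per GPU

    # Local (performance) value, already per GPU -- no normalization:
    ppo_micro_batch_size_per_gpu = 4
    grad_accum_steps = per_gpu_mini_batch // ppo_micro_batch_size_per_gpu  # 8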
Batch Size Tuning tips
""""""""""""""""""""""

Therefore, users may need to tune ``*micro_batch_size_per_gpu`` to accelerate training. Here are some tips:

1. **Enable gradient checkpointing**: Set ``actor_rollout_ref.model.enable_gradient_checkpointing=True`` and ``critic.model.enable_gradient_checkpointing=True``. This often allows for larger micro-batch sizes and is beneficial for large mini-batch training.

2. **Increase** ``*micro_batch_size_per_gpu`` as much as possible, until it equals the normalized ``mini_batch_size``.

3. **Use larger forward-only parameters**: Forward-only parameters, such as ``actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu``, ``actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu``, and ``critic.forward_micro_batch_size_per_gpu``, can be larger (e.g., 2x) than training-related micro batch sizes, such as ``actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu`` and ``critic.ppo_micro_batch_size_per_gpu``.

4. **Allow larger micro-batch sizes for Critic and Reward models**: The micro batch size of the Critic and Reward models can be larger than that of the Actor model, because the actor model has a much larger vocab size in the final layer.

5. **Enable activation offloading**: Set ``actor_rollout_ref.model.enable_activation_offload=True`` and ``critic.model.enable_activation_offload=True``. This often works together with gradient checkpointing to allow larger micro-batch sizes, and it is currently only available in the FSDP backend.

Tuning for Dynamic Batch Size
-----------------------------

Dynamic batch size is a technique that lets the model process a similar number of tokens in each forward pass (with varying actual batch sizes). This can significantly improve training efficiency and reduce memory usage.

To utilize this technique, users can set ``use_dynamic_bsz=True`` in the actor, ref, critic and reward models. With ``use_dynamic_bsz=True``, users don't need to tune ``*micro_batch_size_per_gpu``. Instead, they should tune the following parameters:

- ``actor_rollout_ref.actor.ppo_max_token_len_per_gpu`` and ``critic.ppo_max_token_len_per_gpu``: the maximum number of tokens processed in the fwd and bwd passes of ``update_policy`` and ``update_critic``.

- ``actor_rollout_ref.ref.log_prob_max_token_len_per_gpu`` and ``actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu``: the maximum number of tokens processed in the fwd computation of ``compute_log_prob`` and ``compute_ref_log_prob``.

- ``critic.forward_micro_batch_size_per_gpu`` and ``reward_model.forward_micro_batch_size_per_gpu``: the maximum number of tokens processed in the fwd computation of ``compute_values`` and ``compute_rm_score``.

Dynamic Batch Size Tuning tips
""""""""""""""""""""""""""""""

Here are some tips to tune the above parameters:

1. **Increase** ``actor_rollout_ref.actor.ppo_max_token_len_per_gpu``: Make it at least 2 x (max_prompt_length + max_response_length). We set it to 3x in `run_qwen2-7b_rm_seq_balance.sh `_. Try to increase it further to get higher throughput (see the worked example after this list).

2. **Forward-only parameters can be larger**: Similar to the non-dynamic-batch scenario, forward-only token limits can exceed those used in forward/backward operations.

3. **Use larger limits for Critic and Reward models**: Critic and Reward token limits can be set to at least 2x the Actor's limits, i.e., :math:`\text{critic.ppo\_max\_token\_len\_per\_gpu} = 2 \times \text{actor.ppo\_max\_token\_len\_per\_gpu}`. For instance, we set them to 4x here: `run_qwen2-7b_rm_seq_balance.sh `_.
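As a worked example for tip 1 (hypothetical lengths, not defaults), the token budget would be derived like this:

.. code-block:: python

    # Hypothetical sequence lengths for illustration.
    max_prompt_length = 1024
    max_response_length = 3072
    seq_len = max_prompt_length + max_response_length   # 4096 tokens per sample

    # The 3x budget used in the example script (2x is the recommended minimum):
    ppo_max_token_len_per_gpu = 3 * seq_len              # 12288

    # With dynamic batching, each micro-batch then holds roughly
    # ppo_max_token_len_per_gpu // seq_len = 3 full-length samples per GPU.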
Ulysses Sequence Parallel for Long Context Training
----------------------------------------------------

To utilize this technique, users can set ``ulysses_sequence_parallel_size>1`` in the actor, ref, critic and reward models. Different models may use different ``ulysses_sequence_parallel_size`` values.

To train on long sequences (>32k), users may need to decrease ``*micro_batch_size_per_gpu`` and ``*max_token_len_per_gpu`` to avoid OOM.

LigerKernel for SFT
--------------------

LigerKernel is a high-performance kernel for Supervised Fine-Tuning (SFT) that can improve training efficiency. To enable LigerKernel in your SFT training:

1. Install liger-kernel via ``pip3 install liger-kernel``. In your SFT configuration file (e.g., ``verl/trainer/config/sft_trainer.yaml``), set the ``use_liger`` parameter:

   .. code-block:: yaml

       model:
         use_liger: True  # Enable LigerKernel for SFT

2. The default value is ``False``. Enable it only when you want to use LigerKernel's optimizations.

3. LigerKernel is particularly useful for improving training performance in SFT scenarios.

Forward prefetch in FSDP training backend
------------------------------------------

During the training phase, users can enable forward prefetching in FSDP by setting ``fsdp_config.forward_prefetch=True``, for example, ``actor_rollout_ref.actor.fsdp_config.forward_prefetch=True``. This configuration prefetches the next forward-pass all-gather operation before the current forward computation completes, overlapping communication with computation and improving efficiency. For further details, refer to the `FSDP forward_prefetch `_ documentation.

.. note:: Backward prefetch is unsupported because the ``BACKWARD_POST`` policy may prefetch incorrectly in nested-module cases. For details, see the `FSDP documentation `_.

Migrating to FSDP2
-------------------

FSDP2 offers notable improvements over FSDP1. According to `PyTorch TorchTitan benchmarks `_:

- 7% lower GPU memory usage on average
- 1.5% throughput improvement with BF16 training
- Better composability with DTensor and per-parameter sharding

**Enabling FSDP2 in VERL:**

.. code-block:: python

    # Enable FSDP2 in actor configuration
    actor_rollout_ref.actor.strategy="fsdp2"

.. note:: FSDP2 requires PyTorch 2.1+ and is recommended for models with transformer architecture.

Memory optimization for entropy calculation from logits
---------------------------------------------------------

The ``logits`` tensor (typically of shape ``[bsz*seq_len, voc]``) can consume significant memory. When using ``compute_entropy_from_logits``, memory usage reaches approximately ``[bsz*seq_len, voc] x (4 bytes (float32) + 2 bytes (autocast for softmax+logsumexp) + 1 byte (softmax output))``. To reduce this memory peak, enable chunked computation by setting ``actor_rollout_ref.ref.entropy_from_logits_with_chunking = True``. This processes the tensor in chunks of shape ``[chunk_size, voc]`` (e.g., 2048) rather than the full sequence length, exclusively during the model's forward pass.
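The following PyTorch sketch illustrates the idea behind chunked entropy computation (a simplified illustration, not verl's actual implementation): only one ``[chunk_size, voc]`` slice of intermediate buffers is alive at a time, instead of the full ``[bsz*seq_len, voc]`` temporaries.

.. code-block:: python

    import torch

    def entropy_from_logits_chunked(logits: torch.Tensor, chunk_size: int = 2048) -> torch.Tensor:
        """Entropy H = logsumexp(logits) - sum(softmax(logits) * logits), computed chunk by chunk."""
        flat = logits.reshape(-1, logits.size(-1))           # [bsz*seq_len, voc]
        out = torch.empty(flat.size(0), dtype=torch.float32, device=flat.device)
        for start in range(0, flat.size(0), chunk_size):
            chunk = flat[start:start + chunk_size].float()   # temporaries sized [chunk, voc]
            p = torch.softmax(chunk, dim=-1)
            out[start:start + chunk_size] = torch.logsumexp(chunk, dim=-1) - (p * chunk).sum(-1)
        return out.reshape(logits.shape[:-1])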
Additionally, during training, standard gradient checkpointing (``enable_gradient_checkpointing=True``) does not apply to the entropy calculation. To reduce memory peaks in this context, set ``actor_rollout_ref.actor.entropy_checkpointing = True``. This enables recomputation specifically for the entropy calculation, lowering memory usage during training.

================================================
FILE: verl_rl/docs/preparation/prepare_data.rst
================================================

Prepare Data for Post-Training
========================================

Last updated: 02/09/2025.

Before starting the post-training job, we need to prepare the data for policy training. The data should be stored in parquet format.

We provide several data preprocessing scripts for different datasets, including GSM8K, MATH, HellaSwag, and Full_hh_rlhf. To prepare other datasets, follow these steps. The data preprocessing script can be divided into two parts:

1. The first part is the common part, which loads the dataset from huggingface's ``datasets`` package, preprocesses it with ``make_map_fn``, and then stores it in parquet format.

.. code:: python

   import re
   import os
   import datasets
   from verl.utils.hdfs_io import copy, makedirs
   import argparse

   # To extract the solution for each prompt in the dataset
   # def extract_solution(solution_str):
   # ...

   if __name__ == '__main__':
       parser = argparse.ArgumentParser()
       parser.add_argument('--local_dir', default='/opt/tiger/gsm8k')
       parser.add_argument('--hdfs_dir', default=None)
       args = parser.parse_args()

       num_few_shot = 5
       data_source = 'openai/gsm8k'

       dataset = datasets.load_dataset(data_source, 'main')
       train_dataset = dataset['train']
       test_dataset = dataset['test']

       # Construct a `def make_map_fn(split)` for the corresponding datasets.
       # ...

       train_dataset = train_dataset.map(function=make_map_fn('train'), with_indices=True)
       test_dataset = test_dataset.map(function=make_map_fn('test'), with_indices=True)

       local_dir = args.local_dir
       hdfs_dir = args.hdfs_dir

       train_dataset.to_parquet(os.path.join(local_dir, 'train.parquet'))
       test_dataset.to_parquet(os.path.join(local_dir, 'test.parquet'))

       makedirs(hdfs_dir)
       copy(src=local_dir, dst=hdfs_dir)

2. The users are required to implement the ``make_map_fn()`` function (as well as ``extract_solution``) on their own to support different datasets or tasks.

We have already implemented the data preprocessing for the GSM8K, MATH, HellaSwag and Full_hh_rlhf datasets, and we take the GSM8K dataset as an example:

**GSM8K**

In ``make_map_fn``, each data item should consist of the following 5 fields:

1. ``data_source``: The name of the dataset, used to index the corresponding reward function in the ``RewardModule``.
2. ``prompt``: This field should be constructed in the format of the huggingface chat_template. The tokenizer in ``RLHFDataset`` will apply the chat template and tokenize the prompt.
3. ``ability``: Defines the task category.
4. ``reward_model``: Currently, we only utilize the ``ground_truth`` field during evaluation. The ``ground_truth`` is computed by the ``extract_solution`` function. **Note** that the implementation of the corresponding reward function must align with this extracted ``ground_truth``.
5. ``extra_info``: Records some information about the current prompt. Not used for now.

.. code:: python

   def extract_solution(solution_str):
       solution = re.search("#### (\\-?[0-9\\.\\,]+)", solution_str)  # extract the solution after ####
       assert solution is not None
       final_solution = solution.group(0)
       final_solution = final_solution.split('#### ')[1].replace(',', '')
       return final_solution

   instruction_following = "Let's think step by step and output the final answer after \"####\"."

   # add a row to each data item that represents a unique id
   def make_map_fn(split):

       def process_fn(example, idx):
           question = example.pop('question')
           question = question + ' ' + instruction_following
           answer = example.pop('answer')
           solution = extract_solution(answer)
           data = {
               "data_source": data_source,
               "prompt": [{
                   "role": "user",
                   "content": question
               }],
               "ability": "math",
               "reward_model": {
                   "style": "rule",
                   "ground_truth": solution
               },
               "extra_info": {
                   'split': split,
                   'index': idx
               }
           }
           return data

       return process_fn

================================================
FILE: verl_rl/docs/preparation/reward_function.rst
================================================

Implement Reward Function for Dataset
======================================

Last updated: 06/02/2025.

For each dataset, we need to implement a reward function or utilize a reward model to compute the rewards for the generated responses. We have pre-implemented some reward functions in the `reward_score directory `_, and you can also use customized reward functions.

Currently, we support reward functions for the GSM8K and MATH datasets. For RLHF datasets (e.g., full_hh_rlhf) and code generation (e.g., APPS), we utilize a reward model and SandBox (to be open-sourced soon) for evaluation, respectively.

RewardManager
-------------

In the entrypoint of the PPO post-training script `main_ppo.py `_, we implement a ``RewardManager`` that utilizes the pre-implemented reward functions to compute the score for each response. In the ``RewardManager``, we implemented a ``__call__`` function to compute the score for each response. All the reward functions are executed by ``compute_score_fn``. The input is a ``DataProto``, which includes:

- ``input_ids``, ``attention_mask``: ``input_ids`` and ``attention_mask`` after applying the chat_template, including prompt and response.
- ``responses``: response tokens.
- ``ground_truth``: The ground truth string of the current prompt, stored in the ``non_tensor_batch`` of the ``DataProto``; it should be preprocessed into the parquet files.
- ``data_source``: The dataset name of the current prompt, stored in the ``non_tensor_batch`` of the ``DataProto``; it should be preprocessed into the parquet files.

After detokenizing the responses, the response string and the ground truth string are passed to ``compute_score_fn`` to compute the score for each response.

Reward Functions
----------------

Pre-implemented
~~~~~~~~~~~~~~~

We have pre-implemented some reward functions in the `reward_score directory `_.

- In the `GSM8k example `_, we force the response to output the final answer after "####", then use string matching to compare it with the ground truth. If completely correct, the score is 1 point; if only the format is correct, 0.1 points; if the format is incorrect, 0 points.
- In the `MATH example `_, we follow the implementation in the `lm-evaluation-harness repository `_.

Customized
~~~~~~~~~~

You can implement customized reward functions in a separate file and specify them using ``custom_reward_function.path`` and ``custom_reward_function.name``. For how to set them, please refer to :ref:`config-explain-page`. The parameters of your reward function should be ``data_source``, ``solution_str``, ``ground_truth``, and ``extra_info``. For example:

.. code:: python

   def my_reward_fn(data_source, solution_str, ground_truth, extra_info=None):
       return len(solution_str)/100

If you are testing only a single customized reward function, you can simply name it ``compute_score`` and leave ``custom_reward_function.name`` unset. To run multiple trials with different customized reward functions, you can modify both ``custom_reward_function.path`` and ``custom_reward_function.name`` for each trial. For instance, you might create a single ``my_reward.py`` file and implement multiple reward functions within it. This way, for different trials, you only need to adjust ``custom_reward_function.name``, making it convenient to conduct multiple tests within scripts.
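Such a multi-function file might look like the sketch below. The function names and scoring logic here are hypothetical illustrations, not part of verl; only the required signature is prescribed by verl.

.. code:: python

   # my_reward.py -- hypothetical module holding several reward variants.

   def exact_match_reward(data_source, solution_str, ground_truth, extra_info=None):
       # 1.0 only when the solution string matches the ground truth exactly.
       return 1.0 if solution_str.strip() == ground_truth.strip() else 0.0

   def length_penalized_reward(data_source, solution_str, ground_truth, extra_info=None):
       # Same signal as above, but discourages overly long responses.
       base = 1.0 if solution_str.strip() == ground_truth.strip() else 0.0
       return base - min(len(solution_str) / 10000, 0.5)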
================================================
FILE: verl_rl/docs/requirements-docs.txt
================================================

# markdown support
recommonmark
myst_parser
# markdown table support
sphinx-markdown-tables

# theme default rtd
# crate-docs-theme
sphinx-rtd-theme

# pin tokenizers version to avoid env_logger version req
tokenizers==0.21

================================================
FILE: verl_rl/docs/sglang_multiturn/interaction_system.rst
================================================

Interaction System for Multi-turn RL Training
=============================================

Last updated: 06/25/2025.

Overview
--------

The verl interaction system enables dynamic, multi-turn conversational feedback during reinforcement learning training. This system allows models to engage in iterative problem-solving scenarios where interaction agents can provide corrective feedback, guidance, or evaluation based on the model's responses.

**New in Multi-Interaction Support**: The system now supports multiple named interactions within a single training session, enabling sophisticated training scenarios where different samples can use different interaction strategies. This allows for curriculum learning, domain-specific feedback, and flexible agent switching at the sample level.

Key features:

- **Async-based Architecture**: Non-blocking interaction processing for distributed training
- **Instance Management**: Stateful session handling with unique instance IDs for concurrent interactions
- **SGLang Integration**: Seamless integration with the SGLang rollout system for multi-turn conversations
- **Configuration-driven**: Dynamic agent loading via YAML configuration files
- **Multi-Interaction Support**: Registry system enabling multiple named interactions per rollout
- **Sample-Level Selection**: Each sample can specify which interaction to use via configuration
- **Reward Integration**: Turn-level scoring mechanism integrated with verl's reward system

Architecture
------------

The interaction system follows a plugin-based architecture with a clear separation of concerns:

.. code-block::

    Interaction Registry System
        ↓
    BaseInteraction (Abstract Interface)
        ↓
    Multiple Named Interactions (e.g., Gsm8kInteraction, CustomInteraction)
        ↓
    SGLang Rollout Integration (interaction_map)
        ↓
    Sample-Level Interaction Selection
        ↓
    Async Request Lifecycle Management

Core Components
~~~~~~~~~~~~~~~

**Interaction Registry System**

The interaction registry system allows loading and managing multiple named interactions:
.. code-block:: python

    from verl.interactions.utils.interaction_registry import initialize_interactions_from_config

    # Load multiple interactions from config
    interaction_map = initialize_interactions_from_config("config.yaml")

    # Access specific interactions by name
    gsm8k_interaction = interaction_map["gsm8k"]
    custom_interaction = interaction_map["custom_solver"]

**BaseInteraction Interface**

All interaction agents must implement the ``BaseInteraction`` abstract class:

.. code-block:: python

    from verl.interactions.base import BaseInteraction
    from typing import Dict, Any, List, Tuple, Optional

    class BaseInteraction:
        def __init__(self, config: Dict[str, Any]):
            self.config = config
            self.name: str = config.get("name", "interaction_agent")

        async def start_interaction(self, instance_id: Optional[str] = None, **kwargs) -> str:
            """Initialize interaction session, return instance_id"""

        async def generate_response(self, instance_id: str, messages: List[Dict[str, Any]], **kwargs) -> Tuple[bool, str, float, Dict[str, Any]]:
            """Generate response, return (should_terminate, response, score, metadata)"""

        async def calculate_score(self, instance_id: str, **kwargs) -> float:
            """Calculate turn-level score for RL training"""

        async def finalize_interaction(self, instance_id: str, **kwargs) -> None:
            """Clean up resources"""

**Request Lifecycle**

The interaction system integrates with SGLang's async rollout via state management:

1. ``PENDING`` → Initialize interaction via ``start_interaction()``
2. ``GENERATING`` → Model generates response
3. ``INTERACTING`` → Process response via ``generate_response()``
4. ``GENERATING`` → Continue if not terminated, otherwise ``COMPLETED``

Configuration
-------------

**Basic Setup**

Enable interaction in your rollout configuration:

.. code-block:: yaml

    actor_rollout_ref:
      rollout:
        multi_turn:
          enable: true
          interaction_config_path: "path/to/interaction_config.yaml"
          max_user_turns: 10
          max_assistant_turns: 10

**Interaction Configuration File**

Create an interaction configuration file (e.g., ``interaction_config.yaml``):

**Single Interaction (Legacy Format)**

.. code-block:: yaml

    interaction:
      - name: "gsm8k"
        class_name: "verl.interactions.gsm8k_interaction.Gsm8kInteraction"
        config: {}

**Multiple Interactions (New Format)**

.. code-block:: yaml

    interaction:
      - name: "gsm8k"
        class_name: "verl.interactions.gsm8k_interaction.Gsm8kInteraction"
        config: {}
      - name: "custom_solver"
        class_name: "custom.interactions.CustomInteraction"
        config:
          solver_type: "advanced"
          timeout: 30
      - name: "code_verifier"
        class_name: "verl.interactions.base.BaseInteraction"
        config:
          verification_mode: "strict"

**Automatic Name Generation**

If no ``name`` field is provided, the system automatically generates one from the class name:

.. code-block:: yaml

    interaction:
      - class_name: "verl.interactions.gsm8k_interaction.Gsm8kInteraction"
        config: {}
        # Automatically generates name: "gsm8k"

The system will dynamically load all specified interaction classes and make them available by name.

Implementation Example: GSM8K
-----------------------------

The GSM8K interaction demonstrates a complete implementation for math problem-solving scenarios:
.. code-block:: python

    from verl.interactions.base import BaseInteraction
    from verl.utils.reward_score import gsm8k
    from uuid import uuid4

    class Gsm8kInteraction(BaseInteraction):
        def __init__(self, config: dict):
            super().__init__(config)
            self._instance_dict = {}

        async def start_interaction(self, instance_id=None, ground_truth=None, **kwargs):
            if instance_id is None:
                instance_id = str(uuid4())
            self._instance_dict[instance_id] = {
                "response": "",
                "ground_truth": ground_truth,
                "reward": 0.0,
            }
            return instance_id

        async def generate_response(self, instance_id, messages, **kwargs):
            # Extract the content of the last assistant message
            content = ""
            for item in reversed(messages):
                if item.get("role") == "assistant":
                    content = item.get("content", "")
                    break

            # Ensure GSM8K format (#### prefix)
            self._instance_dict[instance_id]["response"] = content

            reward = await self.calculate_score(instance_id)
            if reward == 1.0:
                return True, "Your response is correct!", 1.0, {}
            else:
                return False, "Your response is incorrect! You need to reflect on your answer and try again.", 0.0, {}

        async def calculate_score(self, instance_id, **kwargs):
            return gsm8k.compute_score(
                self._instance_dict[instance_id]["response"],
                self._instance_dict[instance_id]["ground_truth"],
                method="strict",
                format_score=0.0,
                score=1.0,
            )

        async def finalize_interaction(self, instance_id, **kwargs):
            del self._instance_dict[instance_id]

Training Integration
--------------------

**Training Script Configuration**

Include the interaction configuration in your training command:

.. code-block:: bash

    python3 -m verl.trainer.main_ppo \
        --config-path="$CONFIG_PATH" \
        --config-name='gsm8k_multiturn_grpo_w_interaction' \
        algorithm.adv_estimator=grpo \
        data.train_batch_size=512 \
        data.return_raw_chat=True \
        actor_rollout_ref.rollout.name=sglang \
        actor_rollout_ref.rollout.multi_turn.interaction_config_path="$PROJECT_DIR/examples/sglang_multiturn/config/interaction_config/gsm8k_interaction_config.yaml" \
        trainer.total_epochs=15

**Data Requirements**

Ensure your dataset includes interaction parameters with the ``name`` field for interaction selection:

.. code-block:: python

    # Dataset should include interaction_kwargs in non_tensor_batch
    interaction_kwargs = [
        {"name": "gsm8k", "query": "What is 2+2?", "ground_truth": "4"},
        {"name": "custom_solver", "query": "Solve: x^2 + 5x + 6 = 0", "ground_truth": "x = -2, -3"},
        {"name": "gsm8k", "query": "What is 3+3?", "ground_truth": "6"},
    ]

**Sample-Level Interaction Selection**

Each sample can specify which interaction to use via the ``name`` field. This enables flexible training scenarios where different samples use different interaction strategies:

.. code-block:: python

    # Example: math problems use the GSM8K interaction, code problems use the code verifier
    data_samples = [
        {
            "prompt": "What is 15% of 200?",
            "interaction_kwargs": {
                "name": "gsm8k",
                "query": "What is 15% of 200?",
                "ground_truth": "30"
            }
        },
        {
            "prompt": "Write a function to check if a number is prime",
            "interaction_kwargs": {
                "name": "code_verifier",
                "code_type": "python",
                "expected_behavior": "return True for prime numbers"
            }
        }
    ]

**Backward Compatibility**

If no ``name`` field is provided in ``interaction_kwargs``, the system defaults to ``"gsm8k"`` for backward compatibility.
Best Practices
--------------

**Resource Management**

- Always implement proper cleanup in ``finalize_interaction()``
- Use unique instance IDs to avoid conflicts in concurrent training
- Handle edge cases like empty messages or malformed content

**Performance Optimization**

- Keep interaction logic lightweight to avoid blocking training
- Use async/await properly to maintain non-blocking behavior
- Consider caching expensive computations within interaction instances

**Testing**

Comprehensive testing is essential for interaction systems:

.. code-block:: python

    import pytest
    from unittest.mock import patch

    @pytest.mark.asyncio
    async def test_interaction_workflow():
        interaction = YourInteraction({})

        # Test the complete workflow
        instance_id = await interaction.start_interaction(ground_truth="expected_answer")
        messages = [{"role": "user", "content": "user_content"},
                    {"role": "assistant", "content": "assistant_response"}]
        should_terminate, response, reward, metadata = await interaction.generate_response(instance_id, messages)

        assert should_terminate in [True, False]
        assert isinstance(reward, float)

        await interaction.finalize_interaction(instance_id)

Advanced Usage
--------------

**Multi-Interaction Training Strategies**

You can design sophisticated training scenarios using multiple interactions:

.. code-block:: python

    # Example: progressive difficulty with different interaction agents
    class MathTrainingPipeline:
        def create_interaction_config(self):
            return {
                "interaction": [
                    {
                        "name": "basic_math",
                        "class_name": "verl.interactions.gsm8k_interaction.Gsm8kInteraction",
                        "config": {"difficulty": "easy"}
                    },
                    {
                        "name": "advanced_math",
                        "class_name": "custom.interactions.AdvancedMathInteraction",
                        "config": {"difficulty": "hard", "allow_hints": True}
                    },
                    {
                        "name": "competition_math",
                        "class_name": "custom.interactions.CompetitionMathInteraction",
                        "config": {"time_limit": 300, "show_steps": False}
                    }
                ]
            }

        def create_curriculum_data(self, epoch):
            if epoch < 5:
                return [{"name": "basic_math", ...} for _ in samples]
            elif epoch < 10:
                return [{"name": "advanced_math", ...} for _ in samples]
            else:
                return [{"name": "competition_math", ...} for _ in samples]

**Custom Scoring Functions**

You can integrate custom reward functions:

.. code-block:: python

    async def calculate_score(self, instance_id, **kwargs):
        response = self._instance_dict[instance_id]["response"]
        ground_truth = self._instance_dict[instance_id]["ground_truth"]

        # Custom evaluation logic
        if custom_evaluation_function(response, ground_truth):
            return 1.0
        else:
            return 0.0

**Multi-step Interactions**

For complex scenarios requiring multiple feedback rounds:

.. code-block:: python

    async def generate_response(self, instance_id, messages, **kwargs):
        instance = self._instance_dict[instance_id]
        instance["attempts"] += 1

        # Evaluate the current response
        reward = await self.calculate_score(instance_id)

        if reward > 0.8:
            return True, "Excellent work!", reward, {}
        elif instance["attempts"] < 3:
            return False, "Good attempt, but try to improve...", reward, {}
        else:
            return True, "Maximum attempts reached.", reward, {}

Troubleshooting
---------------

**Common Issues**

1. **Instance ID Conflicts**: Ensure unique instance IDs across concurrent sessions
2. **Memory Leaks**: Always call ``finalize_interaction()`` to clean up resources
3. **Blocking Operations**: Keep interaction logic async and non-blocking
4. **Configuration Errors**: Verify that the interaction config path and class names are correct
5. **Interaction Name Conflicts**: Ensure all interactions have unique names in the configuration
6. **Missing Interaction**: Verify that the ``name`` field in ``interaction_kwargs`` matches an available interaction
7. **Backward Compatibility**: When migrating from single- to multi-interaction, add ``name`` fields to existing data

**Debugging**

Enable debug logging to trace the interaction flow:

.. code-block:: bash

    export VERL_LOGGING_LEVEL=DEBUG

**Performance Monitoring**

Monitor the interaction system's impact on training throughput and adjust accordingly.

Related Documentation
---------------------

- :doc:`multiturn`: Basic multi-turn rollout configuration
- :doc:`sandbox_fusion`: Tool integration with SGLang
- :doc:`search_tool_example`: Search tool implementation example

================================================
FILE: verl_rl/docs/sglang_multiturn/multiturn.rst
================================================

Multi-turn Rollout Support
==========================

Last updated: 06/27/2025.

Basic Configuration
~~~~~~~~~~~~~~~~~~~

To enable multi-turn rollout, make sure to configure the following fields in your rollout configuration:

.. code-block:: yaml

    actor_rollout_ref:
      rollout:
        multi_turn: True
        name: "sglang"

This configuration activates the SGLang engine for multi-turn interaction during rollout.

Custom Tool Configuration
~~~~~~~~~~~~~~~~~~~~~~~~~

For custom environment interaction tools, you can implement your own tools based on ``verl.tools.base_tool.BaseTool``. Then, specify your tool configurations in a YAML file:

.. code-block:: yaml

    tools:
      - class_name: ""
        config:
          type: native
        tool_schema:

You may refer to the GSM8KTool_example_configuration_, which is one example of a tool configuration. Its implementation can be found in gsm8k_tool.py_.

Finally, set the ``tools_config_file`` in your rollout config:

.. code-block:: yaml

    actor_rollout_ref:
      rollout:
        tool_kwargs:
          tools_config_file:

This allows the integration of customized tool behaviors during actor rollout steps.

If you want to roll out with simulated interaction, you can set the ``interaction_config_file`` in your rollout config:

.. code-block:: yaml

    interaction:
      - class_name: ""
        config: {}

.. code-block:: yaml

    actor_rollout_ref:
      rollout:
        interaction_config_file:

If your tool creates multi-modal inputs, you should return a list of multi-modal inputs in your ``tool.execute()`` implementation. Images and videos should be processed before returning. For example, if you are using Qwen2.5-VL, you can use the following code to get the representations:

.. code-block:: python

    async def execute(self, ...) -> Tuple[str | Dict[str, Any], float, dict]:
        ...
        from verl.utils.dataset.vision_utils import process_image, process_video

        img1 = process_image(img1)
        video1 = process_video(video1)

        # because the (image | video) key is ("image" | "video") instead of ("images" | "videos") in vllm,
        # we need to use ("image" | "video") to specify the list of images/videos
        # link: https://github.com/vllm-project/vllm/blob/3c545c0c3b98ee642373a308197d750d0e449403/vllm/multimodal/parse.py#L205
        return {"image": [img1, ...], "video": [video1, ...], "text": "..."}, 0, {}

Remember to set ``return_multi_modal_inputs: False`` in your dataset config so that the multi-modal inputs are processed correctly in the rollout. Refer to the `Handling Multi-Modal Inputs in Datasets`_ section for more details.

MCP Tool Configuration
~~~~~~~~~~~~~~~~~~~~~~

For MCP interaction tools, you can flexibly configure them using a YAML file. The typical setup is as follows:
.. code-block:: yaml

    tools:
      - class_name: ""
        config:
          type: mcp
          mcp:
            mcp_servers_config_path: ./mcp_server.json
            tool_selected_list: {}

The ``tool_selected_list`` field is optional and specifies which tools to use from the servers. If you want to enable all available tools, simply omit this attribute. Besides, ``mcp_servers_config_path`` points to a JSON file containing the MCP server configurations. For example:

.. code-block:: json

    {
      "mcpServers": {
        "SSE Server": {
          "url": "your_server_url",
          "auth_token": "your_server_api_token"
        },
        "STDIO Server": {
          "command": "npx",
          "args": ["-y", "server-mcp@0.2.1"],
          "env": {
            "SERVER_API_KEY": "your_server_api_token"
          }
        }
      }
    }

Since the content formats returned by the MCP server may vary, users can inherit from ``MCPBaseTool`` and override the ``_parse_tool_result`` method to implement custom parsing logic.

.. code-block:: python

    class MCPYourTool(MCPBaseTool):
        def __init__(self, config: dict, tool_schema: OpenAIFunctionToolSchema):
            super().__init__(config, tool_schema)

        def _parse_tool_result(self, content: list) -> Tuple[str, dict]:
            ...

Overall, you may refer to mcp_search_tool.py_ and mcp_tool_config.yaml_ for a custom implementation and configuration.

Multi-turn Tokenization
~~~~~~~~~~~~~~~~~~~~~~~

Tokenizing multi-turn rollouts poses a challenge: after applying the chat template and tokenizing the full message list, it's hard to identify which tokens belong to assistant messages. Since the token list is flat, it lacks direct alignment with the message roles.

To address this, we adopt a **delta-based tokenization** strategy. Each time the LLM generates a new message, we:

1. Apply the chat template to all prior messages (``messages[:i]``).
2. Apply the chat template again, including the latest message (``messages[:i+1]``).
3. Tokenize only the *delta* between these two serialized message strings.

This ensures that only tokens generated by the assistant are included in the loss mask.

.. code-block:: python

    # When using a tokenizer
    # Exclude the assistant prompt (e.g., "<|im_start|>assistant") from the loss by setting add_generation_prompt=True
    prev = tokenizer.apply_chat_template(messages[:i], add_generation_prompt=True, tokenize=False)
    curr = tokenizer.apply_chat_template(messages[:i+1], add_generation_prompt=False, tokenize=False)
    token_ids += tokenizer.encode(curr[len(prev):], add_special_tokens=False)
    loss_mask += [1] * len(token_ids)  # Mask only the new assistant tokens

.. code-block:: python

    # When using a processor
    # Exclude the assistant prompt (e.g., "<|im_start|>assistant") from the loss by setting add_generation_prompt=True
    prev = processor.apply_chat_template(messages[:i], add_generation_prompt=True, tokenize=False)
    prev_model_inputs = processor(text=prev, images=images, videos=videos, return_tensors="pt")[0].tolist()
    curr = processor.apply_chat_template(messages[:i+1], add_generation_prompt=False, tokenize=False)
    curr_model_inputs = processor(text=curr, images=images, videos=videos, return_tensors="pt")[0].tolist()
    token_ids += curr_model_inputs["input_ids"][len(prev_model_inputs["input_ids"]):]
    loss_mask += [1] * len(token_ids)  # Mask only the new assistant tokens

While we've validated that this produces results consistent with full-message tokenization, future models' chat templates could break compatibility. To guard against silent inconsistencies, we compare the delta-based tokenization with the full-tokenization result by default at the end of each rollout. If you see the following warning, you can check the mismatched substring in the log:
.. code-block::

    Inconsistent training and inference tokenization detected. This may lead to unexpected behavior during training.
    Please review your chat template to determine if this is intentional. For more information, refer to the multiturn README.md.

The tokenization sanity check mode can be configured using the ``actor_rollout_ref.rollout.multi_turn.tokenization_sanity_check_mode`` parameter, which accepts the following values:

- ``strict`` (default): Performs a strict comparison between delta-based and full tokenization results, raising warnings for any differences.
- ``ignore_strippable``: Ignores differences in whitespace characters (``\n``, ``\t``, ``\r``, spaces) while still checking for meaningful text mismatches. This is useful when debugging chat template issues where whitespace variations are expected and acceptable.
- ``disable``: Completely disables the tokenization sanity check. Only use this if you have thoroughly validated that tokenization discrepancies are expected and won't impact training.

Example configuration:

.. code-block:: yaml

    actor_rollout_ref:
      rollout:
        multi_turn:
          tokenization_sanity_check_mode: "ignore_strippable"  # Choose from: "disable", "ignore_strippable", "strict"

Handling Multi-Modal Inputs in Datasets
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

If your dataset includes multi-modal inputs (such as images or videos), you can control whether these are pre-processed and included in each sample by setting the ``return_multi_modal_inputs`` flag in your dataset config (used by ``RLHFDataset``).

- ``return_multi_modal_inputs: True`` (default): The dataset will pre-process and include a ``multi_modal_inputs`` dictionary for each sample. This dict contains the model-ready representations (e.g., image tensors, video tensors, etc.) as produced by your processor. This is useful for single-turn or SFT-style training, where the model expects all modalities to be present in the batch.

- ``return_multi_modal_inputs: False``: The dataset will not include the ``multi_modal_inputs`` field. This is recommended for multi-turn RL or tool-augmented rollouts, where the model may generate new multi-modal inputs dynamically during rollout and you want to avoid conflicts or redundant data in the batch.

Special Cases
^^^^^^^^^^^^^

Some models (e.g., Qwen/QwQ-32B and the Qwen3 series) remove internal reasoning content during chat template rendering. As a result, the message content can vary across turns, making the delta-based tokenization inaccurate. For example, consider the following conversation:

.. code-block:: python

    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "What is 2 + 2?"},
        {"role": "assistant", "content": "user asked about a simple math question. 2 + 2 = 4."},
        {"role": "user", "content": "Explain why."},
        {"role": "assistant", "content": "user wants to know the reasoning behind the answer. Search for a good explanation", "tool_calls": [{"id": "tool1", "type": "search", "arguments": {"query": "Why is 2 + 2 = 4?"}}]},
        {"role": "tool", "content": "The sum of two and two is four because it is a basic arithmetic operation."},
        {"role": "assistant", "content": "The tool provided a good explanation. The sum of two and two is four because it is a basic arithmetic operation."}
    ]

1. Qwen/QwQ-32B removes all reasoning content except the last assistant message after applying the chat template.
.. code-block:: text

    <|im_start|>system
    You are a helpful assistant.<|im_end|>
    <|im_start|>user
    What is 2 + 2?<|im_end|>
    <|im_start|>assistant
    2 + 2 = 4.<|im_end|>
    <|im_start|>user
    Explain why.<|im_end|>
    <|im_start|>assistant
    {"name": "", "arguments": {"query": "Why is 2 + 2 = 4?"}}
    <|im_end|>
    <|im_start|>user
    The sum of two and two is four because it is a basic arithmetic operation.
    <|im_end|>
    <|im_start|>assistant
    The tool provided a good explanation. The sum of two and two is four because it is a basic arithmetic operation.<|im_end|>

2. The Qwen3 series removes all reasoning content before the last user message.

.. code-block:: text

    <|im_start|>system
    You are a helpful assistant.<|im_end|>
    <|im_start|>user
    What is 2 + 2?<|im_end|>
    <|im_start|>assistant
    2 + 2 = 4.<|im_end|>
    <|im_start|>user
    Explain why.<|im_end|>
    <|im_start|>assistant
    user wants to know the reasoning behind the answer. Search for a good explanation
    {"name": "", "arguments": {"query": "Why is 2 + 2 = 4?"}}
    <|im_end|>
    <|im_start|>user
    The sum of two and two is four because it is a basic arithmetic operation.
    <|im_end|>
    <|im_start|>assistant
    The tool provided a good explanation. The sum of two and two is four because it is a basic arithmetic operation.<|im_end|>

To handle this, we fall back to a **fixed base conversation** containing only a single system and user message. Since this base doesn't include assistant messages or reasoning content, it remains consistent across turns.

.. code-block:: python

    BASE_CHAT_HISTORY = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "I am a user."}
    ]
    prev = tokenizer.apply_chat_template(BASE_CHAT_HISTORY, add_generation_prompt=True, tokenize=False)
    curr = tokenizer.apply_chat_template([*BASE_CHAT_HISTORY, messages[i]], add_generation_prompt=False, tokenize=False)
    token_ids += tokenizer.encode(curr[len(prev):], add_special_tokens=False)
    loss_mask += [1] * len(token_ids)

This method works well for the Qwen3 series. However, Qwen/QwQ-32B currently has a bug in its chat template. A fix_ has been proposed but not yet adopted. Until then, use the following command to download the fixed model revision:

.. code-block:: bash

    pip install huggingface_hub
    huggingface-cli download Qwen/QwQ-32B --revision refs/pr/81

.. _fix: https://huggingface.co/Qwen/QwQ-32B/discussions/81

Discrepancy Between Training and Inference Templates
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Although the above approach fixes the delta-mismatch issue, the removal of reasoning content in the inference-time chat template introduces a new discrepancy: training uses the full reasoning content, while inference does not. This mismatch can affect model performance in unpredictable ways. To avoid it, we default to using the full response (including reasoning) for both training and rollout. However, this approach comes with trade-offs:

1. Long reasoning content can easily exceed the model's context window, especially in multi-turn rollout.
2. There is now a mismatch between rollout and the production environment: models will not have reasoning content from past turns if you use the default chat template in production.

We are still evaluating the impact of these issues.
If you experience context-length problems or prefer rollouts that match production (i.e., exclude reasoning), you can enable ``actor_rollout_ref.rollout.multi_turn.use_inference_chat_template = True``.

GSM8K Multi-turn Training Performance
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

See the training performance of multi-turn rollout on the GSM8K task HERE_.

.. _HERE: https://wandb.ai/zhaochenyang20/gsm8k_async_rl/runs/1ro1r7om?nw=nwuserzhaochenyang20
.. _GSM8KTool_example_configuration: https://github.com/volcengine/verl/blob/main/examples/sglang_multiturn/config/tool_config/gsm8k_tool_config.yaml
.. _gsm8k_tool.py: https://github.com/volcengine/verl/blob/main/verl/tools/gsm8k_tool.py
.. _mcp_search_tool.py: https://github.com/volcengine/verl/blob/main/verl/tools/mcp_search_tool.py
.. _mcp_tool_config.yaml: https://github.com/volcengine/verl/blob/main/examples/sglang_multiturn/config/tool_config/mcp_tool_config.yaml

Interaction System
~~~~~~~~~~~~~~~~~~

For dynamic conversational feedback during RL training, see:

.. toctree::
   :maxdepth: 1

   interaction_system

Search Tool Integration
~~~~~~~~~~~~~~~~~~~~~~~

.. toctree::
   :maxdepth: 1

   search_tool_example

Code Walkthrough
~~~~~~~~~~~~~~~~

If you want to learn about the code execution flow in more depth, please read https://github.com/zhaochenyang20/Awesome-ML-SYS-Tutorial/tree/main/rlhf/verl/multi-turn/code-walk-through

================================================
FILE: verl_rl/docs/sglang_multiturn/sandbox_fusion.rst
================================================

===============================
Sandbox Fusion Tool Integration
===============================

Last updated: 06/10/2025.

Motivations
===========

- As users of verl, we want to allow the model to call certain tools during Actor rollout, incorporating the results into the training process.
- A colleague from ByteDance proposed a paper aimed at enhancing model capability through code execution tools.
- We aim to support the tool-calling capabilities of inference engines using `sandbox-fusion` as the code execution system, providing the community with a reimplementation of `retools`.

Reward Compute with Sandbox Fusion + FaaS Integration
=====================================================

- For current datasets and tasks, similar work already exists (e.g., Prime), which uses local processes as runners to execute model-generated code for reward computation.
- On this basis, #1429 has advanced the design by integrating FaaS as the runner for reward computation.

Goals
=====

- Adapt to the `sglang` tool-calling protocol and define tools for sandbox fusion.
- Integrate with the `async-rollout` process, ensuring sandbox fusion tools follow asyncIO conventions.
- Design and implement a basic rate limiter to prevent issues such as 429 errors.

Non-Goals
=========

- Training effectiveness is out of scope.
- Observability metrics are not considered.
- Distributed failover and component fault tolerance are not addressed.

Design Details
==============

Tool Schema Definition
----------------------

- Currently, only code execution is considered, requiring a `code` field in the JSON from the model.
- Only Python code is supported for now, so no `language` parameter is defined.
.. code-block:: python

    OpenAIFunctionToolSchema(
        type="function",
        function=OpenAIFunctionSchema(
            name="code_interpreter",
            description="A tool for executing code.",
            parameters=OpenAIFunctionParametersSchema(
                type="object",
                properties={
                    "code": OpenAIFunctionPropertySchema(
                        type="string",
                        description="The code to execute.",
                        enum=None,
                    )
                },
                required=["code"],
            ),
            strict=False,
        )
    )

Configuration Parameters
--------------------------

+----------------------------+--------------------------------------------------------------+
| Parameter Name             | Description                                                  |
+============================+==============================================================+
| `num_workers`              | Number of worker threads/processes per DP to request runner. |
+----------------------------+--------------------------------------------------------------+
| `rate_limit`               | Global limit of concurrent code executions. Default: 10      |
+----------------------------+--------------------------------------------------------------+
| `default_timeout`          | Timeout (in seconds) for each code execution. Default: 30    |
+----------------------------+--------------------------------------------------------------+
| `default_language`         | Default programming language. Default: "python"              |
+----------------------------+--------------------------------------------------------------+
| `enable_global_rate_limit` | Whether to enable global rate limiting. Default: True        |
+----------------------------+--------------------------------------------------------------+
| `sandbox_fusion_url`       | URL for the veFaaS sandbox execution service                 |
+----------------------------+--------------------------------------------------------------+

Rate Limiting Design
-----------------------

Objectives:

- Limit the number of in-flight requests using a token bucket model.
- Ensure ordered submission to code runners to avoid starvation due to backoff.

Design Highlights:

- Use a Ray global actor as a singleton distributed counter at the cluster level.
- A semaphore is used for counting, with `acquire` and `release` in separate thread pools to preserve order.
- Use Ray's cloud-pickle to serialize functions for a decoupled `ExecutionWorker`.
.. code-block:: python

    @ray.remote(concurrency_groups={"acquire": 1, "release": 10})
    class TokenBucketWorker:
        def __init__(self, rate_limit: int):
            self.rate_limit = rate_limit
            self.current_count = 0
            self._semaphore = threading.Semaphore(rate_limit)

        @ray.method(concurrency_group="acquire")
        def acquire(self):
            self._semaphore.acquire()
            self.current_count += 1

        @ray.method(concurrency_group="release")
        def release(self):
            self._semaphore.release()
            self.current_count -= 1

        def get_current_count(self):
            return self.current_count

    class ExecutionWorker:
        def __init__(self, enable_global_rate_limit=True, rate_limit=10):
            self.rate_limit_worker = self._init_rate_limit(rate_limit) if enable_global_rate_limit else None

        def _init_rate_limit(self, rate_limit):
            return TokenBucketWorker.options(name="rate-limiter", get_if_exists=True).remote(rate_limit)

        def execute(self, fn: Callable[..., T], *fn_args, **fn_kwargs) -> T:
            with ExitStack() as stack:
                stack.callback(self.rate_limit_worker.release.remote)
                ray.get(self.rate_limit_worker.acquire.remote())
                try:
                    return fn(*fn_args, **fn_kwargs)
                except Exception as e:
                    logger.warning(f"Error when executing code: {e}")

    def init_execution_pool(num_workers: int, enable_global_rate_limit=True, rate_limit=10, mode: PoolMode = PoolMode.ThreadMode):
        if mode == PoolMode.ThreadMode:
            return ray.remote(ExecutionWorker).options(max_concurrency=num_workers).remote(
                enable_global_rate_limit=enable_global_rate_limit,
                rate_limit=rate_limit
            )
        else:
            raise NotImplementedError("Process mode is not implemented yet")
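For illustration, the pool sketched above would be driven roughly like this. This is a hypothetical usage snippet, not part of the design doc; it assumes Ray is initialized and that the names from the block above are in scope, and ``run_snippet`` is a stand-in for a real sandbox call.

.. code-block:: python

    # Hypothetical driver code for the ExecutionWorker pool above.
    import ray

    ray.init()
    pool = init_execution_pool(num_workers=8, enable_global_rate_limit=True, rate_limit=10)

    def run_snippet(code: str) -> int:
        return len(code)  # stand-in for a real sandbox call

    # Each call acquires a rate-limit token, runs the function, then releases the token.
    result = ray.get(pool.execute.remote(run_snippet, "print('hello')"))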
Tool Implementation
-------------------

- Use `instance_id` to identify requests across multiple dialogue rounds.
- Use `execution_pool` to implement async invocation.
- Clean up state after rollout completion.

.. code-block:: python

    class SandboxFusionTool(BaseTool):
        def __init__(self, config: dict, tool_schema: OpenAIFunctionToolSchema):
            ...
            self.execution_pool = init_execution_pool(...)
            ...

        async def create(self, instance_id: Optional[str] = None, ...):
            ...

        async def execute(self, instance_id: str, parameters: dict[str, Any], **kwargs) -> Tuple[str, float, dict]:
            code = parameters.get("code", "")
            timeout = parameters.get("timeout", self.default_timeout)
            language = parameters.get("language", self.default_language)
            if not isinstance(code, str):
                code = str(code)

            result = await self.execution_pool.execute.remote(self.execute_code, instance_id, code, timeout, language)
            self._instance_dict[instance_id]["reward"].append(result.strip())

            return result, result, {}

        def execute_code(self, instance_id, code, timeout=30, language="python"):
            result_status, metadata = _process_single_case(0, None, None, self.sandbox_fusion_url, code, timeout, language)
            # we should always expect this since we don't have a correct answer
            if metadata["run_status"] == "Finished":
                actual_output = metadata["stdout"] if metadata["stdout"] is not None else ""
                return actual_output
            else:
                return "no stdout here"

        async def calc_reward(self, instance_id: str, ...):
            ...

        async def release(self, instance_id: str, ...):
            ...

Test Plan
=========

Unit Tests
----------

- **test_tools_registration**: Test tool registration and initialization.
- **test_rollout_req_creation**: Validate that `AsyncRolloutReq` is built correctly.
- **test_over_size_case**: Ensure rollout terminates early when exceeding `max_seq_len`.
- **test_tool_call_basic_case**: Mock `sglang` output, validate the tool call and its result.
- **test_tool_call_batch_case**: Test batch processing of tool calls.
- **test_basic_multi_process_init**: Validate that the Ray global actor behaves as a singleton.
- **TestSingleNodeRateLimiterCase**: Verify the rate limiter works in single-node mode.
- **test_rotten_execution**: Ensure the rate limiter recovers from function errors.
- **TestMultiNodeRateLimiterCase**: Verify behavior in multi-node environments.

e2e Tests
----------

We provide e2e test scripts in the `tests/special_e2e` folder, e.g. `tests/special_e2e/run_gsm8k_fsdp_sgl_multiturn_sf_tool.sh`. By setting ``trainer.rollout_data_dir`` you can dump the rollout data to local disk. Here is a sample taken from the rollout data:

.. code-block:: python

    {
      "input": " system\nYou are a math expert. You are given a question and you need to solve it step by step. Reasoning step by step before any tool call. You should use the `calc_gsm8k_reward` tool after step by step solving the question, before generate final answer at least once and refine your answer if necessary. Put your final answer in the format of `#### `.\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within XML tags:\n\n{\"type\": \"function\", \"function\": {\"name\": \"code_interpreter\", \"description\": \"A tool for executing code.\", \"parameters\": {\"type\": \"object\", \"properties\": {\"code\": {\"type\": \"string\", \"description\": \"The code to execute.\", \"enum\": null}}, \"required\": [\"code\"]}, \"strict\": false}}\n\n\nFor each function call, return a json object with function name and arguments within XML tags:\n\n{\"name\": , \"arguments\": }\n\n user\nJohn gets a bonus that's the same percentage every year. Last year he made $100,000 and got a $10,000 bonus. This year he makes $200,000. How much will John make this year when adding both his total pay and bonus together? Let's think step by step and output the final answer after `####`.\n assistant\n",
      "output": "assistant\n\nOkay, let's see. John gets a bonus that's the same percentage every year. Last year, he made $100,000 and got a $10,000 bonus. This year, he makes $200,000. The question is asking how much he'll make this year when adding both his total pay and bonus together.\n\nFirst, I need to figure out the percentage bonus he gets. Since the bonus is the same percentage each year, if I can find that percentage based on last year's details, I can apply it to this year's earnings to find this year's bonus. Then, I'll add that bonus to his total pay this year.\n\nLast year, he earned $100,000 and got a $10,000 bonus. To find the percentage, I can divide the bonus by his total earnings. So, 10,000 divided by 100,000. Let me calculate that.\n\n10,000 / 100,000 equals 0.1, which is 10% as a percentage. So, his bonus is 10% of his pay each year.\n\nThis year, he makes $200,000. So, the bonus this year would be 10% of 200,000. Let me compute that using the code interpreter to ensure accuracy.\n \n{\"name\": \"code_interpreter\", \"arguments\": {\"code\": \"total_pay_this_year = 200000\\nbonus_percentage = 10 / 100\\nbonus_this_year = total_pay_this_year * bonus_percentage\\ntotal_income_this_year = total_pay_this_year + bonus_this_year\\nprint(total_income_this_year)\", \"executes\": \"True\"}}\n\n tool\n220000.0\n\n assistant\n\nSo the code shows that John's total income this year would be $220,000. This is consistent with the 10% bonus on last year's pay of $100,000, which was $10,000. Therefore, this year's bonus of $20,000 added to his total pay of $200,000 results in a total income of $220,000.\n\nI need to verify that there are no other factors at play. The problem states it's a constant percentage bonus each year, so there's no mention of changing percentages or other variables. Therefore, applying a straightforward percentage of 10% on this year's partial salary amount would result in $20,000. Adding that to his total pay gives $220,000. The calculation is correct and aligns with the question's parameters.\n\n\n#### 220000.0",
      "score": 0,
      "step": 1
    }

Here is the readable-format version:

.. code-block:: python

    [system]
    You are a math expert. You are given a question and you need to solve it step by step. Reasoning step by step before any tool call. You should use the `calc_gsm8k_reward` tool after step by step solving the question, before generate final answer at least once and refine your answer if necessary. Put your final answer in the format of `#### `.

    # Tools

    You may call one or more functions to assist with the user query.

    You are provided with function signatures within XML tags:

    {"type": "function", "function": {"name": "code_interpreter", "description": "A tool for executing code.", "parameters": {"type": "object", "properties": {"code": {"type": "string", "description": "The code to execute.", "enum": null}}, "required": ["code"]}, "strict": false}}

    For each function call, return a json object with function name and arguments within XML tags:

    {"name": , "arguments": }

    [user]
    John gets a bonus that's the same percentage every year. Last year he made $100,000 and got a $10,000 bonus. This year he makes $200,000. How much will John make this year when adding both his total pay and bonus together? Let's think step by step and output the final answer after `####`.

    [assistant]
    Okay, let's see. John gets a bonus that's the same percentage every year. Last year, he made $100,000 and got a $10,000 bonus. This year, he makes $200,000. The question is asking how much he'll make this year when adding both his total pay and bonus together.

    First, I need to figure out the percentage bonus he gets. Since the bonus is the same percentage each year, if I can find that percentage based on last year's details, I can apply it to this year's earnings to find this year's bonus. Then, I'll add that bonus to his total pay this year.

    Last year, he earned $100,000 and got a $10,000 bonus. To find the percentage, I can divide the bonus by his total earnings. So, 10,000 divided by 100,000. Let me calculate that.

    10,000 / 100,000 equals 0.1, which is 10% as a percentage. So, his bonus is 10% of his pay each year.

    This year, he makes $200,000. So, the bonus this year would be 10% of 200,000. Let me compute that using the code interpreter to ensure accuracy.

    {"name": "code_interpreter", "arguments": {"code": "total_pay_this_year = 200000\nbonus_percentage = 10 / 100\nbonus_this_year = total_pay_this_year * bonus_percentage\ntotal_income_this_year = total_pay_this_year + bonus_this_year\nprint(total_income_this_year)", "executes": "True"}}

    [tool]
    220000.0
    [assistant]
    So the code shows that John's total income this year would be $220,000. This is consistent with the 10% bonus on last year's pay of $100,000, which was $10,000. Therefore, this year's bonus of $20,000 added to his total pay of $200,000 results in a total income of $220,000.

    I need to verify that there are no other factors at play. The problem states it's a constant percentage bonus each year, so there's no mention of changing percentages or other variables. Therefore, applying a straightforward percentage of 10% on this year's partial salary amount would result in $20,000. Adding that to his total pay gives $220,000. The calculation is correct and aligns with the question's parameters.

    #### 220000.0

You can also use the `RolloutViewer` TUI tool to view the dumped rollout data:

.. code-block:: bash

    python scripts/rollout_viewer.py ${trainer.rollout_data_dir}

.. image:: https://github.com/user-attachments/assets/e34e5157-2880-4a21-afb2-73885d0dfb11
   :alt: RolloutViewer screenshot

================================================
FILE: verl_rl/docs/sglang_multiturn/search_tool_example.rst
================================================

=======================
Search Tool Integration
=======================

Last updated: 05/30/2025.

Introduction
------------

We have added a search tool calling function to multi-turn RL, enabling the model to initiate retrieval requests during Actor rollout and directly use the retrieval results for training. **We support using a local dense retriever as the retrieval tool, as well as integrating with your own local retrieval engine.**

Quick Reproduction
------------------

Create a New Docker Container
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. code:: bash

    docker run \
        -it \
        --shm-size 32g \
        --gpus all \
        -v {Huggingface-Cache-Path}:/root/.cache \
        --ipc=host \
        --network=host \
        --privileged \
        --name sglang_{your-name} \
        lmsysorg/sglang:dev \
        /bin/zsh

If you need to restart after exiting the container:

.. code:: bash

    docker start -i sglang_{your-name}

Update Python and Configure the Virtual Environment using uv
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. code:: bash

    apt update
    apt install -y python3.10 python3.10-venv

    # Create a virtual environment
    python3 -m venv ~/.python/verl-multiturn-rollout

    # Activate the virtual environment
    source ~/.python/verl-multiturn-rollout/bin/activate

    # Install uv
    python3 -m pip install uv

Install verl Upstream
~~~~~~~~~~~~~~~~~~~~~

.. code:: bash

    cd ~
    git clone https://github.com/volcengine/verl.git
    cd verl

    # Install verl
    python3 -m uv pip install .
    python3 -m uv pip install -r ./requirements_sglang.txt

    # Manually install flash-attn
    python3 -m uv pip install wheel
    python3 -m uv pip install packaging
    python3 -m uv pip install flash-attn --no-build-isolation --no-deps

Set Up a Local Retrieval Engine
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

If you are using your own local retrieval service, you can skip this step. We chose the local dense retriever provided in the search-R1 example; detailed instructions are in the `searchR1 docs `__. In brief:

- The GPU version offers higher accuracy and speed; each GPU uses about 5–7 GB of memory.
- The CPU version can be used for simple testing but has lower retrieval precision, which will degrade training performance. See the `retriever documentation `__ in search-R1 for details.
- We recommend using Conda to install faiss-gpu=1.8.0; venv may cause errors.

**Note**: To start both the training process and the local retrieval service, we launch two separate Python environments. The training uses uv in the verl-multiturn-rollout environment, while the retriever uses conda to install ``faiss-gpu``.
.. code:: bash

   # Download the Miniconda installer script
   wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda.sh

   # Install to $HOME/miniconda3 in batch mode
   bash ~/miniconda.sh -b -p $HOME/miniconda3

   # Activate conda (only in the current shell)
   eval "$($HOME/miniconda3/bin/conda shell.bash hook)"

   # (Optional) Add conda to your default shell startup
   conda init

   # Reload shell config
   source ~/.bashrc

   # Create and activate the retriever environment with Python 3.10
   conda create -n retriever python=3.10 -y
   conda activate retriever

   # Install PyTorch (with GPU support) and related libraries
   conda install pytorch==2.4.0 torchvision==0.19.0 torchaudio==2.4.0 pytorch-cuda=12.1 -c pytorch -c nvidia -y

   # Install other Python packages
   pip install transformers datasets pyserini huggingface_hub

   # Install the GPU version of faiss
   conda install faiss-gpu=1.8.0 -c pytorch -c nvidia -y

   # Install the API service framework
   pip install uvicorn fastapi

Download the Index and Corpus
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The local retrieval files are large—prepare sufficient disk space. The download is about 60–70 GB, and it takes about 132 GB once uncompressed:

.. code:: bash

   conda activate retriever

   save_path=/the/path/to/save
   python examples/sglang_multiturn/search_r1_like/local_dense_retriever/download.py --save_path $save_path
   cat $save_path/part_* > $save_path/e5_Flat.index
   gzip -d $save_path/wiki-18.jsonl.gz

Start the Local flat e5 Retrieval Server
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

1. The first startup will download models and load the index.
2. Apart from the download, startup takes about 1–2 minutes.
3. After startup, each GPU uses about 5–7 GB of memory, leaving the rest for multi-turn RL training.

.. code:: bash

   conda activate retriever

   index_file=$save_path/e5_Flat.index
   corpus_file=$save_path/wiki-18.jsonl
   retriever_name=e5
   retriever_path=intfloat/e5-base-v2

   python examples/sglang_multiturn/search_r1_like/local_dense_retriever/retrieval_server.py \
       --index_path $index_file \
       --corpus_path $corpus_file \
       --topk 3 \
       --retriever_name $retriever_name \
       --retriever_model $retriever_path \
       --faiss_gpu

Set Up WANDB_API_KEY
~~~~~~~~~~~~~~~~~~~~

.. code:: bash

   export WANDB_API_KEY={YOUR_WANDB_API_KEY}

   # Define a timestamp function
   function now() {
       date '+%Y-%m-%d-%H-%M'
   }

**Preprocess the Dataset**
~~~~~~~~~~~~~~~~~~~~~~~~~~

**Note:** The following data processing and training commands must be run in the verl-multiturn-rollout environment.

.. code:: bash

   python3 examples/data_preprocess/preprocess_search_r1_dataset.py

Testing on 8 x H20
~~~~~~~~~~~~~~~~~~

.. code:: bash

   # Ensure the now() function is defined
   # Create a logs directory
   mkdir -p logs

   # Set GPUs and run with a suitable log path
   export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7

   nohup bash examples/sglang_multiturn/search_r1_like/run_qwen2.5-3b_instruct_search_multiturn.sh \
       trainer.experiment_name=qwen2.5-3b-it_rm-searchR1-like-sgl-multiturn-$(now) \
       > logs/searchR1-like$(now).log 2>&1 &

Custom Search Configuration
---------------------------

To enable multi-turn reasoning, set the following fields in your config:

.. code:: yaml

   actor_rollout_ref:
     rollout:
       name: "sglang"
       multi_turn:
         enable: True

You must specify ``retrieval_service_url`` in ``examples/sglang_multiturn/config/tool_config/search_tool_config.yaml``, and properly configure concurrency. For more details on concurrency, refer to the Sandbox Fusion example:
.. code:: yaml

   tools:
     - class_name: verl.tools.search_tool.SearchTool
       config:
         retrieval_service_url: http://127.0.0.1:8000/retrieve
         num_workers: 120
         rate_limit: 120
         timeout: 30

The retriever input/output formats are as follows. If your service parameters match, you only need to modify ``retrieval_service_url``. You can also customize the request logic in ``search_r1_like_utils.py``.

.. code:: python

   Input format:
   {
       "queries": ["What is Python?", "Tell me about neural networks."],
       "topk": 3,
       "return_scores": true
   }

   Output format (when return_scores=True, similarity scores are returned):
   {
       "result": [
           [  # Results for each query
               {"document": doc, "score": score},
               # ... more documents
           ],
           # ... results for other queries
       ]
   }

Notes
-----

1. The total training time is about 27 hours. Meanwhile, the validation dataset is very large (51k samples), and each validation run takes about 6,000 s; therefore, ``val_before_train=False`` by default.

================================================
FILE: verl_rl/docs/single_controller.rst
================================================

The Design of ``verl.single_controller``
==============================================

Last updated: 05/21/2025.

**Author:**\ `Wang Zhang `__

Preface
-------

We prepared this document for developers of ``verl``, particularly those interested in understanding or contributing to the ``verl.single_controller`` module. It is not intended for end users, but for contributors seeking to understand the architectural rationale and internal mechanics.

--------------

Origin
------

The ``single_controller`` module originated from a request I received — to adapt a toy single-process RLHF script into a distributed system with minimal changes, while maintaining ease of debugging.

Common practice — such as using PyTorch’s Distributed Data Parallel (DDP) — typically involves wrapping ``nn.Module`` and launching multiple processes that execute the same function under different ranks. However, this approach presents two main limitations in the context of distributed RLHF:

- Difficulty representing the multiple DAGs required by PPO;
- Difficulty inspecting intermediate tensors during training.

To maintain debuggability, we opted for a different approach — breaking the training loop into well-defined stages like ``generate_sequences``, ``compute_advantages``, and so on.

We selected `Ray `__ as the initial backend for ``verl`` due to its ability to expose Python class methods as RPC endpoints. However, Ray’s default model only supports **one method call, one RPC**, while training LLMs typically requires coordination across multiple processes. To hide the multiple Ray-actor invocations behind a single method call, we introduced the following components:

- ``WorkerGroup`` – manages a group of remote workers and provides a unified interface for multi-process distributed computation;
- ``ResourcePool`` – binds computational resources to worker processes;
- ``ClassWithArgs`` – enables delayed remote instantiation with specified initialization arguments.

--------------

A Running Example: ``generate_sequences``
-----------------------------------------

To illustrate the design, we walk through how the ``generate_sequences`` method in the ``ActorRolloutRefWorker`` class is registered and invoked across distributed workers.

--------------

Step 1: Register with a Decorator
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The first step is to define the ``generate_sequences`` method and decorate it with ``@register``, as it will be called from the driver script.

**Source:** `fsdp_workers.py `__
.. code:: python

   class ActorRolloutRefWorker(Worker):
       ...
       @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO)
       def generate_sequences(self, prompts: DataProto):
           prompts = prompts.to(torch.cuda.current_device())
           ...

The ``@register`` decorator adds metadata to the ``generate_sequences`` method. Currently, it doesn’t alter functionality, but attaches attributes via a magic key (``MAGIC_ATTR``):

**Source:** `decorator.py `__

.. code:: python

   def register(dispatch_mode=Dispatch.ALL_TO_ALL, execute_mode=Execute.ALL, blocking=True, materialize_futures=True):
       ...
       def decorator(func):
           @wraps(func)
           def inner(*args, **kwargs):
               if materialize_futures:
                   args, kwargs = _materialize_futures(*args, **kwargs)
               return func(*args, **kwargs)

           attrs = {"dispatch_mode": dispatch_mode, "execute_mode": execute_mode, "blocking": blocking}
           setattr(inner, MAGIC_ATTR, attrs)
           return inner

       return decorator

As the code shows, the values of ``dispatch_mode``, ``execute_mode`` and ``blocking`` are attached to the ``generate_sequences`` method.

--------------

Step 2: Binding During Initialization
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

These attached attributes are extracted and utilized when ``ActorRolloutRefWorker``, wrapped in a ``RayClassWithArgs``, is passed into a ``RayWorkerGroup``.

**Source:** `main_generation.py `__

.. code:: python

   ray_cls_with_init = RayClassWithInitArgs(cls=ray.remote(ActorRolloutRefWorker), config=config, role="rollout")
   resource_pool = RayResourcePool(process_on_nodes=[config.trainer.n_gpus_per_node] * config.trainer.nnodes)
   wg = RayWorkerGroup(resource_pool=resource_pool, ray_cls_with_init=ray_cls_with_init)

During the `initialization `__ of ``RayWorkerGroup``, two key steps occur:

1. Worker instances (Ray actors) are created: `RayWorkerGroup._init_with_resource_pool `__
2. Methods decorated with ``@register`` are bound to ``RayWorkerGroup``: `RayWorkerGroup._bind_worker_method `__

.. figure:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/worker_group_init.png?raw=true
   :alt: initialization_and_binding_of_worker_group

   initialization_and_binding_of_worker_group

The binding procedure is the heart of ``verl.single_controller``.

**Key function:** `WorkerGroup._bind_worker_method `__

.. code:: python

   def _bind_worker_method(self, user_defined_cls, func_generator):
       ...
       for method_name in dir(user_defined_cls):
           try:
               method = getattr(user_defined_cls, method_name)
               assert callable(method)
           except Exception:
               continue  # Skip properties
       <<>>

When a method has the ``MAGIC_ATTR``, the attributes set by ``@register`` are extracted:

.. code:: python

       <<>>
       if hasattr(method, MAGIC_ATTR):
           attribute = getattr(method, MAGIC_ATTR)
           dispatch_mode = attribute["dispatch_mode"]
           execute_mode = attribute["execute_mode"]
           blocking = attribute["blocking"]
       <<>>

As shown in the flow chart above, these attributes are fed into ``func_generator``. However, ``func_generator`` takes ``method_name``, ``dispatch_fn``, ``collect_fn``, ``execute_fn``, and ``blocking``. We need to find the ``dispatch_fn`` and ``collect_fn`` associated with the ``dispatch_mode`` (``DP_COMPUTE_PROTO``) in `DISPATCH_MODE_FN_REGISTRY `__:

.. code:: python3

   DISPATCH_MODE_FN_REGISTRY = {
       Dispatch.ONE_TO_ALL: {
           "dispatch_fn": dispatch_one_to_all,
           "collect_fn": collect_all_to_all,
       },
       ...
       Dispatch.DP_COMPUTE_PROTO: {
           "dispatch_fn": dispatch_dp_compute_data_proto,
           "collect_fn": collect_dp_compute_data_proto,
       },
       ...
   }

Similarly, the ``execute_fn`` is selected by ``execute_mode`` and extracted by:
.. code:: python

       <<>>
       # get execute_fn_name
       execute_mode = get_predefined_execute_fn(execute_mode=execute_mode)
       wg_execute_fn_name = execute_mode["execute_fn_name"]

       # get execute_fn from string
       try:
           execute_fn = getattr(self, wg_execute_fn_name)
           assert callable(execute_fn), "execute_fn must be callable"
       except Exception:
           print(f"execute_fn {wg_execute_fn_name} is invalid")
           raise
       <<>>

In this ``generate_sequences`` case:

- ``dispatch_mode = Dispatch.DP_COMPUTE_PROTO``
- ``dispatch_fn = dispatch_dp_compute_data_proto``
- ``collect_fn = collect_dp_compute_data_proto``
- ``execute_fn = RayWorkerGroup.execute_all``

ONE_TO_ALL vs. DP_COMPUTE_PROTO
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

A ``dispatch_mode`` is associated with a ``dispatch_fn`` and a ``collect_fn``. As the name implies, ``dispatch_fn`` processes the input arguments in the ``WorkerGroup`` and generates a batch (list) of input arguments, each of which is fed to one of the workers attached to the ``WorkerGroup``.

The ``dispatch_fn`` of ``ONE_TO_ALL`` is `dispatch_one_to_all `__, which simply duplicates all the input arguments into N replicas, where N equals the number of workers attached to the ``worker_group``:

.. code:: python

   def dispatch_one_to_all(worker_group, *args, **kwargs):
       args = tuple([arg] * worker_group.world_size for arg in args)
       kwargs = {k: [v] * worker_group.world_size for k, v in kwargs.items()}
       return args, kwargs

The ``dispatch_fn`` of ``DP_COMPUTE_PROTO`` is `dispatch_dp_compute_data_proto `__, which uses ``DataProto.chunk`` to split a large ``DataProto`` into N smaller ``DataProto`` objects, where N equals the world size (number of workers) of the ``worker_group``:

.. code:: python

   def dispatch_dp_compute_data_proto(worker_group, *args, **kwargs):
       from verl.single_controller.base.worker_group import WorkerGroup

       assert isinstance(worker_group, WorkerGroup)
       # Note: enable auto padding for dp compute DatapProto
       splitted_args, splitted_kwargs = _split_args_kwargs_data_proto_with_auto_padding(
           worker_group.world_size,
           *args,
           **kwargs,
       )
       return splitted_args, splitted_kwargs

The ``collect_fn`` follows the same pattern: it processes a batch (list) of return values from all workers of a ``WorkerGroup`` and merges them either into a list, as ``collect_all_to_all`` does, or into a large ``DataProto``, as ``collect_dp_compute_data_proto`` does.

Finally, a new method is dynamically generated using ``func_generator`` and added to the ``WorkerGroup`` instance:

.. code:: python

       <<>>
       # bind a new method to the RayWorkerGroup
       func = func_generator(
           self,
           method_name,
           dispatch_fn=dispatch_fn,
           collect_fn=collect_fn,
           execute_fn=execute_fn,
           blocking=blocking,
       )

       try:
           setattr(self, method_name, func)
           method_names.append(method_name)
       except Exception as e:
           raise ValueError(f"Fail to set method_name {method_name}") from e

This makes the method invocable via the ``WorkerGroup`` interface.

--------------

Step 3: Call Chain
~~~~~~~~~~~~~~~~~~

All the machinery above ensures that distributed calls feel identical to single-process ones. In the original single-process script, the code looks like:

.. code:: python

   rollout = Rollout()
   rollout.generate_sequences(batch)

With ``verl``, the multiprocess program becomes:

.. code:: python

   rollout = RayWorkerGroup(resource_pool=[4], RayClassWithArgs(Rollout))
   rollout.generate_sequences(batch)
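To make the dispatch/execute/collect pattern concrete, below is a minimal, self-contained sketch of what a ``func_generator``-style binding does. It is illustrative only: the names ``make_worker_group_method``, ``ToyWorker``, ``dispatch_chunk`` and ``collect_concat`` are hypothetical, and this is not verl code; in verl itself the execute step is a Ray RPC rather than a local call, but the composition is the same.

.. code:: python

   from typing import Any, Callable, List

   def make_worker_group_method(
       workers: List[Any],
       method_name: str,
       dispatch_fn: Callable,  # splits one input into one shard per worker
       collect_fn: Callable,   # merges the per-worker outputs back together
   ) -> Callable:
       def call(batch):
           shards = dispatch_fn(batch, len(workers))   # dispatch
           outputs = [getattr(w, method_name)(shard)   # execute
                      for w, shard in zip(workers, shards)]
           return collect_fn(outputs)                  # collect
       return call

   # Toy DP-style dispatch/collect over plain lists, standing in for
   # ``DataProto.chunk`` on dispatch and the corresponding merge on collect:
   def dispatch_chunk(batch, n):
       k = (len(batch) + n - 1) // n
       return [batch[i * k:(i + 1) * k] for i in range(n)]

   def collect_concat(outputs):
       return [item for out in outputs for item in out]

   class ToyWorker:
       def generate_sequences(self, prompts):
           return [p + " -> response" for p in prompts]

   workers = [ToyWorker(), ToyWorker()]
   generate = make_worker_group_method(workers, "generate_sequences",
                                       dispatch_chunk, collect_concat)
   print(generate(["a", "b", "c", "d"]))  # results merged from both workers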
.. figure:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/call_generate_sequences.png?raw=true
   :alt: call_chain_of_generate_sequences

   call_chain_of_generate_sequences

Behind this simple call:

- ``dispatch_fn`` splits input across workers
- ``execute_fn`` performs the actual remote invocation
- ``collect_fn`` gathers the results

All of this is abstracted away, enabling developers to write distributed code with minimal changes to their existing logic.

--------------

Beyond RL Post-Training: Generalizing ``verl.single_controller``
----------------------------------------------------------------

The ``verl.single_controller`` module generalizes well beyond reinforcement learning. It provides a clean abstraction for batch-processing remote method calls, with automatic input/output handling. By minimizing the gap between single-process and multi-process scripts, ``verl.single_controller`` opens the door to distributed computing in broader domains — not limited to RL post-training.

We hope this design inspires more examples and extensions from the community.

================================================
FILE: verl_rl/docs/start/agentic_rl.rst
================================================

Agentic RL Training
===================

Last updated: 07/15/2025.

Overview
----------

The goal of Agentic RL is to improve the performance of the backend model by applying reinforcement learning to the agent. To support this, a series of features has been developed:

1. Server-based asynchronous rollout
2. Multi-turn conversations and tool calls
3. LangGraph-based Agent

This document explains the underlying system principles and usage to help users implement Agentic RL.

Server-based Asynchronous Rollout
---------------------------------

Since agents need to interact with the environment through various tool calls, an asyncio-based coroutine mechanism is used to execute each rollout request asynchronously. This avoids GPU idling while waiting for tool-call results and thereby improves training performance.

To support asynchronous rollout, the inference engine (server) and the agent (client) are architecturally separated, implementing a server-based system with the following objectives:

1. Enabling load-balancing mechanisms to balance loads across multiple GPUs and reduce the impact of long-tail requests on performance. For this purpose, scheduling capabilities in stream mode (recipe/stream_mode) are implemented as a recipe.
2. Preventing agent-specific features such as tracing from affecting the inference engine.

System Architecture
~~~~~~~~~~~~~~~~~~~

.. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/agent_loop.png?raw=true

For more detail on the internal design, please refer to :doc:`Agent Loop<../advance/agent_loop>`.
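To illustrate why asynchronous rollout keeps the GPU busy, here is a minimal, self-contained sketch (illustrative only, not verl code) in which eight rollouts overlap their tool-call latency instead of serializing it:

.. code-block:: python

   import asyncio

   async def call_tool(query: str) -> str:
       # Stands in for tool/network latency; the inference engine is free
       # to serve other requests while this request is waiting.
       await asyncio.sleep(1.0)
       return f"tool result for {query!r}"

   async def rollout_one(prompt: str) -> str:
       # First "generation" turn decides to call a tool ...
       tool_out = await call_tool(prompt)
       # ... and the second turn consumes the tool output.
       return f"{prompt} + {tool_out}"

   async def main():
       prompts = [f"prompt-{i}" for i in range(8)]
       # All eight rollouts run concurrently, so the total wall time is
       # roughly one tool-call latency instead of eight.
       results = await asyncio.gather(*(rollout_one(p) for p in prompts))
       print(f"{len(results)} rollouts finished")

   asyncio.run(main())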
System Components
~~~~~~~~~~~~~~~~~

+--------------------------+----------------------------------------------------------------------------+
| Component                | Role                                                                       |
+==========================+============================================================================+
| AgentLoop                | Client, implements Agent functions                                         |
+--------------------------+----------------------------------------------------------------------------+
| AsyncLLMServerManager    | Inference gateway, provides generate interface for AgentLoop               |
+--------------------------+----------------------------------------------------------------------------+
| AsyncServer              | Server, each instance is connected to one DP group of the inference engine |
+--------------------------+----------------------------------------------------------------------------+

**"generate" Interface**

A ray-actor-based "generate" function is used between the Client and Server instead of the standard chat completion API. This is because the conversion between tokens and text can be irreversible: the tokens obtained by re-encoding decoded text can differ from those originally generated by the LLM. During the training phase, it is necessary to strictly use the tokens generated by LLM inference to avoid inaccuracies in computing advantages, which could affect model performance. Having the Server provide a token-based API helps the Client maintain the relationship between the text generated by tool calls and the tokens returned by the LLM, so as to output correct tokens for training.

**Inference Engine Adaptation**

AsyncServer uniformly provides a generate function to the upper layer, with separate implementations for SGLang and vLLM to hide the underlying differences:

1. The SGLang AsyncServer uses the async_generate interface of the SGLang engine, which is located on the first GPU of each TP group. Therefore, AsyncServer needs to remotely call async_generate through a ray actor.
2. The vLLM AsyncServer uses the generate interface of the vLLM engine, which can communicate with the GPUs in the TP group through ZMQ and can be directly called in AsyncServer.

Usage Example
~~~~~~~~~~~~~

Follow the :doc:`GSM8K example<../examples/gsm8k_example>` to prepare the dataset and model checkpoints. Two options are required to use the agent loop:

- `data.return_raw_chat=True`
- `actor_rollout_ref.rollout.mode=async`

This example uses the sglang inference engine by default; you can also modify the rollout name to use vllm.

.. code-block:: bash

   bash examples/grpo_trainer/run_qwen2-7b_seq_balance.sh

Multi-turn Conversations and Tool Calls
---------------------------------------

Follow :doc:`Multi-turn Rollout Support<../sglang_multiturn/multiturn>` to prepare tool and configuration files. The Tool Agent Loop has an additional requirement: adding an "agent_name" field to the dataset. During rollout, it will choose to use tool_agent_loop or single_turn_agent (default) based on this field.

Usage Example
~~~~~~~~~~~~~

.. code-block:: bash

   # install mlflow to view toolcall and llm trace
   pip install mlflow

   # This will download and preprocess the GSM8K dataset into ~/data/gsm8k/ and add the "agent_name" field.
   python3 examples/data_preprocess/gsm8k_tool_agent_loop.py

   # Start training with tool calls; mlflow-based tracing is enabled to help debug the rollout details
   bash examples/sglang_multiturn/run_qwen2.5-3b_gsm8k_tool_agent_mlflow.sh

   # When training is done, start a mlflow server to view the trace
   mlflow ui -h 0.0.0.0 -p 5000 --backend-store-uri sqlite:////tmp/mlruns.db
   # then you can open http://<your-ip>:5000 in a browser to view the trace

Note: During training, because the model may sometimes fail to generate correct toolcall tags, an error message "Failed to decode tool call" will be output to the console; this does not indicate an abnormality in training.

Follow :doc:`Rollout trace<../advance/rollout_trace>` to learn more about the trace feature.

Agent Framework
---------------

System Architecture
~~~~~~~~~~~~~~~~~~~

.. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/langgraph_agent.png?raw=true

System Components
~~~~~~~~~~~~~~~~~

+--------------------------+-----------------------------------------------------------------------------------------------+
| Component                | Role                                                                                          |
+==========================+===============================================================================================+
| ChatModel                | LLM object of LangChain, used to adapt to the "generate" API provided by AsyncLLMServerManager|
+--------------------------+-----------------------------------------------------------------------------------------------+
| RectAgentLoop            | Agent adaptation layer, which by default supports a naive LangGraph agent.                    |
|                          | New classes can be derived to support user-defined Agents; the run function needs to be       |
|                          | implemented to complete Agent calls.                                                          |
+--------------------------+-----------------------------------------------------------------------------------------------+
| AsyncServer              | Server, each instance is connected to one DP group of the inference engine.                   |
+--------------------------+-----------------------------------------------------------------------------------------------+

Follow the doc "recipe/langgraph_agent/example/README.md" for more details.

================================================
FILE: verl_rl/docs/start/install.rst
================================================

Installation
============

Requirements
------------

- **Python**: Version >= 3.9
- **CUDA**: Version >= 12.1

verl supports various backends. Currently, the following configurations are available:

- **FSDP** and **Megatron-LM** (optional) for training.
- **SGLang**, **vLLM** and **TGI** for rollout generation.

Choices of Backend Engines
----------------------------

1. Training:

We recommend using the **FSDP** backend to investigate, research and prototype different models, datasets and RL algorithms. The guide for using the FSDP backend can be found in :doc:`FSDP Workers<../workers/fsdp_workers>`.

For users who pursue better scalability, we recommend the **Megatron-LM** backend. Currently, we support `Megatron-LM v0.12.2 `_. The guide for using the Megatron-LM backend can be found in :doc:`Megatron-LM Workers<../workers/megatron_workers>`.

2. Inference:

For inference, vllm 0.8.3 and later versions have been tested for stability. We recommend turning on the env var `VLLM_USE_V1=1` for optimal performance.

For SGLang, refer to the :doc:`SGLang Backend<../workers/sglang_worker>` for detailed installation and usage instructions. SGLang rollout is under extensive development and offers many advanced features and optimizations.
We encourage users to report any issues or provide feedback via the `SGLang Issue Tracker `_.

The huggingface TGI integration is usually used for debugging and single-GPU exploration.

Install from docker image
-------------------------

We provide pre-built Docker images for quick setup. Starting from this version, we utilize a new image release hierarchy for productivity and stability.

The image types are divided into three large categories:

- **Base Image**: No inference or training frameworks, only basic dependencies are installed. You can directly install vLLM or SGLang on top of it, without needing to reinstall torch or CUDA.
- **Application Image**: Stable version with inference and training frameworks installed.
- **Community Image**: Unstable version with the latest frameworks and features.

The first two types of images are hosted on the dockerhub `verlai/verl `_ repository, while the preview images are hosted on community repositories.

.. note::

   The image versions are mapped to verl releases; for example, an image with tag ``verl0.4`` is built for verl release ``v0.4.x``.

Base Image
::::::::::

The stable base image is ``verlai/verl:base-verl0.4-cu124-cudnn9.8-torch2.6-fa2.7.4``. The installed package versions can be found from the tags, and the Dockerfile can be found in ``docker/verl[version]-[packages]/Dockerfile.base``.

The base images for preview are ``verlai/verl:base-verl0.5-cu126-cudnn9.8-torch2.7.1-fa2.8.0`` and ``verlai/verl:base-verl0.5-preview-cu128-cudnn9.8-torch2.7.1-fa2.8.0`` with different CUDA versions. From verl0.5, images are built with `Deep-EP `_ for efficient EP communication.

The base image is not updated frequently, and the app image can be built on top of it without reinstalling the base packages.

Application Image
:::::::::::::::::

From this version, we provide separate images for vLLM and SGLang due to the divergence of dependent packages like FlashInfer. There are four types of application images available:

- **vLLM with FSDP and Megatron**: ``verlai/verl:app-verl0.4-vllm0.8.5-mcore0.12.2-te2.2``, with Deep-EP support: ``verlai/verl:app-verl0.4-vllm0.8.5-mcore0.12.2-te2.2-deepep``.
- **SGLang with FSDP and Megatron**: ``verlai/verl:app-verl0.4-sglang0.4.6.post5-vllm0.8.5-mcore0.12.2-te2.2`` (needs vLLM support, but can have some package conflicts), with Deep-EP support: ``verlai/verl:app-verl0.4-sglang0.4.6.post5-vllm0.8.5-mcore0.12.2-te2.2-deepep``.
- **Preview version of SGLang with FSDP and Megatron, CUDA 12.6**: ``verlai/verl:app-verl0.5-sglang0.4.8-mcore0.12.2-te2.2``
- **Preview version of SGLang with FSDP and Megatron, CUDA 12.8**: ``verlai/verl:app-preview-verl0.5-sglang0.4.8-mcore0.12.2-te2.2``

The latest vLLM support is coming soon.

Docker images with Megatron backends can run post-training for large language models like ``Qwen/Qwen3-235B-A22B`` and ``deepseek-ai/DeepSeek-V3-0324``. Refer to the :doc:`Large Language Model Post-Training documentation<../perf/dpsk>` for more details.

Application images can be updated frequently, and the Dockerfile can be found in ``docker/verl[version]-[packages]/Dockerfile.app.[frameworks]``. Based on the base image, it is easy to build your own application image with the desired inference and training frameworks.

Community Image
:::::::::::::::

Community images are provided by the community and include the latest versions of vLLM and SGLang; they may include experimental features or configurations. They also cover other hardware and platforms, such as AMD GPUs with ROCm or AWS EFA and SageMaker.
For the latest vLLM with FSDP, please refer to the `hiyouga/verl `_ repository; the latest version is ``hiyouga/verl:ngc-th2.6.0-cu126-vllm0.8.4-flashinfer0.2.2-cxx11abi0``.

For the latest SGLang with FSDP, please refer to the `ocss884/verl-sglang `_ repository; the latest version is ``ocss884/verl-sglang:ngc-th2.6.0-cu126-sglang0.4.6.post5``, which is provided by the SGLang RL Group.

See the files under ``docker/`` for NGC-based images or if you want to build your own. Note that for AWS instances with an EFA network interface (SageMaker AI Pod), you need to install the EFA driver as shown in ``docker/Dockerfile.extenstion.awsefa``.

Installation from Docker
::::::::::::::::::::::::

After pulling the desired Docker image and installing the desired inference and training frameworks, you can run it with the following steps:

1. Launch the desired Docker image and attach into it (replace ``<image:tag>`` with the image you pulled):

.. code:: bash

   docker create --runtime=nvidia --gpus all --net=host --shm-size="10g" --cap-add=SYS_ADMIN -v .:/workspace/verl --name verl <image:tag> sleep infinity
   docker start verl
   docker exec -it verl bash

2. If you use the images provided, you only need to install verl itself without dependencies:

.. code:: bash

   # install the nightly version (recommended)
   git clone https://github.com/volcengine/verl && cd verl
   pip3 install --no-deps -e .

[Optional] If you hope to switch between different frameworks, you can install verl with the following command:

.. code:: bash

   # install the nightly version (recommended)
   git clone https://github.com/volcengine/verl && cd verl
   pip3 install -e .[vllm]
   pip3 install -e .[sglang]

Install from custom environment
---------------------------------------------

We recommend using the docker images for convenience. However, if your environment is not compatible with the docker image, you can also install verl in a python environment.

Pre-requisites
::::::::::::::

For the training and inference engines to make the best use of the hardware, CUDA/cuDNN and other dependencies are required, and some of these dependencies are easily overridden when installing other packages, so we put them in the :ref:`Post-installation` step.

.. note::

   The installation steps below are the recommended configurations for the latest version of verl. If you are trying to customize your own environment, please ignore the strict constraints.

We need to install the following pre-requisites:

- **CUDA**: Version >= 12.4
- **cuDNN**: Version >= 9.8.0
- **Apex**

CUDA 12.4 or above is recommended, matching the docker image; please refer to `NVIDIA's official website `_ for other versions of CUDA.

.. code:: bash

   # change directory to anywhere you like; the verl source code directory is not recommended
   wget https://developer.download.nvidia.com/compute/cuda/12.4.1/local_installers/cuda-repo-ubuntu2204-12-4-local_12.4.1-550.54.15-1_amd64.deb
   dpkg -i cuda-repo-ubuntu2204-12-4-local_12.4.1-550.54.15-1_amd64.deb
   cp /var/cuda-repo-ubuntu2204-12-4-local/cuda-*-keyring.gpg /usr/share/keyrings/
   apt-get update
   apt-get -y install cuda-toolkit-12-4
   update-alternatives --set cuda /usr/local/cuda-12.4

cuDNN can be installed via the following command; please refer to `NVIDIA's official website `_ for other versions of cuDNN.
.. code:: bash

   # change directory to anywhere you like; the verl source code directory is not recommended
   wget https://developer.download.nvidia.com/compute/cudnn/9.8.0/local_installers/cudnn-local-repo-ubuntu2204-9.8.0_1.0-1_amd64.deb
   dpkg -i cudnn-local-repo-ubuntu2204-9.8.0_1.0-1_amd64.deb
   cp /var/cudnn-local-repo-ubuntu2204-9.8.0/cudnn-*-keyring.gpg /usr/share/keyrings/
   apt-get update
   apt-get -y install cudnn-cuda-12

NVIDIA Apex is required for Megatron-LM and FSDP training. You can install it via the following command, but notice that this step can take a very long time. It is recommended to set the ``MAX_JOBS`` environment variable to accelerate the installation process, but do not set it too large, otherwise the memory will be overloaded and your machines may hang.

.. code:: bash

   # change directory to anywhere you like; the verl source code directory is not recommended
   git clone https://github.com/NVIDIA/apex.git && \
   cd apex && \
   MAX_JOBS=32 pip install -v --disable-pip-version-check --no-cache-dir --no-build-isolation --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" ./

Install dependencies
::::::::::::::::::::

.. note::

   We recommend using a fresh conda environment to install verl and its dependencies.

   **Notice that the inference frameworks often strictly pin your pytorch version and will directly override your installed pytorch if you are not paying enough attention.**

   As a countermeasure, it is recommended to install the inference frameworks first with the pytorch they need. For vLLM, if you hope to use your existing pytorch, please follow their official instructions `Use an existing PyTorch installation `_ .

1. First of all, to manage the environment, we recommend using conda:

.. code:: bash

   conda create -n verl python==3.10
   conda activate verl

2. Then, execute the ``install.sh`` script that we provide in verl:

.. code:: bash

   # Make sure you have activated the verl conda env
   # If you need to run with megatron
   bash scripts/install_vllm_sglang_mcore.sh
   # Or if you simply need to run with FSDP
   USE_MEGATRON=0 bash scripts/install_vllm_sglang_mcore.sh

If you encounter errors in this step, please check the script and manually follow the steps in it.

Install verl
::::::::::::

For installing the latest version of verl, the best way is to clone and install it from source. Then you can modify our code to customize your own post-training jobs.

.. code:: bash

   git clone https://github.com/volcengine/verl.git
   cd verl
   pip install --no-deps -e .

Post-installation
:::::::::::::::::

Please make sure that the installed packages are not overridden during the installation of other packages. The packages worth checking are:

- **torch** and the torch series
- **vLLM**
- **SGLang**
- **pyarrow**
- **tensordict**
- **nvidia-cudnn-cu12**: for the Megatron backend

If you encounter package-version issues while running verl, please update the outdated ones.

Install with AMD GPUs - ROCm kernel support
------------------------------------------------------------------

When you run on AMD GPUs (MI300) with the ROCm platform, you cannot use the previous quickstart to run verl. You should follow the steps below to build a docker image and run it. If you encounter any issues using AMD GPUs with verl, feel free to contact `Yusheng Su `_.

Find the docker for AMD ROCm: `docker/Dockerfile.rocm `_
::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
.. code-block:: bash

   # Build the docker in the repo dir:
   # docker build -f docker/Dockerfile.rocm -t verl-rocm:03.04.2015 .
   # docker images # you can find your built docker

   FROM rocm/vllm:rocm6.2_mi300_ubuntu20.04_py3.9_vllm_0.6.4

   # Set working directory
   # WORKDIR $PWD/app

   # Set environment variables
   ENV PYTORCH_ROCM_ARCH="gfx90a;gfx942"

   # Install vllm
   RUN pip uninstall -y vllm && \
       rm -rf vllm && \
       git clone -b v0.6.3 https://github.com/vllm-project/vllm.git && \
       cd vllm && \
       MAX_JOBS=$(nproc) python3 setup.py install && \
       cd .. && \
       rm -rf vllm

   # Copy the entire project directory
   COPY . .

   # Install dependencies
   RUN pip install "tensordict<0.6" --no-deps && \
       pip install accelerate \
       codetiming \
       datasets \
       dill \
       hydra-core \
       liger-kernel \
       numpy \
       pandas \
       peft \
       "pyarrow>=15.0.0" \
       pylatexenc \
       "ray[data,train,tune,serve]" \
       torchdata \
       transformers \
       wandb \
       orjson \
       pybind11 && \
       pip install -e . --no-deps

Build the image
::::::::::::::::::::::::

.. code-block:: bash

   docker build -t verl-rocm .

Launch the container
::::::::::::::::::::::::::::

.. code-block:: bash

   docker run --rm -it \
     --device /dev/dri \
     --device /dev/kfd \
     -p 8265:8265 \
     --group-add video \
     --cap-add SYS_PTRACE \
     --security-opt seccomp=unconfined \
     --privileged \
     -v $HOME/.ssh:/root/.ssh \
     -v $HOME:$HOME \
     --shm-size 128G \
     -w $PWD \
     verl-rocm \
     /bin/bash

If you do not want to run in root mode and want to assign yourself as the user, please add ``-e HOST_UID=$(id -u)`` and ``-e HOST_GID=$(id -g)`` to the above docker launch script.

verl with AMD GPUs currently supports FSDP as the training engine, and vLLM and SGLang as the inference engines. We will support Megatron in the future.

================================================
FILE: verl_rl/docs/start/more_resources.rst
================================================

More Resources
==============

Last updated: 06/30/2025.

- Introduction to verl (`Slides `_)
- verl Code Walkthrough (`Slides `_, `Talk in Chinese `_)

================================================
FILE: verl_rl/docs/start/multinode.rst
================================================

Multinode Training
==================

Last updated: 06/10/2025.

.. _wuxibin89: https://github.com/wuxibin89

Author: `Xibin Wu `_, `Yusheng Su `_.

Manual
------

Set up multinode ray cluster
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

1. Start the head node with ``ray start --head --dashboard-host=0.0.0.0``. There are two addresses you should care about:

- GCS address: ``ray start --address=<GCS address>``, which worker nodes should connect to.
- Dashboard address: ``<dashboard address>:8265``, where you should submit jobs to the cluster.

.. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/ray/head.png?raw=true

2. Start a worker node with ``ray start --address=<GCS address>``, using the GCS address you got above.

.. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/ray/worker.png?raw=true

3. Now you should see that the cluster has 2 nodes with ``ray status``.

.. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/ray/status.png?raw=true

4. Additionally, you can access the dashboard in the browser with the address you got above. *Firewall rules may need to be configured to access the dashboard; if there's any trouble, please contact your network administrator.*

.. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/ray/overview.png?raw=true

Submit job to ray cluster
~~~~~~~~~~~~~~~~~~~~~~~~~

1. Submit a ray job to the cluster with the dashboard address you got above.

.. code-block:: bash

   ray job submit --address="http://127.0.0.1:8265" \
       --runtime-env=verl/trainer/runtime_env.yaml \
       --no-wait \
       -- \
       python3 -m verl.trainer.main_ppo \
       trainer.n_gpus_per_node=8 \
       trainer.nnodes=2 \
       ...

.. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/ray/submit.png?raw=true

2. Then you can check the job status with the following commands:

- ``ray job list``: list all jobs submitted to the cluster.
- ``ray job logs <Submission ID>``: query the logs of the job.
- ``ray job status <Submission ID>``: query the status of the job.
- ``ray job stop <Submission ID>``: request the job to be stopped.

3. You can also access driver/task/actor logs in ``/tmp/ray/session_latest/logs/``; the driver log is ``job-driver-raysubmit_<Submission ID>.log``.

4. We strongly recommend viewing job details from the dashboard in multinode training, because it provides a more structured way to view the job information.

.. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/ray/job.png?raw=true

.. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/ray/job_detail.png?raw=true

Slurm
-----

TBD

dstack
------

`dstackai/dstack `_ is an open-source container orchestrator that simplifies distributed training across cloud providers and on-premises environments without the need to use K8S or Slurm.

Prerequisite
~~~~~~~~~~~~

Once dstack is `installed `_, initialize the directory as a repo with ``dstack init``.

.. code-block:: bash

   mkdir myproject && cd myproject
   dstack init

**Create a fleet**

Before submitting distributed training jobs, create a `dstack` `fleet `_.

Run a Ray cluster task
~~~~~~~~~~~~~~~~~~~~~~

Once the fleet is created, define a Ray cluster task, e.g. in ``ray-cluster.dstack.yml``:

.. code-block:: yaml

   type: task
   name: ray-verl-cluster
   nodes: 2

   env:
     - WANDB_API_KEY
     - PYTHONUNBUFFERED=1
     - CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7

   image: whatcanyousee/verl:ngc-cu124-vllm0.8.5-sglang0.4.6-mcore0.12.0-te2.2

   commands:
     - git clone https://github.com/volcengine/verl
     - cd verl
     - pip install --no-deps -e .
     - pip install hf_transfer hf_xet
     - |
       if [ $DSTACK_NODE_RANK = 0 ]; then
         python3 examples/data_preprocess/gsm8k.py --local_dir ~/data/gsm8k
         python3 -c "import transformers; transformers.pipeline('text-generation', model='Qwen/Qwen2.5-7B-Instruct')"
         ray start --head --port=6379;
       else
         ray start --address=$DSTACK_MASTER_NODE_IP:6379
       fi

   # Expose Ray dashboard port
   ports:
     - 8265

   resources:
     gpu: 80GB:8
     shm_size: 128GB

   # Save checkpoints on the instance
   volumes:
     - /checkpoints:/checkpoints

Now, if you run this task via ``dstack apply``, it will automatically forward the Ray dashboard port to ``localhost:8265``.
.. code-block:: bash

   dstack apply -f ray-cluster.dstack.yml

As long as the ``dstack apply`` command is attached, you can use ``localhost:8265`` to submit Ray jobs for execution.

Submit Ray jobs
~~~~~~~~~~~~~~~

Before you can submit Ray jobs, make sure `ray` is installed locally:

.. code-block:: shell

   pip install ray

Now you can submit the training job to the Ray cluster which is available at ``localhost:8265``:

.. code-block:: shell

   $ export RAY_ADDRESS=http://localhost:8265
   $ ray job submit \
       -- python3 -m verl.trainer.main_ppo \
       data.train_files=/root/data/gsm8k/train.parquet \
       data.val_files=/root/data/gsm8k/test.parquet \
       data.train_batch_size=256 \
       data.max_prompt_length=512 \
       data.max_response_length=256 \
       actor_rollout_ref.model.path=Qwen/Qwen2.5-7B-Instruct \
       actor_rollout_ref.actor.optim.lr=1e-6 \
       actor_rollout_ref.actor.ppo_mini_batch_size=64 \
       actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \
       actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=8 \
       actor_rollout_ref.rollout.tensor_model_parallel_size=1 \
       actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \
       actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \
       critic.optim.lr=1e-5 \
       critic.model.path=Qwen/Qwen2.5-7B-Instruct \
       critic.ppo_micro_batch_size_per_gpu=4 \
       algorithm.kl_ctrl.kl_coef=0.001 \
       trainer.project_name=ppo_training \
       trainer.experiment_name=qwen-2.5-7B \
       trainer.val_before_train=False \
       trainer.n_gpus_per_node=8 \
       trainer.nnodes=2 \
       trainer.default_local_dir=/checkpoints \
       trainer.save_freq=10 \
       trainer.test_freq=10 \
       trainer.total_epochs=15 \
       trainer.resume_mode=disable 2>&1 | tee verl_demo.log

For more details on how `dstack` works, check out its `documentation `_.

How to debug?
---------------------

Ray Distributed Debugger VSCode Extension (Recommended)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

1. Starting with Ray 2.39, Anyscale has introduced the `Ray Distributed Debugger `_ VSCode extension. Follow the extension’s installation instructions, then add your cluster using the dashboard URL you obtained earlier.

.. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/ray/debugger.png?raw=true
   :alt: Ray Distributed Debugger VSCode extension screenshot

2. Prerequisites. Ensure the following are installed (see the extension README for more detail):

- Visual Studio Code
- `ray[default]` >= 2.9.1
- `debugpy` >= 1.8.0

.. image:: https://github.com/aoshen524/verl/blob/main/docs/start/c7098b755ff689859837773a916c857.png?raw=true
   :alt: VSCode with Ray prerequisites

3. Environment Variables. To enable post‑mortem debugging, set:

.. code-block:: bash

   export RAY_DEBUG_POST_MORTEM=1

.. admonition:: Note
   :class: important

   Be sure to remove any legacy flags before starting Ray:

   - `RAY_DEBUG=legacy`
   - `--ray-debugger-external`

4. Configuring Breakpoints. Set up ``breakpoint()`` calls in your code and submit the job to the cluster; the extension will then show the breakpoint information.

   1. Insert `breakpoint()` calls into your remote functions.
   2. Submit your job to the cluster.

   The extension will detect active breakpoints and display them in VSCode.

   .. image:: https://github.com/aoshen524/verl/blob/main/docs/start/4ddad74395c79a1402331c0ce73316f.png?raw=true
      :alt: Detected breakpoint in VSCode

   **Note:** Breakpoints are only supported inside functions decorated with `@ray.remote`.

5. Launching the Debugger. Run your job directly from the command line (do not use a `launch.json`):

.. code-block:: bash

   python job.py

6. Attaching to a Breakpoint.
Once the process hits the first `breakpoint()`, click the Ray Distributed Debugger icon in the VSCode sidebar to attach the debugger.

.. image:: https://github.com/aoshen524/verl/blob/main/docs/start/4ddad74395c79a1402331c0ce73316f.png?raw=true
   :alt: Attaching VSCode debugger to Ray process

7. Debugging with Multiple ``breakpoint()`` Calls. For each subsequent task, first disconnect the current debugger session, then click the extension icon again to attach to the next breakpoint.

.. image:: https://github.com/aoshen524/verl/blob/main/docs/start/6e83c910a62c82fecb89c6619e001cd.png?raw=true
   :alt: Disconnecting and reconnecting the debugger

Legacy Ray Debugger
~~~~~~~~~~~~~~~~~~~

1. Ray has a built-in legacy `debugger `_ that allows you to debug your distributed applications. To enable the debugger, start the ray cluster with ``RAY_DEBUG=legacy`` and ``--ray-debugger-external``.

.. code-block:: bash

   # start head node
   RAY_DEBUG=legacy ray start --head --dashboard-host=0.0.0.0 --ray-debugger-external
   # start worker node
   RAY_DEBUG=legacy ray start --address='10.124.46.192:6379' --ray-debugger-external

2. Set up a breakpoint in your code, and submit the job to the cluster. Then run ``ray debug`` to wait at the breakpoint:

.. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/ray/legacy.png?raw=true

Multi-node training on AMD clusters
---------------------------------------------------------------------------------------

If you want to run multi-node training on an AMD cluster with Slurm using a Docker/Podman container, you can use the following script. If you encounter any issues using AMD GPUs with verl, please contact `Yusheng Su `_.

.. note::

   1. You need to use ``podman`` or ``docker`` in the following script. We will release the apptainer script later.
   2. If you want to use ``podman``, just replace ``docker`` with ``podman`` in the following script.

The script includes the following steps:

1. SLURM Configuration
2. Environment Setup
3. Docker/Podman Container Setup
4. Ray Cluster Initialization
5. Data Preprocessing
6. Model Setup
7. Training Launch

slurm_script.sh
~~~~~~~~~~~~~~~~~~~~
.. code-block:: bash

   #!/bin/bash
   #SBATCH --job-name=verl-ray-on-slurm
   #SBATCH --nodes=2
   #SBATCH --ntasks-per-node=2
   #SBATCH --mem=200G
   #SBATCH --time=30-00:00:00
   #SBATCH --gpus-per-node=8
   #SBATCH --cpus-per-task=28
   #SBATCH --output=../verl_log/slurm-%j.out
   #SBATCH --error=../verl_log/slurm-%j.err
   #SBATCH --nodelist=gpu-[0,1]

   # load necessary modules
   ### Run this setup
   # [Cluster]: Use docker
   # docker pull docker.io/rocm/vllm:rocm6.2_mi300_ubuntu20.04_py3.9_vllm_0.6.4

   ##########################################################################
   ###The following setting should be set in different project and cluster###
   ##########################################################################
   ### Project
   CONTAINER_NAME="multinode_verl_training"
   IMG="verl.rocm"
   DOCKERFILE="docker/Dockerfile.rocm"
   # echo $PWD
   verl_workdir="${HOME}/projects/verl_upstream"
   export TRANSFORMERS_CACHE="${HOME}/.cache/huggingface"
   export HF_HOME=$TRANSFORMERS_CACHE

   ### Cluster Network Setting
   export NCCL_DEBUG=TRACE
   export GPU_MAX_HW_QUEUES=2
   export TORCH_NCCL_HIGH_PRIORITY=1
   export NCCL_CHECKS_DISABLE=1
   # export NCCL_IB_HCA=rdma0,rdma1,rdma2,rdma3,rdma4,rdma5,rdma6,rdma7
   export NCCL_IB_HCA=mlx5_0,mlx5_1,mlx5_2,mlx5_3,mlx5_4,mlx5_5,mlx5_8,mlx5_9
   export NCCL_IB_GID_INDEX=3
   export NCCL_CROSS_NIC=0
   export CUDA_DEVICE_MAX_CONNECTIONS=1
   export NCCL_PROTO=Simple
   export RCCL_MSCCL_ENABLE=0
   export TOKENIZERS_PARALLELISM=false
   export HSA_NO_SCRATCH_RECLAIM=1
   ##########################################################################

   ### For rocm and training script
   export HIP_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
   export ROCR_VISIBLE_DEVICES=$HIP_VISIBLE_DEVICES
   export CUDA_VISIBLE_DEVICES=$HIP_VISIBLE_DEVICES

   # Build and launch the Docker container
   srun bash -c "
       # Exit on any error
       set -e

       # Clean up dangling images (images with tag)
       docker image prune -f

       # Need to pull the docker first
       docker pull docker.io/rocm/vllm:rocm6.2_mi300_ubuntu20.04_py3.9_vllm_0.6.4

       if ! docker images --format "{{.Repository}}:{{.Tag}}" | grep -q "${IMG}"; then
           echo \"Building ${IMG} image...\"
           docker build -f \"${DOCKERFILE}\" -t \"${IMG}\" .
       else
           echo \"${IMG} image already exists, skipping build\"
       fi

       # Remove the old container if it exists
       docker rm \"${CONTAINER_NAME}\" 2>/dev/null || true

       # Check network devices
       ibdev2netdev

       # Launch the docker
       docker run --rm -d \
         -e HYDRA_FULL_ERROR=1 \
         -e HIP_VISIBLE_DEVICES=${HIP_VISIBLE_DEVICES} \
         -e ROCR_VISIBLE_DEVICES=${ROCR_VISIBLE_DEVICES} \
         -e CUDA_VISIBLE_DEVICES=${CUDA_VISIBLE_DEVICES} \
         -e NCCL_DEBUG=${NCCL_DEBUG} \
         -e GPU_MAX_HW_QUEUES=${GPU_MAX_HW_QUEUES} \
         -e TORCH_NCCL_HIGH_PRIORITY=${TORCH_NCCL_HIGH_PRIORITY} \
         -e NCCL_CHECKS_DISABLE=${NCCL_CHECKS_DISABLE} \
         -e NCCL_IB_HCA=${NCCL_IB_HCA} \
         -e NCCL_IB_GID_INDEX=${NCCL_IB_GID_INDEX} \
         -e NCCL_CROSS_NIC=${NCCL_CROSS_NIC} \
         -e CUDA_DEVICE_MAX_CONNECTIONS=${CUDA_DEVICE_MAX_CONNECTIONS} \
         -e NCCL_PROTO=${NCCL_PROTO} \
         -e RCCL_MSCCL_ENABLE=${RCCL_MSCCL_ENABLE} \
         -e TOKENIZERS_PARALLELISM=${TOKENIZERS_PARALLELISM} \
         -e HSA_NO_SCRATCH_RECLAIM=${HSA_NO_SCRATCH_RECLAIM} \
         -e TRANSFORMERS_CACHE=${TRANSFORMERS_CACHE} \
         -e HF_HOME=${HF_HOME} \
         --network host \
         --device /dev/dri \
         --device /dev/kfd \
         --device /dev/infiniband \
         --group-add video \
         --cap-add SYS_PTRACE \
         --security-opt seccomp=unconfined \
         --privileged \
         -v \${HOME}:\${HOME} \
         -v \${HOME}/.ssh:/root/.ssh \
         -w "${verl_workdir}" \
         --shm-size 128G \
         --name \"${CONTAINER_NAME}\" \
         \"${IMG}\" \
         tail -f /dev/null

       echo \"Container setup completed\"
   "

   # (Optional) If you do not want to run in root mode and want to assign yourself as the user,
   # please add `-e HOST_UID=$(id -u)` and `-e HOST_GID=$(id -g)` to the above docker launch script.

   ### Ray launch the nodes before training
   # Get the node names
   nodes_array=($(scontrol show hostnames "$SLURM_JOB_NODELIST" | tr '\n' ' '))
   head_node=${nodes_array[0]}
   head_node_ip=$(srun --nodes=1 --ntasks=1 -w "$head_node" hostname --ip-address)

   # if we detect a space character in the head node IP, we'll
   # convert it to an ipv4 address. This step is optional.
   if [[ "$head_node_ip" == *" "* ]]; then
       IFS=' ' read -ra ADDR <<<"$head_node_ip"
       if [[ ${#ADDR[0]} -gt 16 ]]; then
           head_node_ip=${ADDR[1]}
       else
           head_node_ip=${ADDR[0]}
       fi
       echo "IPV6 address detected. We split the IPV4 address as $head_node_ip"
   fi

   port=6379
   ip_head=$head_node_ip:$port
   export ip_head
   echo "IP Head: $ip_head"

   # make sure we set environment variables before Ray initialization
   # Print out all env variables
   printenv

   echo "Starting HEAD at $head_node"
   srun --nodes=1 --ntasks=1 -w "$head_node" \
       docker exec "${CONTAINER_NAME}" \
       ray start --head --node-ip-address="$head_node_ip" --port=$port \
       --dashboard-port=8266 \
       --num-cpus "${SLURM_CPUS_PER_TASK}" --num-gpus "${SLURM_GPUS_PER_NODE}" --block &

   # optional, though may be useful in certain versions of Ray < 1.0.
   sleep 10

   # number of nodes other than the head node
   worker_num=$((SLURM_JOB_NUM_NODES - 1))

   for ((i = 1; i <= worker_num; i++)); do
       node_i=${nodes_array[$i]}
       echo "Debug: Starting worker on node_i = ${node_i}"
       if [ -z "$node_i" ]; then
           echo "Error: Empty node name for worker $i"
           continue
       fi
       echo "Starting WORKER $i at $node_i"
       srun --nodes=1 --ntasks=1 -w "$node_i" \
           docker exec "${CONTAINER_NAME}" \
           ray start --address "$ip_head" --num-cpus "${SLURM_CPUS_PER_TASK}" --num-gpus "${SLURM_GPUS_PER_NODE}" --block &
       sleep 5
   done

   # Ray initialization test (see whether there were any errors in the above execution)
   echo "Testing Ray initialization in the slurm nodes..."
   docker exec "${CONTAINER_NAME}" python3 -c '
   import ray
   try:
       ray.init(address="auto")
       print("\n=== Ray Cluster Status ===")
       print(f"Number of nodes: {len(ray.nodes())}")
       for node in ray.nodes():
           print("Node: {}, Status: {}".format(node["NodeManagerHostname"], node["Alive"]))
       ray.shutdown()
       print("Ray initialization successful!")
   except Exception as e:
       print(f"Ray initialization failed: {str(e)}")
   '
   echo "=== Ray test completed ==="

   ######
   # Run data preprocessing
   echo "Starting GSM8K data preprocessing..."
   docker exec "${CONTAINER_NAME}" \
       python3 "examples/data_preprocess/gsm8k.py" "--local_dir" "../data/gsm8k"

   echo "Starting MATH data preprocessing..."
   docker exec "${CONTAINER_NAME}" \
       python3 "examples/data_preprocess/math_dataset.py" "--local_dir" "../data/math"

   train_files="../data/gsm8k/train.parquet"
   val_files="../data/gsm8k/test.parquet"

   # Download and test the model
   echo "Loading model..."
   docker exec "${CONTAINER_NAME}" \
       python3 -c "import transformers; transformers.pipeline('text-generation', model='Qwen/Qwen2-7B-Instruct')"
   MODEL_PATH="Qwen/Qwen2-7B-Instruct"
   echo "== Data and model loading Done =="

   echo "Start to train..."
   PYTHONUNBUFFERED=1 srun --overlap --nodes=${SLURM_NNODES} --ntasks=1 -w "$head_node" \
       docker exec "${CONTAINER_NAME}" \
       python3 -m verl.trainer.main_ppo \
       data.train_files=$train_files \
       data.val_files=$val_files \
       data.train_batch_size=1024 \
       data.max_prompt_length=1024 \
       data.max_response_length=1024 \
       actor_rollout_ref.model.path=$MODEL_PATH \
       actor_rollout_ref.actor.optim.lr=1e-6 \
       actor_rollout_ref.model.use_remove_padding=True \
       actor_rollout_ref.actor.ppo_mini_batch_size=256 \
       actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=8 \
       actor_rollout_ref.model.enable_gradient_checkpointing=True \
       actor_rollout_ref.actor.fsdp_config.param_offload=False \
       actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
       actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \
       actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
       actor_rollout_ref.rollout.name=vllm \
       actor_rollout_ref.rollout.gpu_memory_utilization=0.9 \
       actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=16 \
       actor_rollout_ref.ref.fsdp_config.param_offload=True \
       critic.optim.lr=1e-5 \
       critic.model.use_remove_padding=True \
       critic.model.path=$MODEL_PATH \
       critic.model.enable_gradient_checkpointing=False \
       critic.ppo_micro_batch_size_per_gpu=8 \
       critic.model.fsdp_config.param_offload=False \
       critic.model.fsdp_config.optimizer_offload=False \
       algorithm.kl_ctrl.kl_coef=0.0001 \
       trainer.critic_warmup=0 \
       trainer.logger='["console","wandb"]' \
       trainer.project_name='verl_example' \
       trainer.experiment_name='Qwen2.5-32B-Instruct_function_rm' \
       trainer.n_gpus_per_node=${SLURM_GPUS_PER_NODE} \
       trainer.val_before_train=False \
       trainer.nnodes=${SLURM_NNODES} \
       trainer.save_freq=-1 \
       trainer.test_freq=10 \
       trainer.total_epochs=15

Run multi-node training with the above slurm_script.sh
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Just ``sbatch`` your slurm_script.sh:

.. code-block:: bash

   sbatch slurm_script.sh

================================================
FILE: verl_rl/docs/start/quickstart.rst
================================================
.. _quickstart:

=========================================================
Quickstart: PPO training on GSM8K dataset
=========================================================

Post-train an LLM using the GSM8K dataset.

Introduction
------------

.. _hf_dataset_gsm8k: https://huggingface.co/datasets/gsm8k

In this example, we train an LLM to tackle the `GSM8k `_ task with function-based rewards. [1]_

Prerequisite:

- the latest version of ``verl`` and its dependencies installed following the installation guide. Using the docker image is recommended.
- a GPU with at least 24 GB HBM

Dataset Introduction
--------------------

GSM8k is a math problem dataset. Each prompt is a grade-school math problem, and the LLM is asked to solve it. Below is an example:

Prompt

   Katy makes coffee using teaspoons of sugar and cups of water in the ratio of 7:13. If she used a total of 120 teaspoons of sugar and cups of water, calculate the number of teaspoonfuls of sugar she used.

Solution

   The total ratio representing the ingredients she used to make the coffee is 7+13 = <<7+13=20>>20. Since the fraction representing the number of teaspoons she used is 7/20, she used 7/20*120 = <<7/20*120=42>>42 #### 42

Step 1: Prepare the dataset
----------------------------

We preprocess the dataset into parquet format so that (1) it contains the fields necessary for computing RL rewards and (2) it is faster to read.

.. code-block:: bash

   python3 examples/data_preprocess/gsm8k.py --local_dir ~/data/gsm8k

Step 2: Download a model for post-training
-------------------------------------------

In this example, we start with the ``Qwen2.5-0.5B-Instruct`` model.

If you want to perform SFT before RL, refer to the :doc:`Complete GSM8K Example<../examples/gsm8k_example>`, the `sft directory `_ and `SFT Trainer `_ for further details.

.. code-block:: bash

   python3 -c "import transformers; transformers.pipeline('text-generation', model='Qwen/Qwen2.5-0.5B-Instruct')"

Step 3: Perform PPO training with the instruct model
----------------------------------------------------------------------

**Reward Model/Function**

We use a pre-defined rule-based reward model. We force the model to produce a final answer following four "#" characters (``####``), as shown in the solution. We extract the final answer from both the solution and the model's output using regular-expression matching, and assign a reward of 1 to a correct answer and 0 to an incorrect or missing answer. For more details, please refer to `verl/utils/reward_score/gsm8k.py `_.

**Training Script**

Now let's run PPO training with the dataset and model above. [2]_

Set the ``data.train_files``, ``data.val_files``, ``actor_rollout_ref.model.path`` and ``critic.model.path`` based on your dataset and model names or paths. You may set ``VERL_USE_MODELSCOPE=True`` to download models from `modelscope `_ instead of `huggingface `_.
.. code-block:: bash

   PYTHONUNBUFFERED=1 python3 -m verl.trainer.main_ppo \
    data.train_files=$HOME/data/gsm8k/train.parquet \
    data.val_files=$HOME/data/gsm8k/test.parquet \
    data.train_batch_size=256 \
    data.max_prompt_length=512 \
    data.max_response_length=256 \
    actor_rollout_ref.model.path=Qwen/Qwen2.5-0.5B-Instruct \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.actor.ppo_mini_batch_size=64 \
    actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \
    actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=8 \
    actor_rollout_ref.rollout.tensor_model_parallel_size=1 \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \
    actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \
    critic.optim.lr=1e-5 \
    critic.model.path=Qwen/Qwen2.5-0.5B-Instruct \
    critic.ppo_micro_batch_size_per_gpu=4 \
    algorithm.kl_ctrl.kl_coef=0.001 \
    trainer.logger=console \
    trainer.val_before_train=False \
    trainer.n_gpus_per_node=1 \
    trainer.nnodes=1 \
    trainer.save_freq=10 \
    trainer.test_freq=10 \
    trainer.total_epochs=15 2>&1 | tee verl_demo.log

You are expected to see the following logs, indicating that training is in progress. The key metric ``val/test_score/openai/gsm8k`` is computed every ``trainer.test_freq`` steps:

.. code-block:: bash

   step:0 - timing/gen:21.470 - timing/ref:4.360 - timing/values:5.800 - actor/reward_kl_penalty:0.000 - actor/reward_kl_penalty_coeff:0.001 - timing/adv:0.109 - timing/update_critic:15.664 - critic/vf_loss:14.947 - critic/vf_clipfrac:0.000 - critic/vpred_mean:-2.056 - critic/grad_norm:1023.278 - critic/lr(1e-4):0.100 - timing/update_actor:20.314 - actor/entropy_loss:0.433 - actor/pg_loss:-0.005 - actor/pg_clipfrac:0.000 - actor/ppo_kl:0.000 - actor/grad_norm:1.992 - actor/lr(1e-4):0.010 - critic/score/mean:0.004 - critic/score/max:1.000 - critic/score/min:0.000 - critic/rewards/mean:0.004 - critic/rewards/max:1.000 - critic/rewards/min:0.000 - critic/advantages/mean:-0.000 - critic/advantages/max:2.360 - critic/advantages/min:-2.280 - critic/returns/mean:0.003 - critic/returns/max:0.000 - critic/returns/min:0.000 - critic/values/mean:-2.045 - critic/values/max:9.500 - critic/values/min:-14.000 - response_length/mean:239.133 - response_length/max:256.000 - response_length/min:77.000 - prompt_length/mean:104.883 - prompt_length/max:175.000 - prompt_length/min:68.000
   step:1 - timing/gen:23.020 - timing/ref:4.322 - timing/values:5.953 - actor/reward_kl_penalty:0.000 - actor/reward_kl_penalty_coeff:0.001 - timing/adv:0.118 - timing/update_critic:15.646 - critic/vf_loss:18.472 - critic/vf_clipfrac:0.384 - critic/vpred_mean:1.038 - critic/grad_norm:942.924 - critic/lr(1e-4):0.100 - timing/update_actor:20.526 - actor/entropy_loss:0.440 - actor/pg_loss:0.000 - actor/pg_clipfrac:0.002 - actor/ppo_kl:0.000 - actor/grad_norm:2.060 - actor/lr(1e-4):0.010 - critic/score/mean:0.000 - critic/score/max:0.000 - critic/score/min:0.000 - critic/rewards/mean:0.000 - critic/rewards/max:0.000 - critic/rewards/min:0.000 - critic/advantages/mean:0.000 - critic/advantages/max:2.702 - critic/advantages/min:-2.616 - critic/returns/mean:0.000 - critic/returns/max:0.000 - critic/returns/min:0.000 - critic/values/mean:-2.280 - critic/values/max:11.000 - critic/values/min:-16.000 - response_length/mean:232.242 - response_length/max:256.000 - response_length/min:91.000 - prompt_length/mean:102.398 - prompt_length/max:185.000 - prompt_length/min:70.000

Check out the ``Algorithm Baselines`` page for full training and validation logs for reference.
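For intuition, the rule-based reward described in Step 3 can be sketched in a few lines of Python. This is a simplified illustration, not the exact implementation in ``verl/utils/reward_score/gsm8k.py``:

.. code-block:: python

    import re

    def gsm8k_reward(response: str, ground_truth: str) -> float:
        """Extract the number after '####' from the response and compare to the ground truth."""
        match = re.search(r"#### (\-?[0-9\.\,]+)", response)
        if match is None:
            return 0.0  # no parsable final answer
        answer = match.group(1).replace(",", "")
        return 1.0 if answer == ground_truth else 0.0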
The checkpoint is saved at the following directory by default: ``checkpoints/${trainer.project_name}/${trainer.experiment_name}``.

You can merge the saved checkpoints into a Hugging Face model using the ``verl.model_merger`` module, for example:

.. code-block:: bash

   python3 -m verl.model_merger merge \
       --backend fsdp \
       --local_dir checkpoints/${trainer.project_name}/${trainer.experiment_name}/global_step_1/actor \
       --target_dir checkpoints/${trainer.project_name}/${trainer.experiment_name}/global_step_1/actor/huggingface

For more details about checkpointing and model merging, please refer to :ref:`checkpoint-page`.

To enable ``wandb`` for experiment tracking, set the following configs:

.. code-block:: bash

   trainer.logger='["console","wandb"]' \
   trainer.project_name=$YOUR_PROJECT_NAME \
   trainer.experiment_name=$YOUR_RUN_NAME \

If you encounter out-of-memory issues with less than 32 GB of HBM, enabling the following configs can help:

.. code-block:: bash

   actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=1 \
   critic.ppo_micro_batch_size_per_gpu=1 \

For the full set of configs, please refer to :ref:`config-explain-page` for detailed explanations and performance tuning.

.. [1] The original paper (https://arxiv.org/pdf/2110.14168) mainly focuses on training a verifier (a reward model) to solve math problems via Best-of-N sampling. In this example, we train an RL agent using a rule-based reward model.

.. [2] More training script examples for the FSDP and Megatron-LM backends are stored in the `examples/ppo_trainer `_ directory.

================================================
FILE: verl_rl/docs/start/ray_debug_tutorial.rst
================================================
Ray Debug Tutorial
==================

Last updated: 04/23/2025

.. _wuxibin89: https://github.com/wuxibin89

Author: `Ao Shen `_.

How to debug?
---------------------

Ray Distributed Debugger VSCode Extension (Recommended)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

1. Starting with Ray 2.39, Anyscale has introduced the `Ray Distributed Debugger `_ VSCode extension. Follow the extension's installation instructions, then add your cluster using the dashboard URL you obtained earlier.

.. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/ray/debugger.png?raw=true
   :alt: Ray Distributed Debugger VSCode extension screenshot

2. Prerequisites. Ensure the following are installed (see the extension README for more detail):

- Visual Studio Code
- `ray[default]` >= 2.9.1
- `debugpy` >= 1.8.0

.. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/ray/readme.png?raw=true
   :alt: VSCode with Ray prerequisites

3. Environment Variables. To enable post-mortem debugging, set:

.. code-block:: bash

   export RAY_DEBUG_POST_MORTEM=1

.. admonition:: Note
   :class: important

   Be sure to remove any legacy flags before starting Ray:

   - `RAY_DEBUG=legacy`
   - `--ray-debugger-external`

4. Configuring Breakpoints.

   1. Insert `breakpoint()` calls into your remote functions.
   2. Submit your job to the cluster. The extension will detect active breakpoints and display them in VSCode.

   **Note:** Breakpoints are only supported inside functions decorated with `@ray.remote`.

5. Launching the Debugger. Run your job directly from the command line (do not use a `launch.json`):

.. code-block:: bash

   python job.py
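For reference, ``job.py`` can be as small as the following sketch (a hypothetical script; the essential point is that ``breakpoint()`` sits inside an ``@ray.remote`` function):

.. code-block:: python

    import ray

    ray.init()

    @ray.remote
    def square(x: int) -> int:
        breakpoint()  # the Ray Distributed Debugger attaches here
        return x * x

    # Each remote task hits the breakpoint; attach from the VSCode sidebar.
    print(ray.get([square.remote(i) for i in range(4)]))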
6. Attaching to a Breakpoint. Once the process hits the first `breakpoint()`, click the Ray Distributed Debugger icon in the VSCode sidebar to attach the debugger.

.. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/ray/launch.png?raw=true
   :alt: Attaching VSCode debugger to Ray process

7. Debugging With Multiple `breakpoint()` Calls. For each subsequent task, first disconnect the current debugger session, then click the extension icon again to attach to the next breakpoint.

.. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/ray/disconnect.png?raw=true
   :alt: Disconnecting and reconnecting the debugger

Legacy Ray Debugger
~~~~~~~~~~~~~~~~~~~

1. Ray has a built-in legacy `debugger `_ that allows you to debug your distributed applications. To enable the debugger, start the Ray cluster with ``RAY_DEBUG=legacy`` and ``--ray-debugger-external``.

.. code-block:: bash

   # start head node
   RAY_DEBUG=legacy ray start --head --dashboard-host=0.0.0.0 --ray-debugger-external
   # start worker node
   RAY_DEBUG=legacy ray start --address='10.124.46.192:6379' --ray-debugger-external

2. Set a breakpoint in your code and submit the job to the cluster. Then run ``ray debug`` to wait for the breakpoint:

.. image:: https://github.com/eric-haibin-lin/verl-community/blob/main/docs/ray/legacy.png?raw=true

================================================
FILE: verl_rl/docs/workers/fsdp_workers.rst
================================================
PyTorch FSDP Backend
======================

Last updated: 02/12/2025.

We support the PyTorch FSDP backend by implementing various workers for the actor, critic, reference, rollout and reward models. We also implement the ``FSDPVLLMShardingManager`` that reshards weights between FSDP and vLLM in `fsdp_vllm.py `_.

**Pros**

- Readily supports various models.
- Users only need to implement the corresponding ``dtensor_weight_loader`` for weight synchronization between FSDP and vLLM. With ``hf_weight_loader``, users can directly use any model supported by both HF and vLLM without any code change.
- Easy to organize the forward and backward computation for each model.

**Cons**

- Poor scalability when it comes to large-scale models (e.g. Llama 70B and 405B).
- The resharding overhead between actor and rollout could be larger than with the Megatron-LM backend.

Due to its simplicity, we recommend using the FSDP backend for algorithm research and prototyping.

FSDP Workers
--------------

ActorRolloutRefWorker
^^^^^^^^^^^^^^^^^^^^^

Actor/Rollout HybridEngine
''''''''''''''''''''''''''

1. HybridEngine, Actor and Rollout initialization API.

.. code:: python

   @register(dispatch_mode=Dispatch.ONE_TO_ALL)
   def init_model(self):

``ONE_TO_ALL``: when calling the ``init_model`` function from the driver process, each worker (on a GPU) will execute the following model initialization process.

The initialization details of HybridEngine, Actor and Rollout are highlighted below:

1. ``DataParallelPPOActor`` implements the simple PPO computation logic when the model is built with FSDP, including computing log probs and model updates.
2. ``vLLMRollout`` supports generation with vLLM. We modify the vLLM Engine so that it executes under SPMD to fit into our ``WorkerGroup`` design.
3. ``FSDPVLLMShardingManager`` is a context manager that performs the actual resharding between actor and rollout. See the `source code `_ for more information.

2. Generate sequences and recompute log probs
.. code:: python

   @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO)
   def generate_sequences(self, prompts: DataProto):

- ``Dispatch.DP_COMPUTE_PROTO``: The data will be dispatched and collected along the DP dimension.
- In this function, the rollout model will perform auto-regressive generation and the actor model will recompute the old log prob for the generated response.

3. Update actor model

.. code:: python

   @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO)
   def update_actor(self, data: DataProto):

- Update the actor model weights using the PPO & entropy loss.

ReferenceModel
''''''''''''''

1. Reference model initialization

The reference model is initialized using the same function as the actor model, without initializing the HybridEngine and Optimizer. The reference model is then also wrapped by ``DataParallelPPOActor``.

2. Compute reference log prob

.. code:: python

   @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO)
   def compute_ref_log_prob(self, data: DataProto):

- In this function, the reference model will call the compute-log-prob function in ``DataParallelPPOActor`` to compute the reference log prob.

CriticWorker and RewardWorker
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

1. Model initialization

Quite similar to the reference model. The CriticWorker will perform additional initialization for the Optimizer.

2. Compute Values for CriticWorker

.. code:: python

   @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO)
   def compute_values(self, data: DataProto):

3. Update Critic

.. code:: python

   @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO)
   def update_critic(self, data: DataProto):

4. Compute Reward

.. code:: python

   @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO)
   def compute_rm_score(self, data: DataProto):

HybridShard
------------

We do not yet support FSDP ``HybridShard``. To support it, we may need to construct a 2D device mesh and test the corresponding ``dtensor_weight_loader`` and ``hf_weight_loader`` for each model.

================================================
FILE: verl_rl/docs/workers/megatron_workers.rst
================================================
Megatron-LM Backend
===================

Last updated: 06/24/2025.

We support the Megatron backend by implementing various workers for the actor, critic, reference, rollout and reward models. We also implement the ``3DHybridEngine`` using Megatron-LM and vLLM/SGLang in `megatron_vllm.py `_ and `megatron_sglang.py `_.

**Pros**

- Supports 5D parallelism (TP, EP, CP, DP, PP) and sequence parallelism for best scalability and throughput.
- The 3D HybridEngine can significantly reduce peak memory usage and weight synchronization overhead between actor and rollout.

**Cons**

- Hugging Face models and Megatron checkpoints require conversion tools.

Development Progress
--------------------

Note that [Deprecated] means that the feature is not supported in the latest version of verl. [To-Optimize] means that the feature is implemented but not optimized yet. [WIP] means that the feature is work in progress. [In-Release] means that the feature is ready and in the review process, landing at any time.
+---------------+-----------------------------------------------------------+
| [Deprecated]  | Megatron 3D Parallelism with custom models                |
+---------------+-----------------------------------------------------------+
| [Done]        | Megatron 0.11.0 ``GPTModel`` support                      |
+---------------+-----------------------------------------------------------+
| [Done]        | Megatron GRPO support                                     |
+---------------+-----------------------------------------------------------+
| [Done]        | Megatron with vLLM 0.8.2, with per-tensor weights loading |
+---------------+-----------------------------------------------------------+
| [Done]        | Megatron with Context Parallel                            |
+---------------+-----------------------------------------------------------+
| [Done]        | Qwen2MoE model support                                    |
+---------------+-----------------------------------------------------------+
| [To-Optimize] | Megatron dist Checkpoint                                  |
+---------------+-----------------------------------------------------------+
| [To-Optimize] | Huggingface and Megatron Checkpoint Converter             |
+---------------+-----------------------------------------------------------+
| [To-Optimize] | Efficient fused linear, entropy and cross entropy         |
+---------------+-----------------------------------------------------------+
| [Done]        | Megatron offload (param, grad, optimizer)                 |
+---------------+-----------------------------------------------------------+
| [Done]        | Megatron Profiler                                         |
+---------------+-----------------------------------------------------------+
| [In-Release]  | Megatron 0.12.0, TE 2.2 with vLLM 0.8.3 and Fused Attn    |
+---------------+-----------------------------------------------------------+
| [WIP]         | Moonlight/DeepSeek-V3 model support                       |
+---------------+-----------------------------------------------------------+
| [WIP]         | Expert Parallel support                                   |
+---------------+-----------------------------------------------------------+
| [WIP]         | Megatron support for dynamic batch size                   |
+---------------+-----------------------------------------------------------+
| [To-Do]       | Performance tuning                                        |
+---------------+-----------------------------------------------------------+
| [MileStone]   | Runnable with DeepSeek-V3 671B post-training              |
+---------------+-----------------------------------------------------------+

Utils of Megatron Workers
-------------------------

MegatronWorker
^^^^^^^^^^^^^^

``MegatronWorker`` is the base class of the different Megatron worker classes. In this class, the ``get_megatron_global_info`` and ``get_megatron_rank_info`` functions retrieve the 3D parallel world size and rank of each ``Worker`` running on a specific GPU. This information is used by the transfer protocol for the Megatron backend.

The following ``Worker`` classes for the different models are utilized to construct the ``WorkerGroup``.

We implement a variety of APIs for each ``Worker`` class, decorated with ``@register(dispatch_mode=)``. These APIs can be called by the Ray driver process, and the data is correctly collected and dispatched according to the ``dispatch_mode`` of each function (see the sketch below). The supported ``dispatch_mode`` values (i.e., transfer protocols) can be found in `decorator.py `_.
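To make the registration pattern concrete, here is a minimal sketch. The import follows the ``decorator.py`` file referenced above (its path may differ across verl versions), and ``MyCriticWorker`` is a hypothetical class name:

.. code-block:: python

    # Sketch only: a worker method exposed to the Ray driver via a dispatch mode.
    from verl.single_controller.base.decorator import Dispatch, register

    class MyCriticWorker:  # in practice, a subclass of the MegatronWorker described above
        @register(dispatch_mode=Dispatch.MEGATRON_COMPUTE_PROTO)
        def compute_values(self, data):
            # The driver passes data partitioned along DP; the framework dispatches it
            # to all tp/pp ranks in the same dp group and collects the final output.
            ...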
ActorRolloutRefWorker
^^^^^^^^^^^^^^^^^^^^^

This class is implemented for the Actor/Rollout HybridEngine, or for the reference model, to initialize the model and perform computation.

Actor/Rollout HybridEngine
''''''''''''''''''''''''''

1. HybridEngine, Actor and Rollout initialization API.

.. code:: python

   @register(dispatch_mode=Dispatch.ONE_TO_ALL)
   def init_model(self):

``ONE_TO_ALL``: when calling the ``init_model`` function from the driver process, each worker (on a GPU) will execute the following model initialization process.

The initialization details of HybridEngine, Actor and Rollout are highlighted below:

1. ``MegatronPPOActor`` implements the simple PPO computation logic when the model is built with Megatron, including computing log probs and model updates.
2. ``vLLMRollout`` supports generation with vLLM. We modify the vLLM Engine so that it executes under SPMD to fit into our ``WorkerGroup`` design.
3. ``MegatronVLLMShardingManager`` is a context manager that performs the actual resharding between actor and rollout. See the `source code `_ for more information.

.. code:: python

   # build actor model
   self.actor = MegatronPPOActor(config=self.config.actor,
                                 model_config=self.actor_model_config,
                                 megatron_config=megatron_config,
                                 actor_module=self.actor_module,
                                 actor_optimizer=self.actor_optimizer,
                                 actor_optimizer_config=self.actor_optim_config)

   # build rollout
   # rollout initialization
   rollout = vLLMRollout(actor_module=params,
                         config=self.config.rollout,
                         tokenizer=self.tokenizer,
                         model_hf_config=self.actor_model_config,
                         train_tp=mpu.get_tensor_model_parallel_world_size())
   # perform weight resharding between actor and rollout
   sharding_manager = MegatronVLLMShardingManager(module=self.hybrid_engine,
                                                  inference_engine=rollout.inference_engine,
                                                  model_config=self.actor_model_config,
                                                  layer_name_mapping=layer_name_mapping)
   ...

2. Generate sequences and recompute log probs

.. code:: python

   @register(dispatch_mode=Dispatch.MEGATRON_PP_AS_DP_PROTO)
   def generate_sequences(self, prompts: DataProto):

- ``Dispatch.MEGATRON_PP_AS_DP_PROTO``: The PP dimension of the actor model will be regarded as the DP dimension. The driver process will then dispatch and collect the data according to this reorganization. This is because, in the HybridEngine, the actor weights, which usually use larger 3D parallel sizes, will be gathered along the PP and TP dimensions. Therefore, the corresponding data should be dispatched and collected through the 3D parallel group of the rollout model, rather than the actor model. However, the world_size and rank information can only be retrieved from ``get_megatron_global_info`` and ``get_megatron_rank_info``, which record the 3D information of the actor model. Moreover, the data resharding inside the TP dimension will be processed within the HybridEngine.
- In this function, the rollout model will perform auto-regressive generation and the actor model will recompute the old log prob for the generated response.

3. Update actor model

.. code:: python

   @register(dispatch_mode=Dispatch.MEGATRON_COMPUTE_PROTO)
   def update_actor(self, data: DataProto):

- ``Dispatch.MEGATRON_COMPUTE_PROTO``: The user passes data partitioned by the DP dimension. The data is dispatched to all tp/pp ranks within the same dp group, and ultimately only output data from tp=0 and the last pp stage is collected.
- Update the actor model weights using the PPO & entropy loss.

.. note:: Currently, the training tensor parallel size can differ from the inference tensor parallel size.

ReferenceModel
''''''''''''''

1. Reference model initialization

The reference model is initialized using the same function as the actor model, without initializing the HybridEngine and Optimizer. The reference model is then also wrapped by ``MegatronPPOActor``.

2. Compute reference log prob
.. code:: python

   @register(dispatch_mode=Dispatch.MEGATRON_COMPUTE_PROTO)
   def compute_ref_log_prob(self, data: DataProto):

- In this function, the reference model will call the compute-log-prob function in ``MegatronPPOActor`` to compute the reference log prob.

CriticWorker and RewardWorker
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

1. Model initialization

Quite similar to the reference model. The CriticWorker will perform additional initialization for the Optimizer.

2. Compute Values for CriticWorker

.. code:: python

   @register(dispatch_mode=Dispatch.MEGATRON_COMPUTE_PROTO)
   def compute_values(self, data: DataProto):

3. Update Critic

.. code:: python

   @register(dispatch_mode=Dispatch.MEGATRON_COMPUTE_PROTO)
   def update_critic(self, data: DataProto):

4. Compute Reward

.. code:: python

   @register(dispatch_mode=Dispatch.MEGATRON_COMPUTE_PROTO)
   def compute_rm_score(self, data: DataProto):

Utils of Train Optimization
---------------------------

Offload
^^^^^^^

When resources are tight, offloading can lower GPU memory usage, helping the training and inference frameworks work well under verl. It moves parameters, gradients, and optimizer states to CPU memory and only loads them back to the GPU when needed.

To use offloading, add the following parameters for the actor and ref separately.

.. code:: python

   # For the actor
   actor_rollout_ref.actor.megatron.param_offload=True \
   actor_rollout_ref.actor.megatron.grad_offload=True \
   actor_rollout_ref.actor.megatron.optimizer_offload=True \

   # For the ref w/o grad and optimizer
   actor_rollout_ref.ref.megatron.param_offload=True \

For the critic, you can include these parameters.

.. code:: python

   # For the critic
   critic.megatron.param_offload=True \
   critic.megatron.grad_offload=True \
   critic.megatron.optimizer_offload=True \

Profiler
^^^^^^^^

The profiler is a tool that helps you understand the performance of your model. It can be used to profile the time spent on different operations and identify bottlenecks. You can get more information from `torch.profiler `_.

In verl, the profiler currently only supports the actor role with the Megatron backend. You can set the begin and end steps to profile; note that one step means one gradient update. The profiling result will be saved under ``save_path``. If you only want to profile specific ranks, set ``profile_ranks``; by default, it is ``[0]``.

.. code:: python

   actor_rollout_ref.actor.profile.use_profile=True \
   actor_rollout_ref.actor.profile.profile_ranks=[0] \
   actor_rollout_ref.actor.profile.step_start=0 \
   actor_rollout_ref.actor.profile.step_end=1 \
   actor_rollout_ref.actor.profile.save_path="./profile"
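Conceptually, this step-ranged profiling corresponds to the standard ``torch.profiler`` pattern sketched below. This illustrates the underlying mechanism only, not verl's actual wiring; ``num_steps`` and ``train_one_step`` are hypothetical placeholders:

.. code-block:: python

    import torch

    num_steps = 2  # hypothetical: total steps to run

    with torch.profiler.profile(
        activities=[torch.profiler.ProfilerActivity.CPU, torch.profiler.ProfilerActivity.CUDA],
        # profile a window of steps, where one step is one gradient update
        schedule=torch.profiler.schedule(wait=0, warmup=0, active=1),
        on_trace_ready=torch.profiler.tensorboard_trace_handler("./profile"),
    ) as prof:
        for step in range(num_steps):
            train_one_step()  # hypothetical: one gradient update
            prof.step()       # advance the profiler schedule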
Related MCore Document
----------------------

There is also a detailed document on using MCore to train different kinds of models; please refer to the `MCore Document `_.

================================================
FILE: verl_rl/docs/workers/ray_trainer.rst
================================================
PPO Ray Trainer
===============

Last updated: 02/12/2025.

We implement the ``RayPPOTrainer``, a trainer that runs on the driver process on a single CPU/GPU node (CPU by default). The ``RayPPOTrainer`` includes 3 core functions for data preparation, WorkerGroup initialization and the PPO training loop.

Data Preparation
----------------

The ``RayPPOTrainer``, as a single process, is responsible for loading a complete batch of samples (prompts) from the dataset and then dispatching them to the different worker groups running on different GPUs.

To generalize the data loading, we implement the ``RLHFDataset`` class to load the preprocessed parquet files, apply chat templates to the prompts, add padding, truncate prompts that exceed the max prompt length, and then tokenize.

.. code:: python

   self.train_dataset = RLHFDataset(data_files=self.config.data.train_files,
                                    tokenizer=self.tokenizer,
                                    config=self.config.data)

Then, the dataloader will iterate over the dataset using the PPO mini batch size.

WorkerGroup Initialization
--------------------------

We first introduce a basic implementation of initializing the ``WorkerGroup`` of the actor model on a given set of GPUs.

.. code:: python

   # max_colocate_count means the number of WorkerGroups (i.e. processes) in each RayResourcePool
   # For the FSDP backend, we recommend using max_colocate_count=1, which merges all WorkerGroups into one.
   # For the Megatron backend, we recommend using max_colocate_count>1, which can utilize different WorkerGroups for different models
   resource_pool = RayResourcePool(process_on_nodes=[config.trainer.n_gpus_per_node] * config.trainer.nnodes,
                                   use_gpu=True,
                                   max_colocate_count=1)
   # define actor rollout cls to be init on remote
   actor_rollout_cls = RayClassWithInitArgs(cls=ActorRolloutWorker)
   # define actor_rollout worker group
   actor_rollout_worker_group = MegatronRayWorkerGroup(resource_pool=resource_pool,
                                                       ray_cls_with_init=actor_rollout_cls,
                                                       default_megatron_kwargs=config.actor_rollout.megatron)

Different WorkerGroups, like ``actor_rollout_worker_group``, ``critic_worker_group`` and ``ref_worker_group``, each live in a separate process in the above implementation.

The driver process can then call the distributed compute functions within ``actor_rollout_worker_group`` and the other roles to construct the RL training loop.

For models colocated on the same set of GPUs, we further provide a fine-grained optimization, which merges the ``worker_group`` of different roles into the same process. This optimization saves redundant CUDA/distributed contexts across processes.

.. code:: python

   # initialize WorkerGroup
   # NOTE: if you want to use a different resource pool for each role, which can support different parallel sizes,
   # you should not use `create_colocated_worker_cls`. Instead, directly pass different resource pools to different worker groups.
   # See TODO(url) for more information.
   all_wg = {}
   for resource_pool, class_dict in self.resource_pool_to_cls.items():
       worker_dict_cls = create_colocated_worker_cls(class_dict=class_dict)
       wg_dict = self.ray_worker_group_cls(resource_pool=resource_pool, ray_cls_with_init=worker_dict_cls)
       spawn_wg = wg_dict.spawn(prefix_set=class_dict.keys())
       all_wg.update(spawn_wg)

   if self.use_critic:
       self.critic_wg = all_wg['critic']
       self.critic_wg.init_model()

   if self.use_reference_policy:
       self.ref_policy_wg = all_wg['ref']
       self.ref_policy_wg.init_model()

   if self.use_rm:
       self.rm_wg = all_wg['rm']
       self.rm_wg.init_model()

   # we should create rollout at the end so that vllm can have a better estimation of kv cache memory
   self.actor_rollout_wg = all_wg['actor_rollout']
   self.actor_rollout_wg.init_model()

.. note:: For the Megatron backend, if we merge the ``worker_groups`` into the same processes, all the roles will utilize the same 3D parallel size. To optimize this, we may need to maintain several 3D process groups for each role in the same distributed context. If you want to use different 3D parallel sizes for different roles, please follow a similar architecture to the first code block and initialize each role's ``worker_group`` separately.
PPO Training Loop
-----------------

We implement the PPO training loop by calling the functions in the worker_group of each role. The input and output data of each function is a ``DataProto`` object implemented in `protocol.py `_. In the training loop, the trainer will dispatch/collect the data to/from the different GPUs following the transfer protocols wrapped in the workers' functions. The computation of PPO micro batches is processed in the ``update_actor`` and ``update_critic`` functions. To extend to other RLHF algorithms, such as DPO and GRPO, please refer to :doc:`../advance/dpo_extension`.

.. code:: python

   def fit(self):
       """
       The training loop of PPO.
       The driver process only needs to call the compute functions of the worker group through RPC to construct the PPO dataflow.
       The lightweight advantage computation is done on the driver process.
       """
       from verl.utils.tracking import Tracking
       from omegaconf import OmegaConf

       logger = Tracking(project_name=self.config.trainer.project_name,
                         experiment_name=self.config.trainer.experiment_name,
                         default_backend=self.config.trainer.logger,
                         config=OmegaConf.to_container(self.config, resolve=True))

       global_steps = 0

       # perform validation before training
       # currently, we only support validation using the reward_function.
       if self.val_reward_fn is not None:
           val_metrics = self._validate()
           pprint(f'Initial validation metrics: {val_metrics}')

       for epoch in range(self.config.trainer.total_epochs):
           for batch_dict in self.train_dataloader:
               metrics = {}

               batch: DataProto = DataProto.from_single_dict(batch_dict)
               # batch = batch.to('cuda')

               # pop those keys for generation
               gen_batch = batch.pop(batch_keys=['input_ids', 'attention_mask', 'position_ids'])

               # generate a batch
               with Timer(name='gen', logger=None) as timer:
                   gen_batch_output = self.actor_rollout_wg.generate_sequences(gen_batch)
               metrics['timing/gen'] = timer.last

               batch = batch.union(gen_batch_output)

               if self.use_reference_policy:
                   # compute reference log_prob
                   with Timer(name='ref', logger=None) as timer:
                       ref_log_prob = self.ref_policy_wg.compute_ref_log_prob(batch)
                       batch = batch.union(ref_log_prob)
                   metrics['timing/ref'] = timer.last

               # compute values
               with Timer(name='values', logger=None) as timer:
                   values = self.critic_wg.compute_values(batch)
                   batch = batch.union(values)
               metrics['timing/values'] = timer.last

               with Timer(name='adv', logger=None) as timer:
                   # compute scores. Support both model and function-based.
                   # We first compute the scores using reward model. Then, we call reward_fn to combine
                   # the results from reward model and rule-based results.
                   if self.use_rm:
                       # we first compute reward model score
                       reward_tensor = self.rm_wg.compute_rm_score(batch)
                       batch = batch.union(reward_tensor)

                   # we combine with rule-based rm
                   reward_tensor = self.reward_fn(batch)
                   batch.batch['token_level_scores'] = reward_tensor
                   # compute rewards: apply_kl_penalty if available
                   batch, kl_metrics = apply_kl_penalty(batch,
                                                        kl_ctrl=self.kl_ctrl_in_reward,
                                                        kl_penalty=self.config.algorithm.kl_penalty)
                   metrics.update(kl_metrics)

                   # compute advantages, executed on the driver process
                   batch = compute_advantage(batch,
                                             self.config.algorithm.gamma,
                                             self.config.algorithm.lam,
                                             adv_estimator=self.config.algorithm.adv_estimator)
               metrics['timing/adv'] = timer.last

               # update critic
               if self.use_critic:
                   with Timer(name='update_critic', logger=None) as timer:
                       critic_output = self.critic_wg.update_critic(batch)
                   metrics['timing/update_critic'] = timer.last
                   critic_output_metrics = reduce_metrics(critic_output.meta_info['metrics'])
                   metrics.update(critic_output_metrics)

               # implement critic warmup
               if self.config.trainer.critic_warmup <= global_steps:
                   # update actor
                   with Timer(name='update_actor', logger=None) as timer:
                       actor_output = self.actor_rollout_wg.update_actor(batch)
                   metrics['timing/update_actor'] = timer.last
                   actor_output_metrics = reduce_metrics(actor_output.meta_info['metrics'])
                   metrics.update(actor_output_metrics)

               # validate
               if self.val_reward_fn is not None and (global_steps + 1) % self.config.trainer.test_freq == 0:
                   with Timer(name='testing', logger=None) as timer:
                       val_metrics: dict = self._validate()
                       val_metrics = {f'val/{key}': val for key, val in val_metrics.items()}
                   metrics['timing/testing'] = timer.last
                   metrics.update(val_metrics)

               # collect metrics
               data_metrics = compute_data_metrics(batch=batch)
               metrics.update(data_metrics)

               # TODO: make a canonical logger that supports various backend
               logger.log(data=metrics, step=global_steps)

               if self.config.trainer.save_freq > 0 and (global_steps + 1) % self.config.trainer.save_freq == 0:
                   actor_local_path = os.path.join(self.config.trainer.default_local_dir, 'actor',
                                                   f'global_step_{global_steps}')
                   actor_remote_path = os.path.join(self.config.trainer.default_hdfs_dir, 'actor')
                   self.actor_rollout_wg.save_checkpoint(actor_local_path, actor_remote_path)

                   if self.use_critic:
                       critic_local_path = os.path.join(self.config.trainer.default_local_dir, 'critic',
                                                        f'global_step_{global_steps}')
                       critic_remote_path = os.path.join(self.config.trainer.default_hdfs_dir, 'critic')
                       self.critic_wg.save_checkpoint(critic_local_path, critic_remote_path)

               global_steps += 1

       # perform validation after training
       if self.val_reward_fn is not None:
           val_metrics = self._validate()
           pprint(f'Final validation metrics: {val_metrics}')

================================================
FILE: verl_rl/docs/workers/sglang_worker.rst
================================================
SGLang Backend
==============

Last updated: 05/31/2025.

**Authored by the SGLang RL Team and listed alphabetically by last name**

`Jingyi Chen `_, `Yitong Guan `_, `Zhuobin Huang `_, `Jiajun Li `_, `Ji Li `_, `Shenggui Li `_, `Junrong Lin `_, `Xiang Long `_, `Rui Lu `_, `Jin Pan `_, `Shuai Shi `_, `Yushen Su `_, `Xinyuan Tong `_, `Chendong Wang `_, `Hanchen Zhang `_, `Haoran Wang `_, `Yongan Xiang `_, `Chengxing Xie `_, `Yuhao Yang `_, `Jinwei Yao `_, `Qiaolin Yu `_, `Yuzhen Zhou `_, `Chenyang Zhao `_

Introduction
------------

`SGLang `_ is an open-source, state-of-the-art inference serving engine, fully adopted by xAI to support all of Grok's inference needs during research and serving. Currently, verl fully supports using SGLang as the inference engine during the rollout phase. As a rollout engine, SGLang provides the same feature coverage as vLLM, including memory saving and multi-node rollout features.
After installing verl and SGLang, simply add ``actor_rollout_ref.rollout.name=sglang`` to the startup script to seamlessly switch between the two inference frameworks.

In addition, the SGLang team is actively working on supporting features such as Multi-Turn Agentic RL, VLM RLHF, Server-Based RLHF, and Partial Rollout. You can track the related development progress in the `Tracking Roadmap `_.

Installation
------------

Please always use the following commands to install SGLang together with verl.

.. code-block:: bash

   pip install --upgrade pip
   # Currently 0.4.6.post5, subject to updates at any time; please refer to the latest version specified in `setup.py`
   pip install -e ".[sglang]"

Check that the following dependencies are in your environment:

.. note::

   - **PyTorch**: 2.6.0+cu124
   - **CUDA**: 12.4
   - **flashinfer-python**: 0.2.5+cu124torch2.6
   - **SGLang**: 0.4.6.post5
   - **sgl-kernel**: 0.1.4

Using SGLang as the Inference Backend for PPO Training on a Single Machine
---------------------------------------------------------------------------

We use Qwen/Qwen2-7B-Instruct on the gsm8k dataset for a simple test.

1. Run the following command to prepare the gsm8k dataset:

.. code-block:: bash

   python3 examples/data_preprocess/gsm8k.py

2. Run the following script to conduct a PPO experiment on a single machine with 4 GPUs:

.. code-block:: bash

   export SGL_DISABLE_TP_MEMORY_INBALANCE_CHECK=True
   PYTHONUNBUFFERED=1 python3 -m verl.trainer.main_ppo \
    data.train_files=$HOME/data/gsm8k/train.parquet \
    data.val_files=$HOME/data/gsm8k/test.parquet \
    data.train_batch_size=4096 \
    data.max_prompt_length=4096 \
    data.max_response_length=4096 \
    actor_rollout_ref.rollout.name=sglang \
    actor_rollout_ref.model.path=Qwen/Qwen2-7B-Instruct \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.actor.ppo_mini_batch_size=64 \
    actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \
    actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=8 \
    actor_rollout_ref.model.enable_gradient_checkpointing=True \
    actor_rollout_ref.actor.fsdp_config.param_offload=True \
    actor_rollout_ref.actor.fsdp_config.optimizer_offload=True \
    actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.8 \
    actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \
    critic.optim.lr=1e-5 \
    critic.model.path=Qwen/Qwen2-7B-Instruct \
    critic.ppo_micro_batch_size_per_gpu=4 \
    critic.model.fsdp_config.param_offload=True \
    critic.model.fsdp_config.optimizer_offload=True \
    algorithm.kl_ctrl.kl_coef=0.001 \
    trainer.logger=console \
    trainer.val_before_train=False \
    trainer.n_gpus_per_node=4 \
    trainer.nnodes=1 \
    trainer.save_freq=-1 \
    trainer.test_freq=10 \
    trainer.total_epochs=15 2>&1 | tee verl_demo.log

Why export SGL_DISABLE_TP_MEMORY_INBALANCE_CHECK?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

1. ``verl`` initializes a ``SGLangRollout`` module during rollout, which is used to evaluate/generate samples.
2. ``SGLangRollout`` will initialize ``Engine``, and further initialize a ``torch.distributed.DeviceMesh``, used to support Tensor Parallel (TP).
3. ``DeviceMesh.init()`` internally checks the free GPU memory of all participating devices. If the difference is too large (more than ~10%), it directly reports an error to avoid initialization failures or deadlocks.
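The check is roughly equivalent to comparing free GPU memory across ranks, as in the following sketch (an illustration of the idea only, not SGLang's actual code; it assumes ``torch.distributed`` is already initialized, as it is inside verl workers):

.. code-block:: python

    import torch
    import torch.distributed as dist

    # Gather each rank's free GPU memory and fail if the spread exceeds ~10%.
    free, total = torch.cuda.mem_get_info()
    t = torch.tensor([float(free)], dtype=torch.float64, device="cuda")
    gathered = [torch.zeros_like(t) for _ in range(dist.get_world_size())]
    dist.all_gather(gathered, t)
    frees = [g.item() for g in gathered]
    if (max(frees) - min(frees)) / total > 0.10:
        raise RuntimeError("free GPU memory differs by more than ~10% across ranks")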
Why might there be inconsistent GPU memory?
"""""""""""""""""""""""""""""""""""""""""""

**1. Ray Distributed Actors load the model at different times**

``verl`` uses Ray-based multi-process, multi-GPU concurrent training. Each ``WorkerDict`` may be called at different times:

.. code-block:: python

   self.rollout = SGLangRollout(...)

Different workers initialize the model at different times → different memory usage.

**2. Delayed initialization causes memory bias**

Some workers start model loading/inference (e.g., ``generate_sequences()``, ``compute_log_prob()``) earlier than others. Early workers already use up GPU memory → late workers still have empty memory → a memory difference appears.

**3. SGLang's TP init uses "all-device broadcast", but there is no uniform release timing**

Although ``SGLangRollout`` may only involve a subset of GPUs, its ``Engine`` initialization calls ``torch.distributed.init_process_group()`` and broadcasts weights, so:

- Non-rollout GPUs also join the communication.
- Later on, ``DeviceMesh`` init will fail due to "inconsistent memory".

**4. Different FSDP/TP loading behaviors also lead to mismatch**

If using:

.. code-block:: bash

   actor.fsdp_config.param_offload=True
   ref.fsdp_config.param_offload=True

then some workers keep params on CPU while others have already sharded them to GPU → an asymmetric memory layout results.

Using SGLang as the Inference Backend for PPO Training Across Multiple Machines
--------------------------------------------------------------------------------

SGLang also supports running verl's Ray-based cross-machine inference in IPv4 and IPv6 scenarios. In the script below, we use TP=16 for cross-machine inference. Suppose we have two interconnected machines: node0 with IP 10.94.16.4 and node1 with IP 10.94.16.5.

1. Start Ray on node0:

.. code-block:: bash

   ray start --head --dashboard-host=0.0.0.0

You will see the following prompt:

.. code-block:: bash

   Usage stats collection is enabled. To disable this, add `--disable-usage-stats` to the command that starts the cluster, or run the following command: `ray disable-usage-stats` before starting the cluster. See https://docs.ray.io/en/master/cluster/usage-stats.html for more details.

   Local node IP: 10.94.16.4

   --------------------
   Ray runtime started.
   --------------------

   Next steps
     To add another node to this Ray cluster, run
       ray start --address='10.94.16.4:6379'

2. Have node1 join the Ray cluster. Run the following command on node1:

.. code-block:: bash

   ray start --address='10.94.16.4:6379'

Run the following command to confirm that the Ray cluster now has two nodes:

.. code-block:: bash

   ray status

You can see that the cluster has two nodes with 16 GPUs:

.. code-block:: bash

   ======== Autoscaler status: 2025-04-09 09:25:37.694016 ========
   Node status
   ---------------------------------------------------------------
   Active:
    1 node_ef382ffd687d8f6b060c1b68e63ada7341b936fe5b1901dd04de1027
    1 node_1eb4d7d07e793114c23a89d1a41f1f76acf6ef5b35af844a4ee8e4ba
   Pending:
    (no pending nodes)
   Recent failures:
    (no failures)

   Resources
   ---------------------------------------------------------------
   Usage:
    0.0/360.0 CPU
    0.0/16.0 GPU
    0B/3.39TiB memory
    0B/372.53GiB object_store_memory

3. Run the following script to train meta-llama/Llama-3.1-8B-Instruct with TP=16 across 2 machines using 16 GPUs:
.. code-block:: bash

   DATA_DIR=$HOME/data/gsm8k

   python3 -m verl.trainer.main_ppo \
    actor_rollout_ref.rollout.name=sglang \
    data.train_files=$DATA_DIR/train.parquet \
    data.val_files=$DATA_DIR/test.parquet \
    data.train_batch_size=4096 \
    data.max_prompt_length=4096 \
    data.max_response_length=4096 \
    actor_rollout_ref.model.path=meta-llama/Llama-3.1-8B-Instruct \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.model.use_remove_padding=True \
    actor_rollout_ref.actor.ppo_mini_batch_size=64 \
    actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=16 \
    actor_rollout_ref.model.enable_gradient_checkpointing=True \
    actor_rollout_ref.actor.fsdp_config.param_offload=True \
    actor_rollout_ref.actor.fsdp_config.optimizer_offload=True \
    actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \
    actor_rollout_ref.rollout.tensor_model_parallel_size=16 \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.8 \
    actor_rollout_ref.rollout.free_cache_engine=True \
    actor_rollout_ref.ref.log_prob_micro_batch_size=16 \
    actor_rollout_ref.ref.fsdp_config.param_offload=True \
    critic.optim.lr=1e-5 \
    critic.model.use_remove_padding=True \
    critic.model.path=meta-llama/Llama-3.1-8B-Instruct \
    critic.model.enable_gradient_checkpointing=True \
    critic.ppo_micro_batch_size=16 \
    critic.model.fsdp_config.param_offload=True \
    critic.model.fsdp_config.optimizer_offload=True \
    algorithm.kl_ctrl.kl_coef=0.001 \
    trainer.critic_warmup=0 \
    trainer.logger=console \
    trainer.val_before_train=True \
    trainer.n_gpus_per_node=8 \
    trainer.nnodes=2 \
    trainer.save_freq=-1 \
    trainer.test_freq=10 \
    trainer.total_epochs=15 2>&1 | tee verl_demo.log
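As an optional sanity check before launching the script above, you can programmatically confirm that the cluster sees both nodes and all 16 GPUs (a small sketch using the standard Ray API):

.. code-block:: python

    import ray

    ray.init(address="auto")
    alive = [n for n in ray.nodes() if n["Alive"]]
    total_gpus = sum(n["Resources"].get("GPU", 0) for n in alive)
    print(f"nodes={len(alive)}, gpus={total_gpus}")  # expect nodes=2, gpus=16.0
    ray.shutdown()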
""" Preprocess the DAPO-Math-17k dataset to multiturn format """ import argparse import os import datasets from verl.utils.hdfs_io import copy, makedirs if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--local_dir", default="~/data/retool_aime2024") parser.add_argument("--hdfs_dir", default=None) args = parser.parse_args() data_path = "BytedTsinghua-SIA/AIME-2024" dataset = datasets.load_dataset(data_path, "default") train_dataset = dataset["train"] # add a row to each data item that represents a unique id def make_map_fn(split): def process_fn(example, idx): orig_extra_info = example.pop("extra_info") extra_info = orig_extra_info.copy() extra_info["need_tools_kwargs"] = True extra_info["tools_kwargs"] = { "code_interpreter": { "create_kwargs": { "ground_truth": example["reward_model"]["ground_truth"], }, }, } example["extra_info"] = extra_info return example return process_fn train_dataset = train_dataset.map(function=make_map_fn("train"), with_indices=True) local_dir = args.local_dir hdfs_dir = args.hdfs_dir train_dataset.to_parquet(os.path.join(local_dir, "train.parquet")) if hdfs_dir is not None: makedirs(hdfs_dir) copy(src=local_dir, dst=hdfs_dir) ================================================ FILE: verl_rl/examples/data_preprocess/dapo_multiturn_w_tool.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Preprocess the DAPO-Math-17k dataset to multiturn format """ import argparse import os import datasets from verl.utils.hdfs_io import copy, makedirs if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--local_dir", default="~/data/retool_dapo") parser.add_argument("--hdfs_dir", default=None) args = parser.parse_args() data_path = "BytedTsinghua-SIA/DAPO-Math-17k" dataset = datasets.load_dataset(data_path, "default") train_dataset = dataset["train"] # add a row to each data item that represents a unique id def make_map_fn(split): def process_fn(example, idx): orig_extra_info = example.pop("extra_info") extra_info = orig_extra_info.copy() extra_info["need_tools_kwargs"] = True extra_info["tools_kwargs"] = { "code_interpreter": { "create_kwargs": { "ground_truth": example["reward_model"]["ground_truth"], }, }, } example["extra_info"] = extra_info return example return process_fn train_dataset = train_dataset.map(function=make_map_fn("train"), with_indices=True) local_dir = args.local_dir hdfs_dir = args.hdfs_dir train_dataset.to_parquet(os.path.join(local_dir, "train.parquet")) if hdfs_dir is not None: makedirs(hdfs_dir) copy(src=local_dir, dst=hdfs_dir) ================================================ FILE: verl_rl/examples/data_preprocess/full_hh_rlhf.py ================================================ # Copyright 2024 Bytedance Ltd. 
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
- Preprocess data and split the training set into 75% for training the RM and 25% for validating the RM.
- All the training data is used to train SFT and RL.
- Both chosen and rejected responses are used to train SFT.
"""

import argparse
import os

import pandas as pd
from datasets import load_dataset
from tqdm.auto import tqdm

from verl.utils.fs import copy, makedirs


def generate_sft_dataset(target_hdfs_path_dir, local_dir="~/data/full_hh_rlhf/sft"):
    dataset = load_dataset("Dahoas/full-hh-rlhf")
    output = {"prompt": [], "response": []}
    for data in tqdm(dataset["train"]):
        # add chosen
        output["prompt"].append(data["prompt"])
        output["response"].append(data["chosen"])
        # add rejection
        output["prompt"].append(data["prompt"])
        output["response"].append(data["rejected"])

    df = pd.DataFrame(output)

    local_dir = os.path.expanduser(local_dir)
    os.makedirs(local_dir, exist_ok=True)
    local_path = os.path.join(local_dir, "train.parquet")
    df.to_parquet(path=local_path)

    if target_hdfs_path_dir is not None:
        hdfs_dir = target_hdfs_path_dir + "/" + "train.parquet"
        makedirs(hdfs_dir)
        copy(local_path, hdfs_dir)


def generate_rm_dataset(target_hdfs_path_dir, local_dir="~/data/full_hh_rlhf/rm"):
    train_dataset = load_dataset("Dahoas/full-hh-rlhf", split="train[:75%]")
    test_dataset = load_dataset("Dahoas/full-hh-rlhf", split="train[-25%:]")

    local_dir = os.path.expanduser(local_dir)
    os.makedirs(local_dir, exist_ok=True)

    for dataset, name in zip([train_dataset, test_dataset], ["train", "test"], strict=True):
        output = {"prompt": [], "chosen": [], "rejected": []}
        for data in tqdm(dataset):
            # add chosen
            output["prompt"].append(data["prompt"])
            output["chosen"].append(data["chosen"])
            output["rejected"].append(data["rejected"])

        df = pd.DataFrame(output)
        local_path = os.path.join(local_dir, name + ".parquet")
        df.to_parquet(path=local_path)

        if target_hdfs_path_dir is not None:
            hdfs_dir = target_hdfs_path_dir + "/" + name + ".parquet"
            makedirs(hdfs_dir)
            copy(local_path, hdfs_dir)


def generate_rl_dataset(target_hdfs_path_dir, local_dir="~/data/full_hh_rlhf/rl"):
    dataset = load_dataset("Dahoas/full-hh-rlhf")
    train_dataset = dataset["train"]

    data_source = "Dahoas/full-hh-rlhf"

    # add a row to each data item that represents a unique id
    def make_map_fn(split):
        def process_fn(example, idx):
            prompt = example.pop("prompt")
            response = example.pop("response")

            data = {
                "data_source": data_source,
                "prompt": [{"role": "user", "content": prompt}],
                "ability": "alignment",
                "reward_model": {
                    "style": "model",
                    "ground_truth": response,  # should not be used
                },
                "extra_info": {"split": split, "index": idx},
            }
            return data

        return process_fn

    train_dataset = train_dataset.map(function=make_map_fn("train"), with_indices=True)
    local_dir = os.path.expanduser(local_dir)
    local_path = os.path.join(local_dir, "train.parquet")
    train_dataset.to_parquet(local_path)

    if target_hdfs_path_dir is not None:
        hdfs_dir = target_hdfs_path_dir + "/" + "train.parquet"
        makedirs(hdfs_dir)
        copy(local_path, hdfs_dir)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--split", type=str, choices=["sft", "rm", "rl"], required=True)
    parser.add_argument("--local_dir", type=str, default="~/data/full_hh_rlhf")
    parser.add_argument("--hdfs_dir", type=str, required=False, default=None)
    args = parser.parse_args()

    if args.split == "sft":
        generate_sft_dataset(args.hdfs_dir, os.path.join(args.local_dir, args.split))
    elif args.split == "rm":
        generate_rm_dataset(args.hdfs_dir, os.path.join(args.local_dir, args.split))
    elif args.split == "rl":
        generate_rl_dataset(args.hdfs_dir, os.path.join(args.local_dir, args.split))
    else:
        raise NotImplementedError

================================================
FILE: verl_rl/examples/data_preprocess/geo3k.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Preprocess the Geometry3k dataset to parquet format
"""

import argparse
import os

import datasets

from verl.utils.hdfs_io import copy, makedirs

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--local_dir", default="~/data/geo3k")
    parser.add_argument("--hdfs_dir", default=None)
    args = parser.parse_args()

    data_source = "hiyouga/geometry3k"
    dataset = datasets.load_dataset(data_source)

    train_dataset = dataset["train"]
    test_dataset = dataset["test"]

    instruction_following = (
        r"You FIRST think about the reasoning process as an internal monologue and then provide the final answer. "
        r"The reasoning process MUST BE enclosed within <think> </think> tags. "
        r"The final answer MUST BE put in \boxed{}."
    )

    # add a row to each data item that represents a unique id
    def make_map_fn(split):
        def process_fn(example, idx):
            problem = example.pop("problem")
            prompt = problem + " " + instruction_following
            answer = example.pop("answer")
            images = example.pop("images")

            data = {
                "data_source": data_source,
                "prompt": [
                    {
                        "role": "user",
                        "content": prompt,
                    }
                ],
                "images": images,
                "ability": "math",
                "reward_model": {"style": "rule", "ground_truth": answer},
                "extra_info": {
                    "split": split,
                    "index": idx,
                    "answer": answer,
                    "question": problem,
                },
            }
            return data

        return process_fn

    train_dataset = train_dataset.map(function=make_map_fn("train"), with_indices=True, num_proc=8)
    test_dataset = test_dataset.map(function=make_map_fn("test"), with_indices=True, num_proc=8)

    local_dir = args.local_dir
    hdfs_dir = args.hdfs_dir

    train_dataset.to_parquet(os.path.join(local_dir, "train.parquet"))
    test_dataset.to_parquet(os.path.join(local_dir, "test.parquet"))

    if hdfs_dir is not None:
        makedirs(hdfs_dir)
        copy(src=local_dir, dst=hdfs_dir)

================================================
FILE: verl_rl/examples/data_preprocess/geo3k_multiturn_w_tool.py
================================================
# Copyright 2023-2025 SGLang Team
# Copyright Amazon.com, Inc. or its affiliates.
# Copyright 2025 Reallm Labs Ltd. or its affiliates
# Copyright 2025 ModelBest Inc. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Preprocess the Geometry3k dataset to parquet format
"""

import argparse
import os

import datasets

from verl.utils.hdfs_io import copy, makedirs

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--local_dir", default="~/data/geo3k_multiturn_w_tool")
    parser.add_argument("--hdfs_dir", default=None)
    args = parser.parse_args()

    data_source = "hiyouga/geometry3k"
    dataset = datasets.load_dataset(data_source)

    train_dataset = dataset["train"]
    test_dataset = dataset["test"]

    instruction_following = (
        r"You FIRST think about the reasoning process as an internal monologue and then provide the final answer. "
        r"The reasoning process MUST BE enclosed within <think> </think> tags. "
        r"The final answer MUST BE put in \boxed{}."
    )

    # add a row to each data item that represents a unique id
    def make_map_fn(split):
        def process_fn(example, idx):
            problem = example.pop("problem")
            prompt = problem + " " + instruction_following
            answer = example.pop("answer")
            images = example.pop("images")
            data = {
                "data_source": data_source,
                "prompt": [
                    {
                        "role": "system",
                        "content": (
                            "You are a math expert. You are given a question and you need to solve it step by step. "
                            "Reasoning step by step before any tool call. "
                            "You should use the `calc_geo3k_reward` tool after step by step solving the question, "
                            "before generate final answer at least once and refine your answer if necessary. "
                        ),
                    },
                    {
                        "role": "user",
                        "content": prompt,
                    },
                ],
                "images": images,
                "ability": "math",
                "reward_model": {"style": "rule", "ground_truth": answer},
                "extra_info": {
                    "split": split,
                    "index": idx,
                    "answer": answer,
                    "question": problem,
                    "need_tools_kwargs": True,
                    "tools_kwargs": {
                        "calc_geo3k_reward": {
                            "create_kwargs": {"ground_truth": answer},
                            # "execute_kwargs": {},
                            # "calc_reward_kwargs": {},
                            # "release_kwargs": {},
                        },
                    },
                },
            }
            return data

        return process_fn

    train_dataset = train_dataset.map(function=make_map_fn("train"), with_indices=True, num_proc=8)
    test_dataset = test_dataset.map(function=make_map_fn("test"), with_indices=True, num_proc=8)

    local_dir = args.local_dir
    hdfs_dir = args.hdfs_dir

    train_dataset.to_parquet(os.path.join(local_dir, "train.parquet"))
    test_dataset.to_parquet(os.path.join(local_dir, "test.parquet"))

    if hdfs_dir is not None:
        makedirs(hdfs_dir)
        copy(src=local_dir, dst=hdfs_dir)

================================================
FILE: verl_rl/examples/data_preprocess/gsm8k.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # limitations under the License. """ Preprocess the GSM8k dataset to parquet format """ import argparse import os import re import datasets from verl.utils.hdfs_io import copy, makedirs def extract_solution(solution_str): solution = re.search("#### (\\-?[0-9\\.\\,]+)", solution_str) assert solution is not None final_solution = solution.group(0) final_solution = final_solution.split("#### ")[1].replace(",", "") return final_solution if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--local_dir", default="~/data/gsm8k") parser.add_argument("--hdfs_dir", default=None) args = parser.parse_args() data_source = "openai/gsm8k" dataset = datasets.load_dataset(data_source, "main") train_dataset = dataset["train"] test_dataset = dataset["test"] instruction_following = 'Let\'s think step by step and output the final answer after "####".' # add a row to each data item that represents a unique id def make_map_fn(split): def process_fn(example, idx): question_raw = example.pop("question") question = question_raw + " " + instruction_following answer_raw = example.pop("answer") solution = extract_solution(answer_raw) data = { "data_source": data_source, "prompt": [ { "role": "user", "content": question, } ], "ability": "math", "reward_model": {"style": "rule", "ground_truth": solution}, "extra_info": { "split": split, "index": idx, "answer": answer_raw, "question": question_raw, }, } return data return process_fn train_dataset = train_dataset.map(function=make_map_fn("train"), with_indices=True) test_dataset = test_dataset.map(function=make_map_fn("test"), with_indices=True) local_dir = args.local_dir hdfs_dir = args.hdfs_dir train_dataset.to_parquet(os.path.join(local_dir, "train.parquet")) test_dataset.to_parquet(os.path.join(local_dir, "test.parquet")) if hdfs_dir is not None: makedirs(hdfs_dir) copy(src=local_dir, dst=hdfs_dir) ================================================ FILE: verl_rl/examples/data_preprocess/gsm8k_multiturn_w_interaction.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" Preprocess the GSM8k dataset to parquet format """ import argparse import os import re import datasets from verl.utils.hdfs_io import copy, makedirs def extract_solution(solution_str): solution = re.search("#### (\\-?[0-9\\.\\,]+)", solution_str) assert solution is not None final_solution = solution.group(0) final_solution = final_solution.split("#### ")[1].replace(",", "") return final_solution if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--local_dir", default="~/data/gsm8k") parser.add_argument("--hdfs_dir", default=None) args = parser.parse_args() data_source = "openai/gsm8k" dataset = datasets.load_dataset(data_source, "main") train_dataset = dataset["train"] test_dataset = dataset["test"] instruction_following = "Let's think step by step and output the final answer after `####`." # add a row to each data item that represents a unique id def make_map_fn(split): def process_fn(example, idx): question_raw = example.pop("question") question = question_raw + " " + instruction_following answer_raw = example.pop("answer") solution = extract_solution(answer_raw) data = { "data_source": data_source, "prompt": [ { "role": "system", "content": ( "You are a math expert. You are given a question and you need to solve it step by step. " "You should rethinking carefully if user point out your answer is wrong. " "Put your final answer in the format of `#### `." ), }, { "role": "user", "content": question, }, ], "ability": "math", "reward_model": {"style": "rule", "ground_truth": solution}, "extra_info": { "split": split, "index": idx, "answer": answer_raw, "question": question_raw, "interaction_kwargs": { "name": "gsm8k", "query": question, "ground_truth": solution, }, }, } return data return process_fn train_dataset = train_dataset.map(function=make_map_fn("train"), with_indices=True) test_dataset = test_dataset.map(function=make_map_fn("test"), with_indices=True) local_dir = args.local_dir hdfs_dir = args.hdfs_dir train_dataset.to_parquet(os.path.join(local_dir, "train.parquet")) test_dataset.to_parquet(os.path.join(local_dir, "test.parquet")) if hdfs_dir is not None: makedirs(hdfs_dir) copy(src=local_dir, dst=hdfs_dir) ================================================ FILE: verl_rl/examples/data_preprocess/gsm8k_multiturn_w_tool.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" Preprocess the GSM8k dataset to parquet format """ import argparse import os import re import datasets from verl.utils.hdfs_io import copy, makedirs def extract_solution(solution_str): solution = re.search("#### (\\-?[0-9\\.\\,]+)", solution_str) assert solution is not None final_solution = solution.group(0) final_solution = final_solution.split("#### ")[1].replace(",", "") return final_solution if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--local_dir", default="~/data/gsm8k") parser.add_argument("--hdfs_dir", default=None) args = parser.parse_args() data_source = "openai/gsm8k" dataset = datasets.load_dataset(data_source, "main") train_dataset = dataset["train"] test_dataset = dataset["test"] instruction_following = "Let's think step by step and output the final answer after `####`." # add a row to each data item that represents a unique id def make_map_fn(split): def process_fn(example, idx): question_raw = example.pop("question") question = question_raw + " " + instruction_following answer_raw = example.pop("answer") solution = extract_solution(answer_raw) data = { "data_source": data_source, "prompt": [ { "role": "system", "content": ( "You are a math expert. You are given a question and you need to solve it step by step. " "Reasoning step by step before any tool call. " "You should use the `calc_gsm8k_reward` tool after step by step solving the question, " "before generate final answer at least once and refine your answer if necessary. " "Put your final answer in the format of `#### `." ), }, { "role": "user", "content": question, }, ], "ability": "math", "reward_model": {"style": "rule", "ground_truth": solution}, "extra_info": { "split": split, "index": idx, "answer": answer_raw, "question": question_raw, "need_tools_kwargs": True, "tools_kwargs": { "calc_gsm8k_reward": { "create_kwargs": {"ground_truth": solution}, # "execute_kwargs": {}, # "calc_reward_kwargs": {}, # "release_kwargs": {}, }, }, "interaction_kwargs": { "query": question, "ground_truth": solution, }, }, } return data return process_fn train_dataset = train_dataset.map(function=make_map_fn("train"), with_indices=True) test_dataset = test_dataset.map(function=make_map_fn("test"), with_indices=True) local_dir = args.local_dir hdfs_dir = args.hdfs_dir train_dataset.to_parquet(os.path.join(local_dir, "train.parquet")) test_dataset.to_parquet(os.path.join(local_dir, "test.parquet")) if hdfs_dir is not None: makedirs(hdfs_dir) copy(src=local_dir, dst=hdfs_dir) ================================================ FILE: verl_rl/examples/data_preprocess/gsm8k_tool_agent_loop.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" Preprocess the GSM8k dataset to parquet format """ import argparse import os import re import datasets from verl.utils.hdfs_io import copy, makedirs def extract_solution(solution_str): solution = re.search("#### (\\-?[0-9\\.\\,]+)", solution_str) assert solution is not None final_solution = solution.group(0) final_solution = final_solution.split("#### ")[1].replace(",", "") return final_solution if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--local_dir", default="~/data/gsm8k") parser.add_argument("--hdfs_dir", default=None) args = parser.parse_args() data_source = "openai/gsm8k" dataset = datasets.load_dataset(data_source, "main") train_dataset = dataset["train"] test_dataset = dataset["test"] instruction_following = "Let's think step by step and output the final answer after `####`." # add a row to each data item that represents a unique id def make_map_fn(split): def process_fn(example, idx): question_raw = example.pop("question") question = question_raw + " " + instruction_following answer_raw = example.pop("answer") solution = extract_solution(answer_raw) data = { "data_source": data_source, "agent_name": "tool_agent", "prompt": [ { "role": "system", "content": ( "You are a math expert. You are given a question and you need to solve it step by step. " "Reasoning step by step before any tool call. " "You should use the `calc_gsm8k_reward` tool after step by step solving the question, " "before generate final answer at least once and refine your answer if necessary. " "Put your final answer in the format of `#### `." ), }, { "role": "user", "content": question, }, ], "ability": "math", "reward_model": {"style": "rule", "ground_truth": solution}, "extra_info": { "split": split, "index": idx, "answer": answer_raw, "question": question_raw, "need_tools_kwargs": True, "tools_kwargs": { "calc_gsm8k_reward": { "create_kwargs": {"ground_truth": solution}, # "execute_kwargs": {}, # "calc_reward_kwargs": {}, # "release_kwargs": {}, }, }, "interaction_kwargs": { "query": question, "ground_truth": solution, }, }, } return data return process_fn train_dataset = train_dataset.map(function=make_map_fn("train"), with_indices=True) test_dataset = test_dataset.map(function=make_map_fn("test"), with_indices=True) local_dir = args.local_dir hdfs_dir = args.hdfs_dir train_dataset.to_parquet(os.path.join(local_dir, "train.parquet")) test_dataset.to_parquet(os.path.join(local_dir, "test.parquet")) if hdfs_dir is not None: makedirs(hdfs_dir) copy(src=local_dir, dst=hdfs_dir) ================================================ FILE: verl_rl/examples/data_preprocess/hellaswag.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Preprocess Hellaswag dataset. """ import argparse import os import re import datasets from verl.utils.hdfs_io import copy, makedirs def preprocess(text): text = text.strip() # NOTE: Brackets are artifacts of the WikiHow dataset portion of HellaSwag. 
text = text.replace(" [title]", ". ") text = re.sub("\\[.*?\\]", "", text) text = text.replace(" ", " ") return text if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--local_dir", default="/opt/tiger/hellaswag") parser.add_argument("--hdfs_dir", default=None) args = parser.parse_args() data_source = "Rowan/hellaswag" dataset = datasets.load_dataset(data_source, trust_remote_code=True) train_dataset = dataset["train"] val_dataset = dataset["validation"] test_dataset = dataset["test"] instruction = "Please complete the following sentence.\n" def make_map_fn(split): def process_fn(doc, idx): ctx = doc["ctx_a"] + " " + doc["ctx_b"].capitalize() query = preprocess(doc["activity_label"] + ": " + ctx) choices = [preprocess(ending) for ending in doc["endings"]] gold = int(doc["label"]) data = { "data_source": data_source, "prompt": [{"role": "user", "content": query}], "ability": "nlp", "reward_model": { "style": "model", "eval": "multiple_choice", # using loglikelihood "ground_truth": gold, "choices": choices, }, "extra_info": {"split": split, "index": idx}, } return data return process_fn # filter data that doesn't have a label train_dataset = train_dataset.filter(lambda x: len(x["label"]) > 0) val_dataset = val_dataset.filter(lambda x: len(x["label"]) > 0) test_dataset = test_dataset.filter(lambda x: len(x["label"]) > 0) train_dataset = train_dataset.map(function=make_map_fn("train"), with_indices=True) val_dataset = val_dataset.map(function=make_map_fn("validation"), with_indices=True) test_dataset = test_dataset.map(function=make_map_fn("test"), with_indices=True) local_dir = args.local_dir hdfs_dir = args.hdfs_dir train_dataset.to_parquet(os.path.join(local_dir, "train.parquet")) val_dataset.to_parquet(os.path.join(local_dir, "validation.parquet")) test_dataset.to_parquet(os.path.join(local_dir, "test.parquet")) if hdfs_dir is not None: makedirs(hdfs_dir) copy(src=local_dir, dst=hdfs_dir) ================================================ FILE: verl_rl/examples/data_preprocess/math_dataset.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Preprocess the MATH-lighteval dataset to parquet format """ import argparse import os import datasets from verl.utils.hdfs_io import copy, makedirs from verl.utils.reward_score.math import last_boxed_only_string, remove_boxed def extract_solution(solution_str): return remove_boxed(last_boxed_only_string(solution_str)) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--local_dir", default="~/data/math") parser.add_argument("--hdfs_dir", default=None) args = parser.parse_args() # 'lighteval/MATH' is no longer available on huggingface. 
# Use mirror repo: DigitalLearningGmbH/MATH-lighteval data_source = "DigitalLearningGmbH/MATH-lighteval" print(f"Loading the {data_source} dataset from huggingface...", flush=True) dataset = datasets.load_dataset(data_source, trust_remote_code=True) train_dataset = dataset["train"] test_dataset = dataset["test"] instruction_following = "Let's think step by step and output the final answer within \\boxed{}." # add a row to each data item that represents a unique id def make_map_fn(split): def process_fn(example, idx): question = example.pop("problem") question = question + " " + instruction_following answer = example.pop("solution") solution = extract_solution(answer) data = { "data_source": data_source, "prompt": [{"role": "user", "content": question}], "ability": "math", "reward_model": {"style": "rule", "ground_truth": solution}, "extra_info": {"split": split, "index": idx}, } return data return process_fn train_dataset = train_dataset.map(function=make_map_fn("train"), with_indices=True) test_dataset = test_dataset.map(function=make_map_fn("test"), with_indices=True) local_dir = args.local_dir hdfs_dir = args.hdfs_dir train_dataset.to_parquet(os.path.join(local_dir, "train.parquet")) test_dataset.to_parquet(os.path.join(local_dir, "test.parquet")) if hdfs_dir is not None: makedirs(hdfs_dir) copy(src=local_dir, dst=hdfs_dir) ================================================ FILE: verl_rl/examples/data_preprocess/multiturn.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" Create a simple multi-turn dataset for testing """ import argparse import os import pandas as pd def main(): parser = argparse.ArgumentParser() parser.add_argument("--local_dir", default="~/data/multiturn") parser.add_argument("--hdfs_dir", default=None) args = parser.parse_args() # Create example conversations conversations = [] # Conversation 1 conversations.append( { "messages": [ {"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "What is the capital of France?"}, {"role": "assistant", "content": "The capital of France is Paris."}, {"role": "user", "content": "And what about Germany?"}, {"role": "assistant", "content": "The capital of Germany is Berlin."}, ] } ) # Conversation 2 conversations.append( { "messages": [ {"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "Can you explain quantum computing?"}, { "role": "assistant", "content": "Quantum computing is a type of computing that uses quantum-mechanical phenomena, " "such as superposition and entanglement, to perform operations on data.", }, {"role": "user", "content": "How is it different from classical computing?"}, { "role": "assistant", "content": "Classical computing uses bits that are either 0 or 1, while quantum computing uses " "quantum bits or qubits that can exist in multiple states simultaneously due to superposition.", }, ] } ) # Conversation 3 conversations.append( { "messages": [ {"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "Write a simple Python function to calculate factorial."}, { "role": "assistant", "content": ( "```python\ndef factorial(n):\n if n == 0 or n == 1:\n return 1\n else:\n " "return n * factorial(n-1)\n```\n\nThis is a recursive function to calculate the " "factorial of a number." ), }, {"role": "user", "content": "Can you make it iterative instead?"}, { "role": "assistant", "content": ( "```python\ndef factorial(n):\n result = 1\n for i in range(1, n+1):\n " "result *= i\n return result\n```\n\nThis is an iterative version of the factorial function." ), }, ] } ) # Create train and test datasets train_data = conversations[:2] # First 2 conversations for training test_data = conversations[2:] # Last conversation for testing # Create output directory local_dir = os.path.expanduser(args.local_dir) os.makedirs(local_dir, exist_ok=True) # Save to parquet files train_df = pd.DataFrame(train_data) test_df = pd.DataFrame(test_data) train_df.to_parquet(os.path.join(local_dir, "train.parquet")) test_df.to_parquet(os.path.join(local_dir, "test.parquet")) # Handle HDFS if specified if args.hdfs_dir is not None: try: from verl.utils.hdfs_io import copy, makedirs makedirs(args.hdfs_dir) copy(src=local_dir, dst=args.hdfs_dir) except ImportError: print("Warning: HDFS support not available. Skipping HDFS copy.") # Print statistics print(f"Train dataset size: {len(train_df)}") print(f"Test dataset size: {len(test_df)}") print(f"Data saved to {local_dir}") if __name__ == "__main__": main() ================================================ FILE: verl_rl/examples/data_preprocess/preprocess_search_r1_dataset.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2023-2024 SGLang Team # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import logging import os import tempfile import pandas as pd from huggingface_hub import hf_hub_download from huggingface_hub.utils import EntryNotFoundError from verl.utils.hdfs_io import copy, makedirs # Setup logging logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s") logger = logging.getLogger(__name__) # Configuration constants DEFAULT_SYSTEM_CONTENT = "You are a helpful and harmless assistant." DEFAULT_USER_CONTENT_PREFIX = ( "Answer the given question. You must conduct reasoning inside <think> and </think> " "first every time you get new information. After reasoning, if you find you lack " "some knowledge, you can call a search engine by <search> query </search> " "and it will return the top searched results between <information> and " "</information>. You can search as many times as you want. If you find no " "further external knowledge needed, you can directly provide the answer inside " "<answer> and </answer>, without detailed illustrations. For example, " "<answer> Beijing </answer>. Question: " ) def process_single_row(row, current_split_name, row_index): """ Process a single row of data for SearchR1-like format. Args: row: DataFrame row containing the original data current_split_name: Name of the current split (train/test) row_index: Index of the row in the DataFrame Returns: pd.Series: Processed row data in the required format """ question = row.get("question", "") # Build prompt structure user_content = user_content_prefix.rstrip("\n") + question prompt = [{"role": "system", "content": system_content}, {"role": "user", "content": user_content}] # Extract ground truth from reward_model or fallback to golden_answers reward_model_data = row.get("reward_model") if isinstance(reward_model_data, dict) and "ground_truth" in reward_model_data: ground_truth = reward_model_data.get("ground_truth") else: ground_truth = row.get("golden_answers", []) # Process data source data_source_tagged = "searchR1_" + str(row.get("data_source", "")) # Build tools kwargs structure tools_kwargs = { "search": { "create_kwargs": {"ground_truth": ground_truth, "question": question, "data_source": data_source_tagged} } } # Build complete extra_info structure extra_info = { "index": row_index, "need_tools_kwargs": True, "question": question, "split": current_split_name, "tools_kwargs": tools_kwargs, } return pd.Series( { "data_source": data_source_tagged, "prompt": prompt, "ability": row.get("ability"), "reward_model": reward_model_data, "extra_info": extra_info, "metadata": row.get("metadata"), } ) def main(): local_save_dir = os.path.expanduser(args.local_dir) os.makedirs(local_save_dir, exist_ok=True) processed_files = [] # Download and process files using temporary directory with tempfile.TemporaryDirectory() as tmp_download_dir: for split in ["train", "test"]: parquet_filename = f"{split}.parquet" logger.info(f"Processing {split} split...") try: # Download Parquet file from HuggingFace logger.info(f"Downloading {parquet_filename} from {args.hf_repo_id}") local_parquet_filepath = hf_hub_download( repo_id=args.hf_repo_id, filename=parquet_filename, repo_type="dataset", local_dir=tmp_download_dir, local_dir_use_symlinks=False, ) # Load and
process Parquet file df_raw = pd.read_parquet(local_parquet_filepath) logger.info(f"Loaded {len(df_raw)} rows from {parquet_filename}") def apply_process_row(row, split_name=split): return process_single_row(row, current_split_name=split_name, row_index=row.name) df_processed = df_raw.apply(apply_process_row, axis=1) # Save processed DataFrame output_file_path = os.path.join(local_save_dir, f"{split}.parquet") df_processed.to_parquet(output_file_path, index=False) logger.info(f"Saved {len(df_processed)} processed rows to {output_file_path}") processed_files.append(output_file_path) except EntryNotFoundError: logger.warning(f"{parquet_filename} not found in repository {args.hf_repo_id}") except Exception as e: logger.error(f"Error processing {split} split: {e}") if not processed_files: logger.warning("No data was processed or saved") return logger.info(f"Successfully processed {len(processed_files)} files to {local_save_dir}") # Copy to HDFS if specified if args.hdfs_dir: try: makedirs(args.hdfs_dir) copy(src=local_save_dir, dst=args.hdfs_dir) logger.info(f"Successfully copied files to HDFS: {args.hdfs_dir}") except Exception as e: logger.error(f"Error copying files to HDFS: {e}") if __name__ == "__main__": parser = argparse.ArgumentParser(description="Download Search-R1 from HuggingFace, process, and save to Parquet.") parser.add_argument( "--hf_repo_id", default="PeterJinGo/nq_hotpotqa_train", help="HuggingFace dataset repository ID." ) parser.add_argument( "--local_dir", default="~/data/searchR1_processed_direct", help="Local directory to save the processed Parquet files.", ) parser.add_argument("--hdfs_dir", default=None, help="Optional HDFS directory to copy the Parquet files to.") args = parser.parse_args() # System and user content configuration system_content = DEFAULT_SYSTEM_CONTENT user_content_prefix = DEFAULT_USER_CONTENT_PREFIX main() ================================================ FILE: verl_rl/examples/generation/run_deepseek7b_mutli_node.sh ================================================ set -x data_path=$HOME/data/rlhf/gsm8k/test.parquet save_path=$HOME/data/rlhf/math/deepseek_v2_lite_gen_test.parquet model_path=deepseek-ai/deepseek-llm-7b-chat python3 -m verl.trainer.main_generation \ trainer.nnodes=2 \ trainer.n_gpus_per_node=8 \ data.path=$data_path \ data.prompt_key=prompt \ data.n_samples=1 \ data.output_path=$save_path \ model.path=$model_path \ +model.trust_remote_code=True \ rollout.temperature=1.0 \ rollout.top_k=50 \ rollout.top_p=0.7 \ rollout.prompt_length=2048 \ rollout.response_length=1024 \ rollout.tensor_model_parallel_size=16 \ rollout.gpu_memory_utilization=0.8 ================================================ FILE: verl_rl/examples/generation/run_deepseek_v2_lite_math.sh ================================================ set -x data_path=$HOME/data/gsm8k/test.parquet save_path=$HOME/data/gsm8k/deepseek_v2_lite_gen_test.parquet model_path=deepseek-ai/deepseek-llm-7b-chat python3 -m verl.trainer.main_generation \ trainer.nnodes=1 \ trainer.n_gpus_per_node=8 \ data.path=$data_path \ data.prompt_key=prompt \ data.n_samples=1 \ data.output_path=$save_path \ model.path=$model_path \ +model.trust_remote_code=True \ rollout.temperature=1.0 \ rollout.top_k=50 \ rollout.top_p=0.7 \ rollout.prompt_length=2048 \ rollout.response_length=1024 \ rollout.tensor_model_parallel_size=2 \ rollout.gpu_memory_utilization=0.8 ================================================ FILE: verl_rl/examples/gpg_trainer/gpg.md ================================================ # GPG: Group
Policy Gradient Group Policy Gradient (GPG) is a minimalist reinforcement learning (RL) method that enhances the reasoning ability of large language models without relying on supervised fine-tuning or complex tricks. GPG revisits traditional policy gradients and directly optimizes the RL objective: no surrogate losses, no KL penalties, no critic, and no reference model. Compared to GRPO, GPG is simpler, more efficient, and achieves better results on many tasks. For more details, please refer to the original paper [GPG: A Simple and Strong Reinforcement Learning Baseline for Model Reasoning](https://arxiv.org/abs/2504.02546). ## Key Components - Uses a corrected advantage function to improve policy gradient accuracy and training efficiency. - By eliminating the critic and reference models and avoiding KL divergence constraints, GPG significantly simplifies the training process compared to Group Relative Policy Optimization (GRPO). ## Configuration To configure GPG within the framework, use the following YAML settings. ```yaml algorithm: adv_estimator: gpg actor_rollout_ref: actor: policy_loss: loss_mode: "gpg" ``` ## Advanced Extensions GPG is a simple and strong baseline for model reasoning. Although it avoids using KL loss in its original form, you can still add a KL loss to further improve performance. ```yaml algorithm: adv_estimator: gpg actor_rollout_ref: actor: use_kl_loss: True # enable kl regularization kl_loss_coef: 0.01 policy_loss: loss_mode: "gpg" ``` ================================================ FILE: verl_rl/examples/gpg_trainer/run_qwen2-7b_math.sh ================================================ set -x # If you are using vllm<=0.6.3, you might need to set the following environment variable to avoid bugs: # export VLLM_ATTENTION_BACKEND=XFORMERS gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet math_train_path=$HOME/data/math/train.parquet math_test_path=$HOME/data/math/test.parquet train_files="['$gsm8k_train_path', '$math_train_path']" test_files="['$gsm8k_test_path', '$math_test_path']" python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=gpg \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2-7B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=16 \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.actor.policy_loss.loss_mode=gpg \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=16 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_gpg_example_gsm8k_math' \
trainer.experiment_name='qwen2_7b_function_rm' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_rl/examples/gpg_trainer/run_qwen2-7b_math_megatron.sh ================================================ set -x # If you are using vllm<=0.6.3, you might need to set the following environment variable to avoid bugs: # export VLLM_ATTENTION_BACKEND=XFORMERS export CUDA_DEVICE_MAX_CONNECTIONS=1 # For megatron communication/computation overlapping gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet math_train_path=$HOME/data/math/train.parquet math_test_path=$HOME/data/math/test.parquet train_files="['$gsm8k_train_path', '$math_train_path']" test_files="['$gsm8k_test_path', '$math_test_path']" python3 -m verl.trainer.main_ppo --config-path=config \ --config-name='ppo_megatron_trainer.yaml'\ algorithm.adv_estimator=gpg \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2-7B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=2 \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=2 \ actor_rollout_ref.actor.policy_loss.loss_mode=gpg \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=2 \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=2 \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_gpg_example_gsm8k_math' \ trainer.experiment_name='qwen2_7b_megatron' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_rl/examples/grpo_trainer/README.md ================================================ # Group Relative Policy Optimization (GRPO) In reinforcement learning, classic algorithms like PPO rely on a "critic" model to estimate the value of actions, guiding the learning process. However, training this critic model can be resource-intensive. GRPO simplifies this process by eliminating the need for a separate critic model. Instead, it operates as follows: - Group Sampling: For a given problem, the model generates multiple possible solutions, forming a "group" of outputs. - Reward Assignment: Each solution is evaluated and assigned a reward based on its correctness or quality. - Baseline Calculation: The average reward of the group serves as a baseline. - Policy Update: The model updates its parameters by comparing each solution's reward to the group baseline, reinforcing better-than-average solutions and discouraging worse-than-average ones. 
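To make the baseline and update steps concrete, the sketch below shows one way to compute group-relative advantages. It is a minimal illustration written for this README, not verl's actual implementation; the function name and the `norm_by_std` flag are invented here, with `norm_by_std` mirroring the `algorithm.norm_adv_by_std_in_grpo` switch discussed under DrGRPO below.

```python
import torch

def grpo_advantages(rewards: torch.Tensor, group_size: int,
                    norm_by_std: bool = True, eps: float = 1e-6) -> torch.Tensor:
    """Group-relative advantages from per-response scalar rewards.

    rewards has shape (num_prompts * group_size,): one scalar per sampled response.
    """
    grouped = rewards.view(-1, group_size)          # one row of rewards per prompt
    baseline = grouped.mean(dim=-1, keepdim=True)   # group mean serves as the baseline
    adv = grouped - baseline                        # above-average responses get positive advantage
    if norm_by_std:
        # Standard GRPO also divides by the in-group std
        # (algorithm.norm_adv_by_std_in_grpo=True); DrGRPO disables this.
        adv = adv / (grouped.std(dim=-1, keepdim=True) + eps)
    return adv.view(-1)

# Example: 2 prompts with 4 sampled responses each, 0/1 correctness rewards.
adv = grpo_advantages(torch.tensor([1.0, 0.0, 0.0, 1.0,
                                    0.0, 0.0, 0.0, 1.0]), group_size=4)
```

Every token of a sampled response is then credited with that response's advantage during the policy update.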
This approach reduces computational overhead by avoiding the training of a separate value estimation model, making the learning process more efficient. For more details, refer to the original paper [DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models](https://arxiv.org/pdf/2402.03300) ## Key Components - No Value Function (Critic-less): unlike PPO, GRPO does not train a separate value network (critic) - Group Sampling (Grouped Rollouts): instead of evaluating one rollout per input, GRPO generates multiple completions (responses) from the current policy for each prompt. This set of completions is referred to as a group. - Relative Rewards: within each group, completions are scored (e.g., based on correctness), and rewards are normalized relative to the group. ## Configuration Note that all configs containing `micro_batch_size` set the maximum sample or token count per forward or backward pass to avoid GPU OOMs; their values should not change algorithmic/convergence behavior. Although many configurations start with the `ppo_` prefix, they work across different RL algorithms in verl, as the GRPO training loop is similar to that of PPO (without the critic). ![image](https://github.com/user-attachments/assets/16aebad1-0da6-4eb3-806d-54a74e712c2d) - `actor_rollout_ref.rollout.n`: For each prompt, sample n times. Defaults to 1. For GRPO, please set it to a value larger than 1 for group sampling. - `data.train_batch_size`: The global batch size of prompts used to generate a set of sampled trajectories/rollouts. The number of responses/trajectories is `data.train_batch_size * actor_rollout_ref.rollout.n`. - `actor_rollout_ref.actor.ppo_mini_batch_size`: The set of sampled trajectories is split into multiple mini-batches with batch_size=ppo_mini_batch_size for PPO actor updates. The ppo_mini_batch_size is a global size across all workers. - `actor_rollout_ref.actor.ppo_epochs`: Number of epochs for GRPO updates on one set of sampled trajectories for the actor. - `actor_rollout_ref.actor.clip_ratio`: The GRPO clip range. Defaults to 0.2. - `algorithm.adv_estimator`: Default is gae. Please set it to grpo instead. - `actor_rollout_ref.actor.loss_agg_mode`: Default is "token-mean". Options include "token-mean", "seq-mean-token-sum", "seq-mean-token-mean". The original GRPO paper takes the sample-level loss (seq-mean-token-mean), which may be unstable in long-CoT scenarios. All GRPO example scripts provided in verl use the default configuration "token-mean" for loss aggregation instead. Instead of adding a KL penalty to the reward, GRPO regularizes by directly adding the KL divergence between the trained policy and the reference policy to the loss: - `actor_rollout_ref.actor.use_kl_loss`: Whether to use KL loss in the actor. When enabled, KL is not applied in the reward function. Default is False. Please set it to True for GRPO. - `actor_rollout_ref.actor.kl_loss_coef`: The coefficient of the KL loss. Default is 0.001. - `actor_rollout_ref.actor.kl_loss_type`: Supports kl (k1), abs, mse (k2), low_var_kl (k3), and full; controls how the KL divergence between the actor and the reference policy is calculated. See this blog post for a detailed analysis: http://joschu.net/blog/kl-approx.html ## Advanced Extensions ### DrGRPO The work [Understanding R1-Zero-Like Training: A Critical Perspective](https://arxiv.org/pdf/2503.20783) claims there is an optimization bias in GRPO that leads to artificially longer responses, especially for incorrect outputs.
This inefficiency stems from the way GRPO calculates advantages using group-based reward normalization, which can inadvertently favor longer, less accurate responses. Instead, DrGRPO aggregates token-level losses by normalizing with a global constant to eliminate length bias. Configure the following to enable DrGRPO, with all other parameters the same as GRPO's: - `actor_rollout_ref.actor.loss_agg_mode`: "seq-mean-token-sum-norm", which turns off seq-dim averaging - `actor_rollout_ref.actor.use_kl_loss`: Please set it to False for DrGRPO - `algorithm.norm_adv_by_std_in_grpo`: False, which turns off standard deviation norm ## Reference Example Qwen2.5 GRPO training log and commands: [link](https://github.com/eric-haibin-lin/verl-data/blob/experiments/gsm8k/qwen2-7b-fsdp2.log) ```bash bash examples/grpo_trainer/run_qwen3-8b.sh ``` For more reference performance, please see https://verl.readthedocs.io/en/latest/algo/baseline.html ================================================ FILE: verl_rl/examples/grpo_trainer/run_deepseek671b_math_megatron.sh ================================================ set -x # 0. download the config # only need to download the `configuration_deepseek.py`, `config.json`, `tokenizer_config.json`, `tokenizer.json` and `generation_config.json` # remove the `quantization_config` in the `config.json` # set `num_nextn_predict_layers=0` to disable MTP, which is not currently supported huggingface-cli download deepseek-ai/DeepSeek-V3-0324 configuration_deepseek.py config.json # 1. download the dist_ckpt format model from https://huggingface.co/BearBiscuit05/dpsk-v3-671B-BF16-dist_ckpt/tree/main # change the HF_MODEL_PATH and DIST_CKPT_PATH to your own path DIST_CKPT_PATH="" LLM="" # 2. run the script gsm8k_train_path=/data/gsm8k/train.parquet gsm8k_test_path=/data/gsm8k/test.parquet train_files=$gsm8k_train_path test_files=$gsm8k_test_path ALL_OFFLOAD=${ALL_OFFLOAD:-True} COMMON_PARAM_OFFLOAD=${COMMON_PARAM_OFFLOAD:-$ALL_OFFLOAD} COMMON_GRAD_OFFLOAD=${COMMON_GRAD_OFFLOAD:-$ALL_OFFLOAD} COMMON_OPTIMIZER_OFFLOAD=${COMMON_OPTIMIZER_OFFLOAD:-$ALL_OFFLOAD} ACTOR_PARAM_OFFLOAD=${ACTOR_PARAM_OFFLOAD:-$COMMON_PARAM_OFFLOAD} ACTOR_GRAD_OFFLOAD=${ACTOR_GRAD_OFFLOAD:-$COMMON_GRAD_OFFLOAD} ACTOR_OPTIMIZER_OFFLOAD=${ACTOR_OPTIMIZER_OFFLOAD:-$COMMON_OPTIMIZER_OFFLOAD} REF_PARAM_OFFLOAD=${REF_PARAM_OFFLOAD:-$COMMON_PARAM_OFFLOAD} CRITIC_PARAM_OFFLOAD=${CRITIC_PARAM_OFFLOAD:-$COMMON_PARAM_OFFLOAD} CRITIC_GRAD_OFFLOAD=${CRITIC_GRAD_OFFLOAD:-$COMMON_GRAD_OFFLOAD} CRITIC_OPTIMIZER_OFFLOAD=${CRITIC_OPTIMIZER_OFFLOAD:-$COMMON_OPTIMIZER_OFFLOAD} RM_PARAM_OFFLOAD=${RM_PARAM_OFFLOAD:-$COMMON_PARAM_OFFLOAD} # 512 H20(96GB) NODES=64 PP=16 TP=1 EP=32 ETP=1 INFER_TP=32 # consider TP/ETP, and enable recompute if short of memory # full recompute # +actor_rollout_ref.actor.megatron.override_transformer_config.recompute_method=uniform \ # +actor_rollout_ref.actor.megatron.override_transformer_config.recompute_granularity=full \ # +actor_rollout_ref.actor.megatron.override_transformer_config.recompute_num_layers=1 \ n_resp_per_prompt=4 # RAY_ADDRESS='auto' ray job submit --working-dir . 
-- python3 -m verl.trainer.main_ppo --config-path=./config --config-name='ppo_megatron_trainer'\ algorithm.adv_estimator=grpo \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=512 \ data.max_prompt_length=2048 \ data.max_response_length=4096 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=$LLM \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=128 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=1 \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.actor.use_torch_compile=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=1 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ actor_rollout_ref.rollout.temperature=1.0 \ actor_rollout_ref.rollout.top_p=1.0 \ actor_rollout_ref.rollout.top_k=-1 \ actor_rollout_ref.rollout.tensor_model_parallel_size=$INFER_TP \ algorithm.use_kl_in_reward=False \ trainer.logger='["console","tensorboard"]' \ trainer.project_name='verl_megatron_gsm8k_examples' \ trainer.experiment_name='dsv3-32nodes' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=$NODES \ trainer.save_freq=-1 \ trainer.test_freq=5 \ +actor_rollout_ref.actor.megatron.override_transformer_config.num_layers_in_first_pipeline_stage=3 \ +actor_rollout_ref.actor.megatron.override_transformer_config.num_layers_in_last_pipeline_stage=2 \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=$PP \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=$PP \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=$TP \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=$TP \ actor_rollout_ref.actor.megatron.expert_model_parallel_size=$EP \ actor_rollout_ref.ref.megatron.expert_model_parallel_size=$EP \ actor_rollout_ref.actor.megatron.expert_tensor_parallel_size=$ETP \ actor_rollout_ref.ref.megatron.expert_tensor_parallel_size=$ETP \ actor_rollout_ref.actor.megatron.param_offload=${ACTOR_PARAM_OFFLOAD} \ actor_rollout_ref.actor.megatron.optimizer_offload=${ACTOR_OPTIMIZER_OFFLOAD} \ actor_rollout_ref.actor.megatron.grad_offload=${ACTOR_GRAD_OFFLOAD} \ actor_rollout_ref.ref.megatron.param_offload=${REF_PARAM_OFFLOAD} \ actor_rollout_ref.actor.megatron.use_dist_checkpointing=True \ actor_rollout_ref.ref.megatron.use_dist_checkpointing=True \ actor_rollout_ref.actor.megatron.dist_checkpointing_path=$DIST_CKPT_PATH \ actor_rollout_ref.ref.megatron.dist_checkpointing_path=$DIST_CKPT_PATH \ trainer.default_local_dir=$CKPT_DIR \ trainer.val_before_train=False \ trainer.total_epochs=100 $@ ================================================ FILE: verl_rl/examples/grpo_trainer/run_deepseek7b_llm.sh ================================================ set -x python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=1024 \ data.max_prompt_length=512 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=deepseek-ai/deepseek-llm-7b-chat \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=80 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ 
actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=160 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=160 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger=console \ trainer.project_name='verl_grpo_example_gsm8k' \ trainer.experiment_name='deepseek_llm_7b_function_rm' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_rl/examples/grpo_trainer/run_deepseek7b_llm_math.sh ================================================ set -x gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet math_train_path=$HOME/data/math/train.parquet math_test_path=$HOME/data/math/test.parquet train_files="['$gsm8k_train_path', '$math_train_path']" test_files="['$gsm8k_test_path', '$math_test_path']" python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=deepseek-ai/deepseek-llm-7b-chat \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=40 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=40 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=40 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_gsm8k_math' \ trainer.experiment_name='deepseek_llm_7b_function_rm_math' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_rl/examples/grpo_trainer/run_deepseek7b_llm_math_megatron.sh ================================================ set -x export CUDA_DEVICE_MAX_CONNECTIONS=1 # For megatron communication/computation overlapping gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet math_train_path=$HOME/data/math/train.parquet math_test_path=$HOME/data/math/test.parquet train_files="['$gsm8k_train_path', '$math_train_path']" test_files="['$gsm8k_test_path', '$math_test_path']" python3 -m 
verl.trainer.main_ppo --config-path=config \ --config-name='ppo_megatron_trainer.yaml'\ algorithm.adv_estimator=grpo \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=deepseek-ai/deepseek-llm-7b-chat \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=2 \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=2 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=2 \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=2 \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_gsm8k_math' \ trainer.experiment_name='deepseek_llm_7b_math_megatron' \ trainer.n_gpus_per_node=16 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_rl/examples/grpo_trainer/run_deepseek7b_llm_seq_balance.sh ================================================ set -x python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=1024 \ data.max_prompt_length=512 \ data.max_response_length=512 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=deepseek-ai/deepseek-llm-7b-chat \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.use_dynamic_bsz=True \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=24000 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_gsm8k' \ trainer.experiment_name='deepseek_llm_7b_function_rm_seq_packing' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_rl/examples/grpo_trainer/run_minicpmo2_6.sh ================================================ set -x python3 -m verl.trainer.main_ppo \ 
algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/geo3k/train.parquet \ data.val_files=$HOME/data/geo3k/test.parquet \ data.train_batch_size=128 \ data.max_prompt_length=1024 \ data.max_response_length=2048 \ data.filter_overlong_prompts=False \ data.truncation='error' \ data.image_key=images \ data.trust_remote_code=True \ data.custom_cls.path=recipe/minicpmo/rl_dataset.py \ data.custom_cls.name=RLHFDataset \ actor_rollout_ref.model.path=openbmb/MiniCPM-o-2_6 \ actor_rollout_ref.model.trust_remote_code=True \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=32 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=16 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.use_dynamic_bsz=False \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ +actor_rollout_ref.actor.fsdp_config.use_orig_params=True \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.enable_chunked_prefill=False \ actor_rollout_ref.rollout.enforce_eager=False \ actor_rollout_ref.rollout.free_cache_engine=False \ actor_rollout_ref.rollout.n=8 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=16 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.kl_ctrl.kl_coef=0.001 \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_geo3k' \ trainer.experiment_name='minicpmo2_6_function_rm' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_rl/examples/grpo_trainer/run_moonlight16b_math_megatron.sh ================================================ set -x export CUDA_DEVICE_MAX_CONNECTIONS=1 # For megatron communication/computation overlapping HF_MODEL_PATH=moonshotai/Moonlight-16B-A3B DIST_CKPT_PATH=${DIST_CKPT_PATH} train_path=$HOME/data/gsm8k/train.parquet test_path=$HOME/data/gsm8k/test.parquet python3 -m verl.trainer.main_ppo --config-path=config \ --config-name='ppo_megatron_trainer.yaml'\ algorithm.adv_estimator=grpo \ data.train_files="$train_path" \ data.val_files="$test_path" \ data.train_batch_size=192 \ data.max_prompt_length=1024 \ data.max_response_length=2048 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.trust_remote_code=True \ actor_rollout_ref.model.path=$HF_MODEL_PATH \ actor_rollout_ref.model.trust_remote_code=True \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=64 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=16 \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=3 \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=4 \ actor_rollout_ref.actor.megatron.expert_model_parallel_size=4 \ actor_rollout_ref.actor.megatron.expert_tensor_parallel_size=1 \ actor_rollout_ref.actor.megatron.use_dist_checkpointing=True \ actor_rollout_ref.actor.megatron.dist_checkpointing_path=$DIST_CKPT_PATH \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ 
actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \ actor_rollout_ref.rollout.tensor_model_parallel_size=4 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=16 \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=3 \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=4 \ actor_rollout_ref.ref.megatron.expert_model_parallel_size=4 \ actor_rollout_ref.ref.megatron.expert_tensor_parallel_size=1 \ actor_rollout_ref.ref.megatron.use_dist_checkpointing=True \ actor_rollout_ref.ref.megatron.dist_checkpointing_path=$DIST_CKPT_PATH \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_gsm8k_math' \ trainer.experiment_name='moonlight_megatron_ep' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=3 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_rl/examples/grpo_trainer/run_qwen2-7b.sh ================================================ set -x python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=1024 \ data.max_prompt_length=512 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2-7B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=40 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=40 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=40 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_gsm8k' \ trainer.experiment_name='qwen2_7b_function_rm' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_rl/examples/grpo_trainer/run_qwen2-7b_math.sh ================================================ set -x gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet math_train_path=$HOME/data/math/train.parquet math_test_path=$HOME/data/math/test.parquet train_files="['$gsm8k_train_path', '$math_train_path']" test_files="['$gsm8k_test_path', '$math_test_path']" python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ 
data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2-7B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=16 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=16 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_gsm8k_math' \ trainer.experiment_name='qwen2_7b_function_rm' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_rl/examples/grpo_trainer/run_qwen2-7b_math_megatron.sh ================================================ set -x export CUDA_DEVICE_MAX_CONNECTIONS=1 # For megatron communication/computation overlapping rollout_mode="sync" if [ "$rollout_mode" = "async" ]; then export VLLM_USE_V1=1 return_raw_chat="True" fi gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet math_train_path=$HOME/data/math/train.parquet math_test_path=$HOME/data/math/test.parquet train_files="['$gsm8k_train_path', '$math_train_path']" test_files="['$gsm8k_test_path', '$math_test_path']" USE_FUSED_KERNELS=True python3 -m verl.trainer.main_ppo --config-path=config \ --config-name='ppo_megatron_trainer.yaml'\ algorithm.adv_estimator=grpo \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.return_raw_chat=$return_raw_chat \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2-7B-Instruct \ actor_rollout_ref.model.use_fused_kernels=$USE_FUSED_KERNELS \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=2 \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=2 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.mode=$rollout_mode \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=2 \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=2 \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ 
trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_gsm8k_math' \ trainer.experiment_name='qwen2_7b_megatron' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_rl/examples/grpo_trainer/run_qwen2-7b_seq_balance.sh ================================================ set -x # For async rollout mode, dataset should return raw chat. rollout_mode="async" rollout_name="sglang" # sglang or vllm if [ "$rollout_mode" = "async" ]; then export VLLM_USE_V1=1 return_raw_chat="True" fi python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.return_raw_chat=$return_raw_chat \ data.train_batch_size=1024 \ data.max_prompt_length=512 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2-7B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.use_dynamic_bsz=True \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=24000 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=$rollout_name \ actor_rollout_ref.rollout.mode=$rollout_mode \ actor_rollout_ref.rollout.multi_turn.format=hermes \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_gsm8k' \ trainer.experiment_name='qwen2_7b_function_rm_kl1e-3' \ trainer.val_before_train=False \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_rl/examples/grpo_trainer/run_qwen2-7b_seq_balance_math_megatron.sh ================================================ set -x export CUDA_DEVICE_MAX_CONNECTIONS=1 # For megatron communication/computation overlapping gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet math_train_path=$HOME/data/math/train.parquet math_test_path=$HOME/data/math/test.parquet train_files="['$gsm8k_train_path', '$math_train_path']" test_files="['$gsm8k_test_path', '$math_test_path']" python3 -m verl.trainer.main_ppo --config-path=config \ --config-name='ppo_megatron_trainer.yaml'\ algorithm.adv_estimator=grpo \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2-7B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.use_dynamic_bsz=True \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=24000 \ 
actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=2 \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=2 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=2 \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=2 \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_gsm8k_math' \ trainer.experiment_name='qwen2_7b_megatron' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_rl/examples/grpo_trainer/run_qwen2-7b_sgl_megatron.sh ================================================ set -x gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet math_train_path=$HOME/data/math/train.parquet math_test_path=$HOME/data/math/test.parquet train_files="['$gsm8k_train_path', '$math_train_path']" test_files="['$gsm8k_test_path', '$math_test_path']" python3 -m verl.trainer.main_ppo --config-path=config \ --config-name='ppo_megatron_trainer.yaml'\ algorithm.adv_estimator=grpo \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2-7B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=2 \ actor_rollout_ref.actor.megatron.virtual_pipeline_model_parallel_size=2 \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=4 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=sglang \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_gsm8k' \ trainer.experiment_name='qwen2_7b_function_rm_megatron' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_rl/examples/grpo_trainer/run_qwen2_5-3b_gsm8k_grpo_lora.sh ================================================ set -x python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=1024 \ data.max_prompt_length=512 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ 
data.truncation='error' \ data.shuffle=False \ actor_rollout_ref.model.path=Qwen/Qwen2.5-3B-Instruct \ actor_rollout_ref.model.use_shm=True \ actor_rollout_ref.model.lora_rank=64 \ actor_rollout_ref.model.lora_alpha=32 \ actor_rollout_ref.actor.optim.lr=3e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=40 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=40 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.rollout.load_format=safetensors \ actor_rollout_ref.rollout.layered_summon=True \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=40 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_gsm8k' \ trainer.experiment_name='qwen2.5_3b_grpo_lora' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_rl/examples/grpo_trainer/run_qwen2_5-7b_math_megatron_diff_tp.sh ================================================ set -x export CUDA_DEVICE_MAX_CONNECTIONS=1 # For megatron communication/computation overlapping gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet math_train_path=$HOME/data/math/train.parquet math_test_path=$HOME/data/math/test.parquet train_files="['$gsm8k_train_path', '$math_train_path']" test_files="['$gsm8k_test_path', '$math_test_path']" python3 -m verl.trainer.main_ppo --config-path=config \ --config-name='ppo_megatron_trainer.yaml'\ algorithm.adv_estimator=grpo \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2.5-7B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=2 \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=2 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.rollout.tensor_model_parallel_size=4 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=2 \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=2 \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ 
trainer.project_name='verl_grpo_example_gsm8k_math' \ trainer.experiment_name='qwen2_7b_megatron' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_rl/examples/grpo_trainer/run_qwen2_5_32b_grpo_npu.sh ================================================ set -x python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2.5-32B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6\ actor_rollout_ref.model.use_remove_padding=False \ actor_rollout_ref.actor.ppo_mini_batch_size=128 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=2 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=2 \ actor_rollout_ref.rollout.tensor_model_parallel_size=8 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=2 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger=console \ trainer.project_name='verl_grpo_example_gsm8k' \ trainer.experiment_name='qwen2_5_32b_function_rm' \ trainer.n_gpus_per_node=16 \ trainer.nnodes=2 \ trainer.save_freq=-1 \ trainer.test_freq=10 \ trainer.total_epochs=15 \ trainer.device=npu $@ ================================================ FILE: verl_rl/examples/grpo_trainer/run_qwen2_5_7b_grpo_discrete_prof_npu.sh ================================================ set -x # profiling configuration PROFILE_STEPS="[2,4]" PROFILE_RANKS_ALL=False DISCRETE=True PROFILE_RANKS="[1,2]" # profiling NPU options SAVE_PATH="$HOME/profile_data" LEVEL="level1" WITH_MEMORY=False RECORD_SHAPES=False WITH_NPU=True WITH_CPU=True WITH_MODULE=False WITH_STACK=False ANALYSIS=True python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2.5-7B-Instruct \ actor_rollout_ref.actor.optim.lr=5e-8 \ actor_rollout_ref.model.use_remove_padding=False \ actor_rollout_ref.profiler.ranks=$PROFILE_RANKS \ actor_rollout_ref.profiler.all_ranks=$PROFILE_RANKS_ALL \ actor_rollout_ref.profiler.discrete=$DISCRETE \ actor_rollout_ref.actor.ppo_mini_batch_size=32 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=2 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ 
actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=2 \ actor_rollout_ref.rollout.tensor_model_parallel_size=4 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.3 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.rollout.enable_chunked_prefill=False \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=2 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.npu_profile.options.save_path=$SAVE_PATH \ trainer.npu_profile.options.level=$LEVEL \ trainer.npu_profile.options.with_memory=$WITH_MEMORY \ trainer.npu_profile.options.record_shapes=$RECORD_SHAPES \ trainer.npu_profile.options.with_npu=$WITH_NPU \ trainer.npu_profile.options.with_cpu=$WITH_CPU \ trainer.npu_profile.options.with_module=$WITH_MODULE \ trainer.npu_profile.options.with_stack=$WITH_STACK \ trainer.npu_profile.options.analysis=$ANALYSIS \ trainer.critic_warmup=0 \ trainer.logger=console \ trainer.project_name='verl_grpo_example_gsm8k' \ trainer.experiment_name='qwen2_5_7b_function_rm' \ trainer.n_gpus_per_node=16 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=5 \ trainer.total_epochs=5 \ trainer.profile_steps=$PROFILE_STEPS \ trainer.device=npu $@ ================================================ FILE: verl_rl/examples/grpo_trainer/run_qwen2_5_7b_grpo_e2e_prof_npu.sh ================================================ set -x # profiling configuration PROFILE_STEPS="[2,4]" PROFILE_RANKS_ALL=True DISCRETE=False # profiling NPU options SAVE_PATH="$HOME/profile_data" LEVEL="level1" WITH_MEMORY=False RECORD_SHAPES=False WITH_NPU=True WITH_CPU=True WITH_MODULE=False WITH_STACK=False ANALYSIS=True python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2.5-7B-Instruct \ actor_rollout_ref.actor.optim.lr=5e-8 \ actor_rollout_ref.model.use_remove_padding=False \ actor_rollout_ref.profiler.all_ranks=$PROFILE_RANKS_ALL \ actor_rollout_ref.profiler.discrete=$DISCRETE \ actor_rollout_ref.actor.ppo_mini_batch_size=32 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=2 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=2 \ actor_rollout_ref.rollout.tensor_model_parallel_size=4 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.3 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.rollout.enable_chunked_prefill=False \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=2 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.npu_profile.options.save_path=$SAVE_PATH \ trainer.npu_profile.options.level=$LEVEL \ trainer.npu_profile.options.with_memory=$WITH_MEMORY \ trainer.npu_profile.options.record_shapes=$RECORD_SHAPES \ trainer.npu_profile.options.with_npu=$WITH_NPU \ trainer.npu_profile.options.with_cpu=$WITH_CPU \ 
trainer.npu_profile.options.with_module=$WITH_MODULE \ trainer.npu_profile.options.with_stack=$WITH_STACK \ trainer.npu_profile.options.analysis=$ANALYSIS \ trainer.critic_warmup=0 \ trainer.logger=console \ trainer.project_name='verl_grpo_example_gsm8k' \ trainer.experiment_name='qwen2_5_7b_function_rm' \ trainer.n_gpus_per_node=16 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=5 \ trainer.total_epochs=5 \ trainer.profile_steps=$PROFILE_STEPS \ trainer.device=npu $@ ================================================ FILE: verl_rl/examples/grpo_trainer/run_qwen2_5_7b_grpo_npu.sh ================================================ set -x python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2.5-7B-Instruct \ actor_rollout_ref.actor.optim.lr=5e-8 \ actor_rollout_ref.model.use_remove_padding=False \ actor_rollout_ref.actor.ppo_mini_batch_size=32 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=2 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=2 \ actor_rollout_ref.rollout.tensor_model_parallel_size=4 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.3 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.rollout.enable_chunked_prefill=False \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=2 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger=console \ trainer.project_name='verl_grpo_example_gsm8k' \ trainer.experiment_name='qwen2_5_7b_function_rm' \ trainer.n_gpus_per_node=16 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=5 \ trainer.total_epochs=5 \ trainer.device=npu $@ ================================================ FILE: verl_rl/examples/grpo_trainer/run_qwen2_5_vl-7b-megatron.sh ================================================ set -x ENGINE=${1:-vllm} export CUDA_DEVICE_MAX_CONNECTIONS=1 # For megatron communication/computation overlapping HF_MODEL_PATH=Qwen/Qwen2.5-VL-7B-Instruct DIST_CKPT_PATH=${DIST_CKPT_PATH} # convert the HF model to megatron format offline # python scripts/converter_hf_to_mcore.py --hf_model_path $HF_MODEL_PATH --output_path $DIST_CKPT_PATH # megatron tuning guide: # 1. we recommend offloading all states by setting ALL_OFFLOAD=True # 2. enable dynamic batch size by setting actor_rollout_ref.actor.use_dynamic_bsz=True ref.log_prob_use_dynamic_bsz=True rollout.log_prob_use_dynamic_bsz=True # 3. set ppo_max_token_len_per_gpu and log_prob_max_token_len_per_gpu as large as possible for better MFU (limited by GPU memory). ensure ppo_max_token_len_per_gpu > max_prompt_length+max_response_length; if the sequence length is too long, you can increase the TP/PP size # 4.
if memory is very limited, enable full recompute, but the mfu will be 30% lower # full recompute settings: # +actor_rollout_ref.actor.megatron.override_transformer_config.recompute_method=uniform \ # +actor_rollout_ref.actor.megatron.override_transformer_config.recompute_granularity=full \ # +actor_rollout_ref.actor.megatron.override_transformer_config.recompute_num_layers=1 \ ALL_OFFLOAD=${ALL_OFFLOAD:-True} COMMON_PARAM_OFFLOAD=${COMMON_PARAM_OFFLOAD:-$ALL_OFFLOAD} COMMON_GRAD_OFFLOAD=${COMMON_GRAD_OFFLOAD:-$ALL_OFFLOAD} COMMON_OPTIMIZER_OFFLOAD=${COMMON_OPTIMIZER_OFFLOAD:-$ALL_OFFLOAD} ACTOR_PARAM_OFFLOAD=${ACTOR_PARAM_OFFLOAD:-$COMMON_PARAM_OFFLOAD} ACTOR_GRAD_OFFLOAD=${ACTOR_GRAD_OFFLOAD:-$COMMON_GRAD_OFFLOAD} ACTOR_OPTIMIZER_OFFLOAD=${ACTOR_OPTIMIZER_OFFLOAD:-$COMMON_OPTIMIZER_OFFLOAD} REF_PARAM_OFFLOAD=${REF_PARAM_OFFLOAD:-$COMMON_PARAM_OFFLOAD} train_path=$HOME/data/geo3k/train.parquet test_path=$HOME/data/geo3k/test.parquet python3 -m verl.trainer.main_ppo --config-path=config \ --config-name='ppo_megatron_trainer.yaml'\ algorithm.adv_estimator=grpo \ data.train_files="$train_path" \ data.val_files="$test_path" \ data.train_batch_size=512 \ data.max_prompt_length=1024 \ data.max_response_length=2048 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=$HF_MODEL_PATH \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=128 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=1 \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=2 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.01 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=20 \ actor_rollout_ref.rollout.tensor_model_parallel_size=1 \ actor_rollout_ref.actor.use_dynamic_bsz=True \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=5120 \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=True \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=20480 \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=True \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=20480 \ actor_rollout_ref.rollout.name=$ENGINE \ actor_rollout_ref.rollout.engine_kwargs.vllm.disable_mm_preprocessor_cache=True \ actor_rollout_ref.rollout.gpu_memory_utilization=0.8 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=20 \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=1 \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=2 \ actor_rollout_ref.actor.megatron.use_dist_checkpointing=True \ actor_rollout_ref.ref.megatron.use_dist_checkpointing=True \ actor_rollout_ref.actor.megatron.dist_checkpointing_path=$DIST_CKPT_PATH \ actor_rollout_ref.ref.megatron.dist_checkpointing_path=$DIST_CKPT_PATH \ actor_rollout_ref.actor.megatron.param_offload=${ACTOR_PARAM_OFFLOAD} \ actor_rollout_ref.actor.megatron.optimizer_offload=${ACTOR_OPTIMIZER_OFFLOAD} \ actor_rollout_ref.actor.megatron.grad_offload=${ACTOR_GRAD_OFFLOAD} \ actor_rollout_ref.ref.megatron.param_offload=${REF_PARAM_OFFLOAD} \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_geo3k' \ trainer.experiment_name='qwen2_5_vl_7b_megatron' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ 
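The tuning-guide comments in the script above map onto knobs that are already wired into it. A minimal illustration, using only values that appear in the script itself (`ALL_OFFLOAD` is read by the script; the arithmetic restates the sizing rule from item 3):

```bash
# Offload all actor/ref states (the guide's recommended default); set
# ALL_OFFLOAD=False to keep them resident on GPU instead.
ALL_OFFLOAD=True bash run_qwen2_5_vl-7b-megatron.sh

# Item 3's sizing rule: ppo_max_token_len_per_gpu must exceed
# max_prompt_length + max_response_length. With this script's settings:
#   1024 (prompt) + 2048 (response) = 3072 < 5120 (ppo_max_token_len_per_gpu)
```

The script also exposes per-role variants (`ACTOR_PARAM_OFFLOAD`, `ACTOR_GRAD_OFFLOAD`, `ACTOR_OPTIMIZER_OFFLOAD`, `REF_PARAM_OFFLOAD`) that all default to `ALL_OFFLOAD`, so offloading can be relaxed for one role at a time.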
================================================ FILE: verl_rl/examples/grpo_trainer/run_qwen2_5_vl-7b.sh ================================================ set -x ENGINE=${1:-vllm} python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/geo3k/train.parquet \ data.val_files=$HOME/data/geo3k/test.parquet \ data.train_batch_size=512 \ data.max_prompt_length=1024 \ data.max_response_length=2048 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.image_key=images \ actor_rollout_ref.model.path=Qwen/Qwen2.5-VL-7B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=128 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=10 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.01 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=20 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=$ENGINE \ actor_rollout_ref.rollout.engine_kwargs.vllm.disable_mm_preprocessor_cache=True \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.enable_chunked_prefill=False \ actor_rollout_ref.rollout.enforce_eager=False \ actor_rollout_ref.rollout.free_cache_engine=True \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=20 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_geo3k' \ trainer.experiment_name='qwen2_5_vl_7b_function_rm' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_rl/examples/grpo_trainer/run_qwen2_5_vl-7b_lora.sh ================================================ set -x ENGINE=${1:-vllm} # If you are using vllm<=0.6.3, you might need to set the following environment variable to avoid bugs: # export VLLM_ATTENTION_BACKEND=XFORMERS python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/geo3k/train.parquet \ data.val_files=$HOME/data/geo3k/test.parquet \ data.train_batch_size=512 \ data.max_prompt_length=1024 \ data.max_response_length=2048 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.image_key=images \ actor_rollout_ref.model.path=Qwen/Qwen2.5-VL-7B-Instruct \ actor_rollout_ref.actor.optim.lr=3e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=128 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=10 \ actor_rollout_ref.model.lora_rank=64 \ actor_rollout_ref.model.lora_alpha=32 \ actor_rollout_ref.model.target_modules=all-linear \ actor_rollout_ref.model.exclude_modules='.*visual.*' \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.01 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ 
actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=20 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=$ENGINE \ actor_rollout_ref.rollout.engine_kwargs.vllm.disable_mm_preprocessor_cache=True \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.enable_chunked_prefill=False \ actor_rollout_ref.rollout.enforce_eager=False \ actor_rollout_ref.rollout.free_cache_engine=False \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=20 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_geo3k' \ trainer.experiment_name='qwen2_5_vl_7b_function_rm' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_rl/examples/grpo_trainer/run_qwen2_5_vl-7b_seq_balance.sh ================================================ set -x ENGINE=${1:-vllm} python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/geo3k/train.parquet \ data.val_files=$HOME/data/geo3k/test.parquet \ data.train_batch_size=512 \ data.max_prompt_length=1024 \ data.max_response_length=2048 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.image_key=images \ actor_rollout_ref.model.path=Qwen/Qwen2.5-VL-7B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=128 \ actor_rollout_ref.actor.use_dynamic_bsz=True \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=6144 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.01 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=$ENGINE \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.enable_chunked_prefill=False \ actor_rollout_ref.rollout.enforce_eager=False \ actor_rollout_ref.rollout.free_cache_engine=False \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=6144 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_geo3k' \ trainer.experiment_name='qwen2_5_vl_7b_function_rm' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_rl/examples/grpo_trainer/run_qwen2_5_vl_32b_npu.sh ================================================ set -x ENGINE=${1:-vllm} # Some models are optimized by vllm ascend. But in some cases, e.g. RLHF training, # the optimized model may not be suitable. In such cases, set this value to 0 to disable the optimized model.
export USE_OPTIMIZED_MODEL=0 python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/geo3k/train.parquet \ data.val_files=$HOME/data/geo3k/test.parquet \ data.train_batch_size=512 \ data.max_prompt_length=1024 \ data.max_response_length=2048 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.image_key=images \ actor_rollout_ref.model.path=Qwen/Qwen2.5-VL-32B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=32 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=1 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.01 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.use_torch_compile=False \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=1 \ actor_rollout_ref.rollout.tensor_model_parallel_size=8 \ actor_rollout_ref.rollout.name=$ENGINE \ actor_rollout_ref.rollout.engine_kwargs.vllm.disable_mm_preprocessor_cache=True \ actor_rollout_ref.rollout.gpu_memory_utilization=0.3 \ actor_rollout_ref.rollout.enable_chunked_prefill=False \ actor_rollout_ref.rollout.enforce_eager=True \ actor_rollout_ref.rollout.free_cache_engine=True \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=1 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger=console \ trainer.project_name='verl_grpo_example_geo3k' \ trainer.experiment_name='qwen2_5_vl_32b_function_rm' \ trainer.n_gpus_per_node=16 \ trainer.nnodes=2 \ trainer.save_freq=-1 \ trainer.test_freq=-1 \ trainer.total_epochs=15 \ trainer.device=npu $@ ================================================ FILE: verl_rl/examples/grpo_trainer/run_qwen2_5_vl_3b_npu.sh ================================================ set -x ENGINE=${1:-vllm} # Some models are optimized by vllm ascend. But in some cases, e.g. RLHF training, # the optimized model may not be suitable. In such cases, set this value to 0 to disable the optimized model.
export USE_OPTIMIZED_MODEL=0 python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/geo3k/train.parquet \ data.val_files=$HOME/data/geo3k/test.parquet \ data.train_batch_size=512 \ data.max_prompt_length=1024 \ data.max_response_length=2048 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.image_key=images \ actor_rollout_ref.model.path=Qwen/Qwen2.5-VL-3B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=16 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=2 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.01 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.use_torch_compile=False \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=$ENGINE \ actor_rollout_ref.rollout.engine_kwargs.vllm.disable_mm_preprocessor_cache=True \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.enable_chunked_prefill=False \ actor_rollout_ref.rollout.enforce_eager=True \ actor_rollout_ref.rollout.free_cache_engine=True \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger=console \ trainer.project_name='verl_grpo_example_geo3k' \ trainer.experiment_name='qwen2_5_vl_3b_function_rm' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=-1 \ trainer.total_epochs=15 \ trainer.device=npu $@ ================================================ FILE: verl_rl/examples/grpo_trainer/run_qwen2_5_vl_7b_npu.sh ================================================ set -x ENGINE=${1:-vllm} # Some models are optimized by vllm ascend. But in some cases, e.g. RLHF training, # the optimized model may not be suitable. In such cases, set this value to 0 to disable the optimized model.
export USE_OPTIMIZED_MODEL=0 python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/geo3k/train.parquet \ data.val_files=$HOME/data/geo3k/test.parquet \ data.train_batch_size=512 \ data.max_prompt_length=1024 \ data.max_response_length=2048 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.image_key=images \ actor_rollout_ref.model.path=Qwen/Qwen2.5-VL-7B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=32 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=2 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.01 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.use_torch_compile=False \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.rollout.tensor_model_parallel_size=4 \ actor_rollout_ref.rollout.name=$ENGINE \ actor_rollout_ref.rollout.engine_kwargs.vllm.disable_mm_preprocessor_cache=True \ actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \ actor_rollout_ref.rollout.enable_chunked_prefill=False \ actor_rollout_ref.rollout.enforce_eager=True \ actor_rollout_ref.rollout.free_cache_engine=True \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger=console \ trainer.project_name='verl_grpo_example_geo3k' \ trainer.experiment_name='qwen2_5_vl_7b_function_rm' \ trainer.n_gpus_per_node=16 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=-1 \ trainer.total_epochs=15 \ trainer.device=npu $@ ================================================ FILE: verl_rl/examples/grpo_trainer/run_qwen3-236b_megatron.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail # Note that we set the response length to 4k. This results in many truncations at the beginning. # So the training effectively uses RL to compress the math capabilities of QWen3 236b into 4k responses instead of verbose thinking. # We can achieve 0.5 on AIME'24 after 30 steps. project_name='DAPO' exp_name='DAPO-Qwen3-236b-megatron-0531a1' adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 4)) enable_overlong_buffer=True overlong_buffer_len=$((1024 * 4)) overlong_penalty_factor=0.1 loss_agg_mode="token-mean" train_prompt_bsz=256 n_resp_per_prompt=4 train_prompt_mini_bsz=16 # H20 GPUs NNODES=${NNODES:-32} # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} MODEL_PATH=$RAY_DATA_HOME/models/Qwen3-235B-A22B MCORE_MODEL_PATH=$RAY_DATA_HOME/models/Qwen3-235B-A22B_dist_ckpt_mcore/ # convert QWen3-235b-A22b to dist ckpt of mcore.
Conversion process will take about 4 hours # python scripts/converter_hf_to_mcore.py --hf_model_path $MODEL_PATH --output_path $MCORE_MODEL_PATH --use_cpu_initialization CKPTS_DIR=$RAY_DATA_HOME/ckpt/${project_name}/${exp_name} TRAIN_FILE=$RAY_DATA_HOME/dataset/dapo-math-17k.parquet TEST_FILE=$RAY_DATA_HOME/dataset/aime-2024.parquet # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_top_p=0.7 # Performance Related Parameter offload=True gen_tp=8 train_tp=4 train_ep=4 train_pp=8 python3 -m verl.trainer.main_ppo \ --config-path=config \ --config-name='ppo_megatron_trainer.yaml' \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.train_batch_size=${train_prompt_bsz} \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=1 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.megatron.param_offload=${offload} \ actor_rollout_ref.actor.megatron.optimizer_offload=${offload} \ actor_rollout_ref.actor.megatron.grad_offload=${offload} \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=${train_pp} \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=${train_tp} \ actor_rollout_ref.actor.megatron.expert_model_parallel_size=${train_ep} \ actor_rollout_ref.actor.megatron.dist_checkpointing_path=${MCORE_MODEL_PATH} \ actor_rollout_ref.actor.megatron.use_dist_checkpointing=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.num_layers_in_first_pipeline_stage=5 \ +actor_rollout_ref.actor.megatron.override_transformer_config.num_layers_in_last_pipeline_stage=5 \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.optim.clip_grad=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.rollout.gpu_memory_utilization=0.8 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=True \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=${train_pp} \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=${train_tp} \ 
actor_rollout_ref.ref.megatron.expert_model_parallel_size=${train_ep} \ actor_rollout_ref.ref.megatron.param_offload=${offload} \ actor_rollout_ref.ref.megatron.dist_checkpointing_path=${MCORE_MODEL_PATH} \ actor_rollout_ref.ref.megatron.use_dist_checkpointing=True \ reward_model.reward_manager=dapo \ +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \ +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \ +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \ +reward_model.reward_kwargs.overlong_buffer_cfg.log=False \ +reward_model.reward_kwargs.max_resp_len=${max_response_length} \ trainer.logger='["console","wandb"]' \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node=8 \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=False \ trainer.test_freq=10 \ trainer.save_freq=20 \ trainer.total_epochs=10 \ trainer.total_training_steps=100 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=auto \ trainer.log_val_generations=10 ================================================ FILE: verl_rl/examples/grpo_trainer/run_qwen3-8b.sh ================================================ # Tested successfully on the hiyouga/verl:ngc-th2.6.0-cu126-vllm0.8.4-flashinfer0.2.2-cxx11abi0 image. # It outperforms the Qwen2 7B base model by two percentage points on the test set of GSM8K. set -x python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=1024 \ data.max_prompt_length=512 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen3-8B \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=32 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_gsm8k' \ trainer.experiment_name='qwen3_8b_function_rm' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_rl/examples/grpo_trainer/run_qwen3moe-30b_megatron.sh ================================================ set -x HF_MODEL_PATH=Qwen/Qwen3-30B-A3B DIST_CKPT_PATH=${DIST_CKPT_PATH} python scripts/converter_hf_to_mcore.py --hf_model_path $HF_MODEL_PATH --output_path $DIST_CKPT_PATH export CUDA_DEVICE_MAX_CONNECTIONS=1 # For megatron communication/computation overlapping python3 -m verl.trainer.main_ppo --config-path=config \ 
--config-name='ppo_megatron_trainer.yaml'\ algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=64 \ data.max_prompt_length=1024 \ data.max_response_length=2048 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=$HF_MODEL_PATH \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=64 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=2 \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=4 \ actor_rollout_ref.actor.megatron.expert_model_parallel_size=4 \ actor_rollout_ref.actor.megatron.use_dist_checkpointing=True \ actor_rollout_ref.actor.megatron.dist_checkpointing_path=$DIST_CKPT_PATH \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.rollout.tensor_model_parallel_size=4 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=2 \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=4 \ actor_rollout_ref.ref.megatron.expert_model_parallel_size=4 \ actor_rollout_ref.ref.megatron.use_dist_checkpointing=True \ actor_rollout_ref.ref.megatron.dist_checkpointing_path=$DIST_CKPT_PATH \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_gsm8k_math' \ trainer.experiment_name='qwen3_30b_moe_megatron' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=4 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@
================================================ FILE: verl_rl/examples/ppo_trainer/README.md ================================================

# Proximal Policy Optimization (PPO)

Proximal Policy Optimization (PPO) is a family of policy gradient methods for reinforcement learning, proposed by OpenAI in 2017. PPO strikes a balance between simplicity, stability, and performance, making it one of the most widely used algorithms in modern RL applications, including large-scale language model fine-tuning.

Traditional policy gradient methods like REINFORCE or Vanilla Policy Gradient suffer from:

- High variance and sample inefficiency.
- Instability due to large policy updates.

PPO addresses these problems using a clipped surrogate objective that avoids overly large updates without requiring second-order derivatives.

For more technical details regarding PPO, we suggest reading the introduction in the [OpenAI spinning up tutorial](https://spinningup.openai.com/en/latest/algorithms/ppo.html), and the paper [Proximal Policy Optimization Algorithms](https://arxiv.org/abs/1707.06347).

## Key Components

- Actor-Critic Architecture: PPO requires both an actor model (policy) and a critic model (value function). This differs from other algorithms like GRPO and RLOO that don't require a critic model.
- Generalized Advantage Estimation (GAE): PPO uses GAE for computing advantage values, which helps reduce variance in policy gradient estimates while maintaining low bias.
- Clipped Surrogate Objective: The core of PPO is the clipped surrogate objective, which limits the size of each policy update.

## Configuration

Note that all configs containing `micro_batch_size` set the maximum sample or token count per forward/backward pass to avoid GPU OOMs; changing their values should not affect algorithmic or convergence behavior. Most critic configs mirror those of the actor. Note that the critic model is omitted from the figure below.

![image](https://github.com/user-attachments/assets/16aebad1-0da6-4eb3-806d-54a74e712c2d)

- `data.train_batch_size`: The global batch size of prompts used to generate a set of sampled trajectories/rollouts. The number of responses/trajectories is `data.train_batch_size * actor_rollout_ref.rollout.n`.
- `actor_rollout_ref.actor.ppo_mini_batch_size`: The set of sampled trajectories is split into mini-batches of size `ppo_mini_batch_size` for PPO actor updates. The `ppo_mini_batch_size` is a global size across all workers.
- `critic.ppo_mini_batch_size`: The set of sampled trajectories is split into mini-batches of size `ppo_mini_batch_size` for PPO critic updates. The `ppo_mini_batch_size` is a global size across all workers.
- `actor_rollout_ref.actor.clip_ratio`: The PPO clip range. Defaults to 0.2.
- `actor_rollout_ref.actor.ppo_epochs`: Number of epochs of PPO updates on one set of sampled trajectories for the actor.
- `critic.ppo_epochs`: Number of epochs of PPO updates on one set of sampled trajectories for the critic. Defaults to `actor_rollout_ref.actor.ppo_epochs`.
- `algorithm.gamma`: Discount factor.
- `algorithm.lam`: The lambda term that trades off bias and variance in the GAE estimator (see the GAE recursion below).
- `algorithm.adv_estimator`: Supports gae, grpo, reinforce_plus_plus, reinforce_plus_plus_baseline, rloo.

## Advanced Extensions

### KL Divergence Control

Options to prevent the policy from diverging too far from a reference policy. Two mechanisms are available: a KL reward penalty and a KL loss. For more technical details, see [Training language models to follow instructions with human feedback](https://arxiv.org/abs/2203.02155).

Options for using the KL loss for KL divergence control:

- `actor_rollout_ref.actor.use_kl_loss`: Whether to use a KL loss in the actor. When enabled, KL is not applied in the reward function. Default is False.
- `actor_rollout_ref.actor.kl_loss_coef`: The coefficient of the KL loss. Default is 0.001.
- `actor_rollout_ref.actor.kl_loss_type`: How to calculate the KL divergence between the actor and the reference policy. Supports kl (k1), abs, mse (k2), low_var_kl (k3), and full. See this blog post for a detailed analysis: http://joschu.net/blog/kl-approx.html

Options for using the KL penalty in the reward:

- `algorithm.use_kl_in_reward`: Whether to enable the in-reward KL penalty. Default is False.
- `algorithm.kl_penalty`: How to calculate the KL divergence between the actor and the reference policy. Supports kl (k1), abs, mse (k2), low_var_kl (k3), and full. For the specific options, refer to `kl_penalty` in core_algos.py. See this blog post for a detailed analysis: http://joschu.net/blog/kl-approx.html
- `algorithm.kl_ctrl.kl_coef`: The (initial) coefficient of the in-reward KL penalty. Default is 0.001.
- `algorithm.kl_ctrl.type`: 'fixed' for FixedKLController and 'adaptive' for AdaptiveKLController.
- `algorithm.kl_ctrl.horizon`: See the source code of AdaptiveKLController for details.
- `algorithm.kl_ctrl.target_kl`: See the source code of AdaptiveKLController for details.
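For reference, here is a sketch of the two pieces of math named above. The GAE estimator (parameterized by `algorithm.gamma` and `algorithm.lam`) accumulates discounted TD residuals:

```math
\delta_t = r_t + \gamma V(s_{t+1}) - V(s_t), \qquad
\hat{A}_t = \sum_{l \ge 0} (\gamma \lambda)^l \, \delta_{t+l}
```

and the k1/k2/k3 estimators follow the blog post linked above, with the per-token ratio $\rho$ taken between the reference and actor policies:

```math
\rho = \frac{\pi_{\mathrm{ref}}(a \mid s)}{\pi_\theta(a \mid s)}, \qquad
k_1 = -\log \rho, \qquad
k_2 = \tfrac{1}{2}(\log \rho)^2, \qquad
k_3 = (\rho - 1) - \log \rho
```

k3 (`low_var_kl`) is the unbiased, non-negative, low-variance estimator from that post, which is why most example scripts in this repository pair `use_kl_loss=True` with `kl_loss_type=low_var_kl`.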
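The two mechanisms are toggled independently. A minimal sketch of choosing one or the other from the command line; `COMMON_ARGS` below is a hypothetical stand-in for the data/model flags shared with the example scripts, with values borrowed from `run_deepseek7b_llm.sh`:

```bash
# Hypothetical shared flags; real runs need the full set used by the examples.
COMMON_ARGS=(
  data.train_files="$HOME/data/gsm8k/train.parquet"
  data.val_files="$HOME/data/gsm8k/test.parquet"
  actor_rollout_ref.model.path=deepseek-ai/deepseek-llm-7b-chat
)

# (a) KL as an actor loss term (no reward-side penalty):
python3 -m verl.trainer.main_ppo "${COMMON_ARGS[@]}" \
    actor_rollout_ref.actor.use_kl_loss=True \
    actor_rollout_ref.actor.kl_loss_coef=0.001 \
    actor_rollout_ref.actor.kl_loss_type=low_var_kl \
    algorithm.use_kl_in_reward=False

# (b) KL as an in-reward penalty instead:
python3 -m verl.trainer.main_ppo "${COMMON_ARGS[@]}" \
    actor_rollout_ref.actor.use_kl_loss=False \
    algorithm.use_kl_in_reward=True \
    algorithm.kl_penalty=low_var_kl \
    algorithm.kl_ctrl.kl_coef=0.001
```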
### Dual-clip PPO

Dual-Clip PPO extends the clipped objective by applying an additional lower bound when the advantage is negative, so that the objective, when multiplied by a large ratio, cannot fall below a specified bound: for A < 0, the per-token objective becomes max(min(r·A, clip(r, 1-ε, 1+ε)·A), c·A), where c > 1 is the dual-clip constant.

![image](https://github.com/user-attachments/assets/fc232181-d8b0-4307-8dd2-4dc0a4c1c139)

- `actor_rollout_ref.actor.clip_ratio_c`: the lower-bound constant c for Dual-clip PPO; defaults to 3.0.

## Reference Example

Qwen2.5 training log and commands: [link](https://github.com/eric-haibin-lin/verl-data/blob/experiments/gsm8k/Qwen2.5-0.5B-bsz256_2-prompt1024-resp512-0.567.log)

```bash
bash run_gemma.sh trainer.n_gpus_per_node=1 \
    actor_rollout_ref.rollout.tensor_model_parallel_size=1 \
    trainer.logger=console \
    critic.model.path=Qwen/Qwen2.5-0.5B-Instruct \
    actor_rollout_ref.model.path=Qwen/Qwen2.5-0.5B-Instruct \
    data.train_batch_size=256 \
    actor_rollout_ref.actor.ppo_mini_batch_size=64 \
    actor_rollout_ref.actor.ppo_micro_batch_size=2 \
    critic.ppo_micro_batch_size=2
```

Reference performance with verl v0.2:

| Model | Method | Score | Link |
|-------------------------------|------------------|-------|------|
| Qwen/Qwen2.5-0.5B-Instruct | pretrained model | 36.4 | [Qwen Blog](https://qwenlm.github.io/blog/qwen2.5-llm/) |
| Qwen/Qwen2.5-0.5B-Instruct | PPO | 56.7 | [PPO Command and Logs](https://github.com/eric-haibin-lin/verl-data/blob/experiments/gsm8k/Qwen2.5-0.5B-bsz256_2-prompt1024-resp512-0.567.log) |

================================================ FILE: verl_rl/examples/ppo_trainer/run_deepseek7b_llm.sh ================================================ set -x python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=gae \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=1024 \ data.max_prompt_length=512 \ data.max_response_length=512 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=deepseek-ai/deepseek-llm-7b-chat \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=16 \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.rollout.tensor_model_parallel_size=4 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \ critic.optim.lr=1e-5 \ critic.model.use_remove_padding=True \ critic.model.path=deepseek-ai/deepseek-llm-7b-chat \ critic.model.enable_gradient_checkpointing=True \ critic.ppo_micro_batch_size_per_gpu=32 \ critic.model.fsdp_config.param_offload=False \ critic.model.fsdp_config.optimizer_offload=False \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_example_gsm8k' \ trainer.experiment_name='deepseek_llm_7b_function_rm' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=1 \ trainer.use_legacy_worker_impl=auto \ trainer.total_epochs=15 $@ ================================================ FILE: verl_rl/examples/ppo_trainer/run_deepseek7b_llm_modelscope.sh ================================================ set -x
VERL_USE_MODELSCOPE=True \ python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=gae \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=1024 \ data.max_prompt_length=512 \ data.max_response_length=512 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=deepseek-ai/deepseek-llm-7b-chat \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=16 \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.rollout.tensor_model_parallel_size=4 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \ critic.optim.lr=1e-5 \ critic.model.use_remove_padding=True \ critic.model.path=deepseek-ai/deepseek-llm-7b-chat \ critic.model.enable_gradient_checkpointing=True \ critic.ppo_micro_batch_size_per_gpu=32 \ critic.model.fsdp_config.param_offload=False \ critic.model.fsdp_config.optimizer_offload=False \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_example_gsm8k' \ trainer.experiment_name='deepseek_llm_7b_function_rm' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=1 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_rl/examples/ppo_trainer/run_deepseek7b_llm_pfppo.sh ================================================ set -x # reweight_method options: ["pow", "max_min", "max_random"] python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=gae \ algorithm.use_pf_ppo=True \ algorithm.pf_ppo.reweight_method=pow \ algorithm.pf_ppo.weight_pow=2.0 \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=1024 \ data.max_prompt_length=512 \ data.max_response_length=512 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=deepseek-ai/deepseek-llm-7b-chat \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=16 \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.rollout.tensor_model_parallel_size=4 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \ actor_rollout_ref.rollout.n=5 \ critic.optim.lr=1e-5 \ critic.model.use_remove_padding=True \ critic.model.path=deepseek-ai/deepseek-llm-7b-chat \ critic.model.enable_gradient_checkpointing=True \ critic.ppo_micro_batch_size_per_gpu=32 \ critic.model.fsdp_config.param_offload=False \ critic.model.fsdp_config.optimizer_offload=False \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_example_gsm8k' \ trainer.experiment_name='deepseek_llm_7b_function_rm' \ trainer.n_gpus_per_node=8 \
trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=1 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_rl/examples/ppo_trainer/run_deepseek7b_llm_sandbox_fusion.sh ================================================ set -x python3 -m verl.trainer.main_ppo \ reward_model.sandbox_fusion.url='https://xxxxxxxxx.apigateway-cn-beijing.volceapi.com/run_code' \ reward_model.sandbox_fusion.max_concurrent=128 \ reward_model.reward_manager=prime \ algorithm.adv_estimator=gae \ data.train_files=$HOME/data/Eurus-2-RL-Data/train.parquet \ data.val_files=$HOME/data/Eurus-2-RL-Data/validation.parquet \ data.train_batch_size=1024 \ data.max_prompt_length=512 \ data.max_response_length=512 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=deepseek-ai/deepseek-llm-7b-chat \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=16 \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.rollout.tensor_model_parallel_size=4 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \ critic.optim.lr=1e-5 \ critic.model.use_remove_padding=True \ critic.model.path=deepseek-ai/deepseek-llm-7b-chat \ critic.model.enable_gradient_checkpointing=True \ critic.ppo_micro_batch_size_per_gpu=32 \ critic.model.fsdp_config.param_offload=False \ critic.model.fsdp_config.optimizer_offload=False \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_example_sandbox_fusion' \ trainer.experiment_name='deepseek_llm_7b_function_sandbox_fusion' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=1 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_rl/examples/ppo_trainer/run_deepseek7b_llm_sp2.sh ================================================ set -x python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=gae \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=1024 \ data.max_prompt_length=512 \ data.max_response_length=512 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=deepseek-ai/deepseek-llm-7b-chat \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=32 \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=2 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=64 \ actor_rollout_ref.rollout.tensor_model_parallel_size=4 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ critic.optim.lr=1e-5 \ critic.ulysses_sequence_parallel_size=2 \ critic.model.use_remove_padding=True \ critic.model.path=deepseek-ai/deepseek-llm-7b-chat \ 
critic.model.enable_gradient_checkpointing=True \ critic.ppo_micro_batch_size_per_gpu=64 \ critic.model.fsdp_config.param_offload=False \ critic.model.fsdp_config.optimizer_offload=False \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_example_gsm8k' \ trainer.experiment_name='deepseek_llm_7b_function_rm_sp2' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_rl/examples/ppo_trainer/run_deepseek_full_hh_rlhf.sh ================================================ set -x train_files=$HOME/data/full_hh_rlhf/rl/train.parquet test_files=$HOME/data/full_hh_rlhf/rl/train.parquet # no use python3 -m verl.trainer.main_ppo --config-path=./config --config-name='ppo_megatron_trainer'\ algorithm.adv_estimator=gae \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=512 \ data.max_prompt_length=128 \ data.max_response_length=128 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=deepseek-ai/deepseek-llm-7b-chat \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=128 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.rollout.tensor_model_parallel_size=4 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \ critic.optim.lr=1e-5 \ critic.model.path=deepseek-ai/deepseek-llm-7b-chat \ critic.model.enable_gradient_checkpointing=False \ critic.ppo_micro_batch_size_per_gpu=4 \ reward_model.enable=True \ reward_model.megatron.tensor_model_parallel_size=4 \ reward_model.model.path=deepseek-ai/deepseek-llm-7b-chat \ reward_model.micro_batch_size_per_gpu=4 \ reward_model.param_offload=False \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_megatron_full_hh_rlhf_examples' \ trainer.experiment_name='deepseek_llm_7b_model_rm' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=100 $@ ================================================ FILE: verl_rl/examples/ppo_trainer/run_deepseek_math_gsm8k_megatron.sh ================================================ set -x # Example runnable on H20 * 8 export CUDA_DEVICE_MAX_CONNECTIONS=1 # For megatron communication/computation overlapping gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet math_train_path=$HOME/data/math/train.parquet math_test_path=$HOME/data/math/test.parquet train_files="['$gsm8k_train_path', '$math_train_path']" test_files="['$gsm8k_test_path', '$math_test_path']" python3 -m verl.trainer.main_ppo --config-path=./config --config-name='ppo_megatron_trainer'\ algorithm.adv_estimator=gae \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=512 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=deepseek-ai/deepseek-llm-7b-chat \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=2 \ 
actor_rollout_ref.actor.megatron.tensor_model_parallel_size=2 \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.rollout.tensor_model_parallel_size=4 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=2 \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=2 \ critic.optim.lr=1e-5 \ critic.model.path=deepseek-ai/deepseek-llm-7b-chat \ critic.ppo_micro_batch_size_per_gpu=4 \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_ppo_gsm8k_math_examples' \ trainer.experiment_name='deepseek_llm_7b_megatron' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=100 $@ ================================================ FILE: verl_rl/examples/ppo_trainer/run_deepseek_math_gsm8k_megatron_nsys.sh ================================================ set -x # Example runnable on H20 * 8 export CUDA_DEVICE_MAX_CONNECTIONS=1 # For megatron communication/computation overlapping gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet math_train_path=$HOME/data/math/train.parquet math_test_path=$HOME/data/math/test.parquet train_files=${train_files:-"$gsm8k_train_path"} test_files=${test_files:-"$gsm8k_test_path"} # Nsight profiling configuration PROFILE_STEPS="[1,2,5]" # or [] or null PROFILE_RANKS_ALL=False # or True PROFILE_RANKS=[0,4,8,12] DISCRETE=True # or True python3 -m verl.trainer.main_ppo --config-path=./config --config-name='ppo_megatron_trainer'\ algorithm.adv_estimator=gae \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=256 \ data.max_prompt_length=1024 \ data.max_response_length=512 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=deepseek-ai/deepseek-llm-7b-chat \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=64 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=2 \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=2 \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.8 \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=2 \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=2 \ actor_rollout_ref.profiler.ranks=$PROFILE_RANKS \ actor_rollout_ref.profiler.all_ranks=$PROFILE_RANKS_ALL \ actor_rollout_ref.profiler.discrete=$DISCRETE \ critic.optim.lr=1e-5 \ critic.model.path=deepseek-ai/deepseek-llm-7b-chat \ critic.ppo_micro_batch_size_per_gpu=4 \ critic.profiler.ranks=$PROFILE_RANKS \ critic.profiler.all_ranks=$PROFILE_RANKS_ALL \ critic.profiler.discrete=$DISCRETE \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_ppo_gsm8k_math_examples' \ trainer.experiment_name='deepseek_llm_7b_megatron' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=2 \ trainer.save_freq=-1 \ trainer.test_freq=-1 \ trainer.total_epochs=100 \ trainer.total_training_steps=6 \ trainer.profile_steps=$PROFILE_STEPS $@ ================================================ FILE: 
verl_rl/examples/ppo_trainer/run_gemma.sh ================================================ set -x python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=gae \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=512 \ data.max_prompt_length=1024 \ data.max_response_length=512 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=google/gemma-2-2b-it \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=False \ actor_rollout_ref.actor.ppo_mini_batch_size=128 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \ critic.optim.lr=1e-5 \ critic.model.use_remove_padding=False \ critic.model.path=google/gemma-2-2b-it \ critic.model.enable_gradient_checkpointing=False \ critic.ppo_micro_batch_size_per_gpu=4 \ critic.model.fsdp_config.param_offload=False \ critic.model.fsdp_config.optimizer_offload=False \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_example' \ trainer.experiment_name='gemma2b_function_rm' \ trainer.n_gpus_per_node=2 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=10 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_rl/examples/ppo_trainer/run_moonlight16b_a3b_gsm8k_megatron.sh ================================================ set -x export CUDA_DEVICE_MAX_CONNECTIONS=1 # For megatron communication/computation overlapping # 0. download the model huggingface-cli download moonshotai/Moonlight-16B-A3B-Instruct # 1. convert the model to mcore format # change the HF_MODEL_PATH and DIST_CKPT_PATH to your own path HF_MODEL_PATH=/data/models/moonshotai/Moonlight-16B-A3B-Instruct DIST_CKPT_PATH=/data/mcore_ckpt/Moonlight-16B-A3B-Instruct python scripts/converter_hf_to_mcore.py --hf_model_path $HF_MODEL_PATH --output_path $DIST_CKPT_PATH # 2. run the script gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet train_files=$gsm8k_train_path test_files=$gsm8k_test_path ALL_OFFLOAD=${ALL_OFFLOAD:-False} COMMON_PARAM_OFFLOAD=${COMMON_PARAM_OFFLOAD:-$ALL_OFFLOAD} COMMON_GRAD_OFFLOAD=${COMMON_GRAD_OFFLOAD:-$ALL_OFFLOAD} COMMON_OPTIMIZER_OFFLOAD=${COMMON_OPTIMIZER_OFFLOAD:-$ALL_OFFLOAD} ACTOR_PARAM_OFFLOAD=${ACTOR_PARAM_OFFLOAD:-$COMMON_PARAM_OFFLOAD} ACTOR_GRAD_OFFLOAD=${ACTOR_GRAD_OFFLOAD:-$COMMON_GRAD_OFFLOAD} ACTOR_OPTIMIZER_OFFLOAD=${ACTOR_OPTIMIZER_OFFLOAD:-$COMMON_OPTIMIZER_OFFLOAD} REF_PARAM_OFFLOAD=${REF_PARAM_OFFLOAD:-$COMMON_PARAM_OFFLOAD} CRITIC_PARAM_OFFLOAD=${CRITIC_PARAM_OFFLOAD:-$COMMON_PARAM_OFFLOAD} CRITIC_GRAD_OFFLOAD=${CRITIC_GRAD_OFFLOAD:-$COMMON_GRAD_OFFLOAD} CRITIC_OPTIMIZER_OFFLOAD=${CRITIC_OPTIMIZER_OFFLOAD:-$COMMON_OPTIMIZER_OFFLOAD} RM_PARAM_OFFLOAD=${RM_PARAM_OFFLOAD:-$COMMON_PARAM_OFFLOAD} NODES=4 PP=2 TP=8 EP=8 ETP=1 VLLM_TP=4 # RAY_ADDRESS='auto' ray job submit --working-dir . 
-- python3 -m verl.trainer.main_ppo --config-path=./config --config-name='ppo_megatron_trainer'\ algorithm.adv_estimator=gae \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=512 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.trust_remote_code=True \ actor_rollout_ref.model.path=$LLM \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.7 \ critic.optim.lr=1e-5 \ critic.model.path=$LLM \ critic.ppo_micro_batch_size_per_gpu=4 \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_megatron_gsm8k_examples' \ trainer.experiment_name='moonlight_16b_a3b_instruct_1node' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=$NODES \ trainer.save_freq=-1 \ trainer.test_freq=5 \ actor_rollout_ref.model.trust_remote_code=True \ critic.model.trust_remote_code=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.num_layers_in_last_pipeline_stage=13 \ actor_rollout_ref.rollout.tensor_model_parallel_size=$VLLM_TP \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=$PP \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=$PP \ critic.megatron.pipeline_model_parallel_size=$PP \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=$TP \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=$TP \ critic.megatron.tensor_model_parallel_size=$TP \ actor_rollout_ref.actor.megatron.expert_model_parallel_size=$EP \ actor_rollout_ref.ref.megatron.expert_model_parallel_size=$EP \ critic.megatron.expert_model_parallel_size=$EP \ actor_rollout_ref.actor.megatron.expert_tensor_parallel_size=$ETP \ actor_rollout_ref.ref.megatron.expert_tensor_parallel_size=$ETP \ critic.megatron.expert_tensor_parallel_size=$ETP \ actor_rollout_ref.actor.megatron.param_offload=${ACTOR_PARAM_OFFLOAD} \ actor_rollout_ref.actor.megatron.optimizer_offload=${ACTOR_OPTIMIZER_OFFLOAD} \ actor_rollout_ref.actor.megatron.grad_offload=${ACTOR_GRAD_OFFLOAD} \ actor_rollout_ref.ref.megatron.param_offload=${REF_PARAM_OFFLOAD} \ critic.megatron.param_offload=${CRITIC_PARAM_OFFLOAD} \ critic.megatron.optimizer_offload=${CRITIC_OPTIMIZER_OFFLOAD} \ critic.megatron.grad_offload=${CRITIC_GRAD_OFFLOAD} \ actor_rollout_ref.actor.megatron.use_dist_checkpointing=True \ actor_rollout_ref.ref.megatron.use_dist_checkpointing=True \ critic.megatron.use_dist_checkpointing=True \ actor_rollout_ref.actor.megatron.dist_checkpointing_path=$DIST_CKPT_PATH \ actor_rollout_ref.ref.megatron.dist_checkpointing_path=$DIST_CKPT_PATH \ critic.megatron.dist_checkpointing_path=$DIST_CKPT_PATH \ trainer.val_before_train=False \ trainer.total_epochs=100 $@ ================================================ FILE: verl_rl/examples/ppo_trainer/run_qwen1.5_moe_a2.7b-gsm8k_megatron.sh ================================================ set -x export CUDA_DEVICE_MAX_CONNECTIONS=1 # For megatron communication/computation overlapping # 0. download the model huggingface-cli download Qwen/Qwen1.5-MoE-A2.7B-Chat # 1. 
convert the model to mcore format # change the HF_MODEL_PATH and DIST_CKPT_PATH to your own path HF_MODEL_PATH=/data/models/Qwen/Qwen1.5-MoE-A2.7B-Chat DIST_CKPT_PATH=/data/mcore_ckpt/Qwen1.5-MoE-A2.7B-Chat python scripts/converter_hf_to_mcore.py --hf_model_path $HF_MODEL_PATH --output_path $DIST_CKPT_PATH # 2. run the script gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet train_files=$gsm8k_train_path test_files=$gsm8k_test_path NODES=4 PP=2 TP=4 CP=1 VLLM_TP=4 # RAY_ADDRESS='auto' ray job submit --working-dir . -- python3 -m verl.trainer.main_ppo --config-path=./config --config-name='ppo_megatron_trainer'\ algorithm.adv_estimator=gae \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=512 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=$HF_MODEL_PATH \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=$TP \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=$PP \ actor_rollout_ref.actor.megatron.context_parallel_size=$CP \ actor_rollout_ref.actor.megatron.use_dist_checkpointing=True \ actor_rollout_ref.actor.megatron.dist_checkpointing_path=$DIST_CKPT_PATH \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=$TP \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=$PP \ actor_rollout_ref.ref.megatron.context_parallel_size=$CP \ actor_rollout_ref.ref.megatron.use_dist_checkpointing=True \ actor_rollout_ref.ref.megatron.dist_checkpointing_path=$DIST_CKPT_PATH \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=2 \ actor_rollout_ref.rollout.gpu_memory_utilization=0.7 \ actor_rollout_ref.rollout.tensor_model_parallel_size=$VLLM_TP \ critic.optim.lr=1e-5 \ critic.model.path=$HF_MODEL_PATH \ critic.model.enable_gradient_checkpointing=False \ critic.ppo_micro_batch_size_per_gpu=4 \ critic.megatron.tensor_model_parallel_size=$TP \ critic.megatron.pipeline_model_parallel_size=$PP \ critic.megatron.context_parallel_size=$CP \ critic.megatron.use_dist_checkpointing=True \ critic.megatron.dist_checkpointing_path=$DIST_CKPT_PATH \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_megatron_gsm8k_examples' \ trainer.experiment_name='qwen1.5_moe_nochat' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=$NODES \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=100 $@ ================================================ FILE: verl_rl/examples/ppo_trainer/run_qwen2-7b_math_gsm8k_megatron.sh ================================================ set -x export CUDA_DEVICE_MAX_CONNECTIONS=1 # For megatron communication/computation overlapping gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet math_train_path=$HOME/data/math/train.parquet math_test_path=$HOME/data/math/test.parquet train_files="['$gsm8k_train_path', '$math_train_path']" test_files="['$gsm8k_test_path', '$math_test_path']" python3 -m verl.trainer.main_ppo --config-path=./config --config-name='ppo_megatron_trainer'\ algorithm.adv_estimator=gae \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ 
data.max_response_length=512 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2-7B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=2 \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=2 \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.rollout.tensor_model_parallel_size=4 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=2 \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=2 \ critic.optim.lr=1e-5 \ critic.model.path=Qwen/Qwen2-7B-Instruct \ critic.ppo_micro_batch_size_per_gpu=4 \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_ppo_gsm8k_math_examples' \ trainer.experiment_name='qwen2_7b_megatron' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=100 $@ ================================================ FILE: verl_rl/examples/ppo_trainer/run_qwen2-7b_rm.sh ================================================ # Disclaimer: the model used in this script is for academic purposes only. set -x # Data preparation scripts are available in ``examples/data_preprocess``. # Example usage: # # python3 examples/data_preprocess/math_dataset.py --local_dir ~/data/math # python3 examples/data_preprocess/gsm8k.py --local_dir ~/data/gsm8k gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet math_train_path=$HOME/data/math/train.parquet math_test_path=$HOME/data/math/test.parquet train_files="['$gsm8k_train_path', '$math_train_path']" test_files="['$gsm8k_test_path', '$math_test_path']" # prepare model ckpt huggingface-cli download Qwen/Qwen2-7B-Instruct --local-dir $HOME/models/Qwen2-7B-Instruct & huggingface-cli download sfairXC/FsfairX-LLaMA3-RM-v0.1 --local-dir $HOME/models/FsfairX-LLaMA3-RM-v0.1 & wait python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=gae \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=512 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.return_raw_chat=True \ actor_rollout_ref.model.path="$HOME/models/Qwen2-7B-Instruct" \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.optim.lr_warmup_steps_ratio=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=16 \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \ actor_rollout_ref.rollout.tensor_model_parallel_size=1 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ critic.optim.lr=1e-5 \ critic.model.use_remove_padding=True \ critic.optim.lr_warmup_steps_ratio=0.05 \ critic.model.path="$HOME/models/Qwen2-7B-Instruct" \ critic.model.enable_gradient_checkpointing=True \ critic.ppo_micro_batch_size_per_gpu=32 \
critic.model.fsdp_config.param_offload=False \ critic.model.fsdp_config.optimizer_offload=False \ reward_model.enable=True \ reward_model.model.path="$HOME/models/FsfairX-LLaMA3-RM-v0.1" \ reward_model.model.use_remove_padding=True \ reward_model.model.fsdp_config.param_offload=True \ reward_model.micro_batch_size_per_gpu=32 \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_example' \ trainer.val_before_train=False \ trainer.experiment_name='Qwen2-7B-Instruct_hybrid_rm' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_rl/examples/ppo_trainer/run_qwen2-7b_rm_seq_balance.sh ================================================ set -x gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet math_train_path=$HOME/data/math/train.parquet math_test_path=$HOME/data/math/test.parquet train_files="['$gsm8k_train_path', '$math_train_path']" test_files="['$gsm8k_test_path', '$math_test_path']" python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=gae \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=4096 \ data.max_prompt_length=4096 \ data.max_response_length=4096 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.return_raw_chat=True \ actor_rollout_ref.model.path=Qwen/Qwen2-7B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.ppo_mini_batch_size=512 \ actor_rollout_ref.actor.use_dynamic_bsz=True \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=24000 \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=24000 \ critic.optim.lr=1e-5 \ critic.model.use_remove_padding=True \ critic.model.path=Qwen/Qwen2-7B-Instruct \ critic.model.enable_gradient_checkpointing=True \ critic.use_dynamic_bsz=True \ critic.ppo_max_token_len_per_gpu=98304 \ critic.model.fsdp_config.param_offload=False \ critic.model.fsdp_config.optimizer_offload=False \ reward_model.enable=True \ reward_model.model.path=sfairXC/FsfairX-LLaMA3-RM-v0.1\ reward_model.model.use_remove_padding=True \ reward_model.model.fsdp_config.param_offload=True \ reward_model.micro_batch_size_per_gpu=32 \ reward_model.use_dynamic_bsz=True \ reward_model.forward_max_token_len_per_gpu=98304 \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_example_gsm8k' \ trainer.experiment_name='qwen2-7b_hybrid_rm_bsz8k_p4k_r4k_seq_packing' \ trainer.n_gpus_per_node=8 \ trainer.val_before_train=False \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_rl/examples/ppo_trainer/run_qwen2-7b_rm_seq_balance_fused_kernels.sh ================================================ set -x gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet math_train_path=$HOME/data/math/train.parquet 
math_test_path=$HOME/data/math/test.parquet train_files="['$gsm8k_train_path', '$math_train_path']" test_files="['$gsm8k_test_path', '$math_test_path']" FUSED_KERNEL_BACKEND=triton # or 'torch' for torch backend python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=gae \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=4096 \ data.max_prompt_length=4096 \ data.max_response_length=4096 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.return_raw_chat=True \ actor_rollout_ref.model.path=Qwen/Qwen2-7B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.model.use_fused_kernels=True \ actor_rollout_ref.model.fused_kernel_options.impl_backend=$FUSED_KERNEL_BACKEND \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.ppo_mini_batch_size=512 \ actor_rollout_ref.actor.use_dynamic_bsz=True \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=24000 \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=24000 \ critic.optim.lr=1e-5 \ critic.model.use_remove_padding=True \ critic.model.path=Qwen/Qwen2-7B-Instruct \ critic.model.enable_gradient_checkpointing=True \ critic.use_dynamic_bsz=True \ critic.ppo_max_token_len_per_gpu=98304 \ critic.model.fsdp_config.param_offload=False \ critic.model.fsdp_config.optimizer_offload=False \ reward_model.enable=True \ reward_model.model.path=sfairXC/FsfairX-LLaMA3-RM-v0.1\ reward_model.model.use_remove_padding=True \ reward_model.model.fsdp_config.param_offload=True \ reward_model.micro_batch_size_per_gpu=32 \ reward_model.use_dynamic_bsz=True \ reward_model.forward_max_token_len_per_gpu=98304 \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_example_gsm8k' \ trainer.experiment_name='qwen2-7b_hybrid_rm_bsz8k_p4k_r4k_seq_packing_fused_kernel' \ trainer.n_gpus_per_node=8 \ trainer.val_before_train=False \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_rl/examples/ppo_trainer/run_qwen2-7b_rm_seq_balance_nsys.sh ================================================ set -x gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet math_train_path=$HOME/data/math/train.parquet math_test_path=$HOME/data/math/test.parquet train_files=${train_files:-"$gsm8k_train_path"} test_files=${test_files:-"$gsm8k_test_path"} PROFILE_STEPS="[1,2,5]" # or [] or null PROFILE_RANKS_ALL=False # or True PROFILE_RANKS=[0,4,8,12] DISCRETE=True # or True python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=gae \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=4096 \ data.max_prompt_length=4096 \ data.max_response_length=4096 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.return_raw_chat=True \ actor_rollout_ref.model.path=Qwen/Qwen2-7B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.ppo_mini_batch_size=512 \ 
actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=2 \ actor_rollout_ref.actor.use_dynamic_bsz=True \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=24000 \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=24000 \ actor_rollout_ref.profiler.ranks=$PROFILE_RANKS \ actor_rollout_ref.profiler.all_ranks=$PROFILE_RANKS_ALL \ actor_rollout_ref.profiler.discrete=$DISCRETE \ critic.optim.lr=1e-5 \ critic.model.use_remove_padding=True \ critic.model.path=Qwen/Qwen2-7B-Instruct \ critic.model.enable_gradient_checkpointing=True \ critic.ppo_micro_batch_size_per_gpu=2 \ critic.use_dynamic_bsz=True \ critic.ppo_max_token_len_per_gpu=98304 \ critic.model.fsdp_config.param_offload=False \ critic.model.fsdp_config.optimizer_offload=False \ critic.profiler.ranks=$PROFILE_RANKS \ critic.profiler.all_ranks=$PROFILE_RANKS_ALL \ critic.profiler.discrete=$DISCRETE \ reward_model.enable=True \ reward_model.model.path=sfairXC/FsfairX-LLaMA3-RM-v0.1\ reward_model.model.use_remove_padding=True \ reward_model.model.fsdp_config.param_offload=True \ reward_model.micro_batch_size_per_gpu=32 \ reward_model.use_dynamic_bsz=True \ reward_model.forward_max_token_len_per_gpu=98304 \ reward_model.profiler.ranks=$PROFILE_RANKS \ reward_model.profiler.all_ranks=$PROFILE_RANKS_ALL \ reward_model.profiler.discrete=$DISCRETE \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_example_gsm8k' \ trainer.experiment_name='qwen2-7b_hybrid_rm_bsz8k_p4k_r4k_seq_packing' \ trainer.n_gpus_per_node=8 \ trainer.val_before_train=False \ trainer.nnodes=2 \ trainer.save_freq=-1 \ trainer.test_freq=-1 \ trainer.total_epochs=15 \ trainer.total_training_steps=6 \ trainer.profile_steps=$PROFILE_STEPS $@ ================================================ FILE: verl_rl/examples/ppo_trainer/run_qwen2-7b_seq_balance.sh ================================================ set -x gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet math_train_path=$HOME/data/math/train.parquet math_test_path=$HOME/data/math/test.parquet train_files="['$gsm8k_train_path', '$math_train_path']" test_files="['$gsm8k_test_path', '$math_test_path']" # For async rollout mode, dataset should return raw chat. 
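# Switch rollout_mode to "async" below to exercise the async rollout path; the conditional that follows then sets data.return_raw_chat=True for it.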
rollout_mode="sync" if [ "$rollout_mode" = "async" ]; then return_raw_chat="True" fi python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=gae \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.return_raw_chat=$return_raw_chat \ data.train_batch_size=4096 \ data.max_prompt_length=4096 \ data.max_response_length=4096 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2-7B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.ppo_mini_batch_size=512 \ actor_rollout_ref.actor.use_dynamic_bsz=True \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=24000 \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.mode=$rollout_mode \ actor_rollout_ref.rollout.multi_turn.format=hermes \ actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=24000 \ critic.optim.lr=1e-5 \ critic.model.use_remove_padding=True \ critic.model.path=Qwen/Qwen2-7B-Instruct \ critic.model.enable_gradient_checkpointing=True \ critic.ppo_max_token_len_per_gpu=98304 \ critic.model.fsdp_config.param_offload=False \ critic.model.fsdp_config.optimizer_offload=False \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_example_gsm8k' \ trainer.experiment_name='qwen2-7b_function_rm_bsz8k_p4k_r4k_seq_packing' \ trainer.n_gpus_per_node=8 \ trainer.val_before_train=False \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_rl/examples/ppo_trainer/run_qwen2-7b_sglang_seq_balance.sh ================================================ set -x gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet math_train_path=$HOME/data/math/train.parquet math_test_path=$HOME/data/math/test.parquet train_files="['$gsm8k_train_path', '$math_train_path']" test_files="['$gsm8k_test_path', '$math_test_path']" python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=gae \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=4096 \ data.max_prompt_length=4096 \ data.max_response_length=4096 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2-7B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.ppo_mini_batch_size=512 \ actor_rollout_ref.actor.use_dynamic_bsz=True \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=24000 \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=sglang \ actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=24000 \ critic.optim.lr=1e-5 \ critic.model.use_remove_padding=True \ critic.model.path=Qwen/Qwen2-7B-Instruct \ 
critic.model.enable_gradient_checkpointing=True \ critic.ppo_max_token_len_per_gpu=98304 \ critic.model.fsdp_config.param_offload=False \ critic.model.fsdp_config.optimizer_offload=False \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_example_gsm8k' \ trainer.experiment_name='qwen2-7b_function_rm_bsz8k_p4k_r4k_seq_packing' \ trainer.n_gpus_per_node=8 \ trainer.val_before_train=False \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_rl/examples/ppo_trainer/run_qwen2.5-32b.sh ================================================ set -x gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet math_train_path=$HOME/data/math/train.parquet math_test_path=$HOME/data/math/test.parquet train_files="['$gsm8k_train_path', '$math_train_path']" test_files="['$gsm8k_test_path', '$math_test_path']" python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=gae \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2.5-32B-Instruct \ actor_rollout_ref.model.enable_gradient_checkpointing=False \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=8 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \ actor_rollout_ref.rollout.tensor_model_parallel_size=4 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \ critic.optim.lr=1e-5 \ critic.model.use_remove_padding=True \ critic.model.path=Qwen/Qwen2.5-32B-Instruct \ critic.model.enable_gradient_checkpointing=False \ critic.ppo_micro_batch_size_per_gpu=8 \ critic.model.fsdp_config.param_offload=False \ critic.model.fsdp_config.optimizer_offload=False \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_example' \ trainer.experiment_name='Qwen2.5-32B-Instruct_function_rm' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=4 \ trainer.save_freq=20 \ trainer.test_freq=10 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_rl/examples/ray/tutorial.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "0ddc582b", "metadata": {}, "source": [ "# VeRL Ray API Tutorial" ] }, { "cell_type": "markdown", "id": "71fe3b94", "metadata": {}, "source": [ "## Chapter 1: Ray Basics" ] }, { "cell_type": "code", "execution_count": 144, "id": "1347d381", "metadata": { "tags": [] }, "outputs": [], "source": [ "import os" ] }, { "cell_type": "code", "execution_count": 145, "id": "e75b9d44", "metadata": { "tags": [] }, "outputs": [], "source": [ "import warnings\n", "\n", "import ray\n", "import torch\n", "\n", "warnings.filterwarnings(\"ignore\")" ] }, { "cell_type": "code", "execution_count": 146, "id": "2e90ae00", "metadata": { "tags": [] }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": 
[ "2024-11-01 17:27:19,132\tINFO worker.py:1752 -- Started a local Ray instance.\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "9cc9d2ccbdfb48918c8fd6cd13a0807a", "version_major": 2, "version_minor": 0 }, "text/html": [ "
\n", "
\n", "
\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
Python version:3.9.2
Ray version:2.10.0
\n", "\n", "
\n", "
\n" ], "text/plain": [ "RayContext(dashboard_url='', python_version='3.9.2', ray_version='2.10.0', ray_commit='09abba26b5bf2707639bb637c208d062a47b46f6')" ] }, "execution_count": 146, "metadata": {}, "output_type": "execute_result" }, { "name": "stdout", "output_type": "stream", "text": [ "\u001b[36m(GPUAccumulator pid=224400)\u001b[0m rank 0, value: tensor([1.], device='cuda:0')\n", "\u001b[36m(GPUAccumulator pid=225234)\u001b[0m rank 2, value: tensor([3.], device='cuda:0')\n", "\u001b[36m(GPUAccumulator pid=225607)\u001b[0m rank 0, value: tensor([2.], device='cuda:0')\n", "\u001b[36m(GPUAccumulator pid=226423)\u001b[0m rank 1, value: tensor([3.], device='cuda:0')\n", "\u001b[36m(GPUAccumulator pid=226857)\u001b[0m rank 3, value: tensor([6.], device='cuda:0')\n", "\u001b[36m(GPUAccumulatorDecorator pid=227475)\u001b[0m 10\n", "\u001b[36m(GPUAccumulatorDecorator pid=227475)\u001b[0m rank 0, value: tensor([10.], device='cuda:0')\n", "\u001b[36m(GPUAccumulatorDecorator pid=227655)\u001b[0m rank 1, value: tensor([11.], device='cuda:0')\n" ] } ], "source": [ "# Build a local ray cluster. The head node and worker node are on this machine\n", "ray.init()" ] }, { "cell_type": "markdown", "id": "a127e4e4", "metadata": {}, "source": [ "Implement an Accumulator class." ] }, { "cell_type": "code", "execution_count": 147, "id": "20e7b9a3", "metadata": { "tags": [] }, "outputs": [], "source": [ "@ray.remote\n", "class Accumulator:\n", " def __init__(self):\n", " self.value = 0\n", "\n", " def add(self, x):\n", " self.value += x\n", "\n", " def get_value(self):\n", " return self.value" ] }, { "cell_type": "code", "execution_count": 148, "id": "3b80098c", "metadata": { "tags": [] }, "outputs": [], "source": [ "# Instantiate an accumulator. Accumulator can be viewed as a process, acting as an RPC service.\n", "accumulator = Accumulator.remote()" ] }, { "cell_type": "code", "execution_count": 149, "id": "b14b1009", "metadata": { "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "0\n" ] } ], "source": [ "value_ref = accumulator.get_value.remote() # Check the current value. Note that this function returns immediately and does not actually wait for the remote execution to complete.\n", "# Get the value\n", "value = ray.get(value_ref)\n", "print(value)" ] }, { "cell_type": "code", "execution_count": 150, "id": "513a84b3", "metadata": { "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "10\n" ] } ], "source": [ "# Accumulate, then check the result.\n", "accumulator.add.remote(10) # Similarly, the 'add' here will return immediately.\n", "new_value = ray.get(accumulator.get_value.remote())\n", "print(new_value)" ] }, { "cell_type": "markdown", "id": "3c332fe0", "metadata": {}, "source": [ "## Chapter 2: Resource Pool and RayWorkerGroup\n", "In the previous example, it was a simple single-process worker. \n", "In this example, we implement a worker with a GPU and form a RayWorkerGroup. Within this RayWorkerGroup, we implement a simple operation of an accumulator." 
] }, { "cell_type": "code", "execution_count": 151, "id": "04229afb", "metadata": { "tags": [] }, "outputs": [], "source": [ "from verl.single_controller.base import Worker\n", "from verl.single_controller.ray.base import RayClassWithInitArgs, RayResourcePool, RayWorkerGroup, merge_resource_pool" ] }, { "cell_type": "code", "execution_count": 152, "id": "0d0dbd58", "metadata": { "tags": [] }, "outputs": [], "source": [ "resource_pool = RayResourcePool([4], use_gpu=True)" ] }, { "cell_type": "code", "execution_count": 153, "id": "68f6838a", "metadata": { "tags": [] }, "outputs": [], "source": [ "@ray.remote\n", "class GPUAccumulator(Worker):\n", " def __init__(self) -> None:\n", " super().__init__()\n", " # The initial value of each rank is the same as the rank\n", " self.value = torch.zeros(size=(1,), device=\"cuda\") + self.rank\n", "\n", " def add(self, x):\n", " self.value += x\n", " print(f\"rank {self.rank}, value: {self.value}\")\n", " return self.value.cpu()" ] }, { "cell_type": "code", "execution_count": 154, "id": "23aad8fe", "metadata": { "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "[tensor([1.]), tensor([2.]), tensor([3.]), tensor([4.])]\n" ] } ], "source": [ "# Each worker's initial value is its rank, and then each rank's value is incremented by 1, so the values obtained on each rank are [1, 2, 3, 4]\n", "class_with_args = RayClassWithInitArgs(cls=GPUAccumulator)\n", "worker_group = RayWorkerGroup(resource_pool, class_with_args)\n", "print(worker_group.execute_all_sync(\"add\", x=[1, 1, 1, 1]))" ] }, { "cell_type": "markdown", "id": "e6705284", "metadata": {}, "source": [ "The principle of parameter passing: The input parameter is a list of length world_size, where each element in the list is dispatched respectively to each worker in the RayWorkerGroup. \n", "The return parameter is also a list, corresponding to the return value of each worker." ] }, { "cell_type": "markdown", "id": "d25c2412", "metadata": {}, "source": [ "### GPU Resource Sharing" ] }, { "cell_type": "markdown", "id": "f74f6d24", "metadata": {}, "source": [ "RayWorkerGroups mapped to the same resource pool share the GPU. In this example, we implement three resource pools: the first occupies 4 GPUs, the second also occupies 4 GPUs, and the last occupies all 8 GPUs. Among them, the first resource pool reuses the resource pool mentioned above." 
] }, { "cell_type": "code", "execution_count": 155, "id": "49f9c06f", "metadata": { "tags": [] }, "outputs": [], "source": [ "# Create a new resource pool and then merge the newly created resource pool with the previous one.\n", "resource_pool_1 = RayResourcePool([4], use_gpu=True, name_prefix=\"a\")\n", "resource_pool_merge = merge_resource_pool(resource_pool, resource_pool_1)" ] }, { "cell_type": "code", "execution_count": 156, "id": "05c2e305", "metadata": { "tags": [] }, "outputs": [], "source": [ "# Establish a RayWorkerGroup on the newly created resource pool.\n", "worker_group_1 = RayWorkerGroup(resource_pool_1, class_with_args)\n", "worker_group_merge = RayWorkerGroup(resource_pool_merge, class_with_args)" ] }, { "cell_type": "code", "execution_count": 157, "id": "6b9b13f4", "metadata": { "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "[tensor([2.]), tensor([3.]), tensor([4.]), tensor([5.])]\n" ] } ], "source": [ "# Run 'add' on the second set of 4 GPUs; the result should be [2, 3, 4, 5].\n", "output_1 = worker_group_1.execute_all_sync(\"add\", x=[2, 2, 2, 2])\n", "print(output_1)" ] }, { "cell_type": "code", "execution_count": 158, "id": "d856d030", "metadata": { "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "[tensor([3.]), tensor([4.]), tensor([5.]), tensor([6.]), tensor([7.]), tensor([8.]), tensor([9.]), tensor([10.])]\n" ] } ], "source": [ "# Run 'add' on the merged set of 8 GPUs; the result should be [3, 4, 5, 6, 7, 8, 9, 10].\n", "output_merge = worker_group_merge.execute_all_sync(\"add\", x=[3, 3, 3, 3, 3, 3, 3, 3])\n", "print(output_merge)" ] }, { "cell_type": "code", "execution_count": 159, "id": "33a4628c", "metadata": { "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "4 4 8\n" ] } ], "source": [ "print(worker_group.world_size, worker_group_1.world_size, worker_group_merge.world_size)" ] }, { "cell_type": "markdown", "id": "3df19d13", "metadata": {}, "source": [ "## Chapter 3: Data Dispatch, Execution and Collection" ] }, { "cell_type": "markdown", "id": "acb22d9d", "metadata": {}, "source": [ "In the above example, we used the `execute_all_sync` function in the RayWorkerGroup to dispatch data from the driver to each worker. This is very inconvenient for coding. \n", "In this chapter, we use the form of function decorators to allow RayWorkerGroup to directly call functions written in the Worker, and to greatly simplify parameter passing." 
] }, { "cell_type": "code", "execution_count": 160, "id": "35237432", "metadata": { "tags": [] }, "outputs": [], "source": [ "from verl.single_controller.base.decorator import Dispatch, Execute, register" ] }, { "cell_type": "code", "execution_count": 161, "id": "88b8ba3b", "metadata": { "tags": [] }, "outputs": [], "source": [ "@ray.remote\n", "class GPUAccumulatorDecorator(Worker):\n", " def __init__(self) -> None:\n", " super().__init__()\n", " # The initial value of each rank is the same as the rank\n", " self.value = torch.zeros(size=(1,), device=\"cuda\") + self.rank\n", "\n", " # map from a single input to all the worker\n", " @register(Dispatch.ONE_TO_ALL)\n", " def add(self, x):\n", " print(x)\n", " self.value = self.value + x\n", " print(f\"rank {self.rank}, value: {self.value}\")\n", " return self.value.cpu()" ] }, { "cell_type": "code", "execution_count": 162, "id": "eddaa043", "metadata": { "tags": [] }, "outputs": [], "source": [ "class_with_args = RayClassWithInitArgs(cls=GPUAccumulatorDecorator)\n", "gpu_accumulator_decorator = RayWorkerGroup(resource_pool_merge, class_with_args)" ] }, { "cell_type": "code", "execution_count": 163, "id": "10087c91", "metadata": { "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "[tensor([10.]), tensor([11.]), tensor([12.]), tensor([13.]), tensor([14.]), tensor([15.]), tensor([16.]), tensor([17.])]\n" ] } ], "source": [ "# As we can see, 10 is automatically dispatched to each Worker in this RayWorkerGroup.\n", "print(gpu_accumulator_decorator.add(x=10))" ] }, { "cell_type": "markdown", "id": "540ee6ad", "metadata": {}, "source": [ "### Custom Dispatch, Collection\n", "Users can customize `dispatch` and `collection` function. You only need to write the `dispatch_fn` and `collect_fn` functions yourself. We also support executing RPC only on rank_zero, with specific examples provided below." ] }, { "cell_type": "code", "execution_count": 164, "id": "8e041270", "metadata": { "tags": [] }, "outputs": [], "source": [ "from verl.single_controller.base.decorator import Dispatch, collect_all_to_all, register" ] }, { "cell_type": "code", "execution_count": 165, "id": "43b5be31", "metadata": { "tags": [] }, "outputs": [], "source": [ "def two_to_all_dispatch_fn(worker_group, *args, **kwargs):\n", " \"\"\"\n", " Assume the input is a list of 2. 
Duplicate the input interleaved and pass to each worker.\n", " \"\"\"\n", " for arg in args:\n", " assert len(arg) == 2\n", " for i in range(worker_group.world_size - 2):\n", " arg.append(arg[i % 2])\n", " for k, v in kwargs.items():\n", " assert len(v) == 2\n", " for i in range(worker_group.world_size - 2):\n", " v.append(v[i % 2])\n", " return args, kwargs\n", "\n", "\n", "@ray.remote\n", "class TestActor(Worker):\n", " # TODO: pass *args and **kwargs is bug prone and not very convincing\n", " def __init__(self, x) -> None:\n", " super().__init__()\n", " self._x = x\n", "\n", " def foo(self, y):\n", " return self._x + y\n", "\n", " @register(dispatch_mode=Dispatch.ALL_TO_ALL, execute_mode=Execute.RANK_ZERO)\n", " def foo_rank_zero(self, x, y):\n", " return self._x + y + x\n", "\n", " @register(dispatch_mode={\"dispatch_fn\": two_to_all_dispatch_fn, \"collect_fn\": collect_all_to_all})\n", " def foo_custom(self, x, y):\n", " return self._x + y + x" ] }, { "cell_type": "code", "execution_count": 166, "id": "83ec6609", "metadata": { "tags": [] }, "outputs": [], "source": [ "class_with_args = RayClassWithInitArgs(cls=TestActor, x=2)\n", "worker_group = RayWorkerGroup(resource_pool, class_with_args)" ] }, { "cell_type": "code", "execution_count": 167, "id": "62c58d8a", "metadata": { "tags": [] }, "outputs": [], "source": [ "output_ref = worker_group.foo_custom(x=[1, 2], y=[5, 6])\n", "assert output_ref == [8, 10, 8, 10]\n", "\n", "output_ref = worker_group.foo_rank_zero(x=1, y=2)\n", "assert output_ref == 5" ] }, { "cell_type": "code", "execution_count": 168, "id": "14689353", "metadata": { "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "8\n" ] } ], "source": [ "print(gpu_accumulator_decorator.world_size)" ] }, { "cell_type": "code", "execution_count": 169, "id": "2c80bbf4", "metadata": { "tags": [] }, "outputs": [], "source": [ "# Shutdown ray cluster\n", "ray.shutdown()" ] }, { "cell_type": "markdown", "id": "a5c8151c", "metadata": {}, "source": [ "## Chapter 4: NVMegatronRayWorkerGroup" ] }, { "cell_type": "markdown", "id": "cd5680e9", "metadata": {}, "source": [ "Due to the Ray issue, we can only support max_colocate_count=1 in RayResourcePool for now. \n", "This means that each GPU can only have one process.\n", "We can support max_colocate > 1 when applying this pull request: https://github.com/ray-project/ray/pull/44385" ] }, { "cell_type": "markdown", "id": "92724419", "metadata": {}, "source": [ "Therefore, we need to restart the ray and initialize a new resource_pool to demonstrate the **NVMegatronRayWorkerGroup**" ] }, { "cell_type": "code", "execution_count": null, "id": "9b038538", "metadata": { "tags": [] }, "outputs": [], "source": [ "# Build a local ray cluster. The head node and worker node are on this machine\n", "ray.init()" ] }, { "cell_type": "markdown", "id": "ebfd8798", "metadata": {}, "source": [ "Finally, we implement a `NVMegatronRayWorkerGroup`, within which we create a Megatron and then run a tensor parallel (tp) split Llama mlp layer. Here, we use a complex dispatch mode, `Megatron_COMPUTE`. This dispatch mode assumes that user passes the data partitioned by DP dimension. The data is dispatched to all tp/pp ranks within the same dp group, and ultimately only collects output data from tp=0 and the last pp. In this way, for users that only write code on the driver, the Megatron behind the RPC becomes transparent." 
] }, { "cell_type": "code", "execution_count": 171, "id": "5a032154", "metadata": { "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "/opt/tiger/Megatron-LM\n", "/opt/tiger/Megatron-LM/megatron/__init__.py\n" ] } ], "source": [ "import sys\n", "\n", "current_pythonpath = os.environ.get(\"PYTHONPATH\", \"\")\n", "\n", "new_path = \"/opt/tiger/Megatron-LM\"\n", "\n", "new_pythonpath = f\"{new_path}:{current_pythonpath}\" if current_pythonpath else new_path\n", "\n", "os.environ[\"PYTHONPATH\"] = new_pythonpath\n", "\n", "print(new_path)\n", "sys.path.append(new_path)\n", "\n", "import megatron\n", "\n", "print(megatron.__file__)" ] }, { "cell_type": "code", "execution_count": 172, "id": "8c84cd5a", "metadata": { "tags": [] }, "outputs": [], "source": [ "from megatron.core import parallel_state as mpu\n", "from omegaconf import OmegaConf\n", "\n", "from verl.single_controller.base.decorator import Dispatch, Execute, register\n", "from verl.single_controller.base.megatron.worker import MegatronWorker\n", "from verl.single_controller.ray.base import RayClassWithInitArgs, RayResourcePool, RayWorkerGroup\n", "from verl.single_controller.ray.megatron import NVMegatronRayWorkerGroup" ] }, { "cell_type": "code", "execution_count": 173, "id": "1b1debcc", "metadata": { "tags": [] }, "outputs": [], "source": [ "resource_pool = RayResourcePool([4], use_gpu=True, max_colocate_count=1)" ] }, { "cell_type": "code", "execution_count": 174, "id": "bccbe081", "metadata": { "tags": [] }, "outputs": [], "source": [ "@ray.remote\n", "class MLPLayerWorker(MegatronWorker):\n", " def __init__(self):\n", " super().__init__()\n", " rank = int(os.environ[\"LOCAL_RANK\"])\n", " torch.distributed.init_process_group(backend=\"nccl\")\n", " torch.cuda.set_device(rank)\n", "\n", " mpu.initialize_model_parallel(\n", " tensor_model_parallel_size=4,\n", " pipeline_model_parallel_size=1,\n", " virtual_pipeline_model_parallel_size=None,\n", " pipeline_model_parallel_split_rank=None,\n", " use_sharp=False,\n", " context_parallel_size=1,\n", " expert_model_parallel_size=1,\n", " nccl_communicator_config_path=None,\n", " )\n", " from megatron.core import tensor_parallel\n", "\n", " tensor_parallel.model_parallel_cuda_manual_seed(10)\n", "\n", " @register(Dispatch.ONE_TO_ALL)\n", " def init_model(self, config):\n", " from omegaconf import OmegaConf\n", "\n", " from verl.models.llama.megatron.layers import ParallelLlamaMLP\n", " from verl.utils.megatron_utils import init_model_parallel_config\n", "\n", " megatron_config = OmegaConf.create(\n", " {\n", " \"sequence_parallel\": False,\n", " \"param_dtype\": \"fp32\",\n", " \"tensor_model_parallel_size\": mpu.get_tensor_model_parallel_world_size(),\n", " \"pipeline_model_parallel_rank\": mpu.get_pipeline_model_parallel_rank(),\n", " \"pipeline_model_parallel_size\": mpu.get_pipeline_model_parallel_world_size(),\n", " \"virtual_pipeline_model_parallel_rank\": mpu.get_virtual_pipeline_model_parallel_rank(),\n", " \"virtual_pipeline_model_parallel_size\": mpu.get_virtual_pipeline_model_parallel_world_size(),\n", " }\n", " )\n", "\n", " megatron_config = init_model_parallel_config(megatron_config)\n", " self.parallel_layer = ParallelLlamaMLP(config=config, megatron_config=megatron_config)\n", "\n", " @register(Dispatch.ONE_TO_ALL)\n", " def get_weights(self):\n", " output = {}\n", " for key, val in self.parallel_layer.named_parameters():\n", " output[key] = val\n", " return output\n", "\n", " @register(Dispatch.MEGATRON_COMPUTE)\n", " def run_layer(self, 
x):\n", " x = x.to(\"cuda\")\n", " y = self.parallel_layer(x)\n", " return y" ] }, { "cell_type": "code", "execution_count": 175, "id": "a655271d", "metadata": { "tags": [] }, "outputs": [], "source": [ "layer_cls = RayClassWithInitArgs(cls=MLPLayerWorker)\n", "layer_worker_group = NVMegatronRayWorkerGroup(\n", " resource_pool=resource_pool,\n", " ray_cls_with_init=layer_cls,\n", ")" ] }, { "cell_type": "code", "execution_count": 176, "id": "f105ebee", "metadata": { "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "4 4 1 1\n" ] } ], "source": [ "print(layer_worker_group.world_size, layer_worker_group.tp_size, layer_worker_group.pp_size, layer_worker_group.dp_size)" ] }, { "cell_type": "code", "execution_count": 177, "id": "38655091", "metadata": { "tags": [] }, "outputs": [], "source": [ "ffn_hidden_size = 11008\n", "batch_size = 16\n", "seq_len = 2048\n", "hidden_size = 4096\n", "\n", "config = OmegaConf.create(\n", " {\n", " \"hidden_size\": hidden_size,\n", " \"intermediate_size\": ffn_hidden_size,\n", " \"hidden_act\": \"silu\",\n", " \"pretraining_tp\": 1,\n", " \"tp\": layer_worker_group.tp_size,\n", " }\n", ")" ] }, { "cell_type": "code", "execution_count": 178, "id": "a026efca", "metadata": { "tags": [] }, "outputs": [], "source": [ "x = torch.rand(size=(seq_len, batch_size, hidden_size), dtype=torch.float32)" ] }, { "cell_type": "code", "execution_count": 179, "id": "f5fcaf13", "metadata": { "tags": [] }, "outputs": [ { "data": { "text/plain": [ "[None, None, None, None]" ] }, "execution_count": 179, "metadata": {}, "output_type": "execute_result" } ], "source": [ "layer_worker_group.init_model(config)" ] }, { "cell_type": "code", "execution_count": 180, "id": "3f5cc9b4", "metadata": { "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "torch.Size([2048, 16, 4096])\n" ] } ], "source": [ "output = layer_worker_group.run_layer(\n", " [x]\n", ") # The input must be a list whose length equals the data-parallel size (dp=1 here).\n", "print(output[0].shape)" ] }, { "cell_type": "code", "execution_count": 181, "id": "49792210", "metadata": { "tags": [] }, "outputs": [], "source": [ "# Shutdown ray cluster\n", "ray.shutdown()" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.2" } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: verl_rl/examples/reinforce_plus_plus_trainer/run_qwen2-7b_math_rf.sh ================================================ set -x gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet math_train_path=$HOME/data/math/train.parquet math_test_path=$HOME/data/math/test.parquet train_files="['$gsm8k_train_path', '$math_train_path']" test_files="['$gsm8k_test_path', '$math_test_path']" python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=reinforce_plus_plus \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2-7B-Instruct \ actor_rollout_ref.actor.optim.lr=3e-6 \ actor_rollout_ref.model.use_remove_padding=True \ 
actor_rollout_ref.actor.ppo_mini_batch_size=1024 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=16 \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=mse \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=8 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=16 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=True \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_gsm8k' \ trainer.experiment_name='qwen2_7b_function_rm' \ trainer.n_gpus_per_node=16 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_rl/examples/reinforce_plus_plus_trainer/run_qwen2-7b_math_rf_baseline.sh ================================================ set -x gsm8k_train_path=$HOME/data/gsm8k/train.parquet gsm8k_test_path=$HOME/data/gsm8k/test.parquet math_train_path=$HOME/data/math/train.parquet math_test_path=$HOME/data/math/test.parquet train_files="['$gsm8k_train_path', '$math_train_path']" test_files="['$gsm8k_test_path', '$math_test_path']" python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=reinforce_plus_plus_baseline \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2-7B-Instruct \ actor_rollout_ref.actor.optim.lr=3e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=1024 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=16 \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=mse \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=8 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=16 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=True \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_gsm8k' \ trainer.experiment_name='qwen2_7b_function_rm' \ trainer.n_gpus_per_node=16 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_rl/examples/remax_trainer/run_qwen2.5-3b_seq_balance.sh ================================================ set -x export HF_DATASETS_OFFLINE=1 export TRANSFORMERS_OFFLINE=1 python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=remax \ 
data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=512 \ data.max_prompt_length=512 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2.5-3B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=128 \ actor_rollout_ref.actor.use_dynamic_bsz=True \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=30000 \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.8 \ actor_rollout_ref.rollout.n=4 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=True \ algorithm.kl_penalty=kl \ algorithm.kl_ctrl.kl_coef=0.001 \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_remax_example_gsm8k' \ trainer.experiment_name='qwen2.5_3b_function_rm_kl1e-3' \ trainer.val_before_train=False \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=5 \ trainer.total_epochs=5 $@ ================================================ FILE: verl_rl/examples/remax_trainer/run_qwen2.5-7b_seq_balance.sh ================================================ set -x export HF_DATASETS_OFFLINE=1 export TRANSFORMERS_OFFLINE=1 python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=remax \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=1024 \ data.max_prompt_length=512 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2.5-7B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.use_dynamic_bsz=True \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=24000 \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.8 \ actor_rollout_ref.rollout.n=4 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=True \ algorithm.kl_penalty=kl \ algorithm.kl_ctrl.kl_coef=0.001 \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_remax_example_gsm8k' \ trainer.experiment_name='qwen2.5_7b_function_rm_kl1e-3' \ trainer.val_before_train=False \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=5 \ trainer.total_epochs=10 $@ ================================================ FILE: verl_rl/examples/rloo_trainer/run_qwen2-7b.sh ================================================ set -x python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=rloo \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=1024 \ data.max_prompt_length=512 \ 
data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2-7B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=80 \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=160 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=160 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=True \ algorithm.kl_penalty=kl \ algorithm.kl_ctrl.kl_coef=0.001 \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_rloo_example_gsm8k' \ trainer.experiment_name='qwen2_7b_function_rm' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_rl/examples/sft/gsm8k/run_deepseek_6b7.sh ================================================ set -x if [ "$#" -lt 2 ]; then echo "Usage: run_deepseek_6b7.sh <nproc_per_node> <save_path> [other_configs...]" exit 1 fi nproc_per_node=$1 save_path=$2 # Shift the arguments so $@ refers to the rest shift 2 torchrun --standalone --nnodes=1 --nproc_per_node=$nproc_per_node \ -m verl.trainer.fsdp_sft_trainer \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.prompt_key=extra_info \ data.response_key=extra_info \ data.prompt_dict_keys=['question'] \ +data.response_dict_keys=['answer'] \ data.micro_batch_size_per_gpu=4 \ model.partial_pretrain=deepseek-ai/deepseek-coder-6.7b-instruct \ trainer.default_local_dir=$save_path \ trainer.project_name=gsm8k-sft \ trainer.experiment_name=gsm8k-sft-deepseek-coder-6.7b-instruct \ trainer.total_epochs=4 \ trainer.logger='["console","wandb"]' $@ ================================================ FILE: verl_rl/examples/sft/gsm8k/run_gemma_2b.sh ================================================ # Tested with 2 & 4 GPUs set -x if [ "$#" -lt 2 ]; then echo "Usage: run_gemma_2b.sh <nproc_per_node> <save_path> [other_configs...]" exit 1 fi nproc_per_node=$1 save_path=$2 # Shift the arguments so $@ refers to the rest shift 2 torchrun --standalone --nnodes=1 --nproc_per_node=$nproc_per_node \ -m verl.trainer.fsdp_sft_trainer \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.prompt_key=extra_info \ data.response_key=extra_info \ data.prompt_dict_keys=['question'] \ +data.response_dict_keys=['answer'] \ data.micro_batch_size_per_gpu=4 \ model.partial_pretrain=google/gemma-2b-it \ trainer.default_local_dir=$save_path \ trainer.project_name=gsm8k-sft \ trainer.experiment_name=gsm8k-sft-gemma-2b-it \ trainer.total_epochs=2 \ trainer.logger='["console","wandb"]' $@ ================================================ FILE: verl_rl/examples/sft/gsm8k/run_gemma_7b.sh ================================================ set -x if [ "$#" -lt 2 ]; then echo "Usage: run_gemma_7b.sh <nproc_per_node> <save_path> [other_configs...]" exit 1 fi nproc_per_node=$1 save_path=$2 # Shift the arguments so $@ refers to the rest shift 2 
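# The remaining arguments (now in $@) are appended after the defaults below as
# extra Hydra-style config options for the SFT trainer; for example (values
# illustrative): bash run_gemma_7b.sh 8 $HOME/ckpts/gemma-7b-sft optim.lr=1e-4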
torchrun --standalone --nnodes=1 --nproc_per_node=$nproc_per_node \ -m verl.trainer.fsdp_sft_trainer \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.prompt_key=extra_info \ data.response_key=extra_info \ data.prompt_dict_keys=['question'] \ data.response_dict_keys=['answer'] \ data.micro_batch_size_per_gpu=4 \ model.partial_pretrain=google/gemma-1.1-7b-it \ trainer.default_local_dir=$save_path \ trainer.project_name=gsm8k-sft \ trainer.experiment_name=gsm8k-sft-gemma-1.1-7b-it \ trainer.total_epochs=4 \ trainer.logger='["console","wandb"]' $@ ================================================ FILE: verl_rl/examples/sft/gsm8k/run_qwen2_5_05b_sft_peft_sp2_npu.sh ================================================ set -x if [ "$#" -lt 2 ]; then echo "Usage: run_qwen2_5_05b_sft_peft_sp2_npu.sh <nproc_per_node> <save_path> [other_configs...]" exit 1 fi nproc_per_node=$1 save_path=$2 # Shift the arguments so $@ refers to the rest shift 2 torchrun --standalone --nnodes=1 --nproc_per_node=$nproc_per_node \ -m verl.trainer.fsdp_sft_trainer \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.prompt_key=extra_info \ data.response_key=extra_info \ optim.lr=1e-4 \ data.prompt_dict_keys=['question'] \ +data.response_dict_keys=['answer'] \ data.micro_batch_size_per_gpu=64 \ model.partial_pretrain=Qwen/Qwen2.5-0.5B-Instruct \ trainer.default_local_dir=$save_path \ trainer.project_name=gsm8k-sft \ trainer.experiment_name=gsm8k-sft-qwen-2.5-0.5b-instruct \ trainer.logger=console \ trainer.total_epochs=2 $@ \ model.lora_rank=32 \ model.lora_alpha=16 \ model.target_modules=all-linear \ model.strategy=fsdp \ ulysses_sequence_parallel_size=2 \ use_remove_padding=true \ trainer.device=npu ================================================ FILE: verl_rl/examples/sft/gsm8k/run_qwen_05_peft.sh ================================================ # Tested with 2 & 4 GPUs set -x if [ "$#" -lt 2 ]; then echo "Usage: run_qwen_05_peft.sh <nproc_per_node> <save_path> [other_configs...]" exit 1 fi nproc_per_node=$1 save_path=$2 # Shift the arguments so $@ refers to the rest shift 2 torchrun --standalone --nnodes=1 --nproc_per_node=$nproc_per_node \ -m verl.trainer.fsdp_sft_trainer \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.prompt_key=extra_info \ data.response_key=extra_info \ optim.lr=1e-4 \ data.prompt_dict_keys=['question'] \ +data.response_dict_keys=['answer'] \ data.micro_batch_size_per_gpu=4 \ model.partial_pretrain=Qwen/Qwen2.5-0.5B-Instruct \ trainer.default_local_dir=$save_path \ trainer.project_name=gsm8k-sft \ trainer.experiment_name=gsm8k-sft-qwen-2.5-0.5b-instruct \ trainer.logger=console \ trainer.total_epochs=1 $@ \ model.lora_rank=32 \ model.lora_alpha=16 \ model.target_modules=all-linear # Or you can do this: # model.target_modules=[q_proj,v_proj] \ ================================================ FILE: verl_rl/examples/sft/gsm8k/run_qwen_05_sp2.sh ================================================ set -x if [ "$#" -lt 2 ]; then echo "Usage: run_qwen_05_sp2.sh <nproc_per_node> <save_path> [other_configs...]" exit 1 fi nproc_per_node=$1 save_path=$2 # Shift the arguments so $@ refers to the rest shift 2 torchrun --standalone --nnodes=1 --nproc_per_node=$nproc_per_node \ -m verl.trainer.fsdp_sft_trainer \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.prompt_key=extra_info \ data.response_key=extra_info \ optim.lr=1e-4 \ data.prompt_dict_keys=['question'] \ 
+data.response_dict_keys=['answer'] \ data.micro_batch_size=4 \ model.partial_pretrain=Qwen/Qwen2.5-0.5B-Instruct \ trainer.default_local_dir=$save_path \ trainer.project_name=gsm8k-sft \ trainer.experiment_name=gsm8k-sft-qwen-2.5-0.5b-instruct-sp2 \ trainer.logger=console \ trainer.total_training_steps=1 $@ \ ulysses_sequence_parallel_size=2 \ use_remove_padding=true ================================================ FILE: verl_rl/examples/sft/gsm8k/run_qwen_05_sp2_liger.sh ================================================ set -x if [ "$#" -lt 2 ]; then echo "Usage: run_qwen_05_sp2_liger.sh <nproc_per_node> <save_path> [other_configs...]" exit 1 fi nproc_per_node=$1 save_path=$2 # Shift the arguments so $@ refers to the rest shift 2 torchrun --standalone --nnodes=1 --nproc_per_node=$nproc_per_node \ -m verl.trainer.fsdp_sft_trainer \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.prompt_key=extra_info \ data.response_key=extra_info \ optim.lr=1e-4 \ data.prompt_dict_keys=['question'] \ +data.response_dict_keys=['answer'] \ data.micro_batch_size=4 \ model.partial_pretrain=Qwen/Qwen2.5-0.5B-Instruct \ model.use_liger=True \ trainer.default_local_dir=$save_path \ trainer.project_name=gsm8k-sft \ trainer.experiment_name=gsm8k-sft-qwen-2.5-0.5b-instruct-sp2-liger \ trainer.logger=console $@ \ ulysses_sequence_parallel_size=2 \ use_remove_padding=true ================================================ FILE: verl_rl/examples/sft/multiturn/run_qwen_05_sp2.sh ================================================ #!/bin/bash set -x if [ "$#" -lt 2 ]; then echo "Usage: run_qwen_05_sp2.sh <nproc_per_node> <save_path> [other_configs...]" exit 1 fi nproc_per_node=$1 save_path=$2 # Shift the arguments so $@ refers to the rest shift 2 torchrun --nnodes=1 --nproc_per_node=$nproc_per_node \ -m verl.trainer.fsdp_sft_trainer \ data.train_files=$HOME/data/multiturn/train.parquet \ data.val_files=$HOME/data/multiturn/test.parquet \ data.multiturn.enable=true \ data.multiturn.messages_key=messages \ data.micro_batch_size=4 \ model.partial_pretrain=Qwen/Qwen2.5-0.5B-Instruct \ trainer.default_local_dir=$save_path \ trainer.project_name=multiturn-sft \ trainer.experiment_name=multiturn-sft-qwen-2.5-0.5b-instruct-sp2 \ trainer.logger=console \ trainer.total_training_steps=1 $@ \ ulysses_sequence_parallel_size=2 \ use_remove_padding=true ================================================ FILE: verl_rl/examples/sglang_multiturn/README.md ================================================ # Multi-Turn Rollout Example (GSM8K) This example demonstrates how to perform **multi-turn rollout** using SGLang with a tool-calling capable model (e.g., Qwen2.5-3B) on the GSM8K dataset. ## Usage ### Step 1: Download GSM8K Dataset ```bash cd examples/data_preprocess python3 gsm8k_multiturn_w_tool.py ``` This will download and preprocess the GSM8K dataset into ~/data/gsm8k/. ### Step 2: Run Multi-Turn Rollout If you have 8 GPUs, use the standard 8-GPU script: ```bash cd your_verl_root_dir bash examples/sglang_multiturn/run_qwen2.5-3b_gsm8k_multiturn.sh ``` If you have only 4 GPUs, use the fallback 4-GPU script: ```bash cd your_verl_root_dir bash examples/sglang_multiturn/run_qwen2.5-3b_gsm8k_multiturn_4xgpu.sh ``` ## Notes - The rollout supports multi-turn conversations with tool-calling capabilities. - Current tools are used for GSM8K answer evaluation. - Future versions may extend to search and code interpreter tools. 
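### Passing Extra Options

Both run scripts forward any additional command-line arguments to the trainer as extra Hydra-style overrides (each script ends with `$@`). For example, a quick smoke test might look like this (the override values here are illustrative):

```bash
cd your_verl_root_dir
bash examples/sglang_multiturn/run_qwen2.5-3b_gsm8k_multiturn.sh \
    trainer.total_training_steps=2 \
    trainer.val_before_train=False
```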
================================================ FILE: verl_rl/examples/sglang_multiturn/config/geo3k_multiturn_grpo.yaml ================================================ hydra: searchpath: - file://verl/trainer/config defaults: - ppo_trainer - _self_ data: max_prompt_length: 2048 max_response_length: 2048 train_batch_size: 256 return_raw_chat: True return_multi_modal_inputs: False actor_rollout_ref: hybrid_engine: True model: custom_chat_template: "{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{%- if tools %}{{- '<|im_start|>system\\n' }}{%- if messages[0]['role'] == 'system' %}{{- messages[0]['content'] }}{%- else %}{{- 'You are a helpful assistant.' }}{%- endif %}{{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within XML tags:\\n\" }}{%- for tool in tools %}{{- \"\\n\" }}{{- tool | tojson }}{%- endfor %}{{- \"\\n\\n\\nFor each function call, return a json object with function name and arguments within XML tags:\\n\\n{\\\"name\\\": , \\\"arguments\\\": }\\n<|im_end|>\\n\" }}{% for message in messages %}{% if message['role'] != 'system' or loop.first == false %}{%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{%- elif message.role == \"assistant\" %}{{- '<|im_start|>' + message.role }}{%- if message.content %}{{- '\\n' + message.content }}{%- endif %}{%- for tool_call in message.tool_calls %}{%- if tool_call.function is defined %}{%- set tool_call = tool_call.function %}{%- endif %}{{- '\\n\\n{\"name\": \"' }}{{- tool_call.name }}{{- '\", \"arguments\": ' }}{{- tool_call.arguments | tojson }}{{- '}\\n' }}{%- endfor %}{{- '<|im_end|>\\n' }}{%- elif message.role == \"tool\" %}{%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}{{- '<|im_start|>user' }}{%- endif %}{{- '\\n\\n' }}{% if message['content'] is string %}{{ message.content }}{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif content['type'] == 'text' or 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}{% endif %}{{- '\\n' }}{%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}{{- '<|im_end|>\\n' }}{%- endif %}{%- endif %}{% endif %}{% endfor %}{%- else %}{% for message in messages %}{% if loop.first 
and message['role'] != 'system' %}<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n{% endif %}{%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{%- elif message.role == \"assistant\" %}{{- '<|im_start|>' + message.role }}{%- if message.content %}{{- '\\n' + message.content }}{%- endif %}{%- for tool_call in message.tool_calls %}{%- if tool_call.function is defined %}{%- set tool_call = tool_call.function %}{%- endif %}{{- '\\n\\n{\"name\": \"' }}{{- tool_call.name }}{{- '\", \"arguments\": ' }}{{- tool_call.arguments | tojson }}{{- '}\\n' }}{%- endfor %}{{- '<|im_end|>\\n' }}{%- elif message.role == \"tool\" %}{%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}{{- '<|im_start|>user' }}{%- endif %}{{- '\\n\\n' }}{% if message['content'] is string %}{{ message.content }}{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif content['type'] == 'text' or 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}{% endif %}{{- '\\n' }}{%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}{{- '<|im_end|>\\n' }}{%- endif %}{%- endif %}{% endfor %}{%- endif %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}" rollout: name: sglang multi_turn: enable: True max_assistant_turns: 5 # tool_config_path: "./config/tool_config/gsm8k_tool_config.yaml" ================================================ FILE: verl_rl/examples/sglang_multiturn/config/geo3k_multiturn_megatron_grpo.yaml ================================================ hydra: searchpath: - file://verl/trainer/config defaults: - ppo_megatron_trainer - _self_ data: max_prompt_length: 2048 max_response_length: 2048 train_batch_size: 256 return_raw_chat: True return_multi_modal_inputs: False actor_rollout_ref: hybrid_engine: True model: custom_chat_template: "{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{%- if tools %}{{- '<|im_start|>system\\n' }}{%- if messages[0]['role'] == 'system' %}{{- messages[0]['content'] }}{%- else %}{{- 'You are a helpful assistant.' 
}}{%- endif %}{{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within XML tags:\\n\" }}{%- for tool in tools %}{{- \"\\n\" }}{{- tool | tojson }}{%- endfor %}{{- \"\\n\\n\\nFor each function call, return a json object with function name and arguments within XML tags:\\n\\n{\\\"name\\\": , \\\"arguments\\\": }\\n<|im_end|>\\n\" }}{% for message in messages %}{% if message['role'] != 'system' or loop.first == false %}{%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{%- elif message.role == \"assistant\" %}{{- '<|im_start|>' + message.role }}{%- if message.content %}{{- '\\n' + message.content }}{%- endif %}{%- for tool_call in message.tool_calls %}{%- if tool_call.function is defined %}{%- set tool_call = tool_call.function %}{%- endif %}{{- '\\n\\n{\"name\": \"' }}{{- tool_call.name }}{{- '\", \"arguments\": ' }}{{- tool_call.arguments | tojson }}{{- '}\\n' }}{%- endfor %}{{- '<|im_end|>\\n' }}{%- elif message.role == \"tool\" %}{%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}{{- '<|im_start|>user' }}{%- endif %}{{- '\\n\\n' }}{% if message['content'] is string %}{{ message.content }}{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif content['type'] == 'text' or 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}{% endif %}{{- '\\n' }}{%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}{{- '<|im_end|>\\n' }}{%- endif %}{%- endif %}{% endif %}{% endfor %}{%- else %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n{% endif %}{%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' 
in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{%- elif message.role == \"assistant\" %}{{- '<|im_start|>' + message.role }}{%- if message.content %}{{- '\\n' + message.content }}{%- endif %}{%- for tool_call in message.tool_calls %}{%- if tool_call.function is defined %}{%- set tool_call = tool_call.function %}{%- endif %}{{- '\\n\\n{\"name\": \"' }}{{- tool_call.name }}{{- '\", \"arguments\": ' }}{{- tool_call.arguments | tojson }}{{- '}\\n' }}{%- endfor %}{{- '<|im_end|>\\n' }}{%- elif message.role == \"tool\" %}{%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}{{- '<|im_start|>user' }}{%- endif %}{{- '\\n\\n' }}{% if message['content'] is string %}{{ message.content }}{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif content['type'] == 'text' or 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}{% endif %}{{- '\\n' }}{%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}{{- '<|im_end|>\\n' }}{%- endif %}{%- endif %}{% endfor %}{%- endif %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}" rollout: name: sglang multi_turn: enable: True max_assistant_turns: 5 # tool_config_path: "./config/tool_config/gsm8k_tool_config.yaml" ================================================ FILE: verl_rl/examples/sglang_multiturn/config/gsm8k_multiturn_grpo.yaml ================================================ hydra: searchpath: - file://verl/trainer/config defaults: - ppo_trainer - _self_ data: max_prompt_length: 1024 max_response_length: 1024 train_batch_size: 256 return_raw_chat: True actor_rollout_ref: hybrid_engine: True rollout: name: sglang multi_turn: enable: True max_assistant_turns: 5 ================================================ FILE: verl_rl/examples/sglang_multiturn/config/gsm8k_multiturn_grpo_w_interaction.yaml ================================================ hydra: searchpath: - file://verl/trainer/config defaults: - ppo_trainer - _self_ data: max_prompt_length: 1024 max_response_length: 1024 train_batch_size: 256 return_raw_chat: True actor_rollout_ref: hybrid_engine: True rollout: name: sglang multi_turn: enable: True max_user_turns: 5 ================================================ FILE: verl_rl/examples/sglang_multiturn/config/gsm8k_multiturn_megatron_grpo.yaml ================================================ hydra: searchpath: - file://verl/trainer/config defaults: - ppo_megatron_trainer - _self_ data: max_prompt_length: 1024 max_response_length: 1024 train_batch_size: 256 return_raw_chat: True actor_rollout_ref: hybrid_engine: True rollout: name: sglang multi_turn: enable: True max_assistant_turns: 5 ================================================ FILE: verl_rl/examples/sglang_multiturn/config/interaction_config/gsm8k_interaction_config.yaml ================================================ interaction: - name: "gsm8k" class_name: 
"verl.interactions.gsm8k_interaction.Gsm8kInteraction" config: {} ================================================ FILE: verl_rl/examples/sglang_multiturn/config/retool_multiturn_grpo.yaml ================================================ hydra: searchpath: - file://verl/trainer/config defaults: - ppo_trainer - _self_ data: max_prompt_length: 1024 max_response_length: 1024 train_batch_size: 256 return_raw_chat: True actor_rollout_ref: hybrid_engine: True rollout: name: sglang multi_turn: enable: True max_assistant_turns: 5 tool_config_path: "./config/tool_config/sandbox_fusion_tool_config.yaml" ================================================ FILE: verl_rl/examples/sglang_multiturn/config/search_multiturn_grpo.yaml ================================================ hydra: searchpath: - file://verl/trainer/config defaults: - ppo_trainer - _self_ data: max_prompt_length: 1024 max_response_length: 1024 train_batch_size: 256 return_raw_chat: True shuffle: False actor_rollout_ref: hybrid_engine: True rollout: name: sglang multi_turn: enable: True max_assistant_turns: 2 format: qwen ================================================ FILE: verl_rl/examples/sglang_multiturn/config/tool_config/geo3k_tool_config.yaml ================================================ tools: - class_name: "verl.tools.geo3k_tool.Geo3kTool" config: type: native tool_schema: type: "function" function: name: "calc_geo3k_reward" description: "A tool for calculating the reward of geo3k. (1.0 if parsed answer is correct, 0.0 if parsed answer is incorrect or not correctly parsed)" parameters: type: "object" properties: answer: type: "string" description: "The model's answer to the geo3k problem, must be a digits" required: ["answer"] ================================================ FILE: verl_rl/examples/sglang_multiturn/config/tool_config/gsm8k_tool_config.yaml ================================================ tools: - class_name: "verl.tools.gsm8k_tool.Gsm8kTool" config: type: native tool_schema: type: "function" function: name: "calc_gsm8k_reward" description: "A tool for calculating the reward of gsm8k. 
(1.0 if parsed answer is correct, 0.0 if parsed answer is incorrect or not correctly parsed)" parameters: type: "object" properties: answer: type: "string" description: "The model's answer to the GSM8K math problem, must be digits" required: ["answer"] ================================================ FILE: verl_rl/examples/sglang_multiturn/config/tool_config/mcp_server.json ================================================ { "mcpServers": { "Tavily Expert": { "url": "your_tavily_expert_url", "auth_token": "your_tavily_api_token" } } } ================================================ FILE: verl_rl/examples/sglang_multiturn/config/tool_config/mcp_tool_config.yaml ================================================ tools: - class_name: verl.tools.mcp_search_tool.MCPSearchTool config: rate_limit: 120 timeout: 120 type: mcp mcp: mcp_servers_config_path: ./mcp_server.json # optional tool_selected_list: - tavily_search_tool ================================================ FILE: verl_rl/examples/sglang_multiturn/config/tool_config/sandbox_fusion_tool_config.yaml ================================================ tools: - class_name: "verl.tools.sandbox_fusion_tools.SandboxFusionTool" config: sandbox_fusion_url: "https://xxx.apigateway-cn-beijing.volceapi.com/run_code" num_workers: 10 enable_global_rate_limit: true rate_limit: 10 default_timeout: 30 default_language: "python" memory_limit_mb: 1024 type: native tool_schema: type: "function" function: name: "code_interpreter" description: "A tool for executing code." parameters: type: "object" properties: code: type: "string" description: "The code to execute." required: ["code"] ================================================ FILE: verl_rl/examples/sglang_multiturn/config/tool_config/search_tool_config.yaml ================================================ tools: - class_name: verl.tools.search_tool.SearchTool config: retrieval_service_url: http://127.0.0.1:8000/retrieve num_workers: 120 rate_limit: 120 timeout: 30 type: native tool_schema: type: function function: name: search description: Searches the web for relevant information based on the given query. parameters: type: object properties: query_list: type: array item: type: string description: A list of fully-formed semantic queries. The tool will return search results for each query. 
required: - query_list ================================================ FILE: verl_rl/examples/sglang_multiturn/geo3k/run_qwen2.5-3b_geo3k_multiturn.sh ================================================ # run on 8xH100 # make sure your current working directory is the root of the project set -x ulimit -n 65535 PROJECT_DIR="$(pwd)" CONFIG_PATH="$PROJECT_DIR/examples/sglang_multiturn/config" python3 -m verl.trainer.main_ppo \ --config-path="$CONFIG_PATH" \ --config-name='geo3k_multiturn_grpo' \ algorithm.adv_estimator=grpo \ data.train_batch_size=256 \ data.max_prompt_length=2048 \ data.max_response_length=2048 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.return_raw_chat=True \ actor_rollout_ref.model.path=Qwen/Qwen2.5-VL-3B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=32 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=sglang \ actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \ actor_rollout_ref.rollout.n=16 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='geo3k_async_rl' \ trainer.experiment_name='qwen2.5-3b_function_rm-geo3k-sgl-multi-w-tool-verify-n16' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=20 \ data.train_files=$HOME/data/geo3k_multiturn_w_tool/train.parquet \ data.val_files=$HOME/data/geo3k_multiturn_w_tool/test.parquet \ actor_rollout_ref.rollout.multi_turn.tool_config_path="$PROJECT_DIR/examples/sglang_multiturn/config/tool_config/geo3k_tool_config.yaml" \ trainer.total_epochs=15 $@ ================================================ FILE: verl_rl/examples/sglang_multiturn/geo3k/run_qwen2.5-3b_geo3k_multiturn_4xgpu.sh ================================================ # run on 4xH100 # make sure your current working directory is the root of the project set -x export HYDRA_FULL_ERROR=1 ulimit -n 65535 PROJECT_DIR="$(pwd)" CONFIG_PATH="$PROJECT_DIR/examples/sglang_multiturn/config" python3 -m verl.trainer.main_ppo \ --config-path="$CONFIG_PATH" \ --config-name='geo3k_multiturn_grpo' \ algorithm.adv_estimator=grpo \ data.train_batch_size=256 \ data.max_prompt_length=2048 \ data.max_response_length=2048 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.return_raw_chat=True \ actor_rollout_ref.model.path=Qwen/Qwen2.5-VL-3B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=32 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ 
actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=sglang \ actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \ actor_rollout_ref.rollout.n=16 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='geo3k_async_rl' \ trainer.experiment_name='qwen2.5-3b_function_rm-geo3k-async-sgl-multi-w-tool-verify-n16-4cards' \ trainer.n_gpus_per_node=4 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=20 \ trainer.total_epochs=15 \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=8192 \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=8192 \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=8192 \ critic.ppo_max_token_len_per_gpu=8192 \ critic.forward_max_token_len_per_gpu=8192 \ data.train_files=$HOME/data/geo3k/train.parquet \ data.val_files=$HOME/data/geo3k/test.parquet \ actor_rollout_ref.rollout.multi_turn.tool_config_path="$PROJECT_DIR/examples/sglang_multiturn/config/tool_config/geo3k_tool_config.yaml" \ $@ ================================================ FILE: verl_rl/examples/sglang_multiturn/geo3k/run_qwen2.5-3b_megatron_geo3k_multiturn.sh ================================================ # run on 8xH100 # make sure your current working directory is the root of the project # this is a verification training script, the parallel setting should be tuned to your model set -x export PYTHONUNBUFFERED=1 export RAY_DEDUP_LOGS=0 export RUST_BACKTRACE=1 export HYDRA_FULL_ERROR=1 export CUDA_DEVICE_MAX_CONNECTIONS=1 ulimit -n 65535 PROJECT_DIR="$(pwd)" CONFIG_PATH="$PROJECT_DIR/examples/sglang_multiturn/config" python3 -m verl.trainer.main_ppo \ --config-path="$CONFIG_PATH" \ --config-name='geo3k_multiturn_megatron_grpo' \ algorithm.adv_estimator=grpo \ data.train_batch_size=256 \ data.max_prompt_length=2048 \ data.max_response_length=2048 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.return_raw_chat=True \ actor_rollout_ref.model.path=Qwen/Qwen2.5-VL-3B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=32 \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=2 \ actor_rollout_ref.actor.megatron.virtual_pipeline_model_parallel_size=2 \ actor_rollout_ref.actor.megatron.context_parallel_size=2 \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=2 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.megatron.seed=42 \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=2 \ actor_rollout_ref.ref.megatron.virtual_pipeline_model_parallel_size=2 \ actor_rollout_ref.ref.megatron.context_parallel_size=2 \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=sglang \ actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \ actor_rollout_ref.rollout.n=8 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=32 \ 
algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='geo3k_async_rl' \ trainer.experiment_name='qwen2.5-3b_function_rm-geo3k-sgl-multi-w-tool-n8-mcore-v2505201745_seed42' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=20 \ data.train_files=$HOME/data/geo3k_multiturn_w_tool/train.parquet \ data.val_files=$HOME/data/geo3k_multiturn_w_tool/test.parquet \ actor_rollout_ref.rollout.multi_turn.tool_config_path="$PROJECT_DIR/examples/sglang_multiturn/config/tool_config/geo3k_tool_config.yaml" \ trainer.total_epochs=15 $@ ================================================ FILE: verl_rl/examples/sglang_multiturn/run_qwen0.5b_gsm8k_multiturn_curriculum.sh ================================================ # run on 8xH100 # make sure your current working directory is the root of the project set -x ulimit -n 65535 PROJECT_DIR="$(pwd)" CONFIG_PATH="$PROJECT_DIR/examples/sglang_multiturn/config" python3 -m verl.trainer.main_ppo \ --config-path="$CONFIG_PATH" \ --config-name='gsm8k_multiturn_grpo' \ algorithm.adv_estimator=grpo \ data.sampler.class_name="RandomCurriculumSampler" \ data.sampler.class_path="pkg://tests.utils.dataset.test_create_rl_sampler_on_cpu" \ data.dataloader_num_workers=0 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.train_batch_size=256 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.return_raw_chat=True \ actor_rollout_ref.model.path=Qwen/Qwen2.5-0.5B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=32 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=sglang \ actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \ actor_rollout_ref.rollout.n=16 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='gsm8k_async_rl' \ trainer.experiment_name='qwen3-4b_function_rm-gsm8k-sgl-multi-w-tool-verify-n16' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=20 \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ actor_rollout_ref.rollout.multi_turn.tool_config_path="$PROJECT_DIR/examples/sglang_multiturn/config/tool_config/gsm8k_tool_config.yaml" \ trainer.total_epochs=15 $@ ================================================ FILE: verl_rl/examples/sglang_multiturn/run_qwen2.5-0.5b_gsm8k_multiturn_w_interaction.sh ================================================ # run on 8xH100 # make sure your current working directory is the root of the project set -x ulimit -n 65535 PROJECT_DIR="$(pwd)" CONFIG_PATH="$PROJECT_DIR/examples/sglang_multiturn/config" TRAIN_BATCH_SIZE=${TRAIN_BATCH_SIZE:-512} MICRO_BATCH_SIZE=${MICRO_BATCH_SIZE:-8} OFFLOAD=${OFFLOAD:-False} python3 -m 
verl.trainer.main_ppo \ --config-path="$CONFIG_PATH" \ --config-name='gsm8k_multiturn_grpo_w_interaction' \ algorithm.adv_estimator=grpo \ data.train_batch_size=$TRAIN_BATCH_SIZE \ data.max_prompt_length=1024 \ data.max_response_length=$((1024 * 3)) \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.return_raw_chat=True \ actor_rollout_ref.model.path=Qwen/Qwen2.5-0.5B-Instruct \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ +actor_rollout_ref.model.enable_activation_offloading=True \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=$TRAIN_BATCH_SIZE \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=$MICRO_BATCH_SIZE \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.fsdp_config.param_offload=$OFFLOAD \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=$OFFLOAD \ +actor_rollout_ref.actor.fsdp_config.model_dtype=bfloat16 \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=$MICRO_BATCH_SIZE \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=sglang \ actor_rollout_ref.rollout.gpu_memory_utilization=0.7 \ actor_rollout_ref.rollout.n=8 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=$MICRO_BATCH_SIZE \ actor_rollout_ref.ref.fsdp_config.param_offload=$OFFLOAD \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='gsm8k_async_rl' \ trainer.experiment_name='qwen2.5-0.5b_function_rm-gsm8k-sgl-multi-w-interaction-n8' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=20 \ data.train_files=$HOME/data/gsm8k_verl_sgl_multi_turn_w_interaction/train.parquet \ data.val_files=$HOME/data/gsm8k_verl_sgl_multi_turn_w_interaction/test.parquet \ actor_rollout_ref.rollout.multi_turn.interaction_config_path="$PROJECT_DIR/examples/sglang_multiturn/config/interaction_config/gsm8k_interaction_config.yaml" \ trainer.total_epochs=15 $@ ================================================ FILE: verl_rl/examples/sglang_multiturn/run_qwen2.5-3b_gsm8k_multiturn.sh ================================================ # run on 8xH100 # make sure your current working directory is the root of the project set -x ulimit -n 65535 PROJECT_DIR="$(pwd)" CONFIG_PATH="$PROJECT_DIR/examples/sglang_multiturn/config" python3 -m verl.trainer.main_ppo \ --config-path="$CONFIG_PATH" \ --config-name='gsm8k_multiturn_grpo' \ algorithm.adv_estimator=grpo \ data.train_batch_size=256 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.return_raw_chat=True \ actor_rollout_ref.model.path=Qwen/Qwen2.5-3B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=32 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=32 \ 
actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=sglang \ actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \ actor_rollout_ref.rollout.n=16 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='gsm8k_async_rl' \ trainer.experiment_name='qwen2.5-3b_function_rm-gsm8k-sgl-multi-w-tool-verify-n16' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=20 \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ actor_rollout_ref.rollout.multi_turn.tool_config_path="$PROJECT_DIR/examples/sglang_multiturn/config/tool_config/gsm8k_tool_config.yaml" \ trainer.total_epochs=15 \ actor_rollout_ref.rollout.update_weights_bucket_megabytes=512 $@ ================================================ FILE: verl_rl/examples/sglang_multiturn/run_qwen2.5-3b_gsm8k_multiturn_4xgpu.sh ================================================ # run on 4xH100 # make sure your current working directory is the root of the project set -x export HYDRA_FULL_ERROR=1 ulimit -n 65535 PROJECT_DIR="$(pwd)" CONFIG_PATH="$PROJECT_DIR/examples/sglang_multiturn/config" python3 -m verl.trainer.main_ppo \ --config-path="$CONFIG_PATH" \ --config-name='gsm8k_multiturn_grpo' \ algorithm.adv_estimator=grpo \ data.train_batch_size=256 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.return_raw_chat=True \ actor_rollout_ref.model.path=Qwen/Qwen2.5-3B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=32 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=sglang \ actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \ actor_rollout_ref.rollout.n=16 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='gsm8k_async_rl' \ trainer.experiment_name='qwen2.5-3b_function_rm-gsm8k-async-sgl-multi-w-tool-verify-n16-4cards' \ trainer.n_gpus_per_node=4 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=20 \ trainer.total_epochs=15 \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=8192 \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=8192 \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=8192 \ critic.ppo_max_token_len_per_gpu=8192 \ critic.forward_max_token_len_per_gpu=8192 \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ actor_rollout_ref.rollout.multi_turn.tool_config_path="$PROJECT_DIR/examples/sglang_multiturn/config/tool_config/gsm8k_tool_config.yaml" \ 
actor_rollout_ref.rollout.multi_turn.interaction_config_path="$PROJECT_DIR/examples/sglang_multiturn/config/interaction_config/gsm8k_interaction_config.yaml" \ actor_rollout_ref.rollout.multi_turn.max_user_turns=1 \ $@ ================================================ FILE: verl_rl/examples/sglang_multiturn/run_qwen2.5-3b_gsm8k_tool_agent_mlflow.sh ================================================ # run on 8xH100 # make sure your current working directory is the root of the project set -x ulimit -n 65535 PROJECT_DIR="$(pwd)" CONFIG_PATH="$PROJECT_DIR/examples/sglang_multiturn/config" python3 -m verl.trainer.main_ppo \ --config-path="$CONFIG_PATH" \ --config-name='gsm8k_multiturn_grpo' \ algorithm.adv_estimator=grpo \ data.train_batch_size=256 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.return_raw_chat=True \ actor_rollout_ref.model.path=Qwen/Qwen2.5-3B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=32 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=sglang \ actor_rollout_ref.rollout.mode=async \ actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \ actor_rollout_ref.rollout.n=16 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ actor_rollout_ref.rollout.trace.backend=mlflow \ actor_rollout_ref.rollout.trace.token2text=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","mlflow"]' \ trainer.project_name='gsm8k_tool-agent' \ trainer.experiment_name='qwen2.5-3b_function_rm-gsm8k-sgl-tool-agent-verify-n16' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=20 \ trainer.total_training_steps=2 \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ actor_rollout_ref.rollout.multi_turn.tool_config_path="$PROJECT_DIR/examples/sglang_multiturn/config/tool_config/gsm8k_tool_config.yaml" \ trainer.total_epochs=15 $@ ================================================ FILE: verl_rl/examples/sglang_multiturn/run_qwen2.5-3b_megatron_gsm8k_multiturn.sh ================================================ # run on 8xH100 # make sure your current working directory is the root of the project # this is a verification training script, the parallel setting should be tuned to your model set -x export PYTHONUNBUFFERED=1 export RAY_DEDUP_LOGS=0 export RUST_BACKTRACE=1 export HYDRA_FULL_ERROR=1 export CUDA_DEVICE_MAX_CONNECTIONS=1 ulimit -n 65535 PROJECT_DIR="$(pwd)" CONFIG_PATH="$PROJECT_DIR/examples/sglang_multiturn/config" python3 -m verl.trainer.main_ppo \ --config-path="$CONFIG_PATH" \ --config-name='gsm8k_multiturn_megatron_grpo' \ algorithm.adv_estimator=grpo \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ 
data.return_raw_chat=True \ actor_rollout_ref.model.path=/user/longxiang1/models/Qwen/Qwen2.5-3B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=16 \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=2 \ actor_rollout_ref.actor.megatron.virtual_pipeline_model_parallel_size=2 \ actor_rollout_ref.actor.megatron.context_parallel_size=2 \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=2 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.megatron.seed=42 \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=2 \ actor_rollout_ref.ref.megatron.virtual_pipeline_model_parallel_size=2 \ actor_rollout_ref.ref.megatron.context_parallel_size=2 \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=sglang \ actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \ actor_rollout_ref.rollout.n=8 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=16 \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='gsm8k_async_rl' \ trainer.experiment_name='qwen2.5-3b_function_rm-gsm8k-sgl-multi-w-tool-n8-mcore-v2505201745_seed42' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=20 \ data.train_files=/user/longxiang1/data/gsm8k_verl_sgl_multi_turn_preprocessed_v2/train.parquet \ data.val_files=/user/longxiang1/data/gsm8k_verl_sgl_multi_turn_preprocessed_v2/test.parquet \ actor_rollout_ref.rollout.multi_turn.tool_config_path="$PROJECT_DIR/examples/sglang_multiturn/config/tool_config/gsm8k_tool_config.yaml" \ trainer.total_epochs=15 $@ ================================================ FILE: verl_rl/examples/sglang_multiturn/run_qwen3-4b_gsm8k_multiturn.sh ================================================ # run on 8xH100 # make sure your current working directory is the root of the project set -x ulimit -n 65535 PROJECT_DIR="$(pwd)" CONFIG_PATH="$PROJECT_DIR/examples/sglang_multiturn/config" python3 -m verl.trainer.main_ppo \ --config-path="$CONFIG_PATH" \ --config-name='gsm8k_multiturn_grpo' \ algorithm.adv_estimator=grpo \ data.train_batch_size=256 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.return_raw_chat=True \ actor_rollout_ref.model.path=Qwen/Qwen3-4B \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=32 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=sglang \ actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \ actor_rollout_ref.rollout.n=16 \ 
actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='gsm8k_async_rl' \ trainer.experiment_name='qwen3-4b_function_rm-gsm8k-sgl-multi-w-tool-verify-n16' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=20 \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ actor_rollout_ref.rollout.multi_turn.tool_config_path="$PROJECT_DIR/examples/sglang_multiturn/config/tool_config/gsm8k_tool_config.yaml" \ trainer.total_epochs=15 $@ ================================================ FILE: verl_rl/examples/sglang_multiturn/search_r1_like/local_dense_retriever/download.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2023-2024 SGLang Team # Copyright 2025 Search-R1 Contributors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Adapted from https://github.com/PeterGriffinJin/Search-R1/blob/main/scripts/download.py import argparse from huggingface_hub import hf_hub_download parser = argparse.ArgumentParser(description="Download files from a Hugging Face dataset repository.") parser.add_argument("--repo_id", type=str, default="PeterJinGo/wiki-18-e5-index", help="Hugging Face repository ID") parser.add_argument("--save_path", type=str, required=True, help="Local directory to save files") args = parser.parse_args() repo_id = args.repo_id # honor --repo_id; previously this was hardcoded, silently ignoring the flag for file in ["part_aa", "part_ab"]: hf_hub_download( repo_id=repo_id, filename=file, # index shards; concatenate after download into e.g. "e5_Flat.index" repo_type="dataset", local_dir=args.save_path, ) repo_id = "PeterJinGo/wiki-18-corpus" hf_hub_download( repo_id=repo_id, filename="wiki-18.jsonl.gz", repo_type="dataset", local_dir=args.save_path, ) ================================================ FILE: verl_rl/examples/sglang_multiturn/search_r1_like/local_dense_retriever/retrieval_server.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2023-2024 SGLang Team # Copyright 2025 Search-R1 Contributors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
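# Usage sketch (illustrative invocation with placeholder paths; the flags match the
# argparse defined at the bottom of this file). Starts a FastAPI service on port 8000:
#
#   python retrieval_server.py \
#       --index_path /path/to/e5_Flat.index \
#       --corpus_path /path/to/wiki-18.jsonl \
#       --retriever_name e5 \
#       --retriever_model intfloat/e5-base-v2 \
#       --faiss_gpu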
# Adapted from https://github.com/PeterGriffinJin/Search-R1/blob/main/search_r1/search/retrieval_server.py import argparse import json import warnings from typing import Optional import datasets import faiss import numpy as np import torch import uvicorn from fastapi import FastAPI from pydantic import BaseModel from tqdm import tqdm from transformers import AutoModel, AutoTokenizer def load_corpus(corpus_path: str): corpus = datasets.load_dataset("json", data_files=corpus_path, split="train", num_proc=4) return corpus def load_docs(corpus, doc_idxs): results = [corpus[int(idx)] for idx in doc_idxs] return results def load_model(model_path: str, use_fp16: bool = False): model = AutoModel.from_pretrained(model_path, trust_remote_code=True) model.eval() model.cuda() if use_fp16: model = model.half() tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True, trust_remote_code=True) return model, tokenizer def pooling(pooler_output, last_hidden_state, attention_mask=None, pooling_method="mean"): if pooling_method == "mean": last_hidden = last_hidden_state.masked_fill(~attention_mask[..., None].bool(), 0.0) return last_hidden.sum(dim=1) / attention_mask.sum(dim=1)[..., None] elif pooling_method == "cls": return last_hidden_state[:, 0] elif pooling_method == "pooler": return pooler_output else: raise NotImplementedError("Pooling method not implemented!") class Encoder: def __init__(self, model_name, model_path, pooling_method, max_length, use_fp16): self.model_name = model_name self.model_path = model_path self.pooling_method = pooling_method self.max_length = max_length self.use_fp16 = use_fp16 self.model, self.tokenizer = load_model(model_path=model_path, use_fp16=use_fp16) self.model.eval() @torch.no_grad() def encode(self, query_list: list[str], is_query=True) -> np.ndarray: # processing query for different encoders if isinstance(query_list, str): query_list = [query_list] if "e5" in self.model_name.lower(): if is_query: query_list = [f"query: {query}" for query in query_list] else: query_list = [f"passage: {query}" for query in query_list] if "bge" in self.model_name.lower(): if is_query: query_list = [ f"Represent this sentence for searching relevant passages: {query}" for query in query_list ] inputs = self.tokenizer( query_list, max_length=self.max_length, padding=True, truncation=True, return_tensors="pt" ) inputs = {k: v.cuda() for k, v in inputs.items()} if "T5" in type(self.model).__name__: # T5-based retrieval model decoder_input_ids = torch.zeros((inputs["input_ids"].shape[0], 1), dtype=torch.long).to( inputs["input_ids"].device ) output = self.model(**inputs, decoder_input_ids=decoder_input_ids, return_dict=True) query_emb = output.last_hidden_state[:, 0, :] else: output = self.model(**inputs, return_dict=True) query_emb = pooling( output.pooler_output, output.last_hidden_state, inputs["attention_mask"], self.pooling_method ) if "dpr" not in self.model_name.lower(): query_emb = torch.nn.functional.normalize(query_emb, dim=-1) query_emb = query_emb.detach().cpu().numpy() query_emb = query_emb.astype(np.float32, order="C") del inputs, output torch.cuda.empty_cache() return query_emb class BaseRetriever: def __init__(self, config): self.config = config self.retrieval_method = config.retrieval_method self.topk = config.retrieval_topk self.index_path = config.index_path self.corpus_path = config.corpus_path def _search(self, query: str, num: int, return_score: bool): raise NotImplementedError def _batch_search(self, query_list: list[str], num: int, return_score: bool): raise 
NotImplementedError def search(self, query: str, num: int = None, return_score: bool = False): return self._search(query, num, return_score) def batch_search(self, query_list: list[str], num: int = None, return_score: bool = False): return self._batch_search(query_list, num, return_score) class BM25Retriever(BaseRetriever): def __init__(self, config): super().__init__(config) from pyserini.search.lucene import LuceneSearcher self.searcher = LuceneSearcher(self.index_path) self.contain_doc = self._check_contain_doc() if not self.contain_doc: self.corpus = load_corpus(self.corpus_path) self.max_process_num = 8 def _check_contain_doc(self): return self.searcher.doc(0).raw() is not None def _search(self, query: str, num: int = None, return_score: bool = False): if num is None: num = self.topk hits = self.searcher.search(query, num) if len(hits) < 1: if return_score: return [], [] else: return [] scores = [hit.score for hit in hits] if len(hits) < num: warnings.warn("Not enough documents retrieved!", stacklevel=2) else: hits = hits[:num] if self.contain_doc: all_contents = [json.loads(self.searcher.doc(hit.docid).raw())["contents"] for hit in hits] results = [ { "title": content.split("\n")[0].strip('"'), "text": "\n".join(content.split("\n")[1:]), "contents": content, } for content in all_contents ] else: results = load_docs(self.corpus, [hit.docid for hit in hits]) if return_score: return results, scores else: return results def _batch_search(self, query_list: list[str], num: int = None, return_score: bool = False): results = [] scores = [] for query in query_list: item_result, item_score = self._search(query, num, True) results.append(item_result) scores.append(item_score) if return_score: return results, scores else: return results class DenseRetriever(BaseRetriever): def __init__(self, config): super().__init__(config) self.index = faiss.read_index(self.index_path) if config.faiss_gpu: co = faiss.GpuMultipleClonerOptions() co.useFloat16 = True co.shard = True self.index = faiss.index_cpu_to_all_gpus(self.index, co=co) self.corpus = load_corpus(self.corpus_path) self.encoder = Encoder( model_name=self.retrieval_method, model_path=config.retrieval_model_path, pooling_method=config.retrieval_pooling_method, max_length=config.retrieval_query_max_length, use_fp16=config.retrieval_use_fp16, ) self.topk = config.retrieval_topk self.batch_size = config.retrieval_batch_size def _search(self, query: str, num: int = None, return_score: bool = False): if num is None: num = self.topk query_emb = self.encoder.encode(query) scores, idxs = self.index.search(query_emb, k=num) idxs = idxs[0] scores = scores[0] results = load_docs(self.corpus, idxs) if return_score: return results, scores.tolist() else: return results def _batch_search(self, query_list: list[str], num: int = None, return_score: bool = False): if isinstance(query_list, str): query_list = [query_list] if num is None: num = self.topk results = [] scores = [] for start_idx in tqdm(range(0, len(query_list), self.batch_size), desc="Retrieval process: "): query_batch = query_list[start_idx : start_idx + self.batch_size] batch_emb = self.encoder.encode(query_batch) batch_scores, batch_idxs = self.index.search(batch_emb, k=num) batch_scores = batch_scores.tolist() batch_idxs = batch_idxs.tolist() # load_docs is not vectorized, but is a python list approach flat_idxs = sum(batch_idxs, []) batch_results = load_docs(self.corpus, flat_idxs) # chunk them back batch_results = [batch_results[i * num : (i + 1) * num] for i in range(len(batch_idxs))] 
results.extend(batch_results) scores.extend(batch_scores) del batch_emb, batch_scores, batch_idxs, query_batch, flat_idxs, batch_results torch.cuda.empty_cache() if return_score: return results, scores else: return results def get_retriever(config): if config.retrieval_method == "bm25": return BM25Retriever(config) else: return DenseRetriever(config) ##################################### # FastAPI server below ##################################### class Config: """ Minimal config class (simulating your argparse) Replace this with your real arguments or load them dynamically. """ def __init__( self, retrieval_method: str = "bm25", retrieval_topk: int = 10, index_path: str = "./index/bm25", corpus_path: str = "./data/corpus.jsonl", dataset_path: str = "./data", data_split: str = "train", faiss_gpu: bool = True, retrieval_model_path: str = "./model", retrieval_pooling_method: str = "mean", retrieval_query_max_length: int = 256, retrieval_use_fp16: bool = False, retrieval_batch_size: int = 128, ): self.retrieval_method = retrieval_method self.retrieval_topk = retrieval_topk self.index_path = index_path self.corpus_path = corpus_path self.dataset_path = dataset_path self.data_split = data_split self.faiss_gpu = faiss_gpu self.retrieval_model_path = retrieval_model_path self.retrieval_pooling_method = retrieval_pooling_method self.retrieval_query_max_length = retrieval_query_max_length self.retrieval_use_fp16 = retrieval_use_fp16 self.retrieval_batch_size = retrieval_batch_size class QueryRequest(BaseModel): queries: list[str] topk: Optional[int] = None return_scores: bool = False app = FastAPI() @app.post("/retrieve") def retrieve_endpoint(request: QueryRequest): """ Endpoint that accepts queries and performs retrieval. Input format: { "queries": ["What is Python?", "Tell me about neural networks."], "topk": 3, "return_scores": true } Output format (when return_scores=True, similarity scores are returned): { "result": [ [ # Results for each query {"document": doc, "score": score}, # ... more documents ], # ... results for other queries ] } """ if not request.topk: request.topk = config.retrieval_topk # fallback to default # Perform batch retrieval. Always request scores here: batch_search returns a bare results list when return_score is False, which would break the tuple unpacking below; scores are only included in the response when the client asks for them. results, scores = retriever.batch_search( query_list=request.queries, num=request.topk, return_score=True ) # Format response resp = [] for i, single_result in enumerate(results): if request.return_scores: # If scores are returned, combine them with results combined = [] for doc, score in zip(single_result, scores[i], strict=True): combined.append({"document": doc, "score": score}) resp.append(combined) else: resp.append(single_result) return {"result": resp}
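# Example client call (illustrative sketch, not part of the original server; assumes
# the service below is already running and reachable on localhost:8000):
#
#   import requests
#
#   payload = {"queries": ["What is Python?"], "topk": 3, "return_scores": True}
#   out = requests.post("http://localhost:8000/retrieve", json=payload).json()
#   for hit in out["result"][0]:
#       print(hit["score"], hit["document"])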
if __name__ == "__main__": parser = argparse.ArgumentParser(description="Launch the local faiss retriever.") parser.add_argument( "--index_path", type=str, default="/home/peterjin/mnt/index/wiki-18/e5_Flat.index", help="Corpus indexing file." ) parser.add_argument( "--corpus_path", type=str, default="/home/peterjin/mnt/data/retrieval-corpus/wiki-18.jsonl", help="Local corpus file.", ) parser.add_argument("--topk", type=int, default=3, help="Number of retrieved passages for one query.") parser.add_argument("--retriever_name", type=str, default="e5", help="Name of the retriever model.") parser.add_argument( "--retriever_model", type=str, default="intfloat/e5-base-v2", help="Path of the retriever model." ) parser.add_argument("--faiss_gpu", action="store_true", help="Use GPU for computation") args = parser.parse_args() # 1) Build a config (could also parse from arguments). # In real usage, you'd parse your CLI arguments or environment variables. config = Config( retrieval_method=args.retriever_name, # or "dense" index_path=args.index_path, corpus_path=args.corpus_path, retrieval_topk=args.topk, faiss_gpu=args.faiss_gpu, retrieval_model_path=args.retriever_model, retrieval_pooling_method="mean", retrieval_query_max_length=256, retrieval_use_fp16=True, retrieval_batch_size=512, ) # 2) Instantiate a global retriever so it is loaded once and reused. retriever = get_retriever(config) # 3) Launch the server on port 8000; host 0.0.0.0 binds to all interfaces. uvicorn.run(app, host="0.0.0.0", port=8000) ================================================ FILE: verl_rl/examples/sglang_multiturn/search_r1_like/run_qwen2.5-3b_instruct_search_multiturn.sh ================================================ # run on 8xH20 # make sure your current working directory is the root of the project set -x ulimit -n 65535 PROJECT_DIR="$(pwd)" CONFIG_PATH="$PROJECT_DIR/examples/sglang_multiturn/config" TRAIN_DATA="$HOME/data/searchR1_processed_direct/train.parquet" VAL_DATA="$HOME/data/searchR1_processed_direct/test.parquet" TOOL_CONFIG="$CONFIG_PATH/tool_config/search_tool_config.yaml" python3 -m verl.trainer.main_ppo \ --config-path="$CONFIG_PATH" \ --config-name='search_multiturn_grpo' \ algorithm.adv_estimator=grpo \ data.train_batch_size=512 \ data.val_batch_size=256 \ data.max_prompt_length=4096 \ data.max_response_length=3000 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.return_raw_chat=True \ actor_rollout_ref.model.path=Qwen/Qwen2.5-3B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps_ratio=0.285 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=8 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.max_model_len=15000 \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=8 \ actor_rollout_ref.rollout.tensor_model_parallel_size=1 \ actor_rollout_ref.rollout.name=sglang \ actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.rollout.multi_turn.max_assistant_turns=2 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=8 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.val_before_train=False \ trainer.logger='["console","wandb"]' \ trainer.project_name='search_r1_like_async_rl' \ trainer.experiment_name='qwen2.5-3b-instruct_function_rm-search-async-sgl-multi-w-searchtool-verify-n16' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=100 \ trainer.test_freq=50 \ data.train_files="$TRAIN_DATA" \ data.val_files="$VAL_DATA" \ actor_rollout_ref.rollout.multi_turn.tool_config_path="$TOOL_CONFIG" \ trainer.total_epochs=1 $@ ================================================ FILE: verl_rl/examples/slurm/ray_on_slurm.slurm ================================================ #!/bin/bash #SBATCH --job-name=verl-ray-on-slurm #SBATCH --nodes=2 #SBATCH --ntasks-per-node=1 #SBATCH --mem=200G #SBATCH --partition=your-partition
#SBATCH --time=01:00:00 #SBATCH --account=your-account #SBATCH --gpus-per-node=4 #SBATCH --cpus-per-task=64 #SBATCH --output=slurm-%j.out #SBATCH --error=slurm-%j.err # load necessary modules # replace this information with your own verl_workdir=/path/to/verl train_files=/path/to/gsm8k/train.parquet val_files=/path/to/gsm8k/test.parquet apptainer_image_path=/path/to/verl-ngc.sif # replace this information with your own # Getting the node names nodes=$(scontrol show hostnames "$SLURM_JOB_NODELIST") nodes_array=($nodes) # intentionally unquoted so the hostname list word-splits into an array head_node=${nodes_array[0]} head_node_ip=$(srun --nodes=1 --ntasks=1 -w "$head_node" hostname --ip-address) # if we detect a space character in the head node IP, we'll # convert it to an ipv4 address. This step is optional. if [[ "$head_node_ip" == *" "* ]]; then IFS=' ' read -ra ADDR <<<"$head_node_ip" if [[ ${#ADDR[0]} -gt 16 ]]; then head_node_ip=${ADDR[1]} else head_node_ip=${ADDR[0]} fi echo "IPv6 address detected. Using the IPv4 address: $head_node_ip" fi port=6379 ip_head=$head_node_ip:$port export ip_head echo "IP Head: $ip_head" # make sure we set environment variables before Ray initialization printenv echo "Starting HEAD at $head_node" srun --nodes=1 --ntasks=1 -w "$head_node" \ apptainer run --nv --bind $verl_workdir $apptainer_image_path \ ray start --head --node-ip-address="$head_node_ip" --port=$port \ --num-cpus "${SLURM_CPUS_PER_TASK}" --num-gpus "${SLURM_GPUS_PER_NODE}" --block & # optional, though may be useful in certain versions of Ray < 1.0. sleep 10 # number of nodes other than the head node worker_num=$((SLURM_JOB_NUM_NODES - 1)) for ((i = 1; i <= worker_num; i++)); do node_i=${nodes_array[$i]} echo "Starting WORKER $i at $node_i" srun --nodes=1 --ntasks=1 -w "$node_i" \ apptainer run --nv --bind $verl_workdir $apptainer_image_path \ ray start --address "$ip_head" --num-cpus "${SLURM_CPUS_PER_TASK}" --num-gpus "${SLURM_GPUS_PER_NODE}" --block & sleep 5 done PYTHONUNBUFFERED=1 srun --overlap --nodes=1 --ntasks=1 -w "$head_node" \ apptainer run --nv --bind $verl_workdir $apptainer_image_path \ python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=gae \ data.train_files=$train_files \ data.val_files=$val_files \ data.train_batch_size=256 \ data.max_prompt_length=512 \ data.max_response_length=256 \ actor_rollout_ref.model.path=Qwen/Qwen2.5-0.5B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=64 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=8 \ actor_rollout_ref.rollout.tensor_model_parallel_size=1 \ actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \ critic.optim.lr=1e-5 \ critic.model.path=Qwen/Qwen2.5-0.5B-Instruct \ critic.ppo_micro_batch_size_per_gpu=4 \ algorithm.use_kl_in_reward=False \ trainer.logger=console \ trainer.val_before_train=False \ trainer.n_gpus_per_node="${SLURM_GPUS_PER_NODE}" \ trainer.nnodes="${SLURM_NNODES}" \ trainer.save_freq=10 \ trainer.test_freq=10 \ trainer.total_epochs=15 2>&1 | tee verl_demo_slurm.log ================================================ FILE: verl_rl/examples/split_placement/README.md ================================================
# Split Placement Example

Here we introduce how to run the naive implementation of split placement for the PPO algorithm. We will release the complete version of flexible placement in the near future.
For a quick start, you only need to follow Step 2 to modify the code and then Step 4 to execute the split placement example.

### Step 1: Placing the models on different GPUs

Specify the placement and resource allocation. In the example, we place the actor and reference policy on the first half of the GPUs while mapping the critic and reward model (if any) to the second half of the GPUs. For example, with `nnodes=1` and `n_gpus_per_node=8`, each pool is assigned 4 GPUs on the single node.

```python
actor_rollout_ref_pool_id = 'actor_rollout_ref_pool'
critic_pool_id = 'critic_pool'
if config.trainer.nnodes // 2 == 0 and config.trainer.n_gpus_per_node // 2 > 0:
    resource_pool_spec = {
        actor_rollout_ref_pool_id: [config.trainer.n_gpus_per_node // 2] * config.trainer.nnodes,
        critic_pool_id: [config.trainer.n_gpus_per_node // 2] * config.trainer.nnodes,
    }
else:
    resource_pool_spec = {
        actor_rollout_ref_pool_id: [config.trainer.n_gpus_per_node] * (config.trainer.nnodes // 2),
        critic_pool_id: [config.trainer.n_gpus_per_node] * (config.trainer.nnodes // 2),
    }
print(f'resource_pool_spec: {resource_pool_spec}')
mapping = {
    Role.ActorRollout: actor_rollout_ref_pool_id,
    Role.Critic: critic_pool_id,
    Role.RefPolicy: actor_rollout_ref_pool_id,
}
mapping[Role.RewardModel] = critic_pool_id
```

### Step 2: Make the models execute asynchronously

Based on the model placement, we need to make the models execute asynchronously. To do so, turn off the `blocking` flag (i.e., set `blocking=False`) in the decorator of the relevant model operations. For example, if we want the actor update and critic update to be executed in parallel, we make the following modification in `fsdp_workers.py`:

```
@register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO, blocking=False)
def update_actor(self, data: DataProto):
    ...

@register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO, blocking=False)
def update_critic(self, data: DataProto):
    ...
```

We can also parallelize the computation of `ref_log_prob`, `values`, and `rewards` in the split placement. For simplicity of the tutorial, we don't do this in this example. A toy illustration of the non-blocking pattern is sketched below.
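The following is a minimal, self-contained sketch of the futures pattern using plain Ray rather than verl's `@register` machinery (the `update` function here is illustrative, not part of verl): both remote calls are launched before either result is awaited, so the two updates can overlap instead of running back to back.

```python
import ray

ray.init()

@ray.remote
def update(name: str) -> str:
    # stand-in for an expensive model update running on its own resource pool
    return f"{name} updated"

# launch both tasks first; each .remote() call returns a future immediately
actor_future = update.remote("actor")
critic_future = update.remote("critic")

# only now block on the results, letting the two tasks run concurrently
print(ray.get([actor_future, critic_future]))
```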
### Step 3: Execute these operations in parallel in the single controller process

To implement the parallel execution of the actor and critic update, the only modification needed in `ray_trainer.py` is to `get` the concurrent `futures` on the single controller process.

```python
critic_output = critic_output.get()
actor_output = actor_output.get()
```

### Step 4: Run the split placement example

```
bash run_deepseek7b_llm.sh
```
================================================ FILE: verl_rl/examples/split_placement/config/ppo_trainer_split.yaml ================================================ # the ppo trainer split config will override default ppo_trainer.yaml hydra: searchpath: - file://verl/trainer/config defaults: - ppo_trainer - _self_ data: tokenizer: null train_files: ~/data/rlhf/gsm8k/train.parquet val_files: ~/data/rlhf/gsm8k/test.parquet prompt_key: prompt max_prompt_length: 512 max_response_length: 512 train_batch_size: 1024 val_batch_size: null # DEPRECATED: Validation datasets are sent to inference engines as a whole batch, which will schedule the memory themselves return_raw_input_ids: False # This should be set to true when the tokenizer between policy and rm differs return_raw_chat: False return_full_prompt: False shuffle: True actor_rollout_ref: hybrid_engine: True model: path: ~/models/deepseek-llm-7b-chat external_lib: null override_config: { } enable_gradient_checkpointing: True use_remove_padding: False actor: strategy: fsdp # This is for backward-compatibility ppo_mini_batch_size: 256 ppo_micro_batch_size: null # will be deprecated, use ppo_micro_batch_size_per_gpu ppo_micro_batch_size_per_gpu: null use_dynamic_bsz: False ppo_max_token_len_per_gpu: 16384 # n * ${data.max_prompt_length} + ${data.max_response_length} grad_clip: 1.0 clip_ratio: 0.2 entropy_coeff: 0.0 use_kl_loss: False # True for GRPO kl_loss_coef: 0.001 # for grpo kl_loss_type: low_var_kl # for grpo ppo_epochs: 1 shuffle: False ulysses_sequence_parallel_size: 1 # sp size optim: lr: 1e-6 lr_warmup_steps: -1 # Prioritized. Negative values mean delegating to lr_warmup_steps_ratio. lr_warmup_steps_ratio: 0.
# the total steps will be injected during runtime min_lr_ratio: null # only useful for warmup with cosine warmup_style: constant # select from constant/cosine total_training_steps: -1 # must be overridden by the program fsdp_config: wrap_policy: # transformer_layer_cls_to_wrap: None min_num_params: 0 param_offload: False optimizer_offload: False fsdp_size: -1 ref: fsdp_config: param_offload: False wrap_policy: # transformer_layer_cls_to_wrap: None min_num_params: 0 log_prob_micro_batch_size: null # will be deprecated, use log_prob_micro_batch_size_per_gpu log_prob_micro_batch_size_per_gpu: null log_prob_use_dynamic_bsz: ${actor_rollout_ref.actor.use_dynamic_bsz} log_prob_max_token_len_per_gpu: ${actor_rollout_ref.actor.ppo_max_token_len_per_gpu} ulysses_sequence_parallel_size: ${actor_rollout_ref.actor.ulysses_sequence_parallel_size} # sp size rollout: name: vllm temperature: 1.0 top_k: -1 # 0 for hf rollout, -1 for vllm rollout top_p: 1 prompt_length: ${data.max_prompt_length} # not used for open-source models response_length: ${data.max_response_length} # for vllm rollout dtype: bfloat16 # should align with FSDP gpu_memory_utilization: 0.5 ignore_eos: False enforce_eager: True free_cache_engine: True load_format: dummy_dtensor tensor_model_parallel_size: 2 max_num_batched_tokens: 8192 max_num_seqs: 1024 log_prob_micro_batch_size: null # will be deprecated, use log_prob_micro_batch_size_per_gpu log_prob_micro_batch_size_per_gpu: null log_prob_use_dynamic_bsz: ${actor_rollout_ref.actor.use_dynamic_bsz} log_prob_max_token_len_per_gpu: ${actor_rollout_ref.actor.ppo_max_token_len_per_gpu} disable_log_stats: True enable_chunked_prefill: True # could get higher throughput # for hf rollout do_sample: True # number of responses (i.e. num sample times) n: 1 # > 1 for grpo critic: strategy: fsdp optim: lr: 1e-5 lr_warmup_steps_ratio: 0.
# the total steps will be injected during runtime min_lr_ratio: null # only useful for warmup with cosine warmup_style: constant # select from constant/cosine total_training_steps: -1 # must be overridden by the program model: path: ~/models/deepseek-llm-7b-chat tokenizer_path: ${actor_rollout_ref.model.path} override_config: { } external_lib: ${actor_rollout_ref.model.external_lib} enable_gradient_checkpointing: True use_remove_padding: False fsdp_config: param_offload: False optimizer_offload: False wrap_policy: # transformer_layer_cls_to_wrap: None min_num_params: 0 fsdp_size: -1 ppo_mini_batch_size: ${actor_rollout_ref.actor.ppo_mini_batch_size} ppo_micro_batch_size: null # will be deprecated, use ppo_micro_batch_size_per_gpu ppo_micro_batch_size_per_gpu: null forward_micro_batch_size: ${critic.ppo_micro_batch_size} forward_micro_batch_size_per_gpu: ${critic.ppo_micro_batch_size_per_gpu} use_dynamic_bsz: ${actor_rollout_ref.actor.use_dynamic_bsz} ppo_max_token_len_per_gpu: 32768 # (${actor_rollout_ref.actor.ppo_max_token_len_per_gpu}) * 2 forward_max_token_len_per_gpu: ${critic.ppo_max_token_len_per_gpu} ulysses_sequence_parallel_size: 1 # sp size ppo_epochs: ${actor_rollout_ref.actor.ppo_epochs} shuffle: ${actor_rollout_ref.actor.shuffle} grad_clip: 1.0 cliprange_value: 0.5 reward_model: enable: False strategy: fsdp model: input_tokenizer: ${actor_rollout_ref.model.path} # set this to null if the chat template is identical path: ~/models/FsfairX-LLaMA3-RM-v0.1 external_lib: ${actor_rollout_ref.model.external_lib} use_remove_padding: False fsdp_config: min_num_params: 0 param_offload: False fsdp_size: -1 micro_batch_size: null # will be deprecated, use micro_batch_size_per_gpu micro_batch_size_per_gpu: null # set a number max_length: null ulysses_sequence_parallel_size: 1 # sp size use_dynamic_bsz: ${critic.use_dynamic_bsz} forward_max_token_len_per_gpu: ${critic.forward_max_token_len_per_gpu} reward_manager: naive algorithm: gamma: 1.0 lam: 1.0 adv_estimator: gae use_kl_in_reward: False kl_penalty: kl # how to estimate kl divergence kl_ctrl: type: fixed kl_coef: 0.001 trainer: total_epochs: 30 total_training_steps: null project_name: verl_examples experiment_name: gsm8k logger: [ 'console', 'wandb' ] log_val_generations: 0 nnodes: 1 n_gpus_per_node: 8 save_freq: -1 # auto: find the last ckpt to resume. If none is found, start from scratch resume_mode: auto # or disable or resume_path if resume_from_path is set resume_from_path: null test_freq: -1 critic_warmup: 0 default_hdfs_dir: null default_local_dir: checkpoints/${trainer.project_name}/${trainer.experiment_name} ray_init: num_cpus: null # `None` means using all CPUs, which might cause a hang if CPUs are limited, e.g., under SLURM. Set it to an allowed number in that case. ================================================ FILE: verl_rl/examples/split_placement/main_ppo_split.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
""" Note that we don't combine the main with ray_trainer as ray_trainer is used by other main. """ import hydra import ray import torch from split_monkey_patch import fit from verl import DataProto from verl.trainer.ppo.ray_trainer import RayPPOTrainer from verl.utils.reward_score import gsm8k, math def _select_rm_score_fn(data_source): if data_source == "openai/gsm8k": return gsm8k.compute_score elif data_source == "lighteval/MATH": return math.compute_score else: raise NotImplementedError class RewardManager: def __init__(self, tokenizer, num_examine) -> None: self.tokenizer = tokenizer self.num_examine = num_examine # the number of batches of decoded responses to print to the console def __call__(self, data: DataProto, return_dict: bool = False): """We will expand this function gradually based on the available datasets""" # If there is rm score, we directly return rm score. Otherwise, we compute via rm_score_fn if "rm_scores" in data.batch.keys(): return data.batch["rm_scores"] reward_tensor = torch.zeros_like(data.batch["responses"], dtype=torch.float32) already_print_data_sources = {} for i in range(len(data)): data_item = data[i] # DataProtoItem prompt_ids = data_item.batch["prompts"] prompt_length = prompt_ids.shape[-1] valid_prompt_length = data_item.batch["attention_mask"][:prompt_length].sum() valid_prompt_ids = prompt_ids[-valid_prompt_length:] response_ids = data_item.batch["responses"] valid_response_length = data_item.batch["attention_mask"][prompt_length:].sum() valid_response_ids = response_ids[:valid_response_length] # decode sequences = torch.cat((valid_prompt_ids, valid_response_ids)) sequences_str = self.tokenizer.decode(sequences) ground_truth = data_item.non_tensor_batch["reward_model"]["ground_truth"] # select rm_score data_source = data_item.non_tensor_batch["data_source"] compute_score_fn = _select_rm_score_fn(data_source) score = compute_score_fn(solution_str=sequences_str, ground_truth=ground_truth) reward_tensor[i, valid_response_length - 1] = score if data_source not in already_print_data_sources: already_print_data_sources[data_source] = 0 if already_print_data_sources[data_source] < self.num_examine: already_print_data_sources[data_source] += 1 print(sequences_str) if return_dict: return {"reward_tensor": reward_tensor} else: return reward_tensor @hydra.main(config_path="config", config_name="ppo_trainer_split", version_base=None) def main(config): if not ray.is_initialized(): # this is for local ray cluster ray.init( runtime_env={"env_vars": {"TOKENIZERS_PARALLELISM": "true", "NCCL_DEBUG": "WARN"}}, num_cpus=config.ray_init.num_cpus, ) ray.get(main_task.remote(config)) @ray.remote def main_task(config): # print initial config from pprint import pprint from omegaconf import OmegaConf from verl.utils.fs import copy_to_local pprint(OmegaConf.to_container(config, resolve=True)) # resolve=True will eval symbol values OmegaConf.resolve(config) # download the checkpoint from hdfs local_path = copy_to_local(config.actor_rollout_ref.model.path) # instantiate tokenizer from verl.utils import hf_tokenizer tokenizer = hf_tokenizer(local_path) # define worker classes if config.actor_rollout_ref.actor.strategy in {"fsdp", "fsdp2"}: assert config.critic.strategy in {"fsdp", "fsdp2"} from verl.single_controller.ray import RayWorkerGroup from verl.workers.fsdp_workers import ActorRolloutRefWorker, CriticWorker ray_worker_group_cls = RayWorkerGroup elif config.actor_rollout_ref.actor.strategy == "megatron": assert config.actor_rollout_ref.actor.strategy == 
config.critic.strategy from verl.single_controller.ray.megatron import NVMegatronRayWorkerGroup from verl.workers.megatron_workers import ActorRolloutRefWorker, CriticWorker ray_worker_group_cls = NVMegatronRayWorkerGroup else: raise NotImplementedError from verl.trainer.ppo.ray_trainer import ResourcePoolManager, Role role_worker_mapping = { Role.ActorRollout: ray.remote(ActorRolloutRefWorker), Role.Critic: ray.remote(CriticWorker), } # NOTE: initialize two resource pools actor_rollout_ref_pool_id = "actor_rollout_ref_pool" critic_pool_id = "critic_pool" if config.trainer.nnodes // 2 == 0 and config.trainer.n_gpus_per_node // 2 > 0: resource_pool_spec = { actor_rollout_ref_pool_id: [config.trainer.n_gpus_per_node // 2] * config.trainer.nnodes, critic_pool_id: [config.trainer.n_gpus_per_node // 2] * config.trainer.nnodes, } else: resource_pool_spec = { actor_rollout_ref_pool_id: [config.trainer.n_gpus_per_node] * (config.trainer.nnodes // 2), critic_pool_id: [config.trainer.n_gpus_per_node] * (config.trainer.nnodes // 2), } print(f"resource_pool_spec: {resource_pool_spec}") mapping = { Role.ActorRollout: actor_rollout_ref_pool_id, Role.Critic: critic_pool_id, } # use reference model if config.algorithm.use_kl_in_reward or config.actor_rollout_ref.actor.use_kl_loss: role_worker_mapping[Role.RefPolicy] = ray.remote(ActorRolloutRefWorker) mapping[Role.RefPolicy] = actor_rollout_ref_pool_id # we should adopt a multi-source reward function here # - for rule-based rm, we directly call a reward score # - for model-based rm, we call a model # - for code related prompt, we send to a sandbox if there are test cases # - finally, we combine all the rewards together # - The reward type depends on the tag of the data if config.reward_model.enable: if config.reward_model.strategy in {"fsdp", "fsdp2"}: from verl.workers.fsdp_workers import RewardModelWorker elif config.reward_model.strategy == "megatron": from verl.workers.megatron_workers import RewardModelWorker else: raise NotImplementedError role_worker_mapping[Role.RewardModel] = ray.remote(RewardModelWorker) mapping[Role.RewardModel] = critic_pool_id reward_fn = RewardManager(tokenizer=tokenizer, num_examine=0) # Note that we always use function-based RM for validation val_reward_fn = RewardManager(tokenizer=tokenizer, num_examine=1) resource_pool_manager = ResourcePoolManager(resource_pool_spec=resource_pool_spec, mapping=mapping) RayPPOTrainer.fit = fit trainer = RayPPOTrainer( config=config, tokenizer=tokenizer, role_worker_mapping=role_worker_mapping, resource_pool_manager=resource_pool_manager, ray_worker_group_cls=ray_worker_group_cls, reward_fn=reward_fn, val_reward_fn=val_reward_fn, ) trainer.init_workers() trainer.fit() if __name__ == "__main__": main() ================================================ FILE: verl_rl/examples/split_placement/run_deepseek7b_llm.sh ================================================ set -x python3 main_ppo_split.py \ algorithm.adv_estimator=gae \ data.train_files=$HOME/data/gsm8k/train.parquet \ data.val_files=$HOME/data/gsm8k/test.parquet \ data.train_batch_size=1024 \ data.max_prompt_length=512 \ data.max_response_length=512 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=deepseek-ai/deepseek-llm-7b-chat \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=8 \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=8 \ actor_rollout_ref.rollout.tensor_model_parallel_size=4 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \ critic.optim.lr=1e-5 \ critic.model.path=deepseek-ai/deepseek-llm-7b-chat \ critic.model.enable_gradient_checkpointing=False \ critic.ppo_micro_batch_size_per_gpu=8 \ critic.model.fsdp_config.param_offload=False \ critic.model.fsdp_config.optimizer_offload=False \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_example_gsm8k' \ trainer.experiment_name='deepseek_llm_7b_function_rm' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_rl/examples/split_placement/split_monkey_patch.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ A naive implementation of the split placement example """ import uuid from copy import deepcopy from pprint import pprint import numpy as np import torch from verl import DataProto from verl.trainer.ppo.ray_trainer import ( AdvantageEstimator, apply_kl_penalty, compute_advantage, compute_data_metrics, compute_timing_metrics, marked_timer, ) from verl.utils.metric import reduce_metrics def fit(self): """ The training loop of PPO. The driver process only needs to call the compute functions of the worker group through RPC to construct the PPO dataflow. The lightweight advantage computation is done on the driver process. """ from omegaconf import OmegaConf from verl.utils.tracking import Tracking logger = Tracking( project_name=self.config.trainer.project_name, experiment_name=self.config.trainer.experiment_name, default_backend=self.config.trainer.logger, config=OmegaConf.to_container(self.config, resolve=True), ) self.global_steps = 0 # load checkpoint before doing anything self._load_checkpoint() # perform validation before training # currently, we only support validation using the reward_function.
if self.val_reward_fn is not None and self.config.trainer.get("val_before_train", True): val_metrics = self._validate() pprint(f"Initial validation metrics: {val_metrics}") logger.log(data=val_metrics, step=self.global_steps) if self.config.trainer.get("val_only", False): return # we start from step 1 self.global_steps += 1 last_val_metrics = None for epoch in range(self.config.trainer.total_epochs): for batch_dict in self.train_dataloader: metrics = {} timing_raw = {} batch: DataProto = DataProto.from_single_dict(batch_dict) # pop those keys for generation gen_batch = batch.pop(batch_keys=["input_ids", "attention_mask", "position_ids"]) is_last_step = self.global_steps >= self.total_training_steps with marked_timer("step", timing_raw): # generate a batch with marked_timer("gen", timing_raw): gen_batch_output = self.actor_rollout_wg.generate_sequences(gen_batch) timing_raw.update(gen_batch_output.meta_info["timing"]) gen_batch_output.meta_info.pop("timing", None) if self.config.algorithm.adv_estimator == AdvantageEstimator.REMAX: with marked_timer("gen_max", timing_raw): gen_baseline_batch = deepcopy(gen_batch) gen_baseline_batch.meta_info["do_sample"] = False gen_baseline_output = self.actor_rollout_wg.generate_sequences(gen_baseline_batch) batch = batch.union(gen_baseline_output) reward_baseline_tensor = self.reward_fn(batch) reward_baseline_tensor = reward_baseline_tensor.sum(dim=-1) batch.pop(batch_keys=list(gen_baseline_output.batch.keys())) batch.batch["reward_baselines"] = reward_baseline_tensor del gen_baseline_batch, gen_baseline_output batch.non_tensor_batch["uid"] = np.array( [str(uuid.uuid4()) for _ in range(len(batch.batch))], dtype=object ) # repeat to align with repeated responses in rollout batch = batch.repeat(repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True) batch = batch.union(gen_batch_output) # Balance the number of valid tokens across DP ranks. # NOTE: This usually changes the order of data in the `batch`, # which won't affect the advantage calculation (since it's based on uid), # but might affect the loss calculation (due to the change of mini-batching). # TODO: Decouple the DP balancing and mini-batching. self._balance_batch(batch, metrics=metrics) # compute global_valid tokens batch.meta_info["global_token_num"] = torch.sum(batch.batch["attention_mask"], dim=-1).tolist() # recompute old_log_probs with marked_timer("old_log_prob", timing_raw): old_log_prob = self.actor_rollout_wg.compute_log_prob(batch) batch = batch.union(old_log_prob) if self.use_reference_policy: # compute reference log_prob with marked_timer("ref", timing_raw): ref_log_prob = self.ref_policy_wg.compute_ref_log_prob(batch) batch = batch.union(ref_log_prob) # compute values if self.use_critic: with marked_timer("values", timing_raw): values = self.critic_wg.compute_values(batch) batch = batch.union(values) with marked_timer("adv", timing_raw): # compute scores. Support both model and function-based. # We first compute the scores using reward model. Then, we call reward_fn to combine # the results from reward model and rule-based results. if self.use_rm: # we first compute reward model score reward_tensor = self.rm_wg.compute_rm_score(batch) batch = batch.union(reward_tensor) # we combine with rule-based rm reward_tensor = self.reward_fn(batch) batch.batch["token_level_scores"] = reward_tensor # compute rewards. 
# apply_kl_penalty if available
if self.config.algorithm.use_kl_in_reward: batch, kl_metrics = apply_kl_penalty( batch, kl_ctrl=self.kl_ctrl_in_reward, kl_penalty=self.config.algorithm.kl_penalty ) metrics.update(kl_metrics) else: batch.batch["token_level_rewards"] = batch.batch["token_level_scores"] # compute advantages, executed on the driver process norm_adv_by_std_in_grpo = self.config.algorithm.get("norm_adv_by_std_in_grpo", True) batch = compute_advantage( batch, adv_estimator=self.config.algorithm.adv_estimator, gamma=self.config.algorithm.gamma, lam=self.config.algorithm.lam, num_repeat=self.config.actor_rollout_ref.rollout.n, norm_adv_by_std_in_grpo=norm_adv_by_std_in_grpo, ) # implement critic warmup if self.config.trainer.critic_warmup <= self.global_steps: # update actor with marked_timer("update_actor_call", timing_raw): actor_output = self.actor_rollout_wg.update_actor(batch) else: actor_output = None # update critic if self.use_critic: with marked_timer("update_critic_call", timing_raw): critic_output = self.critic_wg.update_critic(batch) # NOTE: make sure you set blocking=False in update_actor and update_critic in the worker class with marked_timer("update_actor_critic", timing_raw): critic_output = critic_output.get() critic_output_metrics = reduce_metrics(critic_output.meta_info["metrics"]) metrics.update(critic_output_metrics) if actor_output is not None: actor_output = actor_output.get() actor_output_metrics = reduce_metrics(actor_output.meta_info["metrics"]) metrics.update(actor_output_metrics) # validate if ( self.val_reward_fn is not None and self.config.trainer.test_freq > 0 and (is_last_step or self.global_steps % self.config.trainer.test_freq == 0) ): with marked_timer("testing", timing_raw): val_metrics: dict = self._validate() if is_last_step: last_val_metrics = val_metrics metrics.update(val_metrics) if self.config.trainer.save_freq > 0 and ( is_last_step or self.global_steps % self.config.trainer.save_freq == 0 ): with marked_timer("save_checkpoint", timing_raw): self._save_checkpoint() # collect metrics metrics.update(compute_data_metrics(batch=batch, use_critic=self.use_critic)) metrics.update(compute_timing_metrics(batch=batch, timing_raw=timing_raw)) # TODO: make a canonical logger that supports various backends logger.log(data=metrics, step=self.global_steps) if self.global_steps >= self.total_training_steps: pprint(f"Final validation metrics: {last_val_metrics}") return self.global_steps += 1 ================================================ FILE: verl_rl/examples/tuning/0.5b/qwen2-0.5b_grpo-lora_1_h100_fsdp_vllm.sh ================================================ # -*- coding: utf-8 -*- export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 NOW=$(date +%Y%m%d) export WANDB_DIR=gsm8k-grpo-lora-qwen2.5-0.5b-${NOW} export WANDB_PROJECT=${WANDB_DIR} export WANDB_EXP=0.5b-${NOW} MODEL_PATH=Qwen/Qwen2.5-0.5B-Instruct set -x nproc_per_gpu=116 nnodes=1 ngpu_per_node=1 total_procs=$(( nproc_per_gpu * nnodes * ngpu_per_node )) mini_batch_size=$(( total_procs )) python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=data/gsm8k/train.parquet \ data.val_files=data/gsm8k/test.parquet \ data.train_batch_size=${total_procs} \ data.val_batch_size=${total_procs} \ data.max_prompt_length=512 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.shuffle=False \ actor_rollout_ref.model.path=$MODEL_PATH \ actor_rollout_ref.model.use_shm=True \ actor_rollout_ref.model.enable_gradient_checkpointing=True \
actor_rollout_ref.model.lora_rank=32 \ actor_rollout_ref.model.lora_alpha=32 \ actor_rollout_ref.model.target_modules=all-linear \ actor_rollout_ref.actor.optim.lr=3e-5 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=${mini_batch_size} \ actor_rollout_ref.actor.ppo_micro_batch_size=${mini_batch_size} \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.fsdp_config.fsdp_size=-1 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=True \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=True \ actor_rollout_ref.rollout.log_prob_micro_batch_size=${mini_batch_size} \ actor_rollout_ref.rollout.tensor_model_parallel_size=1 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.1 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.rollout.max_num_seqs=512 \ actor_rollout_ref.rollout.max_model_len=1536 \ actor_rollout_ref.rollout.max_num_batched_tokens=1536 \ actor_rollout_ref.rollout.enable_chunked_prefill=False \ actor_rollout_ref.rollout.load_format=safetensors \ actor_rollout_ref.rollout.layered_summon=True \ actor_rollout_ref.ref.log_prob_micro_batch_size=${mini_batch_size} \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=1 \ actor_rollout_ref.actor.entropy_coeff=0.001 \ algorithm.kl_ctrl.kl_coef=0.001 \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name=${WANDB_PROJECT} \ trainer.experiment_name=${WANDB_EXP} \ trainer.n_gpus_per_node=1 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=1 $@ 2>&1 | tee ${WANDB_PROJECT}.log ================================================ FILE: verl_rl/examples/tuning/1.5b/qwen2-1.5b_grpo-lora_1_h100_fsdp_vllm.sh ================================================ # -*- coding: utf-8 -*- export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 NOW=$(date +%Y%m%d) export WANDB_DIR=gsm8k-grpo-lora-qwen2.5-1.5b-${NOW} export WANDB_PROJECT=${WANDB_DIR} export WANDB_EXP=1.5b-${NOW} MODEL_PATH=Qwen/Qwen2.5-1.5B-Instruct set -x nproc_per_gpu=128 nnodes=1 ngpu_per_node=1 total_procs=$(( nproc_per_gpu * nnodes * ngpu_per_node )) mini_batch_size=$(( total_procs )) python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=data/gsm8k/train.parquet \ data.val_files=data/gsm8k/test.parquet \ data.train_batch_size=${total_procs} \ data.val_batch_size=${total_procs} \ data.max_prompt_length=512 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.shuffle=False \ actor_rollout_ref.model.path=$MODEL_PATH \ actor_rollout_ref.model.use_shm=True \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.model.lora_rank=32 \ actor_rollout_ref.model.lora_alpha=32 \ actor_rollout_ref.model.target_modules=all-linear \ actor_rollout_ref.actor.optim.lr=3e-5 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=${mini_batch_size} \ actor_rollout_ref.actor.ppo_micro_batch_size=${mini_batch_size} \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.fsdp_config.fsdp_size=-1 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ 
actor_rollout_ref.actor.fsdp_config.param_offload=True \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=True \ actor_rollout_ref.rollout.log_prob_micro_batch_size=${mini_batch_size} \ actor_rollout_ref.rollout.tensor_model_parallel_size=1 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.1 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.rollout.max_num_seqs=512 \ actor_rollout_ref.rollout.max_model_len=1536 \ actor_rollout_ref.rollout.max_num_batched_tokens=1536 \ actor_rollout_ref.rollout.enable_chunked_prefill=False \ actor_rollout_ref.rollout.load_format=safetensors \ actor_rollout_ref.rollout.layered_summon=True \ actor_rollout_ref.ref.log_prob_micro_batch_size=${mini_batch_size} \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=1 \ actor_rollout_ref.actor.entropy_coeff=0.001 \ algorithm.kl_ctrl.kl_coef=0.001 \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name=${WANDB_PROJECT} \ trainer.experiment_name=${WANDB_EXP} \ trainer.n_gpus_per_node=1 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=1 $@ 2>&1 | tee ${WANDB_PROJECT}.log ================================================ FILE: verl_rl/examples/tuning/14b/qwen2-14b_grpo-lora_2_h100_fsdp_vllm.sh ================================================ # -*- coding: utf-8 -*- export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 NOW=$(date +%Y%m%d) export WANDB_DIR=gsm8k-grpo-lora-qwen2.5-14b-${NOW} export WANDB_PROJECT=${WANDB_DIR} export WANDB_EXP=14b-${NOW} MODEL_PATH=Qwen/Qwen2.5-14B-Instruct set -x nproc_per_gpu=58 # 32√ → 64× → 48√ → 56√ → 60× → 58√ → 59× nnodes=1 ngpu_per_node=2 total_procs=$(( nproc_per_gpu * nnodes * ngpu_per_node )) mini_batch_size=$(( total_procs )) python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=data/gsm8k/train.parquet \ data.val_files=data/gsm8k/test.parquet \ data.train_batch_size=${total_procs} \ data.val_batch_size=${total_procs} \ data.max_prompt_length=512 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.shuffle=False \ actor_rollout_ref.model.path=$MODEL_PATH \ actor_rollout_ref.model.use_shm=True \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.model.lora_rank=32 \ actor_rollout_ref.model.lora_alpha=32 \ actor_rollout_ref.model.target_modules=all-linear \ actor_rollout_ref.actor.optim.lr=3e-5 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=${mini_batch_size} \ actor_rollout_ref.actor.ppo_micro_batch_size=${mini_batch_size} \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.fsdp_config.fsdp_size=-1 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=True \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=True \ actor_rollout_ref.rollout.log_prob_micro_batch_size=${mini_batch_size} \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.25 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.rollout.max_num_seqs=512 \ actor_rollout_ref.rollout.max_model_len=1536 \ actor_rollout_ref.rollout.max_num_batched_tokens=1536 \ 
actor_rollout_ref.rollout.enable_chunked_prefill=False \ actor_rollout_ref.rollout.load_format=safetensors \ actor_rollout_ref.rollout.layered_summon=True \ actor_rollout_ref.ref.log_prob_micro_batch_size=${mini_batch_size} \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=2 \ actor_rollout_ref.actor.entropy_coeff=0.001 \ algorithm.kl_ctrl.kl_coef=0.001 \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name=${WANDB_PROJECT} \ trainer.experiment_name=${WANDB_EXP} \ trainer.n_gpus_per_node=2 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=1 $@ 2>&1 | tee ${WANDB_PROJECT}.log ================================================ FILE: verl_rl/examples/tuning/14b/qwen2_14b_grpo_4_h800_fsdp_vllm.sh ================================================ set -x gsm8k_train_path=$HOME/data/rlhf/gsm8k/train.parquet gsm8k_test_path=$HOME/data/rlhf/math/test.parquet model_path=Qwen/Qwen2.5-Coder-14B-Instruct train_files="['$gsm8k_train_path']" test_files="['$gsm8k_test_path']" PYTHONPATH=/opt/tiger/open_verl python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=$model_path \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=16 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=True \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=True \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \ actor_rollout_ref.rollout.tensor_model_parallel_size=4 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=16 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_gsm8k' \ trainer.experiment_name='qwen2_14b_function_rm' \ trainer.n_gpus_per_node=4 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=5 \ trainer.total_epochs=1 $@ ================================================ FILE: verl_rl/examples/tuning/32b/qwen2-32b_grpo-lora_4_h100_fsdp_vllm.sh ================================================ # -*- coding: utf-8 -*- export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 NOW=$(date +%Y%m%d) export WANDB_DIR=gsm8k-grpo-lora-qwen2.5-32b-${NOW} export WANDB_PROJECT=${WANDB_DIR} export WANDB_EXP=32b-${NOW} MODEL_PATH=Qwen/Qwen2.5-32B-Instruct set -x nproc_per_gpu=45 # 32√ → 64× → 48× → 40√ → 44√ → 46× → 45× nnodes=1 ngpu_per_node=4 total_procs=$(( nproc_per_gpu * nnodes * ngpu_per_node )) mini_batch_size=$(( total_procs )) python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=data/gsm8k/train.parquet \ data.val_files=data/gsm8k/test.parquet \ data.train_batch_size=${total_procs} \ data.val_batch_size=${total_procs} 
\ data.max_prompt_length=512 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.shuffle=False \ actor_rollout_ref.model.path=$MODEL_PATH \ actor_rollout_ref.model.use_shm=True \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.model.lora_rank=32 \ actor_rollout_ref.model.lora_alpha=32 \ actor_rollout_ref.model.target_modules=all-linear \ actor_rollout_ref.actor.optim.lr=3e-5 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=${mini_batch_size} \ actor_rollout_ref.actor.ppo_micro_batch_size=${mini_batch_size} \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.fsdp_config.fsdp_size=-1 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=True \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=True \ actor_rollout_ref.rollout.log_prob_micro_batch_size=${mini_batch_size} \ actor_rollout_ref.rollout.tensor_model_parallel_size=4 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.3 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.rollout.max_num_seqs=512 \ actor_rollout_ref.rollout.max_model_len=1536 \ actor_rollout_ref.rollout.max_num_batched_tokens=1536 \ actor_rollout_ref.rollout.enable_chunked_prefill=False \ actor_rollout_ref.rollout.load_format=safetensors \ actor_rollout_ref.rollout.layered_summon=True \ actor_rollout_ref.ref.log_prob_micro_batch_size=${mini_batch_size} \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=2 \ actor_rollout_ref.actor.entropy_coeff=0.001 \ algorithm.kl_ctrl.kl_coef=0.001 \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name=${WANDB_PROJECT} \ trainer.experiment_name=${WANDB_EXP} \ trainer.n_gpus_per_node=4 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=1 $@ 2>&1 | tee ${WANDB_PROJECT}.log ================================================ FILE: verl_rl/examples/tuning/32b/qwen2_32B_grpo_8_h20_megatron_vllm.sh ================================================ set -x # we need this to avoid fragmentation of GPU memory export PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:256 gsm8k_train_path=$HOME/data/rlhf/gsm8k/train.parquet gsm8k_test_path=$HOME/data/rlhf/math/test.parquet train_files="['$gsm8k_train_path']" test_files="['$gsm8k_test_path']" model_path=Qwen/Qwen2.5-32B python3 -m verl.trainer.main_ppo --config-path=config \ --config-name='ppo_megatron_trainer.yaml'\ algorithm.adv_estimator=grpo \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=512 \ data.max_prompt_length=2048 \ data.max_response_length=6144 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=$model_path \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=1 \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=8 \ actor_rollout_ref.actor.megatron.param_offload=True \ actor_rollout_ref.actor.megatron.grad_offload=True \ actor_rollout_ref.actor.megatron.optimizer_offload=True \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ 
actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=1 \ actor_rollout_ref.rollout.tensor_model_parallel_size=8 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=1 \ actor_rollout_ref.ref.megatron.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger=console \ trainer.project_name='megatron_vllm_qwen2_32b' \ trainer.experiment_name='qwen2_32b_grpo_8_h20' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_rl/examples/tuning/3b/qwen2-3b_grpo-lora_1_h100_fsdp_vllm.sh ================================================ # -*- coding: utf-8 -*- export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 NOW=$(date +%Y%m%d) export WANDB_DIR=gsm8k-grpo-lora-qwen2.5-3b-${NOW} export WANDB_PROJECT=${WANDB_DIR} export WANDB_EXP=3b-${NOW} MODEL_PATH=Qwen/Qwen2.5-3B-Instruct set -x nproc_per_gpu=62 nnodes=1 ngpu_per_node=1 total_procs=$(( nproc_per_gpu * nnodes * ngpu_per_node )) mini_batch_size=$(( total_procs )) python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=data/gsm8k/train.parquet \ data.val_files=data/gsm8k/test.parquet \ data.train_batch_size=${total_procs} \ data.val_batch_size=${total_procs} \ data.max_prompt_length=512 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.shuffle=False \ actor_rollout_ref.model.path=$MODEL_PATH \ actor_rollout_ref.model.use_shm=True \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.model.lora_rank=32 \ actor_rollout_ref.model.lora_alpha=32 \ actor_rollout_ref.model.target_modules=all-linear \ actor_rollout_ref.actor.optim.lr=3e-5 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=${mini_batch_size} \ actor_rollout_ref.actor.ppo_micro_batch_size=${mini_batch_size} \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.fsdp_config.fsdp_size=-1 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=True \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=True \ actor_rollout_ref.rollout.log_prob_micro_batch_size=${mini_batch_size} \ actor_rollout_ref.rollout.tensor_model_parallel_size=1 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.1 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.rollout.max_num_seqs=512 \ actor_rollout_ref.rollout.max_model_len=1536 \ actor_rollout_ref.rollout.max_num_batched_tokens=1536 \ actor_rollout_ref.rollout.enable_chunked_prefill=False \ actor_rollout_ref.rollout.load_format=safetensors \ actor_rollout_ref.rollout.layered_summon=True \ actor_rollout_ref.ref.log_prob_micro_batch_size=${mini_batch_size} \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=1 \ actor_rollout_ref.actor.entropy_coeff=0.001 \ algorithm.kl_ctrl.kl_coef=0.001 \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name=${WANDB_PROJECT} \ trainer.experiment_name=${WANDB_EXP} \ trainer.n_gpus_per_node=1 \ trainer.nnodes=1 \ trainer.save_freq=20 
\
    trainer.test_freq=5 \
    trainer.total_epochs=1 $@ 2>&1 | tee ${WANDB_PROJECT}.log

================================================
FILE: verl_rl/examples/tuning/70b/qwen2-70b_grpo_32_h20_fsdp_vllm.sh
================================================
set -x

gsm8k_train_path=$HOME/data/rlhf/gsm8k/train.parquet
gsm8k_val_path=$HOME/data/rlhf/math/test.parquet
model_path=Qwen/Qwen2-72B-Instruct

python3 -m verl.trainer.main_ppo \
    algorithm.adv_estimator=grpo \
    data.train_files=$gsm8k_train_path \
    data.val_files=$gsm8k_val_path \
    data.train_batch_size=1024 \
    data.max_prompt_length=512 \
    data.max_response_length=512 \
    data.filter_overlong_prompts=True \
    data.truncation='error' \
    actor_rollout_ref.model.path=$model_path \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.model.use_remove_padding=True \
    actor_rollout_ref.actor.ppo_mini_batch_size=256 \
    actor_rollout_ref.actor.use_dynamic_bsz=True \
    actor_rollout_ref.actor.ppo_max_token_len_per_gpu=24000 \
    actor_rollout_ref.actor.use_kl_loss=True \
    actor_rollout_ref.actor.kl_loss_coef=0.001 \
    actor_rollout_ref.actor.kl_loss_type=low_var_kl \
    actor_rollout_ref.actor.entropy_coeff=0 \
    actor_rollout_ref.model.enable_gradient_checkpointing=True \
    actor_rollout_ref.actor.fsdp_config.param_offload=True \
    actor_rollout_ref.actor.fsdp_config.optimizer_offload=True \
    actor_rollout_ref.rollout.tensor_model_parallel_size=16 \
    actor_rollout_ref.rollout.name=vllm \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \
    actor_rollout_ref.rollout.n=5 \
    actor_rollout_ref.ref.fsdp_config.param_offload=True \
    algorithm.use_kl_in_reward=False \
    trainer.critic_warmup=0 \
    trainer.logger='["console","wandb"]' \
    trainer.project_name='verl_grpo_example_gsm8k' \
    trainer.experiment_name='Qwen2_72B_Instruct' \
    trainer.n_gpus_per_node=8 \
    trainer.nnodes=4 \
    trainer.save_freq=-1 \
    trainer.test_freq=5 \
    trainer.total_epochs=1 $@

================================================
FILE: verl_rl/examples/tuning/70b/qwen2-70b_grpo_32_h800_fsdp_vllm.sh
================================================
set -x
#### important: vllm version must be >= 0.8.3

gsm8k_train_path=$HOME/data/rlhf/gsm8k/train.parquet
gsm8k_val_path=$HOME/data/rlhf/math/test.parquet
model_path=Qwen/Qwen2-72B-Instruct

python3 -m verl.trainer.main_ppo \
    algorithm.adv_estimator=grpo \
    data.train_files=$gsm8k_train_path \
    data.val_files=$gsm8k_val_path \
    data.train_batch_size=1024 \
    data.max_prompt_length=512 \
    data.max_response_length=512 \
    data.filter_overlong_prompts=True \
    data.truncation='error' \
    actor_rollout_ref.model.path=$model_path \
    actor_rollout_ref.actor.optim.lr=1e-6 \
    actor_rollout_ref.model.use_remove_padding=True \
    actor_rollout_ref.actor.ppo_mini_batch_size=256 \
    actor_rollout_ref.actor.use_dynamic_bsz=True \
    actor_rollout_ref.actor.ppo_max_token_len_per_gpu=24000 \
    actor_rollout_ref.actor.use_kl_loss=True \
    actor_rollout_ref.actor.kl_loss_coef=0.001 \
    actor_rollout_ref.actor.kl_loss_type=low_var_kl \
    actor_rollout_ref.actor.entropy_coeff=0 \
    actor_rollout_ref.model.enable_gradient_checkpointing=True \
    actor_rollout_ref.actor.fsdp_config.param_offload=True \
    actor_rollout_ref.actor.fsdp_config.optimizer_offload=True \
    actor_rollout_ref.rollout.tensor_model_parallel_size=16 \
    actor_rollout_ref.rollout.name=vllm \
    actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \
    actor_rollout_ref.rollout.n=5 \
    actor_rollout_ref.ref.fsdp_config.param_offload=True \
    algorithm.use_kl_in_reward=False \
    trainer.critic_warmup=0 \
    trainer.logger='["console","wandb"]' \
    trainer.project_name='verl_grpo_example_gsm8k' \
trainer.experiment_name='Qwen2_72B_Instruct' \ trainer.n_gpus_per_node=8 \ trainer.nnodes=4 \ trainer.save_freq=-1 \ trainer.test_freq=5 \ trainer.total_epochs=1 $@ ================================================ FILE: verl_rl/examples/tuning/70b/qwen2-72b_grpo-lora_8_h100_fsdp_vllm.sh ================================================ # -*- coding: utf-8 -*- export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 NOW=$(date +%Y%m%d) export WANDB_DIR=gsm8k-grpo-lora-qwen2.5-72b-${NOW} export WANDB_PROJECT=${WANDB_DIR} export WANDB_EXP=72b-${NOW} MODEL_PATH=Qwen/Qwen2.5-72B-Instruct set -x nproc_per_gpu=22 # 16√ → 32× → 24× → 20√ → 22√ → 23× nnodes=1 ngpu_per_node=8 total_procs=$(( nproc_per_gpu * nnodes * ngpu_per_node )) mini_batch_size=$(( total_procs )) python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=data/gsm8k/train.parquet \ data.val_files=data/gsm8k/test.parquet \ data.train_batch_size=${total_procs} \ data.val_batch_size=${total_procs} \ data.max_prompt_length=512 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.shuffle=False \ actor_rollout_ref.model.path=$MODEL_PATH \ actor_rollout_ref.model.use_shm=True \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.model.lora_rank=32 \ actor_rollout_ref.model.lora_alpha=32 \ actor_rollout_ref.model.target_modules=all-linear \ actor_rollout_ref.actor.optim.lr=3e-5 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=${mini_batch_size} \ actor_rollout_ref.actor.ppo_micro_batch_size=${mini_batch_size} \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.fsdp_config.fsdp_size=-1 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=True \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=True \ actor_rollout_ref.rollout.log_prob_micro_batch_size=${mini_batch_size} \ actor_rollout_ref.rollout.tensor_model_parallel_size=8 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.rollout.max_num_seqs=512 \ actor_rollout_ref.rollout.max_model_len=1536 \ actor_rollout_ref.rollout.max_num_batched_tokens=1536 \ actor_rollout_ref.rollout.enable_chunked_prefill=False \ actor_rollout_ref.rollout.load_format=safetensors \ actor_rollout_ref.rollout.layered_summon=True \ actor_rollout_ref.ref.log_prob_micro_batch_size=${mini_batch_size} \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=2 \ actor_rollout_ref.actor.entropy_coeff=0.001 \ algorithm.kl_ctrl.kl_coef=0.001 \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name=${WANDB_PROJECT} \ trainer.experiment_name=${WANDB_EXP} \ trainer.n_gpus_per_node=8 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ 2>&1 | tee ${WANDB_PROJECT}.log ================================================ FILE: verl_rl/examples/tuning/7b/qwen2-7b_grpo-lora_1_h100_fsdp_vllm.sh ================================================ # -*- coding: utf-8 -*- export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 NOW=$(date +%Y%m%d) export WANDB_DIR=gsm8k-grpo-lora-qwen2.5-7b-${NOW} export WANDB_PROJECT=${WANDB_DIR} export WANDB_EXP=7b-${NOW} MODEL_PATH=Qwen/Qwen2.5-7B-Instruct set -x 
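# Annotation (added; an inference, not documented in the repo): the arrow trails
# such as "64√ -> 128× -> ..." in these tuning scripts appear to log a manual
# search over nproc_per_gpu, where √ marks values that ran successfully and
# × marks values that failed (e.g. CUDA OOM).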
nproc_per_gpu=16 # 64√ → 128× → 96√ → 112× → 104× → 100√ → 102× → 101× nnodes=1 ngpu_per_node=1 total_procs=$(( nproc_per_gpu * nnodes * ngpu_per_node )) mini_batch_size=$(( total_procs )) python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=data/gsm8k/train.parquet \ data.val_files=data/gsm8k/test.parquet \ data.train_batch_size=${total_procs} \ data.val_batch_size=${total_procs} \ data.max_prompt_length=512 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ data.shuffle=False \ actor_rollout_ref.model.path=$MODEL_PATH \ actor_rollout_ref.model.use_shm=True \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.model.lora_rank=32 \ actor_rollout_ref.model.lora_alpha=32 \ actor_rollout_ref.model.target_modules=all-linear \ actor_rollout_ref.actor.optim.lr=3e-5 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=${mini_batch_size} \ actor_rollout_ref.actor.ppo_micro_batch_size=${mini_batch_size} \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.fsdp_config.fsdp_size=-1 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=True \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=True \ actor_rollout_ref.rollout.log_prob_micro_batch_size=${mini_batch_size} \ actor_rollout_ref.rollout.tensor_model_parallel_size=1 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.2 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.rollout.max_num_seqs=512 \ actor_rollout_ref.rollout.max_model_len=1536 \ actor_rollout_ref.rollout.max_num_batched_tokens=1536 \ actor_rollout_ref.rollout.enable_chunked_prefill=False \ actor_rollout_ref.rollout.load_format=safetensors \ actor_rollout_ref.rollout.layered_summon=True \ actor_rollout_ref.ref.log_prob_micro_batch_size=${mini_batch_size} \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=1 \ actor_rollout_ref.actor.entropy_coeff=0.001 \ algorithm.kl_ctrl.kl_coef=0.001 \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name=${WANDB_PROJECT} \ trainer.experiment_name=${WANDB_EXP} \ trainer.n_gpus_per_node=1 \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=1 $@ 2>&1 | tee ${WANDB_PROJECT}.log ================================================ FILE: verl_rl/examples/tuning/7b/qwen2-7b_grpo_2_h800_fsdp_vllm.sh ================================================ set -x gsm8k_train_path=$HOME/data/rlhf/gsm8k/train.parquet gsm8k_test_path=$HOME/data/rlhf/math/test.parquet model_path=Qwen/Qwen2-7B-Instruct train_files="['$gsm8k_train_path']" test_files="['$gsm8k_test_path']" PYTHONPATH=/opt/tiger/open_verl python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=$model_path \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=16 \ actor_rollout_ref.actor.use_kl_loss=True \ 
actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=True \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=True \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=16 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_grpo_example_gsm8k' \ trainer.experiment_name='qwen2_7b_function_rm' \ trainer.n_gpus_per_node=2 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=5 \ trainer.total_epochs=15 $@ ================================================ FILE: verl_rl/init_ray.sh ================================================ #!/bin/bash # Single Node Ray Initialization Script # Usage: bash init_ray.sh # HEAD_NODE_IP: IP address of the head node # PORT: Ray port (default: 6379) # RANK: Node rank (0 for head, >0 for workers) set -e # Parse arguments HEAD_NODE_IP=${1:-"127.0.0.1"} PORT=${2:-6379} RANK=${3:-0} # Configuration NUM_CPUS=${NUM_CPUS:-""} NUM_GPUS=${NUM_GPUS:-""} OBJECT_STORE_MEMORY=${OBJECT_STORE_MEMORY:-""} CONDA_ENV_NAME=${CONDA_ENV_NAME:-"verl"} # Colors GREEN='\033[0;32m' YELLOW='\033[1;33m' NC='\033[0m' log_info() { echo -e "${GREEN}[INFO]${NC} $(hostname): $1" } log_warn() { echo -e "${YELLOW}[WARN]${NC} $(hostname): $1" } # Activate conda environment if [ -f "/root/anaconda3/etc/profile.d/conda.sh" ]; then source "/root/anaconda3/etc/profile.d/conda.sh" elif [ -f "$HOME/anaconda3/etc/profile.d/conda.sh" ]; then source "$HOME/anaconda3/etc/profile.d/conda.sh" elif [ -f "$HOME/miniconda3/etc/profile.d/conda.sh" ]; then source "$HOME/miniconda3/etc/profile.d/conda.sh" fi if command -v conda &> /dev/null; then conda activate ${CONDA_ENV_NAME} 2>/dev/null || log_warn "Could not activate conda env: ${CONDA_ENV_NAME}" fi # Build ray start command options RAY_OPTS="" if [ -n "${NUM_CPUS}" ]; then RAY_OPTS="${RAY_OPTS} --num-cpus=${NUM_CPUS}" fi if [ -n "${NUM_GPUS}" ]; then RAY_OPTS="${RAY_OPTS} --num-gpus=${NUM_GPUS}" fi if [ -n "${OBJECT_STORE_MEMORY}" ]; then RAY_OPTS="${RAY_OPTS} --object-store-memory=${OBJECT_STORE_MEMORY}" fi # Stop existing Ray instance ray stop --force 2>/dev/null || true sleep 2 # Start Ray if [ "${RANK}" -eq 0 ]; then log_info "Starting Ray HEAD node on port ${PORT}..." ray start --head --port=${PORT} ${RAY_OPTS} else log_info "Starting Ray WORKER node, connecting to ${HEAD_NODE_IP}:${PORT}..." ray start --address=${HEAD_NODE_IP}:${PORT} ${RAY_OPTS} fi sleep 3 # Check status log_info "Ray node started. Checking status..." 
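# (added note) `ray status` prints the cluster's node and resource summary;
# if this node failed to start or to join the head, the failure shows up here in the log.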
ray status ================================================ FILE: verl_rl/init_ray_cluster.sh ================================================ #!/bin/bash # Multi-node Ray Cluster Initialization Script # Usage: bash init_ray_cluster.sh [--stop] # --stop: Stop Ray on all nodes instead of starting set -e SCRIPT_DIR=$(cd $(dirname $0); pwd) PROJECT_DIR=${SCRIPT_DIR} # Configuration PORT=${RAY_PORT:-6379} HOSTFILE=${HOSTFILE:-"/etc/mpi/hostfile"} CONDA_ENV_NAME=${CONDA_ENV_NAME:-"verl"} LOG_DIR="${PROJECT_DIR}/logs/ray" # Colors RED='\033[0;31m' GREEN='\033[0;32m' YELLOW='\033[1;33m' NC='\033[0m' log_info() { echo -e "${GREEN}[INFO]${NC} $1" } log_warn() { echo -e "${YELLOW}[WARN]${NC} $1" } log_error() { echo -e "${RED}[ERROR]${NC} $1" } # Generate conda initialization command that works with both anaconda and miniconda get_conda_init_cmd() { cat << 'EOF' for conda_sh in /root/miniconda3/etc/profile.d/conda.sh \ /root/anaconda3/etc/profile.d/conda.sh \ $HOME/miniconda3/etc/profile.d/conda.sh \ $HOME/anaconda3/etc/profile.d/conda.sh \ /opt/conda/etc/profile.d/conda.sh; do [ -f "$conda_sh" ] && source "$conda_sh" && break done EOF } # Function to stop Ray on all nodes stop_cluster() { log_info "Stopping Ray on all nodes..." if [ ! -f "${HOSTFILE}" ]; then log_warn "Hostfile not found, stopping local Ray only" ray stop --force 2>/dev/null || true return fi ALL_NODES=$(awk '!a[$1]++ {print $1}' ${HOSTFILE}) for node in ${ALL_NODES}; do log_info "Stopping Ray on ${node}..." ssh -n ${node} "$(get_conda_init_cmd) && conda activate ${CONDA_ENV_NAME} && ray stop --force" 2>/dev/null & done wait log_info "Ray stopped on all nodes" } # Function to start Ray cluster start_cluster() { # Check hostfile if [ ! -f "${HOSTFILE}" ]; then log_error "Hostfile not found: ${HOSTFILE}" log_info "Please create a hostfile with one IP per line" log_info "Example:" echo " 192.168.1.100" echo " 192.168.1.101" echo " 192.168.1.102" exit 1 fi # Get head node (first line) HEAD_NODE=$(awk 'NR==1 {print $1}' ${HOSTFILE}) ALL_NODES=$(awk '!a[$1]++ {print $1}' ${HOSTFILE}) log_info "Head node: ${HEAD_NODE}" log_info "Ray port: ${PORT}" log_info "Conda env: ${CONDA_ENV_NAME}" echo "" log_info "Nodes in cluster:" echo "${ALL_NODES}" echo "" # Create log directory mkdir -p "${LOG_DIR}" # Stop existing Ray instances first log_info "Stopping any existing Ray instances..." stop_cluster sleep 3 # Start head node first (synchronously) log_info "Starting Ray HEAD on ${HEAD_NODE}..." ssh -n ${HEAD_NODE} "CONDA_ENV_NAME=${CONDA_ENV_NAME} bash ${SCRIPT_DIR}/init_ray.sh ${HEAD_NODE} ${PORT} 0" \ > "${LOG_DIR}/ray_${HEAD_NODE}.log" 2>&1 if [ $? -ne 0 ]; then log_error "Failed to start Ray HEAD. Check ${LOG_DIR}/ray_${HEAD_NODE}.log" exit 1 fi log_info "Ray HEAD started successfully" # Wait for head to be ready sleep 5 # Start worker nodes (asynchronously) rank=1 for node in ${ALL_NODES}; do if [ "${node}" == "${HEAD_NODE}" ]; then continue fi log_info "Starting Ray WORKER on ${node} (rank ${rank})..." ssh -n ${node} "CONDA_ENV_NAME=${CONDA_ENV_NAME} bash ${SCRIPT_DIR}/init_ray.sh ${HEAD_NODE} ${PORT} ${rank}" \ > "${LOG_DIR}/ray_${node}.log" 2>&1 & rank=$((rank + 1)) done # Wait for all workers log_info "Waiting for all workers to join..." wait sleep 3 # Check cluster status echo "" log_info "Ray cluster initialization complete!" 
log_info "Logs saved to: ${LOG_DIR}/" echo "" log_info "Cluster status:" ssh -n ${HEAD_NODE} "$(get_conda_init_cmd) && conda activate ${CONDA_ENV_NAME} && ray status" } # Main case "${1}" in --stop) stop_cluster ;; *) start_cluster ;; esac ================================================ FILE: verl_rl/pyproject.toml ================================================ # ------------------------------- # build-system # ------------------------------- [build-system] requires = [ "setuptools>=61.0", "wheel" ] build-backend = "setuptools.build_meta" # ------------------------------- # project (PEP 621 metadata) # ------------------------------- [project] name = "verl" # We'll mark the version as "dynamic" because it's read from the file "verl/version/version" # (PEP 621 calls this "dynamic version"). # The actual version is specified in the [tool.setuptools.dynamic] section below. dynamic = ["version", "dependencies", "optional-dependencies", "authors", "urls"] description = "verl: Volcano Engine Reinforcement Learning for LLM" license = {text = "Apache-2.0"} # Changed from file to text format readme = {file = "README.md", content-type = "text/markdown"} requires-python = ">=3.10" # ------------------------------- # tool.ruff - Linting configuration # ------------------------------- [tool.ruff] # Note: While the formatter will attempt to format lines such that they remain within the line-length, # it isn't a hard upper bound, and formatted lines may exceed the line-length. line-length = 120 exclude = ["tests/workers/rollout/test_sglang_async_rollout_sf_tools.py", "scripts/legacy_model_merger.py"] [tool.ruff.lint] isort = {known-first-party = ["verl"]} # c.f. https://github.com/vllm-project/vllm/blob/ce8d6b75fc0586045df75ee1568a5b5f9957251b/pyproject.toml select = [ # pycodestyle "E", # Pyflakes "F", # pyupgrade "UP", # flake8-bugbear "B", # isort "I", "G", ] ignore = [ # star imports "F405", "F403", # lambda expression assignment "E731", # Loop control variable not used within loop body "B007", # f-string format "UP032", # `.log()` statement uses f-string "G004", # X | None for type annotations "UP045", # deprecated import "UP035", ] # ------------------------------- # tool.setuptools - Additional config # ------------------------------- [tool.setuptools] # True means `setuptools` will attempt to include all relevant files in package_data automatically. # This corresponds to `include_package_data=True` in setup.py. include-package-data = true # We read the version from a file in 'verl/version/version' [tool.setuptools.dynamic] version = {file = "verl/version/version"} # If you need to mimic `package_dir={'': '.'}`: [tool.setuptools.package-dir] "" = "." # If you need to include specific non-Python data (like YAML files or version file): # This is the rough equivalent of package_data={'': ['version/*'], 'verl': ['trainer/config/*.yaml']} [tool.setuptools.package-data] verl = [ "version/*", "trainer/config/*.yaml", "trainer/config/*/*.yaml", ] ================================================ FILE: verl_rl/recipe/README.md ================================================ # Recipe The examples under `recipes/` are representative extensions to verl for specific end-to-end RL training recipes. The help the community reproduce experiments, verl team provides a snapshot of the codebase when each recipe is initially PR'ed to verl main. 
You can find them via [github branches](https://github.com/volcengine/verl/branches/all?query=recipe) # Awesome work using verl - [Logic-RL](https://github.com/Unakar/Logic-RL): a reproduction of DeepSeek R1 Zero on 2K Tiny Logic Puzzle Dataset. ![GitHub Repo stars](https://img.shields.io/github/stars/Unakar/Logic-RL) - [Seed-Coder](https://github.com/ByteDance-Seed/Seed-Coder): RL training of Seed-Coder boosts performance on competitive programming ![GitHub Repo stars](https://img.shields.io/github/stars/ByteDance-Seed/Seed-Coder) - [all-hands/openhands-lm-32b-v0.1](https://www.all-hands.dev/blog/introducing-openhands-lm-32b----a-strong-open-coding-agent-model): A strong, open coding agent model, trained with [multi-turn fine-tuning](https://github.com/volcengine/verl/pull/195) - [s3](https://github.com/pat-jj/s3) **Efficient Yet Effective** Search Agent Training via RL ![GitHub Repo stars](https://img.shields.io/github/stars/pat-jj/s3) - [Rec-R1](https://arxiv.org/pdf/2503.24289): Bridging Generative Large Language Models and Recommendation Systems via Reinforcement Learning - [Explore RL Data Scaling](https://arxiv.org/abs/2503.22230): Exploring Data Scaling Trends and Effects in Reinforcement Learning from Human Feedback - [FIRE](https://arxiv.org/abs/2410.21236): Flaming-hot initiation with regular execution sampling for large language models - [DQO](https://arxiv.org/abs/2410.09302): Enhancing multi-Step reasoning abilities of language models through direct Q-function optimization - [ProRL](https://arxiv.org/abs/2505.24864): Prolonged Reinforcement Learning Expands Reasoning Boundaries in Large Language Models - [cognition-engineering](https://github.com/gair-nlp/cognition-engineering): Test time scaling drives cognition engineering. ![GitHub Repo stars](https://img.shields.io/github/stars/gair-nlp/cognition-engineering) - [Trust Region Preference Approximation](https://github.com/XueruiSu/Trust-Region-Preference-Approximation): A simple and stable **reinforcement learning algorithm** for LLM reasoning. 
![GitHub Repo stars](https://img.shields.io/github/stars/XueruiSu/Trust-Region-Preference-Approximation)
- [AdaRFT](https://github.com/uscnlp-lime/verl): Efficient Reinforcement Finetuning via **Adaptive Curriculum Learning** ![GitHub Repo stars](https://img.shields.io/github/stars/uscnlp-lime/verl)
- [critic-rl](https://github.com/HKUNLP/critic-rl): LLM critics for code generation ![GitHub Repo stars](https://img.shields.io/github/stars/HKUNLP/critic-rl)
- [self-rewarding-reasoning-LLM](https://arxiv.org/pdf/2502.19613): self-rewarding and correction with **generative reward models** ![GitHub Repo stars](https://img.shields.io/github/stars/RLHFlow/Self-rewarding-reasoning-LLM)
- [DeepEnlighten](https://github.com/DolbyUUU/DeepEnlighten): Reproduce R1 with **social reasoning** tasks and analyze key findings ![GitHub Repo stars](https://img.shields.io/github/stars/DolbyUUU/DeepEnlighten)
- [MetaSpatial](https://github.com/PzySeere/MetaSpatial): Reinforcing **3D Spatial Reasoning** in **VLMs** for the **Metaverse** ![GitHub Repo stars](https://img.shields.io/github/stars/PzySeere/MetaSpatial)
- [PURE](https://github.com/CJReinforce/PURE): **Credit assignment** is the key to successful reinforcement fine-tuning using **process reward model** ![GitHub Repo stars](https://img.shields.io/github/stars/CJReinforce/PURE)
- [cognitive-behaviors](https://github.com/kanishkg/cognitive-behaviors): Cognitive Behaviors that Enable Self-Improving Reasoners, or, Four Habits of Highly Effective STaRs ![GitHub Repo stars](https://img.shields.io/github/stars/kanishkg/cognitive-behaviors)
- [deepscaler](https://github.com/agentica-project/rllm/tree/deepscaler): iterative context scaling with GRPO ![GitHub Repo stars](https://img.shields.io/github/stars/agentica-project/deepscaler)
- [DAPO](https://dapo-sia.github.io/): the fully open source SOTA RL algorithm that beats DeepSeek-R1-zero-32B ![GitHub Repo stars](https://img.shields.io/github/stars/volcengine/verl)

================================================
FILE: verl_rl/recipe/char_count/README.md
================================================
# Char Count

## Introduction

Char count is a simple NLP task. We create it for beginners to grasp the idea of RLVR. The task can be trained using a tiny model (e.g., https://huggingface.co/HuggingFaceTB/SmolLM2-135M) on a consumer GPU with only 8GB of memory.

## Problem formulation

The prompt is: "How many {char} are there in {word}?". To help the LLM answer this question, we create an SFT dataset with intermediate steps. For example,

```text
Question: How many n are there in n-i-n-e?
Answer:
n = n
i != n
n = n
e != n
\boxed{2}
```

Note that
- We add a dash between each individual char to make the task easier, because most tokenizers then map each individual char to the same single token.
- In the SFT dataset, we create a CoT by listing all the individual chars and whether each equals the target. At the end, it outputs the final answer inside the box.
- The task can be verified.
- The word is not always meaningful. Each char is sampled uniformly from a to z. We make the total length and the answer uniformly distributed within a range.

## Scripts

To create the dataset, run

```bash
python3 create_dataset.py
```

We create a train set and a val set. Both are used for SFT and RL. You can specify the total number of samples, the min/max length, and the data path.

To run the SFT

```bash
bash train_sft.sh
```

We train SFT for 3 epochs. After 3 epochs, the validation score is around 0.12.
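Because the ground truth is just a character count, verification is mechanical. The snippet below is a minimal, self-contained sketch of that check, written for illustration only: the repo's actual scoring lives in `recipe/char_count/reward_function.py` and reuses verl's `\boxed{}` parsing helpers, whereas the regex-based parser here is a simplification.

```python
import re


def ground_truth_count(dashed_word: str, target: str) -> int:
    """Ground-truth answer for a dashed word such as 'n-i-n-e'."""
    return dashed_word.split("-").count(target)


def extract_boxed(solution: str) -> str | None:
    """Return the content of the last \\boxed{...} in the model output, if any."""
    matches = re.findall(r"\\boxed\{([^}]*)\}", solution)
    return matches[-1] if matches else None


def simple_reward(solution: str, dashed_word: str, target: str) -> float:
    """1.0 if the boxed answer matches the true count, else 0.0."""
    return 1.0 if extract_boxed(solution) == str(ground_truth_count(dashed_word, target)) else 0.0


# The CoT above ends with \boxed{2} for target 'n' in 'n-i-n-e':
assert simple_reward("n = n\ni != n\nn = n\ne != n\n\\boxed{2}", "n-i-n-e", "n") == 1.0
```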
To run GRPO

```bash
bash train_grpo.sh
```

We train GRPO for 2 epochs. After 2 epochs, the validation score is around 0.36.

================================================
FILE: verl_rl/recipe/char_count/create_dataset.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Task description: given a random word and a random char, count the number of occurrences of the char in the word.

Create a CoT dataset that splits the word into separate chars, then lists each char and counts the occurrences.
Words are random character strings; each char is sampled uniformly from a to z.
"""

import os.path
import random

prompt_template = "How many {} are there in word {}?"


def generate_random_char():
    return chr(97 + random.randint(0, 25))


def create_prompt_response(min_length=3, max_length=5):
    # randomly generate a length
    word_length = random.randint(min_length, max_length)
    # randomly generate a target count so the answer is uniformly distributed in [1, word_length]
    target_count_number = random.randint(1, word_length)

    char_lst = []

    # generate the word
    # step 1: add the target char `target_count_number` times
    target_char = generate_random_char()
    for _ in range(target_count_number):
        char_lst.append(target_char)

    # step 2: fill the remaining positions with other chars
    for _ in range(word_length - target_count_number):
        while True:
            char = generate_random_char()
            if char != target_char:
                char_lst.append(char)
                break

    # step 3: randomly permute char_lst
    random.shuffle(char_lst)

    word = "-".join(char_lst)
    prompt = prompt_template.format(target_char, word)

    final_answer = []

    # cot
    number = 0
    for i, char in enumerate(char_lst):
        cot = f"{char}"
        if char != target_char:
            cot += " != "
        else:
            cot += " = "
            number += 1
        cot += f"{target_char}."
        final_answer.append(cot)

    conclusion = f"\\boxed{{{number}}} {target_char} in {word}."
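    # (added example) for word "n-i-n-e" and target "n" this yields: \boxed{2} n in n-i-n-e.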
final_answer.append(conclusion) final_answer = "\n".join(final_answer) return prompt, final_answer if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument("--total_number", type=int, default=10000) parser.add_argument("--min_length", type=int, default=5) parser.add_argument("--max_length", type=int, default=20) parser.add_argument("--data_path", type=str, default="~/data/char_count") args = vars(parser.parse_args()) total_number = args["total_number"] min_length = args["min_length"] max_length = args["max_length"] data_path = args["data_path"] data_path = os.path.expanduser(data_path) full_output = [] for _ in range(total_number): output = create_prompt_response(min_length=min_length, max_length=max_length) full_output.append(output) # random reorder random.shuffle(full_output) # split for train and test train_split_len = int(0.9 * len(full_output)) train_outputs = full_output[:train_split_len] test_output = full_output[train_split_len:] sft_train_dataset = {"prompt": [], "response": []} for o in train_outputs: sft_train_dataset["prompt"].append(o[0]) sft_train_dataset["response"].append(o[1]) sft_test_dataset = {"prompt": [], "response": []} for o in test_output: sft_test_dataset["prompt"].append(o[0]) sft_test_dataset["response"].append(o[1]) import pandas as pd sft_train_dataset = pd.DataFrame(data=sft_train_dataset) sft_test_dataset = pd.DataFrame(data=sft_test_dataset) folder = os.path.join(data_path, "sft") os.makedirs(folder, exist_ok=True) sft_train_dataset.to_parquet(os.path.join(folder, "train.parquet")) sft_test_dataset.to_parquet(os.path.join(folder, "test.parquet")) # build RL dataset rl_train_dataset = {"prompt": [], "data_source": [], "ability": [], "reward_model": [], "extra_info": []} rl_test_dataset = {"prompt": [], "data_source": [], "ability": [], "reward_model": [], "extra_info": []} from verl.utils.reward_score.math import last_boxed_only_string, remove_boxed for o in train_outputs: prompt = o[0] response = o[1] prompt_with_template = [ { "role": "user", "content": prompt, } ] rl_train_dataset["prompt"].append(prompt_with_template) rl_train_dataset["data_source"].append("char_count") rl_train_dataset["ability"].append("other") rl_train_dataset["reward_model"].append( {"style": "rule", "ground_truth": remove_boxed(last_boxed_only_string(response))} ) rl_train_dataset["extra_info"].append({"response": response}) for o in test_output: prompt = o[0] response = o[1] prompt_with_template = [ { "role": "user", "content": prompt, } ] rl_test_dataset["prompt"].append(prompt_with_template) rl_test_dataset["data_source"].append("char_count") rl_test_dataset["ability"].append("other") rl_test_dataset["reward_model"].append( {"style": "rule", "ground_truth": remove_boxed(last_boxed_only_string(response))} ) rl_test_dataset["extra_info"].append({"response": response}) rl_train_dataset = pd.DataFrame(data=rl_train_dataset) rl_test_dataset = pd.DataFrame(data=rl_test_dataset) folder = os.path.join(data_path, "rl") os.makedirs(folder, exist_ok=True) rl_train_dataset.to_parquet(os.path.join(folder, "train.parquet")) rl_test_dataset.to_parquet(os.path.join(folder, "test.parquet")) ================================================ FILE: verl_rl/recipe/char_count/reward_function.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Reward function """ from verl.utils.reward_score import math def char_count_reward_function(data_source, solution_str, ground_truth, extra_info=None): try: last_boxed_string = math.last_boxed_only_string(solution_str) if last_boxed_string is None: return 0 solution = math.remove_boxed(last_boxed_string) if solution == ground_truth: return 1 else: return 0 except Exception: print(ground_truth, solution_str) return 0 ================================================ FILE: verl_rl/recipe/char_count/train_grpo.sh ================================================ set -x python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=$HOME/data/char_count/rl/train.parquet \ data.val_files=$HOME/data/char_count/rl/test.parquet \ data.train_batch_size=128 \ data.max_prompt_length=128 \ data.max_response_length=128 \ data.filter_overlong_prompts=False \ data.truncation='error' \ actor_rollout_ref.model.path=./models/sft/global_step_105 \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=16 \ actor_rollout_ref.actor.use_dynamic_bsz=True \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=5000 \ actor_rollout_ref.actor.use_kl_loss=False \ actor_rollout_ref.actor.kl_loss_coef=0.0 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=True \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=True \ actor_rollout_ref.rollout.tensor_model_parallel_size=1 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.7 \ actor_rollout_ref.rollout.n=8 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.logger='["console","tensorboard"]' \ trainer.project_name='verl_example' \ trainer.experiment_name='smol135m_grpo' \ trainer.val_before_train=True \ trainer.n_gpus_per_node=1 \ trainer.nnodes=1 \ trainer.save_freq=-1 \ trainer.test_freq=5 \ trainer.total_epochs=2 \ custom_reward_function.path=recipe/char_count/reward_function.py \ custom_reward_function.name=char_count_reward_function ================================================ FILE: verl_rl/recipe/char_count/train_sft.sh ================================================ set -x nproc_per_node=1 save_path=./models/sft torchrun --standalone --nnodes=1 --nproc_per_node=$nproc_per_node \ -m verl.trainer.fsdp_sft_trainer \ data.train_files=$HOME/data/char_count/sft/train.parquet \ data.val_files=$HOME/data/char_count/sft/test.parquet \ data.prompt_key=prompt \ data.response_key=response \ data.micro_batch_size_per_gpu=8 \ data.max_length=256 \ data.train_batch_size=256 \ use_remove_padding=True \ model.partial_pretrain=HuggingFaceTB/SmolLM2-135M-Instruct \ trainer.default_local_dir=$save_path \ trainer.project_name=char_count-sft \ trainer.experiment_name=char_count-sft-SmolLM2-135M-Instruct \ trainer.total_epochs=3 \ trainer.logger=console ================================================ FILE: 
verl_rl/recipe/dapo/README.md
================================================
# Recipe: Decoupled Clip and Dynamic Sampling Policy Optimization (DAPO)

> Open-Source Algorithm Implementation & Experiment Running: [Yuxuan Tong](https://tongyx361.github.io/), [Guangming Sheng](https://hk.linkedin.com/in/guangming-sheng-b50640211)

> [!IMPORTANT]
>
> **🔥 News!!!**
>
> - [2025/04] We reproduced the results of two versions of DAPO ([Full](./run_dapo_qwen2.5_32b.sh) & [w/o Dynamic Sampling](./run_dapo_wo_ds_qwen2.5_32b.sh)), achieving 52% and 50% on AIME 2024 respectively, based on [the latest codebase on `recipe/dapo`](https://github.com/volcengine/verl/tree/recipe/dapo/recipe/dapo). Please check the details in [W&B](https://wandb.ai/verl-org/DAPO%20Reproduction%20on%20verl/workspace?nw=wmb4qxfht0n).
> - [2025/03] We published the training record of [an early version of DAPO (w/o Token-level PG Loss & Dynamic Sampling)](./run_dapo_early_qwen2.5_32b.sh), achieving 44% on AIME 2024, in [W&B](https://wandb.ai/verl-org/DAPO%20Reproduction%20on%20verl/workspace?nw=wmb4qxfht0n).

🏠 [Homepage](https://dapo-sia.github.io/) | 📝 [Paper@arXiv](https://arxiv.org/abs/2503.14476) | 🤗 [Datasets&Models@HF](https://huggingface.co/collections/BytedTsinghua-SIA/dapo-67d7f1517ee33c8aed059da0) | 🐱 [Code@GitHub](https://github.com/volcengine/verl/tree/recipe/dapo/recipe/dapo) | 🐱 [Repo@GitHub](https://github.com/BytedTsinghua-SIA/DAPO)

> We propose the **D**ecoupled Clip and Dynamic s**A**mpling **P**olicy **O**ptimization (DAPO) algorithm. By making our work publicly available, we provide the broader research community and society with practical access to scalable reinforcement learning, enabling all to benefit from these advancements. Our system is based on the awesome [verl](https://github.com/volcengine/verl) framework. Thanks for their great work! Applying DAPO training to the Qwen2.5-32B base model outperforms the previous state-of-the-art DeepSeek-R1-Zero-Qwen-32B on AIME 2024, achieving **50%** accuracy with **50%** fewer training steps.
>
> ![dapo-main-result](https://dapo-sia.github.io/static/images/score.png)

## Quickstart

1. Prepare the datasets **on the Ray cluster**:

```bash
bash prepare_dapo_data.sh # This downloads the datasets to ${HOME}/verl/data by default
```

2. Submit the job to the Ray cluster **from any machine**:

```bash
cd verl # Repo root
export RAY_ADDRESS="http://${RAY_IP:-localhost}:8265" # The Ray cluster address to connect to
export WORKING_DIR="${PWD}" # The local directory to package to the Ray cluster
# Set the runtime environment like env vars and pip packages for the Ray cluster in yaml
export RUNTIME_ENV="./recipe/dapo/runtime_env.yaml" # This sets environment variables for the Ray cluster
bash recipe/dapo/run_dapo_qwen2.5_32b.sh # or other scripts
```

## Reproduction Runs

| Setup | AIME 2024 Acc. | Hardware | Image | Commit | Environment Variables | Training Script | Training Record |
| ----- | -------------- | -------- | ----- | ------ | --------------------- | --------------- | --------------- |
| DAPO | 52% | 16x8xH800 | `hiyouga/verl:ngc-th2.6.0-cu126-vllm0.8.3-flashinfer0.2.2-cxx11abi0` | [`4f80e4`](https://github.com/volcengine/verl/tree/4f80e465c2ec79ab9c3c30ec74b9745de61d0490) | [runtime_env.yaml](https://github.com/volcengine/verl/blob/4f80e465c2ec79ab9c3c30ec74b9745de61d0490/recipe/dapo/runtime_env.yaml) | [run_dapo_qwen2.5_32b.sh](https://github.com/volcengine/verl/blob/4f80e465c2ec79ab9c3c30ec74b9745de61d0490/recipe/dapo/run_dapo_qwen2.5_32b.sh) | [W&B](https://wandb.ai/verl-org/DAPO%20Reproduction%20on%20verl/workspace?nw=wmb4qxfht0n) |
| DAPO w/o Dynamic Sampling | 50% | 16x8xH800 | `hiyouga/verl:ngc-th2.6.0-cu126-vllm0.8.3-flashinfer0.2.2-cxx11abi0` | [`4f80e4`](https://github.com/volcengine/verl/tree/4f80e465c2ec79ab9c3c30ec74b9745de61d0490) | [runtime_env.yaml](https://github.com/volcengine/verl/blob/4f80e465c2ec79ab9c3c30ec74b9745de61d0490/recipe/dapo/runtime_env.yaml) | [run_dapo_wo_ds_qwen2.5_32b.sh](https://github.com/volcengine/verl/blob/4f80e465c2ec79ab9c3c30ec74b9745de61d0490/recipe/dapo/run_dapo_wo_ds_qwen2.5_32b.sh) | [W&B](https://wandb.ai/verl-org/DAPO%20Reproduction%20on%20verl/workspace?nw=wmb4qxfht0n) |
| DAPO w/o Token-level Loss & Dynamic Sampling | 44% | 16x8xH20 | `hiyouga/verl:ngc-th2.5.1-cu120-vllm0.7.4-hotfix` | [`4f80e4`](https://github.com/volcengine/verl/tree/4f80e465c2ec79ab9c3c30ec74b9745de61d0490) | [runtime_env.yaml](https://github.com/volcengine/verl/blob/4f80e465c2ec79ab9c3c30ec74b9745de61d0490/recipe/dapo/runtime_env.yaml) | [run_dapo_early_qwen2.5_32b.sh](https://github.com/volcengine/verl/blob/4f80e465c2ec79ab9c3c30ec74b9745de61d0490/recipe/dapo/run_dapo_early_qwen2.5_32b.sh) | [W&B](https://wandb.ai/verl-org/DAPO%20Reproduction%20on%20verl/workspace?nw=wmb4qxfht0n) |

> [!IMPORTANT]
>
> **📢 Call for Contribution!**
>
> Welcome to submit your reproduction runs and setups!

## Configuration

### Separated Clip Epsilons (-> Clip-Higher)

An example configuration:

```yaml
actor_rollout_ref:
  actor:
    clip_ratio_low: 0.2
    clip_ratio_high: 0.28
```

`clip_ratio_low` and `clip_ratio_high` specify the $\varepsilon_{\text{low}}$ and $\varepsilon_{\text{high}}$ in the DAPO objective.

Core relevant code:

```python
pg_losses1 = -advantages * ratio
pg_losses2 = -advantages * torch.clamp(ratio, 1 - cliprange_low, 1 + cliprange_high)
pg_losses = torch.maximum(pg_losses1, pg_losses2)
```

### Dynamic Sampling (with Group Filtering)

An example configuration:

```yaml
data:
  gen_batch_size: 1536
  train_batch_size: 512
algorithm:
  filter_groups:
    enable: True
    metric: acc # score / seq_reward / seq_final_reward / ...
    max_num_gen_batches: 10 # Non-positive values mean no upper limit
```

Setting `filter_groups.enable` to `True` will filter out groups whose outputs' `metric` values are all the same, e.g., for `acc`, groups whose outputs' accuracies are all 1 or all 0.
The trainer then repeats sampling with `gen_batch_size` until there are enough qualified groups to fill `train_batch_size`, or until it reaches the upper limit specified by `max_num_gen_batches`.

Core relevant code:

```python
prompt_bsz = self.config.data.train_batch_size
if num_prompt_in_batch < prompt_bsz:
    print(f'{num_prompt_in_batch=} < {prompt_bsz=}')
    num_gen_batches += 1
    max_num_gen_batches = self.config.algorithm.filter_groups.max_num_gen_batches
    if max_num_gen_batches <= 0 or num_gen_batches < max_num_gen_batches:
        print(f'{num_gen_batches=} < {max_num_gen_batches=}. Keep generating...')
        continue
    else:
        raise ValueError(
            f'{num_gen_batches=} >= {max_num_gen_batches=}. Generated too many. Please check your data.'
        )
else:
    # Align the batch
    traj_bsz = self.config.data.train_batch_size * self.config.actor_rollout_ref.rollout.n
    batch = batch[:traj_bsz]
```

### Flexible Loss Aggregation Mode (-> Token-level Loss)

An example configuration:

```yaml
actor_rollout_ref:
  actor:
    loss_agg_mode: "token-mean" # / "seq-mean-token-sum" / "seq-mean-token-mean"
    # NOTE: "token-mean" is the default behavior
```

Setting `loss_agg_mode` to `token-mean` will average the (policy gradient) loss over all the tokens of all the sequences in a mini-batch. For example, a 2000-token response then carries 10x the weight of a 200-token one in the gradient, whereas `seq-mean-token-mean` would weight the two sequences equally.

Core relevant code:

```python
if loss_agg_mode == "token-mean":
    loss = verl_F.masked_mean(loss_mat, loss_mask)
elif loss_agg_mode == "seq-mean-token-sum":
    seq_losses = torch.sum(loss_mat * loss_mask, dim=-1)  # token-sum
    loss = torch.mean(seq_losses)  # seq-mean
elif loss_agg_mode == "seq-mean-token-mean":
    seq_losses = torch.sum(loss_mat * loss_mask, dim=-1) / torch.sum(loss_mask, dim=-1)  # token-mean
    loss = torch.mean(seq_losses)  # seq-mean
else:
    raise ValueError(f"Invalid loss_agg_mode: {loss_agg_mode}")
```

### Overlong Reward Shaping

An example configuration:

```yaml
data:
  max_response_length: 20480 # 16384 + 4096
reward_model:
  overlong_buffer:
    enable: True
    len: 4096
    penalty_factor: 1.0
```

Setting `overlong_buffer.enable` to `True` will penalize outputs that are overlong but still within the hard context limit. Specifically, the penalty increases linearly from `0` to `overlong_buffer.penalty_factor` as the output length grows through the last `overlong_buffer.len` tokens of the response budget, i.e., from `max_response_length - overlong_buffer.len` up to `max_response_length` tokens.

Core relevant code:

```python
if self.overlong_buffer_cfg.enable:
    overlong_buffer_len = self.overlong_buffer_cfg.len
    expected_len = self.max_resp_len - overlong_buffer_len
    exceed_len = valid_response_length - expected_len
    overlong_penalty_factor = self.overlong_buffer_cfg.penalty_factor
    overlong_reward = min(-exceed_len / overlong_buffer_len * overlong_penalty_factor, 0)
    reward += overlong_reward
```
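As a quick numeric check of the shaping above (a standalone sketch; the function name is ours, and the constants mirror the example configuration):

```python
def overlong_penalty(resp_len: int, max_resp_len: int = 20480,
                     buffer_len: int = 4096, penalty_factor: float = 1.0) -> float:
    """Linear penalty over the last `buffer_len` tokens of the response budget."""
    expected_len = max_resp_len - buffer_len  # the first 16384 tokens are penalty-free
    exceed_len = resp_len - expected_len
    return min(-exceed_len / buffer_len * penalty_factor, 0.0)

print(overlong_penalty(16000))  # 0.0  (below the buffer: no penalty)
print(overlong_penalty(18432))  # -0.5 (halfway through the buffer)
print(overlong_penalty(20480))  # -1.0 (at the hard limit: full penalty)
```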
## FAQ

### Where is the "Overlong Filtering" in the paper?

Most experiments in the paper, including the best-performing one, are run without Overlong Filtering, because it largely overlaps with Overlong Reward Shaping in terms of properly learning from the longest outputs. So we don't implement it here.

### What's the difference between [the `recipe/dapo` directory in the `main` branch](https://github.com/volcengine/verl/tree/main/recipe/dapo) and the [`recipe/dapo` branch](https://github.com/volcengine/verl/tree/recipe/dapo/recipe/dapo)?

[The `recipe/dapo` branch](https://github.com/volcengine/verl/tree/recipe/dapo/recipe/dapo) is for **as-is reproduction** and thus won't be updated with new features. [The `recipe/dapo` directory in the `main` branch](https://github.com/volcengine/verl/tree/main/recipe/dapo) works as an example of how to extend the latest `verl` to implement an algorithm recipe, and will be maintained with new features.

### Why can't I produce similar results after modifications?

Today's RL infrastructure is still not fully robust, and we are working hard to improve it. We strongly recommend modifying only one thing at a time. We also list some known problems here:

1. Enabling CUDA graph (`enforce_eager=False`) might cause model performance degradation, whose cause is still under investigation.

================================================
FILE: verl_rl/recipe/dapo/config/dapo_trainer.yaml
================================================
hydra:
  searchpath:
    - file://verl/trainer/config

defaults:
  - ppo_trainer
  - _self_

data:
  gen_batch_size: ${data.train_batch_size}

reward_model:
  reward_manager: dapo
  overlong_buffer:
    enable: False # We try to avoid forgetting to set enable
    len: 0
    penalty_factor: 0.0
    log: False

algorithm:
  filter_groups:
    _target_: verl.trainer.config.FilterGroupsConfig
    enable: False # We try to avoid forgetting to set enable
    metric: null # acc / score / seq_reward / seq_final_reward / ...
    max_num_gen_batches: 0 # Non-positive values mean no upper limit

trainer:
  project_name: verl-dapo

================================================
FILE: verl_rl/recipe/dapo/dapo_ray_trainer.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
FSDP PPO Trainer with Ray-based single controller.
This trainer supports model-agnostic model initialization with HuggingFace.
"""

import uuid
from collections import defaultdict
from copy import deepcopy
from pprint import pprint

import numpy as np
import torch
from tqdm import tqdm

from verl import DataProto
from verl.trainer.ppo.core_algos import agg_loss
from verl.trainer.ppo.metric_utils import (
    compute_data_metrics,
    compute_throughout_metrics,
    compute_timing_metrics,
    reduce_metrics,
)
from verl.trainer.ppo.ray_trainer import (
    AdvantageEstimator,
    RayPPOTrainer,
    apply_kl_penalty,
    compute_advantage,
    compute_response_mask,
)
from verl.utils.profiler import marked_timer


class RayDAPOTrainer(RayPPOTrainer):
    """
    Note that this trainer runs on the driver process on a single CPU/GPU node.
    """

    def fit(self):
        """
        The training loop of PPO.
        The driver process only needs to call the compute functions of the worker group through RPC
        to construct the PPO dataflow. The light-weight advantage computation is done on the driver process.
""" from omegaconf import OmegaConf from verl.utils.tracking import Tracking logger = Tracking( project_name=self.config.trainer.project_name, experiment_name=self.config.trainer.experiment_name, default_backend=self.config.trainer.logger, config=OmegaConf.to_container(self.config, resolve=True), ) self.global_steps = 0 self.gen_steps = 0 # load checkpoint before doing anything self._load_checkpoint() # perform validation before training # currently, we only support validation using the reward_function. if self.val_reward_fn is not None and self.config.trainer.get("val_before_train", True): val_metrics = self._validate() assert val_metrics, f"{val_metrics=}" pprint(f"Initial validation metrics: {val_metrics}") logger.log(data=val_metrics, step=self.global_steps) if self.config.trainer.get("val_only", False): return # add tqdm progress_bar = tqdm(total=self.total_training_steps, initial=self.global_steps, desc="Training Progress") # we start from step 1 self.global_steps += 1 self.gen_steps += 1 last_val_metrics = None timing_raw = defaultdict(float) batch = None num_prompt_in_batch = 0 num_gen_batches = 0 for epoch in range(self.config.trainer.total_epochs): for batch_dict in self.train_dataloader: metrics = {} do_profile = ( self.global_steps in self.config.trainer.profile_steps if self.config.trainer.profile_steps is not None else False ) with marked_timer("start_profile", timing_raw): if do_profile: self.actor_rollout_wg.start_profile(role="e2e", profile_step=self.global_steps) if self.use_reference_policy: self.ref_policy_wg.start_profile() if self.use_critic: self.critic_wg.start_profile() if self.use_rm: self.rm_wg.start_profile() new_batch: DataProto = DataProto.from_single_dict(batch_dict) num_gen_batches += 1 # pop those keys for generation if "multi_modal_data" in new_batch.non_tensor_batch.keys(): gen_batch = new_batch.pop( batch_keys=["input_ids", "attention_mask", "position_ids"], non_tensor_batch_keys=["raw_prompt_ids", "multi_modal_data"], ) else: gen_batch = new_batch.pop( batch_keys=["input_ids", "attention_mask", "position_ids"], non_tensor_batch_keys=["raw_prompt_ids"], ) gen_batch = gen_batch.repeat(repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True) is_last_step = self.gen_steps >= self.total_training_steps with marked_timer("step", timing_raw): # generate a batch with marked_timer("gen", timing_raw, "red"): gen_batch_output = self.actor_rollout_wg.generate_sequences(gen_batch) timing_raw.update(gen_batch_output.meta_info["timing"]) gen_batch_output.meta_info.pop("timing", None) if self.config.algorithm.adv_estimator == AdvantageEstimator.REMAX: with marked_timer("gen_max", timing_raw, "red"): gen_baseline_batch = deepcopy(gen_batch) gen_baseline_batch.meta_info["do_sample"] = False gen_baseline_output = self.actor_rollout_wg.generate_sequences(gen_baseline_batch) new_batch = new_batch.union(gen_baseline_output) reward_baseline_tensor = self.reward_fn(new_batch) reward_baseline_tensor = reward_baseline_tensor.sum(dim=-1) new_batch.pop(batch_keys=list(gen_baseline_output.batch.keys())) new_batch.batch["reward_baselines"] = reward_baseline_tensor del gen_baseline_batch, gen_baseline_output new_batch.non_tensor_batch["uid"] = np.array( [str(uuid.uuid4()) for _ in range(len(new_batch.batch))], dtype=object ) # repeat to align with repeated responses in rollout new_batch = new_batch.repeat(repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True) new_batch = new_batch.union(gen_batch_output) with marked_timer("reward", timing_raw, "yellow"): 
# compute scores. Support both model and function-based. # We first compute the scores using reward model. Then, we call reward_fn to combine # the results from reward model and rule-based results. if self.use_rm: # we first compute reward model score reward_tensor = self.rm_wg.compute_rm_score(new_batch) new_batch = new_batch.union(reward_tensor) # we combine with rule-based rm reward_extra_infos_dict: dict[str, list] try: reward_result = self.reward_fn(new_batch, return_dict=True) reward_tensor = reward_result["reward_tensor"] reward_extra_infos_dict = reward_result.get("reward_extra_info", {}) except Exception as e: print(f"Error in reward_fn: {e}") reward_tensor = self.reward_fn(new_batch) reward_extra_infos_dict = {} new_batch.batch["token_level_scores"] = reward_tensor if reward_extra_infos_dict: new_batch.non_tensor_batch.update( {k: np.array(v) for k, v in reward_extra_infos_dict.items()} ) # compute rewards. apply_kl_penalty if available if self.config.algorithm.use_kl_in_reward: new_batch, kl_metrics = apply_kl_penalty( new_batch, kl_ctrl=self.kl_ctrl_in_reward, kl_penalty=self.config.algorithm.kl_penalty ) metrics.update( kl_metrics ) # TODO: This will be cleared if we use multiple generation batches else: new_batch.batch["token_level_rewards"] = new_batch.batch["token_level_scores"] if not self.config.algorithm.filter_groups.enable: batch = new_batch else: # NOTE: When the number of prompts after filtering is less than the train batch size, # we skip to the next generation batch metric_name = self.config.algorithm.filter_groups.metric if metric_name == "seq_final_reward": # Turn to numpy for easier filtering new_batch.non_tensor_batch["seq_final_reward"] = ( new_batch.batch["token_level_rewards"].sum(dim=-1).numpy() ) elif metric_name == "seq_reward": new_batch.non_tensor_batch["seq_reward"] = ( new_batch.batch["token_level_scores"].sum(dim=-1).numpy() ) # Collect the sequence reward for each trajectory prompt_uid2metric_vals = defaultdict(list) for uid, metric_val in zip( new_batch.non_tensor_batch["uid"], new_batch.non_tensor_batch[metric_name], strict=True ): prompt_uid2metric_vals[uid].append(metric_val) prompt_uid2metric_std = {} for prompt_uid, metric_vals in prompt_uid2metric_vals.items(): prompt_uid2metric_std[prompt_uid] = np.std(metric_vals) kept_prompt_uids = [ uid for uid, std in prompt_uid2metric_std.items() if std > 0 or len(prompt_uid2metric_vals[uid]) == 1 ] num_prompt_in_batch += len(kept_prompt_uids) kept_traj_idxs = [] for idx, traj_from_prompt_uid in enumerate(new_batch.non_tensor_batch["uid"]): if traj_from_prompt_uid in kept_prompt_uids: kept_traj_idxs.append(idx) new_batch = new_batch[kept_traj_idxs] batch = new_batch if batch is None else DataProto.concat([batch, new_batch]) prompt_bsz = self.config.data.train_batch_size if num_prompt_in_batch < prompt_bsz: print(f"{num_prompt_in_batch=} < {prompt_bsz=}") max_num_gen_batches = self.config.algorithm.filter_groups.max_num_gen_batches if max_num_gen_batches <= 0 or num_gen_batches < max_num_gen_batches: print(f"{num_gen_batches=}. Keep generating...") progress_bar.update(1) self.gen_steps += 1 continue else: raise ValueError( f"{num_gen_batches=} >= {max_num_gen_batches=}." + " Generated too many. Please check if your data are too difficult." + " You could also try setting max_num_gen_batches=0 to enable endless trials."
) else: # Align the batch traj_bsz = self.config.data.train_batch_size * self.config.actor_rollout_ref.rollout.n batch = batch[:traj_bsz] # === Updating === batch.batch["response_mask"] = compute_response_mask(batch) # Balance the number of valid tokens across DP ranks. # NOTE: This usually changes the order of data in the `batch`, # which won't affect the advantage calculation (since it's based on uid), # but might affect the loss calculation (due to the change of mini-batching). # TODO: Decouple the DP balancing and mini-batching. if self.config.trainer.balance_batch: self._balance_batch(batch, metrics=metrics) # compute global_valid tokens batch.meta_info["global_token_num"] = torch.sum(batch.batch["attention_mask"], dim=-1).tolist() # recompute old_log_probs with marked_timer("old_log_prob", timing_raw, "blue"): old_log_prob = self.actor_rollout_wg.compute_log_prob(batch) entropys = old_log_prob.batch["entropys"] response_masks = batch.batch["response_mask"] loss_agg_mode = self.config.actor_rollout_ref.actor.loss_agg_mode entropy_agg = agg_loss(loss_mat=entropys, loss_mask=response_masks, loss_agg_mode=loss_agg_mode) old_log_prob_metrics = {"actor/entropy": entropy_agg.detach().item()} metrics.update(old_log_prob_metrics) old_log_prob.batch.pop("entropys") batch = batch.union(old_log_prob) if self.use_reference_policy: # compute reference log_prob with marked_timer("ref", timing_raw, "olive"): ref_log_prob = self.ref_policy_wg.compute_ref_log_prob(batch) batch = batch.union(ref_log_prob) # compute values if self.use_critic: with marked_timer("values", timing_raw, "cyan"): values = self.critic_wg.compute_values(batch) batch = batch.union(values) with marked_timer("adv", timing_raw, "brown"): # compute advantages, executed on the driver process norm_adv_by_std_in_grpo = self.config.algorithm.get("norm_adv_by_std_in_grpo", True) batch = compute_advantage( batch, adv_estimator=self.config.algorithm.adv_estimator, gamma=self.config.algorithm.gamma, lam=self.config.algorithm.lam, num_repeat=self.config.actor_rollout_ref.rollout.n, norm_adv_by_std_in_grpo=norm_adv_by_std_in_grpo, ) # update critic if self.use_critic: with marked_timer("update_critic", timing_raw, "pink"): critic_output = self.critic_wg.update_critic(batch) critic_output_metrics = reduce_metrics(critic_output.meta_info["metrics"]) metrics.update(critic_output_metrics) # implement critic warmup if self.config.trainer.critic_warmup <= self.global_steps: # update actor with marked_timer("update_actor", timing_raw, "red"): actor_output = self.actor_rollout_wg.update_actor(batch) actor_output_metrics = reduce_metrics(actor_output.meta_info["metrics"]) metrics.update(actor_output_metrics) # validate if ( self.val_reward_fn is not None and self.config.trainer.test_freq > 0 and (is_last_step or self.global_steps % self.config.trainer.test_freq == 0) ): with marked_timer("testing", timing_raw, "green"): val_metrics: dict = self._validate() if is_last_step: last_val_metrics = val_metrics metrics.update(val_metrics) if self.config.trainer.save_freq > 0 and ( is_last_step or self.global_steps % self.config.trainer.save_freq == 0 ): with marked_timer("save_checkpoint", timing_raw, "green"): self._save_checkpoint() with marked_timer("stop_profile", timing_raw): if do_profile: self.actor_rollout_wg.stop_profile() if self.use_reference_policy: self.ref_policy_wg.stop_profile() if self.use_critic: self.critic_wg.stop_profile() if self.use_rm: self.rm_wg.stop_profile() # collect metrics metrics.update(compute_data_metrics(batch=batch, 
use_critic=self.use_critic)) metrics.update(compute_timing_metrics(batch=batch, timing_raw=timing_raw)) # TODO: implement actual TFLOPs and theoretical TFLOPs n_gpus = self.resource_pool_manager.get_n_gpus() metrics.update(compute_throughout_metrics(batch=batch, timing_raw=timing_raw, n_gpus=n_gpus)) timing_raw = defaultdict(float) # clear timing metrics["train/num_gen_batches"] = num_gen_batches batch = None num_prompt_in_batch = 0 num_gen_batches = 0 # TODO: make a canonical logger that supports various backends logger.log(data=metrics, step=self.global_steps) if is_last_step: pprint(f"Final validation metrics: {last_val_metrics}") progress_bar.close() return progress_bar.update(1) self.global_steps += 1 self.gen_steps += 1 ================================================ FILE: verl_rl/recipe/dapo/main_dapo.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Note that we don't combine the main with ray_trainer as ray_trainer is used by other mains. """ import os import socket import hydra import ray from omegaconf import OmegaConf from verl.trainer.ppo.reward import load_reward_manager from verl.utils.device import is_cuda_available from .dapo_ray_trainer import RayDAPOTrainer @hydra.main(config_path="config", config_name="dapo_trainer", version_base=None) def main(config): run_ppo(config) def run_ppo(config) -> None: if not ray.is_initialized(): # this is for local ray cluster ray.init( runtime_env={ "env_vars": {"TOKENIZERS_PARALLELISM": "true", "NCCL_DEBUG": "WARN", "VLLM_LOGGING_LEVEL": "WARN"} }, num_cpus=config.ray_init.num_cpus, ) if ( is_cuda_available and OmegaConf.select(config.trainer, "profile_steps") is not None and len(OmegaConf.select(config.trainer, "profile_steps")) > 0 ): nsight_options = OmegaConf.to_container(config.trainer.controller_nsight_options) runner = TaskRunner.options(runtime_env={"nsight": nsight_options}).remote() else: runner = TaskRunner.remote() ray.get(runner.run.remote(config)) @ray.remote(num_cpus=1) # please make sure main_task is not scheduled on head class TaskRunner: def run(self, config): # print initial config from pprint import pprint from omegaconf import OmegaConf from verl.utils.fs import copy_to_local print(f"TaskRunner hostname: {socket.gethostname()}, PID: {os.getpid()}") pprint(OmegaConf.to_container(config, resolve=True)) # resolve=True will eval symbol values OmegaConf.resolve(config) # download the checkpoint from hdfs local_path = copy_to_local(config.actor_rollout_ref.model.path) # instantiate tokenizer from verl.utils import hf_processor, hf_tokenizer tokenizer = hf_tokenizer(local_path) processor = hf_processor(local_path, use_fast=True) # used for multimodal LLM, could be None # define worker classes if config.actor_rollout_ref.actor.strategy in {"fsdp", "fsdp2"}: assert config.critic.strategy in {"fsdp", "fsdp2"} from verl.single_controller.ray import RayWorkerGroup from verl.workers.fsdp_workers import ActorRolloutRefWorker, CriticWorker
ray_worker_group_cls = RayWorkerGroup elif config.actor_rollout_ref.actor.strategy == "megatron": assert config.actor_rollout_ref.actor.strategy == config.critic.strategy from verl.single_controller.ray.megatron import NVMegatronRayWorkerGroup from verl.workers.megatron_workers import ActorRolloutRefWorker, CriticWorker ray_worker_group_cls = NVMegatronRayWorkerGroup else: raise NotImplementedError from verl.trainer.ppo.ray_trainer import ResourcePoolManager, Role role_worker_mapping = { Role.ActorRollout: ray.remote(ActorRolloutRefWorker), Role.Critic: ray.remote(CriticWorker), } global_pool_id = "global_pool" resource_pool_spec = { global_pool_id: [config.trainer.n_gpus_per_node] * config.trainer.nnodes, } mapping = { Role.ActorRollout: global_pool_id, Role.Critic: global_pool_id, } # we should adopt a multi-source reward function here # - for rule-based rm, we directly call a reward score # - for model-based rm, we call a model # - for code related prompt, we send to a sandbox if there are test cases # - finally, we combine all the rewards together # - The reward type depends on the tag of the data if config.reward_model.enable: if config.reward_model.strategy in {"fsdp", "fsdp2"}: from verl.workers.fsdp_workers import RewardModelWorker elif config.reward_model.strategy == "megatron": from verl.workers.megatron_workers import RewardModelWorker else: raise NotImplementedError role_worker_mapping[Role.RewardModel] = ray.remote(RewardModelWorker) mapping[Role.RewardModel] = global_pool_id # reference model if config.algorithm.use_kl_in_reward or config.actor_rollout_ref.actor.use_kl_loss: role_worker_mapping[Role.RefPolicy] = ray.remote(ActorRolloutRefWorker) mapping[Role.RefPolicy] = global_pool_id reward_fn = load_reward_manager( config, tokenizer, 0, max_resp_len=config.data.max_response_length, overlong_buffer_cfg=config.reward_model.overlong_buffer, ) # Note that we always use function-based RM for validation val_reward_fn = load_reward_manager( config, tokenizer, 1, max_resp_len=config.data.max_response_length, overlong_buffer_cfg=config.reward_model.overlong_buffer, ) resource_pool_manager = ResourcePoolManager(resource_pool_spec=resource_pool_spec, mapping=mapping) trainer = RayDAPOTrainer( config=config, tokenizer=tokenizer, processor=processor, role_worker_mapping=role_worker_mapping, resource_pool_manager=resource_pool_manager, ray_worker_group_cls=ray_worker_group_cls, reward_fn=reward_fn, val_reward_fn=val_reward_fn, ) trainer.init_workers() trainer.fit() if __name__ == "__main__": main() ================================================ FILE: verl_rl/recipe/dapo/prepare_dapo_data.sh ================================================ #!/usr/bin/env bash set -uxo pipefail export VERL_HOME=${VERL_HOME:-"${HOME}/verl"} export TRAIN_FILE=${TRAIN_FILE:-"${VERL_HOME}/data/dapo-math-17k.parquet"} export TEST_FILE=${TEST_FILE:-"${VERL_HOME}/data/aime-2024.parquet"} export OVERWRITE=${OVERWRITE:-0} mkdir -p "${VERL_HOME}/data" if [ ! -f "${TRAIN_FILE}" ] || [ "${OVERWRITE}" -eq 1 ]; then wget -O "${TRAIN_FILE}" "https://huggingface.co/datasets/BytedTsinghua-SIA/DAPO-Math-17k/resolve/main/data/dapo-math-17k.parquet?download=true" fi if [ ! 
-f "${TEST_FILE}" ] || [ "${OVERWRITE}" -eq 1 ]; then wget -O "${TEST_FILE}" "https://huggingface.co/datasets/BytedTsinghua-SIA/AIME-2024/resolve/main/data/aime-2024.parquet?download=true" fi ================================================ FILE: verl_rl/recipe/dapo/run_dapo_early_qwen2.5_32b.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail project_name='DAPO' exp_name='DAPO-Early-Qwen2.5-32B' adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 20)) enable_overlong_buffer=True overlong_buffer_len=$((1024 * 4)) overlong_penalty_factor=1.0 # An early version for DAPO loss_agg_mode="seq-mean-token-mean" enable_filter_groups=False gen_prompt_bsz=512 # NOTE: no filtering here train_prompt_bsz=512 train_prompt_mini_bsz=32 n_resp_per_prompt=16 # Ray RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} WORKING_DIR=${WORKING_DIR:-"${PWD}"} RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} NNODES=${NNODES:-16} # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen2.5-32B"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"} TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"} # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_top_p=0.7 # Performance Related Parameter sp_size=8 use_dynamic_bsz=True actor_ppo_max_token_len=$((max_prompt_length + max_response_length)) infer_ppo_max_token_len=$((max_prompt_length + max_response_length)) offload=True gen_tp=4 ray job submit --no-wait --runtime-env="${RUNTIME_ENV}" \ --working-dir "${WORKING_DIR}" \ -- python3 -m recipe.dapo.main_dapo \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.gen_batch_size=${gen_prompt_bsz} \ data.train_batch_size=${train_prompt_bsz} \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ algorithm.filter_groups.enable=${enable_filter_groups} \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ 
actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.rollout.gpu_memory_utilization=0.80 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k="${top_k}" \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=True \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.actor.fsdp_config.fsdp_size=-1 \ reward_model.reward_manager=dapo \ reward_model.overlong_buffer.enable=${enable_overlong_buffer} \ reward_model.overlong_buffer.len=${overlong_buffer_len} \ reward_model.overlong_buffer.penalty_factor=${overlong_penalty_factor} \ trainer.logger='["console","wandb"]' \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node=8 \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=True \ trainer.test_freq=5 \ trainer.save_freq=5 \ trainer.total_epochs=1 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=auto ================================================ FILE: verl_rl/recipe/dapo/run_dapo_qwen2.5_32b.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail project_name='DAPO' exp_name='DAPO-Qwen2.5-32B' adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 20)) enable_overlong_buffer=True overlong_buffer_len=$((1024 * 4)) overlong_penalty_factor=1.0 loss_agg_mode="token-mean" enable_filter_groups=True filter_groups_metric=acc max_num_gen_batches=10 train_prompt_bsz=512 gen_prompt_bsz=$((train_prompt_bsz * 3)) n_resp_per_prompt=16 train_prompt_mini_bsz=32 # Ray RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} WORKING_DIR=${WORKING_DIR:-"${PWD}"} RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} NNODES=${NNODES:-16} # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen2.5-32B"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"} TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"} # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_top_p=0.7 # Performance Related Parameter sp_size=8 use_dynamic_bsz=True actor_ppo_max_token_len=$((max_prompt_length + max_response_length)) infer_ppo_max_token_len=$((max_prompt_length + max_response_length)) offload=True gen_tp=4 ray job submit --no-wait --runtime-env="${RUNTIME_ENV}" \ --working-dir "${WORKING_DIR}" \ -- python3 -m recipe.dapo.main_dapo \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" 
\ data.prompt_key=prompt \ data.truncation='left' \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.gen_batch_size=${gen_prompt_bsz} \ data.train_batch_size=${train_prompt_bsz} \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ algorithm.filter_groups.enable=${enable_filter_groups} \ algorithm.filter_groups.max_num_gen_batches=${max_num_gen_batches} \ algorithm.filter_groups.metric=${filter_groups_metric} \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.rollout.gpu_memory_utilization=0.80 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k="${top_k}" \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=True \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.actor.fsdp_config.fsdp_size=-1 \ reward_model.reward_manager=dapo \ reward_model.overlong_buffer.enable=${enable_overlong_buffer} \ reward_model.overlong_buffer.len=${overlong_buffer_len} \ reward_model.overlong_buffer.penalty_factor=${overlong_penalty_factor} \ trainer.logger='["console","wandb"]' \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node=8 \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=True \ trainer.test_freq=5 \ trainer.save_freq=5 \ trainer.total_epochs=1 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=auto ================================================ FILE: 
verl_rl/recipe/dapo/run_dapo_wo_ds_qwen2.5_32b.sh ================================================ #!/usr/bin/env bash set -euxo pipefail # DAPO (w/o Dynamic Sampling) project_name='DAPO-verl' exp_name='DAPO-wo-DS-Qwen2.5-32B' adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 20)) enable_overlong_buffer=True overlong_buffer_len=$((1024 * 4)) overlong_penalty_factor=1.0 loss_agg_mode="token-mean" enable_filter_groups=False train_prompt_bsz=512 n_resp_per_prompt=16 train_prompt_mini_bsz=32 # Ray RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} WORKING_DIR=${WORKING_DIR:-"${PWD}"} RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} NNODES=${NNODES:-16} # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen2.5-32B"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"} TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"} # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_top_p=0.7 # Performance Related Parameter sp_size=8 use_dynamic_bsz=True actor_ppo_max_token_len=$((max_prompt_length + max_response_length)) infer_ppo_max_token_len=$((max_prompt_length + max_response_length)) offload=True gen_tp=4 ray job submit --no-wait --runtime-env="${RUNTIME_ENV}" \ --working-dir "${WORKING_DIR}" \ -- python3 -m recipe.dapo.main_dapo \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.train_batch_size=${train_prompt_bsz} \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ algorithm.filter_groups.enable=${enable_filter_groups} \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \ 
actor_rollout_ref.rollout.gpu_memory_utilization=0.7 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k="${top_k}" \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=True \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.actor.fsdp_config.fsdp_size=-1 \ reward_model.reward_manager=dapo \ reward_model.overlong_buffer.enable=${enable_overlong_buffer} \ reward_model.overlong_buffer.len=${overlong_buffer_len} \ reward_model.overlong_buffer.penalty_factor=${overlong_penalty_factor} \ trainer.logger='["console","wandb"]' \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node=8 \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=True \ trainer.test_freq=5 \ trainer.save_freq=5 \ trainer.total_epochs=1 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=auto ================================================ FILE: verl_rl/recipe/dapo/runtime_env.yaml ================================================ working_dir: ./ excludes: ["/.git/"] env_vars: TORCH_NCCL_AVOID_RECORD_STREAMS: "1" VLLM_USE_V1: "1" ================================================ FILE: verl_rl/recipe/dapo/test_dapo_7b.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail project_name='DAPO' exp_name='DAPO-Qwen2.5-7B-Math-Test' adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 2)) enable_overlong_buffer=True overlong_buffer_len=512 overlong_penalty_factor=1.0 loss_agg_mode="token-mean" enable_filter_groups=True filter_groups_metric=acc max_num_gen_batches=10 train_prompt_bsz=512 gen_prompt_bsz=$((train_prompt_bsz * 3)) train_prompt_mini_bsz=32 n_resp_per_prompt=16 # Ray RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} WORKING_DIR=${WORKING_DIR:-"${PWD}"} RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} NNODES=${NNODES:-4} # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen2.5-Math-7B"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"} TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"} # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout # Mathematically equivalent use_dynamic_bsz=True infer_micro_batch_size=null train_micro_batch_size=null offload=False ray job submit --no-wait --runtime-env="${RUNTIME_ENV}" \ --working-dir "${WORKING_DIR}" \ -- python3 -m recipe.dapo.main_dapo \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.gen_batch_size=${gen_prompt_bsz} \ 
data.train_batch_size=${train_prompt_bsz} \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ algorithm.filter_groups.enable=${enable_filter_groups} \ algorithm.filter_groups.metric=${filter_groups_metric} \ algorithm.filter_groups.max_num_gen_batches=${max_num_gen_batches} \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.ppo_micro_batch_size=${train_micro_batch_size} \ actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=1 \ actor_rollout_ref.rollout.gpu_memory_utilization=0.85 \ actor_rollout_ref.rollout.log_prob_micro_batch_size=${infer_micro_batch_size} \ actor_rollout_ref.rollout.tensor_model_parallel_size=1 \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k="${top_k}" \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=True \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.ref.log_prob_micro_batch_size=${infer_micro_batch_size} \ actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=1 \ actor_rollout_ref.actor.fsdp_config.fsdp_size=-1 \ reward_model.reward_manager=dapo \ reward_model.overlong_buffer.enable=${enable_overlong_buffer} \ reward_model.overlong_buffer.len=${overlong_buffer_len} \ reward_model.overlong_buffer.penalty_factor=${overlong_penalty_factor} \ trainer.logger='["console","wandb"]' \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node=8 \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=True \ trainer.test_freq=2 \ trainer.save_freq=2 \ trainer.total_epochs=1 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=disable 
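The run scripts above hard-code several batch-size relationships that the trainer's dynamic-sampling and mini-batching logic relies on. As a standalone sanity check of those invariants (our own helper, not part of the recipe), using the values from `test_dapo_7b.sh`:

```python
# Standalone sanity check (not part of the recipe) for the batch-size
# invariants the run scripts assume, using the test_dapo_7b.sh values.
train_prompt_bsz = 512
gen_prompt_bsz = train_prompt_bsz * 3  # oversample so group filtering can discard prompts
train_prompt_mini_bsz = 32
n_resp_per_prompt = 16

# The trainer truncates to exactly train_batch_size * rollout.n trajectories.
traj_bsz = train_prompt_bsz * n_resp_per_prompt
assert gen_prompt_bsz >= train_prompt_bsz             # filtering can only shrink a batch
assert train_prompt_bsz % train_prompt_mini_bsz == 0  # mini-batches tile the train batch
print(f"{traj_bsz=} trajectories per training step")  # traj_bsz=8192 trajectories per training step
```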
================================================ FILE: verl_rl/recipe/dapo/test_dapo_7b_math.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail project_name='DAPO' exp_name='DAPO-Qwen2.5-7b-MATH-0527a1' adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 8)) enable_overlong_buffer=True overlong_buffer_len=$((1024 * 4)) overlong_penalty_factor=1.0 loss_agg_mode="token-mean" train_prompt_bsz=512 n_resp_per_prompt=16 train_prompt_mini_bsz=32 # Ray # RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} # WORKING_DIR=${WORKING_DIR:-"${PWD}"} # RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} NNODES=${NNODES:-8} NGPUS_PER_NODE=${NGPUS_PER_NODE:-8} # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} # very important! please modify the max_position_embeddings in config.json to 32768 after downloading from huggingface MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen2.5-Math-7B"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"} TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"} # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_top_p=0.7 # Performance Related Parameter sp_size=4 use_dynamic_bsz=True actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 2)) infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 3)) offload=True gen_tp=4 fsdp_size=32 # reference run wandb: https://wandb.ai/verl-org/DAPO%20Reproduction%20on%20verl/runs/ow47vvon?nw=nwusertongyuxuan361 python3 -m verl.trainer.main_ppo \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.train_batch_size=${train_prompt_bsz} \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.model.use_remove_padding=True \ +actor_rollout_ref.model.override_config.max_position_embeddings=32768 \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \ 
actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.rollout.gpu_memory_utilization=0.80 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=True \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.actor.fsdp_config.fsdp_size=${fsdp_size} \ reward_model.reward_manager=dapo \ +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \ +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \ +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \ +reward_model.reward_kwargs.overlong_buffer_cfg.log=False \ +reward_model.reward_kwargs.max_resp_len=${max_response_length} \ trainer.logger='["console","wandb"]' \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node="${NGPUS_PER_NODE}" \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=True \ trainer.test_freq=10 \ trainer.save_freq=10 \ trainer.total_epochs=10 \ trainer.total_training_steps=200 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=auto \ trainer.log_val_generations=10 ================================================ FILE: verl_rl/recipe/dapo/test_dapo_7b_math_lora.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail project_name='DAPO' exp_name='DAPO-Qwen2.5-7b-MATH-0527a1' adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 8)) enable_overlong_buffer=True overlong_buffer_len=$((1024 * 4)) overlong_penalty_factor=1.0 loss_agg_mode="token-mean" train_prompt_bsz=512 n_resp_per_prompt=16 train_prompt_mini_bsz=32 # Ray # RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} # WORKING_DIR=${WORKING_DIR:-"${PWD}"} # RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} NNODES=${NNODES:-8} NGPUS_PER_NODE=${NGPUS_PER_NODE:-8} # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen2.5-Math-7B"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"} TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"} # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_top_p=0.7 # Performance Related Parameter sp_size=4 use_dynamic_bsz=True actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 2)) infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 3)) offload=True gen_tp=4 fsdp_size=32 # remember to set VLLM_ALLOW_LONG_MAX_MODEL_LEN=1 for this model python3 -m 
verl.trainer.main_ppo \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.train_batch_size=${train_prompt_bsz} \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.model.use_remove_padding=True \ +actor_rollout_ref.model.override_config.max_position_embeddings=32768 \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.model.lora_rank=8 \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.rollout.gpu_memory_utilization=0.80 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=True \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.actor.fsdp_config.fsdp_size=${fsdp_size} \ reward_model.reward_manager=dapo \ +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \ +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \ +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \ +reward_model.reward_kwargs.overlong_buffer_cfg.log=False \ +reward_model.reward_kwargs.max_resp_len=${max_response_length} \ trainer.logger='["console","wandb"]' \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node="${NGPUS_PER_NODE}" \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=True \ trainer.test_freq=10 \ 
trainer.save_freq=10 \ trainer.total_epochs=10 \ trainer.total_training_steps=200 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=auto \ trainer.log_val_generations=10 ================================================ FILE: verl_rl/recipe/dapo/test_dapo_7b_math_megatron.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail project_name='DAPO' exp_name='DAPO-Qwen2.5-7b-MATH-megatron-0519a1' adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 8)) enable_overlong_buffer=True overlong_buffer_len=$((1024 * 4)) overlong_penalty_factor=1.0 loss_agg_mode="token-mean" train_prompt_bsz=512 n_resp_per_prompt=16 train_prompt_mini_bsz=32 # Ray RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} WORKING_DIR=${WORKING_DIR:-"${PWD}"} RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} NNODES=${NNODES:-4} # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen2.5-Math-7B"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"} TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"} # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_top_p=0.7 # Performance Related Parameter use_dynamic_bsz=True actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 2)) infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 3)) offload=True gen_tp=4 train_tp=4 train_pp=2 # TODO: support dynamic_bsz for megatron # actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ # actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ # actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ # actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ # actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ # actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ python3 -m verl.trainer.main_ppo \ --config-path=config \ --config-name='ppo_megatron_trainer.yaml' \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.train_batch_size=${train_prompt_bsz} \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=2 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.megatron.param_offload=${offload} \ 
actor_rollout_ref.actor.megatron.optimizer_offload=${offload} \ actor_rollout_ref.actor.megatron.grad_offload=${offload} \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=${train_pp} \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=${train_tp} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.optim.clip_grad=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.rollout.gpu_memory_utilization=0.80 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=True \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=${train_pp} \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=${train_tp} \ actor_rollout_ref.ref.megatron.param_offload=${offload} \ reward_model.reward_manager=dapo \ +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \ +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \ +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \ +reward_model.reward_kwargs.overlong_buffer_cfg.log=False \ +reward_model.reward_kwargs.max_resp_len=${max_response_length} \ trainer.logger='["console","wandb"]' \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node=16 \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=False \ trainer.test_freq=10 \ trainer.save_freq=10 \ trainer.total_epochs=10 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=auto \ trainer.log_val_generations=10 ================================================ FILE: verl_rl/recipe/dapo/test_dapo_dspk_671b_megatron.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail # 0. download the config # only need to download the configuration_deepseek.py and config.json # remove the `quantization_config` in the `config.json` # set `num_nextn_predict_layers=0` to disable MTP, which is not currently supported huggingface-cli download deepseek-ai/DeepSeek-V3-0324 configuration_deepseek.py config.json project_name='DAPO' exp_name='DAPO-DeepSeek-671b-megatron' adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 4)) enable_overlong_buffer=True overlong_buffer_len=$((1024 * 4)) overlong_penalty_factor=0.1 loss_agg_mode="token-mean" train_prompt_bsz=512 # must be > n_gpus. need to fix n_resp_per_prompt=2 train_prompt_mini_bsz=16 # mini_bsz * n >= micro_bsz * pp * dp NNODES=${NNODES:-64} # 1. 
download the dist_ckpt format model from https://huggingface.co/BearBiscuit05/dpsk-v3-671B-BF16-dist_ckpt/tree/main # change the MODEL_PATH and MCORE_MODEL_PATH to your own path # Paths MODEL_PATH="" MCORE_MODEL_PATH="" RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"} aime24_test_path=${RAY_DATA_HOME}/data/aime-2024.parquet # TEST_FILE="['$math500_test_path', '$aime24_test_path']" TEST_FILE="['$aime24_test_path']" # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_top_p=0.7 # Performance Related Parameter use_dynamic_bsz=True actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 2)) infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 3)) offload=True gen_tp=32 train_tp=1 train_ep=32 train_pp=16 python3 -m verl.trainer.main_ppo \ --config-path=config \ --config-name='ppo_megatron_trainer.yaml' \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.train_batch_size=${train_prompt_bsz} \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=2 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=2 \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=2 \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.megatron.param_offload=${offload} \ actor_rollout_ref.actor.megatron.optimizer_offload=${offload} \ actor_rollout_ref.actor.megatron.grad_offload=${offload} \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=${train_pp} \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=${train_tp} \ actor_rollout_ref.actor.megatron.expert_model_parallel_size=${train_ep} \ actor_rollout_ref.actor.megatron.dist_checkpointing_path=${MCORE_MODEL_PATH} \ actor_rollout_ref.actor.megatron.use_dist_checkpointing=True \ +actor_rollout_ref.actor.megatron.override_transformer_config.num_layers_in_first_pipeline_stage=3 \ +actor_rollout_ref.actor.megatron.override_transformer_config.num_layers_in_last_pipeline_stage=2 \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.optim.clip_grad=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ 
actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=True \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=${train_pp} \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=${train_tp} \ actor_rollout_ref.ref.megatron.expert_model_parallel_size=${train_ep} \ actor_rollout_ref.ref.megatron.param_offload=${offload} \ actor_rollout_ref.ref.megatron.dist_checkpointing_path=${MCORE_MODEL_PATH} \ actor_rollout_ref.ref.megatron.use_dist_checkpointing=True \ reward_model.reward_manager=dapo \ +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \ +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \ +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \ +reward_model.reward_kwargs.overlong_buffer_cfg.log=False \ +reward_model.reward_kwargs.max_resp_len=${max_response_length} \ trainer.logger='["console","wandb"]' \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node=8 \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=False \ trainer.test_freq=5 \ trainer.save_freq=5 \ trainer.total_epochs=10 \ trainer.total_training_steps=10 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=auto \ trainer.log_val_generations=10 ================================================ FILE: verl_rl/recipe/dapo/test_dapo_qwen3_30b_math.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail project_name='DAPO' exp_name='DAPO-Qwen3-30B-A3B-Base-MATH-0527a1' adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 8)) enable_overlong_buffer=True overlong_buffer_len=$((1024 * 4)) overlong_penalty_factor=1.0 loss_agg_mode="token-mean" train_prompt_bsz=512 n_resp_per_prompt=16 train_prompt_mini_bsz=32 # Ray # RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} # WORKING_DIR=${WORKING_DIR:-"${PWD}"} # RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} NNODES=${NNODES:-8} NGPUS_PER_NODE=${NGPUS_PER_NODE:-8} # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen3-30B-A3B-Base"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"} TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"} # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_top_p=0.7 # Performance Related Parameter sp_size=4 use_dynamic_bsz=True actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 2)) infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 3)) offload=True gen_tp=4 fsdp_size=32 python3 -m verl.trainer.main_ppo \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.train_batch_size=${train_prompt_bsz} \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ 
actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.rollout.gpu_memory_utilization=0.80 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=True \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.actor.fsdp_config.fsdp_size=${fsdp_size} \ reward_model.reward_manager=dapo \ +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \ +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \ +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \ +reward_model.reward_kwargs.overlong_buffer_cfg.log=False \ +reward_model.reward_kwargs.max_resp_len=${max_response_length} \ trainer.logger='["console","wandb"]' \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node="${NGPUS_PER_NODE}" \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=True \ trainer.test_freq=10 \ trainer.save_freq=10 \ trainer.total_epochs=10 \ trainer.total_training_steps=300 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=auto \ trainer.log_val_generations=10 ================================================ FILE: verl_rl/recipe/dapo/test_dapo_qwen3_30b_math_single_node.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail project_name='DAPO' exp_name='DAPO-Qwen3-30B-A3B-Base-MATH-0719a1' adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 max_prompt_length=$((1024 * 2)) 
max_response_length=$((1024 * 4)) enable_overlong_buffer=False overlong_buffer_len=$((1024 * 4)) overlong_penalty_factor=0.1 loss_agg_mode="token-mean" train_prompt_bsz=64 n_resp_per_prompt=16 train_prompt_mini_bsz=16 # Ray # RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} # WORKING_DIR=${WORKING_DIR:-"${PWD}"} # RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} NNODES=${NNODES:-1} NGPUS_PER_NODE=${NGPUS_PER_NODE:-8} # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen3-30B-A3B-Base"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"} TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"} # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_top_p=0.7 # Performance Related Parameter sp_size=4 use_dynamic_bsz=True actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 1)) infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 3)) offload=True gen_tp=4 fsdp_size=8 python3 -m verl.trainer.main_ppo \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.train_batch_size=${train_prompt_bsz} \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.rollout.gpu_memory_utilization=0.9 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ 
actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=True \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.actor.fsdp_config.fsdp_size=${fsdp_size} \ reward_model.reward_manager=dapo \ +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \ +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \ +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \ +reward_model.reward_kwargs.overlong_buffer_cfg.log=False \ +reward_model.reward_kwargs.max_resp_len=${max_response_length} \ trainer.logger='["console","wandb"]' \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node="${NGPUS_PER_NODE}" \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=True \ trainer.test_freq=10 \ trainer.save_freq=-1 \ trainer.total_epochs=10 \ trainer.total_training_steps=300 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=auto \ trainer.log_val_generations=10 ================================================ FILE: verl_rl/recipe/entropy/32b_clip_cov.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail export WANDB_API_KEY=YOUR_WANDB_API_KEY # export VLLM_USE_V1=1 project_name='Qwen2.5-32B' exp_name='clipcov' adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=1 clip_ratio_high=1 clip_cov_ratio=0.0002 clip_cov_lb=1.0 clip_cov_ub=5.0 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 8)) enable_overlong_buffer=False overlong_buffer_len=$((1024 * 2)) overlong_penalty_factor=1.0 loss_agg_mode="token-mean" loss_mode="clip_cov" enable_filter_groups=True filter_groups_metric=acc max_num_gen_batches=10 train_prompt_bsz=256 gen_prompt_bsz=$((train_prompt_bsz * 3)) train_prompt_mini_bsz=32 n_resp_per_prompt=8 max_token=20480 # Ray RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} WORKING_DIR=${WORKING_DIR:-"${PWD}"} RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} NNODES=${NNODES:-4} # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} MODEL_PATH=${MODEL_PATH:-"/YOUR_MODELPATH"} CKPTS_DIR=${CKPTS_DIR:-"/YOUR_CKPTS_PATH"} TRAIN_FILE=${TRAIN_FILE:-"/YOUR_TRAIN_FILE_PATH"} TEST_FILE=${TEST_FILE:-["/YOUR_TRAIN_FILE_PATH"]} # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout ppo_kl_coef=1 kl_cov_ratio=0.02 # Mathematically equivalent use_dynamic_bsz=True infer_micro_batch_size=null train_micro_batch_size=null offload=False HYDRA_FULL_ERROR=1 python -m recipe.entropy.main_entropy \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.filter_overlong_prompts=False \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.gen_batch_size=${gen_prompt_bsz} \ data.train_batch_size=${train_prompt_bsz} \ data.return_raw_chat=True \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ 
actor_rollout_ref.actor.policy_loss.loss_mode=${loss_mode} \ actor_rollout_ref.actor.policy_loss.clip_cov_ratio=${clip_cov_ratio} \ actor_rollout_ref.actor.policy_loss.clip_cov_lb=${clip_cov_lb} \ actor_rollout_ref.actor.policy_loss.clip_cov_ub=${clip_cov_ub} \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=8 \ actor_rollout_ref.rollout.mode=sync \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ algorithm.filter_groups.enable=${enable_filter_groups} \ algorithm.filter_groups.metric=${filter_groups_metric} \ algorithm.filter_groups.max_num_gen_batches=${max_num_gen_batches} \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${max_token} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${max_token} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${max_token} \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.weight_decay=0 \ actor_rollout_ref.actor.optim.warmup_style=constant \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.ppo_micro_batch_size=${train_micro_batch_size} \ actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=1 \ actor_rollout_ref.actor.clip_cov_ratio=${clip_cov_ratio} \ actor_rollout_ref.actor.clip_cov_lb=${clip_cov_lb} \ actor_rollout_ref.actor.clip_cov_ub=${clip_cov_ub} \ actor_rollout_ref.rollout.gpu_memory_utilization=0.85 \ actor_rollout_ref.rollout.log_prob_micro_batch_size=${infer_micro_batch_size} \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=${max_token} \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k="${top_k}" \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=False \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.ref.log_prob_micro_batch_size=${infer_micro_batch_size} \ actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=1 \ actor_rollout_ref.actor.fsdp_config.fsdp_size=-1 \ reward_model.reward_manager=dapo \ reward_model.overlong_buffer.enable=${enable_overlong_buffer} \ reward_model.overlong_buffer.len=${overlong_buffer_len} \ reward_model.overlong_buffer.penalty_factor=${overlong_penalty_factor} \ trainer.logger='["console","wandb"]' \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node=8 \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=False \ trainer.test_freq=4 \ trainer.save_freq=32 \ trainer.total_epochs=1000 \ trainer.default_local_dir="${CKPTS_DIR}" \ 
trainer.resume_mode=disable ================================================ FILE: verl_rl/recipe/entropy/32b_kl_cov.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail export WANDB_API_KEY=YOUR_WANDB_API_KEY # export VLLM_USE_V1=1 project_name='Qwen2.5-32B' exp_name='klcov' adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.2 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 8)) enable_overlong_buffer=False overlong_buffer_len=$((1024 * 2)) overlong_penalty_factor=1.0 loss_agg_mode="token-mean" loss_mode="kl_cov" enable_filter_groups=True filter_groups_metric=acc max_num_gen_batches=10 train_prompt_bsz=256 gen_prompt_bsz=$((train_prompt_bsz * 3)) train_prompt_mini_bsz=32 n_resp_per_prompt=8 max_token=20480 # Ray RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} WORKING_DIR=${WORKING_DIR:-"${PWD}"} RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} NNODES=${NNODES:-4} # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} MODEL_PATH=${MODEL_PATH:-"/YOUR_MODELPATH"} CKPTS_DIR=${CKPTS_DIR:-"/YOUR_CKPTS_PATH"} TRAIN_FILE=${TRAIN_FILE:-"/YOUR_TRAIN_FILE_PATH"} TEST_FILE=${TEST_FILE:-["/YOUR_TRAIN_FILE_PATH"]} # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout ppo_kl_coef=1 kl_cov_ratio=0.0002 # Mathematically equivalent use_dynamic_bsz=True infer_micro_batch_size=null train_micro_batch_size=null offload=False HYDRA_FULL_ERROR=1 python -m recipe.entropy.main_entropy \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.filter_overlong_prompts=False \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.gen_batch_size=${gen_prompt_bsz} \ data.train_batch_size=${train_prompt_bsz} \ data.return_raw_chat=True \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.actor.loss_mode=${loss_mode} \ actor_rollout_ref.actor.policy_loss.loss_mode=${loss_mode} \ actor_rollout_ref.actor.policy_loss.kl_cov_ratio=${kl_cov_ratio} \ actor_rollout_ref.actor.policy_loss.ppo_kl_coef=${ppo_kl_coef} \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=8 \ actor_rollout_ref.rollout.mode=sync \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ algorithm.filter_groups.enable=${enable_filter_groups} \ algorithm.filter_groups.metric=${filter_groups_metric} \ algorithm.filter_groups.max_num_gen_batches=${max_num_gen_batches} \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${max_token} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${max_token} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${max_token} \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.optim.lr=1e-6 \ 
actor_rollout_ref.actor.optim.weight_decay=0 \ actor_rollout_ref.actor.optim.warmup_style=constant \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.ppo_micro_batch_size=${train_micro_batch_size} \ actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=1 \ actor_rollout_ref.rollout.gpu_memory_utilization=0.85 \ actor_rollout_ref.rollout.log_prob_micro_batch_size=${infer_micro_batch_size} \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=${max_token} \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k="${top_k}" \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=False \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.ref.log_prob_micro_batch_size=${infer_micro_batch_size} \ actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=1 \ actor_rollout_ref.actor.fsdp_config.fsdp_size=-1 \ reward_model.reward_manager=dapo \ reward_model.overlong_buffer.enable=${enable_overlong_buffer} \ reward_model.overlong_buffer.len=${overlong_buffer_len} \ reward_model.overlong_buffer.penalty_factor=${overlong_penalty_factor} \ trainer.logger='["console","wandb"]' \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node=8 \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=False \ trainer.test_freq=4 \ trainer.save_freq=32 \ trainer.total_epochs=1000 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=disable ================================================ FILE: verl_rl/recipe/entropy/32b_kl_cov_mininbsz.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail export WANDB_API_KEY=YOUR_WANDB_API_KEY # export VLLM_USE_V1=1 project_name='Qwen2.5-32B' exp_name='klcov' adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.2 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 8)) enable_overlong_buffer=False overlong_buffer_len=$((1024 * 2)) overlong_penalty_factor=1.0 loss_agg_mode="token-mean" loss_mode="kl_cov" enable_filter_groups=True filter_groups_metric=acc max_num_gen_batches=10 train_prompt_bsz=256 gen_prompt_bsz=$((train_prompt_bsz * 3)) train_prompt_mini_bsz=16 n_resp_per_prompt=8 max_token=20480 # Ray RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} WORKING_DIR=${WORKING_DIR:-"${PWD}"} RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} NNODES=${NNODES:-4} # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} MODEL_PATH=${MODEL_PATH:-"/YOUR_MODELPATH"} CKPTS_DIR=${CKPTS_DIR:-"/YOUR_CKPTS_PATH"} TRAIN_FILE=${TRAIN_FILE:-"/YOUR_TRAIN_FILE_PATH"} TEST_FILE=${TEST_FILE:-["/YOUR_TRAIN_FILE_PATH"]} # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout ppo_kl_coef=1 kl_cov_ratio=0.0002 # Mathematically equivalent 
use_dynamic_bsz=True infer_micro_batch_size=null train_micro_batch_size=null offload=False HYDRA_FULL_ERROR=1 python -m recipe.entropy.main_entropy \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.filter_overlong_prompts=False \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.gen_batch_size=${gen_prompt_bsz} \ data.train_batch_size=${train_prompt_bsz} \ data.return_raw_chat=True \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.actor.policy_loss.loss_mode=${loss_mode} \ actor_rollout_ref.actor.policy_loss.kl_cov_ratio=${kl_cov_ratio} \ actor_rollout_ref.actor.policy_loss.ppo_kl_coef=${ppo_kl_coef} \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=8 \ actor_rollout_ref.rollout.mode=sync \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ algorithm.filter_groups.enable=${enable_filter_groups} \ algorithm.filter_groups.metric=${filter_groups_metric} \ algorithm.filter_groups.max_num_gen_batches=${max_num_gen_batches} \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${max_token} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${max_token} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${max_token} \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.weight_decay=0 \ actor_rollout_ref.actor.optim.warmup_style=constant \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.ppo_micro_batch_size=${train_micro_batch_size} \ actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=1 \ actor_rollout_ref.rollout.gpu_memory_utilization=0.85 \ actor_rollout_ref.rollout.log_prob_micro_batch_size=${infer_micro_batch_size} \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=${max_token} \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k="${top_k}" \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=False \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.ref.log_prob_micro_batch_size=${infer_micro_batch_size} \ actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=1 \ 
actor_rollout_ref.actor.fsdp_config.fsdp_size=-1 \ reward_model.reward_manager=dapo \ reward_model.overlong_buffer.enable=${enable_overlong_buffer} \ reward_model.overlong_buffer.len=${overlong_buffer_len} \ reward_model.overlong_buffer.penalty_factor=${overlong_penalty_factor} \ trainer.logger='["console","wandb"]' \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node=8 \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=False \ trainer.test_freq=4 \ trainer.save_freq=32 \ trainer.total_epochs=1000 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=disable ================================================ FILE: verl_rl/recipe/entropy/7b_clip_cov.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail export WANDB_API_KEY=YOUR_WANDB_API_KEY # export VLLM_USE_V1=1 project_name='Qwen2.5-7B' exp_name='clipcov' adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=1 clip_ratio_high=1 clip_cov_ratio=0.0002 clip_cov_lb=1.0 clip_cov_ub=5.0 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 8)) enable_overlong_buffer=False overlong_buffer_len=$((1024 * 2)) overlong_penalty_factor=1.0 loss_agg_mode="token-mean" loss_mode="clip_cov" enable_filter_groups=True filter_groups_metric=acc max_num_gen_batches=10 train_prompt_bsz=256 gen_prompt_bsz=$((train_prompt_bsz * 3)) train_prompt_mini_bsz=32 n_resp_per_prompt=8 max_token=30720 # Ray RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} WORKING_DIR=${WORKING_DIR:-"${PWD}"} RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} NNODES=${NNODES:-4} # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} MODEL_PATH=${MODEL_PATH:-"/YOUR_MODELPATH"} CKPTS_DIR=${CKPTS_DIR:-"/YOUR_CKPTS_PATH"} TRAIN_FILE=${TRAIN_FILE:-"/YOUR_TRAIN_FILE_PATH"} TEST_FILE=${TEST_FILE:-["/YOUR_TRAIN_FILE_PATH"]} # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout ppo_kl_coef=1 kl_cov_ratio=0.2 # Mathematically equivalent use_dynamic_bsz=True infer_micro_batch_size=null train_micro_batch_size=null offload=False HYDRA_FULL_ERROR=1 python -m recipe.entropy.main_entropy \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.filter_overlong_prompts=False \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.gen_batch_size=${gen_prompt_bsz} \ data.train_batch_size=${train_prompt_bsz} \ data.return_raw_chat=True \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.actor.policy_loss.loss_mode=${loss_mode} \ actor_rollout_ref.actor.policy_loss.clip_cov_ratio=${clip_cov_ratio} \ actor_rollout_ref.actor.policy_loss.clip_cov_lb=${clip_cov_lb} \ actor_rollout_ref.actor.policy_loss.clip_cov_ub=${clip_cov_ub} \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=8 \ actor_rollout_ref.rollout.mode=sync \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ algorithm.filter_groups.enable=${enable_filter_groups} \ algorithm.filter_groups.metric=${filter_groups_metric} \ 
algorithm.filter_groups.max_num_gen_batches=${max_num_gen_batches} \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${max_token} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${max_token} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${max_token} \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.weight_decay=0 \ actor_rollout_ref.actor.optim.warmup_style=constant \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.ppo_micro_batch_size=${train_micro_batch_size} \ actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=1 \ actor_rollout_ref.rollout.gpu_memory_utilization=0.85 \ actor_rollout_ref.rollout.log_prob_micro_batch_size=${infer_micro_batch_size} \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=${max_token} \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k="${top_k}" \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=False \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.ref.log_prob_micro_batch_size=${infer_micro_batch_size} \ actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=1 \ actor_rollout_ref.actor.fsdp_config.fsdp_size=-1 \ reward_model.reward_manager=dapo \ reward_model.overlong_buffer.enable=${enable_overlong_buffer} \ reward_model.overlong_buffer.len=${overlong_buffer_len} \ reward_model.overlong_buffer.penalty_factor=${overlong_penalty_factor} \ trainer.logger='["console","wandb"]' \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node=8 \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=False \ trainer.test_freq=4 \ trainer.save_freq=32 \ trainer.total_epochs=1000 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=disable ================================================ FILE: verl_rl/recipe/entropy/7b_kl_cov.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail export WANDB_API_KEY=YOUR_WANDB_API_KEY # export VLLM_USE_V1=1 project_name='Qwen2.5-7B' exp_name='klcov' adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.2 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 8)) enable_overlong_buffer=False overlong_buffer_len=$((1024 * 2)) overlong_penalty_factor=1.0 loss_agg_mode="token-mean" loss_mode="kl_cov" enable_filter_groups=True filter_groups_metric=acc max_num_gen_batches=10 train_prompt_bsz=256 gen_prompt_bsz=$((train_prompt_bsz * 3)) 
train_prompt_mini_bsz=32 n_resp_per_prompt=8 max_token=30720 # Ray RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} WORKING_DIR=${WORKING_DIR:-"${PWD}"} RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} NNODES=${NNODES:-4} # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} MODEL_PATH=${MODEL_PATH:-"/YOUR_MODELPATH"} CKPTS_DIR=${CKPTS_DIR:-"/YOUR_CKPTS_PATH"} TRAIN_FILE=${TRAIN_FILE:-"/YOUR_TRAIN_FILE_PATH"} TEST_FILE=${TEST_FILE:-["/YOUR_TRAIN_FILE_PATH"]} # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout ppo_kl_coef=1 kl_cov_ratio=0.002 # Mathematically equivalent use_dynamic_bsz=True infer_micro_batch_size=null train_micro_batch_size=null offload=False HYDRA_FULL_ERROR=1 python -m recipe.entropy.main_entropy \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.filter_overlong_prompts=False \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.gen_batch_size=${gen_prompt_bsz} \ data.train_batch_size=${train_prompt_bsz} \ data.return_raw_chat=True \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.actor.policy_loss.loss_mode=${loss_mode} \ actor_rollout_ref.actor.policy_loss.kl_cov_ratio=${kl_cov_ratio} \ actor_rollout_ref.actor.policy_loss.ppo_kl_coef=${ppo_kl_coef} \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=8 \ actor_rollout_ref.rollout.mode=sync \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ algorithm.filter_groups.enable=${enable_filter_groups} \ algorithm.filter_groups.metric=${filter_groups_metric} \ algorithm.filter_groups.max_num_gen_batches=${max_num_gen_batches} \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${max_token} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${max_token} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${max_token} \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.weight_decay=0 \ actor_rollout_ref.actor.optim.warmup_style=constant \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.ppo_micro_batch_size=${train_micro_batch_size} \ actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=1 \ actor_rollout_ref.rollout.gpu_memory_utilization=0.85 \ actor_rollout_ref.rollout.log_prob_micro_batch_size=${infer_micro_batch_size} \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ 
actor_rollout_ref.rollout.max_num_batched_tokens=${max_token} \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k="${top_k}" \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=False \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.ref.log_prob_micro_batch_size=${infer_micro_batch_size} \ actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=1 \ actor_rollout_ref.actor.fsdp_config.fsdp_size=-1 \ reward_model.reward_manager=dapo \ reward_model.overlong_buffer.enable=${enable_overlong_buffer} \ reward_model.overlong_buffer.len=${overlong_buffer_len} \ reward_model.overlong_buffer.penalty_factor=${overlong_penalty_factor} \ trainer.logger='["console","wandb"]' \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node=8 \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=False \ trainer.test_freq=4 \ trainer.save_freq=32 \ trainer.total_epochs=1000 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=disable ================================================ FILE: verl_rl/recipe/entropy/README.md ================================================
# The Entropy Mechanism of Reinforcement Learning for Large Language Model Reasoning. [![Paper](https://img.shields.io/badge/paper-A42C25?style=for-the-badge&logo=arxiv&logoColor=white)](https://arxiv.org/pdf/2505.22617) [![Github](https://img.shields.io/badge/PRIME-000000?style=for-the-badge&logo=github&logoColor=000&logoColor=white)](https://github.com/PRIME-RL/Entropy-Mechanism-of-RL) [![alphaXiv](https://img.shields.io/badge/discussion-A42C25?style=for-the-badge&logo=arxiv&logoColor=white&color=blue )](https://www.alphaxiv.org/abs/2505.22617) [![Twitter](https://img.shields.io/badge/Twitter-%23000000.svg?style=for-the-badge&logo=twitter&logoColor=white)](https://x.com/stingning/status/1928088554166505667) [![Twitter](https://img.shields.io/badge/Twitter-%23000000.svg?style=for-the-badge&logo=twitter&logoColor=white)](https://x.com/charlesfornlp/status/1928089451080585283) [![Twitter-ak](https://img.shields.io/badge/Twitter-%23000000.svg?style=for-the-badge&logo=twitter&logoColor=white)](https://x.com/_akhaliq/status/1928077929105268861)
# 🎉News
- **[2025/05/29]** 🎉 Ranked **#1** of the day on [Huggingface Daily Papers](https://huggingface.co/papers?date=2025-05-29).
- **[2025/05/29]** Released our paper on arXiv. See [here](https://arxiv.org/pdf/2505.22617). We provide insights into the entropy mechanism of RL for LLMs and propose two simple yet effective strategies to alleviate entropy collapse.

# ✨Getting started
After preparing the training data, you can train Qwen2.5-7B on a single node, taking the KL-Cov approach as an example, by simply running:
```
cd verl
conda activate your_env
bash recipe/entropy/7b_kl_cov.sh
```
For training Qwen2.5-32B on multiple nodes, run:
```
cd verl
conda activate your_env
bash recipe/entropy/32b_kl_cov.sh
```

# 📖Introduction
This paper addresses the entropy collapse issue in scaling reinforcement learning (RL) for large language models (LLMs), where policy entropy drops sharply during training, leading to overconfidence and performance saturation. We empirically establish a relationship between entropy ($H$) and performance ($R$): $R = -a\exp(H) + b$, showing that performance is bottlenecked by entropy exhaustion.
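Since the fitted law predicts $R \to b - a$ as $H \to 0$, it directly exposes the performance ceiling imposed by entropy exhaustion. The following is a minimal sketch (our own illustration, not part of this recipe) of how $a$ and $b$ could be estimated from logged (entropy, validation-score) pairs; the data points below are synthetic:

```python
# Illustrative only: fit R = -a * exp(H) + b to (policy entropy, score) pairs.
import numpy as np
from scipy.optimize import curve_fit

def entropy_performance(H, a, b):
    # the empirical law R = -a * exp(H) + b
    return -a * np.exp(H) + b

H = np.array([0.9, 0.7, 0.5, 0.35, 0.2, 0.1])       # hypothetical entropy log
R = np.array([0.28, 0.34, 0.38, 0.41, 0.43, 0.44])  # matched validation scores

(a, b), _ = curve_fit(entropy_performance, H, R)
print(f"a={a:.3f}, b={b:.3f}, predicted ceiling at H=0: {b - a:.3f}")
```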
Theoretically, we find that entropy changes are driven by the covariance between action probability and logit updates, which correlates with the advantage in policy-gradient methods. High-probability, high-advantage actions reduce entropy, while rare, high-advantage actions increase it. Empirically, the covariance term remains positive, explaining entropy's monotonic decline. To mitigate this, we propose **Clip-Cov** and **KL-Cov**, which restrict updates for high-covariance tokens. These methods effectively prevent entropy collapse and improve performance.
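To make the mechanism concrete, here is a hedged sketch of the KL-Cov idea (our own simplification, not the exact implementation shipped in this recipe; the knob names mirror the `policy_loss` options in `config/entropy_trainer.yaml`): rank tokens by a covariance-style score between advantage and log-probability, then add a KL penalty on the top fraction so that exactly the entropy-consuming updates are damped.

```python
# Hedged sketch of a KL-Cov style loss (illustrative, not this repo's exact code).
import torch

def kl_cov_loss(log_prob, old_log_prob, advantages,
                kl_cov_ratio=0.0002, ppo_kl_coef=0.1):
    ratio = torch.exp(log_prob - old_log_prob)
    pg_loss = -advantages * ratio  # vanilla policy-gradient term
    # per-token covariance-style score between advantage and log-probability
    cov = (advantages - advantages.mean()) * (log_prob - log_prob.mean())
    # mark the top kl_cov_ratio fraction of tokens by this score
    k = max(1, int(kl_cov_ratio * cov.numel()))
    top_idx = torch.topk(cov.flatten(), k).indices
    mask = torch.zeros(cov.numel(), dtype=torch.bool, device=cov.device)
    mask[top_idx] = True
    # on the marked tokens, add a KL penalty toward the rollout policy
    kl_penalty = ppo_kl_coef * (log_prob - old_log_prob).abs()
    loss = torch.where(mask.view_as(cov), pg_loss + kl_penalty, pg_loss)
    return loss.mean()
```

Clip-Cov follows the same selection idea but instead zeroes out (detaches) the policy-gradient contribution of a small fraction of high-covariance tokens, bounded by `clip_cov_lb` and `clip_cov_ub`.

# 📃Evaluation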
Our method is able to maintain a considerably higher level of entropy throughout training. For example, when the baseline's entropy reaches a plateau and can no longer be consumed, the KL-Cov method still sustains an entropy level over 10 times higher. Meanwhile, the response length of the policy model steadily increases, and its performance on the test set consistently surpasses that of the baseline. This indicates that our model is able to explore more freely during training, learning a better policy through RL.

| **Method** | **AIME24** | **AIME25** | **AMC** | **MATH-500** | **OMNI-MATH** | **OlympiadBench** | **Minerva** | **Avg.** |
| ----------------- | ---------: | ---------: | -------: | -----------: | ------------: | ----------------: | ----------: | -------: |
| *Qwen2.5-7B* | | | | | | | | |
| GRPO | 21.2 | 9.6 | 58.7 | 78.8 | 27.9 | 40.7 | 36.7 | 38.6 |
| w. Clip-higher | 18.1 | 11.5 | 56.6 | 79.2 | 29.8 | 43.3 | 40.4 | 38.8 |
| w. **`CLIP-Cov`** | 22.1 | **15.8** | 58.2 | 80.4 | **30.5** | **44.1** | **41.1** | 40.4 |
| w. **`KL-Cov`** | **22.6** | 12.9 | **61.4** | **80.8** | 29.1 | 42.6 | 38.2 | **40.6** |
| *Qwen2.5-32B* | | | | | | | | |
| GRPO | 21.8 | 16.2 | 69.7 | 84.2 | 35.2 | 43.6 | 45.5 | 45.8 |
| w. Clip-higher | 35.6 | 22.3 | 69.5 | 77.2 | 35.1 | 42.5 | 43.0 | 47.2 |
| w. **`CLIP-Cov`** | 32.3 | 22.7 | 67.2 | **87.0** | **42.0** | **57.2** | 46.0 | 50.3 |
| w. **`KL-Cov`** | **36.8** | **30.8** | **74.5** | 84.6 | 39.1 | 49.0 | **46.3** | **52.2** |

Our two approaches both achieve non-trivial improvements across all benchmarks. Our method outperforms GRPO by 2.0% on average for the 7B model and by 6.4% for the 32B model. Moreover, we observe that our method yields more substantial gains on the larger Qwen2.5-32B. Specifically, our method achieves improvements of 15.0% and 14.6% over GRPO on the most challenging benchmarks, AIME24 and AIME25, respectively.

# 🎈Citation
If you find this paper or repo helpful, please cite us.
```bibtex
@article{cui2025entropy,
  title={The Entropy Mechanism of Reinforcement Learning for Reasoning Language Models},
  author={Cui, Ganqu and Zhang, Yuchen and Chen, Jiacheng and Yuan, Lifan and Wang, Zhi and Zuo, Yuxin and Li, Haozhan and Fan, Yuchen and Chen, Huayu and Chen, Weize and others},
  journal={arXiv preprint arXiv:2505.22617},
  year={2025}
}
```

# 🌻Acknowledgement
We implement our reinforcement learning algorithm by extending [verl](https://github.com/volcengine/verl). We utilize [vLLM](https://github.com/vllm-project/vllm) for inference. Our models are trained primarily on the [Qwen2.5 family](https://github.com/QwenLM/Qwen2.5). Our training data is built from [DAPO-MATH](https://huggingface.co/datasets/BytedTsinghua-SIA/DAPO-Math-17k). Thanks for their great contributions!
# 📬 Contact
For questions, discussion, or collaboration opportunities, feel free to contact:
- Ganqu Cui: cuiganqu@pjlab.org.cn
- Yuchen Zhang: yuchen.zhang2003@gmail.com
- Jiacheng Chen: jackchan9345@gmail.com
- Ning Ding: ningding.cs@gmail.com


================================================
FILE: verl_rl/recipe/entropy/config/entropy_trainer.yaml
================================================
hydra:
  searchpath:
    - file://verl/trainer/config

defaults:
  - ppo_trainer
  - _self_

data:
  gen_batch_size: ${data.train_batch_size}

reward_model:
  reward_kwargs:
    overlong_buffer_cfg: ${reward_model.overlong_buffer}
  reward_manager: dapo
  overlong_buffer:
    enable: False
    len: 0
    penalty_factor: 0.0
    log: False

algorithm:
  filter_groups:
    enable: False # We try to avoid forgetting to set enable
    metric: null # acc / score / seq_reward / seq_final_reward / ...
    max_num_gen_batches: 0 # Non-positive values mean no upper limit

trainer:
  project_name: verl-entropy

actor_rollout_ref:
  actor:
    policy_loss:
      loss_mode: "vanilla" # vanilla / clip-cov / kl-cov, from https://arxiv.org/abs/2505.22617
      clip_cov_ratio: 0.0002 # for clip-cov loss
      clip_cov_lb: 1.0 # for clip-cov loss
      clip_cov_ub: 5.0 # for clip-cov loss
      kl_cov_ratio: 0.0002 # for kl-cov loss
      ppo_kl_coef: 0.1 # for kl-cov loss


================================================
FILE: verl_rl/recipe/entropy/entropy_ray_trainer.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
FSDP PPO Trainer with Ray-based single controller.
This trainer supports model-agnostic model initialization with huggingface.
"""

import uuid
from collections import defaultdict
from copy import deepcopy
from pprint import pprint

import numpy as np
import torch
from tqdm import tqdm

from verl import DataProto
from verl.trainer.ppo.metric_utils import (
    compute_data_metrics,
    compute_throughout_metrics,
    compute_timing_metrics,
    reduce_metrics,
)
from verl.trainer.ppo.ray_trainer import (
    AdvantageEstimator,
    RayPPOTrainer,
    apply_kl_penalty,
    compute_advantage,
    compute_response_mask,
)
from verl.utils.profiler import simple_timer


class RayEntropyTrainer(RayPPOTrainer):
    """
    Note that this trainer runs on the driver process on a single CPU/GPU node.
    """

    def fit(self):
        """
        The training loop of PPO.
        The driver process only needs to call the compute functions of the worker group
        through RPC to construct the PPO dataflow.
        The lightweight advantage computation is done on the driver process.
        """
        from omegaconf import OmegaConf

        from verl.utils.tracking import Tracking

        logger = Tracking(
            project_name=self.config.trainer.project_name,
            experiment_name=self.config.trainer.experiment_name,
            default_backend=self.config.trainer.logger,
            config=OmegaConf.to_container(self.config, resolve=True),
        )

        self.global_steps = 0

        # load checkpoint before doing anything
        self._load_checkpoint()

        # perform validation before training
        # currently, we only support validation using the reward_function.
if self.val_reward_fn is not None and self.config.trainer.get("val_before_train", True): val_metrics = self._validate() assert val_metrics, f"{val_metrics=}" pprint(f"Initial validation metrics: {val_metrics}") logger.log(data=val_metrics, step=self.global_steps) if self.config.trainer.get("val_only", False): return # add tqdm progress_bar = tqdm(total=self.total_training_steps, initial=self.global_steps, desc="Training Progress") # we start from step 1 self.global_steps += 1 last_val_metrics = None timing_raw = defaultdict(float) batch = None num_prompt_in_batch = 0 num_gen_batches = 0 for epoch in range(self.config.trainer.total_epochs): for batch_dict in self.train_dataloader: metrics = {} new_batch: DataProto = DataProto.from_single_dict(batch_dict) num_gen_batches += 1 # pop those keys for generation if "multi_modal_inputs" in new_batch.non_tensor_batch.keys(): gen_batch = new_batch.pop( batch_keys=["input_ids", "attention_mask", "position_ids"], non_tensor_batch_keys=["raw_prompt_ids", "multi_modal_data", "multi_modal_inputs"], ) else: gen_batch = new_batch.pop( batch_keys=["input_ids", "attention_mask", "position_ids"], non_tensor_batch_keys=["raw_prompt_ids"], ) gen_batch = gen_batch.repeat(repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True) is_last_step = self.global_steps >= self.total_training_steps with simple_timer("step", timing_raw): # generate a batch # with simple_timer("gen", timing_raw): # gen_batch_output = self.actor_rollout_wg.generate_sequences(gen_batch) with simple_timer("gen", timing_raw): if not self.async_rollout_mode: gen_batch_output = self.actor_rollout_wg.generate_sequences(gen_batch) else: gen_batch_output = self.async_rollout_manager.generate_sequences(gen_batch) if self.config.algorithm.adv_estimator == AdvantageEstimator.REMAX: with simple_timer("gen_max", timing_raw): gen_baseline_batch = deepcopy(gen_batch) gen_baseline_batch.meta_info["do_sample"] = False gen_baseline_output = self.actor_rollout_wg.generate_sequences(gen_baseline_batch) new_batch = new_batch.union(gen_baseline_output) reward_baseline_tensor = self.reward_fn(new_batch) reward_baseline_tensor = reward_baseline_tensor.sum(dim=-1) new_batch.pop(batch_keys=list(gen_baseline_output.batch.keys())) new_batch.batch["reward_baselines"] = reward_baseline_tensor del gen_baseline_batch, gen_baseline_output new_batch.non_tensor_batch["uid"] = np.array( [str(uuid.uuid4()) for _ in range(len(new_batch.batch))], dtype=object ) # repeat to align with repeated responses in rollout new_batch = new_batch.repeat(repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True) new_batch = new_batch.union(gen_batch_output) with simple_timer("reward", timing_raw): # compute scores. Support both model and function-based. # We first compute the scores using reward model. Then, we call reward_fn to combine # the results from reward model and rule-based results. 
if self.use_rm: # we first compute reward model score reward_tensor = self.rm_wg.compute_rm_score(new_batch) new_batch = new_batch.union(reward_tensor) # we combine with rule-based rm reward_extra_infos_dict: dict[str, list] try: reward_result = self.reward_fn(new_batch, return_dict=True) reward_tensor = reward_result["reward_tensor"] reward_extra_infos_dict = reward_result["reward_extra_info"] except Exception as e: print(f"Error in reward_fn: {e}") reward_tensor = self.reward_fn(new_batch) reward_extra_infos_dict = {} new_batch.batch["token_level_scores"] = reward_tensor print(f"{list(reward_extra_infos_dict.keys())=}") if reward_extra_infos_dict: new_batch.non_tensor_batch.update( {k: np.array(v) for k, v in reward_extra_infos_dict.items()} ) # compute rewards. apply_kl_penalty if available if self.config.algorithm.use_kl_in_reward: new_batch, kl_metrics = apply_kl_penalty( new_batch, kl_ctrl=self.kl_ctrl_in_reward, kl_penalty=self.config.algorithm.kl_penalty ) metrics.update( kl_metrics ) # TODO: This will be cleared if we use multiple generation batches else: new_batch.batch["token_level_rewards"] = new_batch.batch["token_level_scores"] if not self.config.algorithm.filter_groups.enable: batch = new_batch else: # NOTE: When the number of prompts left after filtering is less than the train batch size, # we skip to the next generation batch metric_name = self.config.algorithm.filter_groups.metric if metric_name == "seq_final_reward": # Turn to numpy for easier filtering new_batch.non_tensor_batch["seq_final_reward"] = ( new_batch.batch["token_level_rewards"].sum(dim=-1).numpy() ) elif metric_name == "seq_reward": new_batch.non_tensor_batch["seq_reward"] = ( new_batch.batch["token_level_scores"].sum(dim=-1).numpy() ) # Collect the sequence reward for each trajectory prompt_uid2metric_vals = defaultdict(list) for uid, metric_val in zip( new_batch.non_tensor_batch["uid"], new_batch.non_tensor_batch[metric_name], strict=True ): prompt_uid2metric_vals[uid].append(metric_val) prompt_uid2metric_std = {} for prompt_uid, metric_vals in prompt_uid2metric_vals.items(): prompt_uid2metric_std[prompt_uid] = np.std(metric_vals) kept_prompt_uids = [ uid for uid, std in prompt_uid2metric_std.items() if std > 0 or len(prompt_uid2metric_vals[uid]) == 1 ] num_prompt_in_batch += len(kept_prompt_uids) kept_traj_idxs = [] for idx, traj_from_prompt_uid in enumerate(new_batch.non_tensor_batch["uid"]): if traj_from_prompt_uid in kept_prompt_uids: kept_traj_idxs.append(idx) new_batch = new_batch[kept_traj_idxs] batch = new_batch if batch is None else DataProto.concat([batch, new_batch]) prompt_bsz = self.config.data.train_batch_size if num_prompt_in_batch < prompt_bsz: print(f"{num_prompt_in_batch=} < {prompt_bsz=}") max_num_gen_batches = self.config.algorithm.filter_groups.max_num_gen_batches if max_num_gen_batches <= 0 or num_gen_batches < max_num_gen_batches: print(f"{num_gen_batches=}. Keep generating...") continue else: raise ValueError( f"{num_gen_batches=} >= {max_num_gen_batches=}." + " Generated too many batches. Please check if your data is too difficult." + " You could also try setting max_num_gen_batches=0 to allow unlimited generation batches." ) else: # Align the batch traj_bsz = self.config.data.train_batch_size * self.config.actor_rollout_ref.rollout.n print( f"Collected {num_prompt_in_batch} / {self.config.data.train_batch_size} prompts. " f"Collecting finished." ) batch = batch[:traj_bsz] # === Updating === batch.batch["response_mask"] = compute_response_mask(batch) # balance the number of valid tokens on each dp rank.
# Note that this breaks the order of data inside the batch. # Please take care when you implement group-based adv computation such as GRPO and RLOO if self.config.trainer.balance_batch: self._balance_batch(batch, metrics=metrics) # compute global_valid tokens batch.meta_info["global_token_num"] = torch.sum(batch.batch["attention_mask"], dim=-1).tolist() # recompute old_log_probs with simple_timer("old_log_prob", timing_raw): old_log_prob = self.actor_rollout_wg.compute_log_prob(batch) batch = batch.union(old_log_prob) if self.use_reference_policy: # compute reference log_prob with simple_timer("ref", timing_raw): ref_log_prob = self.ref_policy_wg.compute_ref_log_prob(batch) batch = batch.union(ref_log_prob) # compute values if self.use_critic: with simple_timer("values", timing_raw): values = self.critic_wg.compute_values(batch) batch = batch.union(values) with simple_timer("adv", timing_raw): # compute advantages, executed on the driver process norm_adv_by_std_in_grpo = self.config.algorithm.get("norm_adv_by_std_in_grpo", True) batch = compute_advantage( batch, adv_estimator=self.config.algorithm.adv_estimator, gamma=self.config.algorithm.gamma, lam=self.config.algorithm.lam, num_repeat=self.config.actor_rollout_ref.rollout.n, norm_adv_by_std_in_grpo=norm_adv_by_std_in_grpo, ) # update critic if self.use_critic: with simple_timer("update_critic", timing_raw): critic_output = self.critic_wg.update_critic(batch) critic_output_metrics = reduce_metrics(critic_output.meta_info["metrics"]) metrics.update(critic_output_metrics) # implement critic warmup if self.config.trainer.critic_warmup <= self.global_steps: # update actor with simple_timer("update_actor", timing_raw): actor_output = self.actor_rollout_wg.update_actor(batch) actor_output_metrics = reduce_metrics(actor_output.meta_info["metrics"]) metrics.update(actor_output_metrics) # validate if ( self.val_reward_fn is not None and self.config.trainer.test_freq > 0 and (is_last_step or self.global_steps % self.config.trainer.test_freq == 0) ): with simple_timer("testing", timing_raw): val_metrics: dict = self._validate() if is_last_step: last_val_metrics = val_metrics metrics.update(val_metrics) if self.config.trainer.save_freq > 0 and ( is_last_step or self.global_steps % self.config.trainer.save_freq == 0 ): with simple_timer("save_checkpoint", timing_raw): self._save_checkpoint() # collect metrics metrics.update(compute_data_metrics(batch=batch, use_critic=self.use_critic)) metrics.update(compute_timing_metrics(batch=batch, timing_raw=timing_raw)) # TODO: implement actual tflops and theoretical tflops n_gpus = self.resource_pool_manager.get_n_gpus() metrics.update(compute_throughout_metrics(batch=batch, timing_raw=timing_raw, n_gpus=n_gpus)) timing_raw = defaultdict(float) # clear timing metrics["train/num_gen_batches"] = num_gen_batches batch = None num_prompt_in_batch = 0 num_gen_batches = 0 # TODO: make a canonical logger that supports various backends logger.log(data=metrics, step=self.global_steps) if is_last_step: pprint(f"Final validation metrics: {last_val_metrics}") progress_bar.close() return progress_bar.update(1) self.global_steps += 1 ================================================ FILE: verl_rl/recipe/entropy/main_entropy.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Note that we don't combine the main with ray_trainer, as ray_trainer is used by other mains. """ import hydra import ray from .entropy_ray_trainer import RayEntropyTrainer from .reward import load_reward_manager @hydra.main(config_path="config", config_name="entropy_trainer", version_base=None) def main(config): run_ppo(config) def run_ppo(config) -> None: if not ray.is_initialized(): # this is for local ray cluster ray.init( runtime_env={ "env_vars": { "TOKENIZERS_PARALLELISM": "true", "NCCL_DEBUG": "WARN", "VLLM_LOGGING_LEVEL": "WARN", "WANDB_API_KEY": "YOUR_WANDB_API_KEY", } }, num_cpus=config.ray_init.num_cpus, ) runner = TaskRunner.remote() ray.get(runner.run.remote(config)) def merge_dict(a: dict, b: dict) -> dict: """Return a new dict that has `a` updated with `b` (b wins on conflicts). Example:: >>> d1 = {"x": 1, "y": 2} >>> d2 = {"y": 20, "z": 3} >>> new_dict = merge_dict(d1, d2) >>> print(new_dict) # {'x': 1, 'y': 20, 'z': 3} >>> print(d1) # {"x": 1, "y": 2} (unchanged) >>> print(d2) # {"y": 20, "z": 3} (unchanged) """ return a | b @ray.remote(num_cpus=1) # please make sure main_task is not scheduled on head class TaskRunner: def run(self, config): # print initial config from pprint import pprint from omegaconf import OmegaConf from verl.utils.fs import copy_to_local pprint(OmegaConf.to_container(config, resolve=True)) # resolve=True will eval symbol values OmegaConf.resolve(config) # download the checkpoint from hdfs local_path = copy_to_local(config.actor_rollout_ref.model.path) print(f"{config.actor_rollout_ref.model.path}") # instantiate tokenizer from verl.utils import hf_processor, hf_tokenizer trust_remote_code = config.data.get("trust_remote_code", False) tokenizer = hf_tokenizer(local_path, trust_remote_code=trust_remote_code) processor = hf_processor(local_path, use_fast=True) # used for multimodal LLM, could be None # define worker classes if config.actor_rollout_ref.actor.strategy in {"fsdp", "fsdp2"}: assert config.critic.strategy in {"fsdp", "fsdp2"} from verl.single_controller.ray import RayWorkerGroup from verl.workers.fsdp_workers import ActorRolloutRefWorker, AsyncActorRolloutRefWorker, CriticWorker actor_rollout_cls = ( AsyncActorRolloutRefWorker if config.actor_rollout_ref.rollout.mode == "async" else ActorRolloutRefWorker ) ray_worker_group_cls = RayWorkerGroup elif config.actor_rollout_ref.actor.strategy == "megatron": assert config.actor_rollout_ref.actor.strategy == config.critic.strategy from verl.single_controller.ray.megatron import NVMegatronRayWorkerGroup from verl.workers.megatron_workers import ActorRolloutRefWorker, CriticWorker actor_rollout_cls = ActorRolloutRefWorker ray_worker_group_cls = NVMegatronRayWorkerGroup else: raise NotImplementedError from verl.trainer.ppo.ray_trainer import ResourcePoolManager, Role role_worker_mapping = { Role.ActorRollout: ray.remote(actor_rollout_cls), Role.Critic: ray.remote(CriticWorker), } global_pool_id = "global_pool" resource_pool_spec = { global_pool_id: [config.trainer.n_gpus_per_node] * config.trainer.nnodes, } mapping = { Role.ActorRollout: global_pool_id, Role.Critic: global_pool_id, } # we should
adopt a multi-source reward function here # - for rule-based rm, we directly call a reward score # - for model-based rm, we call a model # - for code related prompt, we send to a sandbox if there are test cases # - finally, we combine all the rewards together # - The reward type depends on the tag of the data if config.reward_model.enable: if config.reward_model.strategy in {"fsdp", "fsdp2"}: from verl.workers.fsdp_workers import RewardModelWorker elif config.reward_model.strategy == "megatron": from verl.workers.megatron_workers import RewardModelWorker else: raise NotImplementedError role_worker_mapping[Role.RewardModel] = ray.remote(RewardModelWorker) mapping[Role.RewardModel] = global_pool_id # use reference model if config.algorithm.use_kl_in_reward or config.actor_rollout_ref.actor.use_kl_loss: role_worker_mapping[Role.RefPolicy] = ray.remote(ActorRolloutRefWorker) mapping[Role.RefPolicy] = global_pool_id reward_kwargs = { "max_resp_len": config.data.max_response_length, "overlong_buffer_cfg": config.reward_model.overlong_buffer, } cfg_reward_kwargs = config.reward_model.get("reward_kwargs", {}) reward_fn = load_reward_manager( config, tokenizer, num_examine=0, **OmegaConf.merge(OmegaConf.create(reward_kwargs), cfg_reward_kwargs) ) val_reward_fn = load_reward_manager(config, tokenizer, num_examine=1, **reward_kwargs) resource_pool_manager = ResourcePoolManager(resource_pool_spec=resource_pool_spec, mapping=mapping) from verl.utils.dataset.rl_dataset import collate_fn train_dataset = create_rl_dataset(config.data.train_files, config.data, tokenizer, processor) val_dataset = create_rl_dataset(config.data.val_files, config.data, tokenizer, processor) train_sampler = create_rl_sampler(config.data, train_dataset) trainer = RayEntropyTrainer( config=config, tokenizer=tokenizer, processor=processor, role_worker_mapping=role_worker_mapping, resource_pool_manager=resource_pool_manager, ray_worker_group_cls=ray_worker_group_cls, reward_fn=reward_fn, val_reward_fn=val_reward_fn, train_dataset=train_dataset, val_dataset=val_dataset, collate_fn=collate_fn, train_sampler=train_sampler, ) trainer.init_workers() trainer.fit() def create_rl_dataset(data_paths, data_config, tokenizer, processor): """Create a dataset. Arguments: data_config: The data config. tokenizer (Tokenizer): The tokenizer. processor (Processor): The processor. Returns: dataset (Dataset): The dataset. """ from torch.utils.data import Dataset from verl.utils.dataset.rl_dataset import RLHFDataset if "custom_cls" in data_config and data_config.custom_cls.get("path", None) is not None: from verl.utils.import_utils import load_extern_type dataset_cls = load_extern_type(data_config.custom_cls.path, data_config.custom_cls.name) if not issubclass(dataset_cls, Dataset): raise TypeError( f"The custom dataset class '{data_config.custom_cls.name}' from '{data_config.custom_cls.path}' " f"must inherit from torch.utils.data.Dataset" ) else: dataset_cls = RLHFDataset print(f"Using dataset class: {dataset_cls.__name__}") dataset = dataset_cls( data_files=data_paths, tokenizer=tokenizer, processor=processor, config=data_config, ) return dataset def create_rl_sampler(data_config, dataset): """Create a sampler for the dataset. Arguments: data_config: The data config. dataset (Dataset): The dataset. Returns: sampler (Sampler): The sampler. 
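Note: when `data_config.shuffle` is true, the RandomSampler is driven by a torch.Generator seeded with `data_config.seed` (default 1), so the shuffle order is reproducible across checkpoint resumes.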
""" import torch from torch.utils.data import RandomSampler, SequentialSampler # use sampler for better ckpt resume if data_config.shuffle: train_dataloader_generator = torch.Generator() train_dataloader_generator.manual_seed(data_config.get("seed", 1)) sampler = RandomSampler(data_source=dataset, generator=train_dataloader_generator) else: sampler = SequentialSampler(data_source=dataset) return sampler if __name__ == "__main__": main() ================================================ FILE: verl_rl/recipe/entropy/reward.py ================================================ # Copyright 2025 Individual Contributor: Thibaut Barroyer # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import multiprocessing from functools import partial import ray from verl import DataProto from verl.trainer.ppo.reward import compute_reward, get_custom_reward_fn from .reward_score import _default_compute_score def load_reward_manager(config, tokenizer, num_examine, **reward_kwargs): """ Load and initialize a reward manager based on the configuration. Args: config: PPO trainer configuration object containing reward_model fields. tokenizer: Tokenizer object used for processing text. num_examine: Number of samples to examine. **reward_kwargs: Additional keyword arguments for the reward manager. Returns: An instance of the specified reward manager class. """ from verl.workers.reward_manager import get_reward_manager_cls # The list of pre-defined reward managers are defined in `verl/workers/reward_manager/`: # naive: NaiveRewardManager # prime: PrimeRewardManager # batch: BatchRewardManager # dapo: DAPORewardManager # Note(haibin.lin): For custom reward managers, please make sure they are imported and # registered via `verl.workers.reward_manager.register` # By default reward_manager is set to naive (NaiveRewardManager) reward_manager_name = config.reward_model.get("reward_manager", "naive") reward_manager_cls = get_reward_manager_cls(reward_manager_name) # Try to get a custom reward function based on the configuration compute_score = get_custom_reward_fn(config) final_compute_score = compute_score if compute_score is None: sandbox_config = config.reward_model.get("sandbox_fusion") sandbox_url = sandbox_config.get("url") if sandbox_config else None if sandbox_url: sandbox_manager = multiprocessing.Manager() # Create a semaphore to control concurrent access to the sandbox _concurrent_semaphore = sandbox_manager.Semaphore(sandbox_config.get("max_concurrent", 64)) final_compute_score = partial( _default_compute_score, sandbox_fusion_url=sandbox_url, concurrent_semaphore=_concurrent_semaphore ) else: final_compute_score = _default_compute_score # Instantiate and return the reward manager with the specified parameters return reward_manager_cls( tokenizer=tokenizer, num_examine=num_examine, compute_score=final_compute_score, reward_fn_key=config.data.reward_fn_key, **reward_kwargs, ) @ray.remote(num_cpus=1) def compute_reward_async(data: DataProto, config, tokenizer): """ Load the reward manager and compute the reward for a batch of data. 
This is meant to be run in a separate Ray worker. """ reward_fn = load_reward_manager(config, tokenizer, num_examine=0, **config.reward_model.get("reward_kwargs", {})) return compute_reward(data, reward_fn) ================================================ FILE: verl_rl/recipe/entropy/reward_score/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from . import gsm8k, math, prime_math, prime_code import traceback from . import entropy_math def _default_compute_score( data_source, solution_str, ground_truth, extra_info=None, sandbox_fusion_url=None, concurrent_semaphore=None ): try: res = entropy_math.compute_score(solution_str, str(ground_truth)) # print(f"data_source: {data_source}") # raise NotImplementedError(f"Reward function is not implemented for {data_source=}") if isinstance(res, dict): return res elif isinstance(res, int | float | bool): return float(res) else: return float(res[0]) except Exception as e: print(f"[ERROR] Error in process_completion for task: {str(e)}") traceback.print_exc() # print the full stack trace raise # re-raise the exception so upper layers can catch it ================================================ FILE: verl_rl/recipe/entropy/reward_score/entropy_math/__init__.py ================================================ # Copyright 2024 PRIME team and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Provides a math answer grading function with high recall. Based on HF math_verify, verl, open reasoner zero, etc. """ import os import re import signal from itertools import islice, zip_longest from math import isclose from typing import Optional import sympy from latex2sympy2_extended import latex2sympy from math_verify import ExprExtractionConfig, LatexExtractionConfig, parse, verify from pylatexenc import latex2text from sympy import N, simplify from sympy.parsing import sympy_parser from sympy.parsing.latex import parse_latex from sympy.parsing.sympy_parser import parse_expr """ This code is adapted from: Dr. GRPO (https://github.com/sail-sg/understand-r1-zero/blob/main/understand_r1_zero/math_grader.py).
""" def timeout_ours(timeout_seconds: int = 8): if os.name == "posix": import signal def decorator(func): def handler(signum, frame): raise TimeoutError("Operation timed out!") def wrapper(*args, **kwargs): old_handler = signal.getsignal(signal.SIGALRM) signal.signal(signal.SIGALRM, handler) signal.alarm(timeout_seconds) try: return func(*args, **kwargs) finally: signal.alarm(0) signal.signal(signal.SIGALRM, old_handler) return wrapper return decorator else: raise NotImplementedError(f"Unsupported OS: {os.name}") # Dan Hendrycks' code def mathd_normalize_answer(answer: Optional[str]) -> Optional[str]: if answer is None: return None answer = answer.strip() try: # Remove enclosing `\text{}`. m = re.search("^\\\\text\{(?P.+?)\}$", answer) if m is not None: answer = m.group("text").strip() return _strip_string(answer) except Exception: return answer # units mainly from MathQA unit_texts = [ "east", "degree", "mph", "kmph", "ft", "m sqaure", " m east", "sq m", "deg", "mile", "q .", "monkey", "prime", "ratio", "profit of rs", "rd", "o", "gm", "p . m", "lb", "tile", "per", "dm", "lt", "gain", "ab", "way", "west", "a .", "b .", "c .", "d .", "e .", "f .", "g .", "h .", "t", "a", "h", "no change", "men", "soldier", "pie", "bc", "excess", "st", "inches", "noon", "percent", "by", "gal", "kmh", "c", "acre", "rise", "a . m", "th", "π r 2", "sq", "mark", "l", "toy", "coin", "sq . m", "gallon", "° f", "profit", "minw", "yr", "women", "feet", "am", "pm", "hr", "cu cm", "square", "v â € ™", "are", "rupee", "rounds", "cubic", "cc", "mtr", "s", "ohm", "number", "kmph", "day", "hour", "minute", "min", "second", "man", "woman", "sec", "cube", "mt", "sq inch", "mp", "∏ cm ³", "hectare", "more", "sec", "unit", "cu . m", "cm 2", "rs .", "rs", "kg", "g", "month", "km", "m", "cm", "mm", "apple", "liter", "loss", "yard", "pure", "year", "increase", "decrease", "d", "less", "Surface", "litre", "pi sq m", "s .", "metre", "meter", "inch", ] unit_texts.extend([t + "s" for t in unit_texts]) def _strip_string(string): def _fix_fracs(string): substrs = string.split("\\frac") new_str = substrs[0] if len(substrs) > 1: substrs = substrs[1:] for substr in substrs: new_str += "\\frac" if substr[0] == "{": new_str += substr else: try: assert len(substr) >= 2 except Exception: return string a = substr[0] b = substr[1] if b != "{": if len(substr) > 2: post_substr = substr[2:] new_str += "{" + a + "}{" + b + "}" + post_substr else: new_str += "{" + a + "}{" + b + "}" else: if len(substr) > 2: post_substr = substr[2:] new_str += "{" + a + "}" + b + post_substr else: new_str += "{" + a + "}" + b string = new_str return string def _fix_a_slash_b(string): if len(string.split("/")) != 2: return string a = string.split("/")[0] b = string.split("/")[1] try: a = int(a) b = int(b) assert string == "{}/{}".format(a, b) new_string = "\\frac{" + str(a) + "}{" + str(b) + "}" return new_string except Exception: return string def _remove_right_units(string): # "\\text{ " only ever occurs (at least in the val set) when describing units if "\\text{ " in string: splits = string.split("\\text{ ") assert len(splits) == 2 return splits[0] else: return string def _fix_sqrt(string): if "\\sqrt" not in string: return string splits = string.split("\\sqrt") new_string = splits[0] for split in splits[1:]: if split[0] != "{": a = split[0] new_substr = "\\sqrt{" + a + "}" + split[1:] else: new_substr = "\\sqrt" + split new_string += new_substr return new_string # linebreaks string = string.replace("\n", "") # print(string) # remove inverse spaces string = 
string.replace("\\!", "") # print(string) # replace \\ with \ string = string.replace("\\\\", "\\") # print(string) # matrix string = re.sub(r"\\begin\{array\}\{.*?\}", r"\\begin{pmatrix}", string) string = re.sub(r"\\end\{array\}", r"\\end{pmatrix}", string) string = string.replace("bmatrix", "pmatrix") # replace tfrac and dfrac with frac string = string.replace("tfrac", "frac") string = string.replace("dfrac", "frac") string = string.replace("\\neq", "\\ne").replace("\\leq", "\\le").replace("\\geq", "\\ge") # print(string) # remove \left and \right string = string.replace("\\left", "") string = string.replace("\\right", "") # print(string) # Remove unit: miles, dollars if after is not none _string = re.sub(r"\\text{.*?}$", "", string).strip() if _string != "" and _string != string: # print("Warning: unit not removed: '{}' -> '{}'".format(string, _string)) string = _string # Remove unit: texts for _ in range(2): for unit_text in unit_texts: # use regex, the prefix should be either the start of the string or a non-alphanumeric character # the suffix should be either the end of the string or a non-alphanumeric character _string = re.sub(r"(^|\W)" + unit_text + r"($|\W)", r"\1\2", string) if _string != "": string = _string # Remove circ (degrees) string = string.replace("^{\\circ}", "") string = string.replace("^\\circ", "") # remove dollar signs string = string.replace("\\$", "") # remove units (on the right) string = _remove_right_units(string) # remove percentage string = string.replace("\\%", "") string = string.replace("\%", "") # " 0." equivalent to " ." and "{0." equivalent to "{." Alternatively, add "0" if "." is the start of the string string = string.replace(" .", " 0.") string = string.replace("{.", "{0.") # if empty, return empty string if len(string) == 0: return string if string[0] == ".": string = "0" + string # to consider: get rid of e.g. "k = " or "q = " at beginning if len(string.split("=")) == 2: if len(string.split("=")[0]) <= 2: string = string.split("=")[1] # fix sqrt3 --> sqrt{3} string = _fix_sqrt(string) # remove spaces string = string.replace(" ", "") # \frac1b or \frac12 --> \frac{1}{b} and \frac{1}{2}, etc. Even works with \frac1{72} (but not \frac{72}1). # Also does a/b --> \\frac{a}{b} string = _fix_fracs(string) # manually change 0.5 --> \frac{1}{2} if string == "0.5": string = "\\frac{1}{2}" # NOTE: X/Y changed to \frac{X}{Y} in dataset, but in simple cases fix in case the model output is X/Y string = _fix_a_slash_b(string) return string SUBSTITUTIONS = [ ("an ", ""), ("a ", ""), (".$", "$"), ("\\$", ""), (r"\ ", ""), (" ", ""), ("mbox", "text"), (",\\text{and}", ","), ("\\text{and}", ","), ("\\text{m}", "\\text{}"), ] REMOVED_EXPRESSIONS = [ "square", "ways", "integers", "dollars", "mph", "inches", "ft", "hours", "km", "units", "\\ldots", "sue", "points", "feet", "minutes", "digits", "cents", "degrees", "cm", "gm", "pounds", "meters", "meals", "edges", "students", "childrentickets", "multiples", "\\text{s}", "\\text{.}", "\\text{\ns}", "\\text{}^2", "\\text{}^3", "\\text{\n}", "\\text{}", r"\mathrm{th}", r"^\circ", r"^{\circ}", r"\;", r",\!", "{,}", '"', "\\dots", ] def normalize_final_answer(final_answer: str) -> str: """ Normalize a final answer to a quantitative reasoning question. This code comes from https://arxiv.org/pdf/2206.14858.pdf, page18. 
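For example, "10,000 meters" normalizes to "10000".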
""" # final_answer = final_answer.split("=")[-1] for before, after in SUBSTITUTIONS: final_answer = final_answer.replace(before, after) for expr in REMOVED_EXPRESSIONS: final_answer = final_answer.replace(expr, "") # Extract answer that is in LaTeX math, is bold, # is surrounded by a box, etc. final_answer = re.sub(r"(.*?)(\$)(.*?)(\$)(.*)", "$\\3$", final_answer) final_answer = re.sub(r"(\\text\{)(.*?)(\})", "\\2", final_answer) final_answer = re.sub(r"(\\textbf\{)(.*?)(\})", "\\2", final_answer) final_answer = re.sub(r"(\\overline\{)(.*?)(\})", "\\2", final_answer) final_answer = re.sub(r"(\\boxed\{)(.*)(\})", "\\2", final_answer) # Normalize shorthand TeX: # \fracab -> \frac{a}{b} # \frac{abc}{bef} -> \frac{abc}{bef} # \fracabc -> \frac{a}{b}c # \sqrta -> \sqrt{a} # \sqrtab -> sqrt{a}b final_answer = re.sub(r"(frac)([^{])(.)", "frac{\\2}{\\3}", final_answer) final_answer = re.sub(r"(sqrt)([^{])", "sqrt{\\2}", final_answer) final_answer = final_answer.replace("$", "") # Normalize 100,000 -> 100000 if final_answer.replace(",", "").isdigit(): final_answer = final_answer.replace(",", "") return final_answer def repeatness(s: str): def ranks(seq): index = {v: i for i, v in enumerate(sorted(set(seq)))} return [index[v] for v in seq] def suffixArray(s): line = ranks(s) n, k, ans, sa = len(s), 1, line, [0] * len(s) while k < n - 1: line = ranks(list(zip_longest(line, islice(line, k, None), fillvalue=-1))) ans, k = line, k << 1 for i, k in enumerate(ans): sa[k] = i return ans, sa def lcp(arr, suffixArr, inv_suff): n, ans, k = len(arr), [0] * len(arr), 0 for i in range(n): if inv_suff[i] == n - 1: k = 0 continue j = suffixArr[inv_suff[i] + 1] while i + k < n and j + k < n and arr[i + k] == arr[j + k]: k += 1 ans[inv_suff[i]] = k if k > 0: k -= 1 return ans arr = [ord(i) for i in s] n = len(arr) if n <= 1: return 0 c, sa = suffixArray(arr) cnt = sum(lcp(arr, sa, c)) return (cnt * 2 / (n * (n + 1))) > 0.2 class timeout: def __init__(self, seconds=1, error_message="Timeout"): self.seconds = seconds self.error_message = error_message def handle_timeout(self, signum, frame): raise TimeoutError(self.error_message) def __enter__(self): signal.signal(signal.SIGALRM, self.handle_timeout) signal.alarm(self.seconds) def __exit__(self, type, value, traceback): signal.alarm(0) def latex_eval(latex): sym = parse_latex(latex) val = sym.evalf() return sym, val def numeric_equal(prediction: float, reference: float): # Note that relative tolerance has significant impact # on the result of the synthesized GSM-Hard dataset # if reference.is_integer(): # return isclose(reference, round(prediction), abs_tol=1e-4) # else: # prediction = round(prediction, len(str(reference).split(".")[-1])) return isclose(reference, prediction, rel_tol=1e-4) @timeout_ours(timeout_seconds=5) def symbolic_equal(a, b): def _parse(s): for f in [parse_latex, parse_expr, latex2sympy]: try: return f(s.replace("\\\\", "\\")) except Exception: try: return f(s) except Exception: pass return s a = _parse(a) b = _parse(b) # direct equal try: if str(a) == str(b) or a == b: return True except Exception: pass # simplify equal try: if a.equals(b) or simplify(a - b) == 0: return True except Exception: pass # equation equal try: if (abs(a.lhs - a.rhs)).equals(abs(b.lhs - b.rhs)): return True except Exception: pass try: if numeric_equal(float(N(a)), float(N(b))): return True except Exception: pass # matrix try: # if a and b are matrix if a.shape == b.shape: _a = a.applyfunc(lambda x: round(x, 3)) _b = b.applyfunc(lambda x: round(x, 3)) if _a.equals(_b): 
return True except Exception: pass return False def _is_latex_equal(str1, str2): try: sym1, val1 = latex_eval(str1) sym2, val2 = latex_eval(str2) if sym1 == sym2 or val1 == val2: return True else: raise ValueError except Exception: # noqa try: norm1, norm2 = normalize_final_answer(str1), normalize_final_answer(str2) sym1, val1 = latex_eval(norm1) sym2, val2 = latex_eval(norm2) if sym1 == sym2 or val1 == val2: return True except Exception: # noqa return norm1 == norm2 return False def is_latex_equal(given_answer: str, ground_truth: str) -> bool: try: with timeout(1): try: if (len(given_answer) > 128 and repeatness(given_answer)) or ( len(ground_truth) > 128 and repeatness(ground_truth) ): return False # First conduct normalized string matching. ground_truth_normalized = _normalize(ground_truth) given_normalized = _normalize(given_answer) if ground_truth_normalized is None: return False if ground_truth_normalized == given_normalized: return True # Next call math verify. given_answer.replace("\n", "") ground_truth.replace("\n", "") if "$" not in given_answer: given_answer = f"${given_answer}$" if "$" not in ground_truth: ground_truth = f"${ground_truth}$" return verify( parse( ground_truth, extraction_config=( LatexExtractionConfig(boxed_match_priority=0), ExprExtractionConfig(), ), fallback_mode="no_fallback", extraction_mode=["first_match"], parsing_timeout=1, ), parse( given_answer, extraction_config=( LatexExtractionConfig(boxed_match_priority=0), ExprExtractionConfig(), ), fallback_mode="no_fallback", extraction_mode=["first_match"], parsing_timeout=1, ), timeout_seconds=1, ) # or symbolic_equal(ground_truth, given_answer) except Exception: return False except TimeoutError: return False def is_value_equal(given_answer: str, ground_truth: str) -> bool: assert ground_truth is not None ground_truth_normalized_mathd = mathd_normalize_answer(ground_truth) given_answer_normalized_mathd = mathd_normalize_answer(given_answer) str_equal = ground_truth_normalized_mathd == given_answer_normalized_mathd try: number_equal = float(ground_truth_normalized_mathd) == float(given_answer_normalized_mathd) return str_equal or number_equal except Exception: return str_equal # sympy might hang -- we don't care about trying to be lenient in these cases BAD_SUBSTRINGS = ["^{", "^("] BAD_REGEXES = ["\^[0-9]+\^", "\^[0-9][0-9]+"] TUPLE_CHARS = "()[]" def _sympy_parse(expr: str): """Parses an expression with sympy.""" py_expr = expr.replace("^", "**") return sympy_parser.parse_expr( py_expr, transformations=(sympy_parser.standard_transformations + (sympy_parser.implicit_multiplication_application,)), ) def _parse_latex(expr: str) -> str: """Attempts to parse latex to an expression sympy can read.""" expr = expr.replace("\\tfrac", "\\frac") expr = expr.replace("\\dfrac", "\\frac") expr = expr.replace("\\frac", " \\frac") # Play nice with mixed numbers. expr = latex2text.LatexNodes2Text().latex_to_text(expr) # Replace the specific characters that this parser uses. 
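# Map Unicode math symbols below to plain-text spellings that sympy's parsers accept.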
expr = expr.replace("√", "sqrt") expr = expr.replace("π", "pi") expr = expr.replace("∞", "inf") expr = expr.replace("∪", "U") expr = expr.replace("·", "*") expr = expr.replace("×", "*") return expr.strip() def _is_float(num: str) -> bool: try: float(num) return True except ValueError: return False def _is_int(x: float) -> bool: try: return abs(x - int(round(x))) <= 1e-7 except Exception: return False def _is_frac(expr: str) -> bool: return bool(re.search(r"^-?[0-9]+.?/0*[1-9][0-9]*.?$", expr)) def _str_is_int(x: str) -> bool: try: x = _strip_properly_formatted_commas(x) x = float(x) return abs(x - int(round(x))) <= 1e-7 except Exception: return False def _str_to_int(x: str) -> bool: x = x.replace(",", "") x = float(x) return int(x) def _inject_implicit_mixed_number(step: str): """ Automatically make a mixed number evalable e.g. 7 3/4 => 7+3/4 """ p1 = re.compile("([0-9]) +([0-9])") step = p1.sub("\\1+\\2", step) ## implicit mults return step def _strip_properly_formatted_commas(expr: str): # We want to be careful because we don't want to strip tuple commas p1 = re.compile("(\d)(,)(\d\d\d)($|\D)") while True: next_expr = p1.sub("\\1\\3\\4", expr) if next_expr == expr: break expr = next_expr return next_expr def _normalize(expr: str) -> str: """Normalize answer expressions.""" if expr is None: return None # Remove enclosing `\text{}`. m = re.search("^\\\\text\{(?P.+?)\}$", expr) if m is not None: expr = m.group("text") expr = expr.replace("\\%", "%") expr = expr.replace("\\$", "$") expr = expr.replace("$", "") expr = expr.replace("%", "") expr = expr.replace(" or ", " , ") expr = expr.replace(" and ", " , ") expr = expr.replace("million", "*10^6") expr = expr.replace("billion", "*10^9") expr = expr.replace("trillion", "*10^12") for unit in [ "degree", "cm", "centimeter", "meter", "mile", "second", "minute", "hour", "day", "week", "month", "year", "foot", "feet", "inch", "yard", ]: expr = re.sub(f"{unit}(es)?(s)? *(\^[0-9]+)?", "", expr) expr = re.sub("\^ *\\\\circ", "", expr) if len(expr) > 0 and expr[0] == "{" and expr[-1] == "}": expr = expr[1:-1] expr = re.sub(",\\\\! 
*", "", expr) if _is_float(expr) and _is_int(float(expr)): expr = str(int(round(float(expr)))) if "\\" in expr: try: expr = _parse_latex(expr) except Exception: pass # edge case with mixed numbers and negative signs expr = re.sub("- *", "-", expr) expr = _inject_implicit_mixed_number(expr) expr = expr.replace(" ", "") # if we somehow still have latex braces here, just drop them expr = expr.replace("{", "") expr = expr.replace("}", "") # don't be case sensitive for text answers expr = expr.lower() if _str_is_int(expr): expr = str(_str_to_int(expr)) return expr def count_unknown_letters_in_expr(expr: str): expr = expr.replace("sqrt", "") expr = expr.replace("frac", "") letters_in_expr = set([x for x in expr if x.isalpha()]) return len(letters_in_expr) def should_allow_eval(expr: str): # we don't want to try parsing unknown text or functions of more than two variables if count_unknown_letters_in_expr(expr) > 2: return False for bad_string in BAD_SUBSTRINGS: if bad_string in expr: return False for bad_regex in BAD_REGEXES: if re.search(bad_regex, expr) is not None: return False return True @timeout_ours(timeout_seconds=5) def are_equal_under_sympy(ground_truth_normalized: str, given_normalized: str): are_equal = False try: expr = f"({ground_truth_normalized})-({given_normalized})" if should_allow_eval(expr): sympy_diff = _sympy_parse(expr) simplified = sympy.simplify(sympy_diff) if simplified == 0: are_equal = True except Exception: pass return are_equal def split_tuple(expr: str): """ Split the elements in a tuple/interval, while handling well-formatted commas in large numbers """ expr = _strip_properly_formatted_commas(expr) if len(expr) == 0: return [] if ( len(expr) > 2 and expr[0] in TUPLE_CHARS and expr[-1] in TUPLE_CHARS and all([ch not in expr[1:-1] for ch in TUPLE_CHARS]) ): elems = [elem.strip() for elem in expr[1:-1].split(",")] else: elems = [expr] return elems def last_boxed_only_string(string): idx = string.rfind("\\boxed") if idx < 0: idx = string.rfind("\\fbox") if idx < 0: return None i = idx right_brace_idx = None num_left_braces_open = 0 while i < len(string): if string[i] == "{": num_left_braces_open += 1 if string[i] == "}": num_left_braces_open -= 1 if num_left_braces_open == 0: right_brace_idx = i break i += 1 if right_brace_idx is None: retval = None else: retval = string[idx : right_brace_idx + 1] return retval def remove_boxed(s): left = "\\boxed{" try: assert s[: len(left)] == left assert s[-1] == "}" return s[len(left) : -1] except Exception: return None def extract_boxed_answer(solution: str) -> str: """Extract the answer from inside a LaTeX \\boxed{} command""" solution = last_boxed_only_string(solution) solution = remove_boxed(solution) return solution def grade_answer_sympy(given_answer: str, ground_truth: str) -> bool: ground_truth_normalized = _normalize(ground_truth) given_normalized = _normalize(given_answer) if ground_truth_normalized is None: return False if ground_truth_normalized == given_normalized: return True if len(given_normalized) == 0: return False ground_truth_elems = split_tuple(ground_truth_normalized) given_elems = split_tuple(given_normalized) if len(ground_truth_elems) > 1 and ( ground_truth_normalized[0] != given_normalized[0] or ground_truth_normalized[-1] != given_normalized[-1] ): is_correct = False elif len(ground_truth_elems) != len(given_elems): is_correct = False else: for ground_truth_elem, given_elem in zip(ground_truth_elems, given_elems, strict=True): if _is_frac(ground_truth_elem) and _is_frac(given_elem): # if fractions aren't 
reduced, then shouldn't be marked as correct # so, we don't want to allow sympy.simplify in this case is_correct = ground_truth_elem == given_elem elif _str_is_int(ground_truth_elem) != _str_is_int(given_elem): # if the ground truth answer is an integer, we require the given answer to be a strict match # (no sympy.simplify) is_correct = False else: is_correct = are_equal_under_sympy(ground_truth_elem, given_elem) if not is_correct: break return is_correct def grade_answer_mathd(given_answer: str, ground_truth: str) -> bool: ground_truth_normalized_mathd = mathd_normalize_answer(ground_truth) given_answer_normalized_mathd = mathd_normalize_answer(given_answer) # be at least as lenient as mathd if ground_truth_normalized_mathd == given_answer_normalized_mathd: return True return False def extract_answer(passage: str) -> str: if "\\boxed" in passage: return extract_boxed_answer(passage) return None def grade(model_answer: str, gt_answer: str, fast: bool = True): if "\\boxed" in gt_answer: gt_answer = extract_answer(gt_answer) correct = grade_answer_mathd(model_answer, gt_answer) or grade_answer_sympy(model_answer, gt_answer) if not fast: # This mode further uses math_verify to recall originally false positives. # Will be a bit slower, and sensitive to bad inputs. correct = correct or is_latex_equal( model_answer, gt_answer, ) return correct def compute_score(model_response, gt_answer, fast=False): model_answer = extract_answer(model_response) if model_answer is None: return { "score": 0.0, "format_score": 0.0, "acc": False, "extracted_gt": gt_answer, # "extracted_pred": None, } # return 0.0, 0.0 # Cannot even parse anything. is_correct = False if isinstance(gt_answer, float) or isinstance(gt_answer, int): gt_answer = str(gt_answer) if isinstance(gt_answer, str): is_correct = grade(model_answer, gt_answer, fast) elif isinstance(gt_answer, list): is_correct = False for gt in gt_answer: is_correct |= grade(model_answer, gt, fast) if is_correct: return { "score": 1.0, "format_score": 1.0, "acc": True, "extracted_gt": gt_answer, # "extracted_pred": None, } else: return { "score": 0.0, "format_score": 1.0, "acc": False, "extracted_gt": gt_answer, # "extracted_pred": None, } ================================================ FILE: verl_rl/recipe/entropy/reward_score/entropy_math/grader.py ================================================ # Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Copyright (c) Microsoft Corporation. 
# # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE # Copyright (c) 2023 OpenAI # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # Copyright (c) 2021 Dan Hendrycks # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # Copyright 2024 PRIME team and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This logic is largely copied from the Hendrycks' MATH release (math_equivalence), and borrowed from: - https://github.com/microsoft/ToRA/blob/main/src/eval/grader.py - https://github.com/microsoft/ProphetNet/tree/master/CRITIC - https://github.com/openai/prm800k """ import contextlib import math import re from math import isclose # sympy related from sympy import N, simplify from sympy.parsing.latex import parse_latex from sympy.parsing.sympy_parser import parse_expr # verl related from verl.utils.py_functional import timeout_limit def is_digit(s): try: if "{,}" in str(s): num = float(str(s).replace("{,}", "")) return True, num num = float(str(s).replace(",", "")) return True, num except ValueError: return False, None def normalize(answer, pi) -> str: # checking if answer is $ and removing $ in that case to compare if isinstance(answer, str) and bool(re.match(r"\$\d+(\.\d+)?", answer)): return answer[1:] # checking if answer is % or \\% and removing % if isinstance(answer, str) and ( bool(re.match(r"^\d+(\.\d+)?%$", answer)) or bool(re.match(r"^\d+(\.\d+)?\\%$", answer)) ): return answer.replace("\\%", "").replace("%", "") # handle base answer = handle_base(answer) # handle pi answer = handle_pi(answer, pi) return answer def handle_base(x) -> str: if isinstance(x, str) and "_" in x: # Due to base x = x.split("_")[0] x = float(x) return int(x) return x def handle_pi(string, pi): if isinstance(string, str) and "\pi" in string: # Find the first occurrence of "\pi" idx = string.find("\pi") # Iterate over the string and find all occurrences of "\pi" with a valid previous character while idx != -1: if idx > 0 and string[idx - 1].isdigit(): # Replace "\pi" with "*math.pi" if the previous character is a digit string = string[:idx] + f"*{pi}" + string[idx + 3 :] else: # Replace "\pi" with "1*math.pi" if the previous character is not a digit string = string[:idx] + f"1*{pi}" + string[idx + 3 :] # Find the next occurrence of "\pi" idx = string.find("\pi", idx + 1) # Evaluate the expression using eval() function with contextlib.suppress(Exception): string = eval(string) return string def math_equal( prediction: bool | float | str, reference: float | str, include_percentage: bool = True, tolerance: float = 1e-4, timeout: float = 10.0, pi: float = math.pi, ) -> bool: """ Exact match of math if and only if: 1. numerical equal: both can convert to float and are equal 2. symbolic equal: both can convert to sympy expression and are equal """ prediction = normalize(prediction, pi) reference = normalize(reference, pi) if isinstance(prediction, str) and len(prediction) > 1000: # handling weird corner-cases prediction = prediction[:1000] # 0. string comparison if isinstance(prediction, str) and isinstance(reference, str): if prediction.strip().lower() == reference.strip().lower(): return True if prediction.replace(" ", "") == reference.replace(" ", ""): return True try: # 1. 
numerical equal if is_digit(prediction)[0] and is_digit(reference)[0]: prediction = is_digit(prediction)[1] reference = is_digit(reference)[1] # number questions gt_result = [reference / 100, reference, reference * 100] if include_percentage else [reference] for item in gt_result: try: if isclose(item, prediction, rel_tol=tolerance): return True except Exception: continue return False except Exception: pass if not prediction and prediction not in [0, False]: return False # 2. symbolic equal reference = str(reference).strip() prediction = str(prediction).strip() ## deal with [], (), {} prediction = format_intervals(prediction) pred_str, ref_str = prediction, reference if (prediction.startswith("[") and prediction.endswith("]") and not reference.startswith("(")) or ( prediction.startswith("(") and prediction.endswith(")") and not reference.startswith("[") ): pred_str = pred_str.strip("[]()") ref_str = ref_str.strip("[]()") for s in ["{", "}", "(", ")"]: ref_str = ref_str.replace(s, "") pred_str = pred_str.replace(s, "") if pred_str == ref_str: return True ## [a, b] vs. [c, d], return a==c and b==d if ( prediction and reference and prediction[0] in "([" and prediction[-1] in ")]" and prediction[0] == reference[0] and prediction[-1] == reference[-1] ): pred_parts = prediction[1:-1].split(",") ref_parts = reference[1:-1].split(",") if len(pred_parts) == len(ref_parts) and all( [ math_equal(pred_pt, ref_pt, include_percentage, tolerance) for pred_pt, ref_pt in zip(pred_parts, ref_parts, strict=True) ] ): return True if "," in prediction and "," in reference: pred_parts = [item.strip() for item in prediction.split(",")] ref_parts = [item.strip() for item in reference.split(",")] if len(pred_parts) == len(ref_parts): return bool( all( [ math_equal(pred_parts[i], ref_parts[i], include_percentage, tolerance) for i in range(len(pred_parts)) ] ) ) # if we have point == tuple of values if prediction.startswith("Point") and reference[0] == "(" and reference[-1] == ")": pred_parts = prediction[prediction.find("(") + 1 : -1].split(",") ref_parts = reference[1:-1].split(",") if len(pred_parts) == len(ref_parts) and all( [ math_equal(pred_pt, ref_pt, include_percentage, tolerance) for pred_pt, ref_pt in zip(pred_parts, ref_parts, strict=True) ] ): return True # if reference is a matrix if "\begin{pmatrix}" in reference and prediction.startswith("Matrix"): try: pred_matrix = parse_expr(prediction) ref_matrix_items = reference.split()[1:-1:2] if len(pred_matrix) == len(ref_matrix_items) and all( [ math_equal(pred, ref, include_percentage, tolerance) for ref, pred in zip(ref_matrix_items, pred_matrix, strict=True) ] ): return True except Exception: pass elif "\begin{pmatrix}" in reference and prediction.startswith("[") and prediction.endswith("]"): if isinstance(eval(prediction), list): try: pred_matrix = eval(prediction) # ref_matrix_items = reference.split()[1:-1:2] ref_matrix_items = ( reference.lstrip("\\begin{pmatrix}") # noqa: B005 .lstrip("\begin{pmatrix}") .rstrip("\\end{pmatrix}") .rstrip("\end{pmatrix}") ) # noqa: B005 ref_matrix_items = ref_matrix_items.split("\\") ref_matrix_items = [row.split("&") if "&" in row else row for row in ref_matrix_items] if len(pred_matrix) == len(ref_matrix_items) and all( [ math_equal(pred, ref, include_percentage, tolerance) for ref, pred in zip(ref_matrix_items, pred_matrix, strict=True) ] ): return True except Exception: pass return symbolic_equal(prediction, reference, tolerance, timeout) def symbolic_equal(a, b, tolerance, timeout=10.0): def _parse(s): for f in 
[parse_expr, parse_latex]: try: with timeout_limit(seconds=timeout): return f(s) except TimeoutError: print(f"Parsing timed out for {s}") continue except Exception: continue return s a = _parse(a) b = _parse(b) try: with timeout_limit(seconds=timeout): if simplify(a - b) == 0: return True except TimeoutError: print(f"Simplification timed out for {a} - {b}") pass except Exception: pass try: with timeout_limit(seconds=timeout): if isclose(N(a), N(b), rel_tol=tolerance): return True except TimeoutError: print(f"Numerical evaluation timed out for {a}, {b}") pass except Exception: pass return False def format_intervals(prediction): patterns = { "Interval(": r"^Interval\((.*)\)$", "Interval.Ropen(": r"^Interval\.Ropen\((.*)\)$", "Interval.Lopen(": r"^Interval\.Lopen\((.*)\)$", "Interval.open(": r"^Interval\.open\((.*)\)$", } for key, pattern in patterns.items(): match = re.match(pattern, prediction) if match: inner_content = match.group(1) if key == "Interval(": # Interval(a, b) == [a, b] return f"[{inner_content}]" elif key == "Interval.Ropen(": # Interval.Ropen(a, b) == [a, b) return f"[{inner_content})" elif key == "Interval.Lopen(": # Interval.Lopen(a, b) == (a, b] return f"({inner_content}]" elif key == "Interval.open(": # Interval.open(a, b) == (a, b) return f"({inner_content})" return prediction ================================================ FILE: verl_rl/recipe/entropy/reward_score/entropy_math/math_normalize.py ================================================ # Copyright 2024 PRIME team and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Copyright (c) 2021 Dan Hendrycks # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. """ This logic is largely copied from the Hendrycks' MATH release (math_equivalence). From: https://github.com/openai/prm800k/blob/main/prm800k/grading/math_normalize.py """ import re from typing import Optional def normalize_answer(answer: Optional[str]) -> Optional[str]: if answer is None: return None answer = answer.strip() try: # Remove enclosing `\text{}`.
m = re.search("^\\\\text\{(?P<text>.+?)\}$", answer) if m is not None: answer = m.group("text").strip() return _strip_string(answer) except Exception: # noqa: E722 return answer def _fix_fracs(string): substrs = string.split("\\frac") new_str = substrs[0] if len(substrs) > 1: substrs = substrs[1:] for substr in substrs: new_str += "\\frac" if substr[0] == "{": new_str += substr else: try: assert len(substr) >= 2 except Exception: # noqa: E722 return string a = substr[0] b = substr[1] if b != "{": if len(substr) > 2: post_substr = substr[2:] new_str += "{" + a + "}{" + b + "}" + post_substr else: new_str += "{" + a + "}{" + b + "}" else: if len(substr) > 2: post_substr = substr[2:] new_str += "{" + a + "}" + b + post_substr else: new_str += "{" + a + "}" + b string = new_str return string def _fix_a_slash_b(string): if len(string.split("/")) != 2: return string a = string.split("/")[0] b = string.split("/")[1] try: a = int(a) b = int(b) assert string == "{}/{}".format(a, b) new_string = "\\frac{" + str(a) + "}{" + str(b) + "}" return new_string except Exception: # noqa: E722 return string def _remove_right_units(string): # "\\text{ " only ever occurs (at least in the val set) when describing units if "\\text{ " in string: splits = string.split("\\text{ ") assert len(splits) == 2 return splits[0] else: return string def _fix_sqrt(string): if "\\sqrt" not in string: return string splits = string.split("\\sqrt") new_string = splits[0] for split in splits[1:]: if split[0] != "{": a = split[0] new_substr = "\\sqrt{" + a + "}" + split[1:] else: new_substr = "\\sqrt" + split new_string += new_substr return new_string def _strip_string(string): # linebreaks string = string.replace("\n", "") # remove inverse spaces string = string.replace("\\!", "") # replace \\ with \ string = string.replace("\\\\", "\\") # replace tfrac and dfrac with frac string = string.replace("tfrac", "frac") string = string.replace("dfrac", "frac") # remove \left and \right string = string.replace("\\left", "") string = string.replace("\\right", "") # Remove circ (degrees) string = string.replace("^{\\circ}", "") string = string.replace("^\\circ", "") # remove dollar signs string = string.replace("\\$", "") # remove units (on the right) string = _remove_right_units(string) # remove percentage string = string.replace("\\%", "") string = string.replace("\%", "") # " 0." equivalent to " ." and "{0." equivalent to "{." Alternatively, add "0" if "." is the start of the string string = string.replace(" .", " 0.") string = string.replace("{.", "{0.") # if empty, return empty string if len(string) == 0: return string if string[0] == ".": string = "0" + string # to consider: get rid of e.g. "k = " or "q = " at beginning if len(string.split("=")) == 2 and len(string.split("=")[0]) <= 2: string = string.split("=")[1] # fix sqrt3 --> sqrt{3} string = _fix_sqrt(string) # remove spaces string = string.replace(" ", "") # \frac1b or \frac12 --> \frac{1}{b} and \frac{1}{2}, etc. Even works with \frac1{72} (but not \frac{72}1).
# Also does a/b --> \\frac{a}{b} string = _fix_fracs(string) # manually change 0.5 --> \frac{1}{2} if string == "0.5": string = "\\frac{1}{2}" # NOTE: X/Y changed to \frac{X}{Y} in dataset, but in simple cases fix in case the model output is X/Y string = _fix_a_slash_b(string) return string ================================================ FILE: verl_rl/recipe/genrm_remote/README.md ================================================ # Generative Reward Model ## Scripts ### Step 1: Launch a vLLM Server (Optional) Deploy the pretrained GenRM model using vLLM. Skip this step if you want to use an external api service. ```bash vllm serve verl-team/GenRM-CI-Test-1.5B --served-model-name genrm-demo ``` ### Step 2: Perform RL using GenRM ```bash bash recipe/api-genrm/run_genrm_remote.sh ``` The implementation works by passing a customized reward function (see `reward_function.py`) For convenience, we run both the RL training and server on the same machine. To use an external server, configure the `BASE_URL` and `API_KEY` in `reward_function.py` first. ## Advanced: Customizing Your GenRM You can use sglang server with data parallel for faster inference: ```bash CUDA_VISIBLE_DEVICES=0,1,2,3 python -m sglang_router.launch_server --model-path verl-team/GenRM-CI-Test-1.5B --dp-size 4 ``` Note that you should modify the `BASE_URL` in `reward_function.py` to match your SGLang Server address. You can also create your own customized GenRM by implementing a custom reward function. Here are some tips for customizing your own GenRM based on `reward_function.py`: - Design appropriate prompts for your GenRM - Convert GenRM responses into RL rewards - ... Since these aspects are highly flexible, we only provide a demo implementation. The actual design and implementation of GenRM is left to the user's discretion. ================================================ FILE: verl_rl/recipe/genrm_remote/reward_function.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from concurrent.futures import ThreadPoolExecutor from time import sleep import requests from verl.utils.reward_score.math import last_boxed_only_string, remove_boxed BASE_URL = "http://localhost:30000" API_KEY = "EMPTY" MAX_RETRIES = 3 BASE_DELAY = 2 MAX_WORKERS = 32 MODEL_NAME = "genrm-demo" GENRM_PROMPT_TEMPLATE = """ The following is a math problem and an AI solution: [Math Problem] {problem} [AI Solution] {solution} Your task is to review and critique the solution step by step, and output whether the AI solution is correct. Please put your final answer (i.e., 'True' or 'False') in \\boxed{{}}. 
""".strip() def get_response(problem, solution_str, ground_truth): prompt = GENRM_PROMPT_TEMPLATE.format(problem=problem, solution=solution_str) messages = [{"role": "user", "content": prompt}] for attempt in range(MAX_RETRIES): try: headers = {"Content-Type": "application/json"} chat_url = f"{BASE_URL}/v1/chat/completions" data = {"model": MODEL_NAME, "messages": messages} output = requests.post(chat_url, headers=headers, json=data, timeout=30) response = output.json()["choices"][0]["message"]["content"] return response except Exception as e: if attempt < MAX_RETRIES - 1: print("Exception: ", repr(e)) delay = BASE_DELAY * (2**attempt) print(f"Retrying in {delay} seconds...") sleep(delay) else: print(f"Failed after {MAX_RETRIES} attempts. Error: {e}") raise ConnectionRefusedError(f"Failed to run the model for {prompt}!") def compute_reward(response): reward_score = 0.0 try: boxed_result = last_boxed_only_string(response) if boxed_result is not None: result = remove_boxed(boxed_result) reward_score = float(result == "True") except Exception as e: print(e) return reward_score def compute_score(data_source, solution_str, ground_truth, extra_info): split = extra_info["split"] from verl.utils.reward_score import default_compute_score func_rm_score = default_compute_score(data_source, solution_str, ground_truth, extra_info) if split == "test": return func_rm_score else: problem = extra_info["question"] response = get_response(problem, solution_str, ground_truth) if response is not None: reward_score = compute_reward(response) else: reward_score = 0.0 return reward_score def compute_score_batch(data_sources, solution_strs, ground_truths, extra_infos): with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor: futures = [] for data_source, solution_str, ground_truth, extra_info in zip( data_sources, solution_strs, ground_truths, extra_infos, strict=True ): future = executor.submit(compute_score, data_source, solution_str, ground_truth, extra_info) futures.append(future) results = [future.result() for future in futures] return results ================================================ FILE: verl_rl/recipe/genrm_remote/run_genrm_remote.sh ================================================ # vllm server # CUDA_VISIBLE_DEVICES=0,1,2,3 vllm serve verl-team/GenRM-CI-Test-1.5B --served_model_name genrm-demo # sglang server # CUDA_VISIBLE_DEVICES=0,1,2,3 python -m sglang_router.launch_server --model-path verl-team/GenRM-CI-Test-1.5B --dp-size 4 set -x CUDA_VISIBLE_DEVICES=4,5,6,7 python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files=${HOME}/data/gsm8k/train.parquet \ data.val_files=${HOME}/data/gsm8k/test.parquet \ data.train_batch_size=1024 \ data.max_prompt_length=1024 \ data.max_response_length=2048 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=Qwen/Qwen2.5-3B-Instruct \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=256 \ actor_rollout_ref.actor.use_dynamic_bsz=True \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.rollout.tensor_model_parallel_size=1 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.8 \ actor_rollout_ref.rollout.n=8 \ algorithm.use_kl_in_reward=False \ reward_model.reward_manager=batch \ 
custom_reward_function.path=recipe/genrm_remote/reward_function.py \ custom_reward_function.name=compute_score_batch \ trainer.critic_warmup=0 \ trainer.logger='["console","wandb"]' \ trainer.project_name='verl_func_rm_example_gsm8k' \ trainer.experiment_name='qwen2_5_3b_gen_rm' \ trainer.n_gpus_per_node=4 \ trainer.val_before_train=True \ trainer.nnodes=1 \ trainer.save_freq=20 \ trainer.test_freq=5 \ trainer.total_epochs=10 \ trainer.resume_mode='disable' ================================================ FILE: verl_rl/recipe/langgraph_agent/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: verl_rl/recipe/langgraph_agent/chat_model.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" Ref: https://python.langchain.com/docs/how_to/custom_chat_model/ """ import asyncio import json import logging import os import uuid from typing import Any, Optional from langchain_core.language_models import BaseChatModel from langchain_core.language_models.base import LanguageModelInput from langchain_core.messages import ( AIMessage, BaseMessage, convert_to_openai_messages, ) from langchain_core.messages.tool import InvalidToolCall, ToolCall from langchain_core.outputs import ChatGeneration, ChatResult from langchain_core.runnables import Runnable, RunnableConfig from langchain_core.tools import StructuredTool from langchain_core.utils.function_calling import convert_to_openai_tool from pydantic import Field from verl.experimental.agent_loop.agent_loop import AgentLoopOutput, AsyncLLMServerManager from verl.experimental.agent_loop.tool_parser import ToolParser logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) class MaxTokenExceededError(Exception): """Indicate that history chat messages + tool message exceeds LLM max_tokens.""" pass class ChatModel(BaseChatModel): model_name: str = Field(alias="model") """The name of the model""" client: AsyncLLMServerManager """AsyncLLM server manager""" tokenizer: Any """Tokenizer for the model""" max_tokens: int """Max tokens to generate""" tool_parser: str = "hermes" """Tool parser for the model""" max_parallel_calls: int = 1 """Max parallel tool calls""" temperature: float = 1.0 """Temperature for sampling""" top_p: float = 1.0 """Top p for sampling""" repetition_penalty: float = 1.0 """Repetition penalty for sampling""" def bind_tools(self, tools, **kwargs) -> Runnable[LanguageModelInput, BaseMessage]: """Bind tools to the model. Args: tools: Sequence of tools to bind to the model. Returns: A Runnable that returns a message. """ formatted_tools: list = [convert_to_openai_tool(tool) for tool in tools] # used to remove system prompt prefix when encoding tool response system_prompt = self.tokenizer.apply_chat_template([{}], add_generation_prompt=False, tokenize=True) kwargs["system_prompt"] = system_prompt return self.bind(tools=formatted_tools, **kwargs) def with_structured_output( self, schema: dict | type, *, include_raw: bool = False, **kwargs: Any, ) -> Runnable[LanguageModelInput, dict | BaseChatModel]: """Ref: https://langchain-ai.github.io/langgraph/how-tos/react-agent-structured-output/""" raise NotImplementedError def _generate( self, messages: list[BaseMessage], stop: Optional[list[str]] = None, **kwargs: Any, ) -> ChatResult: raise NotImplementedError async def _agenerate( self, messages: list[BaseMessage], stop: Optional[list[str]] = None, **kwargs: Any, ) -> ChatResult: """Asynchronously generate chat completion message. Args: messages (list[BaseMessage]): List of list of messages. stop (Optional[list[str]], optional): Stop words to use when generating. Model output is cut off at the first occurrence of any of these substrings. Defaults to None. Returns: ChatResult: Chat result. 
""" request_id, prompt_ids, response_mask = await self._preprocess(messages, **kwargs) sampling_params = { "temperature": self.temperature, "top_p": self.top_p, "repetition_penalty": self.repetition_penalty, } if "sampling_params" in kwargs: sampling_params.update(kwargs["sampling_params"]) response_ids = await self.client.generate( request_id=request_id, prompt_ids=prompt_ids, sampling_params=sampling_params ) message = await self._postprocess(request_id, prompt_ids, response_mask, response_ids, **kwargs) generation = ChatGeneration(message=message) return ChatResult(generations=[generation]) @property def _llm_type(self) -> str: """Get the type of language model used by this chat model.""" return self.model_name async def _preprocess(self, messages: list[BaseMessage], **kwargs: Any) -> tuple[str, list[int], list[int]]: """Preprocess messages for chat completion. To ensure strong consistency with policy model, AsyncLLM server generate response with token in token out instead of messages list. But all agent frameworks use messages list to represent chat history. To mitigate the gap, we store trajectory (prompt_ids, response_mask) in lastest AIMessage.response_metadata. 1. Encode ToolMessage to token ids. 2. Retrieve trajectory (prompt_ids, response_mask) from lastest AIMessage.response_metadata. 3. Append ToolMessage token ids to prompt_ids, and append 0 to response_mask. Ref: https://python.langchain.com/docs/concepts/chat_history/ Args: messages (list[BaseMessage]): List of messages. Returns: tuple[str, list[int], list[int]]: Request id, prompt ids, response mask. """ # messages: [system], human, ai, human|tool, ai, human|tool, ... assert messages[-1].type in ["human", "tool"], ( f"Last message must be human or tool, but got {messages[-1].type}" ) loop = asyncio.get_running_loop() # Case 1: initial chat completion: [system], human if messages[-1].type == "human" and (len(messages) == 1 or messages[-2].type != "ai"): prompt_ids = await loop.run_in_executor( None, lambda: self.tokenizer.apply_chat_template( convert_to_openai_messages(messages), tools=kwargs.get("tools"), add_generation_prompt=True, tokenize=True, ), ) return str(uuid.uuid4()), prompt_ids, [] # Case 2: follow up chat completion with tool/human response: [system], human, ai, human|tool, ... 
for i in range(len(messages) - 1, -1, -1): if messages[i].type == "ai": break assert "prompt_ids" in messages[i].response_metadata, "Last message must have prompt_ids in response_metadata" assert "response_mask" in messages[i].response_metadata, ( "Last message must have response_mask in response_metadata" ) # encode tool response tool_responses = convert_to_openai_messages(messages[i + 1 :]) tool_response_ids = await loop.run_in_executor( None, lambda messages=tool_responses: self.tokenizer.apply_chat_template( messages, add_generation_prompt=True, tokenize=True ), ) tool_response_ids = tool_response_ids[len(kwargs["system_prompt"]) :] # stop generation if response length exceeds max response length if len(messages[i].response_metadata["response_mask"]) + len(tool_response_ids) >= self.max_tokens: raise MaxTokenExceededError(f"Max response length {self.max_tokens} exceeded") # append tool response to prompt request_id = messages[i].response_metadata.pop("request_id") prompt_ids = messages[i].response_metadata.pop("prompt_ids") response_mask = messages[i].response_metadata.pop("response_mask") prompt_ids += tool_response_ids response_mask += [0] * len(tool_response_ids) return request_id, prompt_ids, response_mask async def _postprocess( self, request_id: str, prompt_ids: list[int], response_mask: list[int], response_ids: list[int], **kwargs: Any ) -> AIMessage: """Postprocess response_ids when chat completion is done. 1. Decode response_ids, parse tool calls to AIMessage. 2. Append response_ids to prompt_ids, and append 1 to response_mask. 3. Store trajectory (prompt_ids, response_mask) in AIMessage.response_metadata. Args: request_id (str): Unique request id. prompt_ids (list[int]): Input prompt token ids in this chat completion. response_mask (list[int]): Response mask before this chat completion. response_ids (list[int]): LLM generated token ids in this chat completion. Returns: AIMessage: Postprocessed message. 
""" prompt_ids += response_ids response_mask += [1] * len(response_ids) tool_parser = ToolParser.get_tool_parser(self.tool_parser, self.tokenizer) content, function_calls = await tool_parser.extract_tool_calls(response_ids) tool_calls, invalid_tool_calls = [], [] for function_call in function_calls: try: args = json.loads(function_call.arguments) if not isinstance(args, dict): raise json.JSONDecodeError(f"Invalid json tool arguments: {args}") tool_call = ToolCall( args=args, name=function_call.name, id=str(uuid.uuid4()), ) tool_calls.append(tool_call) except json.JSONDecodeError as e: logger.warning(f"Invalid json tool arguments: {e}") tool_call = InvalidToolCall( args=function_call.arguments, name=function_call.name, error=f"Invalid json tool arguments: {e}", ) invalid_tool_calls.append(tool_call) message = AIMessage( content=content, tool_calls=tool_calls[: self.max_parallel_calls], invalid_tool_calls=invalid_tool_calls[: self.max_parallel_calls], response_metadata={ "request_id": request_id, "prompt_ids": prompt_ids, "response_mask": response_mask, }, ) return message class TruncateStructuredTool(StructuredTool): """Structured tool with response truncation.""" tool_response_truncate_side: str """truncate side of tool response: left, middle, right""" max_tool_response_length: int """max length of tool response""" async def _arun( self, *args: Any, config: RunnableConfig, **kwargs: Any, ) -> Any: tool_response = await super()._arun(*args, config=config, **kwargs) tool_response = str(tool_response) if len(tool_response) > self.max_tool_response_length: if self.tool_response_truncate_side == "left": tool_response = tool_response[: self.max_tool_response_length] + "...(truncated)" elif self.tool_response_truncate_side == "right": tool_response = "(truncated)..." + tool_response[-self.max_tool_response_length :] else: length = self.max_tool_response_length // 2 tool_response = tool_response[:length] + "...(truncated)..." + tool_response[-length:] return tool_response def convert_to_agent_output(messages: list[BaseMessage], response_length: int) -> AgentLoopOutput: """Convert messages to AgentLoopOutput. Args: messages (List[BaseMessage]): List of messages, last message must be assistant with response_metadata containing `prompt_ids` and `response_mask`. response_length (int): Max length of response. Returns: AgentLoopOutput: agent loop output trajectory used for training. 
""" # skip last tool calls for i in range(len(messages) - 1, -1, -1): if messages[i].type != "tool": break last_message = messages[i] assert last_message.type == "ai", f"Last message must be assistant, but got {last_message.type}" assert "prompt_ids" in last_message.response_metadata, "Last message must have prompt_ids in response_metadata" assert "response_mask" in last_message.response_metadata, ( "Last message must have response_mask in response_metadata" ) num_turns = 0 for i in range(len(messages)): if messages[i].type == "system": continue # parallel tool calls are in single turn if i == 0 or messages[i].type != messages[i - 1].type: num_turns += 1 prompt_ids = last_message.response_metadata["prompt_ids"] response_mask = last_message.response_metadata["response_mask"] response_ids = prompt_ids[-len(response_mask) :] prompt_ids = prompt_ids[: len(prompt_ids) - len(response_mask)] output = AgentLoopOutput( prompt_ids=prompt_ids, response_ids=response_ids[:response_length], response_mask=response_mask[:response_length], num_turns=num_turns, metrics={}, ) return output ================================================ FILE: verl_rl/recipe/langgraph_agent/example/README.md ================================================ # MathExpression: LangGraph Agent Example MathExpression is a tiny example to demonstrate multi-turn rollout with [LangGraph ReactAgent](https://langchain-ai.github.io/langgraph/agents/overview/). ### Define react agent with tool Firstly, to force ReactAgent to evaluate math expression by tool, we define a special operand `@`: ```python @tool(parse_docstring=True) def calculate(a: int, b: int, operand: str) -> int: """ Compute the results using operand with two integers Args: a: the first operand b: the second operand operand: '+' or '-' or '*' or '@' """ assert operand in ["+", "-", "*", "@"], f"unknown operand {operand}" if operand == "@": return 3 * a - 2 * b return eval(f"{a} {operand} {b}") ``` Without calling `calculate`, ReactAgent is impossible to evaluate math expression correctly. Then, we can equip ReactAgent with `calculate` tool: ```python class MathExpressionReactAgentLoop(ReactAgentLoop): @classmethod def init_class(cls, config, tokenizer): cls.tools = [calculate] super().init_class(config, tokenizer) ``` We can define agent loop config in yaml file, which will be used by AgentLoopWorker to dynamic load custom AgentLoop class. ```yaml - name: math_expression _target_: recipe.langgraph_agent.example.math_expression.MathExpressionReactAgentLoop ``` ### Prepare dataset Now, let's prepare two small datasets for training and evaluation: ```bash python recipe/langgraph_agent/example/create_dataset.py ``` Note that dataset should contain a column `agent_name` with `math_expression`, which is used by `AgentLoopWorker` to select the agent loop class. | prompt | reward_model | agent_name | |--------------------------------------|------------------------------|-----------------| | [{'role': 'user', 'content': '...'}] | {'ground_truth': '-10', ...} | math_expression | | [{'role': 'user', 'content': '...'}] | {'ground_truth': '-10', ...} | math_expression | Generated math expressions are like below, requiring model to call `calculate` multiple times to solve sub expressions. 
### Training Hook all these up and start training: ```bash bash recipe/langgraph_agent/example/run_qwen2.5_3b.sh 2>&1 | tee train.log ``` After 39 steps in total, the model should achieve 100% accuracy on the test dataset: - val-aux/lighteval/MATH/reward: 1.0 - val-aux/num_turns/mean: 9.0, the average number of messages, including assistant and tool turns. We can see the model solve the math expression by calling the `calculate` tool: ``` system You are Qwen, created by Alibaba Cloud. You are a helpful assistant. user We define a new math operator @, where you can only call an external tool to compute. Please put your final answer inside \boxed{} only in the last turn. Now answer the following questions: Compute 5 @ (3 @ 6 @ 8) assistant Sure, let's break this down step by step. We need to compute the expression \(5 @ (3 @ 6 @ 8)\). First, we need to compute the inner expression \(3 @ 6 @ 8\). Let's compute \(3 @ 6\) first using the external tool. <tool_call> {"name": "calculate", "arguments": {"a": 3, "b": 6, "operand": "@"}} </tool_call> user -3 assistant We have computed that \(3 @ 6 = -3\). Now we need to compute \(-3 @ 8\). Let's use the external tool to compute this. <tool_call> {"name": "calculate", "arguments": {"a": -3, "b": 8, "operand": "@"}} </tool_call> user -25 assistant We have computed that \(-3 @ 8 = -25\). Now we can put this result into the original expression to get the final result of \(5 @ (3 @ 6 @ 8)\), which is \(5 @ (-25)\). Let's compute \(5 @ (-25)\) using the external tool. <tool_call> {"name": "calculate", "arguments": {"a": 5, "b": -25, "operand": "@"}} </tool_call> user 65 assistant The final result of \(5 @ (3 @ 6 @ 8)\) is \(\boxed{65}\). ``` ================================================ FILE: verl_rl/recipe/langgraph_agent/example/agent.yaml ================================================ - name: math_expression _target_: recipe.langgraph_agent.example.math_expression.MathExpressionReactAgentLoop ================================================ FILE: verl_rl/recipe/langgraph_agent/example/create_dataset.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Create dataset for calculator """ import random import pandas as pd def generate_math_expression(min_terms=2, max_terms=5, min_number=1, max_number=10, allow_decimals=False, max_depth=2): """ Generate a random mathematical expression with operators +, -, *, /, @, and parentheses. Args: min_terms (int): Minimum number of terms in the expression. max_terms (int): Maximum number of terms in the expression. min_number (int): Minimum value for numbers in the expression. max_number (int): Maximum value for numbers in the expression. allow_decimals (bool): Whether to allow decimal numbers. max_depth (int): Maximum nesting depth for parentheses. Returns: str: A valid mathematical expression as a string.
""" def generate_number(): """Generate a random number (integer or float).""" assert min_number < max_number num = random.uniform(min_number, max_number) if not allow_decimals: num = int(num) else: num = round(num, random.randint(0, 2)) # Round to 0-2 decimal places return str(num) def generate_term(depth=0): """Generate a term (number or parenthesized expression).""" if depth < max_depth and random.random() < 0.5: # 50% chance to add parentheses expr = generate_expression(depth + 1) return f"({expr})" else: return generate_number() def generate_expression(depth=0): """Generate a full expression with multiple terms and operators.""" num_terms = random.randint(min_terms, max_terms) terms = [generate_term(depth) for _ in range(num_terms)] # Randomly select operators operators = ["+", "-", "*", "/", "@"] expr = terms[0] for i in range(1, num_terms): # Bias towards + and - for readability op = random.choices( operators, weights=[0, 0, 0, 0, 1], # + and - are 1.5x more likely than * and / )[0] expr += f" {op} " + terms[i] return expr return generate_expression() def test(): # Example 1: Basic integer expression print(generate_math_expression()) # Output: (3 + 7) * 2 - 5 # Example 2: Expression with decimals print(generate_math_expression(allow_decimals=True)) # Output: 4.5 / (2.1 + 3.7) - 1.2 # Example 3: More complex expression with higher depth print(generate_math_expression(max_terms=6, max_depth=3)) # Output: ((5 * 2) - (3 + 1)) / (7 - 2) + 4 # Example 4: Simplified expression print(generate_math_expression(min_terms=2, max_terms=3, max_number=5)) # Output: 4 - 2 * 3 def calculate(expression: str) -> float: """ Evaluate a mathematical expression with +, -, *, /, @, and parentheses. The @ operator is defined as: a @ b = 3a - 2b. Args: expression (str): Input mathematical expression (e.g., "3@2+4"). Returns: float: Result of the evaluated expression. Raises: ValueError: For invalid expressions (e.g., mismatched parentheses, division by zero). """ def tokenize(s: str) -> list: """Convert the input string into tokens (numbers, operators, parentheses).""" tokens = [] i = 0 while i < len(s): if s[i].isdigit() or s[i] == ".": # Parse number (integer or float) j = i while j < len(s) and (s[j].isdigit() or s[j] == "."): j += 1 tokens.append(s[i:j]) i = j elif s[i] in "+-*/@()": # Operator or parenthesis tokens.append(s[i]) i += 1 elif s[i].isspace(): # Skip whitespace i += 1 else: raise ValueError(f"Invalid character: {s[i]}") return tokens def infix_to_postfix(tokens: list) -> list: """Convert infix notation to postfix notation (Reverse Polish Notation).""" output = [] stack = [] # Higher precedence for @ (between * and +) precedence = {"@": 3, "*": 2, "/": 2, "+": 1, "-": 1} for token in tokens: if token.isdigit() or "." in token: output.append(token) elif token == "(": stack.append(token) elif token == ")": while stack and stack[-1] != "(": output.append(stack.pop()) if not stack or stack[-1] != "(": raise ValueError("Mismatched parentheses") stack.pop() # Discard '(' else: # Operator while stack and stack[-1] != "(" and precedence.get(stack[-1], 0) >= precedence.get(token, 0): output.append(stack.pop()) stack.append(token) # Pop remaining operators while stack: if stack[-1] in "()": raise ValueError("Mismatched parentheses") output.append(stack.pop()) return output def evaluate_postfix(postfix: list) -> float: """Evaluate postfix expression using a stack.""" stack = [] for token in postfix: if token.isdigit() or "." 
in token: stack.append(float(token)) else: if len(stack) < 2: raise ValueError("Invalid expression") b = stack.pop() a = stack.pop() if token == "+": res = a + b elif token == "-": res = a - b elif token == "*": res = a * b elif token == "/": if b == 0: raise ValueError("Division by zero") res = a / b elif token == "@": res = 3 * a - 2 * b # Custom @ operator implementation else: raise ValueError(f"Invalid operator: {token}") stack.append(res) if len(stack) != 1: raise ValueError("Invalid expression") return stack[0] # Remove spaces and validate parentheses expression = expression.replace(" ", "") if expression.count("(") != expression.count(")"): raise ValueError("Mismatched parentheses") tokens = tokenize(expression) postfix = infix_to_postfix(tokens) result = evaluate_postfix(postfix) # Convert integers to integer representation if result.is_integer(): return int(result) return result def generate_data(total_num_dataset, split): rl_dataset = { "prompt": [], "data_source": [], "ability": [], "reward_model": [], "extra_info": [], "agent_name": [], } for idx in range(total_num_dataset): while True: try: expression: str = generate_math_expression( min_terms=2, max_terms=3, min_number=1, max_number=10, allow_decimals=False, max_depth=1 ) num_plus = expression.count("+") num_minus = expression.count("-") num_mul = expression.count("*") num_star = expression.count("@") answer = str(calculate(expression)) # answer = str(eval(expression)) break except Exception as e: print(e) continue num_tool_calls = num_plus + num_minus + num_mul + num_star prompt = ( f"We define a new math operator @, where you can only call an external tool to compute. " f"Please put your final answer inside \\boxed{{}} only in the last turn. Now answer the " f"following questions:\nCompute {expression}" ) prompt_with_template = [ { "role": "user", "content": prompt, } ] rl_dataset["prompt"].append(prompt_with_template) rl_dataset["data_source"].append("lighteval/MATH") rl_dataset["ability"].append("math") rl_dataset["reward_model"].append({"style": "lighteval/MATH", "ground_truth": answer}) rl_dataset["extra_info"].append( {"index": idx, "expression": expression, "split": split, "expected_tool_calls": num_tool_calls} ) rl_dataset["agent_name"].append("math_expression") rl_dataset = pd.DataFrame(data=rl_dataset) return rl_dataset if __name__ == "__main__": # print(calculate("3@2")) # Output: 5 (3*3 - 2*2) # print(calculate("3@2+4")) # Output: 9 (5 + 4) # print(calculate("3*(4@2)")) # Output: 24 (3 * 8) # print(calculate("(5@3)*2")) # Output: 18 (9 * 2) train_dataset = generate_data(total_num_dataset=5000, split="train") test_dataset = generate_data(total_num_dataset=500, split="test") train_dataset.to_parquet("train.parquet") test_dataset.to_parquet("test.parquet") ================================================ FILE: verl_rl/recipe/langgraph_agent/example/math_expression.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from langchain_core.tools import tool from recipe.langgraph_agent.react_agent_loop import ReactAgentLoop @tool(parse_docstring=True) def calculate(a: int, b: int, operand: str) -> int: """ Compute the results using operand with two integers Args: a: the first operand b: the second operand operand: '+' or '-' or '*' or '@' """ assert operand in ["+", "-", "*", "@"], f"unknown operand {operand}" if operand == "@": return 3 * a - 2 * b return eval(f"{a} {operand} {b}") class MathExpressionReactAgentLoop(ReactAgentLoop): @classmethod def init_class(cls, config, tokenizer, **kwargs): cls.tools = [calculate] super().init_class(config, tokenizer) ================================================ FILE: verl_rl/recipe/langgraph_agent/example/run_qwen2.5_3b.sh ================================================ set -x # ================= data/model/tool ================= HDFS_ROOT=${HDFS_ROOT:-$PWD} DATA_ROOT=${DATA_ROOT:-$PWD} model_path=$DATA_ROOT/model/Qwen2.5-3B-Instruct train_files=$DATA_ROOT/dataset/math_expression_tool/train.parquet test_files=$DATA_ROOT/dataset/math_expression_tool/test.parquet # agent agent_loop_config_path=recipe/langgraph_agent/example/agent.yaml # wandb project_name=math_expression_tool experiment_name=qwen2.5-3b default_local_dir=$DATA_ROOT/checkpoint/$experiment_name # ================= algorithm ================= adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 max_turns=8 max_prompt_length=1024 max_response_length=2048 actor_lr=1e-6 train_batch_size=128 ppo_mini_batch_size=16 n_resp_per_prompt=8 n_resp_per_prompt_val=1 # ================= performance ================= infer_tp=2 # vllm train_sp=4 # train offload=True actor_max_token_len_per_gpu=$(( (max_prompt_length + max_response_length) * 4 )) log_prob_max_token_len_per_gpu=$(( actor_max_token_len_per_gpu * 2 )) python3 -m verl.trainer.main_ppo \ algorithm.adv_estimator=$adv_estimator \ algorithm.use_kl_in_reward=$use_kl_in_reward \ algorithm.kl_ctrl.kl_coef=$kl_coef \ data.train_files="$train_files" \ data.val_files="$test_files" \ data.return_raw_chat=True \ data.train_batch_size=$train_batch_size \ data.max_prompt_length=$max_prompt_length \ data.max_response_length=$max_response_length \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.model.path=$model_path \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.use_kl_loss=$use_kl_loss \ actor_rollout_ref.actor.kl_loss_coef=$kl_loss_coef \ actor_rollout_ref.actor.clip_ratio_low=$clip_ratio_low \ actor_rollout_ref.actor.clip_ratio_high=$clip_ratio_high \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.actor.optim.lr=$actor_lr \ actor_rollout_ref.actor.use_dynamic_bsz=True \ actor_rollout_ref.actor.ppo_mini_batch_size=$ppo_mini_batch_size \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=$actor_max_token_len_per_gpu \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=$train_sp \ actor_rollout_ref.actor.fsdp_config.param_offload=$offload \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=$offload \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=$log_prob_max_token_len_per_gpu \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.mode=async \ actor_rollout_ref.rollout.tensor_model_parallel_size=$infer_tp \ actor_rollout_ref.rollout.multi_turn.max_user_turns=$max_turns \
actor_rollout_ref.rollout.multi_turn.max_assistant_turns=$max_turns \ actor_rollout_ref.rollout.multi_turn.format=hermes \ actor_rollout_ref.rollout.agent.agent_loop_config_path=$agent_loop_config_path \ actor_rollout_ref.rollout.gpu_memory_utilization=0.9 \ actor_rollout_ref.rollout.n=$n_resp_per_prompt \ actor_rollout_ref.rollout.val_kwargs.top_p=0.6 \ actor_rollout_ref.rollout.val_kwargs.temperature=1.0 \ actor_rollout_ref.rollout.val_kwargs.n=$n_resp_per_prompt_val \ trainer.logger=['console','wandb'] \ trainer.project_name=$project_name \ trainer.experiment_name=$experiment_name \ trainer.n_gpus_per_node=$ARNOLD_WORKER_GPU \ trainer.val_before_train=True \ trainer.log_val_generations=50 \ trainer.nnodes=$ARNOLD_WORKER_NUM \ trainer.save_freq=-1 \ trainer.default_local_dir=$default_local_dir \ trainer.test_freq=5 \ trainer.total_epochs=1 $@ ================================================ FILE: verl_rl/recipe/langgraph_agent/react_agent_loop.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ LangGraph React Agent Loop. This implementation is exactly the same as `ToolAgentLoop`. Ref: https://langchain-ai.github.io/langgraph/tutorials/workflows/ """ from typing import Any, Literal from langchain_core.runnables import RunnableConfig from langgraph.graph import END, MessagesState, StateGraph from langgraph.prebuilt import ToolNode from recipe.langgraph_agent.chat_model import ( ChatModel, MaxTokenExceededError, convert_to_agent_output, ) from verl.experimental.agent_loop.agent_loop import AgentLoopBase, AgentLoopOutput async def call_model(state: MessagesState, config: RunnableConfig): model = config["configurable"]["model"] sampling_params = config["configurable"]["sampling_params"] try: message = await model.ainvoke(state["messages"], sampling_params=sampling_params) return {"messages": [message]} except MaxTokenExceededError: # last message is ToolMessage return {"messages": []} def should_continue(state: MessagesState, config: RunnableConfig) -> Literal["tools", END]: max_assistant_turns = config["configurable"]["max_assistant_turns"] num_assistant_turns = 0 for message in state["messages"]: if message.type == "ai": num_assistant_turns += 1 last_message = state["messages"][-1] # LLM call failed, e.g.: max response length exceeded if last_message.type == "tool": return END # max assistant turns exceeded if max_assistant_turns and num_assistant_turns >= max_assistant_turns: return END # no tool calls if not last_message.tool_calls: return END return "tools" class ReactAgentLoop(AgentLoopBase): @classmethod def init_class(cls, config, tokenizer, **kwargs): if cls._class_initialized: return cls._class_initialized = True print("Performing class-level ReactAgentLoop initialization") # build graph cls.graph = cls.build_graph() @classmethod def build_graph(cls) -> StateGraph: workflow = StateGraph(MessagesState) workflow.add_node("agent", call_model) workflow.add_node("tools", ToolNode(cls.tools))
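# Added note: the edges below wire the ReAct loop: execution starts at "agent";
# after each model call, should_continue either routes to "tools" (run the pending
# tool calls, then return to "agent") or terminates at END.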
workflow.set_entry_point("agent") workflow.add_conditional_edges( "agent", should_continue, { "tools": "tools", END: END, }, ) workflow.add_edge("tools", "agent") graph = workflow.compile() return graph async def run(self, messages: list[dict[str, Any]], sampling_params: dict[str, Any]) -> AgentLoopOutput: model_path = self.config.actor_rollout_ref.model.path model_name = "/".join(model_path.split("/")[-2:]) rollout = self.config.actor_rollout_ref.rollout model = ChatModel( model=model_name, client=self.server_manager, tokenizer=self.tokenizer, max_tokens=rollout.response_length, max_parallel_calls=rollout.multi_turn.max_parallel_calls, tool_parser=rollout.multi_turn.format, ) model = model.bind_tools(self.tools, tool_choice="any") config = { "configurable": { "model": model, "sampling_params": sampling_params, "max_user_turns": rollout.multi_turn.max_user_turns, "max_assistant_turns": rollout.multi_turn.max_assistant_turns, } } # TODO: how to handle multiple trajectories in a graph invocation? # Each graph node may have its own LLM calls and state, e.g.: # https://github.com/google-gemini/gemini-fullstack-langgraph-quickstart state = await self.graph.ainvoke(input={"messages": messages}, config=config) output = convert_to_agent_output(state["messages"], rollout.response_length) return output ================================================ FILE: verl_rl/recipe/langgraph_agent/test_react_agent_loop.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import numpy as np import pytest import ray from langchain_core.tools import tool from omegaconf import DictConfig from recipe.langgraph_agent.react_agent_loop import ReactAgentLoop from tests.experimental.agent_loop.agent_utils import init_agent_loop_manager from verl.protocol import DataProto from verl.utils import hf_tokenizer @pytest.fixture def init_config() -> DictConfig: from hydra import compose, initialize_config_dir with initialize_config_dir(config_dir=os.path.abspath("verl/trainer/config")): config = compose(config_name="ppo_trainer") model_path = "Qwen/Qwen2.5-1.5B-Instruct" config.actor_rollout_ref.model.path = model_path config.actor_rollout_ref.rollout.name = os.getenv("ROLLOUT_NAME", "vllm") config.actor_rollout_ref.rollout.mode = "async" config.actor_rollout_ref.rollout.prompt_length = 4096 config.actor_rollout_ref.rollout.response_length = 4096 config.actor_rollout_ref.rollout.n = 4 config.actor_rollout_ref.rollout.agent.num_workers = 2 # test sleep/wake_up with fsdp offload config.actor_rollout_ref.actor.fsdp_config.param_offload = True config.actor_rollout_ref.actor.fsdp_config.optimizer_offload = True return config @tool(parse_docstring=True) def get_current_temperature(location: str, unit: str = "celsius"): """Get current temperature at a location. Args: location: The location to get the temperature for, in the format "City, State, Country". unit: The unit to return the temperature in. Defaults to "celsius".
(choices: ["celsius", "fahrenheit"]) Returns: the temperature, the location, and the unit in a dict """ print(f"[DEBUG] get_current_temperature: {location}, {unit}") return { "temperature": 26.1, "location": location, "unit": unit, } @tool(parse_docstring=True) def get_temperature_date(location: str, date: str, unit: str = "celsius"): """Get temperature at a location and date. Args: location: The location to get the temperature for, in the format "City, State, Country". date: The date to get the temperature for, in the format "Year-Month-Day". unit: The unit to return the temperature in. Defaults to "celsius". (choices: ["celsius", "fahrenheit"]) Returns: the temperature, the location, the date and the unit in a dict """ print(f"[DEBUG] get_temperature_date: {location}, {date}, {unit}") return { "temperature": 25.9, "location": location, "date": date, "unit": unit, } class TestReactAgentLoop(ReactAgentLoop): @classmethod def init_class(cls, config, tokenizer, **kwargs): # TODO: find better way to configure tools cls.tools = [get_current_temperature, get_temperature_date] super().init_class(config, tokenizer, **kwargs) def test_react_agent(init_config): ray.init( runtime_env={ "env_vars": { "TOKENIZERS_PARALLELISM": "true", "NCCL_DEBUG": "WARN", "VLLM_LOGGING_LEVEL": "INFO", "VLLM_USE_V1": "1", } } ) # =========================== 1. Init rollout manager =========================== agent_loop_config = [ { "_target_": "recipe.langgraph_agent.test_react_agent_loop.TestReactAgentLoop", "name": "react_agent", }, ] agent_loop_config_path = "/tmp/agent_loop_config.json" with open(agent_loop_config_path, "w") as f: json.dump(agent_loop_config, f) n = 2 init_config.actor_rollout_ref.rollout.n = n # init_config.actor_rollout_ref.rollout.multi_turn.tool_config_path = tool_config_path init_config.actor_rollout_ref.rollout.multi_turn.max_parallel_calls = 2 init_config.actor_rollout_ref.rollout.agent.agent_loop_config_path = agent_loop_config_path agent_loop_manager = init_agent_loop_manager(init_config) # =========================== 2. Generate sequences =========================== raw_prompts = [ [ {"role": "user", "content": "How are you?"}, ], [ {"role": "user", "content": "What's the temperature in Los Angeles now?"}, ], [ {"role": "user", "content": "What's the temperature in New York now?"}, ], [ { "role": "system", "content": "You are Qwen, created by Alibaba Cloud. You are a helpful assistant.\n\n" "Current Date: 2024-09-30", }, {"role": "user", "content": "What's the temperature in San Francisco now? 
How about tomorrow?"}, ], ] batch = DataProto( non_tensor_batch={ "raw_prompt": np.array([np.array(prompt) for prompt in raw_prompts], dtype=object), "agent_name": np.array(["react_agent"] * len(raw_prompts)), }, ) batch = batch.repeat(n) result = agent_loop_manager.generate_sequences(prompts=batch) assert len(result) == len(raw_prompts) * n # Check turns num_turns = result.non_tensor_batch["__num_turns__"] print(f"num_turns: {num_turns}") for i in range(len(num_turns)): if i // n == 0: # [user, assistant] assert num_turns[i] == 2 else: # [user, assistant, tool, assistant] assert num_turns[i] == 4 # Check response_mask tokenizer = hf_tokenizer(init_config.actor_rollout_ref.model.path) responses = result.batch["responses"] response_mask = result.batch["response_mask"] attention_mask = result.batch["attention_mask"] assert responses.size() == response_mask.size(), f"{responses.size()} != {response_mask.size()}" response_length = response_mask.size(1) for i in range(len(responses)): # response with tool response valid_tokens = responses[i][attention_mask[i][-response_length:].bool()] response_with_obs = tokenizer.decode(valid_tokens) # response without tool response valid_tokens = responses[i][response_mask[i].bool()] response_without_obs = tokenizer.decode(valid_tokens) assert "<tool_response>" not in response_without_obs, ( f"<tool_response> found in response: {response_without_obs}" ) assert "</tool_response>" not in response_without_obs, ( f"</tool_response> found in response: {response_without_obs}" ) print("=========================") print(response_with_obs) print("---") print(response_without_obs) print("Test passed!") ray.shutdown() ================================================ FILE: verl_rl/recipe/minicpmo/rl_dataset.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
import copy import logging import math import os import re from typing import Optional import datasets import torch from omegaconf import DictConfig, ListConfig from PIL import Image from torch.utils.data import Dataset from torchvision import transforms from transformers import PreTrainedTokenizer, ProcessorMixin import verl.utils.torch_functional as verl_F from verl.utils.dataset.vision_utils import process_image from verl.utils.model import compute_position_id_with_mask logger = logging.getLogger(__name__) def build_transform(): IMAGENET_INCEPTION_MEAN = (0.5, 0.5, 0.5) # timm.data.IMAGENET_INCEPTION_MEAN IMAGENET_INCEPTION_STD = (0.5, 0.5, 0.5) # timm.data.IMAGENET_INCEPTION_STD return transforms.Compose( [ transforms.ToTensor(), transforms.Normalize(mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), ] ) def build_image_bound(input_ids, tokenizer, new_schema=True, logger=None): if new_schema: start_cond = (input_ids == tokenizer.im_start_id) | (input_ids == tokenizer.slice_start_id) end_cond = (input_ids == tokenizer.im_end_id) | (input_ids == tokenizer.slice_end_id) else: start_cond = input_ids == tokenizer.im_start_id end_cond = input_ids == tokenizer.im_end_id image_start_tokens = torch.where(start_cond)[0] image_start_tokens += 1 image_end_tokens = torch.where(end_cond)[0] if len(image_start_tokens) != len(image_end_tokens): logger.error("image start token != image end tokens") raise Exception("image start token != image end tokens") if len(image_start_tokens) > 0: image_bound = torch.hstack([image_start_tokens.unsqueeze(-1), image_end_tokens.unsqueeze(-1)]) else: image_bound = [] return image_bound
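# Added note: build_image_bound returns an (N, 2) tensor of [start, end] token
# positions, one row per image (or slice) span: start is the first token after
# im_start/slice_start, end is the position of the matching im_end/slice_end.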
def preprocess( images_dict, conversations, tokenizer, transform, query_nums=64, slice_config=None, llm_type=None, patch_size=14, batch_vision=False, max_length=2048, truncation="error", logger=None, ): """ single(multi) image(s) preprocess, the image(s) will be placed at the top of the conversation """ conversations = copy.deepcopy(conversations) assert conversations[0]["role"] == "user", "the first role must be user" if slice_config is not None: assert isinstance(slice_config, dict) assert "patch_size" in slice_config assert "max_slice_nums" in slice_config assert "scale_resolution" in slice_config default_image_placeholder = tokenizer.im_start + tokenizer.unk_token * query_nums + tokenizer.im_end new_schema = False use_image_id = False if llm_type == "qwen": new_schema = True use_image_id = True image_placeholder_dict = {} images = [] image_id_cnt = 0 for img_name, image in images_dict.items(): if slice_config: source_image, patches, best_grid = slice_image( image, slice_config["max_slice_nums"], slice_config["scale_resolution"], slice_config["patch_size"], ) images.append(source_image) image_placeholder = default_image_placeholder if len(patches) > 0: for i in range(len(patches)): for j in range(len(patches[0])): images.append(patches[i][j]) if use_image_id: image_placeholder = ( f"{tokenizer.im_id_start}{image_id_cnt}{tokenizer.im_id_end}" + image_placeholder ) image_id_cnt += 1 image_placeholder += get_grid_placeholder(tokenizer, best_grid, query_nums, new_schema=new_schema) image_placeholder_dict[img_name] = image_placeholder else: images.append(image) if use_image_id: image_placeholder = f"{tokenizer.im_id_start}{image_id_cnt}{tokenizer.im_id_end}" + image_placeholder image_id_cnt += 1 else: image_placeholder = default_image_placeholder image_placeholder_dict[img_name] = image_placeholder images = [transform(i) for i in images] if len(images_dict) == 1 and "<image>" in images_dict: if "<image>" in conversations[0]["content"]: conversations[0]["content"] = conversations[0]["content"].replace("<image>", image_placeholder) else: conversations[0]["content"] = image_placeholder + "\n" + conversations[0]["content"] else: pattern = r"<image_\d+>" new_conversations = [] for conversation in conversations: content = conversation["content"] parts = re.split(f"({pattern})", content) for i, part in enumerate(parts): if not part.strip(): continue if re.match(pattern, part): if part in image_placeholder_dict: parts[i] = image_placeholder_dict[part] else: raise Exception(f"not found {part} in image dict") conversation["content"] = "\n".join(parts) new_conversations.append(conversation) conversations = new_conversations # TODO change role in conversation for different llm prompt_with_chat_template = tokenizer.apply_chat_template(conversations, add_generation_prompt=True, tokenize=False) input_ids, attention_mask = verl_F.tokenize_and_postprocess_data( prompt=prompt_with_chat_template, tokenizer=tokenizer, max_length=max_length, pad_token_id=tokenizer.pad_token_id, left_pad=True, truncation=truncation, ) position_ids = compute_position_id_with_mask(attention_mask) image_bound = build_image_bound(input_ids[0], tokenizer, new_schema, logger) input_dict = { "input_ids": input_ids[0], "attention_mask": attention_mask[0], "position_ids": position_ids[0], "image_bound": image_bound, } if batch_vision: tgt_sizes = [] reshape_images = [] for image in images: H, W = image.shape[1:] reshape_image = reshape_by_patch(image, patch_size) reshape_images.append(reshape_image) tgt_sizes.append([H // patch_size, W // patch_size]) if tgt_sizes: tgt_sizes = torch.Tensor(tgt_sizes).type(torch.int32) input_dict["pixel_values"] = reshape_images input_dict["tgt_sizes"] = tgt_sizes else: input_dict["pixel_values"] = images input_dict["tgt_sizes"] = [] return input_dict def slice_image(image, max_slice_nums=9, scale_resolution=448, patch_size=14, never_split=False): original_size = image.size original_width, original_height = original_size log_ratio = math.log(original_width / original_height) ratio = original_width * original_height / (scale_resolution * scale_resolution) multiple = min(math.ceil(ratio), max_slice_nums) source_image = None best_grid = None patches = [] if multiple <= 1 or never_split: # dont need to slice, upsample best_size = find_best_resize(original_size, scale_resolution, patch_size, allow_upscale=True) source_image = image.resize(best_size, Image.Resampling.BICUBIC) else: candidate_split_grids_nums = [] for i in [multiple - 1, multiple, multiple + 1]: if i == 1 or i > max_slice_nums: continue candidate_split_grids_nums.append(i) # source image, down-sampling and ensure divided by patch_size best_resize = find_best_resize(original_size, scale_resolution, patch_size) source_image = image.copy().resize(best_resize, Image.Resampling.BICUBIC) candidate_grids = [] # find best grid for split_grids_nums in candidate_split_grids_nums: m = 1 while m <= split_grids_nums: if split_grids_nums % m == 0: candidate_grids.append([m, split_grids_nums // m]) m += 1 best_grid = [1, 1] min_error = float("inf") for grid in candidate_grids: error = abs(log_ratio - math.log(grid[0] / grid[1])) if error < min_error: best_grid = grid min_error = error refine_size = get_refine_size(original_size, best_grid, scale_resolution, patch_size, allow_upscale=True) refine_image = image.resize(refine_size, Image.Resampling.BICUBIC) patches = split_to_patches(refine_image, best_grid) return source_image, patches, best_grid
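# Added worked example (not part of the original file): for a 1000x600 image with
# scale_resolution=448 and patch_size=14, ratio = 600000 / 200704 ~= 2.99, so
# multiple = 3 and candidate_split_grids_nums = [2, 3, 4]. Among the candidate
# grids, [2, 1] (2 columns x 1 row) minimizes |log(1000/600) - log(gx/gy)| and is
# chosen as best_grid.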
def ensure_divide(length, patch_size): return max(round(length / patch_size) * patch_size, patch_size) def find_best_resize(original_size, scale_resolution, patch_size, allow_upscale=False): width, height = original_size if (width * height > scale_resolution * scale_resolution) or allow_upscale: r = width / height height = int(scale_resolution / math.sqrt(r)) width = int(height * r) best_width = ensure_divide(width, patch_size) best_height = ensure_divide(height, patch_size) return (best_width, best_height) def get_refine_size(original_size, grid, scale_resolution, patch_size, allow_upscale=False): width, height = original_size grid_x, grid_y = grid refine_width = ensure_divide(width, grid_x) refine_height = ensure_divide(height, grid_y) grid_width = refine_width / grid_x grid_height = refine_height / grid_y best_grid_size = find_best_resize( (grid_width, grid_height), scale_resolution, patch_size, allow_upscale=allow_upscale, ) refine_size = (best_grid_size[0] * grid_x, best_grid_size[1] * grid_y) return refine_size def split_to_patches(image, grid): patches = [] width, height = image.size grid_x = int(width / grid[0]) grid_y = int(height / grid[1]) for i in range(0, height, grid_y): images = [] for j in range(0, width, grid_x): box = (j, i, j + grid_x, i + grid_y) patch = image.crop(box) images.append(patch) patches.append(images) return patches def get_grid_placeholder(tokenizer, grid, query_num, new_schema=False): if new_schema: image_placeholder = tokenizer.slice_start + tokenizer.unk_token * query_num + tokenizer.slice_end else: image_placeholder = tokenizer.im_start + tokenizer.unk_token * query_num + tokenizer.im_end cols = grid[0] rows = grid[1] slices = [] for i in range(rows): lines = [] for j in range(cols): lines.append(image_placeholder) slices.append("".join(lines)) if new_schema: slice_placeholder = "\n".join(slices) else: slice_placeholder = tokenizer.slice_start + "\n".join(slices) + tokenizer.slice_end return slice_placeholder def reshape_by_patch(image_tensor, patch_size): """ :param image_tensor: shape [3, H, W] :param patch_size: :return: [3, patch_size, HW/patch_size] """ patches = torch.nn.functional.unfold(image_tensor, (patch_size, patch_size), stride=(patch_size, patch_size)) patches = patches.reshape(image_tensor.size(0), patch_size, patch_size, -1) patches = patches.permute(0, 1, 3, 2).reshape(image_tensor.size(0), patch_size, -1) return patches def init_minicpmo_config(processor, config): """Initialize MiniCPM-o specific configuration""" minicpmo_config = { "transform": build_transform(), "patch_size": config.get("patch_size", 14), "query_nums": config.get("query_nums", 64), "slice_config": config.get( "slice_config", {"max_slice_nums": 9, "patch_size": config.get("patch_size", 14), "scale_resolution": 448} ), "llm_type": config.get("llm_type", "qwen"), "batch_vision": config.get("batch_vision", True), } return minicpmo_config def process_minicpmo_data( row_dict, messages, tokenizer, minicpmo_config, image_key, max_prompt_length, truncation, logger ): """Process data for MiniCPM-o model""" if len(row_dict[image_key]) == 1: multi_modal_data = {} image = process_image(row_dict.pop(image_key)[0]) multi_modal_data["image"] = [image] images_dict = {"<image>": image} else: raise NotImplementedError model_inputs = preprocess( images_dict, messages, tokenizer, minicpmo_config["transform"], query_nums=minicpmo_config["query_nums"], slice_config=minicpmo_config["slice_config"], llm_type=minicpmo_config["llm_type"], patch_size=minicpmo_config["patch_size"],
        batch_vision=minicpmo_config["batch_vision"],
        max_length=max_prompt_length,
        truncation=truncation,
        logger=logger,
    )
    raw_prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
    raw_prompt = raw_prompt.replace("<image>", "(<image>./</image>)")
    return model_inputs, multi_modal_data, raw_prompt


class RLHFDataset(Dataset):
    """
    Load and preprocess RLHF data from Parquet files.

    - Caches files locally.
    - Reads into a HuggingFace Dataset and tokenizes prompts.
    - Optionally handles images/videos via a ProcessorMixin.
    - Filters prompts over a max length.
    - Supports resuming from checkpoints.

    Args:
        data_files (str or list): Path(s) to Parquet file(s).
        tokenizer (PreTrainedTokenizer): Tokenizer used to convert text into token IDs.
        config (DictConfig): Options like cache_dir, prompt_key, max_prompt_length, truncation, etc.
        processor (ProcessorMixin, optional): Multimodal preprocessor for images/videos.
    """

    def __init__(
        self,
        data_files: str | list[str],
        tokenizer: PreTrainedTokenizer,
        config: DictConfig,
        processor: Optional[ProcessorMixin] = None,
    ):
        if not isinstance(data_files, list | ListConfig):
            data_files = [data_files]

        self.data_files = copy.deepcopy(data_files)
        self.original_data_files = copy.deepcopy(data_files)  # used for resume
        self.tokenizer = tokenizer
        self.processor = processor
        self.config = config

        self.cache_dir = os.path.expanduser(config.get("cache_dir", "~/.cache/verl/rlhf"))
        self.prompt_key = config.get("prompt_key", "prompt")
        self.image_key = config.get("image_key", "images")
        self.video_key = config.get("video_key", "videos")
        self.max_prompt_length = config.get("max_prompt_length", 1024)
        self.return_raw_chat = config.get("return_raw_chat", False)
        self.return_full_prompt = config.get("return_full_prompt", False)
        self.truncation = config.get("truncation", "error")
        self.filter_overlong_prompts = config.get("filter_overlong_prompts", True)

        self.num_workers = config.get("filter_overlong_prompts_workers", max(1, os.cpu_count() // 4))
        self.num_workers = min(self.num_workers, os.cpu_count())
        self.use_shm = config.get("use_shm", False)
        self.chat_template_func = config.get("chat_template_func", None)
        self.need_tools_kwargs = config.get("need_tools_kwargs", False)
        self.filter_prompts = config.get("filter_prompts", True)
        self.serialize_dataset = False
        self.minicpmo_config = init_minicpmo_config(self.processor, config)
        self._download()
        self._read_files_and_tokenize()

    def _download(self, use_origin_parquet=False):
        from verl.utils.fs import copy_to_local

        data_files = self.data_files if not use_origin_parquet else self.original_data_files
        for i, parquet_file in enumerate(data_files):
            self.data_files[i] = copy_to_local(src=parquet_file, cache_dir=self.cache_dir, use_shm=self.use_shm)

    def _read_files_and_tokenize(self):
        dataframes = []
        for parquet_file in self.data_files:
            # read parquet files and cache
            dataframe = datasets.load_dataset("parquet", data_files=parquet_file)["train"]
            dataframes.append(dataframe)
        self.dataframe: datasets.Dataset = datasets.concatenate_datasets(dataframes)

        print(f"dataset len: {len(self.dataframe)}")

    def resume_dataset_state(self):
        self.serialize_dataset = not hasattr(self, "original_data_files")
        # resume the dataframe if it is not serialized in data.pt
        if not self.serialize_dataset:
            self._download(use_origin_parquet=True)  # download and resume from the original parquet files
            self._read_files_and_tokenize()
        else:
            print(r"old dataloader ckpt file is used, please train from scratch for better ckpt performance")

    def __len__(self):
        return len(self.dataframe)
    def _build_messages(self, example: dict):
        return example.pop(self.prompt_key)

    def __getitem__(self, item):
        """
        Note that we also return the raw_input_ids so that they can be combined with other chat templates.
        """
        row_dict: dict = self.dataframe[item]
        messages = self._build_messages(row_dict)
        model_inputs = {}

        if self.processor is not None:
            model_inputs, multi_modal_data, raw_prompt = process_minicpmo_data(
                row_dict,
                messages,
                self.tokenizer,
                self.minicpmo_config,
                self.image_key,
                self.max_prompt_length,
                self.truncation,
                logger,
            )
            input_ids = model_inputs.pop("input_ids")
            attention_mask = model_inputs.pop("attention_mask")
            position_ids = model_inputs.pop("position_ids")

            # There's a trap here: multi_modal_inputs has to be a dict, not a BatchFeature
            row_dict["multi_modal_data"] = multi_modal_data
            row_dict["multi_modal_inputs"] = dict(model_inputs)
        else:
            raw_prompt = self.tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
            model_inputs = self.tokenizer(raw_prompt, return_tensors="pt", add_special_tokens=False)
            input_ids = model_inputs.pop("input_ids")
            attention_mask = model_inputs.pop("attention_mask")
            position_ids = compute_position_id_with_mask(attention_mask)

        row_dict["input_ids"] = input_ids
        row_dict["attention_mask"] = attention_mask
        row_dict["position_ids"] = position_ids

        raw_prompt_ids = self.tokenizer.encode(raw_prompt, add_special_tokens=False)
        if len(raw_prompt_ids) > self.max_prompt_length:
            if self.truncation == "left":
                raw_prompt_ids = raw_prompt_ids[-self.max_prompt_length :]
            elif self.truncation == "right":
                raw_prompt_ids = raw_prompt_ids[: self.max_prompt_length]
            elif self.truncation == "middle":
                left_half = self.max_prompt_length // 2
                right_half = self.max_prompt_length - left_half
                raw_prompt_ids = raw_prompt_ids[:left_half] + raw_prompt_ids[-right_half:]
            elif self.truncation == "error":
                raise RuntimeError(f"Prompt length {len(raw_prompt_ids)} is longer than {self.max_prompt_length}.")

        row_dict["raw_prompt_ids"] = raw_prompt_ids
        # encode prompts without chat template
        if self.return_raw_chat:
            row_dict["raw_prompt"] = messages

        # get prompts with chat template
        if self.return_full_prompt:
            row_dict["full_prompts"] = raw_prompt  # array of strings

        # add index for each prompt
        index = row_dict.get("extra_info", {}).get("index", 0)
        tools_kwargs = row_dict.get("extra_info", {}).get("tools_kwargs", {})
        interaction_kwargs = row_dict.get("extra_info", {}).get("interaction_kwargs", {})
        need_tools_kwargs = row_dict.get("extra_info", {}).get("need_tools_kwargs", self.need_tools_kwargs)
        if need_tools_kwargs and not tools_kwargs:
            logger.warning(f"tools_kwargs is empty for index {index}, data source: {row_dict['data_source']}")
        row_dict["index"] = index
        row_dict["tools_kwargs"] = tools_kwargs
        row_dict["interaction_kwargs"] = interaction_kwargs
        return row_dict

    def __getstate__(self):
        if not self.serialize_dataset:
            state = self.__dict__.copy()
            if "dataframe" in state:
                del state["dataframe"]
            return state
        return self.__dict__.copy()


================================================
FILE: verl_rl/recipe/one_step_off_policy/README.md
================================================
# Recipe: One Step Off Policy Async Trainer

**Author:** `https://github.com/meituan-search`

Last updated: 07/17/2025.

## Introduction

### Background

The current reinforcement learning training process implemented by verl is synchronous, adhering to the algorithmic workflows of established methods like PPO, GRPO, and DAPO.
In each step, training samples are generated by the latest model, and the model is updated after training completes. While this approach keeps training strictly on-policy and stabilizes RL training, it suffers from severe efficiency issues: model updates must wait for the longest output in the generation phase to complete, so GPUs sit idle while long-tail samples are being generated, resulting in significant underutilization. The more severe the long-tail problem in sample generation, the lower the overall training efficiency. For example, in DAPO 32B training, the rollout phase accounts for approximately 70% of the total time, and increasing resources does not reduce the rollout duration.

![DAPO 32B Math Performance](https://raw.githubusercontent.com/eric-haibin-lin/verl-community/refs/heads/main/docs/dapo_32b_math.png)

> source data: https://wandb.ai/verl-org/DAPO%20Reproduction%20on%20verl/workspace?nw=nwusertongyuxuan361

### Solution

We have implemented the **One Step Off Async Trainer** to help alleviate this issue. This approach parallelizes the generation and training processes, using the samples generated in the previous step for the current training step. It also partitions resources explicitly, allocating dedicated resources for generation while automatically assigning the remainder to training. By reducing the resources allocated to the generation phase, we mitigate GPU idle time during long-tail sample generation. Throughout this process, the generation parameters stay exactly one step behind the training parameters (one step off policy).

![One Step Off Policy Diagram](https://raw.githubusercontent.com/eric-haibin-lin/verl-community/refs/heads/main/docs/one_step_off_policy.png)

> reference: [AReaL: A Large-Scale Asynchronous Reinforcement Learning System for Language Reasoning](https://arxiv.org/abs/2505.24298)

Our core contributions include:

1. **Parallel Generation and Training**: Samples for the next batch are asynchronously generated while the current batch is being trained.
2. **Resource Isolation**: Unlike `hybrid_engine`, this method requires explicit resource allocation for rollout, with the remaining resources automatically assigned to training (sketched below).
3. **NCCL Parameter Synchronization**: Employs NCCL communication primitives for seamless parameter transfer between the generation and training modules.
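Concretely, resource isolation means the trainer builds two disjoint Ray resource pools, one for training workers and one for rollout workers (see `main_ppo.py` later in this recipe). A minimal sketch of the pool layout, with illustrative values for a single 8-GPU node split 6/2:

```python
# Sketch of the resource isolation idea (values are illustrative; the actual
# wiring lives in this recipe's main_ppo.py): rollout gets a dedicated pool,
# training takes the remaining GPUs.
trainer_nnodes, trainer_n_gpus_per_node = 1, 6
rollout_nnodes, rollout_n_gpus_per_node = 1, 2

resource_pool_spec = {
    # each list entry is the number of GPUs contributed by one node
    "actor_pool": [trainer_n_gpus_per_node] * trainer_nnodes,    # -> [6]
    "rollout_pool": [rollout_n_gpus_per_node] * rollout_nnodes,  # -> [2]
}
```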
### Experimental Results

- **Machine Configuration**: 2 nodes, 16 H20 GPUs in total
  - Generation: 4 GPUs
  - Training: 12 GPUs
- **Model**: Qwen2.5-Math-7B
- **Rollout Configuration**:
  - **Max Response Length**: FSDP2: 20,480 tokens; Megatron: 8,192 tokens
- **Algorithm**: DAPO
- **Rollout Engine**: vLLM

| training mode          | engine        | step | gen | wait_prev_gen | generate_sequences | old_log_prob | update_actor | total time    | acc/best@32/mean | acc/maj@32/mean |
|------------------------|---------------|------|-----|---------------|--------------------|--------------|--------------|---------------|------------------|-----------------|
| colocate sync          | vLLM+FSDP2    | 749  | 321 | -             | 247                | 88           | 286          | 19h18m        | 0.5948           | 0.417           |
| one-step-overlap async | vLLM+FSDP2    | 520  | -   | 45            | 458                | 108          | 337          | 15h34m (+23%) | 0.6165           | 0.494           |
| colocate sync          | vLLM+Megatron | 699  | 207 | -             | 162                | 119          | 344          | 18h21m        | 0.605            | 0.4217          |
| one-step-overlap async | vLLM+Megatron | 566  | -   | 59            | 501                | 120          | 347          | 13h06m (+40%) | 0.6569           | 0.4038          |

* colocate sync: step ≈ gen + old_log_prob + update_actor
* one-step-overlap async: step ≈ wait_prev_gen + old_log_prob + update_actor

![One Step Off Megatron Performance](https://raw.githubusercontent.com/eric-haibin-lin/verl-community/refs/heads/main/docs/one_step_off_megatron.png)

> source data: https://wandb.ai/hou-zg-meituan/one-step-off-policy?nw=nwuserhouzg

## Implementation

### One Step Off Policy Async Pipeline

Our **One Step Off Policy Async Pipeline** integrates seamlessly into the existing training logic at minimal cost, eliminating the need for additional sample storage management. The core mechanism uses `async_gen_next_batch` for asynchronous rollout generation while maintaining continuous operation across epoch transitions via `create_continuous_iterator`.

```python
# iterator generator, simplifies one-step-off integration into the training process
def _create_continuous_iterator(self):
    for epoch in range(self.config.trainer.total_epochs):
        iterator = iter(self.train_dataloader)
        for batch_dict in iterator:
            yield epoch, batch_dict

# read the next batch of samples, sync parameters, and launch async gen_seq
def _async_gen_next_batch(self, continuous_iterator):
    # read train_data
    try:
        epoch, batch_dict = next(continuous_iterator)
    except StopIteration:
        return None
    batch = DataProto.from_single_dict(batch_dict)
    gen_batch = batch_process(batch)
    # sync weights from actor to rollout
    self.sync_rollout_weights()
    # async generation
    gen_batch_output = self.rollout_wg.async_generate_sequences(gen_batch)
    # wrap the in-flight generation in a future
    return GenerationBatchFuture(epoch, batch, gen_batch_output)

continuous_iterator = self._create_continuous_iterator()
# run rollout first to achieve one-step-off
batch_data_future = self._async_gen_next_batch(continuous_iterator)
while batch_data_future is not None:
    # wait for the gen_seq result from the previous step
    batch = batch_data_future.get()
    # launch the next async call to generate sequences
    batch_data_future = self._async_gen_next_batch(continuous_iterator)

    # compute advantages
    batch = critic.compute_values(batch)
    batch = reference.compute_log_prob(batch)
    batch = reward.compute_reward(batch)
    batch = compute_advantages(batch)

    # model update
    critic_metrics = critic.update_critic(batch)
    actor_metrics = actor.update_actor(batch)
```
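The loop above treats the in-flight rollout as a `GenerationBatchFuture`, whose definition is not shown in this README. A minimal sketch of what such a wrapper could look like (field names taken from the snippet above; `DataProto.union` assumed as the merge step, not confirmed from the recipe's source):

```python
import ray

class GenerationBatchFuture:
    """Pairs a training batch with its still-running rollout (illustrative sketch)."""

    def __init__(self, epoch, batch, gen_batch_output):
        self.epoch = epoch                        # epoch the batch was drawn from
        self.batch = batch                        # DataProto holding the prompts
        self.gen_batch_output = gen_batch_output  # Ray future from async_generate_sequences

    def get(self):
        # Block until the previous step's rollout finishes (this wait shows up
        # as wait_prev_gen), then merge the generated sequences into the batch.
        gen_batch_result = ray.get(self.gen_batch_output)
        return self.batch.union(gen_batch_result)
```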
### Parameter Synchronization

Our NCCL-based weight update for the rollout model performs very well: most of the time, the latency is under 300 ms, which is negligible for RLHF.

> **sync_rollout_weights**: The time spent synchronizing parameters from the actor to the rollout is extremely short and can
> almost be ignored, because it is implemented with NCCL.

```python
class ActorRolloutRefWorker:
    # the actor collects the meta-info of model parameters for parameter sync
    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def get_actor_weights_info(self):
        params = self._get_actor_params()
        ret = []
        for key, tensor in params.items():
            ret.append((key, tensor.size(), tensor.dtype))
        self._weights_info = ret
        return ret

    # the rollout stores the meta-info of model parameters for parameter sync
    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def set_actor_weights_info(self, weights_info):
        self._weights_info = weights_info


class AsyncRayPPOTrainer(RayPPOTrainer):
    def init_workers(self):
        ...
        # the rollout obtains the meta-info of model parameters from the actor for parameter sync
        weights_info = self.actor_wg.get_actor_weights_info()[0]
        self.rollout_wg.set_actor_weights_info(weights_info)

        # create an actor-rollout communication group for parameter sync
        actor_rollout_workers = self.actor_wg.workers + self.rollout_wg.workers
        collective.create_collective_group(
            actor_rollout_workers,
            len(actor_rollout_workers),
            list(range(0, len(actor_rollout_workers))),
            backend="nccl",
            group_name="actor_rollout",
        )
```

```python
# the driver process calls the actor and rollout respectively to sync parameters via nccl
def sync_rollout_weights(self):
    self.actor_wg.sync_rollout_weights()
    ray.get(self.rollout_wg.sync_rollout_weights())

# fsdp model parameter sync
@register(dispatch_mode=Dispatch.ONE_TO_ALL, blocking=False)
def sync_rollout_weights(self):
    params = self._get_actor_params() if self._is_actor else None
    if self._is_rollout:
        inference_model = (
            self.rollout.inference_engine.llm_engine.model_executor.driver_worker.worker.model_runner.model
        )
        patch_vllm_moe_model_weight_loader(inference_model)
    # model parameters are broadcast tensor-by-tensor from the actor to the rollout
    for key, shape, dtype in self._weights_info:
        tensor = torch.empty(shape, dtype=dtype, device=get_torch_device().current_device())
        if self._is_actor:
            assert key in params
            origin_data = params[key]
            if hasattr(origin_data, "full_tensor"):
                origin_data = origin_data.full_tensor()
            if torch.distributed.get_rank() == 0:
                tensor.copy_(origin_data)
        from ray.util.collective import collective

        collective.broadcast(tensor, src_rank=0, group_name="actor_rollout")
        if self._is_rollout:
            inference_model.load_weights([(key, tensor)])
```

## Usage

### FSDP2 Configuration Example

```shell
python3 -m recipe.one_step_off_policy.async_main_ppo \
    --config-path=config \
    --config-name='one_step_off_ppo_trainer.yaml' \
    actor_rollout_ref.actor.strategy=fsdp2 \
    # actor and rollout are placed separately
    actor_rollout_ref.hybrid_engine=False \
    # actor and rollout resources
    trainer.nnodes=1 \
    trainer.n_gpus_per_node=6 \
    rollout.nnodes=1 \
    rollout.n_gpus_per_node=2
```

### Megatron Configuration Example

```shell
python3 -m recipe.one_step_off_policy.async_main_ppo \
    --config-path=config \
    --config-name='one_step_off_ppo_megatron_trainer.yaml' \
    actor_rollout_ref.actor.strategy=megatron \
    # actor and rollout are placed separately
    actor_rollout_ref.hybrid_engine=False \
    # actor and rollout resources
    trainer.nnodes=1 \
    trainer.n_gpus_per_node=6 \
    rollout.nnodes=1 \
    rollout.n_gpus_per_node=2
```

### Configuration Guidelines
1. **Card Number Relationships**

   Maintain either of these relationships for optimal batch distribution:

   - `actor_rollout_ref.rollout.n` should be an integer divisor of `trainer.n_gpus_per_node * trainer.nnodes`
   - `actor_rollout_ref.rollout.n * data.train_batch_size` should be evenly divisible by `trainer.n_gpus_per_node * trainer.nnodes`

   > Rationale: this ensures training samples can be evenly distributed across the training GPUs when part of the resources is dedicated to generation.

2. **Dynamic Resource Tuning**

   Adjust `trainer.nnodes`, `trainer.n_gpus_per_node`, `rollout.nnodes`, and `rollout.n_gpus_per_node` based on phase durations:

   - **Ideal state**: rollout and training phases have comparable durations
   - **Diagnostic metrics**:
     - Monitor the `wait_prev_gen` duration
     - Analyze the `sequence_length` distribution
   - **Adjustment strategy**:
     - High `wait_prev_gen` + uniform sequence lengths → increase rollout resources
     - High `wait_prev_gen` + long-tail sequences → optimize stopping criteria (adding resources won't help)

   > **wait_prev_gen**: The time spent waiting for the previous rollout to finish (the part that is not fully overlapped).

**Resource Configuration Strategies:**

- **Resource-constrained scenario**: optimize utilization by adjusting the GPU allocation ratio, keeping the number of nodes equal so that training and rollout can share nodes.
  - Configure `trainer.nnodes = rollout.nnodes` with `trainer.n_gpus_per_node + rollout.n_gpus_per_node = physical_gpus_per_node`, and control the rollout resource share by adjusting `n_gpus_per_node`.
- **Resource-abundant scenario**: optimize performance by adjusting the number of nodes, keeping the number of GPUs per node equal so that training and rollout parallelism can scale independently.
  - Configure `trainer.n_gpus_per_node = rollout.n_gpus_per_node` and control the rollout resource share by adjusting `trainer.nnodes` and `rollout.nnodes`.

> **Note**: The total number of nodes required by the system is not simply `trainer.nnodes + rollout.nnodes`. The
> actual count depends on GPU capacity:
> - When `trainer.n_gpus_per_node + rollout.n_gpus_per_node <= physical_gpus_per_node`,
>   the required node count is `max(trainer.nnodes, rollout.nnodes)`.
> - When `trainer.n_gpus_per_node + rollout.n_gpus_per_node > physical_gpus_per_node`,
>   the required node count is `trainer.nnodes + rollout.nnodes`.
>
> For example, on 8-GPU nodes, `trainer.n_gpus_per_node=6` and `rollout.n_gpus_per_node=2` sum to 8, so training and rollout can share nodes and `trainer.nnodes=2`, `rollout.nnodes=2` requires only 2 physical nodes.

## Functional Support

| Category           | Support Situation                                                                                      |
|--------------------|--------------------------------------------------------------------------------------------------------|
| train engine       | FSDP2<br/>Megatron                                                                                      |
| rollout engine     | vLLM                                                                                                    |
| AdvantageEstimator | GRPO<br/>GRPO_PASSK<br/>REINFORCE_PLUS_PLUS<br/>RLOO<br/>OPO<br/>REINFORCE_PLUS_PLUS_BASELINE<br/>GPG |
| Reward             | all                                                                                                     |



================================================
FILE: verl_rl/recipe/one_step_off_policy/config/one_step_off_ppo_megatron_trainer.yaml
================================================
hydra:
  searchpath:
    - file://verl/trainer/config

defaults:
  - ppo_megatron_trainer
  - _self_

# config for the rollout (only for resource isolation)
rollout:
  # Number of nodes used in the rollout
  nnodes: 1
  # Number of GPUs per node
  n_gpus_per_node: 8


================================================
FILE: verl_rl/recipe/one_step_off_policy/config/one_step_off_ppo_trainer.yaml
================================================
hydra:
  searchpath:
    - file://verl/trainer/config

defaults:
  - ppo_trainer
  - _self_

# config for the rollout (only for resource isolation)
rollout:
  # Number of nodes used in the rollout
  nnodes: 1
  # Number of GPUs per node
  n_gpus_per_node: 8


================================================
FILE: verl_rl/recipe/one_step_off_policy/dapo_7b_math_fsdp2_4_12.sh
================================================
#!/usr/bin/env bash
set -xeuo pipefail

project_name='DAPO'
exp_name='DAPO-Qwen2.5-7b-MATH-0527a1-fsdp2-one-step-off-4-12'

adv_estimator=grpo

use_kl_in_reward=False
kl_coef=0.0
use_kl_loss=False
kl_loss_coef=0.0

clip_ratio_low=0.2
clip_ratio_high=0.28

max_prompt_length=$((1024 * 2))
max_response_length=$((1024 * 8))
enable_overlong_buffer=True
overlong_buffer_len=$((1024 * 4))
overlong_penalty_factor=1.0

loss_agg_mode="token-mean"

train_prompt_bsz=512
n_resp_per_prompt=12
train_prompt_mini_bsz=32

# Ray
# RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"}
# WORKING_DIR=${WORKING_DIR:-"${PWD}"}
# RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"}
NNODES=${NNODES:-2}
NGPUS_PER_NODE=${NGPUS_PER_NODE:-8}
n_gpus_rollout=2
n_gpus_training=$((NGPUS_PER_NODE - n_gpus_rollout))

# Paths
RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"}
# very important!
please modify the max_position_embeddings in config.json to 32768 after downloading from huggingface MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen2.5-Math-7B"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"} TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"} # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_top_p=0.7 # Performance Related Parameter use_dynamic_bsz=True actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 2)) infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 3)) ref_offload=True actor_offload=False gen_tp=2 sp_size=4 fsdp_size=2 python3 -m recipe.one_step_off_policy.main_ppo \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.train_batch_size=${train_prompt_bsz} \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.actor.strategy=fsdp2 \ critic.strategy=fsdp2 \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.hybrid_engine=False \ +actor_rollout_ref.model.override_config.max_position_embeddings=32768 \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.fsdp_config.param_offload=${actor_offload} \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=${actor_offload} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.rollout.gpu_memory_utilization=0.80 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=True \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.ref.fsdp_config.param_offload=${ref_offload} \ 
actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.actor.fsdp_config.fsdp_size=${fsdp_size} \ reward_model.reward_manager=dapo \ +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \ +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \ +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \ +reward_model.reward_kwargs.overlong_buffer_cfg.log=False \ +reward_model.reward_kwargs.max_resp_len=${max_response_length} \ trainer.logger=['console','tensorboard'] \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.val_before_train=True \ trainer.test_freq=10 \ trainer.save_freq=-1 \ trainer.total_epochs=10 \ trainer.total_training_steps=100 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=auto \ trainer.log_val_generations=10 \ trainer.nnodes="${NNODES}" \ trainer.n_gpus_per_node="${n_gpus_training}" \ rollout.nnodes="${NNODES}" \ rollout.n_gpus_per_node="${n_gpus_rollout}" ================================================ FILE: verl_rl/recipe/one_step_off_policy/dapo_7b_math_fsdp2_colocate.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail project_name='DAPO' exp_name='DAPO-Qwen2.5-7b-MATH-0527a1-fsdp2-colocate' adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 8)) enable_overlong_buffer=True overlong_buffer_len=$((1024 * 4)) overlong_penalty_factor=1.0 loss_agg_mode="token-mean" train_prompt_bsz=512 n_resp_per_prompt=12 train_prompt_mini_bsz=32 # Ray # RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} # WORKING_DIR=${WORKING_DIR:-"${PWD}"} # RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} NNODES=${NNODES:-2} NGPUS_PER_NODE=${NGPUS_PER_NODE:-8} # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} # very important! 
please modify the max_position_embeddings in config.json to 32768 after downloading from huggingface MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen2.5-Math-7B"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"} TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"} # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_top_p=0.7 # Performance Related Parameter use_dynamic_bsz=True actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 2)) infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 3)) offload=True gen_tp=2 sp_size=4 fsdp_size=2 # reference run wandb: https://wandb.ai/verl-org/DAPO%20Reproduction%20on%20verl/runs/ow47vvon?nw=nwusertongyuxuan361 python3 -m verl.trainer.main_ppo \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.train_batch_size=${train_prompt_bsz} \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.actor.strategy=fsdp2 \ critic.strategy=fsdp2 \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.model.use_remove_padding=True \ +actor_rollout_ref.model.override_config.max_position_embeddings=32768 \ actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.fsdp_config.param_offload=${offload} \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=${offload} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.grad_clip=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.actor.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.rollout.gpu_memory_utilization=0.80 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=True \ 
actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.ref.fsdp_config.param_offload=${offload} \ actor_rollout_ref.ref.ulysses_sequence_parallel_size=${sp_size} \ actor_rollout_ref.actor.fsdp_config.fsdp_size=${fsdp_size} \ reward_model.reward_manager=dapo \ +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \ +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \ +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \ +reward_model.reward_kwargs.overlong_buffer_cfg.log=False \ +reward_model.reward_kwargs.max_resp_len=${max_response_length} \ trainer.logger=['console','tensorboard'] \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node="${NGPUS_PER_NODE}" \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=True \ trainer.test_freq=10 \ trainer.save_freq=-1 \ trainer.total_epochs=10 \ trainer.total_training_steps=100 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=auto \ trainer.log_val_generations=10 ================================================ FILE: verl_rl/recipe/one_step_off_policy/dapo_7b_math_megatron_4_12.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail project_name='DAPO' exp_name='DAPO-Qwen2.5-7b-MATH-0527a1-megatron-one-step-off-4-12' adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 8)) enable_overlong_buffer=True overlong_buffer_len=$((1024 * 4)) overlong_penalty_factor=1.0 loss_agg_mode="token-mean" train_prompt_bsz=512 n_resp_per_prompt=12 train_prompt_mini_bsz=32 # Ray # RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} # WORKING_DIR=${WORKING_DIR:-"${PWD}"} # RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} NNODES=${NNODES:-2} NGPUS_PER_NODE=${NGPUS_PER_NODE:-8} n_gpus_rollout=2 n_gpus_training=$((NGPUS_PER_NODE - n_gpus_rollout)) # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} # very important! 
please modify the max_position_embeddings in config.json to 32768 after downloading from huggingface MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen2.5-Math-7B"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"} TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"} # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_top_p=0.7 # Performance Related Parameter use_dynamic_bsz=True actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 2)) infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 3)) ref_offload=True actor_offload=False gen_tp=2 train_tp=2 train_pp=2 # TODO: support dynamic_bsz for megatron # actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ # actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ # actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ # actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ # actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ # actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ python3 -m recipe.one_step_off_policy.main_ppo \ --config-path=config \ --config-name='one_step_off_ppo_megatron_trainer.yaml' \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.train_batch_size=${train_prompt_bsz} \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.actor.strategy=megatron \ critic.strategy=megatron \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ actor_rollout_ref.hybrid_engine=False \ +actor_rollout_ref.model.override_config.max_position_embeddings=32768 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=2 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.megatron.param_offload=${actor_offload} \ actor_rollout_ref.actor.megatron.optimizer_offload=${actor_offload} \ actor_rollout_ref.actor.megatron.grad_offload=${actor_offload} \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=${train_pp} \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=${train_tp} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.optim.clip_grad=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.rollout.gpu_memory_utilization=0.80 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ 
actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=True \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=${train_pp} \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=${train_tp} \ actor_rollout_ref.ref.megatron.param_offload=${ref_offload} \ reward_model.reward_manager=dapo \ +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \ +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \ +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \ +reward_model.reward_kwargs.overlong_buffer_cfg.log=False \ +reward_model.reward_kwargs.max_resp_len=${max_response_length} \ trainer.logger=['console','tensorboard'] \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.val_before_train=True \ trainer.test_freq=10 \ trainer.save_freq=-1 \ trainer.total_epochs=10 \ trainer.total_training_steps=100 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=auto \ trainer.log_val_generations=10 \ trainer.nnodes="${NNODES}" \ trainer.n_gpus_per_node="${n_gpus_training}" \ rollout.nnodes="${NNODES}" \ rollout.n_gpus_per_node="${n_gpus_rollout}" ================================================ FILE: verl_rl/recipe/one_step_off_policy/dapo_7b_math_megatron_colocate.sh ================================================ #!/usr/bin/env bash set -xeuo pipefail project_name='DAPO' exp_name='DAPO-Qwen2.5-7b-MATH-0519a1-megatron-colocate' adv_estimator=grpo use_kl_in_reward=False kl_coef=0.0 use_kl_loss=False kl_loss_coef=0.0 clip_ratio_low=0.2 clip_ratio_high=0.28 max_prompt_length=$((1024 * 2)) max_response_length=$((1024 * 8)) enable_overlong_buffer=True overlong_buffer_len=$((1024 * 4)) overlong_penalty_factor=1.0 loss_agg_mode="token-mean" train_prompt_bsz=512 n_resp_per_prompt=16 train_prompt_mini_bsz=32 # Ray # RAY_ADDRESS=${RAY_ADDRESS:-"http://localhost:8265"} # WORKING_DIR=${WORKING_DIR:-"${PWD}"} # RUNTIME_ENV=${RUNTIME_ENV:-"${WORKING_DIR}/verl/trainer/runtime_env.yaml"} NNODES=${NNODES:-2} NGPUS_PER_NODE=${NGPUS_PER_NODE:-8} # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} # very important! 
please modify the max_position_embeddings in config.json to 32768 after downloading from huggingface MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen2.5-Math-7B"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/dapo-math-17k.parquet"} TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/aime-2024.parquet"} # Algorithm temperature=1.0 top_p=1.0 top_k=-1 # 0 for HF rollout, -1 for vLLM rollout val_top_p=0.7 # Performance Related Parameter use_dynamic_bsz=True actor_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 2)) infer_ppo_max_token_len=$(((max_prompt_length + max_response_length) * 3)) offload=True gen_tp=2 train_tp=2 train_pp=2 # TODO: support dynamic_bsz for megatron # actor_rollout_ref.actor.use_dynamic_bsz=${use_dynamic_bsz} \ # actor_rollout_ref.ref.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ # actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=${use_dynamic_bsz} \ # actor_rollout_ref.actor.ppo_max_token_len_per_gpu=${actor_ppo_max_token_len} \ # actor_rollout_ref.ref.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ # actor_rollout_ref.rollout.log_prob_max_token_len_per_gpu=${infer_ppo_max_token_len} \ python3 -m verl.trainer.main_ppo \ --config-path=config \ --config-name='ppo_megatron_trainer.yaml' \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.prompt_key=prompt \ data.truncation='left' \ data.max_prompt_length=${max_prompt_length} \ data.max_response_length=${max_response_length} \ data.train_batch_size=${train_prompt_bsz} \ actor_rollout_ref.rollout.n=${n_resp_per_prompt} \ algorithm.adv_estimator=${adv_estimator} \ algorithm.use_kl_in_reward=${use_kl_in_reward} \ algorithm.kl_ctrl.kl_coef=${kl_coef} \ actor_rollout_ref.actor.strategy=megatron \ critic.strategy=megatron \ actor_rollout_ref.actor.use_kl_loss=${use_kl_loss} \ actor_rollout_ref.actor.kl_loss_coef=${kl_loss_coef} \ actor_rollout_ref.actor.clip_ratio_low=${clip_ratio_low} \ actor_rollout_ref.actor.clip_ratio_high=${clip_ratio_high} \ actor_rollout_ref.actor.clip_ratio_c=10.0 \ +actor_rollout_ref.model.override_config.max_position_embeddings=32768 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=2 \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.actor.optim.lr_warmup_steps=10 \ actor_rollout_ref.actor.optim.weight_decay=0.1 \ actor_rollout_ref.actor.ppo_mini_batch_size=${train_prompt_mini_bsz} \ actor_rollout_ref.actor.megatron.param_offload=${offload} \ actor_rollout_ref.actor.megatron.optimizer_offload=${offload} \ actor_rollout_ref.actor.megatron.grad_offload=${offload} \ actor_rollout_ref.actor.megatron.pipeline_model_parallel_size=${train_pp} \ actor_rollout_ref.actor.megatron.tensor_model_parallel_size=${train_tp} \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.actor.optim.clip_grad=1.0 \ actor_rollout_ref.actor.loss_agg_mode=${loss_agg_mode} \ actor_rollout_ref.rollout.gpu_memory_utilization=0.80 \ actor_rollout_ref.rollout.tensor_model_parallel_size=${gen_tp} \ actor_rollout_ref.rollout.enable_chunked_prefill=True \ actor_rollout_ref.rollout.max_num_batched_tokens=$((max_prompt_length + max_response_length)) \ actor_rollout_ref.rollout.temperature=${temperature} \ actor_rollout_ref.rollout.top_p=${top_p} \ actor_rollout_ref.rollout.top_k=${top_k} \ 
actor_rollout_ref.rollout.val_kwargs.temperature=${temperature} \ actor_rollout_ref.rollout.val_kwargs.top_p=${val_top_p} \ actor_rollout_ref.rollout.val_kwargs.top_k=${top_k} \ actor_rollout_ref.rollout.val_kwargs.do_sample=True \ actor_rollout_ref.rollout.val_kwargs.n=1 \ actor_rollout_ref.ref.megatron.pipeline_model_parallel_size=${train_pp} \ actor_rollout_ref.ref.megatron.tensor_model_parallel_size=${train_tp} \ actor_rollout_ref.ref.megatron.param_offload=${offload} \ reward_model.reward_manager=dapo \ +reward_model.reward_kwargs.overlong_buffer_cfg.enable=${enable_overlong_buffer} \ +reward_model.reward_kwargs.overlong_buffer_cfg.len=${overlong_buffer_len} \ +reward_model.reward_kwargs.overlong_buffer_cfg.penalty_factor=${overlong_penalty_factor} \ +reward_model.reward_kwargs.overlong_buffer_cfg.log=False \ +reward_model.reward_kwargs.max_resp_len=${max_response_length} \ trainer.logger=['console','tensorboard'] \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.n_gpus_per_node=8 \ trainer.nnodes="${NNODES}" \ trainer.val_before_train=True \ trainer.test_freq=10 \ trainer.save_freq=-1 \ trainer.total_epochs=10 \ trainer.total_training_steps=100 \ trainer.default_local_dir="${CKPTS_DIR}" \ trainer.resume_mode=auto \ trainer.log_val_generations=10 ================================================ FILE: verl_rl/recipe/one_step_off_policy/fsdp_workers.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright 2025 Meituan Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import logging import os import torch import torch.distributed from omegaconf import DictConfig, OmegaConf from torch.distributed.device_mesh import init_device_mesh from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from transformers import AutoConfig from verl.single_controller.base import Worker from verl.single_controller.base.decorator import Dispatch, register from verl.utils import hf_processor, hf_tokenizer, omega_conf_to_dataclass from verl.utils.debug import DistProfiler, DistProfilerExtension, log_gpu_memory_usage from verl.utils.device import ( get_device_name, get_nccl_backend, get_torch_device, ) from verl.utils.fs import copy_to_local from verl.utils.fsdp_utils import ( fsdp_version, ) from verl.utils.import_utils import import_external_libs from verl.utils.model import get_generation_config, update_model_config from verl.utils.vllm_utils import patch_vllm_moe_model_weight_loader from verl.workers.fsdp_workers import ActorRolloutRefWorker as ARRWorker from verl.workers.fsdp_workers import CriticWorker logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) device_name = get_device_name() __all__ = ["ActorRolloutRefWorker", "AsyncActorRolloutRefWorker", "CriticWorker", "RolloutWorker"] class ActorRolloutRefWorker(ARRWorker): def _get_actor_params(self): assert self._is_actor params = self.actor_module_fsdp.state_dict() from verl.utils.model import convert_weight_keys params = convert_weight_keys( params, getattr(self.actor_module_fsdp, "_fsdp_wrapped_module", self.actor_module_fsdp) ) return params @register(dispatch_mode=Dispatch.ONE_TO_ALL, blocking=False) def sync_rollout_weights(self): assert (self._is_actor or self._is_rollout) and not self.config.hybrid_engine assert hasattr(self, "_weights_info") and self._weights_info is not None params = self._get_actor_params() if self._is_actor else None if self._is_rollout: inference_model = ( self.rollout.inference_engine.llm_engine.model_executor.driver_worker.worker.model_runner.model ) patch_vllm_moe_model_weight_loader(inference_model) for key, shape, dtype in self._weights_info: tensor = torch.empty(shape, dtype=dtype, device=get_torch_device().current_device()) if self._is_actor: assert key in params origin_data = params[key] if hasattr(origin_data, "full_tensor"): origin_data = origin_data.full_tensor() if torch.distributed.get_rank() == 0: tensor.copy_(origin_data) from ray.util.collective import collective collective.broadcast(tensor, src_rank=0, group_name="actor_rollout") if self._is_rollout: inference_model.load_weights([(key, tensor)]) @register(dispatch_mode=Dispatch.ONE_TO_ALL) def get_actor_weights_info(self): assert self._is_actor if hasattr(self, "_weights_info"): return self._weights_info if fsdp_version(self.actor_module_fsdp) == 1: from torch.distributed.fsdp.api import ShardedStateDictConfig, StateDictType FSDP.set_state_dict_type( self.actor_module_fsdp, state_dict_type=StateDictType.SHARDED_STATE_DICT, state_dict_config=ShardedStateDictConfig(), ) params = self._get_actor_params() ret = [] for key, tensor in params.items(): ret.append((key, tensor.size(), tensor.dtype)) self._weights_info = ret return ret class RolloutWorker(ActorRolloutRefWorker): def __init__(self, config: DictConfig, role: str): Worker.__init__(self) assert role == "rollout" self.config = config import torch.distributed if not torch.distributed.is_initialized(): rank = int(os.environ.get("RANK", 0)) world_size = int(os.environ.get("WORLD_SIZE", 1)) 
torch.distributed.init_process_group( backend=f"cpu:gloo,{get_device_name()}:{get_nccl_backend()}", rank=rank, world_size=world_size, init_method=os.environ.get("DIST_INIT_METHOD", None), ) # TODO(haibin.lin): # As of now the type of config is DictConfig, if we assign config.profiler with ProfilerConfig, # it will actually convert the ProfilerConfig dataclass back to a DictConfig. # We can still use ProfilerConfig for testing purposes (tests/utils/test_nvtx_profile.py) # as they provide a DictConfig-like interface # The benefit of creating the dataclass config is to perform validation during __post_init__ profiler_config = omega_conf_to_dataclass(config.rollout.get("profiler", {})) DistProfilerExtension.__init__(self, DistProfiler(rank=self.rank, config=profiler_config)) self._is_rollout = True self._is_actor = False @register(dispatch_mode=Dispatch.ONE_TO_ALL) def init_model(self): # This is used to import external_lib into the huggingface systems import_external_libs(self.config.model.get("external_lib", None)) override_model_config = OmegaConf.to_container(self.config.model.get("override_config", OmegaConf.create())) use_shm = self.config.model.get("use_shm", False) local_path = copy_to_local(self.config.model.path, use_shm=use_shm) trust_remote_code = self.config.model.get("trust_remote_code", False) self.tokenizer = hf_tokenizer(local_path, trust_remote_code=trust_remote_code) self.processor = hf_processor(local_path, trust_remote_code=trust_remote_code) if self.config.model.get("custom_chat_template", None) is not None: if self.processor is not None: self.processor.chat_template = self.config.model.custom_chat_template else: self.tokenizer.chat_template = self.config.model.custom_chat_template # override model kwargs actor_model_config = AutoConfig.from_pretrained( local_path, trust_remote_code=trust_remote_code, attn_implementation="flash_attention_2" ) # patch for kimi-vl if getattr(actor_model_config, "model_type", None) == "kimi_vl": actor_model_config.text_config.topk_method = "greedy" self.generation_config = get_generation_config(local_path, trust_remote_code=trust_remote_code) override_config_kwargs = { "bos_token_id": self.tokenizer.bos_token_id, "eos_token_id": self.tokenizer.eos_token_id, "pad_token_id": self.tokenizer.pad_token_id, } override_config_kwargs.update(override_model_config) update_model_config(actor_model_config, override_config_kwargs=override_config_kwargs) if self.rank == 0: print(f"Model config after override: {actor_model_config}") infer_tp = self.config.rollout.tensor_model_parallel_size dp = self.world_size // infer_tp assert self.world_size % infer_tp == 0, ( f"rollout world_size: {self.world_size} is not divisible by infer_tp: {infer_tp}" ) rollout_device_mesh = init_device_mesh( device_name, mesh_shape=(dp, infer_tp), mesh_dim_names=["dp", "infer_tp"] ) rollout_name = self.config.rollout.name assert rollout_name == "vllm" from verl.workers.rollout.vllm_rollout import vLLMRollout log_gpu_memory_usage(f"Before building {rollout_name} rollout", logger=logger) from verl.workers.rollout.vllm_rollout import vLLMAsyncRollout vllm_rollout_cls = vLLMRollout if self.config.rollout.mode == "sync" else vLLMAsyncRollout rollout = vllm_rollout_cls( model_path=local_path, config=self.config.rollout, tokenizer=self.tokenizer, model_hf_config=actor_model_config, device_mesh=rollout_device_mesh, trust_remote_code=trust_remote_code, ) log_gpu_memory_usage(f"After building {rollout_name} rollout", logger=logger) from .vllm_sharding_manager import VLLMShardingManager
rollout_sharding_manager = VLLMShardingManager( inference_engine=rollout.inference_engine, device_mesh=rollout_device_mesh ) log_gpu_memory_usage("After building sharding manager", logger=logger) self.rollout = rollout self.rollout_sharding_manager = rollout_sharding_manager @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO, blocking=False) def async_generate_sequences(self, *args, **kwargs): return super().generate_sequences(*args, **kwargs) @register(dispatch_mode=Dispatch.ONE_TO_ALL) def set_actor_weights_info(self, weights_info): assert self._is_rollout self._weights_info = weights_info class AsyncActorRolloutRefWorker(ActorRolloutRefWorker): def __init__(self, *args, **kwargs): raise NotImplementedError ================================================ FILE: verl_rl/recipe/one_step_off_policy/grpo_0.6b_gsm8k_fsdp2_2_6.sh ================================================ set -x project_name='GRPO' exp_name='GRPO-Qwen3-0.6b-gsm8k-fsdp2-one-step-off-2-6' # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen3-0.6B"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/gsm8k/train.parquet"} TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/gsm8k/test.parquet"} NNODES=${NNODES:-1} NGPUS_PER_NODE=${NGPUS_PER_NODE:-8} n_gpus_rollout=2 n_gpus_training=$((NGPUS_PER_NODE - n_gpus_rollout)) python3 -m recipe.one_step_off_policy.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.train_batch_size=1152 \ data.max_prompt_length=512 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.actor.strategy=fsdp2 \ critic.strategy=fsdp2 \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.hybrid_engine=False \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=192 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=32 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.rollout.load_format=safetensors \ actor_rollout_ref.rollout.layered_summon=True \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.val_before_train=True \ trainer.logger=['console','tensorboard'] \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.save_freq=-1 \ trainer.test_freq=5 \ trainer.total_epochs=2 \ trainer.nnodes="${NNODES}" \ trainer.n_gpus_per_node="${n_gpus_training}" \ rollout.nnodes="${NNODES}" \ rollout.n_gpus_per_node="${n_gpus_rollout}" $@ ================================================ FILE: verl_rl/recipe/one_step_off_policy/grpo_3b_gsm8k_fsdp2_2_6.sh ================================================ set -x project_name='GRPO' 
exp_name='GRPO-Qwen2.5-3b-gsm8k-fsdp2-one-step-off-2-6' # Paths RAY_DATA_HOME=${RAY_DATA_HOME:-"${HOME}/verl"} MODEL_PATH=${MODEL_PATH:-"${RAY_DATA_HOME}/models/Qwen/Qwen2.5-3B-Instruct"} CKPTS_DIR=${CKPTS_DIR:-"${RAY_DATA_HOME}/ckpts/${project_name}/${exp_name}"} TRAIN_FILE=${TRAIN_FILE:-"${RAY_DATA_HOME}/data/gsm8k/train.parquet"} TEST_FILE=${TEST_FILE:-"${RAY_DATA_HOME}/data/gsm8k/test.parquet"} NNODES=${NNODES:-1} NGPUS_PER_NODE=${NGPUS_PER_NODE:-8} n_gpus_rollout=2 n_gpus_training=$((NGPUS_PER_NODE - n_gpus_rollout)) python3 -m recipe.one_step_off_policy.main_ppo \ algorithm.adv_estimator=grpo \ data.train_files="${TRAIN_FILE}" \ data.val_files="${TEST_FILE}" \ data.train_batch_size=1152 \ data.max_prompt_length=512 \ data.max_response_length=1024 \ data.filter_overlong_prompts=True \ data.truncation='error' \ actor_rollout_ref.actor.strategy=fsdp2 \ critic.strategy=fsdp2 \ actor_rollout_ref.model.path="${MODEL_PATH}" \ actor_rollout_ref.actor.optim.lr=1e-6 \ actor_rollout_ref.hybrid_engine=False \ actor_rollout_ref.model.use_remove_padding=True \ actor_rollout_ref.actor.ppo_mini_batch_size=192 \ actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=32 \ actor_rollout_ref.actor.use_kl_loss=True \ actor_rollout_ref.actor.kl_loss_coef=0.001 \ actor_rollout_ref.actor.kl_loss_type=low_var_kl \ actor_rollout_ref.actor.entropy_coeff=0 \ actor_rollout_ref.model.enable_gradient_checkpointing=True \ actor_rollout_ref.actor.fsdp_config.param_offload=False \ actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.rollout.tensor_model_parallel_size=2 \ actor_rollout_ref.rollout.name=vllm \ actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \ actor_rollout_ref.rollout.n=5 \ actor_rollout_ref.rollout.load_format=safetensors \ actor_rollout_ref.rollout.layered_summon=True \ actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=32 \ actor_rollout_ref.ref.fsdp_config.param_offload=True \ algorithm.use_kl_in_reward=False \ trainer.critic_warmup=0 \ trainer.val_before_train=True \ trainer.logger=['console','tensorboard'] \ trainer.project_name="${project_name}" \ trainer.experiment_name="${exp_name}" \ trainer.save_freq=-1 \ trainer.test_freq=5 \ trainer.total_epochs=2 \ trainer.nnodes="${NNODES}" \ trainer.n_gpus_per_node="${n_gpus_training}" \ rollout.nnodes="${NNODES}" \ rollout.n_gpus_per_node="${n_gpus_rollout}" $@ ================================================ FILE: verl_rl/recipe/one_step_off_policy/main_ppo.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2025 Meituan Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Note that we don't combine the main with ray_trainer as ray_trainer is used by other mains.
""" import os import socket import hydra import ray from omegaconf import OmegaConf from verl.trainer.constants_ppo import get_ppo_ray_runtime_env from verl.trainer.main_ppo import create_rl_dataset, create_rl_sampler from verl.trainer.ppo.reward import load_reward_manager from .ray_trainer import OneStepOffRayTrainer @hydra.main(config_path="config", config_name="one_step_off_ppo_trainer", version_base=None) def main(config): run_ppo(config) # Define a function to run the PPO-like training process def run_ppo(config) -> None: # Check if Ray is not initialized if not ray.is_initialized(): # Initialize Ray with a local cluster configuration # Set environment variables in the runtime environment to control tokenizer parallelism, # NCCL debug level, VLLM logging level, and allow runtime LoRA updating # `num_cpus` specifies the number of CPU cores Ray can use, obtained from the configuration ray.init( runtime_env=get_ppo_ray_runtime_env(), num_cpus=config.ray_init.num_cpus, ) # Create a remote instance of the TaskRunner class, and # Execute the `run` method of the TaskRunner instance remotely and wait for it to complete if ( OmegaConf.select(config.trainer, "profile_steps") is not None and len(OmegaConf.select(config.trainer, "profile_steps")) > 0 ): nsight_options = OmegaConf.to_container(config.trainer.controller_nsight_options) runner = TaskRunner.options(runtime_env={"nsight": nsight_options}).remote() else: runner = TaskRunner.remote() ray.get(runner.run.remote(config)) # [Optional] get the path of the timeline trace file from the configuration, default to None # This file is used for performance analysis timeline_json_file = config.ray_init.get("timeline_json_file", None) if timeline_json_file: ray.timeline(filename=timeline_json_file) @ray.remote(num_cpus=1) # please make sure main_task is not scheduled on head class TaskRunner: def run(self, config): # Print the initial configuration. `resolve=True` will evaluate symbolic values. from pprint import pprint from omegaconf import OmegaConf from verl.utils.fs import copy_to_local print(f"TaskRunner hostname: {socket.gethostname()}, PID: {os.getpid()}") pprint(OmegaConf.to_container(config, resolve=True)) OmegaConf.resolve(config) # Download the checkpoint from HDFS to the local machine. # `use_shm` determines whether to use shared memory, which could lead to faster model loading if turned on local_path = copy_to_local( config.actor_rollout_ref.model.path, use_shm=config.actor_rollout_ref.model.get("use_shm", False) ) # Instantiate the tokenizer and processor. from verl.utils import hf_processor, hf_tokenizer trust_remote_code = config.data.get("trust_remote_code", False) tokenizer = hf_tokenizer(local_path, trust_remote_code=trust_remote_code) # Used for multimodal LLM, could be None processor = hf_processor(local_path, trust_remote_code=trust_remote_code, use_fast=True) # Define worker classes based on the actor strategy. 
if config.actor_rollout_ref.actor.strategy == "fsdp2": assert config.actor_rollout_ref.actor.strategy == config.critic.strategy from verl.single_controller.ray import RayWorkerGroup from .fsdp_workers import ( ActorRolloutRefWorker, AsyncActorRolloutRefWorker, CriticWorker, RolloutWorker, ) actor_rollout_cls = ( AsyncActorRolloutRefWorker if config.actor_rollout_ref.rollout.mode == "async" else ActorRolloutRefWorker ) ray_worker_group_cls = RayWorkerGroup elif config.actor_rollout_ref.actor.strategy == "megatron": assert config.actor_rollout_ref.actor.strategy == config.critic.strategy from verl.single_controller.ray.megatron import NVMegatronRayWorkerGroup from .megatron_workers import ( ActorRolloutRefWorker, AsyncActorRolloutRefWorker, CriticWorker, RolloutWorker, ) actor_rollout_cls = ( AsyncActorRolloutRefWorker if config.actor_rollout_ref.rollout.mode == "async" else ActorRolloutRefWorker ) ray_worker_group_cls = NVMegatronRayWorkerGroup else: raise NotImplementedError from .ray_trainer import ResourcePoolManager, Role role_worker_mapping = { Role.Actor: ray.remote(actor_rollout_cls), Role.Rollout: ray.remote(RolloutWorker), Role.Critic: ray.remote(CriticWorker), } global_pool_id = "actor_pool" assert config.trainer.n_gpus_per_node > 0, "config.trainer.n_gpus_per_node must be greater than 0" assert config.trainer.nnodes > 0, "config.trainer.nnodes must be greater than 0" assert config.rollout.n_gpus_per_node > 0, "config.rollout.n_gpus_per_node must be greater than 0" assert config.rollout.nnodes > 0, "config.rollout.nnodes must be greater than 0" actor_pool = [config.trainer.n_gpus_per_node] * config.trainer.nnodes rollout_pool = [config.rollout.n_gpus_per_node] * config.rollout.nnodes resource_pool_spec = { "actor_pool": actor_pool, "rollout_pool": rollout_pool, } mapping = { Role.Actor: "actor_pool", Role.Rollout: "rollout_pool", Role.Critic: "actor_pool", } print(f"resource_pool_spec: {resource_pool_spec}") # We should adopt a multi-source reward function here: # - for rule-based rm, we directly call a reward score # - for model-based rm, we call a model # - for code related prompt, we send to a sandbox if there are test cases # finally, we combine all the rewards together # The reward type depends on the tag of the data if config.reward_model.enable: if config.reward_model.strategy in ["fsdp2"]: from verl.workers.fsdp_workers import RewardModelWorker elif config.reward_model.strategy == "megatron": from verl.workers.megatron_workers import RewardModelWorker else: raise NotImplementedError role_worker_mapping[Role.RewardModel] = ray.remote(RewardModelWorker) mapping[Role.RewardModel] = global_pool_id # Add a reference policy worker if KL loss or KL reward is used. if config.algorithm.use_kl_in_reward or config.actor_rollout_ref.actor.use_kl_loss: role_worker_mapping[Role.RefPolicy] = ray.remote(ActorRolloutRefWorker) mapping[Role.RefPolicy] = global_pool_id # Load the reward manager for training and validation. reward_fn = load_reward_manager( config, tokenizer, num_examine=0, **config.reward_model.get("reward_kwargs", {}) ) val_reward_fn = load_reward_manager( config, tokenizer, num_examine=1, **config.reward_model.get("reward_kwargs", {}) ) resource_pool_manager = ResourcePoolManager(resource_pool_spec=resource_pool_spec, mapping=mapping) from verl.utils.dataset.rl_dataset import collate_fn # Create training and validation datasets. 
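# create_rl_dataset / create_rl_sampler are reused from verl.trainer.main_ppo;
# with the launch scripts above they read the gsm8k train/test parquet files
# given by data.train_files / data.val_files.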
train_dataset = create_rl_dataset(config.data.train_files, config.data, tokenizer, processor) val_dataset = create_rl_dataset(config.data.val_files, config.data, tokenizer, processor) train_sampler = create_rl_sampler(config.data, train_dataset) # Initialize the PPO trainer. trainer = OneStepOffRayTrainer( config=config, tokenizer=tokenizer, processor=processor, role_worker_mapping=role_worker_mapping, resource_pool_manager=resource_pool_manager, ray_worker_group_cls=ray_worker_group_cls, reward_fn=reward_fn, val_reward_fn=val_reward_fn, train_dataset=train_dataset, val_dataset=val_dataset, collate_fn=collate_fn, train_sampler=train_sampler, device_name=config.trainer.device, ) # Initialize the workers of the trainer. trainer.init_workers() # Start the training process. trainer.fit() if __name__ == "__main__": main() ================================================ FILE: verl_rl/recipe/one_step_off_policy/megatron_workers.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright 2025 Meituan Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os import torch import torch.distributed from omegaconf import DictConfig from verl.single_controller.base.decorator import Dispatch, register from verl.utils.debug import ( log_gpu_memory_usage, ) from verl.utils.device import get_device_name, get_torch_device from verl.utils.fs import copy_to_local from verl.utils.vllm_utils import patch_vllm_moe_model_weight_loader from verl.workers.megatron_workers import ActorRolloutRefWorker as ARRWorker from verl.workers.megatron_workers import CriticWorker, RewardModelWorker logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) __all__ = ["ActorRolloutRefWorker", "AsyncActorRolloutRefWorker", "CriticWorker", "RewardModelWorker", "RolloutWorker"] class ActorRolloutRefWorker(ARRWorker): def __init__(self, config: DictConfig, role: str): assert role in ["actor", "ref"] tmp_role = "ref" if role == "ref" else "actor_rollout" super().__init__(config, tmp_role) if role == "actor": self._is_rollout = False self.role = role def _get_actor_params_generator(self): assert self._is_actor from verl.models.mcore import get_mcore_weight_converter from verl.utils.megatron_utils import per_tensor_generator layer_name_mapping = { "qkv_layer_name": "self_attention.linear_qkv.", "gate_proj_layer_name": "linear_fc1.", } weight_converter = get_mcore_weight_converter(self.actor_model_config, self.dtype) generator = per_tensor_generator( self.actor.actor_module, self.actor_model_config, weight_converter, self.tf_config, layer_name_mapping, ) return generator @register(dispatch_mode=Dispatch.ONE_TO_ALL, blocking=False) def sync_rollout_weights(self): assert (self._is_actor or self._is_rollout) and not self.config.hybrid_engine assert hasattr(self, "_weights_info") and self._weights_info is not None params_generator = self._get_actor_params_generator() if self._is_actor else None if self._is_rollout: 
inference_model = ( self.rollout.inference_engine.llm_engine.model_executor.driver_worker.worker.model_runner.model ) patch_vllm_moe_model_weight_loader(inference_model) for key, shape, dtype in self._weights_info: if self._is_actor: weight_key, weight = next(params_generator) assert key == weight_key assert shape == weight.size() assert dtype == weight.dtype tensor = torch.empty(shape, dtype=dtype, device=get_torch_device().current_device()) if self._is_actor and torch.distributed.get_rank() == 0: tensor.copy_(weight) from ray.util.collective import collective collective.broadcast(tensor, src_rank=0, group_name="actor_rollout") if self._is_rollout: inference_model.load_weights([(key, tensor)]) @register(dispatch_mode=Dispatch.ONE_TO_ALL) def get_actor_weights_info(self): assert self._is_actor if hasattr(self, "_weights_info"): return self._weights_info params_generator = self._get_actor_params_generator() ret = [] for key, tensor in params_generator: ret.append((key, tensor.size(), tensor.dtype)) self._weights_info = ret return ret class RolloutWorker(ActorRolloutRefWorker): def __init__(self, config: DictConfig, role: str): assert role == "rollout" ARRWorker.__init__(self, config, role) @register(dispatch_mode=Dispatch.ONE_TO_ALL) def init_model(self): if self.config.model.get("external_lib", None) is not None: # This is used to import external_lib into the huggingface systems import importlib importlib.import_module(self.config.model.external_lib) from omegaconf import OmegaConf from verl.utils.torch_dtypes import PrecisionType override_model_config = OmegaConf.to_container(self.config.model.get("override_config", OmegaConf.create())) override_transformer_config = {} self.param_dtype = torch.bfloat16 self.dtype = PrecisionType.to_dtype(self.param_dtype) trust_remote_code = self.config.model.get("trust_remote_code", False) from verl.utils.model import get_generation_config self._init_hf_config_and_tf_config( self.config.model.path, self.config.model.path, self.dtype, override_model_config, override_transformer_config, trust_remote_code, ) self.generation_config = get_generation_config(self.local_path) from torch.distributed.device_mesh import init_device_mesh assert self.config.rollout.name == "vllm" assert self.config.rollout.mode == "sync" from verl.workers.rollout.vllm_rollout import vLLMRollout from .vllm_sharding_manager import VLLMShardingManager # NOTE(sgm): If the QKV and gate_up projection layers are concatenated together in the actor, # we will reorganize their weight format when resharding from actor to rollout.
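# The rollout workers form a 2-D device mesh of shape (dp, infer_tp). As a
# worked example (assuming the 2-GPU rollout pool from the launch scripts
# above with tensor_model_parallel_size=2): world_size=2 and infer_tp=2, so
# dp=1 and the mesh is (1, 2), i.e. one vLLM replica spanning both GPUs.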
infer_tp = self.config.rollout.tensor_model_parallel_size dp = self.world_size // infer_tp assert self.world_size % infer_tp == 0, ( f"rollout world_size: {self.world_size} is not divisible by infer_tp: {infer_tp}" ) rollout_device_mesh = init_device_mesh( get_device_name(), mesh_shape=(dp, infer_tp), mesh_dim_names=["dp", "infer_tp"] ) log_gpu_memory_usage("Before building vllm rollout", logger=None) local_path = copy_to_local(self.config.model.path, use_shm=self.config.model.get("use_shm", False)) from verl.workers.rollout.vllm_rollout import vLLMAsyncRollout vllm_rollout_cls = vLLMRollout if self.config.rollout.mode == "sync" else vLLMAsyncRollout rollout = vllm_rollout_cls( model_path=local_path, config=self.config.rollout, tokenizer=self.tokenizer, model_hf_config=self.hf_config, device_mesh=rollout_device_mesh, trust_remote_code=trust_remote_code, ) log_gpu_memory_usage("After building vllm rollout", logger=logger) sharding_manager = VLLMShardingManager( inference_engine=rollout.inference_engine, device_mesh=rollout_device_mesh, ) log_gpu_memory_usage("After building sharding manager", logger=logger) self.rollout, self.sharding_manager = rollout, sharding_manager self.rollout.sharding_manager = sharding_manager @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO, blocking=False) def async_generate_sequences(self, *args, **kwargs): return super().generate_sequences(*args, **kwargs) @register(dispatch_mode=Dispatch.ONE_TO_ALL) def set_actor_weights_info(self, weights_info): assert self._is_rollout self._weights_info = weights_info class AsyncActorRolloutRefWorker(ActorRolloutRefWorker): def __init__(self, *args, **kwargs): raise NotImplementedError ================================================ FILE: verl_rl/recipe/one_step_off_policy/ray_trainer.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # Copyright 2025 Meituan Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" This trainer supports model-agonistic model initialization with huggingface """ import uuid from pprint import pprint import numpy as np import ray import torch from omegaconf import OmegaConf from torch.utils.data import Dataset, Sampler from tqdm import tqdm from verl import DataProto from verl.single_controller.ray import RayClassWithInitArgs, RayWorkerGroup from verl.single_controller.ray.base import create_colocated_worker_cls from verl.trainer.ppo import core_algos from verl.trainer.ppo.core_algos import AdvantageEstimator, agg_loss from verl.trainer.ppo.metric_utils import ( compute_data_metrics, compute_throughout_metrics, compute_timing_metrics, ) from verl.trainer.ppo.ray_trainer import ( RayPPOTrainer, ResourcePoolManager, Role, WorkerType, apply_kl_penalty, compute_advantage, compute_response_mask, ) from verl.trainer.ppo.reward import compute_reward, compute_reward_async from verl.utils.debug import marked_timer from verl.utils.metric import ( reduce_metrics, ) from verl.utils.tracking import ValidationGenerationsLogger class GenerationBatchFuture: """ Wrapper class for encapsulating batch generation results """ def __init__(self, epoch, batch, gen_batch_output): """ :param epoch: current epoch :param batch: Input batch data :param gen_batch_output: Generated sequences from the main model (DataProtoFuture) """ self.epoch = epoch self.batch = batch self.gen_batch_output = gen_batch_output def get(self): """ Get the actual results by calling get() method on gen_batch_output Returns: tuple: (batch, gen_batch_result) - batch: Original input batch data - gen_batch_result: Result from gen_batch_output.get() or gen_batch_output itself """ # Call get() method on gen_batch_output if available if hasattr(self.gen_batch_output, "get"): gen_batch_result = self.gen_batch_output.get() else: gen_batch_result = self.gen_batch_output return self.epoch, self.batch, gen_batch_result class OneStepOffRayTrainer(RayPPOTrainer): # TODO: support each role have individual ray_worker_group_cls, # i.e., support different backend of different role def __init__( self, config, tokenizer, role_worker_mapping: dict[Role, WorkerType], resource_pool_manager: ResourcePoolManager, ray_worker_group_cls: RayWorkerGroup = RayWorkerGroup, processor=None, reward_fn=None, val_reward_fn=None, train_dataset: Dataset | None = None, val_dataset: Dataset | None = None, collate_fn=None, train_sampler: Sampler | None = None, device_name="cuda", ): """ Initialize distributed PPO trainer with Ray backend. Note that this trainer runs on the driver process on a single CPU/GPU node. Args: config: Configuration object containing training parameters. tokenizer: Tokenizer used for encoding and decoding text. role_worker_mapping (dict[Role, WorkerType]): Mapping from roles to worker classes. resource_pool_manager (ResourcePoolManager): Manager for Ray resource pools. ray_worker_group_cls (RayWorkerGroup, optional): Class for Ray worker groups. Defaults to RayWorkerGroup. processor: Optional data processor, used for multimodal data reward_fn: Function for computing rewards during training. val_reward_fn: Function for computing rewards during validation. train_dataset (Optional[Dataset], optional): Training dataset. Defaults to None. val_dataset (Optional[Dataset], optional): Validation dataset. Defaults to None. collate_fn: Function to collate data samples into batches. train_sampler (Optional[Sampler], optional): Sampler for the training dataset. Defaults to None. 
device_name (str, optional): Device name for training (e.g., "cuda", "cpu"). Defaults to "cuda". """ # Store the tokenizer for text processing self.tokenizer = tokenizer self.processor = processor self.config = config self.reward_fn = reward_fn self.val_reward_fn = val_reward_fn self.hybrid_engine = config.actor_rollout_ref.hybrid_engine assert not self.hybrid_engine self.role_worker_mapping = role_worker_mapping self.resource_pool_manager = resource_pool_manager self.use_reference_policy = Role.RefPolicy in role_worker_mapping self.use_rm = Role.RewardModel in role_worker_mapping self.ray_worker_group_cls = ray_worker_group_cls self.device_name = device_name self.validation_generations_logger = ValidationGenerationsLogger() # if ref_in_actor is True, the reference policy will be the actor without LoRA applied self.ref_in_actor = config.actor_rollout_ref.model.get("lora_rank", 0) > 0 # define in-reward KL control # kl loss control currently not supported if config.algorithm.use_kl_in_reward: self.kl_ctrl_in_reward = core_algos.get_kl_controller(config.algorithm.kl_ctrl) if self.config.algorithm.adv_estimator == AdvantageEstimator.GAE: self.use_critic = True elif self.config.algorithm.adv_estimator in [ AdvantageEstimator.GRPO, AdvantageEstimator.GRPO_PASSK, AdvantageEstimator.REINFORCE_PLUS_PLUS, # AdvantageEstimator.REMAX, # TODO: REMAX advantage estimator is not yet supported in one_step_off_policy AdvantageEstimator.RLOO, AdvantageEstimator.OPO, AdvantageEstimator.REINFORCE_PLUS_PLUS_BASELINE, AdvantageEstimator.GPG, ]: self.use_critic = False else: raise NotImplementedError self._validate_config() self._create_dataloader(train_dataset, val_dataset, collate_fn, train_sampler) def _validate(self): self.actor_rollout_wg = self.rollout_wg ret = super()._validate() self.actor_rollout_wg = self.actor_wg return ret def init_workers(self): """Initialize distributed training workers using Ray backend. Creates: 1. Ray resource pools from configuration 2. Worker groups for each role (actor, critic, etc.)
""" self.resource_pool_manager.create_resource_pool() self.resource_pool_to_cls = {pool: {} for pool in self.resource_pool_manager.resource_pool_dict.values()} # create actor and rollout for role, role_name in [(Role.Actor, "actor"), (Role.Rollout, "rollout")]: resource_pool = self.resource_pool_manager.get_resource_pool(role) role_cls = RayClassWithInitArgs( cls=self.role_worker_mapping[role], config=self.config.actor_rollout_ref, role=role_name, ) self.resource_pool_to_cls[resource_pool][role_name] = role_cls # create critic if self.use_critic: resource_pool = self.resource_pool_manager.get_resource_pool(Role.Critic) critic_cls = RayClassWithInitArgs(cls=self.role_worker_mapping[Role.Critic], config=self.config.critic) self.resource_pool_to_cls[resource_pool]["critic"] = critic_cls # create reference policy if needed if self.use_reference_policy: resource_pool = self.resource_pool_manager.get_resource_pool(Role.RefPolicy) ref_policy_cls = RayClassWithInitArgs( self.role_worker_mapping[Role.RefPolicy], config=self.config.actor_rollout_ref, role="ref", profile_option=self.config.trainer.npu_profile.options, ) self.resource_pool_to_cls[resource_pool]["ref"] = ref_policy_cls # create a reward model if reward_fn is None if self.use_rm: # we create a RM here resource_pool = self.resource_pool_manager.get_resource_pool(Role.RewardModel) rm_cls = RayClassWithInitArgs(self.role_worker_mapping[Role.RewardModel], config=self.config.reward_model) self.resource_pool_to_cls[resource_pool]["rm"] = rm_cls # initialize WorkerGroup # NOTE: if you want to use a different resource pool for each role, which can support different parallel size, # you should not use `create_colocated_worker_cls`. # Instead, directly pass different resource pool to different worker groups. # See https://github.com/volcengine/verl/blob/master/examples/ray/tutorial.ipynb for more information. 
all_wg = {} wg_kwargs = {} # Setting up kwargs for RayWorkerGroup if OmegaConf.select(self.config.trainer, "ray_wait_register_center_timeout") is not None: wg_kwargs["ray_wait_register_center_timeout"] = self.config.trainer.ray_wait_register_center_timeout if OmegaConf.select(self.config.trainer, "profile_steps") is not None: wg_kwargs["profile_steps"] = OmegaConf.select(self.config.trainer, "profile_steps") assert OmegaConf.select(self.config.trainer, "worker_nsight_options") is not None, ( "worker_nsight_options must be set when profile_steps is set" ) wg_kwargs["worker_nsight_options"] = OmegaConf.to_container( OmegaConf.select(self.config.trainer, "worker_nsight_options") ) for resource_pool, class_dict in self.resource_pool_to_cls.items(): worker_dict_cls = create_colocated_worker_cls(class_dict=class_dict) wg_dict = self.ray_worker_group_cls( resource_pool=resource_pool, ray_cls_with_init=worker_dict_cls, device_name=self.device_name, **wg_kwargs, ) spawn_wg = wg_dict.spawn(prefix_set=class_dict.keys()) all_wg.update(spawn_wg) if self.use_critic: self.critic_wg = all_wg["critic"] self.critic_wg.init_model() if self.use_reference_policy and not self.ref_in_actor: self.ref_policy_wg = all_wg["ref"] self.ref_policy_wg.init_model() if self.use_rm: self.rm_wg = all_wg["rm"] self.rm_wg.init_model() self.actor_wg = all_wg["actor"] self.rollout_wg = all_wg["rollout"] self.actor_wg.init_model() self.rollout_wg.init_model() self.actor_rollout_wg = self.actor_wg # to stay compatible with functions that have not been modified weights_info = self.actor_wg.get_actor_weights_info()[0] self.rollout_wg.set_actor_weights_info(weights_info) from ray.util.collective import collective actor_rollout_workers = self.actor_wg.workers + self.rollout_wg.workers collective.create_collective_group( actor_rollout_workers, len(actor_rollout_workers), list(range(0, len(actor_rollout_workers))), backend="nccl", group_name="actor_rollout", ) self.sync_rollout_weights() # create async rollout manager and request scheduler self.async_rollout_mode = False if self.config.actor_rollout_ref.rollout.mode == "async" and self._is_rollout: from verl.workers.rollout.async_server import AsyncLLMServerManager self.async_rollout_mode = True self.async_rollout_manager = AsyncLLMServerManager( config=self.config, worker_group=self.rollout_wg, ) def sync_rollout_weights(self): if not self.hybrid_engine: self.actor_wg.sync_rollout_weights() ray.get(self.rollout_wg.sync_rollout_weights()) def _create_continuous_iterator(self): """ Create a continuous data iterator across epochs """ for epoch in range(self.config.trainer.total_epochs): iterator = iter(self.train_dataloader) for batch_dict in iterator: yield epoch, batch_dict def _async_gen_next_batch(self, continuous_iterator): """ Call parameter synchronization and asynchronous sequence generation.
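Returns a GenerationBatchFuture wrapping the in-flight rollout, or None once the continuous iterator is exhausted (or the next batch cannot be prepared).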
""" try: epoch, batch_dict = next(continuous_iterator) except StopIteration: return None except Exception as e: print(f"Error in async_gen_next_batch: {e}") return None batch = DataProto.from_single_dict(batch_dict) # pop those keys for generation batch_keys_to_pop = ["input_ids", "attention_mask", "position_ids"] non_tensor_batch_keys_to_pop = ["raw_prompt_ids"] if "multi_modal_data" in batch.non_tensor_batch: non_tensor_batch_keys_to_pop.append("multi_modal_data") if "raw_prompt" in batch.non_tensor_batch: non_tensor_batch_keys_to_pop.append("raw_prompt") if "tools_kwargs" in batch.non_tensor_batch: non_tensor_batch_keys_to_pop.append("tools_kwargs") if "interaction_kwargs" in batch.non_tensor_batch: non_tensor_batch_keys_to_pop.append("interaction_kwargs") gen_batch = batch.pop( batch_keys=batch_keys_to_pop, non_tensor_batch_keys=non_tensor_batch_keys_to_pop, ) gen_batch = gen_batch.repeat(repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True) # sync weights from actor to rollout self.sync_rollout_weights() # async generation gen_batch_output = self.rollout_wg.async_generate_sequences(gen_batch) return GenerationBatchFuture(epoch, batch, gen_batch_output) def fit(self): """ The training loop of PPO. The driver process only need to call the compute functions of the worker group through RPC to construct the PPO dataflow. The light-weight advantage computation is done on the driver process. """ from omegaconf import OmegaConf from verl.utils.tracking import Tracking logger = Tracking( project_name=self.config.trainer.project_name, experiment_name=self.config.trainer.experiment_name, default_backend=self.config.trainer.logger, config=OmegaConf.to_container(self.config, resolve=True), ) self.global_steps = 0 # load checkpoint before doing anything self._load_checkpoint() # perform validation before training # currently, we only support validation using the reward_function. if self.val_reward_fn is not None and self.config.trainer.get("val_before_train", True): val_metrics = self._validate() assert val_metrics, f"{val_metrics=}" pprint(f"Initial validation metrics: {val_metrics}") logger.log(data=val_metrics, step=self.global_steps) if self.config.trainer.get("val_only", False): return # add tqdm progress_bar = tqdm(total=self.total_training_steps, initial=self.global_steps, desc="Training Progress") # we start from step 1 self.global_steps += 1 last_val_metrics = None # across epoch iterator continuous_iterator = self._create_continuous_iterator() # Start the first asynchronous generation task. 
batch_data_future = self._async_gen_next_batch(continuous_iterator) while batch_data_future is not None: do_profile = ( self.global_steps in self.config.trainer.profile_steps if self.config.trainer.profile_steps is not None else False ) if do_profile: self.actor_wg.start_profile() if not self.hybrid_engine: self.rollout_wg.start_profile() if self.use_reference_policy: self.ref_policy_wg.start_profile() if self.use_critic: self.critic_wg.start_profile() if self.use_rm: self.rm_wg.start_profile() metrics = {} timing_raw = {} is_last_step = self.global_steps >= self.total_training_steps with marked_timer("step", timing_raw): # wait for the previous batch with marked_timer("wait_prev_gen", timing_raw, color="red"): epoch, batch, gen_batch_output = batch_data_future.get() timing_raw.update(gen_batch_output.meta_info["timing"]) gen_batch_output.meta_info.pop("timing", None) # async next generation (with weight sync from actor to rollout) with marked_timer("sync_rollout_weights", timing_raw, color="purple"): if not is_last_step: batch_data_future = self._async_gen_next_batch(continuous_iterator) batch.non_tensor_batch["uid"] = np.array( [str(uuid.uuid4()) for _ in range(len(batch.batch))], dtype=object ) # repeat to align with repeated responses in rollout batch = batch.repeat(repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True) batch = batch.union(gen_batch_output) batch.batch["response_mask"] = compute_response_mask(batch) # Balance the number of valid tokens across DP ranks. # NOTE: This usually changes the order of data in the `batch`, # which won't affect the advantage calculation (since it's based on uid), # but might affect the loss calculation (due to the change of mini-batching). # TODO: Decouple the DP balancing and mini-batching. if self.config.trainer.balance_batch: self._balance_batch(batch, metrics=metrics) # compute global_valid tokens batch.meta_info["global_token_num"] = torch.sum(batch.batch["attention_mask"], dim=-1).tolist() with marked_timer("reward", timing_raw, color="yellow"): # compute reward model score if self.use_rm: reward_tensor = self.rm_wg.compute_rm_score(batch) batch = batch.union(reward_tensor) if self.config.reward_model.launch_reward_fn_async: future_reward = compute_reward_async.remote(batch, self.config, self.tokenizer) else: reward_tensor, reward_extra_infos_dict = compute_reward(batch, self.reward_fn) # recompute old_log_probs with marked_timer("old_log_prob", timing_raw, color="blue"): old_log_prob = self.actor_wg.compute_log_prob(batch) entropys = old_log_prob.batch["entropys"] response_masks = batch.batch["response_mask"] loss_agg_mode = self.config.actor_rollout_ref.actor.loss_agg_mode entropy_agg = agg_loss(loss_mat=entropys, loss_mask=response_masks, loss_agg_mode=loss_agg_mode) old_log_prob_metrics = {"actor/entropy": entropy_agg.detach().item()} metrics.update(old_log_prob_metrics) old_log_prob.batch.pop("entropys") batch = batch.union(old_log_prob) if "rollout_log_probs" in batch.batch.keys(): # TODO: we may want to add diff of probs too.
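# rollout_log_probs come from the vLLM engine, while old_log_probs are
# recomputed by the FSDP actor; the masked per-token probability gap logged
# below is a sanity check that the two copies of the policy stay numerically
# close after the weight sync.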
rollout_old_log_probs = batch.batch["rollout_log_probs"] actor_old_log_probs = batch.batch["old_log_probs"] attention_mask = batch.batch["attention_mask"] responses = batch.batch["responses"] response_length = responses.size(1) response_mask = attention_mask[:, -response_length:] rollout_probs = torch.exp(rollout_old_log_probs) actor_probs = torch.exp(actor_old_log_probs) rollout_probs_diff = torch.abs(rollout_probs - actor_probs) rollout_probs_diff = torch.masked_select(rollout_probs_diff, response_mask.bool()) rollout_probs_diff_max = torch.max(rollout_probs_diff) rollout_probs_diff_mean = torch.mean(rollout_probs_diff) rollout_probs_diff_std = torch.std(rollout_probs_diff) metrics.update( { "training/rollout_probs_diff_max": rollout_probs_diff_max.detach().item(), "training/rollout_probs_diff_mean": rollout_probs_diff_mean.detach().item(), "training/rollout_probs_diff_std": rollout_probs_diff_std.detach().item(), } ) if self.use_reference_policy: # compute reference log_prob with marked_timer("ref", timing_raw, color="olive"): if not self.ref_in_actor: ref_log_prob = self.ref_policy_wg.compute_ref_log_prob(batch) else: ref_log_prob = self.actor_wg.compute_ref_log_prob(batch) batch = batch.union(ref_log_prob) # compute values if self.use_critic: with marked_timer("values", timing_raw, color="cyan"): values = self.critic_wg.compute_values(batch) batch = batch.union(values) with marked_timer("adv", timing_raw, color="brown"): # we combine with rule-based rm reward_extra_infos_dict: dict[str, list] if self.config.reward_model.launch_reward_fn_async: reward_tensor, reward_extra_infos_dict = ray.get(future_reward) batch.batch["token_level_scores"] = reward_tensor if reward_extra_infos_dict: batch.non_tensor_batch.update({k: np.array(v) for k, v in reward_extra_infos_dict.items()}) # compute rewards. 
apply_kl_penalty if available if self.config.algorithm.use_kl_in_reward: batch, kl_metrics = apply_kl_penalty( batch, kl_ctrl=self.kl_ctrl_in_reward, kl_penalty=self.config.algorithm.kl_penalty ) metrics.update(kl_metrics) else: batch.batch["token_level_rewards"] = batch.batch["token_level_scores"] # compute advantages, executed on the driver process norm_adv_by_std_in_grpo = self.config.algorithm.get( "norm_adv_by_std_in_grpo", True ) # GRPO adv normalization factor batch = compute_advantage( batch, adv_estimator=self.config.algorithm.adv_estimator, gamma=self.config.algorithm.gamma, lam=self.config.algorithm.lam, num_repeat=self.config.actor_rollout_ref.rollout.n, norm_adv_by_std_in_grpo=norm_adv_by_std_in_grpo, config=self.config.algorithm, ) # update critic if self.use_critic: with marked_timer("update_critic", timing_raw, color="pink"): critic_output = self.critic_wg.update_critic(batch) critic_output_metrics = reduce_metrics(critic_output.meta_info["metrics"]) metrics.update(critic_output_metrics) # implement critic warmup if self.config.trainer.critic_warmup <= self.global_steps: # update actor with marked_timer("update_actor", timing_raw, color="red"): batch.meta_info["multi_turn"] = self.config.actor_rollout_ref.rollout.multi_turn.enable actor_output = self.actor_wg.update_actor(batch) actor_output_metrics = reduce_metrics(actor_output.meta_info["metrics"]) metrics.update(actor_output_metrics) # Log rollout generations if enabled rollout_data_dir = self.config.trainer.get("rollout_data_dir", None) if rollout_data_dir: with marked_timer("dump_rollout_generations", timing_raw, color="green"): inputs = self.tokenizer.batch_decode(batch.batch["prompts"], skip_special_tokens=True) outputs = self.tokenizer.batch_decode(batch.batch["responses"], skip_special_tokens=True) scores = batch.batch["token_level_scores"].sum(-1).cpu().tolist() self._dump_generations( inputs=inputs, outputs=outputs, scores=scores, reward_extra_infos_dict=reward_extra_infos_dict, dump_path=rollout_data_dir, ) # validate if ( self.val_reward_fn is not None and self.config.trainer.test_freq > 0 and (is_last_step or self.global_steps % self.config.trainer.test_freq == 0) ): with marked_timer("testing", timing_raw, color="green"): val_metrics: dict = self._validate() if is_last_step: last_val_metrics = val_metrics metrics.update(val_metrics) if self.config.trainer.save_freq > 0 and ( is_last_step or self.global_steps % self.config.trainer.save_freq == 0 ): with marked_timer("save_checkpoint", timing_raw, color="green"): self._save_checkpoint() # training metrics metrics.update( { "training/global_step": self.global_steps, "training/epoch": epoch, } ) # collect metrics metrics.update(compute_data_metrics(batch=batch, use_critic=self.use_critic)) metrics.update(compute_timing_metrics(batch=batch, timing_raw=timing_raw)) # TODO: implement actual TFLOPs and theoretical TFLOPs n_gpus = self.resource_pool_manager.get_n_gpus() metrics.update(compute_throughout_metrics(batch=batch, timing_raw=timing_raw, n_gpus=n_gpus)) # TODO: make a canonical logger that supports various backends logger.log(data=metrics, step=self.global_steps) progress_bar.update(1) self.global_steps += 1 if do_profile: self.actor_wg.stop_profile() if not self.hybrid_engine: self.rollout_wg.stop_profile() if self.use_reference_policy: self.ref_policy_wg.stop_profile() if self.use_critic: self.critic_wg.stop_profile() if self.use_rm: self.rm_wg.stop_profile() if is_last_step: pprint(f"Final validation metrics: {last_val_metrics}") progress_bar.close() return
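# Driver-side usage (a sketch; mirrors main_ppo.py in this recipe):
#
#   trainer = OneStepOffRayTrainer(config=config, tokenizer=tokenizer, ...)
#   trainer.init_workers()  # spawn actor/rollout pools, build the
#                           # "actor_rollout" NCCL group, run the first sync
#   trainer.fit()           # the overlapped generate/train loop above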
================================================ FILE: verl_rl/recipe/one_step_off_policy/vllm_sharding_manager.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright 2025 Meituan Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os from torch.distributed.device_mesh import DeviceMesh from verl import DataProto from verl.protocol import all_gather_data_proto from verl.third_party.vllm import parallel_state as vllm_ps from verl.utils.debug import GPUMemoryLogger from verl.utils.device import get_torch_device from verl.utils.torch_functional import check_device_is_available from verl.workers.sharding_manager.base import BaseShardingManager logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) class VLLMShardingManager(BaseShardingManager): @check_device_is_available() def __init__(self, inference_engine, device_mesh: DeviceMesh): self.device_mesh = device_mesh self.inference_engine = inference_engine inference_engine.wake_up() assert device_mesh is not None assert inference_engine is not None self.tp_size = self.device_mesh["infer_tp"].size() self.tp_rank = self.device_mesh["infer_tp"].get_local_rank() self.timing = {} gen_dp_rank = self.device_mesh["dp"].get_local_rank() get_torch_device().manual_seed(gen_dp_rank + 1000) self.gen_random_states = get_torch_device().get_rng_state() @GPUMemoryLogger(role="vllm sharding_manager", logger=logger) def __enter__(self): get_torch_device().set_rng_state(self.gen_random_states) @GPUMemoryLogger(role="vllm sharding_manager", logger=logger) def __exit__(self, exc_type, exc_value, traceback): self.gen_random_states = get_torch_device().get_rng_state() self.inference_engine.reset_prefix_cache() @GPUMemoryLogger(role="vllm sharding_manager", logger=logger) def preprocess_data(self, data: DataProto) -> DataProto: """All gather across the tp group so that each rank has identical input.""" if self.tp_size == 1: return data group = vllm_ps.get_tensor_model_parallel_group().device_group all_gather_data_proto(data=data, process_group=group) return data @GPUMemoryLogger(role="vllm sharding_manager", logger=logger) def postprocess_data(self, data: DataProto) -> DataProto: """Get the chunk of data for this tp rank since we do an all gather in preprocess.""" if self.tp_size == 1: return data return data.chunk(chunks=self.tp_size)[self.tp_rank] ================================================ FILE: verl_rl/recipe/onerec/main_onerec_ppo.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ OneRec custom main entry point for PPO training using custom onerec_ray_trainer. """ import os import sys # Add project root to path to ensure imports work correctly project_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) if project_root not in sys.path: sys.path.insert(0, project_root) import hydra import ray from omegaconf import OmegaConf # Import the custom trainer from onerec_ray_trainer.py from recipe.onerec.onerec_ray_trainer import RayPPOTrainer # Import other necessary components from verl from verl.trainer.constants_ppo import get_ppo_ray_runtime_env from verl.trainer.main_ppo import TaskRunner as BaseTaskRunner, create_rl_dataset, create_rl_sampler from verl.utils.device import is_cuda_available @hydra.main(config_path="../../verl/trainer/config", config_name="ppo_trainer", version_base=None) def main(config): """Main entry point for OneRec PPO training with Hydra configuration management. Args: config: Hydra configuration dictionary containing training parameters. """ run_ppo(config) def run_ppo(config) -> None: """Run PPO training process with OneRec custom trainer. Args: config: Training configuration object containing all necessary parameters for distributed PPO training including Ray initialization settings, model paths, and training hyperparameters. """ # Check if Ray is not initialized if not ray.is_initialized(): # Initialize Ray with a local cluster configuration ray.init( runtime_env=get_ppo_ray_runtime_env(), num_cpus=config.ray_init.num_cpus, ) # Create a remote instance of the TaskRunner class if ( is_cuda_available and config.trainer.get("profile_steps") is not None and len(config.trainer.get("profile_steps", [])) > 0 ): nsight_options = OmegaConf.to_container(config.trainer.controller_nsight_options) runner = OneRecTaskRunner.options(runtime_env={"nsight": nsight_options}).remote() else: runner = OneRecTaskRunner.remote() ray.get(runner.run.remote(config)) # Optional: get the path of the timeline trace file from the configuration timeline_json_file = config.trainer.get("ray_timeline_filename", None) if timeline_json_file: ray.timeline(filename=timeline_json_file) @ray.remote(num_cpus=1) class OneRecTaskRunner: """Ray remote class for executing distributed OneRec PPO training tasks. This class encapsulates the main training logic and runs as a Ray remote actor to enable distributed execution across multiple nodes and GPUs. Uses the custom onerec_ray_trainer.RayPPOTrainer instead of the default trainer. """ def run(self, config): """Execute the main PPO training workflow with OneRec custom trainer. Args: config: Training configuration object containing all parameters needed for setting up and running the PPO training process. 
""" import socket from pprint import pprint from omegaconf import OmegaConf from verl.trainer.ppo.reward import load_reward_manager from verl.utils.fs import copy_to_local from verl.utils.import_utils import load_extern_type # Import Role and ResourcePoolManager from the custom onerec_ray_trainer # to ensure we use the same Role enum from recipe.onerec.onerec_ray_trainer import ResourcePoolManager, Role print(f"OneRecTaskRunner hostname: {socket.gethostname()}, PID: {os.getpid()}") print("=" * 80) print("Using Custom OneRec RayPPOTrainer from recipe/onerec/onerec_ray_trainer.py") print("=" * 80) pprint(OmegaConf.to_container(config, resolve=True)) OmegaConf.resolve(config) # Download the checkpoint from HDFS to the local machine local_path = copy_to_local( config.actor_rollout_ref.model.path, use_shm=config.actor_rollout_ref.model.get("use_shm", False) ) # Instantiate the tokenizer and processor from verl.utils import hf_processor, hf_tokenizer trust_remote_code = config.data.get("trust_remote_code", False) tokenizer = hf_tokenizer(local_path, trust_remote_code=trust_remote_code) processor = hf_processor(local_path, trust_remote_code=trust_remote_code, use_fast=True) # Define worker classes based on the actor strategy if config.actor_rollout_ref.actor.strategy in {"fsdp", "fsdp2"}: assert config.critic.strategy in {"fsdp", "fsdp2"} from verl.single_controller.ray import RayWorkerGroup # Use custom OneRecActorRolloutRefWorker instead of standard ActorRolloutRefWorker from recipe.onerec.onerec_fsdp_workers import OneRecActorRolloutRefWorker as ActorRolloutRefWorker from verl.workers.fsdp_workers import AsyncActorRolloutRefWorker use_legacy_worker_impl = config.trainer.get("use_legacy_worker_impl", "auto") if use_legacy_worker_impl in ["auto", "enable"]: from verl.workers.fsdp_workers import CriticWorker elif use_legacy_worker_impl == "disable": from verl.workers.roles import CriticWorker print("Using new worker implementation") else: raise ValueError(f"Invalid use_legacy_worker_impl: {use_legacy_worker_impl}") actor_rollout_cls = ( AsyncActorRolloutRefWorker if config.actor_rollout_ref.rollout.mode == "async" else ActorRolloutRefWorker ) ray_worker_group_cls = RayWorkerGroup elif config.actor_rollout_ref.actor.strategy == "megatron": assert config.actor_rollout_ref.actor.strategy == config.critic.strategy from verl.single_controller.ray.megatron import NVMegatronRayWorkerGroup from verl.workers.megatron_workers import ActorRolloutRefWorker, AsyncActorRolloutRefWorker, CriticWorker actor_rollout_cls = ( AsyncActorRolloutRefWorker if config.actor_rollout_ref.rollout.mode == "async" else ActorRolloutRefWorker ) ray_worker_group_cls = NVMegatronRayWorkerGroup else: raise NotImplementedError(f"Unknown strategy: {config.actor_rollout_ref.actor.strategy}") # Load reward model worker if enabled if config.reward_model.get("enable", False): if config.reward_model.strategy in {"fsdp", "fsdp2"}: from verl.workers.fsdp_workers import RewardModelWorker elif config.reward_model.strategy == "megatron": from verl.workers.megatron_workers import RewardModelWorker else: raise NotImplementedError(f"Unknown reward model strategy: {config.reward_model.strategy}") else: RewardModelWorker = None # Setup resource pool configuration n_gpus_per_node = config.trainer.n_gpus_per_node nnodes = config.trainer.nnodes global_pool_id = "global_pool" resource_pool_spec = {global_pool_id: [n_gpus_per_node] * nnodes} # Map roles to workers role_worker_mapping = { Role.ActorRollout: ray.remote(actor_rollout_cls), } mapping = { 
Role.ActorRollout: global_pool_id, } if config.critic.get("enable", True): role_worker_mapping[Role.Critic] = ray.remote(CriticWorker) mapping[Role.Critic] = global_pool_id if config.reward_model.get("enable", False): role_worker_mapping[Role.RewardModel] = ray.remote(RewardModelWorker) mapping[Role.RewardModel] = global_pool_id if config.algorithm.use_kl_in_reward or config.actor_rollout_ref.actor.use_kl_loss: role_worker_mapping[Role.RefPolicy] = ray.remote(actor_rollout_cls) mapping[Role.RefPolicy] = global_pool_id # Load reward managers reward_fn = load_reward_manager( config, tokenizer, num_examine=0, **config.reward_model.get("reward_kwargs", {}) ) val_reward_fn = load_reward_manager( config, tokenizer, num_examine=1, **config.reward_model.get("reward_kwargs", {}) ) resource_pool_manager = ResourcePoolManager(resource_pool_spec=resource_pool_spec, mapping=mapping) from verl.utils.dataset.rl_dataset import collate_fn # Create training and validation datasets train_dataset = create_rl_dataset(config.data.train_files, config.data, tokenizer, processor, is_train=True) val_dataset = create_rl_dataset(config.data.val_files, config.data, tokenizer, processor, is_train=False) train_sampler = create_rl_sampler(config.data, train_dataset) # ======================================================================== # KEY CHANGE: Use the custom OneRec RayPPOTrainer instead of default # ======================================================================== trainer = RayPPOTrainer( config=config, tokenizer=tokenizer, processor=processor, role_worker_mapping=role_worker_mapping, resource_pool_manager=resource_pool_manager, ray_worker_group_cls=ray_worker_group_cls, reward_fn=reward_fn, val_reward_fn=val_reward_fn, train_dataset=train_dataset, val_dataset=val_dataset, collate_fn=collate_fn, train_sampler=train_sampler, ) # Initialize the workers of the trainer trainer.init_workers() # Start the training process trainer.fit() if __name__ == "__main__": main() ================================================ FILE: verl_rl/recipe/onerec/onerec_fsdp_workers.py ================================================ from verl.workers.fsdp_workers import ActorRolloutRefWorker from recipe.onerec.onerec_vllm_rollout import OneRecvLLMRollout from verl.utils.fs import copy_to_local from torch.distributed.device_mesh import init_device_mesh from verl.utils.device import get_device_name import logging import torch logger = logging.getLogger(__name__) class OneRecActorRolloutRefWorker(ActorRolloutRefWorker): """ Custom ActorRolloutRefWorker that uses OneRecvLLMRollout instead of standard vLLMRollout. 
""" def _build_rollout(self, trust_remote_code=False): # We only override the two_stage rollout path if self.config.rollout.name == "two_stage": from verl.workers.sharding_manager.fsdp_vllm import FSDPVLLMShardingManager from verl.utils.profiler import log_gpu_memory_usage # Original logic from ActorRolloutRefWorker._build_rollout infer_tp = self.config.rollout.tensor_model_parallel_size dp = self.world_size // infer_tp assert self.world_size % infer_tp == 0, ( f"rollout world_size: {self.world_size} is not divisible by infer_tp: {infer_tp}" ) device_name = get_device_name() rollout_device_mesh = init_device_mesh( device_name, mesh_shape=(dp, infer_tp), mesh_dim_names=["dp", "infer_tp"] ) log_gpu_memory_usage(f"Before building vllm rollout (OneRec Custom)", logger=logger) local_path = copy_to_local(self.config.model.path, use_shm=self.config.model.get("use_shm", False)) lora_kwargs = ( {"lora_kwargs": {"enable_lora": True, "max_loras": 1, "max_lora_rank": self._lora_rank}} if self._is_lora else {} ) # Use our custom class! # We check for async mode but currently only support Sync OneRecvLLMRollout if self.config.rollout.mode == "async": logger.warning("OneRecvLLMRollout currently only supports SYNC mode fully. Async might fallback or fail if logic differs.") # If you implemented AsyncOneRecvLLMRollout, use it here. # For now, we assume sync mode or that OneRecvLLMRollout works for both structure wise # (vLLMAsyncRollout inherits from different base, so simple substitution might fail for async) # Fallback to original for async if you haven't implemented Async wrapper return super()._build_rollout(trust_remote_code) rollout = OneRecvLLMRollout( model_path=local_path, config=self.config.rollout, tokenizer=self.tokenizer, model_hf_config=self.actor_model_config, device_mesh=rollout_device_mesh, trust_remote_code=trust_remote_code, **lora_kwargs, ) log_gpu_memory_usage(f"After building vllm rollout (OneRec Custom)", logger=logger) full_params = torch.distributed.get_world_size() == 1 rollout_sharding_manager = FSDPVLLMShardingManager( module=self.actor_module_fsdp, inference_engine=rollout.inference_engine, model_config=self.actor_model_config, rollout_config=self.config.rollout, full_params=full_params, device_mesh=rollout_device_mesh, offload_param=self._is_offload_param, load_format=self.config.rollout.load_format, layered_summon=self.config.rollout.get("layered_summon", False), ) log_gpu_memory_usage("After building sharding manager", logger=logger) return rollout, rollout_sharding_manager else: # Fallback to parent implementation for other backends return super()._build_rollout(trust_remote_code) ================================================ FILE: verl_rl/recipe/onerec/onerec_ray_trainer.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PPO Trainer with Ray-based single controller. 
This trainer supports model-agnostic model initialization with huggingface """ import json import os import uuid from collections import defaultdict from copy import deepcopy from dataclasses import dataclass, field from enum import Enum from pprint import pprint from typing import Optional import numpy as np import ray import torch import wandb from omegaconf import OmegaConf, open_dict from tensordict import TensorDict from torch.utils.data import Dataset, Sampler from torchdata.stateful_dataloader import StatefulDataLoader from tqdm import tqdm from verl import DataProto from verl.experimental.dataset.sampler import AbstractCurriculumSampler from verl.protocol import pad_dataproto_to_divisor, unpad_dataproto from verl.single_controller.base import Worker from verl.single_controller.ray import RayClassWithInitArgs, RayResourcePool, RayWorkerGroup from verl.single_controller.ray.base import create_colocated_worker_cls from verl.trainer.config import AlgoConfig from verl.trainer.ppo import core_algos from verl.trainer.ppo.core_algos import AdvantageEstimator, agg_loss from verl.trainer.ppo.metric_utils import ( compute_data_metrics, compute_throughout_metrics, compute_timing_metrics, process_validation_metrics ) from verl.trainer.ppo.reward import compute_reward, compute_reward_async from verl.utils.checkpoint.checkpoint_manager import find_latest_ckpt_path, should_save_ckpt_esi from verl.utils.debug import marked_timer from verl.utils.metric import ( reduce_metrics, ) from verl.utils.seqlen_balancing import get_seqlen_balanced_partitions, log_seqlen_unbalance from verl.utils.torch_functional import masked_mean, postprocess_data from verl.utils.tracking import ValidationGenerationsLogger WorkerType = type[Worker] class Role(Enum): """ To create more roles dynamically, you can subclass Role and add new members """ Actor = 0 Rollout = 1 ActorRollout = 2 Critic = 3 RefPolicy = 4 RewardModel = 5 ActorRolloutRef = 6 @dataclass class ResourcePoolManager: """ Define a resource pool specification. Resource pool will be initialized first. """ resource_pool_spec: dict[str, list[int]] mapping: dict[Role, str] resource_pool_dict: dict[str, RayResourcePool] = field(default_factory=dict) def create_resource_pool(self): """Create Ray resource pools for distributed training. Initializes resource pools based on the resource pool specification, with each pool managing GPU resources across multiple nodes. For FSDP backend, uses max_colocate_count=1 to merge WorkerGroups. For Megatron backend, uses max_colocate_count>1 for different models. """ for resource_pool_name, process_on_nodes in self.resource_pool_spec.items(): # max_colocate_count means the number of WorkerGroups (i.e. processes) in each RayResourcePool # For FSDP backend, we recommend using max_colocate_count=1, which merges all WorkerGroups into one.
# For Megatron backend, we recommend using max_colocate_count>1 # that can utilize different WorkerGroups for different models resource_pool = RayResourcePool( process_on_nodes=process_on_nodes, use_gpu=True, max_colocate_count=1, name_prefix=resource_pool_name ) self.resource_pool_dict[resource_pool_name] = resource_pool self._check_resource_available() def get_resource_pool(self, role: Role) -> RayResourcePool: """Get the resource pool of the worker_cls""" return self.resource_pool_dict[self.mapping[role]] def get_n_gpus(self) -> int: """Get the number of gpus in this cluster.""" return sum([n_gpus for process_on_nodes in self.resource_pool_spec.values() for n_gpus in process_on_nodes]) def _check_resource_available(self): """Check if the resource pool can be satisfied in this ray cluster.""" node_available_resources = ray.state.available_resources_per_node() node_available_gpus = { node: node_info.get("GPU", 0) if "GPU" in node_info else node_info.get("NPU", 0) for node, node_info in node_available_resources.items() } # check total required gpus can be satisfied total_available_gpus = sum(node_available_gpus.values()) total_required_gpus = sum( [n_gpus for process_on_nodes in self.resource_pool_spec.values() for n_gpus in process_on_nodes] ) if total_available_gpus < total_required_gpus: raise ValueError( f"Total available GPUs {total_available_gpus} is less than total desired GPUs {total_required_gpus}" ) # check each resource pool can be satisfied, O(#resource_pools * #nodes) for resource_pool_name, process_on_nodes in self.resource_pool_spec.items(): num_gpus, num_nodes = process_on_nodes[0], len(process_on_nodes) for node, available_gpus in node_available_gpus.items(): if available_gpus >= num_gpus: node_available_gpus[node] -= num_gpus num_nodes -= 1 if num_nodes == 0: break if num_nodes > 0: raise ValueError( f"Resource pool {resource_pool_name}: {num_gpus}*{num_nodes}" + " cannot be satisfied in this ray cluster" ) def apply_kl_penalty(data: DataProto, kl_ctrl: core_algos.AdaptiveKLController, kl_penalty="kl"): """Apply KL penalty to the token-level rewards. This function computes the KL divergence between the reference policy and current policy, then applies a penalty to the token-level rewards based on this divergence. Args: data (DataProto): The data containing batched model outputs and inputs. kl_ctrl (core_algos.AdaptiveKLController): Controller for adaptive KL penalty. kl_penalty (str, optional): Type of KL penalty to apply. Defaults to "kl". Returns: tuple: A tuple containing: - The updated data with token-level rewards adjusted by KL penalty - A dictionary of metrics related to the KL penalty """ response_mask = data.batch["response_mask"] token_level_scores = data.batch["token_level_scores"] batch_size = data.batch.batch_size[0] # compute kl between ref_policy and current policy # When apply_kl_penalty is called, algorithm.use_kl_in_reward=True, so the reference model has been enabled.
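# In effect, with beta = kl_ctrl.value the code below computes, per response
# token t:
#   token_level_rewards[t] = token_level_scores[t] - beta * kld[t]
# with kld masked to response tokens, and then updates the adaptive
# controller with the batch-mean KL.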
kld = core_algos.kl_penalty( data.batch["old_log_probs"], data.batch["ref_log_prob"], kl_penalty=kl_penalty ) # (batch_size, response_length) kld = kld * response_mask beta = kl_ctrl.value token_level_rewards = token_level_scores - beta * kld current_kl = masked_mean(kld, mask=response_mask, axis=-1) # average over sequence current_kl = torch.mean(current_kl, dim=0).item() # according to https://github.com/huggingface/trl/blob/951ca1841f29114b969b57b26c7d3e80a39f75a0/trl/trainer/ppo_trainer.py#L837 kl_ctrl.update(current_kl=current_kl, n_steps=batch_size) data.batch["token_level_rewards"] = token_level_rewards metrics = {"actor/reward_kl_penalty": current_kl, "actor/reward_kl_penalty_coeff": beta} return data, metrics def compute_response_mask(data: DataProto): """Compute the attention mask for the response part of the sequence. This function extracts the portion of the attention mask that corresponds to the model's response, which is used for masking computations that should only apply to response tokens. Args: data (DataProto): The data containing batched model outputs and inputs. Returns: torch.Tensor: The attention mask for the response tokens. """ responses = data.batch["responses"] response_length = responses.size(1) attention_mask = data.batch["attention_mask"] return attention_mask[:, -response_length:] def compute_advantage( data: DataProto, adv_estimator: AdvantageEstimator, gamma: float = 1.0, lam: float = 1.0, num_repeat: int = 1, norm_adv_by_std_in_grpo: bool = True, config: Optional[AlgoConfig] = None, tokenizer=None, ) -> DataProto: """Compute advantage estimates for policy optimization. This function computes advantage estimates using various estimators like GAE, GRPO, REINFORCE++, etc. The advantage estimates are used to guide policy optimization in RL algorithms. Args: data (DataProto): The data containing batched model outputs and inputs. adv_estimator (AdvantageEstimator): The advantage estimator to use (e.g., GAE, GRPO, REINFORCE++). gamma (float, optional): Discount factor for future rewards. Defaults to 1.0. lam (float, optional): Lambda parameter for GAE. Defaults to 1.0. num_repeat (int, optional): Number of times to repeat the computation. Defaults to 1. norm_adv_by_std_in_grpo (bool, optional): Whether to normalize advantages by standard deviation in GRPO. Defaults to True. config (AlgoConfig, optional): Algorithm configuration (e.g., PF-PPO settings). Defaults to None. tokenizer (optional): Tokenizer; not used by the built-in estimators here. Defaults to None. Returns: DataProto: The updated data with computed advantages and returns.
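Example:
    A minimal illustrative call (assuming ``data`` already carries
    ``token_level_rewards``, ``response_mask`` and, for GRPO, a ``uid``
    array in ``non_tensor_batch``)::

        data = compute_advantage(
            data,
            adv_estimator=AdvantageEstimator.GRPO,
            norm_adv_by_std_in_grpo=True,
        )
        advantages = data.batch["advantages"]  # (batch_size, response_length)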
""" # Back-compatible with trainers that do not compute response mask in fit if "response_mask" not in data.batch.keys(): data.batch["response_mask"] = compute_response_mask(data) # prepare response group if adv_estimator == AdvantageEstimator.GAE: # Compute advantages and returns using Generalized Advantage Estimation (GAE) advantages, returns = core_algos.compute_gae_advantage_return( token_level_rewards=data.batch["token_level_rewards"], values=data.batch["values"], response_mask=data.batch["response_mask"], gamma=gamma, lam=lam, ) data.batch["advantages"] = advantages data.batch["returns"] = returns if config.get("use_pf_ppo", False): data = core_algos.compute_pf_ppo_reweight_data( data, config.pf_ppo.reweight_method, config.pf_ppo.weight_pow, ) elif adv_estimator == AdvantageEstimator.GRPO: # Initialize the mask for GRPO calculation grpo_calculation_mask = data.batch["response_mask"] # Call compute_grpo_outcome_advantage with parameters matching its definition advantages, returns = core_algos.compute_grpo_outcome_advantage( token_level_rewards=data.batch["token_level_rewards"], response_mask=grpo_calculation_mask, index=data.non_tensor_batch["uid"], norm_adv_by_std_in_grpo=norm_adv_by_std_in_grpo, ) data.batch["advantages"] = advantages data.batch["returns"] = returns else: # handle all other adv estimator type other than GAE and GRPO adv_estimator_fn = core_algos.get_adv_estimator_fn(adv_estimator) adv_kwargs = { "token_level_rewards": data.batch["token_level_rewards"], "response_mask": data.batch["response_mask"], "config": config, } if "uid" in data.non_tensor_batch: # optional adv_kwargs["index"] = data.non_tensor_batch["uid"] if "reward_baselines" in data.batch: # optional adv_kwargs["reward_baselines"] = data.batch["reward_baselines"] # calculate advantage estimator advantages, returns = adv_estimator_fn(**adv_kwargs) data.batch["advantages"] = advantages data.batch["returns"] = returns return data class RayPPOTrainer: """Distributed PPO trainer using Ray for scalable reinforcement learning. This trainer orchestrates distributed PPO training across multiple nodes and GPUs, managing actor rollouts, critic training, and reward computation with Ray backend. Supports various model architectures including FSDP, Megatron, and vLLM integration. """ # TODO: support each role have individual ray_worker_group_cls, # i.e., support different backend of different role def __init__( self, config, tokenizer, role_worker_mapping: dict[Role, WorkerType], resource_pool_manager: ResourcePoolManager, ray_worker_group_cls: RayWorkerGroup = RayWorkerGroup, processor=None, reward_fn=None, val_reward_fn=None, train_dataset: Optional[Dataset] = None, val_dataset: Optional[Dataset] = None, collate_fn=None, train_sampler: Optional[Sampler] = None, device_name=None, ): """ Initialize distributed PPO trainer with Ray backend. Note that this trainer runs on the driver process on a single CPU/GPU node. Args: config: Configuration object containing training parameters. tokenizer: Tokenizer used for encoding and decoding text. role_worker_mapping (dict[Role, WorkerType]): Mapping from roles to worker classes. resource_pool_manager (ResourcePoolManager): Manager for Ray resource pools. ray_worker_group_cls (RayWorkerGroup, optional): Class for Ray worker groups. Defaults to RayWorkerGroup. processor: Optional data processor, used for multimodal data reward_fn: Function for computing rewards during training. val_reward_fn: Function for computing rewards during validation. 
train_dataset (Optional[Dataset], optional): Training dataset. Defaults to None. val_dataset (Optional[Dataset], optional): Validation dataset. Defaults to None. collate_fn: Function to collate data samples into batches. train_sampler (Optional[Sampler], optional): Sampler for the training dataset. Defaults to None. device_name (str, optional): Device name for training (e.g., "cuda", "cpu"). Defaults to None. """ # Store the tokenizer for text processing self.tokenizer = tokenizer self.processor = processor self.config = config self.reward_fn = reward_fn self.val_reward_fn = val_reward_fn self.hybrid_engine = config.actor_rollout_ref.hybrid_engine assert self.hybrid_engine, "Currently, only the hybrid engine is supported" if self.hybrid_engine: assert Role.ActorRollout in role_worker_mapping, f"{role_worker_mapping.keys()=}" self.role_worker_mapping = role_worker_mapping self.resource_pool_manager = resource_pool_manager self.use_reference_policy = Role.RefPolicy in role_worker_mapping self.use_rm = Role.RewardModel in role_worker_mapping self.ray_worker_group_cls = ray_worker_group_cls self.device_name = device_name if device_name else self.config.trainer.device self.validation_generations_logger = ValidationGenerationsLogger( project_name=self.config.trainer.project_name, experiment_name=self.config.trainer.experiment_name, ) # if ref_in_actor is True, the reference policy will be the actor without LoRA applied self.ref_in_actor = config.actor_rollout_ref.model.get("lora_rank", 0) > 0 # define in-reward KL control # KL loss control is currently not supported if self.config.algorithm.use_kl_in_reward: self.kl_ctrl_in_reward = core_algos.get_kl_controller(self.config.algorithm.kl_ctrl) if self.config.algorithm.adv_estimator == AdvantageEstimator.GAE: self.use_critic = True elif self.config.algorithm.adv_estimator in [ AdvantageEstimator.GRPO, AdvantageEstimator.REINFORCE_PLUS_PLUS, AdvantageEstimator.REMAX, AdvantageEstimator.RLOO, AdvantageEstimator.OPO, AdvantageEstimator.REINFORCE_PLUS_PLUS_BASELINE, AdvantageEstimator.GPG, ]: self.use_critic = False else: raise NotImplementedError self._validate_config() self._create_dataloader(train_dataset, val_dataset, collate_fn, train_sampler) def _validate_config(self): config = self.config # number of GPUs total n_gpus = config.trainer.n_gpus_per_node * config.trainer.nnodes if config.actor_rollout_ref.actor.strategy == "megatron": model_parallel_size = ( config.actor_rollout_ref.actor.megatron.tensor_model_parallel_size * config.actor_rollout_ref.actor.megatron.pipeline_model_parallel_size ) assert ( n_gpus % (model_parallel_size * config.actor_rollout_ref.actor.megatron.context_parallel_size) == 0 ), ( f"n_gpus ({n_gpus}) must be divisible by model_parallel_size ({model_parallel_size}) times " f"context_parallel_size ({config.actor_rollout_ref.actor.megatron.context_parallel_size})" ) megatron_dp = n_gpus // ( model_parallel_size * config.actor_rollout_ref.actor.megatron.context_parallel_size ) minimal_bsz = megatron_dp * config.actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu else: minimal_bsz = n_gpus # 1. Check total batch size for data correctness real_train_batch_size = config.data.train_batch_size * config.actor_rollout_ref.rollout.n assert real_train_batch_size % minimal_bsz == 0, ( f"real_train_batch_size ({real_train_batch_size}) must be divisible by minimal possible batch size " f"({minimal_bsz})" ) # A helper function to check "micro_batch_size" vs "micro_batch_size_per_gpu" # We throw an error if the user sets both.
The new convention is "..._micro_batch_size_per_gpu". def check_mutually_exclusive(mbs, mbs_per_gpu, name: str): """Validate mutually exclusive micro batch size configuration options. Ensures that users don't set both deprecated micro_batch_size and the new micro_batch_size_per_gpu parameters simultaneously. Args: mbs: Deprecated micro batch size parameter value. mbs_per_gpu: New micro batch size per GPU parameter value. name (str): Configuration section name for error messages. Raises: ValueError: If both parameters are set or neither is set. """ settings = { "actor_rollout_ref.actor": "micro_batch_size", "critic": "micro_batch_size", "reward_model": "micro_batch_size", "actor_rollout_ref.ref": "log_prob_micro_batch_size", "actor_rollout_ref.rollout": "log_prob_micro_batch_size", } if name in settings: param = settings[name] param_per_gpu = f"{param}_per_gpu" if mbs is None and mbs_per_gpu is None: raise ValueError( f"[{name}] Please set at least one of '{name}.{param}' or '{name}.{param_per_gpu}'." ) if mbs is not None and mbs_per_gpu is not None: raise ValueError( f"[{name}] You have set both '{name}.{param}' AND '{name}.{param_per_gpu}'. Please remove " f"'{name}.{param}' because only '*_{param_per_gpu}' is supported (the former is deprecated)." ) if not config.actor_rollout_ref.actor.use_dynamic_bsz: # actor: ppo_micro_batch_size vs. ppo_micro_batch_size_per_gpu check_mutually_exclusive( config.actor_rollout_ref.actor.ppo_micro_batch_size, config.actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu, "actor_rollout_ref.actor", ) if self.use_reference_policy: # reference: log_prob_micro_batch_size vs. log_prob_micro_batch_size_per_gpu check_mutually_exclusive( config.actor_rollout_ref.ref.log_prob_micro_batch_size, config.actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu, "actor_rollout_ref.ref", ) # The rollout section also has log_prob_micro_batch_size vs. 
log_prob_micro_batch_size_per_gpu check_mutually_exclusive( config.actor_rollout_ref.rollout.log_prob_micro_batch_size, config.actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu, "actor_rollout_ref.rollout", ) if self.use_critic and not config.critic.use_dynamic_bsz: # Check for critic micro-batch size conflicts check_mutually_exclusive( config.critic.ppo_micro_batch_size, config.critic.ppo_micro_batch_size_per_gpu, "critic" ) # Check for reward model micro-batch size conflicts if config.reward_model.enable and not config.reward_model.use_dynamic_bsz: check_mutually_exclusive( config.reward_model.micro_batch_size, config.reward_model.micro_batch_size_per_gpu, "reward_model" ) # Actor # check if train_batch_size is larger than ppo_mini_batch_size # if NOT dynamic_bsz, we must ensure: # ppo_mini_batch_size is divisible by ppo_micro_batch_size # ppo_micro_batch_size * sequence_parallel_size >= n_gpus if not config.actor_rollout_ref.actor.use_dynamic_bsz: assert config.data.train_batch_size >= config.actor_rollout_ref.actor.ppo_mini_batch_size sp_size = config.actor_rollout_ref.actor.get("ulysses_sequence_parallel_size", 1) if config.actor_rollout_ref.actor.ppo_micro_batch_size is not None: assert ( config.actor_rollout_ref.actor.ppo_mini_batch_size % config.actor_rollout_ref.actor.ppo_micro_batch_size == 0 ) assert config.actor_rollout_ref.actor.ppo_micro_batch_size * sp_size >= n_gpus assert config.actor_rollout_ref.actor.loss_agg_mode in [ "token-mean", "seq-mean-token-sum", "seq-mean-token-mean", "seq-mean-token-sum-norm", ], f"Invalid loss_agg_mode: {config.actor_rollout_ref.actor.loss_agg_mode}" if self.config.algorithm.use_kl_in_reward and config.actor_rollout_ref.actor.use_kl_loss: print("NOTICE: You have both enabled in-reward kl and kl loss.") # critic if self.use_critic and not config.critic.use_dynamic_bsz: assert config.data.train_batch_size >= config.critic.ppo_mini_batch_size sp_size = config.critic.get("ulysses_sequence_parallel_size", 1) if config.critic.ppo_micro_batch_size is not None: assert config.critic.ppo_mini_batch_size % config.critic.ppo_micro_batch_size == 0 assert config.critic.ppo_micro_batch_size * sp_size >= n_gpus # Check if use_remove_padding is enabled when using sequence parallelism for fsdp if config.actor_rollout_ref.actor.strategy in {"fsdp", "fsdp2"} and ( config.actor_rollout_ref.actor.get("ulysses_sequence_parallel_size", 1) > 1 or config.actor_rollout_ref.ref.get("ulysses_sequence_parallel_size", 1) > 1 ): assert config.actor_rollout_ref.model.use_remove_padding, ( "When using sequence parallelism for actor/ref policy, you must enable `use_remove_padding`." ) if self.use_critic and config.critic.strategy in {"fsdp", "fsdp2"}: if config.critic.get("ulysses_sequence_parallel_size", 1) > 1: assert config.critic.model.use_remove_padding, ( "When using sequence parallelism for critic, you must enable `use_remove_padding`." ) if config.data.get("val_batch_size", None) is not None: print( "WARNING: val_batch_size is deprecated." + " Validation datasets are sent to inference engines as a whole batch," + " and the engines will schedule memory themselves."
) # check eval config if config.actor_rollout_ref.rollout.val_kwargs.do_sample: assert config.actor_rollout_ref.rollout.temperature > 0, ( "validation gen temperature should be greater than 0 when enabling do_sample" ) print("[validate_config] All configuration checks passed successfully!") def _create_dataloader(self, train_dataset, val_dataset, collate_fn, train_sampler: Optional[Sampler]): """ Creates the train and validation dataloaders. """ # TODO: we have to make sure the batch size is divisible by the dp size from verl.trainer.main_ppo import create_rl_dataset, create_rl_sampler if train_dataset is None: train_dataset = create_rl_dataset( self.config.data.train_files, self.config.data, self.tokenizer, self.processor ) if val_dataset is None: val_dataset = create_rl_dataset( self.config.data.val_files, self.config.data, self.tokenizer, self.processor ) self.train_dataset, self.val_dataset = train_dataset, val_dataset if train_sampler is None: train_sampler = create_rl_sampler(self.config.data, self.train_dataset) if collate_fn is None: from verl.utils.dataset.rl_dataset import collate_fn as default_collate_fn collate_fn = default_collate_fn num_workers = self.config.data["dataloader_num_workers"] self.train_dataloader = StatefulDataLoader( dataset=self.train_dataset, batch_size=self.config.data.get("gen_batch_size", self.config.data.train_batch_size), num_workers=num_workers, drop_last=True, collate_fn=collate_fn, sampler=train_sampler, ) val_batch_size = self.config.data.val_batch_size # Prefer config value if set if val_batch_size is None: val_batch_size = len(self.val_dataset) self.val_dataloader = StatefulDataLoader( dataset=self.val_dataset, batch_size=val_batch_size, num_workers=num_workers, shuffle=self.config.data.get("validation_shuffle", True), drop_last=False, collate_fn=collate_fn, ) assert len(self.train_dataloader) >= 1, "Train dataloader is empty!" assert len(self.val_dataloader) >= 1, "Validation dataloader is empty!" print( f"Size of train dataloader: {len(self.train_dataloader)}, Size of val dataloader: " f"{len(self.val_dataloader)}" ) total_training_steps = len(self.train_dataloader) * self.config.trainer.total_epochs if self.config.trainer.total_training_steps is not None: total_training_steps = self.config.trainer.total_training_steps self.total_training_steps = total_training_steps print(f"Total training steps: {self.total_training_steps}") try: OmegaConf.set_struct(self.config, True) with open_dict(self.config): if OmegaConf.select(self.config, "actor_rollout_ref.actor.optim"): self.config.actor_rollout_ref.actor.optim.total_training_steps = total_training_steps if OmegaConf.select(self.config, "critic.optim"): self.config.critic.optim.total_training_steps = total_training_steps except Exception as e: print(f"Warning: Could not set total_training_steps in config. Structure missing? 
Error: {e}") def _dump_generations(self, inputs, outputs, scores, reward_extra_infos_dict, dump_path, ground_truths=None): """Dump rollout/validation samples as JSONL.""" os.makedirs(dump_path, exist_ok=True) filename = os.path.join(dump_path, f"{self.global_steps}.jsonl") n = len(inputs) base_data = { "input": inputs, "output": outputs, "score": scores, "step": [self.global_steps] * n, } if ground_truths and len(ground_truths) == n: base_data["ground_truth"] = ground_truths for k, v in reward_extra_infos_dict.items(): if len(v) == n: base_data[k] = v lines = [] for i in range(n): entry = {k: v[i] for k, v in base_data.items()} lines.append(json.dumps(entry, ensure_ascii=False)) with open(filename, "w") as f: f.write("\n".join(lines) + "\n") print(f"Dumped generations to {filename}") def _maybe_log_val_generations(self, inputs, outputs, scores): """Log a table of validation samples to the configured logger (wandb or swanlab)""" generations_to_log = self.config.trainer.log_val_generations if generations_to_log == 0: return import numpy as np # Create tuples of (input, output, score) and sort by input text samples = list(zip(inputs, outputs, scores, strict=True)) samples.sort(key=lambda x: x[0]) # Sort by input text # Use fixed random seed for deterministic shuffling rng = np.random.RandomState(42) rng.shuffle(samples) # Take first N samples after shuffling samples = samples[:generations_to_log] # Log to each configured logger self.validation_generations_logger.log(self.config.trainer.logger, samples, self.global_steps) def _validate(self): data_source_lst = [] reward_extra_infos_dict: dict[str, list] = defaultdict(list) # Debug: print dataset sizes before validation print(f"[_validate] Starting validation. train_dataset size: {len(self.train_dataset)}, val_dataset size: {len(self.val_dataset)}") print(f"[_validate] actor_rollout_wg world_size: {self.actor_rollout_wg.world_size}") # Lists to collect samples for the table sample_inputs = [] sample_outputs = [] sample_scores = [] sample_turns = [] sample_ground_truths = [] batch_idx = 0 for test_data in self.val_dataloader: test_batch = DataProto.from_single_dict(test_data) print(f"[Validation Debug] Batch {batch_idx}: test_batch size = {len(test_batch)}") batch_idx += 1 # Check if beam search or two-stage rollout is enabled for validation val_kwargs = self.config.actor_rollout_ref.rollout.val_kwargs rollout_config = self.config.actor_rollout_ref.rollout use_beam_search_val = val_kwargs.get("use_beam_search", False) is_two_stage_rollout_val = rollout_config.get("name") == "two_stage" # Only repeat if NOT using beam search (beam search will expand outputs internally) # For two-stage rollout, we DO repeat (for different CoT samples), beam expansion happens in rollout if not use_beam_search_val: # repeat test batch for sampling-based generation test_batch = test_batch.repeat( repeat_times=self.config.actor_rollout_ref.rollout.val_kwargs.n, interleave=True ) # we only do validation on rule-based rm if self.config.reward_model.enable and test_batch[0].non_tensor_batch["reward_model"]["style"] == "model": return {} # Store original inputs (will be expanded later if beam search returns all beams) input_ids = test_batch.batch["input_ids"] input_texts = [self.tokenizer.decode(ids, skip_special_tokens=True) for ids in input_ids] # Note: sample_inputs will be extended after beam search expansion handling if "reward_model" in test_batch.non_tensor_batch: ground_truths = [item["ground_truth"] for item in test_batch.non_tensor_batch["reward_model"]] # Note: 
ground_truths will be extended after beam search expansion handling batch_keys_to_pop = ["input_ids", "attention_mask", "position_ids"] non_tensor_batch_keys_to_pop = ["raw_prompt_ids"] if "multi_modal_data" in test_batch.non_tensor_batch: non_tensor_batch_keys_to_pop.append("multi_modal_data") if "raw_prompt" in test_batch.non_tensor_batch: non_tensor_batch_keys_to_pop.append("raw_prompt") if "tools_kwargs" in test_batch.non_tensor_batch: non_tensor_batch_keys_to_pop.append("tools_kwargs") if "interaction_kwargs" in test_batch.non_tensor_batch: non_tensor_batch_keys_to_pop.append("interaction_kwargs") if "agent_name" in test_batch.non_tensor_batch: non_tensor_batch_keys_to_pop.append("agent_name") test_gen_batch = test_batch.pop( batch_keys=batch_keys_to_pop, non_tensor_batch_keys=non_tensor_batch_keys_to_pop, ) # Validation configuration val_kwargs = self.config.actor_rollout_ref.rollout.val_kwargs rollout_config = self.config.actor_rollout_ref.rollout meta_info = { "eos_token_id": self.tokenizer.eos_token_id, "pad_token_id": self.tokenizer.pad_token_id, "recompute_log_prob": False, "do_sample": val_kwargs.do_sample, "validate": True, "global_steps": self.global_steps, } # Check for Two-Stage Rollout in Validation if rollout_config.get("enable_two_stage_rollout", False): meta_info["enable_two_stage_rollout"] = True meta_info["stage2_beam_size"] = rollout_config.get("stage2_beam_size", 32) meta_info["stage2_max_tokens"] = rollout_config.get("stage2_max_tokens", 16) # Stage 1 CoT config meta_info["max_tokens"] = self.config.data.get("max_response_length", 1024) # Disable standard beam search for Stage 1 (use sampling) meta_info["use_beam_search"] = False meta_info["n"] = val_kwargs.get("n", 1) print(f"[OneRecTrainer] Validation Two-Stage Enabled: {meta_info}") # Inject Beam Search parameters if enabled for validation (Single Stage) elif val_kwargs.get("use_beam_search", False): meta_info["use_beam_search"] = True meta_info["best_of"] = val_kwargs.get("best_of", 4) # Use max_response_length from config for validation as well meta_info["max_tokens"] = self.config.data.get("max_response_length", 16) meta_info["temperature"] = 0 # n controls how many beams to return per prompt (will expand output) meta_info["n"] = val_kwargs.get("n", 1) # Signal rollout to return all beams (no repeat, expand internally) meta_info["return_all_beams"] = True print(f"[OneRecTrainer] Validation Beam Search Enabled (optimized, no repeat): {meta_info}") test_gen_batch.meta_info = meta_info print(f"test_gen_batch meta info: {test_gen_batch.meta_info}") # pad to be divisible by dp_size size_divisor = ( self.actor_rollout_wg.world_size if not self.async_rollout_mode else self.config.actor_rollout_ref.rollout.agent.num_workers ) test_gen_batch_padded, pad_size = pad_dataproto_to_divisor(test_gen_batch, size_divisor) if not self.async_rollout_mode: test_output_gen_batch_padded = self.actor_rollout_wg.generate_sequences(test_gen_batch_padded) else: test_output_gen_batch_padded = self.async_rollout_manager.generate_sequences(test_gen_batch_padded) # unpad - For beam search or two-stage rollout, output is expanded, so we need to unpad accordingly if use_beam_search_val or is_two_stage_rollout_val: # For two-stage rollout, expansion is val_kwargs.n * stage2_beam_size if is_two_stage_rollout_val: stage2_beam_size = rollout_config.get("stage2_beam_size", 2) n_beams = stage2_beam_size # rollout already expands by beam_width print(f"[Validation Debug] Two-stage unpad: original pad_size={pad_size}, 
stage2_beam_size={stage2_beam_size}, actual_pad_size={pad_size * n_beams}") else: n_beams = val_kwargs.get("n", 1) print(f"[Validation Debug] Beam search unpad: original pad_size={pad_size}, n_beams={n_beams}, actual_pad_size={pad_size * n_beams}") actual_pad_size = pad_size * n_beams else: actual_pad_size = pad_size test_output_gen_batch = unpad_dataproto(test_output_gen_batch_padded, pad_size=actual_pad_size) # Debug: Check keys returned from worker print(f"[Trainer Debug] test_output_gen_batch keys: {test_output_gen_batch.non_tensor_batch.keys()}") print("validation generation end") # Handle beam search or two-stage rollout expansion: output may be larger than input # When return_all_beams=True, rollout expands output to batch_size * beam_width output_len = len(test_output_gen_batch) input_len = len(test_batch) if output_len > input_len and (use_beam_search_val or is_two_stage_rollout_val): # Rollout guarantees output_len = input_len * expand_factor, so we can use simple repeat expand_factor = output_len // input_len print(f"[Validation Debug] Batch {batch_idx-1}: Beam/TwoStage expansion - input={input_len}, output={output_len}, factor={expand_factor}") test_batch = test_batch.repeat(repeat_times=expand_factor, interleave=True) input_texts = [t for t in input_texts for _ in range(expand_factor)] if "reward_model" in test_batch.non_tensor_batch: ground_truths = [t for t in ground_truths for _ in range(expand_factor)] print(f"[Validation Debug] Batch {batch_idx-1}: After expansion - len(input_texts)={len(input_texts)}, len(test_batch)={len(test_batch)}") # Now extend sample_inputs and sample_ground_truths before_extend = len(sample_inputs) sample_inputs.extend(input_texts) print(f"[Validation Debug] Batch {batch_idx-1}: Extended sample_inputs from {before_extend} to {len(sample_inputs)} (+{len(input_texts)})") if "reward_model" in test_batch.non_tensor_batch: sample_ground_truths.extend(ground_truths) # Store generated outputs output_ids = test_output_gen_batch.batch["responses"] output_texts = [self.tokenizer.decode(ids, skip_special_tokens=True) for ids in output_ids] sample_outputs.extend(output_texts) # Collect response lengths for validation metrics response_lengths = [(ids != self.tokenizer.pad_token_id).sum().item() for ids in output_ids] reward_extra_infos_dict["response_length"].extend(response_lengths) test_batch = test_batch.union(test_output_gen_batch) test_batch.meta_info["validate"] = True # Debug: Check keys after union print(f"[Trainer Debug] test_batch keys after union: {test_batch.non_tensor_batch.keys()}") # Critical Step: Move generated_items into extra_info for NaiveRewardManager if "generated_items" in test_batch.non_tensor_batch: print("[Trainer Debug] Moving generated_items into extra_info...") generated_items_arr = test_batch.non_tensor_batch["generated_items"] batch_size = len(generated_items_arr) # Ensure extra_info exists if "extra_info" not in test_batch.non_tensor_batch: test_batch.non_tensor_batch["extra_info"] = np.array([{} for _ in range(batch_size)], dtype=object) extra_info_arr = test_batch.non_tensor_batch["extra_info"] for i in range(batch_size): if extra_info_arr[i] is None: extra_info_arr[i] = {} # Update dict (reference modification) extra_info_arr[i]["generated_items"] = generated_items_arr[i] # evaluate using reward_function result = self.val_reward_fn(test_batch, return_dict=True) reward_tensor = result["reward_tensor"] scores = reward_tensor.sum(-1).cpu().tolist() sample_scores.extend(scores) reward_extra_infos_dict["reward"].extend(scores) 
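# Shape note (illustrative, not executed): every list accumulated in
# reward_extra_infos_dict is expected to stay aligned with sample_scores, one
# entry per generated sample; the assertion after the validation loop enforces
# len(lst) in {0, len(sample_scores)}. A toy view of the structure after two
# batches of two samples each:
#
#   reward_extra_infos_dict = {
#       "reward":          [1.0, 0.0, 0.5, 1.0],
#       "response_length": [12, 9, 31, 17],
#   }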
print(f"len reward_extra_infos_dict['reward']: {len(reward_extra_infos_dict['reward'])}") if "reward_extra_info" in result: for key, lst in result["reward_extra_info"].items(): reward_extra_infos_dict[key].extend(lst) print(f"len reward_extra_infos_dict['{key}']: {len(reward_extra_infos_dict[key])}") # collect num_turns of each prompt if "__num_turns__" in test_batch.non_tensor_batch: sample_turns.append(test_batch.non_tensor_batch["__num_turns__"]) # 获取 data_source 信息,用于按task分组统计(和training逻辑一致) reward_fn_key = self.config.data.get("reward_fn_key", "data_source") data_sources_batch = test_batch.non_tensor_batch.get(reward_fn_key, None) # 如果没有找到,尝试其他常见字段名 if data_sources_batch is None: data_sources_batch = test_batch.non_tensor_batch.get("source", None) if data_sources_batch is None: data_sources_batch = test_batch.non_tensor_batch.get("data_source", None) # 如果还是找不到,使用默认值 if data_sources_batch is None: data_sources_batch = ["unknown"] * reward_tensor.shape[0] data_source_lst.append(data_sources_batch) self._maybe_log_val_generations(inputs=sample_inputs, outputs=sample_outputs, scores=sample_scores) # dump generations val_data_dir = self.config.trainer.get("validation_data_dir", None) if val_data_dir: self._dump_generations( inputs=sample_inputs, outputs=sample_outputs, scores=sample_scores, reward_extra_infos_dict=reward_extra_infos_dict, dump_path=val_data_dir, ground_truths=sample_ground_truths, ) for key_info, lst in reward_extra_infos_dict.items(): assert len(lst) == 0 or len(lst) == len(sample_scores), f"{key_info}: {len(lst)=}, {len(sample_scores)=}" data_sources = np.concatenate(data_source_lst, axis=0) # Debug: Check for duplicate prompts from collections import Counter prompt_counts = Counter(sample_inputs) duplicate_prompts = {p: c for p, c in prompt_counts.items() if c > 1} if duplicate_prompts: print(f"[Validation Debug] Found {len(duplicate_prompts)} duplicate prompts!") for p, c in list(duplicate_prompts.items())[:3]: # Show first 3 print(f" Prompt (truncated): '{p[:100]}...' appears {c} times") else: print(f"[Validation Debug] No duplicate prompts found. 
Total unique prompts: {len(prompt_counts)}") print(f"[Validation Debug] Total samples: {len(sample_inputs)}, Total scores: {len(sample_scores)}") data_src2var2metric2val = process_validation_metrics(data_sources, sample_inputs, reward_extra_infos_dict) metric_dict = {} for data_source, var2metric2val in data_src2var2metric2val.items(): core_var = "acc" if "acc" in var2metric2val else "reward" for var_name, metric2val in var2metric2val.items(): n_max = max([int(name.split("@")[-1].split("/")[0]) for name in metric2val.keys()]) for metric_name, metric_val in metric2val.items(): if ( (var_name == core_var) and any(metric_name.startswith(pfx) for pfx in ["mean", "maj", "best", "pass"]) and (f"@{n_max}" in metric_name) ): metric_sec = "val-core" else: metric_sec = "val-aux" pfx = f"{metric_sec}/{data_source}/{var_name}/{metric_name}" metric_dict[pfx] = metric_val if len(sample_turns) > 0: sample_turns = np.concatenate(sample_turns) metric_dict["val-aux/num_turns/min"] = sample_turns.min() metric_dict["val-aux/num_turns/max"] = sample_turns.max() metric_dict["val-aux/num_turns/mean"] = sample_turns.mean() # Add validation response_length statistics if "response_length" in reward_extra_infos_dict: response_lengths = reward_extra_infos_dict["response_length"] if len(response_lengths) > 0: import torch response_lengths_tensor = torch.tensor(response_lengths) metric_dict["val/response_length/mean"] = response_lengths_tensor.float().mean().item() metric_dict["val/response_length/max"] = response_lengths_tensor.max().item() metric_dict["val/response_length/min"] = response_lengths_tensor.min().item() return metric_dict def init_workers(self): """Initialize distributed training workers using Ray backend. Creates: 1. Ray resource pools from configuration 2. Worker groups for each role (actor, critic, etc.) 
""" self.resource_pool_manager.create_resource_pool() self.resource_pool_to_cls = {pool: {} for pool in self.resource_pool_manager.resource_pool_dict.values()} # create actor and rollout if self.hybrid_engine: resource_pool = self.resource_pool_manager.get_resource_pool(Role.ActorRollout) actor_rollout_cls = RayClassWithInitArgs( cls=self.role_worker_mapping[Role.ActorRollout], config=self.config.actor_rollout_ref, role="actor_rollout", profile_option=self.config.trainer.npu_profile.options, ) self.resource_pool_to_cls[resource_pool]["actor_rollout"] = actor_rollout_cls else: raise NotImplementedError # create critic if self.use_critic: resource_pool = self.resource_pool_manager.get_resource_pool(Role.Critic) critic_cls = RayClassWithInitArgs(cls=self.role_worker_mapping[Role.Critic], config=self.config.critic) self.resource_pool_to_cls[resource_pool]["critic"] = critic_cls # create reference policy if needed if self.use_reference_policy: resource_pool = self.resource_pool_manager.get_resource_pool(Role.RefPolicy) ref_policy_cls = RayClassWithInitArgs( self.role_worker_mapping[Role.RefPolicy], config=self.config.actor_rollout_ref, role="ref", profile_option=self.config.trainer.npu_profile.options, ) self.resource_pool_to_cls[resource_pool]["ref"] = ref_policy_cls # create a reward model if reward_fn is None if self.use_rm: # we create a RM here resource_pool = self.resource_pool_manager.get_resource_pool(Role.RewardModel) rm_cls = RayClassWithInitArgs(self.role_worker_mapping[Role.RewardModel], config=self.config.reward_model) self.resource_pool_to_cls[resource_pool]["rm"] = rm_cls # initialize WorkerGroup # NOTE: if you want to use a different resource pool for each role, which can support different parallel size, # you should not use `create_colocated_worker_cls`. # Instead, directly pass different resource pool to different worker groups. # See https://github.com/volcengine/verl/blob/master/examples/ray/tutorial.ipynb for more information. 
all_wg = {} wg_kwargs = {} # Setting up kwargs for RayWorkerGroup if OmegaConf.select(self.config.trainer, "ray_wait_register_center_timeout") is not None: wg_kwargs["ray_wait_register_center_timeout"] = self.config.trainer.ray_wait_register_center_timeout if OmegaConf.select(self.config.trainer, "profile_steps") is not None: wg_kwargs["profile_steps"] = OmegaConf.select(self.config.trainer, "profile_steps") assert OmegaConf.select(self.config.trainer, "worker_nsight_options") is not None, ( "worker_nsight_options must be set when profile_steps is set" ) wg_kwargs["worker_nsight_options"] = OmegaConf.to_container( OmegaConf.select(self.config.trainer, "worker_nsight_options") ) wg_kwargs["device_name"] = self.device_name for resource_pool, class_dict in self.resource_pool_to_cls.items(): worker_dict_cls = create_colocated_worker_cls(class_dict=class_dict) wg_dict = self.ray_worker_group_cls( resource_pool=resource_pool, ray_cls_with_init=worker_dict_cls, **wg_kwargs, ) spawn_wg = wg_dict.spawn(prefix_set=class_dict.keys()) all_wg.update(spawn_wg) if self.use_critic: self.critic_wg = all_wg["critic"] self.critic_wg.init_model() if self.use_reference_policy and not self.ref_in_actor: self.ref_policy_wg = all_wg["ref"] self.ref_policy_wg.init_model() if self.use_rm: self.rm_wg = all_wg["rm"] self.rm_wg.init_model() # we should create rollout at the end so that vllm can have a better estimation of kv cache memory self.actor_rollout_wg = all_wg["actor_rollout"] self.actor_rollout_wg.init_model() # create async rollout manager and request scheduler self.async_rollout_mode = False if self.config.actor_rollout_ref.rollout.mode == "async": from verl.experimental.agent_loop import AgentLoopManager self.async_rollout_mode = True self.async_rollout_manager = AgentLoopManager( config=self.config, worker_group=self.actor_rollout_wg, ) def _save_checkpoint(self): from verl.utils.fs import local_mkdir_safe # path: given_path + `/global_step_{global_steps}` + `/actor` local_global_step_folder = os.path.join( self.config.trainer.default_local_dir, f"global_step_{self.global_steps}" ) print(f"local_global_step_folder: {local_global_step_folder}") actor_local_path = os.path.join(local_global_step_folder, "actor") actor_remote_path = ( None if self.config.trainer.default_hdfs_dir is None else os.path.join(self.config.trainer.default_hdfs_dir, f"global_step_{self.global_steps}", "actor") ) remove_previous_ckpt_in_save = self.config.trainer.get("remove_previous_ckpt_in_save", False) if remove_previous_ckpt_in_save: print( "Warning: remove_previous_ckpt_in_save is deprecated," + " set max_actor_ckpt_to_keep=1 and max_critic_ckpt_to_keep=1 instead" ) max_actor_ckpt_to_keep = ( self.config.trainer.get("max_actor_ckpt_to_keep", None) if not remove_previous_ckpt_in_save else 1 ) max_critic_ckpt_to_keep = ( self.config.trainer.get("max_critic_ckpt_to_keep", None) if not remove_previous_ckpt_in_save else 1 ) self.actor_rollout_wg.save_checkpoint( actor_local_path, actor_remote_path, self.global_steps, max_ckpt_to_keep=max_actor_ckpt_to_keep ) if self.use_critic: critic_local_path = os.path.join(local_global_step_folder, "critic") critic_remote_path = ( None if self.config.trainer.default_hdfs_dir is None else os.path.join(self.config.trainer.default_hdfs_dir, f"global_step_{self.global_steps}", "critic") ) self.critic_wg.save_checkpoint( critic_local_path, critic_remote_path, self.global_steps, max_ckpt_to_keep=max_critic_ckpt_to_keep ) # save dataloader local_mkdir_safe(local_global_step_folder) dataloader_local_path 
= os.path.join(local_global_step_folder, "data.pt") dataloader_state_dict = self.train_dataloader.state_dict() torch.save(dataloader_state_dict, dataloader_local_path) # latest checkpointed iteration tracker (for atomic usage) local_latest_checkpointed_iteration = os.path.join( self.config.trainer.default_local_dir, "latest_checkpointed_iteration.txt" ) with open(local_latest_checkpointed_iteration, "w") as f: f.write(str(self.global_steps)) def _load_checkpoint(self): if self.config.trainer.resume_mode == "disable": return 0 # load from hdfs if self.config.trainer.default_hdfs_dir is not None: raise NotImplementedError("load from hdfs is not implemented yet") else: checkpoint_folder = self.config.trainer.default_local_dir # TODO: check path if not os.path.isabs(checkpoint_folder): working_dir = os.getcwd() checkpoint_folder = os.path.join(working_dir, checkpoint_folder) global_step_folder = find_latest_ckpt_path(checkpoint_folder) # None if no latest # find global_step_folder if self.config.trainer.resume_mode == "auto": if global_step_folder is None: print("Training from scratch") return 0 else: if self.config.trainer.resume_mode == "resume_path": assert isinstance(self.config.trainer.resume_from_path, str), "resume ckpt must be str type" assert "global_step_" in self.config.trainer.resume_from_path, ( "resume ckpt must specify the global_steps" ) global_step_folder = self.config.trainer.resume_from_path if not os.path.isabs(global_step_folder): working_dir = os.getcwd() global_step_folder = os.path.join(working_dir, global_step_folder) print(f"Load from checkpoint folder: {global_step_folder}") # set global step self.global_steps = int(global_step_folder.split("global_step_")[-1]) print(f"Setting global step to {self.global_steps}") print(f"Resuming from {global_step_folder}") actor_path = os.path.join(global_step_folder, "actor") critic_path = os.path.join(global_step_folder, "critic") # load actor self.actor_rollout_wg.load_checkpoint( actor_path, del_local_after_load=self.config.trainer.del_local_ckpt_after_load ) # load critic if self.use_critic: self.critic_wg.load_checkpoint( critic_path, del_local_after_load=self.config.trainer.del_local_ckpt_after_load ) # load dataloader, # TODO: from remote not implemented yet dataloader_local_path = os.path.join(global_step_folder, "data.pt") if os.path.exists(dataloader_local_path): dataloader_state_dict = torch.load(dataloader_local_path, weights_only=False) self.train_dataloader.load_state_dict(dataloader_state_dict) else: print(f"Warning: No dataloader state found at {dataloader_local_path}, will start from scratch") def _start_profiling(self, do_profile: bool) -> None: """Start profiling for all worker groups if profiling is enabled.""" if do_profile: self.actor_rollout_wg.start_profile(role="e2e", profile_step=self.global_steps) if self.use_reference_policy: self.ref_policy_wg.start_profile() if self.use_critic: self.critic_wg.start_profile() if self.use_rm: self.rm_wg.start_profile() def _stop_profiling(self, do_profile: bool) -> None: """Stop profiling for all worker groups if profiling is enabled.""" if do_profile: self.actor_rollout_wg.stop_profile() if self.use_reference_policy: self.ref_policy_wg.stop_profile() if self.use_critic: self.critic_wg.stop_profile() if self.use_rm: self.rm_wg.stop_profile() def _balance_batch(self, batch: DataProto, metrics, logging_prefix="global_seqlen"): """Reorder the data on single controller such that each dp rank gets similar total tokens""" attention_mask = batch.batch["attention_mask"] 
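# Toy illustration (comments only, not executed): with per-sample token counts
# [9, 2, 8, 1] and world_size=2, get_seqlen_balanced_partitions with
# equal_size=True looks for same-sized groups with similar token totals, e.g.
#
#   partitions = [[0, 3], [1, 2]]   # token totals: 9 + 1 = 10 and 2 + 8 = 10
#   flattened reorder index = [0, 3, 1, 2]
#
# which is exactly the index tensor handed to batch.reorder() below.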
batch_size = attention_mask.shape[0] global_seqlen_lst = batch.batch["attention_mask"].view(batch_size, -1).sum(-1).tolist() # (train_batch_size,) world_size = self.actor_rollout_wg.world_size global_partition_lst = get_seqlen_balanced_partitions( global_seqlen_lst, k_partitions=world_size, equal_size=True ) # reorder based on index. The data will be automatically equally partitioned by dispatch function global_idx = torch.tensor([j for partition in global_partition_lst for j in partition]) batch.reorder(global_idx) global_balance_stats = log_seqlen_unbalance( seqlen_list=global_seqlen_lst, partitions=global_partition_lst, prefix=logging_prefix ) metrics.update(global_balance_stats) def fit(self): """ The training loop of PPO. The driver process only needs to call the compute functions of the worker group through RPC to construct the PPO dataflow. The lightweight advantage computation is done on the driver process. """ from omegaconf import OmegaConf from verl.utils.tracking import Tracking logger = Tracking( project_name=self.config.trainer.project_name, experiment_name=self.config.trainer.experiment_name, default_backend=self.config.trainer.logger, config=OmegaConf.to_container(self.config, resolve=True), ) self.global_steps = 0 # load checkpoint before doing anything self._load_checkpoint() # perform validation before training # currently, we only support validation using the reward_function. if self.val_reward_fn is not None and self.config.trainer.get("val_before_train", True): val_metrics = self._validate() assert val_metrics, f"{val_metrics=}" pprint(f"Initial validation metrics: {val_metrics}") logger.log(data=val_metrics, step=self.global_steps) if self.config.trainer.get("val_only", False): return # add tqdm progress_bar = tqdm(total=self.total_training_steps, initial=self.global_steps, desc="Training Progress") # we start from step 1 self.global_steps += 1 last_val_metrics = None self.max_steps_duration = 0 for epoch in range(self.config.trainer.total_epochs): for batch_dict in self.train_dataloader: metrics = {} timing_raw = {} do_profile = ( self.global_steps in self.config.trainer.profile_steps if self.config.trainer.profile_steps is not None else False ) with marked_timer("start_profile", timing_raw): self._start_profiling(do_profile) batch: DataProto = DataProto.from_single_dict(batch_dict) # pop those keys for generation batch_keys_to_pop = ["input_ids", "attention_mask", "position_ids"] non_tensor_batch_keys_to_pop = ["raw_prompt_ids"] if "multi_modal_data" in batch.non_tensor_batch: non_tensor_batch_keys_to_pop.append("multi_modal_data") if "raw_prompt" in batch.non_tensor_batch: non_tensor_batch_keys_to_pop.append("raw_prompt") if "tools_kwargs" in batch.non_tensor_batch: non_tensor_batch_keys_to_pop.append("tools_kwargs") if "interaction_kwargs" in batch.non_tensor_batch: non_tensor_batch_keys_to_pop.append("interaction_kwargs") if "index" in batch.non_tensor_batch: non_tensor_batch_keys_to_pop.append("index") if "agent_name" in batch.non_tensor_batch: non_tensor_batch_keys_to_pop.append("agent_name") gen_batch = batch.pop( batch_keys=batch_keys_to_pop, non_tensor_batch_keys=non_tensor_batch_keys_to_pop, ) # pass global_steps to trace gen_batch.meta_info["global_steps"] = self.global_steps # Get original batch size for beam_idx calculation original_bs = len(gen_batch) # Check if beam search is enabled - if so, don't repeat (optimization) # Two-stage rollout: still repeat (for different CoT samples), but beam expansion happens in rollout rollout_config =
self.config.actor_rollout_ref.rollout use_beam_search_train = rollout_config.get("use_beam_search", False) is_two_stage_rollout = rollout_config.get("name") == "two_stage" rollout_n = self.config.actor_rollout_ref.rollout.n if not use_beam_search_train: # Standard sampling or two-stage rollout: repeat the batch for n_rollout different samples gen_batch = gen_batch.repeat(repeat_times=rollout_n, interleave=True) if "reward_model" in batch.non_tensor_batch: # repeat reward_model to match gen_batch size repeated_reward_model = np.repeat( batch.non_tensor_batch["reward_model"], rollout_n, axis=0 ) gen_batch.non_tensor_batch["reward_model"] = repeated_reward_model else: print(f"[OneRecTrainer] Beam search enabled, skipping repeat (optimized path)") is_last_step = self.global_steps >= self.total_training_steps with marked_timer("step", timing_raw): # generate a batch with marked_timer("gen", timing_raw, color="red"): # Dynamically configure generation parameters based on config rollout_config = self.config.actor_rollout_ref.rollout # Check if beam search is enabled in config if rollout_config.get("use_beam_search", False): gen_batch.meta_info["use_beam_search"] = True gen_batch.meta_info["best_of"] = rollout_config.get("best_of", 4) # Use max_response_length from data config if available, otherwise default gen_batch.meta_info["max_tokens"] = self.config.data.get("max_response_length", 16) gen_batch.meta_info["temperature"] = 0 n = rollout_config.get("n", 1) gen_batch.meta_info["n"] = n # Optimized: return all beams from rollout, no repeat needed gen_batch.meta_info["return_all_beams"] = True print(f"[OneRecTrainer] Beam Search Enabled (optimized, no repeat): {gen_batch.meta_info}") # Check if Two-Stage Rollout is enabled if rollout_config.get("enable_two_stage_rollout", False): gen_batch.meta_info["enable_two_stage_rollout"] = True gen_batch.meta_info["stage2_beam_size"] = rollout_config.get("stage2_beam_size", 32) gen_batch.meta_info["stage2_max_tokens"] = rollout_config.get("stage2_max_tokens", 16) # For Stage 1 (CoT), we use sampling params gen_batch.meta_info["max_tokens"] = self.config.data.get("max_response_length", 1024) # CoT length gen_batch.meta_info["temperature"] = rollout_config.get("temperature", 1.0) gen_batch.meta_info["top_p"] = rollout_config.get("top_p", 1.0) # Disable use_beam_search flag to prevent conflict in standard flow if both are set gen_batch.meta_info["use_beam_search"] = False print(f"[OneRecTrainer] Two-Stage Rollout Enabled: {gen_batch.meta_info}") if not self.async_rollout_mode: gen_batch_output = self.actor_rollout_wg.generate_sequences(gen_batch) else: gen_batch_output = self.async_rollout_manager.generate_sequences(gen_batch) timing_raw.update(gen_batch_output.meta_info["timing"]) gen_batch_output.meta_info.pop("timing", None) # Handle beam search/two-stage rollout expansion: output may be larger than input # When return_all_beams=True, rollout expands output to batch_size * beam_width if use_beam_search_train or is_two_stage_rollout: output_len = len(gen_batch_output) input_len = len(batch) print(f"[OneRecTrainer] Beam/TwoStage: gen_batch_output size={output_len}, batch size={input_len}, n={rollout_n}") # CRITICAL FIX: Generate UIDs BEFORE expansion so that beams from # the same prompt share the same UID for correct GRPO grouping # This must happen regardless of whether expansion is needed batch.non_tensor_batch["uid"] = np.array( [str(uuid.uuid4()) for _ in range(input_len)], dtype=object ) print(f"[OneRecTrainer] Generated UIDs before expansion: 
{len(batch.non_tensor_batch['uid'])} unique UIDs") if output_len > input_len: # Rollout guarantees output_len = input_len * expand_factor assert output_len % input_len == 0, \ f"Output size {output_len} must be a multiple of input size {input_len}" expand_factor = output_len // input_len print(f"[OneRecTrainer] Expanding batch using repeat: factor={expand_factor}") batch = batch.repeat(repeat_times=expand_factor, interleave=True) print(f"[OneRecTrainer] After expansion: batch size={len(batch)}, UIDs will be repeated {expand_factor}x") if self.config.algorithm.adv_estimator == AdvantageEstimator.REMAX: with marked_timer("gen_max", timing_raw, color="purple"): gen_baseline_batch = deepcopy(gen_batch) gen_baseline_batch.meta_info["do_sample"] = False if not self.async_rollout_mode: gen_baseline_output = self.actor_rollout_wg.generate_sequences(gen_baseline_batch) else: gen_baseline_output = self.async_rollout_manager.generate_sequences(gen_baseline_batch) batch = batch.union(gen_baseline_output) reward_baseline_tensor = self.reward_fn(batch) reward_baseline_tensor = reward_baseline_tensor.sum(dim=-1) batch.pop(batch_keys=list(gen_baseline_output.batch.keys())) batch.batch["reward_baselines"] = reward_baseline_tensor del gen_baseline_batch, gen_baseline_output # Generate UIDs and repeat batch for standard sampling path # Skip if beam search or two-stage rollout already handled this above if not use_beam_search_train and not is_two_stage_rollout: # Use original_bs (stored at line 1786) for consistency batch.non_tensor_batch["uid"] = np.array( [str(uuid.uuid4()) for _ in range(original_bs)], dtype=object ) # repeat to align with repeated responses in rollout batch = batch.repeat(repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True) # FORCE INJECTION: Bypass union and inject directly into extra_info if "generated_items" in gen_batch_output.non_tensor_batch: print(f"[Trainer Fit Debug] Force injecting generated_items into extra_info...") gen_items = gen_batch_output.non_tensor_batch["generated_items"] # Ensure extra_info exists in batch if "extra_info" not in batch.non_tensor_batch: batch.non_tensor_batch["extra_info"] = np.array([{} for _ in range(len(batch))], dtype=object) extra_infos = batch.non_tensor_batch["extra_info"] if len(gen_items) == len(extra_infos): for i in range(len(gen_items)): if extra_infos[i] is None: extra_infos[i] = {} extra_infos[i]["generated_items"] = gen_items[i] else: print(f"[Trainer Fit Error] Batch size mismatch during injection: {len(gen_items)} vs {len(extra_infos)}") batch = batch.union(gen_batch_output) if "response_mask" not in batch.batch.keys(): batch.batch["response_mask"] = compute_response_mask(batch) # Balance the number of valid tokens across DP ranks. # NOTE: This usually changes the order of data in the `batch`, # which won't affect the advantage calculation (since it's based on uid), # but might affect the loss calculation (due to the change of mini-batching). # TODO: Decouple the DP balancing and mini-batching. 
if self.config.trainer.balance_batch: self._balance_batch(batch, metrics=metrics) # compute global_valid tokens batch.meta_info["global_token_num"] = torch.sum(batch.batch["attention_mask"], dim=-1).tolist() with marked_timer("reward", timing_raw, color="yellow"): # compute reward model score if self.use_rm: reward_tensor = self.rm_wg.compute_rm_score(batch) batch = batch.union(reward_tensor) if self.config.reward_model.launch_reward_fn_async: future_reward = compute_reward_async.remote(data=batch, reward_fn=self.reward_fn) else: reward_tensor, reward_extra_infos_dict = compute_reward(batch, self.reward_fn) # recompute old_log_probs with marked_timer("old_log_prob", timing_raw, color="blue"): old_log_prob = self.actor_rollout_wg.compute_log_prob(batch) entropys = old_log_prob.batch["entropys"] response_masks = batch.batch["response_mask"] loss_agg_mode = self.config.actor_rollout_ref.actor.loss_agg_mode entropy_agg = agg_loss(loss_mat=entropys, loss_mask=response_masks, loss_agg_mode=loss_agg_mode) old_log_prob_metrics = {"actor/entropy": entropy_agg.detach().item()} # per-position entropy plot # masked_entropys = entropys * response_masks # sum_entropy_per_position = torch.sum(masked_entropys, dim=0) # num_tokens_per_position = torch.sum(response_masks, dim=0) # mean_entropy_per_position = sum_entropy_per_position / torch.clamp( # num_tokens_per_position, min=1 # ) # try: # entropy_list = mean_entropy_per_position.cpu().tolist() # table_data = [[i, ent] for i, ent in enumerate(entropy_list)] # table = wandb.Table(data=table_data, columns=["position", "entropy"]) # old_log_prob_metrics["actor/per_position_entropy_plot"] = wandb.plot.line( # table, "position", "entropy", title="Per-Position Entropy" # ) # except Exception as e: # print(f"Warning: Could not create wandb per-position entropy plot. Error: {e}") # token-type entropy try: responses = batch.batch["responses"] # mask for token type 1 (id >= 151669) type1_mask = (responses >= 151669) * response_masks # mask for token type 2 (id < 151669) type2_mask = (responses < 151669) * response_masks count_type1 = type1_mask.sum().item() count_type2 = type2_mask.sum().item() if count_type1 > 0: entropy_type1 = masked_mean(entropys, mask=type1_mask, axis=None).item() old_log_prob_metrics["actor/entropy_itemic_token"] = entropy_type1 if count_type2 > 0: entropy_type2 = masked_mean(entropys, mask=type2_mask, axis=None).item() old_log_prob_metrics["actor/entropy_lang_token"] = entropy_type2 old_log_prob_metrics["actor/token_count_itemic_token"] = count_type1 old_log_prob_metrics["actor/token_count_lang_token"] = count_type2 except Exception as e: print(f"Warning: Could not compute token-type entropy metrics. Error: {e}") metrics.update(old_log_prob_metrics) old_log_prob.batch.pop("entropys") batch = batch.union(old_log_prob) if "rollout_log_probs" in batch.batch.keys(): # TODO: we may want to add diff of probs too. 
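# Why this diff is tracked (illustrative): the rollout engine and the training
# forward pass can disagree numerically on the same tokens. A toy version of the
# quantity computed below, assuming matching shapes:
#
#   import torch
#   rollout_lp = torch.tensor([-0.51, -2.30])  # log-probs from the rollout engine
#   actor_lp = torch.tensor([-0.50, -2.31])    # log-probs from the actor forward
#   diff = (rollout_lp.exp() - actor_lp.exp()).abs()
#   # diff ~= tensor([0.0060, 0.0010]); persistently large values signal a
#   # rollout/actor mismatch (e.g. stale weights or kernel differences)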
    rollout_old_log_probs = batch.batch["rollout_log_probs"]
    actor_old_log_probs = batch.batch["old_log_probs"]
    attention_mask = batch.batch["attention_mask"]
    responses = batch.batch["responses"]
    response_length = responses.size(1)
    response_mask = attention_mask[:, -response_length:]

    rollout_probs = torch.exp(rollout_old_log_probs)
    actor_probs = torch.exp(actor_old_log_probs)
    rollout_probs_diff = torch.abs(rollout_probs - actor_probs)
    rollout_probs_diff = torch.masked_select(rollout_probs_diff, response_mask.bool())
    rollout_probs_diff_max = torch.max(rollout_probs_diff)
    rollout_probs_diff_mean = torch.mean(rollout_probs_diff)
    rollout_probs_diff_std = torch.std(rollout_probs_diff)
    metrics.update(
        {
            "training/rollout_probs_diff_max": rollout_probs_diff_max.detach().item(),
            "training/rollout_probs_diff_mean": rollout_probs_diff_mean.detach().item(),
            "training/rollout_probs_diff_std": rollout_probs_diff_std.detach().item(),
        }
    )

if self.use_reference_policy:
    # compute reference log_prob
    with marked_timer("ref", timing_raw, color="olive"):
        if not self.ref_in_actor:
            ref_log_prob = self.ref_policy_wg.compute_ref_log_prob(batch)
        else:
            ref_log_prob = self.actor_rollout_wg.compute_ref_log_prob(batch)
        batch = batch.union(ref_log_prob)

# compute values
if self.use_critic:
    with marked_timer("values", timing_raw, color="cyan"):
        values = self.critic_wg.compute_values(batch)
        batch = batch.union(values)

with marked_timer("adv", timing_raw, color="brown"):
    # we combine with rule-based rm
    reward_extra_infos_dict: dict[str, list]
    if self.config.reward_model.launch_reward_fn_async:
        reward_tensor, reward_extra_infos_dict = ray.get(future_reward)
    batch.batch["token_level_scores"] = reward_tensor

    if reward_extra_infos_dict:
        batch.non_tensor_batch.update({k: np.array(v) for k, v in reward_extra_infos_dict.items()})

    # Fetch the data_source field so reward statistics can be grouped per task.
    # Try several candidate field names: source, data_source, data_source_key.
    reward_fn_key = self.config.data.get("reward_fn_key", "data_source")
    data_sources = batch.non_tensor_batch.get(reward_fn_key, None)

    # If nothing was found, fall back to other common field names.
    if data_sources is None:
        data_sources = batch.non_tensor_batch.get("source", None)
    if data_sources is None:
        data_sources = batch.non_tensor_batch.get("data_source", None)

    # Debug info: print the available fields (only during the first few steps).
    if self.global_steps <= 2:
        print(f"[DEBUG] Batch size: {len(batch)}")
        print(f"[DEBUG] Available non_tensor_batch keys: {list(batch.non_tensor_batch.keys())}")
        print(f"[DEBUG] reward_fn_key from config: {reward_fn_key}")
        print(f"[DEBUG] data_sources found: {data_sources is not None}")
        if data_sources is not None:
            print(
                f"[DEBUG] data_sources type: {type(data_sources)}, "
                f"shape: {getattr(data_sources, 'shape', len(data_sources))}"
            )
            print(f"[DEBUG] first 10 sources: {data_sources[:10] if len(data_sources) > 0 else []}")
            print(f"[DEBUG] unique sources: {np.unique(data_sources)}")

    if data_sources is not None:
        # Group reward statistics by data_source so each task's scores are reported separately.
        unique_sources = np.unique(data_sources)
        print(f"[Task Statistics] Found {len(unique_sources)} unique tasks: {unique_sources}")
        for source in unique_sources:
            source_mask = data_sources == source
            num_samples = int(np.sum(source_mask))
            for key, values in reward_extra_infos_dict.items():
                if values and len(values) > 0:
                    values_array = np.array(values)
                    # Only record numeric metrics.
                    if np.issubdtype(values_array.dtype, np.number):
                        source_values = values_array[source_mask]
                        if len(source_values) > 0:
                            metrics[f"reward/{source}/{key}/mean"] = float(np.mean(source_values))
                            metrics[f"reward/{source}/{key}/max"] = float(np.max(source_values))
                            metrics[f"reward/{source}/{key}/min"] = float(np.min(source_values))
                            metrics[f"reward/{source}/{key}/count"] = num_samples
    else:
        print(
            f"[WARNING] data_sources not found in batch.non_tensor_batch. "
            f"Available keys: {list(batch.non_tensor_batch.keys())}"
        )

    # Global statistics (all tasks merged).
    for key, values in reward_extra_infos_dict.items():
        if values and len(values) > 0:
            values_array = np.array(values)
            # Only record numeric metrics.
            if np.issubdtype(values_array.dtype, np.number):
                metrics[f"reward/all/{key}/mean"] = float(np.mean(values_array))
                metrics[f"reward/all/{key}/max"] = float(np.max(values_array))
                metrics[f"reward/all/{key}/min"] = float(np.min(values_array))

    # compute rewards. apply_kl_penalty if available
    if self.config.algorithm.use_kl_in_reward:
        batch, kl_metrics = apply_kl_penalty(
            batch, kl_ctrl=self.kl_ctrl_in_reward, kl_penalty=self.config.algorithm.kl_penalty
        )
        metrics.update(kl_metrics)
    else:
        batch.batch["token_level_rewards"] = batch.batch["token_level_scores"]

    # Add a think-quality reward based on the PPL of the ground truth (GT).
    enable_sid_ppl_reward = self.config.get("enable_sid_ppl_reward", False)
    if enable_sid_ppl_reward:
        gt_ppl_reward_weight = self.config.get("sid_ppl_reward_weight", 0.1)

        # 1. Construct the probe data.
        probe_batch_dict, probe_non_tensor_dict, probe_mapping = construct_gt_probe_data(
            batch=batch, tokenizer=self.tokenizer
        )

        if probe_batch_dict:
            # 2. Build a DataProto and compute log_prob.
            # Note: the probe data must be moved onto the device first.
            device = batch.batch["input_ids"].device
            for k, v in probe_batch_dict.items():
                probe_batch_dict[k] = v.to(device)

            # Convert the dict into a TensorDict.
            probe_batch_size = probe_batch_dict["input_ids"].shape[0]
            probe_tensor_dict = TensorDict(probe_batch_dict, batch_size=probe_batch_size)
            probe_batch = DataProto(batch=probe_tensor_dict, non_tensor_batch=probe_non_tensor_dict)

            # Compute log_prob.
            # compute_log_prob returns a DataProto whose batch["old_log_probs"] holds the log_prob.
            probe_output = self.actor_rollout_wg.compute_log_prob(probe_batch)
            probe_log_probs = probe_output.batch["old_log_probs"]  # (num_probes, seq_len)

            # 3. Extract the log_prob of the GT tokens and turn it into a reward.
            # Aggregate the maximum reward per original_idx.
            original_idx_to_rewards = defaultdict(list)
            original_idx_to_think_end = {}
            for i, mapping in enumerate(probe_mapping):
                original_idx = mapping["original_idx"]
                gt_len = mapping["gt_len"]
                think_end_idx = mapping["think_end_idx"]
                original_idx_to_think_end[original_idx] = think_end_idx

                # Take the log_prob of the last gt_len tokens.
                # Note: old_log_probs corresponds to the log_prob of input_ids, where
                # input_ids = [prompt, thought, </think>, GT]; only the GT part matters here.
                gt_log_probs = probe_log_probs[i, -gt_len:]

                # Mean log_prob, i.e. the negative PPL score.
                reward = gt_log_probs.mean().item()
                original_idx_to_rewards[original_idx].append(reward)

            # 4. Write the reward back.
            reward_added_count = 0
            reward_sum = 0.0
            max_reward_val = -float("inf")
            min_reward_val = float("inf")
            for i, rewards in original_idx_to_rewards.items():
                # Take the max reward (the best-matching GT).
                max_reward = max(rewards)
                think_end_idx = original_idx_to_think_end[i]

                # Make sure the index stays in range.
                if think_end_idx < batch.batch["token_level_rewards"].shape[1]:
                    # Apply the weight.
                    weighted_reward = max_reward * gt_ppl_reward_weight
                    batch.batch["token_level_rewards"][i, think_end_idx] += weighted_reward
                    reward_added_count += 1
                    reward_sum += max_reward
                    max_reward_val = max(max_reward_val, max_reward)
                    min_reward_val = min(min_reward_val, max_reward)

            # Record metrics.
            if reward_added_count > 0:
                metrics["gt_ppl_reward/mean"] = reward_sum / reward_added_count
                metrics["gt_ppl_reward/max"] = max_reward_val
                metrics["gt_ppl_reward/min"] = min_reward_val
                metrics["gt_ppl_reward/count"] = reward_added_count
                print(
                    f"[Step {self.global_steps}] GT PPL Reward added to {reward_added_count} samples. "
                    f"Mean raw reward: {reward_sum / reward_added_count:.4f}"
                )

    # compute advantages, executed on the driver process
    norm_adv_by_std_in_grpo = self.config.algorithm.get(
        "norm_adv_by_std_in_grpo", True
    )  # GRPO adv normalization factor
    batch = compute_advantage(
        batch,
        adv_estimator=self.config.algorithm.adv_estimator,
        gamma=self.config.algorithm.gamma,
        lam=self.config.algorithm.lam,
        num_repeat=self.config.actor_rollout_ref.rollout.n,
        norm_adv_by_std_in_grpo=norm_adv_by_std_in_grpo,
        config=self.config.algorithm,
        tokenizer=self.tokenizer,
    )

    if self.config.algorithm.adv_estimator == AdvantageEstimator.GRPO:
        hit_rewards = batch.non_tensor_batch["score"]
        if isinstance(hit_rewards, np.ndarray):
            hit_rewards_tensor = torch.tensor(hit_rewards, dtype=torch.float32)
        else:
            hit_rewards_tensor = torch.tensor(list(hit_rewards), dtype=torch.float32)

        # Group the samples by uid.
        uids = batch.non_tensor_batch["uid"]
        unique_uids = np.unique(uids)
        zero_hit_reward_group_ratios = []
        all_group_zero_count = 0  # number of groups whose hit_reward is all zero

        for uid in unique_uids:
            # Select every sample belonging to the current uid.
            uid_mask = uids == uid
            uid_hit_rewards = hit_rewards_tensor[uid_mask]

            # Count the samples whose hit_reward is 0.
            zero_count = (uid_hit_rewards == 0).sum().item()
            total_count = len(uid_hit_rewards)

            # Fraction of zero-hit_reward samples within the current group.
            zero_ratio = zero_count / total_count if total_count > 0 else 0
            zero_hit_reward_group_ratios.append(zero_ratio)

            # If the whole group's hit_reward is zero, bump the counter.
            if zero_count == total_count:
                all_group_zero_count += 1

        # Aggregate the statistics.
        if len(zero_hit_reward_group_ratios) > 0:
            # Mean fraction of zero-hit_reward samples per group.
            mean_zero_hit_reward_ratio_in_group = np.mean(zero_hit_reward_group_ratios)
            # Fraction of groups whose hit_reward is entirely zero.
            all_zero_group_ratio = all_group_zero_count / len(unique_uids)

            metrics["training/grpo_zero_hit_reward_ratio_in_group_mean"] = mean_zero_hit_reward_ratio_in_group
            metrics["training/grpo_all_zero_hit_reward_group_ratio"] = all_zero_group_ratio
            metrics["training/grpo_all_zero_hit_reward_group_count"] = all_group_zero_count
            metrics["training/grpo_total_group_count"] = len(unique_uids)

# update critic
if self.use_critic:
    with marked_timer("update_critic", timing_raw, color="pink"):
        critic_output = self.critic_wg.update_critic(batch)
    critic_output_metrics = reduce_metrics(critic_output.meta_info["metrics"])
    metrics.update(critic_output_metrics)

# implement critic warmup
if self.config.trainer.critic_warmup <= self.global_steps:
    # update actor
    with marked_timer("update_actor", timing_raw, color="red"):
        batch.meta_info["multi_turn"] = self.config.actor_rollout_ref.rollout.multi_turn.enable
        actor_output = self.actor_rollout_wg.update_actor(batch)
    actor_output_metrics = reduce_metrics(actor_output.meta_info["metrics"])
    metrics.update(actor_output_metrics)

# Log rollout generations if enabled
rollout_data_dir = self.config.trainer.get("rollout_data_dir", None)
if rollout_data_dir:
    with marked_timer("dump_rollout_generations", timing_raw, color="green"):
        inputs = self.tokenizer.batch_decode(batch.batch["prompts"], skip_special_tokens=True)
        outputs = self.tokenizer.batch_decode(batch.batch["responses"], skip_special_tokens=True)
        scores = batch.batch["token_level_scores"].sum(-1).cpu().tolist()

        ground_truths = None
        if "reward_model" in batch.non_tensor_batch:
            ground_truths = [item["ground_truth"] for item in batch.non_tensor_batch["reward_model"]]

        if "request_id" in batch.non_tensor_batch:
            reward_extra_infos_dict.setdefault(
                "request_id",
                batch.non_tensor_batch["request_id"].tolist(),
            )
        self._dump_generations(
            inputs=inputs,
            outputs=outputs,
            scores=scores,
            reward_extra_infos_dict=reward_extra_infos_dict,
dump_path=rollout_data_dir, ground_truths=ground_truths, ) # validate if ( self.val_reward_fn is not None and self.config.trainer.test_freq > 0 and (is_last_step or self.global_steps % self.config.trainer.test_freq == 0) ): with marked_timer("testing", timing_raw, color="green"): val_metrics: dict = self._validate() if is_last_step: last_val_metrics = val_metrics metrics.update(val_metrics) # Check if the ESI (Elastic Server Instance)/training plan is close to expiration. esi_close_to_expiration = should_save_ckpt_esi( max_steps_duration=self.max_steps_duration, redundant_time=self.config.trainer.esi_redundant_time, ) # Check if the conditions for saving a checkpoint are met. # The conditions include a mandatory condition (1) and # one of the following optional conditions (2/3/4): # 1. The save frequency is set to a positive value. # 2. It's the last training step. # 3. The current step number is a multiple of the save frequency. # 4. The ESI(Elastic Server Instance)/training plan is close to expiration. if self.config.trainer.save_freq > 0 and ( is_last_step or self.global_steps % self.config.trainer.save_freq == 0 or esi_close_to_expiration ): if esi_close_to_expiration: print("Force saving checkpoint: ESI instance expiration approaching.") with marked_timer("save_checkpoint", timing_raw, color="green"): self._save_checkpoint() with marked_timer("stop_profile", timing_raw): self._stop_profiling(do_profile) steps_duration = timing_raw["step"] self.max_steps_duration = max(self.max_steps_duration, steps_duration) # training metrics metrics.update( { "training/global_step": self.global_steps, "training/epoch": epoch, } ) # collect metrics train_data_metrics = compute_data_metrics(batch=batch, use_critic=self.use_critic) # Add train/ prefix to response_length metrics train_data_metrics_prefixed = {} for key, value in train_data_metrics.items(): if key.startswith("response_length/") or key.startswith("prompt_length/"): train_data_metrics_prefixed[f"train/{key}"] = value else: train_data_metrics_prefixed[key] = value metrics.update(train_data_metrics_prefixed) metrics.update(compute_timing_metrics(batch=batch, timing_raw=timing_raw)) # TODO: implement actual tflpo and theoretical tflpo n_gpus = self.resource_pool_manager.get_n_gpus() metrics.update(compute_throughout_metrics(batch=batch, timing_raw=timing_raw, n_gpus=n_gpus)) # this is experimental and may be changed/removed in the future in favor of a general-purpose one if isinstance(self.train_dataloader.sampler, AbstractCurriculumSampler): self.train_dataloader.sampler.update(batch=batch) # TODO: make a canonical logger that supports various backend logger.log(data=metrics, step=self.global_steps) progress_bar.update(1) self.global_steps += 1 if is_last_step: pprint(f"Final validation metrics: {last_val_metrics}") progress_bar.close() return # this is experimental and may be changed/removed in the future # in favor of a general-purpose data buffer pool if hasattr(self.train_dataset, "on_batch_end"): # The dataset may be changed after each training batch self.train_dataset.on_batch_end(batch=batch) ================================================ FILE: verl_rl/recipe/onerec/onerec_recipe.py ================================================ from __future__ import annotations import ast import copy import logging import os import re from collections import defaultdict from typing import Any, Optional import datasets import numpy as np import torch from omegaconf import DictConfig, ListConfig from torch.utils.data import Dataset from 
transformers import PreTrainedTokenizer, ProcessorMixin

import verl.utils.torch_functional as verl_F
from verl.utils.model import compute_position_id_with_mask

logger = logging.getLogger(__name__)

__all__ = ["collate_fn", "OneRecDataset", "compute_score"]


def collate_fn(samples: list[dict[str, Any]]) -> dict[str, Any]:
    tensors: dict[str, list[torch.Tensor]] = defaultdict(list)
    non_tensors: dict[str, list[Any]] = defaultdict(list)

    for sample in samples:
        for key, value in sample.items():
            if isinstance(value, torch.Tensor):
                tensors[key].append(value)
            else:
                non_tensors[key].append(value)

    batch: dict[str, Any] = {}
    for key, value in tensors.items():
        batch[key] = torch.stack(value, dim=0)
    for key, value in non_tensors.items():
        batch[key] = np.array(value, dtype=object)
    return batch


class OneRecDataset(Dataset):
    def __init__(
        self,
        data_files: str | list[str],
        tokenizer: PreTrainedTokenizer,
        config: DictConfig,
        processor: Optional[ProcessorMixin] = None,
        max_samples: int = -1,
    ) -> None:
        if not isinstance(data_files, (list, ListConfig)):
            data_files = [data_files]

        self.data_files = copy.deepcopy(list(data_files))
        self.original_data_files = copy.deepcopy(list(data_files))
        self.tokenizer = tokenizer
        self.processor = processor
        self.max_samples = max_samples
        self.config = config

        self.cache_dir = os.path.expanduser(config.get("cache_dir", "~/.cache/verl/rlhf"))
        self.prompt_key = config.get("prompt_key", "prompt")
        self.image_key = config.get("image_key", "images")
        self.video_key = config.get("video_key", "videos")
        self.max_prompt_length = config.get("max_prompt_length", 1024)
        self.return_raw_chat = config.get("return_raw_chat", False)
        self.return_full_prompt = config.get("return_full_prompt", False)
        self.truncation = config.get("truncation", "error")
        self.filter_overlong_prompts = config.get("filter_overlong_prompts", True)
        self.need_tools_kwargs = config.get("need_tools_kwargs", False)
        self.filter_prompts = config.get("filter_prompts", True)
        self.return_multi_modal_inputs = config.get("return_multi_modal_inputs", True)
        self.enable_think = config.get("enable_think", True)
        self.enable_nonthink = config.get("enable_nonthink", False)
        self.use_force_prefix = config.get("use_force_prefix", False)
        self._FORCE_PREFIX_CONTENT = "\n<|sid_begin|>"
        if self.enable_think and self.enable_nonthink:
            raise ValueError("enable_think and enable_nonthink cannot be both True")
        # shuffle/seed are read in _read_files_and_tokenize but were never initialized;
        # default them from the config here (assumed config keys: "shuffle", "seed").
        self.shuffle = config.get("shuffle", False)
        self.seed = config.get("seed", None)

        self.num_workers = os.cpu_count()
        self.use_shm = config.get("use_shm", False)
        self.serialize_dataset = False
        self._download()
        self._read_files_and_tokenize()

    def _download(self, use_origin_parquet: bool = False) -> None:
        from verl.utils.fs import copy_to_local

        target_files = self.original_data_files if use_origin_parquet else self.data_files
        for idx, parquet_file in enumerate(target_files):
            local_path = copy_to_local(src=parquet_file, cache_dir=self.cache_dir, use_shm=self.use_shm)
            target_files[idx] = local_path
        if use_origin_parquet:
            self.data_files = target_files

    def _read_files_and_tokenize(self) -> None:
        dataframes: list[datasets.Dataset] = []
        for parquet_file in self.data_files:
            dataframe = datasets.load_dataset("parquet", data_files=parquet_file)["train"]
            dataframes.append(dataframe)
        self.dataframe = datasets.concatenate_datasets(dataframes)  # type: ignore[attr-defined]
        logger.info("dataset len: %s", len(self.dataframe))

        if self.max_samples > 0 and self.max_samples < len(self.dataframe):
            if self.shuffle:
                rngs_args = (self.seed,) if self.seed is not None else ()
                rng = np.random.default_rng(*rngs_args)
                indices = rng.choice(len(self.dataframe),
size=self.max_samples, replace=False) else: indices = np.arange(self.max_samples) self.dataframe = self.dataframe.select(indices.tolist()) print(f"selected {self.max_samples} random samples out of {len(self.dataframe)}") self.dataframe = self.dataframe.map( self._extract_prompt_fields, num_proc=self.num_workers, desc="Extract prompts and reward annotations", ) logger.info("processed dataset len: %s", len(self.dataframe)) self.dataframe = self.maybe_filter_out_long_prompts(self.dataframe) def _extract_prompt_fields(self, row: dict[str, Any]) -> dict[str, Any]: raw_messages = row.get("messages") if isinstance(raw_messages, str): messages = ast.literal_eval(raw_messages) else: messages = raw_messages or [] clean_chats = [ { "role": message.get("role"), "content": "".join(segment.get("text", "") for segment in message.get("content", []) if segment.get("type") == "text"), } for message in messages ] if not clean_chats: raise ValueError("Sample has empty messages; please check data integrity.") prompt_messages = clean_chats[:-1] # Append /think or /no_think suffix to user messages based on config if self.enable_think: for message in prompt_messages: if message["role"] == "user": message["content"] = message["content"] + "/think" if self.enable_nonthink: for message in prompt_messages: if message["role"] == "user": message["content"] = message["content"] + "/no_think" ground_truth_message = clean_chats[-1]["content"] reward_payload = { "ground_truth": ground_truth_message, "style": "rule", } row[self.prompt_key] = prompt_messages row["reward_model"] = reward_payload return row def maybe_filter_out_long_prompts(self, dataframe: datasets.Dataset) -> datasets.Dataset: if not self.filter_overlong_prompts: return dataframe tokenizer = self.tokenizer processor = self.processor prompt_key = self.prompt_key image_key = self.image_key video_key = self.video_key if processor is not None: from verl.utils.dataset.vision_utils import process_image, process_video def doc_length(doc: dict[str, Any]) -> int: messages = self._build_messages(dict(doc)) raw_prompt = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=False) images = [process_image(image) for image in doc.get(image_key, [])] videos = [process_video(video) for video in doc.get(video_key, [])] encoded = processor(text=[raw_prompt], images=images or None, videos=videos or None, return_tensors="pt") return int(encoded["input_ids"].shape[-1]) else: def doc_length(doc: dict[str, Any]) -> int: messages = doc[prompt_key] return len(tokenizer.apply_chat_template(messages, add_generation_prompt=True)) filtered = dataframe.filter( lambda doc: doc_length(doc) <= self.max_prompt_length - 10, num_proc=self.num_workers, desc=f"Filtering prompts longer than {self.max_prompt_length - 10} tokens", ) logger.info("filtered dataset len: %s", len(filtered)) return filtered def resume_dataset_state(self) -> None: self.serialize_dataset = not hasattr(self, "original_data_files") if not self.serialize_dataset: self._download(use_origin_parquet=True) self._read_files_and_tokenize() else: logger.warning("resume with serialized dataloader, consider restarting from scratch for better perf") def __len__(self) -> int: # type: ignore[override] return len(self.dataframe) def _build_messages(self, example: dict[str, Any]) -> list[dict[str, Any]]: messages: list[dict[str, Any]] = example.pop(self.prompt_key) if self.image_key in example or self.video_key in example: for message in messages: content = message["content"] segments = [segment for segment in 
re.split(r"(<image>|<video>)", content) if segment != ""]


================================================
FILE: verl_rl/tests/workers/rollout/test_custom_completion_callback.py
================================================

", ""],
        }
        return extra

    async def __call__(self, messages: list[dict[str, str]], completions: ChatCompletion, info: dict[str, Any]):
        role, content, finish_reason = (
            completions.choices[0].message.role,
            completions.choices[0].message.content,
            completions.choices[0].finish_reason,
        )
        messages.append({"role": role, "content": content})
        turn = len(messages)

        # STEP 0: check if we reach max turns
        if len(messages) >= self.max_assistant_turns:
            print(f"[id={completions.id},turn={turn},finish_reason={finish_reason}] Reach max turns, done!")
            return

        # STEP 1: check if we reach max tokens
        if finish_reason == "length":
            print(f"[id={completions.id},turn={turn},finish_reason={finish_reason}] Reach max tokens, done!")
            return

        # STEP 2: check if we got an answer
        matches = self.answer_pattern.findall(content)
        if matches:
            print(f"[id={completions.id},turn={turn},finish_reason={finish_reason}] Got answer: {matches[0]}, done!")
            return

        # STEP 3: check if we got a code block
        matches = self.code_pattern.findall(content)
        if not matches:
            print(f"[id={completions.id},turn={turn},finish_reason={finish_reason}] No code block found, done!")
            return

        # STEP 4: execute the code block in the sandbox
        code = matches[0].strip()
        metadata = await self.sandbox_code_execution(code)
        if metadata["run_status"] != "Finished":
            print(
                f"[id={completions.id},turn={turn},finish_reason={finish_reason}] Code block execution failed: "
                f"{metadata}, done!"
            )
            return
        stdout, stderr = metadata["stdout"], metadata["stderr"]
        messages.append({"role": "tool", "content": f"<output>{stdout}{stderr}</output>"})
        print(f"[id={completions.id},turn={turn},finish_reason={finish_reason}] Code block executed, continue...")

        # STEP 5: resubmit chat completions with the code block output
        self.scheduler.submit_chat_completions(
            messages=messages,
            request_id=completions.id,
            info=info,
        )


user_prompt_template = """
You are a helpful assistant. Let's solve the math problem in the following steps:
1. Write a python code first and return the code to the user; the code must be in the following format:

<code>
```python
import os

print(...)
```
</code>

The code must explicitly print the necessary output to stdout. Remember to stop generation at </code> immediately and return the code.
2. The user will send the python code to an external sandbox to execute and get the output from stdout.
3. The user will send the output in the format <output>output</output> to you, and you should use the output to answer the question.
The answer format must be: \\boxed{'The final answer goes here.'}

*user question:*
{question}
"""


if __name__ == "__main__":
    ray.init(
        runtime_env={
            "env_vars": {
                "TOKENIZERS_PARALLELISM": "true",
                "NCCL_DEBUG": "WARN",
                "VLLM_LOGGING_LEVEL": "INFO",
                "VLLM_USE_V1": "1",
            }
        }
    )

    # Load config
    import os

    from hydra import compose, initialize_config_dir

    with initialize_config_dir(config_dir=os.path.abspath("verl/trainer/config")):
        config = compose(config_name="ppo_trainer")

    model_path = "Qwen/Qwen2.5-1.5B-Instruct"
    config.actor_rollout_ref.model.path = model_path
    config.actor_rollout_ref.rollout.mode = "async"
    config.actor_rollout_ref.rollout.multi_turn.format = "hermes"
    config.actor_rollout_ref.rollout.multi_turn.completion_callback = (
        "tests.workers.rollout.test_custom_completion_callback.CustomCompletionCallback"
    )
    config.actor_rollout_ref.rollout.prompt_length = 4096
    config.actor_rollout_ref.rollout.response_length = 4096
    config.actor_rollout_ref.rollout.n = 4

    # Init sandbox and async rollout manager
    sandbox = Sandbox.options(num_cpus=1).remote()
    sandbox_address = ray.get(sandbox.get_server_address.remote())
    sandbox_fusion_url = f"http://{sandbox_address}/run_code"
    config.reward_model.sandbox_fusion.url = sandbox_fusion_url
    async_rollout_manager = init_async_rollout_manager(config)

    # Build dataset
    dataset = load_dataset("Maxwell-Jia/AIME_2024", split="train")
    prompts = DataProto(
        non_tensor_batch={
            "raw_prompt": np.array(
                [
                    [{"role": "user", "content": user_prompt_template.replace("{question}", problem)}]
                    for problem in dataset["Problem"]
                ]
            ),
        },
    )
    result = async_rollout_manager.generate_sequences(prompts=prompts)
    assert len(result) == len(dataset) * config.actor_rollout_ref.rollout.n

    # Check the max number of turns, i.e. how many times the sandbox was called
    num_turns = result.non_tensor_batch["__num_turns__"]
    print(f"num_turns: {num_turns}")
    assert np.max(num_turns) > 2, f"max turns: {np.max(num_turns)}"

    # Check response_mask
    tokenizer = hf_tokenizer(config.actor_rollout_ref.model.path)
    responses = result.batch["responses"]
    response_mask = result.batch["response_mask"]
    assert responses.size() == response_mask.size(), f"{responses.size()} != {response_mask.size()}"

    # Decode responses with response_mask: tool outputs are wrapped in <output> tags
    # and must be masked out of the response tokens.
    for i in range(len(responses)):
        valid_tokens = responses[i][response_mask[i].bool()]
        response_str = tokenizer.decode(valid_tokens)
        assert "<output>" not in response_str, f"<output> found in response: {response_str}"
        assert "</output>" not in response_str, f"</output> found in response: {response_str}"
        print(f"response: {response_str}")

    print("Test passed!")


================================================
FILE: verl_rl/tests/workers/rollout/test_hf_rollout.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
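# This test drives HFRollout end to end: it wraps an HF causal LM in FSDP, feeds a
# small batch of left-padded chat prompts, and then checks batch-size expansion,
# attention masks, and position ids of the generated responses. The core shape
# contract, as asserted further down (illustrative paraphrase):
#     outputs = HFRollout(fsdp_model, config).generate_sequences(input)  # B prompts in
#     outputs.batch.batch_size[0] == B * config.n                        # n samples per prompt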
import os import torch from omegaconf import OmegaConf from torch.distributed.fsdp import CPUOffload, MixedPrecision from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.api import ShardedStateDictConfig, ShardingStrategy, StateDictType from transformers import AutoModelForCausalLM, AutoTokenizer from verl import DataProto from verl.utils.distributed import initialize_global_process_group from verl.utils.fs import copy_to_local from verl.utils.model import compute_position_id_with_mask from verl.workers.rollout.hf_rollout import HFRollout BASE_HF_ROLLOUT_CONFIG = { "temperature": 1.0, "top_k": -1, "top_p": 1, "prompt_length": 64, "response_length": 64, "do_sample": True, "n": 1, "val_kwargs": { "top_k": -1, "top_p": 1.0, "temperature": 0, "n": 1, "do_sample": False, }, } def prepare_input_dataproto(tokenizer, config, validate): preencode_prompts = [ [{"role": "user", "content": "Who won the Champions League in 2019?"}], [{"role": "user", "content": "The founder of Apple is"}], [{"role": "user", "content": "What's your name"}], ] formatted_prompts = [ tokenizer.apply_chat_template(conversation, tokenize=False, add_generation_prompt=True) for conversation in preencode_prompts ] prompts = tokenizer(formatted_prompts, return_tensors="pt", padding="max_length", max_length=config.prompt_length) input_dataproto = DataProto.from_dict( { "input_ids": prompts["input_ids"], "attention_mask": prompts["attention_mask"], "position_ids": compute_position_id_with_mask(prompts["attention_mask"]), }, meta_info={ "bos_token_id": tokenizer.bos_token_id, "eos_token_id": tokenizer.eos_token_id, "pad_token_id": tokenizer.pad_token_id, "validate": validate, }, ) return input_dataproto def prepare_fsdp_model(model, world_size): from torch.distributed.device_mesh import init_device_mesh device_mesh = init_device_mesh("cuda", mesh_shape=(world_size,), mesh_dim_names=["fsdp"]) mixed_precision = MixedPrecision(param_dtype=torch.bfloat16, reduce_dtype=torch.float32, buffer_dtype=torch.float32) fsdp_model = FSDP( model, use_orig_params=True, auto_wrap_policy=None, device_id=torch.cuda.current_device(), sharding_strategy=ShardingStrategy.FULL_SHARD, mixed_precision=mixed_precision, cpu_offload=CPUOffload(offload_params=False), sync_module_states=False, device_mesh=device_mesh, ) FSDP.set_state_dict_type( fsdp_model, state_dict_type=StateDictType.SHARDED_STATE_DICT, state_dict_config=ShardedStateDictConfig() ) return fsdp_model def test_hf_rollout(n: int = 1, do_sample: bool = True, validate: bool = False): config = OmegaConf.create(BASE_HF_ROLLOUT_CONFIG) config.update({"n": n, "do_sample": do_sample}) assert torch.cuda.device_count() >= 2, "At least 2 GPUs is required to run tp+dp tests." 
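    # Setup below: init the global process group, load the HF model in bf16, shard it
    # with FSDP (FULL_SHARD; bf16 params, fp32 reduce/buffers via the MixedPrecision
    # policy above), then hand the sharded module to HFRollout. Left padding on the
    # tokenizer is what lets generation append responses at a shared position across
    # prompts of unequal length.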
local_rank, rank, world_size = initialize_global_process_group() # Initialize model and tokenizer local_cache_path = "~/.cache/verl/rlhf" local_cache_path = os.path.expanduser(local_cache_path) hdfs_path = "Qwen/Qwen2-7B-Instruct" local_model_path = copy_to_local(src=hdfs_path, cache_dir=local_cache_path) tokenizer = AutoTokenizer.from_pretrained(local_model_path, padding_side="left", trust_remote_code=True) tokenizer.pad_token = tokenizer.eos_token # Initialize FSDP model actor_model = AutoModelForCausalLM.from_pretrained(local_model_path, trust_remote_code=True) actor_model.to(torch.bfloat16) fsdp_model = prepare_fsdp_model(actor_model, world_size) # Initialize HFRollout and start generate hf_rollout = HFRollout(fsdp_model, OmegaConf.create(config)) input = prepare_input_dataproto(tokenizer, config, validate).to(torch.cuda.current_device()) outputs = hf_rollout.generate_sequences(input) # check generated batch size is expected generated_batch_size = outputs.batch.batch_size[0] assert generated_batch_size == input.batch.batch_size[0] * config.n for i in range(generated_batch_size): prompt_tokens = outputs.batch["prompts"][i] prompt_mask = prompt_tokens != tokenizer.pad_token_id prompt_tokens = prompt_tokens[prompt_mask] decoded_prompt = tokenizer.decode(prompt_tokens, skip_special_tokens=False) response_tokens = outputs.batch["responses"][i] response_mask = response_tokens != tokenizer.pad_token_id response_tokens = response_tokens[response_mask] decoded_response = tokenizer.decode(response_tokens, skip_special_tokens=False) attention_mask = outputs.batch["attention_mask"][i] position_ids = outputs.batch["position_ids"][i] prompt_length = outputs.batch["prompts"].size(1) response_length = outputs.batch["responses"].size(1) assert attention_mask.size(0) == prompt_length + response_length assert position_ids.size(0) == prompt_length + response_length # check response attention mask is expected response_attention = attention_mask[prompt_length:] eos_positions = (outputs.batch["responses"][i] == tokenizer.pad_token_id).nonzero(as_tuple=True)[0] if len(eos_positions) > 0: first_eos_pos = eos_positions[0].item() assert response_attention[: first_eos_pos + 1].all(), "Response attention mask should be 1 until EOS" if first_eos_pos + 1 < response_length: assert not response_attention[first_eos_pos + 1 :].any(), ( "Response attention mask should be 0 after EOS" ) else: assert response_attention.all(), "Response attention mask should be all 1 if no EOS token" # check response position ids is expected prompt_positions = position_ids[:prompt_length] response_positions = position_ids[prompt_length:] valid_response_length = min(len(response_tokens), response_length) if valid_response_length > 0: assert response_positions[0] == prompt_positions[-1] + 1 for j in range(1, valid_response_length): assert response_positions[j] == response_positions[j - 1] + 1 # print generated text for inspection if torch.distributed.get_rank() == 0: print(f"prompt: {decoded_prompt}") print(f"response: {decoded_response}") print("=" * 30) if __name__ == "__main__": test_hf_rollout(n=2, do_sample=True, validate=False) # test_hf_rollout(n=1, do_sample=False, validate=True) # test_hf_rollout(n=1, do_sample=True, validate=False) ================================================ FILE: verl_rl/tests/workers/rollout/test_sglang_async_rollout_mcp_tools.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. 
and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Adapted from tests/workers/rollout/test_sglang_async_rollout_sf_tools.py

import asyncio
from copy import deepcopy
from unittest.mock import AsyncMock, MagicMock, patch

import numpy as np
import pytest
from tensordict import TensorDict
from transformers import AutoConfig, AutoTokenizer
from utils_sglang import get_rollout_config, prepare_inputs

from verl.protocol import DataProto
from verl.tools.mcp_search_tool import MCPSearchTool
from verl.tools.utils.mcp_clients.McpClientManager import MCPClientManager
from verl.workers.rollout.schemas import AsyncRolloutRequest, AsyncRolloutRequestStateEnum, Message
from verl.workers.rollout.sglang_rollout.sglang_rollout import SGLangRollout

DEFAULT_USER_CONTENT_PREFIX = (
    "Answer the given question. You must conduct reasoning inside <think> and </think> "
    "first every time you get new information. After reasoning, if you find you lack "
    "some knowledge, you can call a search engine by <search> query </search> "
    "and it will return the top searched results between <information> and "
    "</information>. You can search as many times as you want. If you find no "
    "further external knowledge needed, you can directly provide the answer inside "
    "<answer> and </answer>, without detailed illustrations. For example, "
    "<answer> Beijing </answer>. Question: "
)
user_content = DEFAULT_USER_CONTENT_PREFIX.rstrip("\n") + "How's the weather lately?"
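# The prefix above encodes a Search-R1-style interaction protocol: reasoning goes
# inside <think>...</think>, tool queries inside <search>...</search>, retrieved
# results come back wrapped in <information>...</information>, and the episode ends
# with <answer>...</answer>. A minimal answer-extraction sketch (illustrative, not
# used by this test):
#     import re
#     m = re.search(r"<answer>(.*?)</answer>", "<answer> Beijing </answer>", re.DOTALL)
#     final = m.group(1).strip() if m else None  # -> "Beijing"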
def get_search_messages(): user_prompt = { "role": "user", "content": user_content, } expect_turn_0_msg = { "role": "assistant", "content": "Let me search the web.", "tool_calls": [ { "id": "10", "type": "function", "function": { "name": "tavily_search_tool", "arguments": { "what_is_your_intent": "Search for the weather lately", "query": "the weather in Beijing today", "search_depth": "basic", "time_range": "day", "include_domains": ["google.com", "baidu.com"], "max_results": 2, }, }, } ], } expect_turn_1_msg = { "role": "assistant", "content": "Let me search again.", "tool_calls": [ { "type": "function", "function": { "name": "tavily_search_tool", "arguments": { "what_is_your_intent": "Search for the weather lately", "query": "the weather in Beijing tomorrow", "search_depth": "basic", "time_range": "day", "include_domains": ["google.com", "baidu.com"], "max_results": 2, }, }, } ], } expect_turn_2_msg = { "role": "assistant", "content": "Today is sunny and tomorrow will be cloudy in Beijing.", } # Mock search tool responses tool_return_0_msg = {"role": "tool", "content": [{"type": "text", "text": "Today's weather in Beijing is sunny."}]} tool_return_1_msg = { "role": "tool", "content": [{"type": "text", "text": "Tomorrow's weather in Beijing is cloudy."}], } user_prompts = [user_prompt] expect_turn_array = [expect_turn_0_msg, expect_turn_1_msg, expect_turn_2_msg] tool_return_array = [tool_return_0_msg, tool_return_1_msg] return user_prompts, expect_turn_array, tool_return_array class TestRolloutWithMCPSearchTools: @pytest.fixture def qwen_tokenizer(self): local_model_path = "Qwen/Qwen2.5-0.5B" tokenizer = AutoTokenizer.from_pretrained(local_model_path, padding_side="left") tokenizer.pad_token = tokenizer.eos_token return tokenizer # we only need this for tokenizer @pytest.fixture def qwen_model_config(self): local_model_path = "Qwen/Qwen2.5-0.5B" config = AutoConfig.from_pretrained(local_model_path) return config @pytest.fixture def search_data(self, qwen_tokenizer): user_prompt, expect_turn_array, tool_return_array = get_search_messages() prompts = [[message] for message in user_prompt] preencode_turn_array = [ qwen_tokenizer.apply_chat_template([turn], tokenize=False, add_generation_prompt=False) for turn in expect_turn_array ] preencode_tool_return_array = [ qwen_tokenizer.apply_chat_template([turn], tokenize=False, add_generation_prompt=True) for turn in tool_return_array ] return prompts, preencode_turn_array, preencode_tool_return_array @pytest.fixture def search_rollout_config(self): max_prompt_length = 4096 max_response_length = 3000 dtype = "bfloat16" tensor_parallel_size = 1 tool_path = "./resource/tool_configs/mcp_tool_config" rollout_config = get_rollout_config( max_response_length, max_prompt_length, dtype, tensor_parallel_size, tool_path ) return rollout_config @pytest.fixture def search_data_proto(self, search_data, qwen_tokenizer): preencode_prompts, _, _ = search_data prompts = [ qwen_tokenizer.apply_chat_template(message, tokenize=False, add_generation_prompt=True) for message in preencode_prompts ] input_ids, attention_mask, position_ids = prepare_inputs(qwen_tokenizer, prompts, 1000) prompt_dict = TensorDict( { "input_ids": input_ids, "attention_mask": attention_mask, "position_ids": position_ids, }, batch_size=input_ids.shape[0], ) messages = np.asarray(preencode_prompts) tools_kwargs = np.array( [ { "tavily_search_tool": { "create_kwargs": {"ground_truth": "Today is sunny and tomorrow will be cloudy in Beijing."}, }, } ], dtype=object, ) index = np.array([0], 
dtype=object) prompts = DataProto( batch=prompt_dict, non_tensor_batch={"raw_prompt": messages, "tools_kwargs": tools_kwargs, "index": index} ) return prompts @pytest.fixture def mock_rollout(self, search_rollout_config, qwen_tokenizer, qwen_model_config): """Mock the rollout instance with sampling_params initialized.""" tool_schema = [ { "type": "function", "function": { "name": "tavily_search_tool", "description": "A powerful web search tool...", "parameters": { "type": "object", "properties": { "what_is_your_intent": { "type": "string", "description": "Describe your intent for using Tavily", }, "query": {"type": "string", "description": "Search query"}, "search_depth": { "type": "string", "description": "The depth of the search ('basic' or 'advanced')", }, "topic": { "type": "string", "description": "The category of the search ('general' or 'news')", }, "days": { "type": "integer", "description": "Number of days back to include in search results (only for " "'news' topic)", }, "time_range": { "type": "string", "description": "Time range for results ('day', 'week', 'month', 'year', 'd', " "'w', 'm', 'y')", }, "include_domains": { "type": "array", "description": "List of domains to specifically include in search results", }, "exclude_domains": { "type": "array", "description": "List of domains to specifically exclude from search results", }, "include_answer": { "type": "boolean", "description": "Whether to include an answer summary generated by an LLM", }, "include_raw_content": { "type": "boolean", "description": "Whether to include the cleaned and parsed HTML content of each result", }, "include_images": { "type": "boolean", "description": "Whether to include images from search results", }, "include_image_descriptions": { "type": "boolean", "description": "Whether to include descriptions with images", }, "max_results": { "type": "integer", "description": "Maximum number of results to return (5-20)", }, "async_search": { "type": "boolean", "description": "Whether to perform the search asynchronously", }, }, "required": ["what_is_your_intent", "query"], }, "strict": False, }, } ] with ( patch.object(MCPClientManager, "fetch_tool_schemas", return_value=tool_schema), patch.object(SGLangRollout, "_init_distributed_env", return_value=None), patch.object(SGLangRollout, "_init_inference_engine", return_value=None), patch.object(SGLangRollout, "_init_sampling_params", return_value=None), ): rollout = SGLangRollout( actor_module="", config=search_rollout_config, processing_class=qwen_tokenizer, model_hf_config=qwen_model_config, ) rollout.sampling_params = { "n": 1, "max_new_tokens": search_rollout_config.response_length, "presence_penalty": 0.0, "frequency_penalty": 0.0, "repetition_penalty": 1.0, } return rollout def test_tools_registration(self, mock_rollout): assert len(mock_rollout._tool_schemas) != 0 assert "tavily_search_tool" in mock_rollout._tool_map.keys() from verl.tools.mcp_search_tool import MCPSearchTool assert isinstance(mock_rollout._tool_map["tavily_search_tool"], MCPSearchTool) # depend on the tokenizer assert mock_rollout._tool_call_parser_type == "qwen25" def test_rollout_req_creation(self, mock_rollout, search_data_proto): req_list = mock_rollout._preprocess_prompt_to_async_rollout_requests(search_data_proto, n=1) assert len(req_list) == 1 assert req_list[0].state == AsyncRolloutRequestStateEnum.PENDING assert len(req_list[0].tool_schemas) == 1 def test_over_size_case(self, mock_rollout, search_data_proto, search_data): mock_rollout.config.multi_turn.max_assistant_turns = 1 
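        # Over-size scenario: the engine mock reports finish_reason {"type": "length"}
        # (the response hit the token budget), so the request must finalize after a
        # single assistant turn, with no tool invoked and an empty reward-score list.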
req = mock_rollout._preprocess_prompt_to_async_rollout_requests(search_data_proto, n=1)[0] req = MagicMock(wraps=req, spec=AsyncRolloutRequest) req.finalize = MagicMock() req_list = [req] _, expect_turn_array, _ = search_data # here we mock a meta info with 'length'. indicate the response is truncate mock_rollout._handle_engine_call = MagicMock() future = asyncio.Future() future.set_result( { "text": expect_turn_array[0], "meta_info": { "id": "d1188d81cba840359df5b352b344bc8e", "finish_reason": {"type": "length", "length": 3000}, "prompt_tokens": 132, "completion_tokens": 100, "cached_tokens": 0, "e2e_latency": 2.23543, }, } ) mock_rollout._handle_engine_call.return_value = future mock_rollout._tp_rank = 0 loop = asyncio.get_event_loop() output_req_list = loop.run_until_complete( asyncio.gather( *[mock_rollout._async_rollout_a_request(req, True, False) for req in req_list], ) ) assert len(output_req_list) == 1 output_req = output_req_list[0] assert output_req.state == AsyncRolloutRequestStateEnum.COMPLETED assert output_req.reward_scores.get("tavily_search_tool") == [] # we should only have two message, one for prompt, second for response. assert len(output_req.messages) == 2 assert output_req.messages[1] == Message( role="assistant", content=expect_turn_array[0], tool_calls=None, ) @patch.object(MCPSearchTool, "execute", new_callable=AsyncMock) def test_tool_call_basic_case(self, mock_execute, mock_rollout, search_data_proto, search_data): _, expect_turn_array, tool_return_array = search_data # Mock search tool execution to return predefined responses mock_execute.side_effect = [(msg, 0.0, {"status": "success"}) for msg in tool_return_array] mock_rollout.config.multi_turn.max_assistant_turns = 10 req = mock_rollout._preprocess_prompt_to_async_rollout_requests(search_data_proto, n=1)[0] req = MagicMock(wraps=req, spec=AsyncRolloutRequest) req.finalize = MagicMock() req_list = [req] mock_rollout._handle_engine_call = MagicMock() futures = [asyncio.Future() for i in expect_turn_array] for idx, (i, turn) in enumerate(zip(futures, expect_turn_array, strict=True)): i.set_result( { "text": turn, "meta_info": { "id": "d1188d81cba840359df5b352b344bc8e", "finish_reason": {"type": "tool_calls" if idx < len(expect_turn_array) - 1 else "stop"}, "prompt_tokens": len(turn), "completion_tokens": 100, "cached_tokens": 0, "e2e_latency": 2.23543, }, } ) if idx < len(expect_turn_array) - 1: assert mock_rollout._function_call_parser.has_tool_call(turn) assert mock_rollout._function_call_parser.parse_non_stream(turn) mock_rollout._handle_engine_call.side_effect = futures mock_rollout._tp_rank = 0 loop = asyncio.get_event_loop() output_req_list = loop.run_until_complete( asyncio.gather(*[mock_rollout._async_rollout_a_request(req, True, False) for req in req_list]) ) # Verify conversation completed successfully with proper tool usage output_req = output_req_list[0] assert output_req.state == AsyncRolloutRequestStateEnum.COMPLETED assert "tavily_search_tool" in output_req.metrics assert output_req.metrics["tavily_search_tool"][0]["status"] == "success" assert mock_execute.await_count == 2 assert len(output_req.messages) == 6 # Verify tool response messages contain expected content search_counter = 0 for msg in output_req.messages: if msg.role == "tool": assert msg.content == tool_return_array[search_counter] search_counter += 1 assert search_counter == 2 @patch.object(MCPSearchTool, "execute", new_callable=AsyncMock) def test_tool_call_batch_case(self, mock_execute, mock_rollout, search_data_proto, search_data): 
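        # Batch variant: 100 deep copies of the same request replay an identical
        # two-tool-call script; the patched _handle_engine_call below resolves each
        # request's next scripted future by batch_data_id, exercising concurrent
        # rollouts without a real inference engine.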
_, expect_turn_array, tool_return_array = search_data # Mock tool execution for large batch (100 requests * 2 calls each) mock_execute.side_effect = [ (tool_return_array[0], 0.0, {"status": "success"}), (tool_return_array[1], 0.0, {"status": "success"}), ] * 100 mock_rollout.config.multi_turn.max_assistant_turns = 10 base_req = mock_rollout._preprocess_prompt_to_async_rollout_requests(search_data_proto, n=1)[0] req_nums = 100 req_list = [] req_turns_map = {} req_turns_counter = {} for i in range(req_nums): tmp_req = deepcopy(base_req) tmp_req.batch_data_id = i tmp_req.request_id = i req_list.append(MagicMock(wraps=tmp_req, spec=AsyncRolloutRequest)) futures = [asyncio.Future() for _ in expect_turn_array] for idx, (fut, turn) in enumerate(zip(futures, expect_turn_array, strict=True)): fut.set_result( { "text": turn, "meta_info": { "id": "dummy", "finish_reason": {"type": "tool_calls" if idx < len(expect_turn_array) - 1 else "stop"}, "prompt_tokens": len(turn), "completion_tokens": 100, }, } ) req_turns_map[i] = futures req_turns_counter[i] = 0 async def hacked_handle_engine_call(self, _req: AsyncRolloutRequest, *_args, **_kwargs): fut = req_turns_map[_req.batch_data_id][req_turns_counter[_req.batch_data_id]] req_turns_counter[_req.batch_data_id] += 1 return await fut with patch.object(SGLangRollout, "_handle_engine_call", new=hacked_handle_engine_call): mock_rollout._tp_rank = 0 loop = asyncio.get_event_loop() output_req_list = loop.run_until_complete( asyncio.gather(*[mock_rollout._async_rollout_a_request(r, True, False) for r in req_list]) ) # Verify all requests completed successfully assert len(output_req_list) == req_nums for out_req in output_req_list: assert out_req.state == AsyncRolloutRequestStateEnum.COMPLETED assert "tavily_search_tool" in out_req.metrics for metric in out_req.metrics["tavily_search_tool"]: assert metric["status"] == "success" assert len(out_req.messages) == 6 assert sum(1 for m in out_req.messages if m.role == "tool") == 2 assert mock_execute.await_count == 2 * req_nums ================================================ FILE: verl_rl/tests/workers/rollout/test_sglang_async_rollout_multimodal_delta.py ================================================ # Copyright 2025 Amazon.com, Inc. or its affiliates # Copyright 2023-2024 SGLang Team # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
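# This test checks that add_tool_response_messages() tokenizes only the *delta*
# introduced by each new image turn. When every image is resized to a common shape,
# each image turn should add the same number of tokens. A sketch of the invariant
# asserted below (illustrative):
#     len_before = req.input_ids.shape[-1]
#     req.add_tool_response_messages(processor, [{"image": [img], "text": "..."}])
#     delta = req.input_ids.shape[-1] - len_before  # constant across equal-sized images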
import pytest from verl.utils.dataset.vision_utils import process_image from verl.utils.tokenizer import hf_processor from verl.workers.rollout.schemas import ( AsyncRolloutRequest, AsyncRolloutRequestStateEnum, TokenizationSanityCheckModeEnum, ) def _test_add_tool_response_messages_image_delta(processor, image_list, description_list, resize_image=False): assert len(image_list) == len(description_list) # Get the smallest dimensions across all images processed_images = [] for img_url in image_list: img = process_image(img_url) processed_images.append(img) min_width = min(img.size[0] for img in processed_images) min_height = min(img.size[1] for img in processed_images) min_size = (min_width, min_height) if resize_image: processed_images_resized = [] for img in processed_images: img = img.resize(min_size) processed_images_resized.append(img) processed_images = processed_images_resized # Initial message history system_prompt = ( "You will be provided with an image. Describe this image and then generate a new image for the next round" ) messages = [ { "role": "system", "content": system_prompt, }, { "role": "user", "content": [ {"type": "text", "text": "Here is the first image provided: "}, {"type": "image", "image": [processed_images[0]]}, ], }, ] # Initial multi_modal_data with one image multi_modal_data = {"image": [processed_images[0]], "video": []} # Minimal required fields for AsyncRolloutRequest req = AsyncRolloutRequest( batch_data_id=0, request_id="test-req-1", state=AsyncRolloutRequestStateEnum.PENDING, messages=messages, multi_modal_keys=["image", "video"], multi_modal_data=multi_modal_data.copy(), tool_schemas=[], tools_kwargs={}, interaction_kwargs={}, input_ids=None, prompt_ids=None, response_ids=None, attention_mask=None, prompt_attention_mask=None, response_attention_mask=None, position_ids=None, prompt_position_ids=None, response_position_ids=None, loss_mask=None, prompt_loss_mask=None, response_loss_mask=None, reward_scores={}, max_prompt_len=8192, max_response_len=8192, max_model_len=16384, metrics={}, use_inference_chat_template=True, tokenization_sanity_check_mode=TokenizationSanityCheckModeEnum.STRICT, generation_prompt_ids=None, base_conv_wo_gen_prompt_end_pos=0, base_conv_with_gen_prompt_end_pos=0, processing_class=processor, ) prev_generated_len = 0 # Add First Assistant Message and first tool response message(image) for idx, img in enumerate(processed_images): if idx == 0: continue _ = req.get_generation_prompt_ids(processor) req.add_assistant_message(processor, content=description_list[idx - 1]) before_tool_call_len = req.input_ids.shape[-1] req.add_tool_response_messages(processor, [{"image": [img], "text": "Here is the new image you requested: "}]) after_tool_call_len = req.input_ids.shape[-1] if prev_generated_len == 0: prev_generated_len = after_tool_call_len - before_tool_call_len else: if resize_image: assert after_tool_call_len - before_tool_call_len == prev_generated_len assert req.multi_modal_data["image"] == processed_images[: idx + 1] _ = req.get_generation_prompt_ids(processor) req.add_assistant_message(processor, content=description_list[-1]) messages = [msg.model_dump() for msg in req.messages] tools = [tool.model_dump() for tool in req.tool_schemas] if req.tool_schemas else None full_prompt_info = req._handle_apply_chat_template( processor, messages, multi_modal_data=req.multi_modal_data, tools=tools, add_generation_prompt=False, tokenize=True, return_dict=True, ) full_prompt_ids = full_prompt_info["input_ids"] assert 
full_prompt_ids.eq(req.input_ids).all()

    # We must use dict(full_prompt_info) to convert BatchFeature values to a new dict
    # because np.array() only keeps the keys for BatchFeature.
    full_prompt_multi_modal_inputs = full_prompt_info.copy()
    full_prompt_multi_modal_inputs.pop("input_ids", None)
    full_prompt_multi_modal_inputs.pop("attention_mask", None)
    for key in full_prompt_multi_modal_inputs:
        assert full_prompt_multi_modal_inputs[key].eq(req.multi_modal_inputs[key]).all()


@pytest.mark.skipif(
    hf_processor("Qwen/Qwen2.5-VL-3B-Instruct") is None,
    reason="Processor not available for Qwen/Qwen2.5-VL-3B-Instruct",
)
def test_add_tool_response_messages_image_delta():
    processor = hf_processor("Qwen/Qwen2.5-VL-3B-Instruct")

    # From the Qwen2.5-VL-3B-Instruct HF example
    img_1_url = {"image": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg"}
    img_1_description = "A woman sits on the beach at sunset, smiling as she shares a high five with her large dog."
    # GitHub Logo
    img_2_url = {"image": "https://github.githubassets.com/assets/GitHub-Mark-ea2971cee799.png"}
    img_2_description = "A GitHub Logo image"
    # Octocat
    img_3_url = {"image": "https://octodex.github.com/images/orderedlistocat.png"}
    img_3_description = "An Octocat image"

    image_list = [img_1_url, img_2_url, img_3_url]
    description_list = [img_1_description, img_2_description, img_3_description]
    _test_add_tool_response_messages_image_delta(processor, image_list, description_list, resize_image=False)


@pytest.mark.skipif(
    hf_processor("Qwen/Qwen2.5-VL-3B-Instruct") is None,
    reason="Processor not available for Qwen/Qwen2.5-VL-3B-Instruct",
)
def test_add_tool_response_messages_image_delta_resize_image():
    processor = hf_processor("Qwen/Qwen2.5-VL-3B-Instruct")

    # From the Qwen2.5-VL-3B-Instruct HF example
    img_1_url = {"image": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg"}
    img_1_description = "A woman sits on the beach at sunset, smiling as she shares a high five with her large dog."
    # GitHub Logo
    img_2_url = {"image": "https://github.githubassets.com/assets/GitHub-Mark-ea2971cee799.png"}
    img_2_description = "A GitHub Logo image"
    # Octocat
    img_3_url = {"image": "https://octodex.github.com/images/orderedlistocat.png"}
    img_3_description = "An Octocat image"

    image_list = [img_1_url, img_2_url, img_3_url]
    description_list = [img_1_description, img_2_description, img_3_description]
    _test_add_tool_response_messages_image_delta(processor, image_list, description_list, resize_image=True)


================================================
FILE: verl_rl/tests/workers/rollout/test_sglang_async_rollout_search_tools.py
================================================
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright 2023-2024 SGLang Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from tests/workers/rollout/test_sglang_async_rollout_sf_tools.py

import asyncio
from copy import deepcopy
from unittest.mock import AsyncMock, MagicMock, patch

import numpy as np
import pytest
from tensordict import TensorDict
from transformers import AutoConfig, AutoTokenizer
from utils_sglang import get_rollout_config, prepare_inputs

from verl.protocol import DataProto
from verl.tools.schemas import (
    OpenAIFunctionParametersSchema,
    OpenAIFunctionPropertySchema,
    OpenAIFunctionSchema,
    OpenAIFunctionToolSchema,
)
from verl.tools.search_tool import SearchTool
from verl.workers.rollout.schemas import AsyncRolloutRequest, AsyncRolloutRequestStateEnum, Message
from verl.workers.rollout.sglang_rollout.sglang_rollout import SGLangRollout

DEFAULT_USER_CONTENT_PREFIX = (
    "Answer the given question. You must conduct reasoning inside <think> and </think> "
    "first every time you get new information. After reasoning, if you find you lack "
    "some knowledge, you can call a search engine by <search> query </search> "
    "and it will return the top searched results between <information> and "
    "</information>. You can search as many times as you want. If you find no "
    "further external knowledge needed, you can directly provide the answer inside "
    "<answer> and </answer>, without detailed illustrations. For example, "
    "<answer> Beijing </answer>. Question: "
)
user_content = DEFAULT_USER_CONTENT_PREFIX.rstrip("\n") + "How's the weather lately?"


def get_search_messages():
    user_prompt = {
        "role": "user",
        "content": user_content,
    }
    expect_turn_0_msg = {
        "role": "assistant",
        "content": "Let me search the web.",
        "tool_calls": [{"type": "function", "function": {"name": "search", "arguments": {"query": "today's weather"}}}],
    }
    expect_turn_1_msg = {
        "role": "assistant",
        "content": "Let me search again.",
        "tool_calls": [
            {"type": "function", "function": {"name": "search", "arguments": {"query": "tomorrow's weather"}}}
        ],
    }
    expect_turn_2_msg = {
        "role": "assistant",
        "content": "Today is sunny and tomorrow will be cloudy in Beijing.",
    }

    # Mock search tool responses
    tool_return_0_msg = {"role": "tool", "content": "Today's weather in Beijing is sunny."}
    tool_return_1_msg = {"role": "tool", "content": "Tomorrow's weather in Beijing is cloudy."}

    user_prompts = [user_prompt]
    expect_turn_array = [expect_turn_0_msg, expect_turn_1_msg, expect_turn_2_msg]
    tool_return_array = [tool_return_0_msg, tool_return_1_msg]
    return user_prompts, expect_turn_array, tool_return_array


class TestRolloutWithSearchTools:
    @pytest.fixture
    def qwen_tokenizer(self):
        local_model_path = "Qwen/Qwen2.5-0.5B"
        tokenizer = AutoTokenizer.from_pretrained(local_model_path, padding_side="left")
        tokenizer.pad_token = tokenizer.eos_token
        return tokenizer  # we only need this for tokenizer

    @pytest.fixture
    def qwen_model_config(self):
        local_model_path = "Qwen/Qwen2.5-0.5B"
        config = AutoConfig.from_pretrained(local_model_path)
        return config

    @pytest.fixture
    def search_data(self, qwen_tokenizer):
        user_prompt, expect_turn_array, tool_return_array = get_search_messages()
        prompts = [[message] for message in user_prompt]
        preencode_turn_array = [
            qwen_tokenizer.apply_chat_template([turn], tokenize=False, add_generation_prompt=False)
            for turn in expect_turn_array
        ]
        preencode_tool_return_array = [
            qwen_tokenizer.apply_chat_template([turn], tokenize=False, add_generation_prompt=True)
            for turn in tool_return_array
        ]
        return prompts, preencode_turn_array, preencode_tool_return_array

    @pytest.fixture
    def search_rollout_config(self):
        max_prompt_length = 4096
        max_response_length = 3000
        dtype = "bfloat16"
        tensor_parallel_size = 1
        tool_path =
"./resource/tool_configs/search_tool_config" rollout_config = get_rollout_config( max_response_length, max_prompt_length, dtype, tensor_parallel_size, tool_path ) return rollout_config @pytest.fixture def search_data_proto(self, search_data, qwen_tokenizer): preencode_prompts, _, _ = search_data prompts = [ qwen_tokenizer.apply_chat_template(message, tokenize=False, add_generation_prompt=True) for message in preencode_prompts ] input_ids, attention_mask, position_ids = prepare_inputs(qwen_tokenizer, prompts, 1000) prompt_dict = TensorDict( { "input_ids": input_ids, "attention_mask": attention_mask, "position_ids": position_ids, }, batch_size=input_ids.shape[0], ) messages = np.asarray(preencode_prompts) tools_kwargs = np.array( [ { "search": { "create_kwargs": { "ground_truth": "Today is sunny and tomorrow will be cloudy in Beijing.", "data_source": "searchR1_nq", }, }, } ], dtype=object, ) index = np.array([0], dtype=object) prompts = DataProto( batch=prompt_dict, non_tensor_batch={"raw_prompt": messages, "tools_kwargs": tools_kwargs, "index": index} ) return prompts @pytest.fixture def mock_rollout(self, search_rollout_config, qwen_tokenizer, qwen_model_config): """Mock the rollout instance with sampling_params initialized.""" with ( patch.object(SGLangRollout, "_init_distributed_env", return_value=None), patch.object(SGLangRollout, "_init_inference_engine", return_value=None), patch.object(SGLangRollout, "_init_sampling_params", return_value=None), ): rollout = SGLangRollout( actor_module="", config=search_rollout_config, processing_class=qwen_tokenizer, model_hf_config=qwen_model_config, ) rollout.sampling_params = { "n": 1, "max_new_tokens": search_rollout_config.response_length, "presence_penalty": 0.0, "frequency_penalty": 0.0, "repetition_penalty": 1.0, } return rollout @patch.object(SGLangRollout, "_init_distributed_env", return_value=None) @patch.object(SGLangRollout, "_init_inference_engine", return_value=None) @patch.object(SGLangRollout, "_init_sampling_params", return_value=None) def test_tools_registration( self, mock_env, mock_engine, mock_sampling, search_rollout_config, qwen_tokenizer, qwen_model_config ): rollout = SGLangRollout( actor_module="", config=search_rollout_config, processing_class=qwen_tokenizer, model_hf_config=qwen_model_config, ) assert len(rollout._tool_schemas) == 1 assert "search" in rollout._tool_map.keys() from verl.tools.search_tool import SearchTool assert isinstance(rollout._tool_map["search"], SearchTool) # depend on the tokenizer assert rollout._tool_call_parser_type == "qwen25" @patch.object(SGLangRollout, "_init_distributed_env", return_value=None) @patch.object(SGLangRollout, "_init_inference_engine", return_value=None) @patch.object(SGLangRollout, "_init_sampling_params", return_value=None) def test_rollout_req_creation( self, mock_env, mock_engine, mock_sampling, search_rollout_config, qwen_tokenizer, qwen_model_config, search_data_proto, ): rollout = SGLangRollout( actor_module="", config=search_rollout_config, processing_class=qwen_tokenizer, model_hf_config=qwen_model_config, ) req_list = rollout._preprocess_prompt_to_async_rollout_requests(search_data_proto, n=1) assert len(req_list) == 1 assert req_list[0].state == AsyncRolloutRequestStateEnum.PENDING assert len(req_list[0].tool_schemas) == 1 print(type(req_list[0].tool_schemas[0])) assert req_list[0].tool_schemas[0] == OpenAIFunctionToolSchema( type="function", function=OpenAIFunctionSchema( name="search", description="Searches the web for relevant information based on the given 
query.", parameters=OpenAIFunctionParametersSchema( type="object", properties={ "query_list": OpenAIFunctionPropertySchema( type="array", description="A list of fully-formed semantic queries. The tool will return search " "results for each query.", items={"type": "string"}, ) }, required=["query_list"], ), strict=False, ), ) def test_over_size_case(self, mock_rollout, search_data_proto, search_data): mock_rollout.config.multi_turn.max_assistant_turns = 1 req = mock_rollout._preprocess_prompt_to_async_rollout_requests(search_data_proto, n=1)[0] req = MagicMock(wraps=req, spec=AsyncRolloutRequest) req.finalize = MagicMock() req_list = [req] _, expect_turn_array, _ = search_data mock_rollout._handle_engine_call = MagicMock() future = asyncio.Future() future.set_result( { "text": expect_turn_array[0], "meta_info": { "id": "d1188d81cba840359df5b352b344bc8e", "finish_reason": {"type": "length", "length": 3000}, "prompt_tokens": 132, "completion_tokens": 100, "cached_tokens": 0, "e2e_latency": 2.23543, }, } ) mock_rollout._handle_engine_call.return_value = future mock_rollout._tp_rank = 0 loop = asyncio.get_event_loop() output_req_list = loop.run_until_complete( asyncio.gather( *[mock_rollout._async_rollout_a_request(req, True, False) for req in req_list], ) ) assert len(output_req_list) == 1 output_req = output_req_list[0] assert output_req.state == AsyncRolloutRequestStateEnum.COMPLETED assert output_req.reward_scores.get("search") == [] assert len(output_req.messages) == 2 assert output_req.messages[1] == Message( role="assistant", content=expect_turn_array[0], tool_calls=None, ) @patch.object(SearchTool, "execute", new_callable=AsyncMock) def test_tool_call_basic_case(self, mock_execute, mock_rollout, search_data_proto, search_data): _, expect_turn_array, tool_return_array = search_data # Mock search tool execution to return predefined responses mock_execute.side_effect = [(msg, 0.0, {"status": "success"}) for msg in tool_return_array] mock_rollout.config.multi_turn.max_assistant_turns = 10 mock_rollout._tool_map["search"].retrieval_service_url = "mock://dummy" req = mock_rollout._preprocess_prompt_to_async_rollout_requests(search_data_proto, n=1)[0] req = MagicMock(wraps=req, spec=AsyncRolloutRequest) req.finalize = MagicMock() req_list = [req] mock_rollout._handle_engine_call = MagicMock() futures = [asyncio.Future() for i in expect_turn_array] for idx, (i, turn) in enumerate(zip(futures, expect_turn_array, strict=True)): i.set_result( { "text": turn, "meta_info": { "id": "d1188d81cba840359df5b352b344bc8e", "finish_reason": {"type": "tool_calls" if idx < len(expect_turn_array) - 1 else "stop"}, "prompt_tokens": len(turn), "completion_tokens": 100, "cached_tokens": 0, "e2e_latency": 2.23543, }, } ) if idx < len(expect_turn_array) - 1: assert mock_rollout._function_call_parser.has_tool_call(turn) assert mock_rollout._function_call_parser.parse_non_stream(turn) mock_rollout._handle_engine_call.side_effect = futures mock_rollout._tp_rank = 0 loop = asyncio.get_event_loop() output_req_list = loop.run_until_complete( asyncio.gather(*[mock_rollout._async_rollout_a_request(req, True, False) for req in req_list]) ) # Verify conversation completed successfully with proper tool usage output_req = output_req_list[0] assert output_req.state == AsyncRolloutRequestStateEnum.COMPLETED assert "search" in output_req.metrics assert output_req.metrics["search"][0]["status"] == "success" assert mock_execute.await_count == 2 assert len(output_req.messages) == 6 # user + 3*assistant + 2*tool_call # Verify tool 
response messages contain expected content search_counter = 0 for msg in output_req.messages: if msg.role == "tool": assert msg.content == tool_return_array[search_counter] search_counter += 1 assert search_counter == 2 @patch.object(SearchTool, "execute", new_callable=AsyncMock) def test_tool_call_batch_case(self, mock_execute, mock_rollout, search_data_proto, search_data): _, expect_turn_array, tool_return_array = search_data # Mock tool execution for large batch (100 requests * 2 calls each) mock_execute.side_effect = [ (tool_return_array[0], 0.0, {"status": "success"}), (tool_return_array[1], 0.0, {"status": "success"}), ] * 100 mock_rollout.config.multi_turn.max_assistant_turns = 10 mock_rollout._tool_map["search"].retrieval_service_url = "mock://dummy" base_req = mock_rollout._preprocess_prompt_to_async_rollout_requests(search_data_proto, n=1)[0] req_nums = 100 req_list = [] req_turns_map = {} req_turns_counter = {} for i in range(req_nums): tmp_req = deepcopy(base_req) tmp_req.batch_data_id = i tmp_req.request_id = i req_list.append(MagicMock(wraps=tmp_req, spec=AsyncRolloutRequest)) futures = [asyncio.Future() for _ in expect_turn_array] for idx, (fut, turn) in enumerate(zip(futures, expect_turn_array, strict=True)): fut.set_result( { "text": turn, "meta_info": { "id": "dummy", "finish_reason": {"type": "tool_calls" if idx < len(expect_turn_array) - 1 else "stop"}, "prompt_tokens": len(turn), "completion_tokens": 100, }, } ) req_turns_map[i] = futures req_turns_counter[i] = 0 async def hacked_handle_engine_call(self, _req: AsyncRolloutRequest, *_args, **_kwargs): fut = req_turns_map[_req.batch_data_id][req_turns_counter[_req.batch_data_id]] req_turns_counter[_req.batch_data_id] += 1 return await fut with patch.object(SGLangRollout, "_handle_engine_call", new=hacked_handle_engine_call): mock_rollout._tp_rank = 0 loop = asyncio.get_event_loop() output_req_list = loop.run_until_complete( asyncio.gather(*[mock_rollout._async_rollout_a_request(r, True, False) for r in req_list]) ) # Verify all requests completed successfully assert len(output_req_list) == req_nums for out_req in output_req_list: assert out_req.state == AsyncRolloutRequestStateEnum.COMPLETED assert "search" in out_req.metrics for metric in out_req.metrics["search"]: assert metric["status"] == "success" assert len(out_req.messages) == 6 # user + 3 assistant + 2 tool assert sum(1 for m in out_req.messages if m.role == "tool") == 2 assert mock_execute.await_count == 2 * req_nums ================================================ FILE: verl_rl/tests/workers/rollout/test_sglang_async_rollout_sf_tools.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
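# ---------------------------------------------------------------------------
# [Editor's note] The rollout tests in this directory all drive the multi-turn
# loop the same way: `_handle_engine_call` is replaced by a mock whose
# `side_effect` is a list of already-resolved asyncio.Future objects, one per
# expected assistant turn. A minimal, self-contained sketch of that pattern
# (all names below are illustrative, not part of verl's API):
def _sketch_preresolved_future_mock():
    import asyncio
    from unittest.mock import MagicMock

    async def run_turns(engine_call, n_turns):
        # each awaited call consumes the next pre-resolved future
        return [await engine_call() for _ in range(n_turns)]

    turns = ["turn-0", "turn-1", "turn-2"]
    futures = []
    for text in turns:
        fut = asyncio.Future()
        fut.set_result({"text": text})  # resolve up front; await returns immediately
        futures.append(fut)
    engine_call = MagicMock(side_effect=futures)
    loop = asyncio.get_event_loop()
    out = loop.run_until_complete(run_turns(engine_call, len(turns)))
    assert [o["text"] for o in out] == turns
# ---------------------------------------------------------------------------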
# noqa
import asyncio
import time
from copy import deepcopy
from functools import wraps
from unittest.mock import MagicMock, patch

import numpy as np
import pytest
import ray
from tensordict import TensorDict
from torch.testing._internal.common_distributed import MultiProcessTestCase
from transformers import AutoConfig, AutoTokenizer
from utils_sglang import (
    get_rollout_config,
    prepare_inputs,
)

from verl.protocol import DataProto
from verl.tools.sandbox_fusion_tools import TokenBucketWorker
from verl.tools.schemas import (
    OpenAIFunctionParametersSchema,
    OpenAIFunctionPropertySchema,
    OpenAIFunctionSchema,
    OpenAIFunctionToolSchema,
)
from verl.workers.rollout.schemas import AsyncRolloutRequest, AsyncRolloutRequestStateEnum, Message
from verl.workers.rollout.sglang_rollout.sglang_rollout import SGLangRollout

sandbox_url = ""


def get_sandbox_fusion_messages():
    user_prompt = {
        "role": "user",
        "content": """ Solve the following problem step by step. You now have the ability to selectively write executable Python code to enhance your reasoning process. \n\n**user question:**\nThere are 152 students at Dala High School. Assume the following: \n- 100 students take a Math class \n- 94 students take a Science class \n- 57 students take an English class \n- 73 students take a Math class and a Science class \n- 24 students take a Math class and an English class \n- 27 students take a Science class and an English class \n- 22 students take a Math class and a Science class and an English class\n \nHow many students take neither a Math class nor a Science class nor an English class?\n\nRemember to place the final answer in the last part using the format: \n\n\\boxed{'The final answer goes here.'}\n """,
    }
    expect_turn_0_msg = {
        "role": "assistant",
        "content": """ Okay, so I need to find out how many students at Dala High School are not taking any of the three classes: Math, Science, or English. The total number of students is 152. Let me see... I remember this is a problem about sets and maybe using the principle of inclusion-exclusion. Let me recall how that works.\n\nFirst, the inclusion-exclusion principle for three sets says that the total number of students taking at least one of the classes is equal to the sum of the numbers in each individual class, minus the sum of the numbers in each pair of classes, plus the number in all three classes. Then, subtract that total from the overall number of students to get those not taking any of the three. \n\nLet me write that down step by step. Let M be the set of students taking Math, S for Science, and E for English. Then:\n\nTotal in at least one class = |M ∪ S ∪ E| = |M| + |S| + |E| - |M ∩ S| - |M ∩ E| - |S ∩ E| + |M ∩ S ∩ E|\n\nGiven the numbers:\n\n|M| = 100\n\n|S| = 94\n\n|E| = 57\n\n|M ∩ S| = 73\n\n|M ∩ E| = 24\n\n|S ∩ E| = 27\n\n|M ∩ S ∩ E| = 22\n\nSo plugging these into the formula:\n\nTotal = 100 + 94 + 57 - 73 - 24 - 27 + 22\n\nLet me compute that step by step using code to ensure accuracy.\n """,
        "tool_calls": [
            {
                "type": "function",
                "function": {
                    "name": "code_interpreter",
                    "arguments": {
                        "code": """M = 100\\nS = 94\\nE = 57\\nM_S = 73\\nM_E = 24\\nS_E = 27\\nM_S_E = 22\\n\\ntotal_in_any = M + S + E - M_S - M_E - S_E + M_S_E\\nstudents_neither = 152 - total_in_any\\nprint(students_neither)""",
                    },
                },
            }
        ],
    }
    tool_return_0_msg = {
        "role": "tool",
        "content": """3""",
    }
    expect_turn_1_msg = {
        "role": "assistant",
        "content": """ The code returns 3, indicating that 3 students take none of the classes.
To verify, I'll break down each category to ensure the total matches:\n\n- Only Math and Science: 73 - 22 = 51\n- Only Math and English: 24 - 22 = 2\n- Only Science and English: 27 - 22 = 5\n- Only Math: 100 - 51 - 2 - 22 = 25\n- Only Science: 94 - 51 - 5 - 22 = 16\n- Only English: 57 - 2 - 5 - 22 = 28\n\nSumming all categories:\n """,
        "tool_calls": [
            {
                "type": "function",
                "function": {
                    "name": "code_interpreter",
                    "arguments": {
                        "code": """only_M_S = 73 - 22\\nonly_M_E = 24 - 22\\nonly_S_E = 27 - 22\\n\\nonly_M = 100 - only_M_S - only_M_E - 22\\nonly_S = 94 - only_M_S - only_S_E - 22\\nonly_E = 57 - only_M_E - only_S_E - 22\\n\\ntotal_verify = only_M + only_S + only_E + only_M_S + only_M_E + only_S_E + 22\\nprint(total_verify)""",
                    },
                },
            }
        ],
    }
    tool_return_1_msg = {
        "role": "tool",
        "content": """149""",
    }
    expect_turn_2_msg = {
        "role": "assistant",
        "content": """ The verification total is 149, so students not taking any classes are 152 - 149 = 3, confirming the initial result.\n\n\n\\boxed{3}\n """,
    }

    user_prompts = [user_prompt]
    expect_turn_array = [expect_turn_0_msg, expect_turn_1_msg, expect_turn_2_msg]
    tool_return_array = [tool_return_0_msg, tool_return_1_msg]
    return user_prompts, expect_turn_array, tool_return_array


def skip_if_valid_sandbox(url):
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            if url == "" or url is None:
                pytest.skip("No valid sandbox url provided")
            return func(*args, **kwargs)

        return wrapper

    return decorator


class TestRolloutWithTools:
    @pytest.fixture
    def qwen_tokenizer(self):
        local_model_path = "Qwen/Qwen2.5-0.5B"
        tokenizer = AutoTokenizer.from_pretrained(local_model_path, padding_side="left")
        tokenizer.pad_token = tokenizer.eos_token
        return tokenizer

    # we only need this for the tokenizer
    @pytest.fixture
    def qwen_model_config(self):
        local_model_path = "Qwen/Qwen2.5-0.5B"
        config = AutoConfig.from_pretrained(local_model_path)
        return config

    @pytest.fixture
    def sandbox_fusion_data(self, qwen_tokenizer):
        user_prompt, expect_turn_array, tool_return_array = get_sandbox_fusion_messages()
        prompts = [[message] for message in user_prompt]
        preencode_turn_array = [
            qwen_tokenizer.apply_chat_template([turn], tokenize=False, add_generation_prompt=False)
            for turn in expect_turn_array
        ]
        preencode_tool_return_array = [
            qwen_tokenizer.apply_chat_template([turn], tokenize=False, add_generation_prompt=True)
            for turn in tool_return_array
        ]
        return prompts, preencode_turn_array, preencode_tool_return_array

    @pytest.fixture
    def sandbox_fusion_rollout_config(self):
        max_prompt_length = 1024
        max_response_length = 1024
        dtype = "bfloat16"
        tensor_parallel_size = 1
        tool_path = "./resource/tool_configs/sandbox_fusion_tool_config"
        rollout_config = get_rollout_config(
            max_response_length, max_prompt_length, dtype, tensor_parallel_size, tool_path
        )
        return rollout_config

    @pytest.fixture
    def sandbox_data_proto(self, sandbox_fusion_data, qwen_tokenizer):
        preencode_prompts, _, _ = sandbox_fusion_data
        prompts = [
            qwen_tokenizer.apply_chat_template(message, tokenize=False, add_generation_prompt=True)
            for message in preencode_prompts
        ]
        input_ids, attention_mask, position_ids = prepare_inputs(qwen_tokenizer, prompts, 1000)
        prompt_dict = TensorDict(
            {
                "input_ids": input_ids,
                "attention_mask": attention_mask,
                "position_ids": position_ids,
            },
            batch_size=input_ids.shape[0],
        )
        messages = np.asarray(preencode_prompts)
        tools_kwargs = np.array(
            [
                {
                    "code_interpreter": {
                        "create_kwargs": {"ground_truth": "test-solution-str"},
                    },
                }
            ],
            dtype=object,
        )
        index = np.array([0], dtype=object)
        prompts = DataProto(
            batch=prompt_dict, non_tensor_batch={"raw_prompt": messages, "tools_kwargs": tools_kwargs, "index": index}
        )
        return prompts

    @pytest.fixture
    def mock_rollout(self, sandbox_fusion_rollout_config, qwen_tokenizer, qwen_model_config):
        """Mock the rollout instance"""
        with patch.object(SGLangRollout, "_init_distributed_env", return_value=None), patch.object(
            SGLangRollout, "_init_inference_engine", return_value=None
        ), patch.object(SGLangRollout, "_init_sampling_params", return_value=None):
            rollout = SGLangRollout(
                actor_module="",
                config=sandbox_fusion_rollout_config,
                processing_class=qwen_tokenizer,
                model_hf_config=qwen_model_config,
            )
            # set default sampling_params
            rollout.sampling_params = {
                "n": 1,
                "max_new_tokens": sandbox_fusion_rollout_config.response_length,
                "presence_penalty": 0.0,
                "frequency_penalty": 0.0,
                "repetition_penalty": 1.0,
            }
            return rollout

    def test_tools_registration(self, mock_rollout):
        """Test tool registration functionality"""
        assert len(mock_rollout._tool_schemas) == 1
        assert "code_interpreter" in mock_rollout._tool_map.keys()
        from verl.tools.sandbox_fusion_tools import SandboxFusionTool

        assert isinstance(mock_rollout._tool_map["code_interpreter"], SandboxFusionTool)
        assert mock_rollout._tool_call_parser_type == "qwen25"

    def test_rollout_req_creation(self, mock_rollout, sandbox_data_proto):
        """Test request creation functionality"""
        req_list = mock_rollout._preprocess_prompt_to_async_rollout_requests(sandbox_data_proto, n=1)
        assert len(req_list) == 1
        assert req_list[0].state == AsyncRolloutRequestStateEnum.PENDING
        assert len(req_list[0].tool_schemas) == 1
        print(type(req_list[0].tool_schemas[0]))
        assert req_list[0].tool_schemas[0] == OpenAIFunctionToolSchema(
            type="function",
            function=OpenAIFunctionSchema(
                name="code_interpreter",
                description="A tool for executing code.",
                parameters=OpenAIFunctionParametersSchema(
                    type="object",
                    properties={
                        "code": OpenAIFunctionPropertySchema(
                            type="string",
                            description="The code to execute.",
                            enum=None,
                        )
                    },
                    required=["code"],
                ),
                strict=False,
            ),
        )

    def test_over_size_case(self, mock_rollout, sandbox_data_proto, sandbox_fusion_data):
        """Test over-size response truncation case"""
        mock_rollout.config.multi_turn.max_assistant_turns = 1
        req = mock_rollout._preprocess_prompt_to_async_rollout_requests(sandbox_data_proto, n=1)[0]
        req = MagicMock(wraps=req, spec=AsyncRolloutRequest)
        req.finalize = MagicMock()
        req_list = [req]
        _, expect_turn_array, tool_return_array = sandbox_fusion_data
        # here we mock meta_info with finish_reason 'length' to indicate the response was truncated
        mock_rollout._handle_engine_call = MagicMock()
        future = asyncio.Future()
        future.set_result(
            {
                "text": expect_turn_array[0],
                "meta_info": {
                    "id": "d1188d81cba840359df5b352b344bc8e",
                    "finish_reason": {"type": "length", "length": 1024},
                    "prompt_tokens": 132,
                    "completion_tokens": 100,
                    "cached_tokens": 0,
                    "e2e_latency": 9.9304039478302,
                },
            }
        )
        mock_rollout._handle_engine_call.return_value = future
        mock_rollout._tp_rank = 0
        loop = asyncio.get_event_loop()
        output_req_list = loop.run_until_complete(
            asyncio.gather(
                *[mock_rollout._async_rollout_a_request(req, True, False) for req in req_list],
            )
        )
        assert len(output_req_list) == 1
        output_req = output_req_list[0]
        assert output_req.state == AsyncRolloutRequestStateEnum.COMPLETED
        assert output_req.reward_scores.get("code_interpreter") == []
        # we should only have two messages: one for the prompt, one for the response.
        assert len(output_req.messages) == 2
        assert output_req.messages[1] == Message(
            role="assistant",
            content=expect_turn_array[0],
            tool_calls=None,
        )

    @skip_if_valid_sandbox(sandbox_url)
    def test_tool_call_basic_case(self, mock_rollout, sandbox_data_proto, sandbox_fusion_data):
        """Test basic tool call case"""
        mock_rollout.config.multi_turn.max_assistant_turns = 10
        mock_rollout._tool_map["code_interpreter"].sandbox_fusion_url = sandbox_url
        req = mock_rollout._preprocess_prompt_to_async_rollout_requests(sandbox_data_proto, n=1)[0]
        req = MagicMock(wraps=req, spec=AsyncRolloutRequest)
        req.finalize = MagicMock()
        req_list = [req]
        _, expect_turn_array, tool_return_array = sandbox_fusion_data
        # mock one engine response per expected turn: 'tool_calls' for intermediate turns, 'stop' for the last
        mock_rollout._handle_engine_call = MagicMock()
        futures = [asyncio.Future() for _ in expect_turn_array]
        for idx, (fut, turn) in enumerate(zip(futures, expect_turn_array, strict=True)):
            fut.set_result(
                {
                    "text": turn,
                    "meta_info": {
                        "id": "d1188d81cba840359df5b352b344bc8e",
                        "finish_reason": {"type": "tool_calls" if idx < len(expect_turn_array) - 1 else "stop"},
                        "prompt_tokens": len(turn),
                        "completion_tokens": 100,
                        "cached_tokens": 0,
                        "e2e_latency": 9.9304039478302,
                    },
                }
            )
            if idx < len(expect_turn_array) - 1:
                assert mock_rollout._function_call_parser.has_tool_call(turn)
                assert mock_rollout._function_call_parser.parse_non_stream(turn)
        mock_rollout._handle_engine_call.side_effect = futures
        mock_rollout._tp_rank = 0
        loop = asyncio.get_event_loop()
        output_req_list = loop.run_until_complete(
            asyncio.gather(
                *[mock_rollout._async_rollout_a_request(req, True, False) for req in req_list],
            )
        )
        assert len(output_req_list) == 1
        output_req = output_req_list[0]
        assert output_req.state == AsyncRolloutRequestStateEnum.COMPLETED
        # here we verify whether the code sandbox is executed correctly
        assert output_req.metrics == {"code_interpreter": ["3", "149"]}
        assert mock_rollout._handle_engine_call.call_count == 3
        assert len(output_req.messages) == 6  # user + 3*assistant + 2*tool_call
        code_counter = 0
        for msg in output_req.messages:
            if msg.role == "tool":
                assert msg.content == tool_return_array[code_counter]
                code_counter += 1
        assert code_counter == 2

    @skip_if_valid_sandbox(sandbox_url)
    def test_tool_call_batch_case(self, mock_rollout, sandbox_data_proto, sandbox_fusion_data):
        """Test batch tool call case"""
        mock_rollout.config.multi_turn.max_assistant_turns = 10
        mock_rollout._tool_map["code_interpreter"].sandbox_fusion_url = sandbox_url
        req = mock_rollout._preprocess_prompt_to_async_rollout_requests(sandbox_data_proto, n=1)[0]
        req_nums = 100
        req_list = []
        req_turns_counter = {}
        # this map should be a Map[id: List[Future]]
        req_turns_map = {}
        _, expect_turn_array, tool_return_array = sandbox_fusion_data
        for i in range(req_nums):
            _temp_req = deepcopy(req)
            _temp_req.batch_data_id = i
            _temp_req.request_id = i
            req_list.append(MagicMock(wraps=_temp_req, spec=AsyncRolloutRequest))
            futures = [asyncio.Future() for _ in expect_turn_array]
            for idx, (fut, turn) in enumerate(zip(futures, expect_turn_array, strict=True)):
                fut.set_result(
                    {
                        "text": turn,
                        "meta_info": {
                            "id": "d1188d81cba840359df5b352b344bc8e",
                            "finish_reason": {"type": "tool_calls" if idx < len(expect_turn_array) - 1 else "stop"},
                            "prompt_tokens": len(turn),
                            "completion_tokens": 100,
                            "cached_tokens": 0,
                            "e2e_latency": 9.9304039478302,
                        },
                    }
                )
                if idx < len(expect_turn_array) - 1:
                    assert mock_rollout._function_call_parser.has_tool_call(turn)
                    assert mock_rollout._function_call_parser.parse_non_stream(turn)
            req_turns_map[_temp_req.batch_data_id] = futures
            req_turns_counter[_temp_req.batch_data_id] = 0

        async def hacked_handle_engine_call(
            self, _req: AsyncRolloutRequest, do_sample: bool, is_validate: bool, **kwargs
        ):
            # pop the next pre-resolved future for this request
            result = req_turns_map[_req.batch_data_id][req_turns_counter[_req.batch_data_id]]
            req_turns_counter[_req.batch_data_id] += 1
            return await result

        with patch.object(SGLangRollout, "_handle_engine_call", new=hacked_handle_engine_call):
            mock_rollout._tp_rank = 0
            loop = asyncio.get_event_loop()
            output_req_list = loop.run_until_complete(
                asyncio.gather(
                    *[mock_rollout._async_rollout_a_request(req, True, False) for req in req_list],
                )
            )
            assert len(output_req_list) == req_nums
            # FIGURE out how to count this
            # assert rollout._handle_engine_call.call_count == 3 * req_nums
            for output_req in output_req_list:
                assert output_req.state == AsyncRolloutRequestStateEnum.COMPLETED
                # here we verify whether the code sandbox is executed correctly
                assert output_req.metrics == {"code_interpreter": ["3", "149"]}
                assert len(output_req.messages) == 6  # user + 3*assistant + 2*tool_call
                code_counter = 0
                for msg in output_req.messages:
                    if msg.role == "tool":
                        code_counter += 1
                assert code_counter == 2

    def test_sampling_params_functionality(self, mock_rollout):
        """Test sampling_params functionality"""
        # test basic copy functionality
        copied_params = mock_rollout.sampling_params.copy()
        assert copied_params == mock_rollout.sampling_params
        assert copied_params is not mock_rollout.sampling_params
        # test parameter update
        copied_params.update({"temperature": 0.8, "top_p": 0.9})
        assert copied_params["temperature"] == 0.8
        assert copied_params["top_p"] == 0.9
        # ensure original parameters are not modified
        assert "temperature" not in mock_rollout.sampling_params
        assert "top_p" not in mock_rollout.sampling_params


class RayMultiProcessTestCase(MultiProcessTestCase):
    def setUp(self):
        super().setUp()
        ray.init(ignore_reinit_error=True)
        print("init_single cluster")
        self._spawn_processes()

    def tearDown(self):
        print("tearDown_single cluster")
        ray.shutdown()


@ray.remote
class TestActor:
    def __init__(self, rank, world_size):
        self._world_size = world_size
        self._rank = rank
        self.rank_list = []
        self.time_list = []

    def record_rank(self, rank):
        self.rank_list.append(rank)

    def get_rank(self):
        return self._rank

    def ping(self):
        return True

    def record_execution_time(self, time):
        self.time_list.append(time)

    def get_time(self, timeout):
        import time

        now = time.time()
        while time.time() - now < timeout:
            # for start and end time
            if len(self.time_list) == self._world_size * 2:
                self.time_list.sort()
                return self.time_list[-1] - self.time_list[0]
            else:
                time.sleep(1)
                continue
        return False

    def verify_rank(self):
        import time

        now = time.time()
        while time.time() - now < 10:
            if len(self.rank_list) == self._world_size:
                print(self.rank_list)
                self.rank_list.sort()
                for i in range(self._world_size):
                    if self.rank_list[i] != i:
                        return False
                return True
            else:
                time.sleep(1)
                continue
        return False


class TestRayGlobalActorCase(RayMultiProcessTestCase):
    @property
    def world_size(self) -> int:
        # for DP = 8
        return 2

    def test_basic_multi_process_init(self):
        ray.init("auto", namespace="test", ignore_reinit_error=True)
        handle = TestActor.remote(self.rank, self.world_size)
        re = ray.get(handle.get_rank.remote())
        assert re == self.rank, f"rank not match: {re} != {self.rank}"

    # def test_global_actor(self):
    #     ray.init("auto",namespace="test",ignore_reinit_error=True)
    #     handle = TestActor.options(get_if_exists=True,name="test-actor").remote(self.rank,self.world_size)
    #     handle.record_rank.remote(self.rank)
    #     # since
test actor's concurrency is 1, we need to wait for all processes to finish # time.sleep(5) # assert ray.get(handle.ping.remote()) == True # make sure actor handle is valid # if self.rank == 0: # assert ray.get(handle.verify_rank.remote()) == True # else: # # get_actor use weak_ref, so we need to make sure the actor is not garbage collected # time.sleep(10) class TestSingleNodeRateLimiterCase(RayMultiProcessTestCase): @property def world_size(self) -> int: return 1 def test_rate_limiter(self): ray.init("auto", namespace="test", ignore_reinit_error=True) from verl.tools.sandbox_fusion_tools import PoolMode, init_execution_pool # exec_worker = ExecutionWorker.options(max_concurrency=10).remote(enable_global_rate_limit=True, rate_limit=3) exec_worker = init_execution_pool( num_workers=10, enable_global_rate_limit=True, rate_limit=3, mode=PoolMode.ThreadMode ) center = TestActor.options(get_if_exists=True, name="test-actor").remote(self.rank, self.world_size) ray.get(exec_worker.ping.remote()) def fn(i): import time time.sleep(3) return i start = time.time() tasks = [exec_worker.execute.remote(fn, i) for i in range(6)] loop = asyncio.get_event_loop() results = loop.run_until_complete(asyncio.gather(*tasks)) end = time.time() duration = end - start center.record_execution_time.remote(start) center.record_execution_time.remote(end) print(f"Total time: {duration:.2f} seconds for rank: {self.rank}") assert results == list(range(6)) # we have 6 task with rate limit of 3, therefore we need at least 2 round: 3*2=6 seconds assert duration > 6 assert duration < 10 def test_rotten_execution(self): ray.init("auto", namespace="test", ignore_reinit_error=True) from verl.tools.sandbox_fusion_tools import PoolMode, init_execution_pool # exec_worker = ExecutionWorker.options(max_concurrency=10).remote(enable_global_rate_limit=True, rate_limit=6) exec_worker = init_execution_pool( num_workers=10, enable_global_rate_limit=True, rate_limit=6, mode=PoolMode.ThreadMode ) ray.get(exec_worker.ping.remote()) def fn(i): if i == 10: raise Exception("test") else: return i tasks = [exec_worker.execute.remote(fn, i) for i in range(20)] loop = asyncio.get_event_loop() results = loop.run_until_complete(asyncio.gather(*tasks)) expect_result = [None] + list(range(10)) + list(range(11, 20)) sorted_data = sorted(results, key=lambda x: (x is not None, x)) assert sorted_data == expect_result, f"results: {results}, expect_result: {expect_result}" rate_limiter = TokenBucketWorker.options(name="rate-limiter", get_if_exists=True).remote() rate = ray.get(rate_limiter.get_current_count.remote()) assert rate == 0, f"rate: {rate}" class TestMultiNodeRateLimiterCase(RayMultiProcessTestCase): @property def world_size(self) -> int: return 2 def test_rate_limiter(self): ray.init("auto", namespace="test", ignore_reinit_error=True) from verl.tools.sandbox_fusion_tools import PoolMode, init_execution_pool # exec_worker = ExecutionWorker.options(max_concurrency=10).remote(enable_global_rate_limit=True, rate_limit=6) exec_worker = init_execution_pool( num_workers=10, enable_global_rate_limit=True, rate_limit=6, mode=PoolMode.ThreadMode ) center = TestActor.options(get_if_exists=True, name="test-actor").remote(self.rank, self.world_size) ray.get(exec_worker.ping.remote()) def fn(i): import time time.sleep(2) return i start = time.time() tasks = [exec_worker.execute.remote(fn, i) for i in range(6)] loop = asyncio.get_event_loop() results = loop.run_until_complete(asyncio.gather(*tasks)) end = time.time() duration = end - start 
center.record_execution_time.remote(start) center.record_execution_time.remote(end) print(f"Total time: {duration:.2f} seconds for rank: {self.rank}") assert results == list(range(6)) time.sleep(5) if self.rank == 0: total_cost = ray.get(center.get_time.remote(10)) print(f"for total cost: {total_cost}") # # we have 6 task each node * 2node = 12 task, each task take 2 second. # with rate limit of 6, # therefore we need at least 2 round: 12/6*2=4 seconds assert total_cost > 4, total_cost else: time.sleep(10) ================================================ FILE: verl_rl/tests/workers/rollout/test_sglang_async_rollout_w_interaction.py ================================================ # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ usage: torchrun --standalone --nnodes=1 \ --nproc_per_node=2 $(which pytest) \ -s test_sglang_async_rollout_w_interaction.py """ import numpy as np import torch from tensordict import TensorDict from torch.distributed.device_mesh import init_device_mesh from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp import MixedPrecision, ShardingStrategy from utils_sglang import ( are_lists_similar, clean_torchelastic_env, generate_hf_output, get_rollout_config, initialize_global_process_group, load_tokenizer_and_model, prepare_inputs, ) from verl import DataProto from verl.workers.rollout.sglang_rollout.sglang_rollout import SGLangRollout from verl.workers.sharding_manager.fsdp_sglang import FSDPSGLangShardingManager def test_async_sglang_rollout_w_interaction(): assert torch.cuda.device_count() >= 2 initialize_global_process_group() clean_torchelastic_env() max_prompt_length = 32 max_response_length = 16 dtype = "bfloat16" tensor_parallel_size = 2 local_model_path = "Qwen/Qwen2.5-0.5B" tokenizer, actor_model = load_tokenizer_and_model(local_model_path) preencode_prompts = [ [{"role": "user", "content": prompt, "tool_calls": None}] for prompt in [ "Who won the Champions League in 2019?", "The founder of Apple is", "What's the best way to learn python?", ] ] interaction_kwargs = [ {"name": "gsm8k", "query": "Who won the Champions League in 2019?", "ground_truth": "Real Madrid"}, {"name": "gsm8k", "query": "The founder of Apple is", "ground_truth": "Steve Jobs"}, {"name": "gsm8k", "query": "What's the best way to learn python?", "ground_truth": "Learn python from scratch"}, ] prompts = [ tokenizer.apply_chat_template(message, tokenize=False, add_generation_prompt=True) for message in preencode_prompts ] input_ids, attention_mask, position_ids = prepare_inputs(tokenizer, prompts, max_prompt_length) hf_response_tokens = generate_hf_output(actor_model, input_ids, attention_mask, tokenizer, max_response_length) fsdp_device_mesh = init_device_mesh("cuda", mesh_shape=(tensor_parallel_size,), mesh_dim_names=("fsdp",)) inference_device_mesh_cpu = init_device_mesh( "cpu", mesh_shape=(1, tensor_parallel_size, 1), mesh_dim_names=("dp", "infer_tp", "pp") ) fsdp_model = 
FSDP( actor_model, use_orig_params=True, device_id=fsdp_device_mesh["fsdp"].get_local_rank(), mixed_precision=MixedPrecision(param_dtype=getattr(torch, dtype)), sharding_strategy=ShardingStrategy.FULL_SHARD, device_mesh=fsdp_device_mesh, ) # Create a temporary interaction config file for testing import tempfile from omegaconf import OmegaConf interaction_config = { "interaction": [ {"name": "gsm8k", "class_name": "verl.interactions.gsm8k_interaction.Gsm8kInteraction", "config": {}} ] } with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as f: OmegaConf.save(interaction_config, f.name) interaction_config_path = f.name rollout_config = get_rollout_config( max_response_length, max_prompt_length, dtype, tensor_parallel_size, None, interaction_config_path ) rollout = SGLangRollout( actor_module=local_model_path, config=rollout_config, processing_class=tokenizer, model_hf_config=actor_model.config, ) rollout_sharding_manager = FSDPSGLangShardingManager( module=fsdp_model, inference_engine=rollout._engine, model_config=actor_model.config, rollout_config=rollout_config, full_params=True, device_mesh=inference_device_mesh_cpu, ) with rollout_sharding_manager: prompt_dict = TensorDict( { "input_ids": input_ids, "attention_mask": attention_mask, "position_ids": position_ids, }, batch_size=input_ids.shape[0], ) print(f"preprocessed {input_ids.shape=}") messages = np.asarray(preencode_prompts) prompts = DataProto( batch=prompt_dict, non_tensor_batch={"raw_prompt": messages, "interaction_kwargs": np.asarray(interaction_kwargs)}, ) prompts.meta_info.update( { "eos_token_id": tokenizer.eos_token_id, "pad_token_id": tokenizer.pad_token_id, } ) prompts = rollout_sharding_manager.preprocess_data(prompts) # log_gpu_memory_usage("Before generating sequences", logger=None) output = rollout.generate_sequences(prompts=prompts) print(f"generated {output.batch['responses'].shape=}") # log_gpu_memory_usage("After generating sequences", logger=None) output = rollout_sharding_manager.postprocess_data(output) print(f"postprocessed {output.batch['responses'].shape=}") sglang_output = output.to("cpu") sglang_response_tokens = tokenizer.batch_decode(sglang_output.batch["responses"]) print(f"hf response: {hf_response_tokens}") print(f"sglang response: {sglang_response_tokens}") assert are_lists_similar(hf_response_tokens, sglang_response_tokens) print("SGLang w interaction Test Passed!") # Clean up temporary config file import os os.unlink(interaction_config_path) torch.distributed.barrier() torch.distributed.destroy_process_group() if __name__ == "__main__": test_async_sglang_rollout_w_interaction() ================================================ FILE: verl_rl/tests/workers/rollout/test_sglang_async_rollout_w_tools.py ================================================ # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
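# ---------------------------------------------------------------------------
# [Editor's note] The interaction test above materializes its interaction
# registry as a temporary YAML file via OmegaConf and passes the file path
# through the rollout config. A sketch of the pattern in isolation (names such
# as "pkg.MyInteraction" are placeholders, not real verl classes):
def _sketch_temp_interaction_config():
    import os
    import tempfile

    from omegaconf import OmegaConf

    cfg = {"interaction": [{"name": "gsm8k", "class_name": "pkg.MyInteraction", "config": {}}]}
    with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as f:
        OmegaConf.save(cfg, f.name)  # serialize the dict as YAML at the temp path
        path = f.name
    try:
        loaded = OmegaConf.load(path)
        assert loaded.interaction[0].name == "gsm8k"
    finally:
        os.unlink(path)  # delete=False means the caller must clean up
# ---------------------------------------------------------------------------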
""" usage: torchrun --standalone --nnodes=1 \ --nproc_per_node=2 $(which pytest) \ -s test_sglang_async_rollout_w_tools.py """ import numpy as np import torch from tensordict import TensorDict from torch.distributed.device_mesh import init_device_mesh from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp import MixedPrecision, ShardingStrategy from utils_sglang import ( are_lists_similar, clean_torchelastic_env, generate_hf_output, get_rollout_config, initialize_global_process_group, load_tokenizer_and_model, prepare_inputs, ) from verl import DataProto from verl.workers.rollout.sglang_rollout.sglang_rollout import SGLangRollout from verl.workers.sharding_manager.fsdp_sglang import FSDPSGLangShardingManager def test_async_sglang_rollout_w_tool(): assert torch.cuda.device_count() >= 2 initialize_global_process_group() clean_torchelastic_env() max_prompt_length = 32 max_response_length = 16 dtype = "bfloat16" tensor_parallel_size = 2 local_model_path = "Qwen/Qwen2.5-0.5B" tokenizer, actor_model = load_tokenizer_and_model(local_model_path) preencode_prompts = [ [{"role": "user", "content": prompt, "tool_calls": None}] for prompt in [ "Who won the Champions League in 2019?", "The founder of Apple is", "What's the best way to learn python?", ] ] prompts = [ tokenizer.apply_chat_template(message, tokenize=False, add_generation_prompt=True) for message in preencode_prompts ] input_ids, attention_mask, position_ids = prepare_inputs(tokenizer, prompts, max_prompt_length) hf_response_tokens = generate_hf_output(actor_model, input_ids, attention_mask, tokenizer, max_response_length) fsdp_device_mesh = init_device_mesh("cuda", mesh_shape=(tensor_parallel_size,), mesh_dim_names=("fsdp",)) inference_device_mesh_cpu = init_device_mesh( "cpu", mesh_shape=(1, tensor_parallel_size, 1), mesh_dim_names=("dp", "infer_tp", "pp") ) fsdp_model = FSDP( actor_model, use_orig_params=True, device_id=fsdp_device_mesh["fsdp"].get_local_rank(), mixed_precision=MixedPrecision(param_dtype=getattr(torch, dtype)), sharding_strategy=ShardingStrategy.FULL_SHARD, device_mesh=fsdp_device_mesh, ) rollout_config = get_rollout_config( max_response_length, max_prompt_length, dtype, tensor_parallel_size, "./resource/tool_configs/sandbox_fusion_tool_config", ) rollout = SGLangRollout( actor_module=local_model_path, config=rollout_config, processing_class=tokenizer, model_hf_config=actor_model.config, ) rollout_sharding_manager = FSDPSGLangShardingManager( module=fsdp_model, inference_engine=rollout._engine, model_config=actor_model.config, rollout_config=rollout_config, full_params=True, device_mesh=inference_device_mesh_cpu, ) with rollout_sharding_manager: prompt_dict = TensorDict( { "input_ids": input_ids, "attention_mask": attention_mask, "position_ids": position_ids, }, batch_size=input_ids.shape[0], ) print(f"preprocessed {input_ids.shape=}") messages = np.asarray(preencode_prompts) prompts = DataProto( batch=prompt_dict, non_tensor_batch={ "raw_prompt": messages, "tools_kwargs": np.array([{}] * input_ids.shape[0], dtype=object), }, ) prompts.meta_info.update( { "eos_token_id": tokenizer.eos_token_id, "pad_token_id": tokenizer.pad_token_id, } ) prompts = rollout_sharding_manager.preprocess_data(prompts) # log_gpu_memory_usage("Before generating sequences", logger=None) output = rollout.generate_sequences(prompts=prompts) print(f"generated {output.batch['responses'].shape=}") # log_gpu_memory_usage("After generating sequences", logger=None) output = 
rollout_sharding_manager.postprocess_data(output) print(f"postprocessed {output.batch['responses'].shape=}") sglang_output = output.to("cpu") sglang_response_tokens = tokenizer.batch_decode(sglang_output.batch["responses"]) print(f"hf response: {hf_response_tokens}") print(f"sglang response: {sglang_response_tokens}") assert are_lists_similar(hf_response_tokens, sglang_response_tokens) print("SGLang w tool Test Passed!") torch.distributed.barrier() torch.distributed.destroy_process_group() if __name__ == "__main__": test_async_sglang_rollout_w_tool() ================================================ FILE: verl_rl/tests/workers/rollout/test_sglang_multi_interaction.py ================================================ # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Test for multi-interaction support in SGLangRollout. usage: torchrun --standalone --nnodes=1 \ --nproc_per_node=2 $(which pytest) \ -s test_sglang_multi_interaction.py """ import os import tempfile from unittest.mock import MagicMock, patch import torch import torch.distributed as dist from omegaconf import DictConfig, OmegaConf from transformers import AutoTokenizer from verl.interactions.base import BaseInteraction from verl.workers.rollout.sglang_rollout.sglang_rollout import SGLangRollout class MockInteraction(BaseInteraction): """Mock interaction for testing.""" def __init__(self, config): super().__init__(config) self.started_instances = set() async def start_interaction(self, instance_id=None, **kwargs): if instance_id is None: instance_id = "mock_instance" self.started_instances.add(instance_id) return instance_id async def generate_response(self, instance_id, messages, **kwargs): return False, f"Mock response from {self.name}", 1.0, {} def create_mock_config_with_multi_interactions(): """Create a mock configuration with multiple interactions.""" # Create temporary interaction config file interaction_config = { "interaction": [ { "name": "mock_agent1", "class_name": "tests.workers.rollout.test_sglang_multi_interaction.MockInteraction", "config": {"param1": "value1"}, }, { "name": "mock_agent2", "class_name": "tests.workers.rollout.test_sglang_multi_interaction.MockInteraction", "config": {"param2": "value2"}, }, ] } with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as f: OmegaConf.save(interaction_config, f.name) interaction_config_path = f.name # Create mock SGLangRollout config config = DictConfig( { "multi_turn": { "interaction_config_path": interaction_config_path, "tool_config_path": None, "enable": True, "max_assistant_turns": 5, "max_user_turns": 3, "use_inference_chat_template": True, "tokenization_sanity_check_mode": "off", }, "prompt_length": 32, "response_length": 16, "max_model_len": 512, "dtype": "bfloat16", "gpu_memory_utilization": 0.8, "load_format": "dummy", "enforce_eager": True, "free_cache_engine": False, "calculate_log_probs": False, "tensor_model_parallel_size": 1, "n": 1, "val_kwargs": {"top_k": 1, 
"top_p": 1.0, "temperature": 0.0}, } ) return config, interaction_config_path def setup_distributed(): """Initialize distributed environment if not already initialized.""" if not dist.is_initialized(): dist.init_process_group(backend="nccl" if torch.cuda.is_available() else "gloo") class TestSGLangMultiInteraction: def test_initialize_multiple_interactions(self): """Test that SGLangRollout can initialize multiple interactions.""" setup_distributed() config, temp_config_path = create_mock_config_with_multi_interactions() try: # Mock SGLang engine and initialization methods like the reference test with ( patch.object(SGLangRollout, "_init_distributed_env", return_value=None), patch.object(SGLangRollout, "_init_inference_engine", return_value=None), patch.object(SGLangRollout, "_init_sampling_params", return_value=None), ): # Create a real tokenizer like the reference test tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B", padding_side="left") tokenizer.pad_token = tokenizer.eos_token # Mock model config mock_model_config = MagicMock() mock_model_config.max_position_embeddings = 2048 # since this is a mock, we can set any rope scaling config # to test the rope_scaling logic at the same time of this test mock_model_config.rope_scaling = { "factor": 4.0, "original_max_position_embeddings": 32768, "type": "yarn", } # Create SGLangRollout instance rollout = SGLangRollout( actor_module="mock_model", config=config, processing_class=tokenizer, model_hf_config=mock_model_config, port=None, trust_remote_code=False, device_mesh=None, ) # Check that interactions were initialized assert len(rollout.interaction_map) == 2 assert "mock_agent1" in rollout.interaction_map assert "mock_agent2" in rollout.interaction_map # Use class name comparison instead of isinstance for multi-process compatibility assert rollout.interaction_map["mock_agent1"].__class__.__name__ == "MockInteraction" assert rollout.interaction_map["mock_agent2"].__class__.__name__ == "MockInteraction" # Also check that they are instances of BaseInteraction (which should work across processes) assert isinstance(rollout.interaction_map["mock_agent1"], BaseInteraction) assert isinstance(rollout.interaction_map["mock_agent2"], BaseInteraction) # Check that names were set correctly assert rollout.interaction_map["mock_agent1"].name == "mock_agent1" assert rollout.interaction_map["mock_agent2"].name == "mock_agent2" finally: os.unlink(temp_config_path) def test_interaction_selection_by_name(self): """Test that interactions are selected by name from interaction_kwargs.""" setup_distributed() config, temp_config_path = create_mock_config_with_multi_interactions() try: with ( patch.object(SGLangRollout, "_init_distributed_env", return_value=None), patch.object(SGLangRollout, "_init_inference_engine", return_value=None), patch.object(SGLangRollout, "_init_sampling_params", return_value=None), ): tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B", padding_side="left") tokenizer.pad_token = tokenizer.eos_token mock_model_config = MagicMock() mock_model_config.max_position_embeddings = 2048 mock_model_config.rope_scaling = { "factor": 4.0, "original_max_position_embeddings": 32768, "type": "yarn", } rollout = SGLangRollout( actor_module="mock_model", config=config, processing_class=tokenizer, model_hf_config=mock_model_config, port=None, trust_remote_code=False, device_mesh=None, ) # Test interaction selection logic from verl.workers.rollout.schemas import AsyncRolloutRequest, AsyncRolloutRequestStateEnum, Message # Create a mock 
request with specific interaction name req = AsyncRolloutRequest( request_id="test_req", state=AsyncRolloutRequestStateEnum.INTERACTING, messages=[Message(role="user", content="test message")], interaction_kwargs={"name": "mock_agent2", "test_param": "value"}, input_ids=None, prompt_ids=None, response_ids=None, attention_mask=None, prompt_attention_mask=None, response_attention_mask=None, position_ids=None, prompt_position_ids=None, response_position_ids=None, loss_mask=None, prompt_loss_mask=None, response_loss_mask=None, reward_scores={}, max_prompt_len=32, max_response_len=16, max_model_len=512, use_inference_chat_template=True, tokenization_sanity_check_mode="disable", processing_class=tokenizer, ) # Test that the correct interaction is selected interaction_name = req.interaction_kwargs.get("name", "gsm8k") assert interaction_name == "mock_agent2" assert interaction_name in rollout.interaction_map selected_interaction = rollout.interaction_map[interaction_name] assert selected_interaction.name == "mock_agent2" finally: os.unlink(temp_config_path) def test_fallback_to_default_interaction(self): """Test fallback to default interaction when name is not specified.""" setup_distributed() # Create config with gsm8k interaction interaction_config = { "interaction": [ { "name": "gsm8k", "class_name": "tests.workers.rollout.test_sglang_multi_interaction.MockInteraction", "config": {}, } ] } with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as f: OmegaConf.save(interaction_config, f.name) interaction_config_path = f.name config = DictConfig( { "multi_turn": { "interaction_config_path": interaction_config_path, "tool_config_path": None, "enable": True, "max_assistant_turns": 5, "max_user_turns": 3, "use_inference_chat_template": True, "tokenization_sanity_check_mode": "disable", }, "prompt_length": 32, "response_length": 16, "max_model_len": 512, "dtype": "bfloat16", "gpu_memory_utilization": 0.8, "load_format": "dummy", "enforce_eager": True, "free_cache_engine": False, "calculate_log_probs": False, "tensor_model_parallel_size": 1, "n": 1, "val_kwargs": {"top_k": 1, "top_p": 1.0, "temperature": 0.0}, } ) try: with ( patch.object(SGLangRollout, "_init_distributed_env", return_value=None), patch.object(SGLangRollout, "_init_inference_engine", return_value=None), patch.object(SGLangRollout, "_init_sampling_params", return_value=None), ): tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B", padding_side="left") tokenizer.pad_token = tokenizer.eos_token mock_model_config = MagicMock() mock_model_config.max_position_embeddings = 2048 mock_model_config.rope_scaling = { "factor": 4.0, "original_max_position_embeddings": 32768, "type": "yarn", } rollout = SGLangRollout( actor_module="mock_model", config=config, processing_class=tokenizer, model_hf_config=mock_model_config, port=None, trust_remote_code=False, device_mesh=None, ) # Test that default interaction name works interaction_kwargs_without_name = {"test_param": "value"} default_name = interaction_kwargs_without_name.get("name", "gsm8k") assert default_name == "gsm8k" assert default_name in rollout.interaction_map finally: os.unlink(interaction_config_path) def test_error_on_missing_interaction(self): """Test that error is raised when requested interaction is not found.""" setup_distributed() config, temp_config_path = create_mock_config_with_multi_interactions() try: with ( patch.object(SGLangRollout, "_init_distributed_env", return_value=None), patch.object(SGLangRollout, "_init_inference_engine", 
return_value=None), patch.object(SGLangRollout, "_init_sampling_params", return_value=None), ): tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B", padding_side="left") tokenizer.pad_token = tokenizer.eos_token mock_model_config = MagicMock() mock_model_config.max_position_embeddings = 2048 mock_model_config.rope_scaling = { "factor": 4.0, "original_max_position_embeddings": 32768, "type": "yarn", } rollout = SGLangRollout( actor_module="mock_model", config=config, processing_class=tokenizer, model_hf_config=mock_model_config, port=None, trust_remote_code=False, device_mesh=None, ) # Test error when requesting non-existent interaction non_existent_name = "non_existent_interaction" assert non_existent_name not in rollout.interaction_map # This should raise ValueError in actual usage available_interactions = list(rollout.interaction_map.keys()) assert "mock_agent1" in available_interactions assert "mock_agent2" in available_interactions assert non_existent_name not in available_interactions finally: os.unlink(temp_config_path) def test_backward_compatibility_no_interaction_config(self): """Test backward compatibility when no interaction config is provided.""" setup_distributed() # Create config without interaction config config = DictConfig( { "multi_turn": { "interaction_config_path": None, "tool_config_path": None, "enable": True, "max_assistant_turns": 5, "max_user_turns": 3, "use_inference_chat_template": True, "tokenization_sanity_check_mode": "disable", }, "prompt_length": 32, "response_length": 16, "max_model_len": 512, "dtype": "bfloat16", "gpu_memory_utilization": 0.8, "load_format": "dummy", "enforce_eager": True, "free_cache_engine": False, "calculate_log_probs": False, "tensor_model_parallel_size": 1, "n": 1, "val_kwargs": {"top_k": 1, "top_p": 1.0, "temperature": 0.0}, } ) with ( patch.object(SGLangRollout, "_init_distributed_env", return_value=None), patch.object(SGLangRollout, "_init_inference_engine", return_value=None), patch.object(SGLangRollout, "_init_sampling_params", return_value=None), ): tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B", padding_side="left") tokenizer.pad_token = tokenizer.eos_token mock_model_config = MagicMock() mock_model_config.max_position_embeddings = 2048 mock_model_config.rope_scaling = { "factor": 4.0, "original_max_position_embeddings": 32768, "type": "yarn", } rollout = SGLangRollout( actor_module="mock_model", config=config, processing_class=tokenizer, model_hf_config=mock_model_config, port=None, trust_remote_code=False, device_mesh=None, ) # Check that no interactions were initialized assert len(rollout.interaction_map) == 0 ================================================ FILE: verl_rl/tests/workers/rollout/test_sglang_rollout_sharding_manager.py ================================================ # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
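# ---------------------------------------------------------------------------
# [Editor's note] The multi-interaction tests above resolve an interaction by
# the "name" key of interaction_kwargs, falling back to "gsm8k" and rejecting
# unknown names. The lookup logic in isolation (a sketch with dummy values,
# not verl's actual implementation):
def _sketch_interaction_lookup():
    interaction_map = {"gsm8k": "default-agent", "mock_agent2": "second-agent"}

    def select(interaction_kwargs):
        name = interaction_kwargs.get("name", "gsm8k")  # default when unspecified
        if name not in interaction_map:
            raise ValueError(f"Interaction '{name}' not found; available: {list(interaction_map)}")
        return interaction_map[name]

    assert select({"name": "mock_agent2"}) == "second-agent"
    assert select({}) == "default-agent"  # missing name resolves to the default
# ---------------------------------------------------------------------------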
import pytest import torch from verl.workers.rollout.sglang_rollout.utils import get_named_tensor_buckets _TENSOR_1MB = torch.zeros(512, 512) _BYTES_1MB = 1 << 20 @pytest.mark.parametrize( "named_tensors, bucket_size_mb, gt_groups", [ ( [("a", _TENSOR_1MB), ("b", _TENSOR_1MB)], 0.5 * _BYTES_1MB, [["a"], ["b"]], ), ( [("a", _TENSOR_1MB), ("b", _TENSOR_1MB)], 1 * _BYTES_1MB, [["a"], ["b"]], ), ( [("a", _TENSOR_1MB), ("b", _TENSOR_1MB)], 1.5 * _BYTES_1MB, [["a"], ["b"]], ), ( [("a", _TENSOR_1MB), ("b", _TENSOR_1MB)], 2 * _BYTES_1MB, [["a", "b"]], ), ], ) def test_get_named_tensor_buckets(named_tensors, bucket_size_mb, gt_groups: list[list[str]]): named_tensors_iter = iter(named_tensors) groups = list(get_named_tensor_buckets(named_tensors_iter, bucket_size_mb)) assert len(groups) == len(gt_groups) for group, gt_group in zip(groups, gt_groups, strict=True): assert len(group) == len(gt_group) for (name, _), (gt_name) in zip(group, gt_group, strict=True): assert name == gt_name ================================================ FILE: verl_rl/tests/workers/rollout/test_sglang_spmd.py ================================================ # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ usage: torchrun --standalone --nnodes=1 \ --nproc_per_node=2 $(which pytest) \ -s test_sglang_async_spmd.py """ import asyncio import torch from sglang.srt.entrypoints.engine import Engine from sglang.srt.utils import broadcast_pyobj from torch.distributed.device_mesh import init_device_mesh from utils_sglang import ( are_lists_similar, clean_torchelastic_env, generate_hf_output, initialize_global_process_group, load_tokenizer_and_model, prepare_inputs, ) def _pre_process_inputs(pad_token_id, prompt_token_ids: torch.Tensor): non_pad_index = torch.nonzero(prompt_token_ids != pad_token_id, as_tuple=False)[0][0] token_ids = prompt_token_ids[non_pad_index:].tolist() return token_ids def test_sglang_spmd(): assert torch.cuda.device_count() >= 2 initialize_global_process_group(spmd=True) clean_torchelastic_env() max_prompt_length = 16 max_response_length = 16 local_model_path = "Qwen/Qwen2.5-0.5B" tokenizer, actor_model = load_tokenizer_and_model(local_model_path) preencode_prompts = ["Who won the Champions League in 2019?", "The founder of Apple is", "What's your name?"] input_ids, attention_mask, _ = prepare_inputs(tokenizer, preencode_prompts, max_prompt_length) hf_response_tokens = generate_hf_output(actor_model, input_ids, attention_mask, tokenizer, max_response_length) tensor_parallel_size = 2 inference_device_mesh_cpu = init_device_mesh( "cpu", mesh_shape=(1, tensor_parallel_size, 1), mesh_dim_names=["dp", "tp", "pp"] ) tp_rank = inference_device_mesh_cpu["tp"].get_local_rank() if tp_rank == 0: llm = Engine( model_path=local_model_path, dtype="bfloat16", mem_fraction_static=0.5, enable_memory_saver=True, tp_size=inference_device_mesh_cpu["tp"].size(), attention_backend="fa3", ) input_ids = input_ids.cuda() idx_list = [] pad_token_id = 
tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id for i in range(input_ids.shape[0]): idx_list.append(_pre_process_inputs(pad_token_id, input_ids[i])) sampling_params = dict( n=1, temperature=0, top_p=1, top_k=-1, max_new_tokens=max_response_length, presence_penalty=0.0, frequency_penalty=0.0, repetition_penalty=1.0, skip_special_tokens=True, spaces_between_special_tokens=True, ignore_eos=False, ) loop = asyncio.get_event_loop() outputs = loop.run_until_complete(llm.async_generate(input_ids=idx_list, sampling_params=sampling_params)) else: outputs = None [outputs] = broadcast_pyobj( [outputs], rank=inference_device_mesh_cpu["tp"].get_local_rank(), src=inference_device_mesh_cpu["tp"].mesh[0].item(), dist_group=inference_device_mesh_cpu["tp"].get_group(), force_cpu_device=False, ) sglang_response_tokens = [output["text"] for output in outputs] print(f"sglang response: {sglang_response_tokens}") assert are_lists_similar(hf_response_tokens, sglang_response_tokens), "Strings differ more than 10%:\n" print("SPMD Test Passed!") torch.distributed.barrier() torch.distributed.destroy_process_group() ================================================ FILE: verl_rl/tests/workers/rollout/utils_sglang.py ================================================ # Copyright 2023-2024 SGLang Team # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
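# ---------------------------------------------------------------------------
# [Editor's note] test_sglang_spmd above strips left padding by locating the
# first non-pad position with torch.nonzero before handing raw token ids to
# the engine. The same indexing trick on a toy tensor (illustrative only):
def _sketch_strip_left_padding():
    import torch

    pad_id = 0
    ids = torch.tensor([0, 0, 0, 11, 12, 13])  # left-padded prompt
    first_real = torch.nonzero(ids != pad_id, as_tuple=False)[0][0]
    assert ids[first_real:].tolist() == [11, 12, 13]
# ---------------------------------------------------------------------------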
import os from datetime import timedelta import torch from omegaconf import OmegaConf from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig from verl.utils.model import compute_position_id_with_mask from verl.utils.torch_functional import pad_sequence_to_length # ====================== utils ====================== def levenshtein(s1, s2): m, n = len(s1), len(s2) dp = [[0] * (n + 1) for _ in range(m + 1)] for i in range(m + 1): dp[i][0] = i for j in range(n + 1): dp[0][j] = j for i in range(1, m + 1): for j in range(1, n + 1): cost = 0 if s1[i - 1] == s2[j - 1] else 1 dp[i][j] = min(dp[i - 1][j] + 1, dp[i][j - 1] + 1, dp[i - 1][j - 1] + cost) return dp[m][n] def are_lists_similar(a, b, threshold=10): if len(a) != len(b): print("The lists are of different lengths.") return False total_length = 0 total_diff = 0 for s1, s2 in zip(a, b, strict=True): max_len = max(len(s1), len(s2)) total_length += max_len total_diff += levenshtein(s1, s2) percentage_difference = (total_diff / total_length) * 100 print(f"Total difference: {percentage_difference:.2f}%") return percentage_difference <= threshold def initialize_global_process_group(timeout_second=36000, spmd=False): import torch.distributed if not torch.distributed.is_initialized(): # Check if already initialized print("Initializing process group...") torch.distributed.init_process_group(timeout=timedelta(seconds=timeout_second)) else: print("Process group already initialized.") local_rank = int(os.environ["LOCAL_RANK"]) rank = int(os.environ["RANK"]) world_size = int(os.environ["WORLD_SIZE"]) torch.cuda.set_device(local_rank) CUDA_VISIBLE_DEVICES = os.environ.get("CUDA_VISIBLE_DEVICES", "") if not CUDA_VISIBLE_DEVICES: if spmd: # CUDA_VISIBLE_DEVICES = ','.join(str(i) for i in range(tensor_parallel_size)) CUDA_VISIBLE_DEVICES = ",".join(str(i) for i in range(world_size)) else: CUDA_VISIBLE_DEVICES = str(local_rank) os.environ["CUDA_VISIBLE_DEVICES"] = CUDA_VISIBLE_DEVICES print(f"CUDA_VISIBLE_DEVICES is not set, set to {CUDA_VISIBLE_DEVICES}") return local_rank, rank, world_size def clean_torchelastic_env(): for k in ["TORCHELASTIC_USE_AGENT_STORE"]: if k in os.environ: del os.environ[k] def load_tokenizer_and_model(local_model_path, dtype="bfloat16"): tokenizer = AutoTokenizer.from_pretrained(local_model_path, padding_side="left") tokenizer.pad_token = tokenizer.eos_token model = AutoModelForCausalLM.from_pretrained(local_model_path, torch_dtype=getattr(torch, dtype), device_map="cuda") return tokenizer, model def prepare_inputs(tokenizer, prompts, max_prompt_length): pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id tokenized = tokenizer(prompts, return_tensors="pt", padding=True) input_ids = pad_sequence_to_length(tokenized["input_ids"], max_prompt_length, pad_token_id, left_pad=True) attention_mask = pad_sequence_to_length( tokenized["attention_mask"], max_prompt_length, pad_token_id=0, left_pad=True ) position_ids = compute_position_id_with_mask(attention_mask) position_ids = pad_sequence_to_length(position_ids, max_prompt_length, pad_token_id=0, left_pad=True) return input_ids, attention_mask, position_ids def generate_hf_output(model, input_ids, attention_mask, tokenizer, max_response_length): generation_config = GenerationConfig(do_sample=False) output = model.generate( input_ids=input_ids.cuda(), attention_mask=attention_mask.cuda(), max_new_tokens=max_response_length, eos_token_id=tokenizer.eos_token_id, pad_token_id=tokenizer.pad_token_id, 
generation_config=generation_config, output_scores=False, return_dict_in_generate=True, use_cache=False, ) seq = output.sequences response = seq[:, input_ids.shape[1] :] return tokenizer.batch_decode(response) def get_rollout_config( max_response_length, max_prompt_length, dtype, tensor_parallel_size, tool_config_path=None, interaction_config_path=None, ): sampling_params = dict( n=1, temperature=0, top_p=1, top_k=-1, max_new_tokens=max_response_length, presence_penalty=0.0, frequency_penalty=0.0, repetition_penalty=1.0, skip_special_tokens=True, spaces_between_special_tokens=True, ignore_eos=False, ) rollout_config = OmegaConf.create( { "name": "sglang", "mode": "sync", "load_format": "dummy_dtensor", "enforce_eager": False, "free_cache_engine": True, "dtype": dtype, "gpu_memory_utilization": 0.5, "ignore_eos": False, "max_num_batched_tokens": 8192, "prompt_length": max_prompt_length, "response_length": max_response_length, "tensor_model_parallel_size": tensor_parallel_size, # set to 128MB only for testing "update_weights_bucket_megabytes": 128, "multi_turn": { "max_assistant_turns": 4, "max_user_turns": 4, "enable": True, "tool_config_path": tool_config_path, "interaction_config_path": interaction_config_path, "use_inference_chat_template": False, "tokenization_sanity_check_mode": "strict", }, "max_model_len": None, **sampling_params, } ) return rollout_config ================================================ FILE: verl_rl/verl/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib import logging import os from importlib.metadata import PackageNotFoundError from importlib.metadata import version as get_version from packaging.version import parse as parse_version from .protocol import DataProto from .utils.device import is_npu_available from .utils.logging_utils import set_basic_config version_folder = os.path.dirname(os.path.join(os.path.abspath(__file__))) with open(os.path.join(version_folder, "version/version")) as f: __version__ = f.read().strip() set_basic_config(level=logging.WARNING) __all__ = ["DataProto", "__version__"] if os.getenv("VERL_USE_MODELSCOPE", "False").lower() == "true": if importlib.util.find_spec("modelscope") is None: raise ImportError("You are using the modelscope hub, please install modelscope by `pip install modelscope -U`") # Patch hub to download models from modelscope to speed up. from modelscope.utils.hf_util import patch_hub patch_hub() if is_npu_available: from .models.transformers import npu_patch as npu_patch package_name = "transformers" required_version_spec = "4.52.4" try: installed_version = get_version(package_name) installed = parse_version(installed_version) required = parse_version(required_version_spec) if installed < required: raise ValueError( f"{package_name} version >= {required_version_spec} is required on ASCEND NPU, current version is " f"{installed}." 
) except PackageNotFoundError as e: raise ImportError( f"package {package_name} is not installed, please run pip install {package_name}=={required_version_spec}" ) from e ================================================ FILE: verl_rl/verl/base_config.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import collections from dataclasses import ( dataclass, field, fields, # Import the fields function to inspect dataclass fields ) from typing import Any # BaseConfig class inherits from collections.abc.Mapping, which means it can act like a dictionary @dataclass class BaseConfig(collections.abc.Mapping): """The BaseConfig provides omegaconf DictConfig-like interface for a dataclass config. The BaseConfig class implements the Mapping Abstract Base Class. This allows instances of this class to be used like dictionaries. """ extra: dict[str, Any] = field(default_factory=dict) def __setattr__(self, name: str, value): # if the field already exists (i.e. was set in __init__) # and is in our frozen list, block assignment if hasattr(self, "_frozen_fields") and name in self._frozen_fields and name in self.__dict__: from dataclasses import FrozenInstanceError raise FrozenInstanceError(f"Field '{name}' is frozen and cannot be modified") # otherwise do the normal thing super().__setattr__(name, value) def get(self, key: str, default: Any = None) -> Any: """Get the value associated with the given key. If the key does not exist, return the default value. Args: key (str): The attribute name to retrieve. default (Any, optional): The value to return if the attribute does not exist. Defaults to None. Returns: Any: The value of the attribute or the default value. """ try: return getattr(self, key) except AttributeError: return default def __getitem__(self, key: str): """Implement the [] operator for the class. Allows accessing attributes like dictionary items. Args: key (str): The attribute name to retrieve. Returns: Any: The value of the attribute. Raises: AttributeError: If the attribute does not exist. TypeError: If the key type is not string """ return getattr(self, key) def __iter__(self): """Implement the iterator protocol. Allows iterating over the attribute names of the instance. Yields: str: The name of each field in the dataclass. """ for f in fields(self): yield f.name def __len__(self): """ Return the number of fields in the dataclass. Returns: int: The number of fields in the dataclass. """ return len(fields(self)) ================================================ FILE: verl_rl/verl/experimental/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: verl_rl/verl/experimental/agent_loop/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .agent_loop import AgentLoopBase, AgentLoopManager from .single_turn_agent_loop import SingleTurnAgentLoop from .tool_agent_loop import ToolAgentLoop _ = [SingleTurnAgentLoop, ToolAgentLoop] __all__ = ["AgentLoopBase", "AgentLoopManager"] ================================================ FILE: verl_rl/verl/experimental/agent_loop/agent_loop.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import asyncio import heapq import logging import os import random from abc import ABC, abstractmethod from typing import Any import hydra import numpy as np import ray import torch from cachetools import LRUCache from omegaconf import DictConfig, OmegaConf from pydantic import BaseModel from tensordict import TensorDict from transformers import AutoTokenizer from verl.protocol import DataProto from verl.single_controller.ray.base import RayWorkerGroup from verl.utils import hf_tokenizer from verl.utils.fs import copy_to_local from verl.utils.rollout_trace import RolloutTraceConfig, rollout_trace_attr, rollout_trace_op from verl.workers.rollout.async_server import async_server_class logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) class AsyncLLMServerManager: """ A class to manage multiple OpenAI compatible LLM servers. This class provides - Load balance: least requests load balancing - Sticky session: send multi-turn chat completions to same server for automatic prefix caching """ def __init__(self, config: DictConfig, server_handles: list[ray.actor.ActorHandle], max_cache_size: int = 10000): """Initialize the AsyncLLMServerManager. Args: config (DictConfig): YAML config. server_handles (List[ray.actor.ActorHandle]): OpenAI compatible LLM server actor handles. 
            max_cache_size (int, optional): max cache size for request_id to server mapping. Defaults to 10000.
        """
        self.config = config
        self.server_handles = server_handles
        random.shuffle(self.server_handles)

        # Least requests load balancing
        self.weighted_servers = [[0, (hash(server), server)] for server in server_handles]
        heapq.heapify(self.weighted_servers)

        # LRU cache to map request_id to server
        self.request_id_to_server = LRUCache(maxsize=max_cache_size)

    def _choose_server(self, request_id: str) -> ray.actor.ActorHandle:
        # TODO: implement server pressure awareness load balancing
        if request_id in self.request_id_to_server:
            return self.request_id_to_server[request_id]

        server = self.weighted_servers[0][1][1]
        self.weighted_servers[0][0] += 1
        heapq.heapreplace(self.weighted_servers, self.weighted_servers[0])
        self.request_id_to_server[request_id] = server
        return server

    @rollout_trace_op
    async def generate(
        self,
        request_id,
        *,
        prompt_ids: list[int],
        sampling_params: dict[str, Any],
    ) -> list[int]:
        """Generate tokens from prompt ids.

        Args:
            request_id (str): request id for sticky session.
            prompt_ids (List[int]): List of prompt token ids.
            sampling_params (Dict[str, Any]): Sampling parameters for the chat completion.

        Returns:
            List[int]: List of generated token ids.
        """
        server = self._choose_server(request_id)
        output = await server.generate.remote(
            request_id=request_id,
            prompt_ids=prompt_ids,
            sampling_params=sampling_params,
        )
        return output


class AgentLoopMetrics(BaseModel):
    """Agent loop performance metrics."""

    generate_sequences: float = 0.0
    tool_calls: float = 0.0


class AgentLoopOutput(BaseModel):
    """Agent loop output."""

    prompt_ids: list[int]
    """Prompt token ids."""
    response_ids: list[int]
    """Response token ids including LLM generated token, tool response token."""
    response_mask: list[int]
    """Response mask, 1 for LLM generated token, 0 for tool response token."""
    num_turns: int = 0
    """Number of chat turns, including user, assistant, tool."""
    metrics: AgentLoopMetrics
    """Auxiliary performance metrics"""


# make hydra.utils.instantiate happy
class _DummyConfig:
    def __init__(self, config: DictConfig) -> None:
        self.config = config


class AgentLoopBase(ABC):
    """An agent loop takes an input message, chats with an OpenAI-compatible LLM server,
    and interacts with various environments."""

    _class_initialized = False

    def __init__(
        self, trainer_config: _DummyConfig, server_manager: AsyncLLMServerManager, tokenizer: AutoTokenizer, **kwargs
    ):
        """Initialize agent loop, each sample will have its own loop instance.

        Args:
            trainer_config (_DummyConfig): trainer config.
            server_manager (AsyncLLMServerManager): OpenAI compatible LLM server manager.
            tokenizer (AutoTokenizer): Tokenizer for tokenizing messages.
        """
        self.init_class(trainer_config.config, tokenizer, **kwargs)
        self.config = trainer_config.config
        self.server_manager = server_manager
        self.tokenizer = tokenizer
        self.loop = asyncio.get_running_loop()

    @classmethod
    def init_class(cls, config: DictConfig, tokenizer: AutoTokenizer, **kwargs):
        """This is used to do heavy initialization work that should be shared across all instances.
        It's only called once.

        Args:
            config (DictConfig): trainer config.
            tokenizer (AutoTokenizer): Tokenizer for tokenizing messages.
            **kwargs: extra kwargs from config file passed in by `hydra.utils.instantiate`.
""" if cls._class_initialized: return cls._class_initialized = True @abstractmethod async def run(self, messages: list[dict[str, Any]], sampling_params: dict[str, Any]) -> AgentLoopOutput: """Run agent loop to interact with LLM server and environment. Args: messages (List[Dict[str, Any]]): Input messages. sampling_params (Dict[str, Any]): LLM sampling params. Returns: AgentLoopOutput: Agent loop output. """ raise NotImplementedError """Agent loop registry: key is agent_name, value is a dict of agent loop config used by hydra.utils.instantiate to initialize agent loop instance. https://hydra.cc/docs/advanced/instantiate_objects/overview/ """ _agent_loop_registry: dict[str, dict] = {} def register(agent_name: str): """Register agent loop class.""" def decorator(subclass: type[AgentLoopBase]) -> type[AgentLoopBase]: fqdn = f"{subclass.__module__}.{subclass.__qualname__}" _agent_loop_registry[agent_name] = {"_target_": fqdn} return subclass return decorator @ray.remote class AgentLoopWorker: """Agent loop worker takes a batch of messages and run each message in an agent loop.""" def __init__(self, config: DictConfig, server_handles: list[ray.actor.ActorHandle]): """Initialize agent loop manager. Args: config (DictConfig): YAML config. server_handles (List[ray.actor.ActorHandle]): OpenAI compatible LLM server actor handles. """ self.config = config self.server_manager = AsyncLLMServerManager(config, server_handles) model_path = config.actor_rollout_ref.model.path self.model_name = "/".join(model_path.split("/")[-2:]) local_path = copy_to_local(config.actor_rollout_ref.model.path) self.tokenizer = hf_tokenizer(local_path, trust_remote_code=True) agent_loop_config_path = config.actor_rollout_ref.rollout.agent.agent_loop_config_path if agent_loop_config_path: agent_loop_configs = OmegaConf.load(agent_loop_config_path) for agent_loop_config in agent_loop_configs: _agent_loop_registry[agent_loop_config.name] = agent_loop_config trace_config = config.trainer.get("rollout_trace", {}) trace_config = self.config.actor_rollout_ref.rollout.get("trace", {}) RolloutTraceConfig.init( self.config.trainer.project_name, self.config.trainer.experiment_name, trace_config.get("backend"), trace_config.get("token2text", False), ) async def generate_sequences(self, batch: DataProto) -> DataProto: """Generate sequences from agent loop. Args: batch (DataProto): Input batch. Returns: DataProto: Output batch. - prompts: [bsz, prompt_length], prompt token ids from dataset. - responses: [bsz, response_length], output token ids include response tokens from LLM generation and observation tokens from tool_calls. - response_mask: [bsz, response_length], 1 for LLM generated tokens, 0 for observation/padding tokens. - input_ids: [bsz, prompt_length + response_length], whole sequence token ids, including prompt tokens and response tokens. - attention_mask: [bsz, prompt_length + response_length], 0 for padding tokens, 1 for other tokens. - position_ids: [bsz, prompt_length + response_length], incremental position ids. 
For multi-turn conversations: responses: |<- LLM generation ->|<- tool_calls ->|<- LLM generation ->|<- padding ->| response_mask: | 1, 1, 1, ..., 1, 1 | 0, 0, .., 0, 0 | 1, 1, 1, ..., 1, 1 | 0, 0, ..., 0| """ config = self.config.actor_rollout_ref.rollout sampling_params = dict( temperature=config.temperature, top_p=config.top_p, repetition_penalty=1.0, ) # override sampling params for validation if batch.meta_info.get("validate", False): sampling_params["top_p"] = config.val_kwargs.top_p sampling_params["temperature"] = config.val_kwargs.temperature # by default, we assume it's a single turn agent if "agent_name" not in batch.non_tensor_batch: batch.non_tensor_batch["agent_name"] = np.array(["single_turn_agent"] * len(batch), dtype=object) tasks = [] agent_names = batch.non_tensor_batch["agent_name"] raw_prompts = batch.non_tensor_batch["raw_prompt"] if "index" in batch.non_tensor_batch: index = batch.non_tensor_batch["index"] else: index = np.arange(len(raw_prompts)) trajectory_info = await get_trajectory_info( batch.meta_info.get("global_steps", -1), index, batch.meta_info.get("validate", False) ) for agent_name, messages, trajectory in zip(agent_names, raw_prompts, trajectory_info, strict=True): tasks.append( asyncio.create_task(self._run_agent_loop(agent_name, messages.tolist(), sampling_params, trajectory)) ) outputs = await asyncio.gather(*tasks) output = self._postprocess(outputs) return output async def _run_agent_loop( self, agent_name: str, messages: list[dict[str, Any]], sampling_params: dict[str, Any], trajectory: dict[str, Any], ) -> AgentLoopOutput: with rollout_trace_attr( step=trajectory["step"], sample_index=trajectory["sample_index"], rollout_n=trajectory["rollout_n"], validate=trajectory["validate"], name="agent_loop", ): assert agent_name in _agent_loop_registry, ( f"Agent loop {agent_name} not registered, registered agent loops: {_agent_loop_registry.keys()}" ) agent_loop_config = _agent_loop_registry[agent_name] agent_loop = hydra.utils.instantiate( config=agent_loop_config, trainer_config=_DummyConfig(config=self.config), server_manager=self.server_manager, tokenizer=self.tokenizer, ) output = await agent_loop.run(messages, sampling_params) return output def _postprocess(self, inputs: list[AgentLoopOutput]) -> DataProto: # NOTE: consistent with batch version of generate_sequences in vllm_rollout_spmd.py # prompts: left pad # responses: right pad # input_ids: prompt + response # attention_mask: [0,0,0,0,1,1,1,1, | 1,1,1,0,0,0,0,0] # position_ids: [0,0,0,0,0,1,2,3, | 4,5,6,7,8,9,10,11] # prompts self.tokenizer.padding_side = "left" outputs = self.tokenizer.pad( [{"input_ids": input.prompt_ids} for input in inputs], padding="max_length", max_length=self.config.actor_rollout_ref.rollout.prompt_length, return_tensors="pt", return_attention_mask=True, ) prompt_ids, prompt_attention_mask = outputs["input_ids"], outputs["attention_mask"] # responses self.tokenizer.padding_side = "right" outputs = self.tokenizer.pad( [{"input_ids": input.response_ids} for input in inputs], padding="max_length", max_length=self.config.actor_rollout_ref.rollout.response_length, return_tensors="pt", return_attention_mask=True, ) response_ids, response_attention_mask = outputs["input_ids"], outputs["attention_mask"] # response_mask outputs = self.tokenizer.pad( [{"input_ids": input.response_mask} for input in inputs], padding="max_length", max_length=self.config.actor_rollout_ref.rollout.response_length, return_tensors="pt", return_attention_mask=False, ) response_mask = outputs["input_ids"] 
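        # Illustrative note (hypothetical numbers, not part of the original file): the
        # position_ids formula below can be checked by hand. For a left-padded row
        #     attention_mask      = [0, 0, 1, 1, 1, 1, 0, 0]
        #     cumsum(dim=1)       = [0, 0, 1, 2, 3, 4, 4, 4]
        #     cumsum - 1          = [-1, -1, 0, 1, 2, 3, 3, 3]
        #     (cumsum - 1) * mask = [0, 0, 0, 1, 2, 3, 0, 0]
        # so valid tokens get incremental positions while padding positions stay at 0.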
assert response_ids.shape == response_mask.shape, ( f"mismatch in response_ids and response_mask shape: {response_ids.shape} vs {response_mask.shape}" ) response_mask = response_mask * response_attention_mask input_ids = torch.cat([prompt_ids, response_ids], dim=1) attention_mask = torch.cat([prompt_attention_mask, response_attention_mask], dim=1) position_ids = (attention_mask.cumsum(dim=1) - 1) * attention_mask batch = TensorDict( { "prompts": prompt_ids, # [bsz, prompt_length] "responses": response_ids, # [bsz, response_length] "response_mask": response_mask, # [bsz, response_length] "input_ids": input_ids, # [bsz, prompt_length + response_length] "attention_mask": attention_mask, # [bsz, prompt_length + response_length] "position_ids": position_ids, # [bsz, prompt_length + response_length] }, batch_size=len(input_ids), ) num_turns = np.array([input.num_turns for input in inputs], dtype=np.int32) metrics = [input.metrics.model_dump() for input in inputs] return DataProto(batch=batch, non_tensor_batch={"__num_turns__": num_turns}, meta_info={"metrics": metrics}) async def get_trajectory_info(step, index, validate): """Get trajectory info. Args: step (int): global steps in the trainer. index (list): form datastore extra_info.index column. validate (bool): whether is a validate step. Returns: list: trajectory. """ trajectory_info = [] rollout_n = 0 for i in range(len(index)): if i > 0 and index[i - 1] == index[i]: rollout_n += 1 else: rollout_n = 0 trajectory_info.append({"step": step, "sample_index": index[i], "rollout_n": rollout_n, "validate": validate}) return trajectory_info class AgentLoopManager: """Agent loop manager that manages a group of agent loop workers.""" def __init__(self, config: DictConfig, worker_group: RayWorkerGroup): """Initialize agent loop manager. Args: config (DictConfig): trainer config. worker_group (RayWorkerGroup): ActorRolloutRef worker group. """ self.config = config self.worker_group = worker_group self._initialize_llm_servers() self._init_agent_loop_workers() # Initially we're in sleep mode. self.sleep() def _initialize_llm_servers(self): self.rollout_tp_size = self.config.actor_rollout_ref.rollout.tensor_model_parallel_size self.rollout_dp_size = self.worker_group.world_size // self.rollout_tp_size register_center = ray.get_actor(f"{self.worker_group.name_prefix}_register_center") workers_info = ray.get(register_center.get_worker_info.remote()) assert len(workers_info) == self.worker_group.world_size self.async_llm_servers = [None] * self.rollout_dp_size self.server_addresses = [None] * self.rollout_dp_size if self.config.actor_rollout_ref.rollout.agent.custom_async_server: server_class = async_server_class( rollout_backend=self.config.actor_rollout_ref.rollout.name, rollout_backend_module=self.config.actor_rollout_ref.rollout.agent.custom_async_server.path, rollout_backend_class=self.config.actor_rollout_ref.rollout.agent.custom_async_server.name, ) else: server_class = async_server_class(rollout_backend=self.config.actor_rollout_ref.rollout.name) # Start all server instances, restart if address already in use. 
        unready_dp_ranks = set(range(self.rollout_dp_size))
        while len(unready_dp_ranks) > 0:
            servers = {
                rollout_dp_rank: server_class.options(
                    # make sure AsyncvLLMServer colocates with its corresponding workers
                    scheduling_strategy=ray.util.scheduling_strategies.NodeAffinitySchedulingStrategy(
                        node_id=workers_info[rollout_dp_rank * self.rollout_tp_size],
                        soft=False,
                    ),
                    name=f"async_llm_server_{rollout_dp_rank}",
                ).remote(self.config, self.rollout_dp_size, rollout_dp_rank, self.worker_group.name_prefix)
                for rollout_dp_rank in unready_dp_ranks
            }

            for rollout_dp_rank, server in servers.items():
                try:
                    address = ray.get(server.get_server_address.remote())
                    self.server_addresses[rollout_dp_rank] = address
                    self.async_llm_servers[rollout_dp_rank] = server
                    unready_dp_ranks.remove(rollout_dp_rank)
                except Exception:
                    ray.kill(server)
                    print(f"rollout server {rollout_dp_rank} failed, maybe address already in use, restarting...")

        # All server instances are ready, init AsyncLLM engine.
        ray.get([server.init_engine.remote() for server in self.async_llm_servers])

    def _init_agent_loop_workers(self):
        self.agent_loop_workers = []
        for i in range(self.config.actor_rollout_ref.rollout.agent.num_workers):
            self.agent_loop_workers.append(
                AgentLoopWorker.options(
                    name=f"agent_loop_worker_{i}",
                ).remote(self.config, self.async_llm_servers)
            )

    def generate_sequences(self, prompts: DataProto) -> DataProto:
        """Split input batch and dispatch to agent loop workers.

        Args:
            prompts (DataProto): Input batch.

        Returns:
            DataProto: Output batch.
        """
        if self.config.actor_rollout_ref.rollout.free_cache_engine:
            self.wake_up()
        chunks = prompts.chunk(len(self.agent_loop_workers))
        outputs = ray.get(
            [
                worker.generate_sequences.remote(chunk)
                for worker, chunk in zip(self.agent_loop_workers, chunks, strict=True)
            ]
        )
        output = DataProto.concat(outputs)
        if self.config.actor_rollout_ref.rollout.free_cache_engine:
            self.sleep()

        # calculate performance metrics
        metrics = [output.meta_info["metrics"] for output in outputs]  # List[List[Dict[str, str]]]
        timing = self._performance_metrics(metrics, output)

        output.meta_info = {"timing": timing}
        return output

    def _performance_metrics(self, metrics: list[list[dict[str, str]]], output: DataProto) -> dict[str, float]:
        timing = {}
        t_generate_sequences = np.array([metric["generate_sequences"] for chunk in metrics for metric in chunk])
        t_tool_calls = np.array([metric["tool_calls"] for chunk in metrics for metric in chunk])
        timing["agent_loop/generate_sequences/min"] = t_generate_sequences.min()
        timing["agent_loop/generate_sequences/max"] = t_generate_sequences.max()
        timing["agent_loop/generate_sequences/mean"] = t_generate_sequences.mean()
        timing["agent_loop/tool_calls/min"] = t_tool_calls.min()
        timing["agent_loop/tool_calls/max"] = t_tool_calls.max()
        timing["agent_loop/tool_calls/mean"] = t_tool_calls.mean()

        # batch sequence generation is bounded by the slowest sample
        slowest = np.argmax(t_generate_sequences + t_tool_calls)
        attention_mask = output.batch["attention_mask"][slowest]
        prompt_length = output.batch["prompts"].shape[1]
        timing["agent_loop/slowest/generate_sequences"] = t_generate_sequences[slowest]
        timing["agent_loop/slowest/tool_calls"] = t_tool_calls[slowest]
        timing["agent_loop/slowest/prompt_length"] = attention_mask[:prompt_length].sum().item()
        timing["agent_loop/slowest/response_length"] = attention_mask[prompt_length:].sum().item()
        return timing

    def wake_up(self):
        """Wake up all rollout server instances."""
        ray.get([server.wake_up.remote() for server in self.async_llm_servers])

    def sleep(self):
        """Sleep all rollout server instances."""
        ray.get([server.sleep.remote() for server in self.async_llm_servers])


================================================
FILE: verl_rl/verl/experimental/agent_loop/single_turn_agent_loop.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import os
from typing import Any
from uuid import uuid4

from verl.experimental.agent_loop.agent_loop import AgentLoopBase, AgentLoopOutput, register
from verl.utils.profiler import simple_timer

logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))


@register("single_turn_agent")
class SingleTurnAgentLoop(AgentLoopBase):
    """Naive agent loop that only does single-turn chat completion."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.prompt_length = self.config.actor_rollout_ref.rollout.prompt_length
        self.response_length = self.config.actor_rollout_ref.rollout.response_length

    async def run(self, messages: list[dict[str, Any]], sampling_params: dict[str, Any]) -> AgentLoopOutput:
        metrics = {}
        request_id = uuid4().hex
        prompt_ids = await self.loop.run_in_executor(
            None, lambda: self.tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=True)
        )
        with simple_timer("generate_sequences", metrics):
            response_ids = await self.server_manager.generate(
                request_id=request_id, prompt_ids=prompt_ids, sampling_params=sampling_params
            )
        response_mask = [1] * len(response_ids)

        output = AgentLoopOutput(
            prompt_ids=prompt_ids,
            response_ids=response_ids[: self.response_length],
            response_mask=response_mask[: self.response_length],
            num_turns=2,
            metrics=metrics,
        )
        return output


================================================
FILE: verl_rl/verl/experimental/agent_loop/tool_agent_loop.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
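
# Illustrative sketch (hypothetical token lists, not part of the original file): the
# multi-turn loop in ToolAgentLoop below interleaves model-generated tokens and
# tool-response tokens; the response_mask records which is which, e.g.
#
#     response_ids  = llm_turn_1 + tool_resp_1 + llm_turn_2
#     response_mask = [1] * len(llm_turn_1) + [0] * len(tool_resp_1) + [1] * len(llm_turn_2)
#
# Positions with mask 0 (tool observations) can then be excluded from the policy loss,
# matching the response_mask convention documented in agent_loop.py above.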
import asyncio import json import logging import os from typing import Any from uuid import uuid4 from verl.experimental.agent_loop.agent_loop import AgentLoopBase, AgentLoopOutput, register from verl.experimental.agent_loop.tool_parser import FunctionCall, ToolParser from verl.tools.utils.tool_registry import initialize_tools_from_config from verl.utils.profiler import simple_timer from verl.utils.rollout_trace import rollout_trace_op logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) @register("tool_agent") class ToolAgentLoop(AgentLoopBase): @classmethod def init_class(cls, config, tokenizer, **kwargs): if cls._class_initialized: return cls._class_initialized = True print("Performing class-level ToolAgentLoop initialization") # Initialize tools from config file cls.tokenizer = tokenizer cls.max_user_turns = config.actor_rollout_ref.rollout.multi_turn.max_user_turns cls.max_assistant_turns = config.actor_rollout_ref.rollout.multi_turn.max_assistant_turns cls.max_parallel_calls = config.actor_rollout_ref.rollout.multi_turn.max_parallel_calls cls.max_tool_response_length = config.actor_rollout_ref.rollout.multi_turn.max_tool_response_length cls.tool_response_truncate_side = config.actor_rollout_ref.rollout.multi_turn.tool_response_truncate_side tool_config_path = config.actor_rollout_ref.rollout.multi_turn.tool_config_path tool_list = initialize_tools_from_config(tool_config_path) if tool_config_path else [] cls.tools = {tool.name: tool for tool in tool_list} cls.tool_schemas = [tool.tool_schema.model_dump(exclude_unset=True, exclude_none=True) for tool in tool_list] cls.tool_parser = ToolParser.get_tool_parser(config.actor_rollout_ref.rollout.multi_turn.format, cls.tokenizer) print(f"Initialized tools: {cls.tools}") cls.prompt_length = config.actor_rollout_ref.rollout.prompt_length cls.response_length = config.actor_rollout_ref.rollout.response_length cls.system_prompt = tokenizer.apply_chat_template([{}], add_generation_prompt=False, tokenize=True) @rollout_trace_op async def run(self, messages: list[dict[str, Any]], sampling_params: dict[str, Any]) -> AgentLoopOutput: metrics = {} request_id = uuid4().hex prompt_ids = await self.loop.run_in_executor( None, lambda: self.tokenizer.apply_chat_template( messages, tools=self.tool_schemas, add_generation_prompt=True, tokenize=True ), ) response_mask = [] user_turns, assistant_turns = 0, 0 while True: with simple_timer("generate_sequences", metrics): response_ids = await self.server_manager.generate( request_id=request_id, prompt_ids=prompt_ids, sampling_params=sampling_params ) prompt_ids += response_ids response_mask += [1] * len(response_ids) assistant_turns += 1 # reach max response length if len(response_mask) >= self.response_length: break # reach max assistant turns if self.max_assistant_turns and assistant_turns >= self.max_assistant_turns: break # reach max user turns if self.max_user_turns and user_turns >= self.max_user_turns: break # no tool calls _, tool_calls = await self.tool_parser.extract_tool_calls(response_ids) if not tool_calls: break # call tools tasks = [] for tool_call in tool_calls[: self.max_parallel_calls]: tasks.append(self._call_tool(tool_call)) with simple_timer("tool_calls", metrics): tool_responses = await asyncio.gather(*tasks) if any(isinstance(item, Exception) for item in tool_responses): break # append tool_response_ids tool_response_ids = await self.loop.run_in_executor( None, lambda messages=tool_responses: self.tokenizer.apply_chat_template( messages, 
add_generation_prompt=True, tokenize=True ), ) tool_response_ids = tool_response_ids[len(self.system_prompt) :] # NOTE: last turn should not be user turn, or the EOS token reward # can't be propagated to previous token in GAE. if len(response_mask) + len(tool_response_ids) >= self.response_length: break prompt_ids += tool_response_ids response_mask += [0] * len(tool_response_ids) user_turns += 1 response_ids = prompt_ids[-len(response_mask) :] prompt_ids = prompt_ids[: len(prompt_ids) - len(response_mask)] output = AgentLoopOutput( prompt_ids=prompt_ids, response_ids=response_ids[: self.response_length], response_mask=response_mask[: self.response_length], num_turns=user_turns + assistant_turns + 1, metrics=metrics, ) return output async def _call_tool(self, tool_call: FunctionCall) -> dict[str, str]: """Call tool and return tool response.""" tool, instance_id = None, None try: # TODO: append malformed tool_call to the prompt: invalid function name or arguments tool_name = tool_call.name tool_args = json.loads(tool_call.arguments) tool = self.tools[tool_name] instance_id = await tool.create() tool_response, _, _ = await tool.execute(instance_id, tool_args) except Exception as e: logger.exception(f"Error when executing tool: {e}") return e finally: if tool and instance_id: await tool.release(instance_id) if len(tool_response) > self.max_tool_response_length: if self.tool_response_truncate_side == "left": tool_response = tool_response[: self.max_tool_response_length] + "...(truncated)" elif self.tool_response_truncate_side == "right": tool_response = "(truncated)..." + tool_response[-self.max_tool_response_length :] else: length = self.max_tool_response_length // 2 tool_response = tool_response[:length] + "...(truncated)..." + tool_response[-length:] return { "role": "tool", "content": tool_response, } ================================================ FILE: verl_rl/verl/experimental/agent_loop/tool_parser.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import asyncio import json import logging import os from abc import ABC, abstractmethod import regex as re from pydantic import BaseModel from verl.utils.rollout_trace import rollout_trace_op logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) class FunctionCall(BaseModel): arguments: str """ The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. """ name: str """The name of the function to call.""" class ToolParser(ABC): _registry: dict[str, type["ToolParser"]] = {} def __init__(self, tokenizer) -> None: self.tokenizer = tokenizer @abstractmethod async def extract_tool_calls(self, responses_ids: list[int]) -> tuple[str, list[FunctionCall]]: """Extract tool calls from the responses. 
        Args:
            responses_ids (List[int]): The ids of the responses.

        Returns:
            Tuple[str, List[FunctionCall]]: Content and extracted tool calls.
        """
        raise NotImplementedError

    @classmethod
    def get_tool_parser(cls, name: str, tokenizer):
        if name not in cls._registry:
            raise ValueError(f"Unknown tool parser: {name}")
        return cls._registry[name](tokenizer)

    @classmethod
    def register(cls, name: str):
        def decorator(subclass: type[ToolParser]) -> type[ToolParser]:
            cls._registry[name] = subclass
            return subclass

        return decorator


@ToolParser.register("hermes")
class HermesToolParser(ToolParser):
    """Adapted from https://github.com/vllm-project/vllm/blob/v0.9.1/vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py"""

    def __init__(self, tokenizer) -> None:
        super().__init__(tokenizer)

        self.tool_call_start_token: str = "<tool_call>"
        self.tool_call_end_token: str = "</tool_call>"
        self.tool_call_regex = re.compile(r"<tool_call>(.*?)</tool_call>", re.DOTALL)

    @rollout_trace_op
    async def extract_tool_calls(self, responses_ids: list[int]) -> tuple[str, list[FunctionCall]]:
        loop = asyncio.get_running_loop()
        text = await loop.run_in_executor(None, self.tokenizer.decode, responses_ids)
        if self.tool_call_start_token not in text or self.tool_call_end_token not in text:
            return text, []

        matches = self.tool_call_regex.findall(text)
        function_calls = []
        for match in matches:
            try:
                function_call = json.loads(match)
                name, arguments = function_call["name"], function_call["arguments"]
                function_calls.append(FunctionCall(name=name, arguments=json.dumps(arguments, ensure_ascii=False)))
            except Exception as e:
                logger.error(f"Failed to decode tool call: {e}")

        # remaining text, excluding the tool call tokens
        content = self.tool_call_regex.sub("", text)

        return content, function_calls


================================================
FILE: verl_rl/verl/experimental/dataset/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


================================================
FILE: verl_rl/verl/experimental/dataset/sampler.py
================================================
# Copyright 2025 Amazon.com Inc and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
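
# Illustrative example (hypothetical response text, not part of the original file)
# for the Hermes parser defined in tool_parser.py above: given a decoded response
#
#     'Let me check.<tool_call>{"name": "get_weather", "arguments": {"city": "Paris"}}</tool_call>'
#
# extract_tool_calls returns the content 'Let me check.' together with
# [FunctionCall(name="get_weather", arguments='{"city": "Paris"}')].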
from abc import abstractmethod from collections.abc import Sized from omegaconf import DictConfig from torch.utils.data import Sampler from verl import DataProto class AbstractSampler(Sampler[int]): """Abstract interface for custom samplers.""" @abstractmethod def __init__( self, data_source: Sized, data_config: DictConfig, ): pass class AbstractCurriculumSampler(AbstractSampler): """Experimental interface for curriculum learning samplers.""" @abstractmethod def update(self, batch: DataProto) -> None: pass ================================================ FILE: verl_rl/verl/experimental/dynamic_dataset/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: verl_rl/verl/experimental/dynamic_dataset/dynamicgen_dataset.py ================================================ # Copyright 2025 Amazon.com Inc and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Dataset class that enables dynamic data generation strategies between iterations of training. This class extends RLHFDataset and uses an AbstractDataGen instance to generate data. This is especially useful in settings where proposer model generates new tasks based on rollout data. """ import logging from abc import ABC, abstractmethod from typing import Optional import datasets from omegaconf import DictConfig from torch.utils.data import Dataset from transformers import PreTrainedTokenizer, ProcessorMixin from verl import DataProto from verl.utils.dataset import RLHFDataset from verl.utils.import_utils import load_extern_type logger = logging.getLogger(__name__) class AbstractDataGenerator(ABC): def __init__(self, config: DictConfig): self.config = config @abstractmethod def generate(self, dataset: Dataset) -> datasets.Dataset: """ Generate method must be implemented by subclasses. Args: dataset: The dataset to generate from. Returns: Processed data or result as implemented by the subclass. """ pass class MockDataGenerator(AbstractDataGenerator): """ A noop data gen class that only reappends the first datapoint. This class is useful as a placeholder and testing. 
""" def __init__(self, config: DictConfig = None): super().__init__(config) def generate(self, dataset: Dataset) -> datasets.Dataset: print("MockDataGenerator: No operation performed on the dataset.") return dataset.dataframe.select([0]) class DynamicGenDataset(RLHFDataset): """ A dataset class that uses a data generation strategy to process data. This class extends RLHFDataset and uses an AbstractDataGen instance to generate data. """ def __init__( self, data_files: str | list[str], tokenizer: PreTrainedTokenizer, config: DictConfig, processor: Optional[ProcessorMixin] = None, ): super().__init__(data_files, tokenizer, config, processor) self.datagen: AbstractDataGenerator = config.datagen assert "datagen" in config and config.datagen.get("path", None) is not None, ( f"datagen path is not set in config: {config}" ) # Dynamically load the custom datagen class datagen_cls = load_extern_type(config.datagen.path, config.datagen.name) # Verify that the custom datagen class inherits from AbstractDataGenerator abs_cls = AbstractDataGenerator if not issubclass(datagen_cls, abs_cls): raise TypeError( f"The custom datagen class '{config.datagen.name}' from '{config.datagen.path}'" + " must inherit from {abs_cls}" ) self.data_generator = datagen_cls(config.datagen) self.on_batch_end() def append_dataframe(self, new_dataframe: datasets.Dataset): new_dataframe = self.maybe_filter_out_long_prompts(new_dataframe) self.dataframe = datasets.concatenate_datasets([self.dataframe, new_dataframe]) logger.info(f"new dataset len: {len(self.dataframe)}") def on_batch_end(self, batch: DataProto) -> None: """ Generate data using the provided data generation strategy. Note: This method is intended to change the dataset after each training batch. """ new_data = self.data_generator.generate(self) self.append_dataframe(new_data) ================================================ FILE: verl_rl/verl/interactions/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: verl_rl/verl/interactions/base.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from typing import Any, Optional
from uuid import uuid4


class BaseInteraction:
    def __init__(self, config: dict[str, Any]):
        self.config = config
        self.name: str = config.get("name", "interaction_agent")  # general default role name for the agent

    async def start_interaction(self, instance_id: Optional[str] = None, **kwargs) -> str:
        """Start an interaction instance.

        Args:
            instance_id: The instance id of the interaction.

        Returns:
            The instance id of the interaction.
        """
        if instance_id is None:
            return str(uuid4())
        else:
            return instance_id

    async def generate_response(
        self, instance_id: str, messages: list[dict[str, Any]], **kwargs
    ) -> tuple[bool, str, float, dict[str, Any]]:
        """
        Generates a response for the current turn of interaction.

        Returns a tuple containing:
        - should_terminate_sequence (bool): True if the interaction sequence should end.
        - response_content (str): The textual content of the response.
        - current_turn_score (float): The score for this specific turn/response.
        - additional_data (dict): Any extra information or metadata.
        """
        should_terminate_sequence: bool = False  # if True, end rollout
        response_content: str = "Your current result seems acceptable."
        current_turn_score: float = 0.8
        additional_data: dict[str, Any] = {}
        return should_terminate_sequence, response_content, current_turn_score, additional_data

    async def calculate_score(self) -> float:
        """
        Calculates a score for the interaction, potentially considering aspects like
        partial exposure & in-context task switching. Should be invoked at turn level.
        """
        # ...implement the logic to calculate turn-level score...
        score = 0.0
        return score

    async def finalize_interaction(self) -> None:
        """
        Finalizes the interaction session and releases any associated state or resources.
        Simulates: release state
        """
        # ...implement the logic to release state...
        pass


================================================
FILE: verl_rl/verl/interactions/gsm8k_interaction.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023-2024 SGLang Team
# Copyright 2025 ModelBest Inc. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import os
from typing import Any, Optional
from uuid import uuid4

from verl.utils.reward_score import gsm8k

from .base import BaseInteraction

logger = logging.getLogger(__name__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))


class Gsm8kInteraction(BaseInteraction):
    """A demo interaction for calculating the reward of gsm8k.

    - `start_interaction`: start an interaction instance for a trajectory.
    - `generate_response`: generate the response of the user.
    - `calculate_score`: calculate the score of the interaction.
    - `finalize_interaction`: finalize the interaction instance.
""" def __init__(self, config: dict): super().__init__(config) self._instance_dict = {} async def start_interaction( self, instance_id: Optional[str] = None, ground_truth: Optional[str] = None, **kwargs ) -> str: if instance_id is None: instance_id = str(uuid4()) self._instance_dict[instance_id] = { "response": "", "ground_truth": ground_truth, "reward": 0.0, } return instance_id async def generate_response( self, instance_id: str, messages: list[dict[str, Any]], **kwargs ) -> tuple[bool, str, float, dict]: content = "" for i in range(len(messages) - 1, -1, -1): item = messages[i] if item.get("role") == "assistant": content = item.get("content") break self._instance_dict[instance_id]["response"] = content reward = await self.calculate_score(instance_id) if reward == 1.0: response = "Your response is correct!" should_terminate_sequence = True else: response = "Your response is incorrect! You need to reflect on your answer and try again." should_terminate_sequence = False return should_terminate_sequence, response, reward, {} async def calculate_score(self, instance_id: str, **kwargs) -> float: return gsm8k.compute_score( self._instance_dict[instance_id]["response"], self._instance_dict[instance_id]["ground_truth"], method="strict", format_score=0.0, score=1.0, ) async def finalize_interaction(self, instance_id: str, **kwargs) -> None: del self._instance_dict[instance_id] ================================================ FILE: verl_rl/verl/interactions/utils/__init__.py ================================================ # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: verl_rl/verl/interactions/utils/interaction_registry.py ================================================ # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import importlib.util import logging import os import sys from omegaconf import OmegaConf logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) def get_interaction_class(cls_name): """Dynamically import and return the interaction class.""" module_name, class_name = cls_name.rsplit(".", 1) if module_name not in sys.modules: spec = importlib.util.find_spec(module_name) module = importlib.util.module_from_spec(spec) sys.modules[module_name] = module spec.loader.exec_module(module) else: module = sys.modules[module_name] interaction_cls = getattr(module, class_name) return interaction_cls def initialize_interactions_from_config(interaction_config_file): """Initialize interactions from configuration file. Args: interaction_config_file: Path to the interaction configuration file. Returns: dict: A dictionary mapping interaction names to BaseInteraction instances. """ interaction_config = OmegaConf.load(interaction_config_file) interaction_map = {} for interaction_item in interaction_config.interaction: cls_name = interaction_item.class_name interaction_cls = get_interaction_class(cls_name) # Extract config and name config = OmegaConf.to_container(interaction_item.config, resolve=True) # Get the interaction name - either from config or derive from class name name = interaction_item.get("name", None) if name is None: # If no name is specified, use the class name as default class_simple_name = cls_name.split(".")[-1] # Remove "Interaction" suffix if present, otherwise use full class name if class_simple_name.endswith("Interaction"): name = class_simple_name[:-11].lower() # Remove "Interaction" (11 chars) else: name = class_simple_name.lower() # Check for duplicate names if name in interaction_map: raise ValueError(f"Duplicate interaction name '{name}' found. Each interaction must have a unique name.") # Inject the name into the config config["name"] = name # Create the interaction instance interaction = interaction_cls(config=config) interaction_map[name] = interaction logger.info(f"Initialized interaction '{name}' with class '{cls_name}'") return interaction_map ================================================ FILE: verl_rl/verl/model_merger/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: verl_rl/verl/model_merger/__main__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. """ This module merges verl checkpoints from the FSDP and Megatron backends into HuggingFace format and tests the merged models. To merge FSDP checkpoints: ```sh python -m verl.model_merger merge \ --backend fsdp \ --local_dir checkpoints/verl_fsdp_gsm8k_examples/qwen2_5_0b5_fsdp_saveload/global_step_1/actor \ --target_dir /path/to/merged_hf_model ``` To merge Megatron checkpoints: ```sh python -m verl.model_merger merge \ --backend megatron \ --tie-word-embedding \ --local_dir checkpoints/verl_megatron_gsm8k_examples/qwen2_5_0b5_megatron_saveload/global_step_1/actor \ --target_dir /path/to/merged_hf_model ``` Or use distributed merge for large models like DeepSeek-V3 671B: ```sh torchrun --nproc_per_node 1 --nnodes 8 --node_rank ${RANK} -m verl.model_merger merge \ --backend megatron \ --local_dir ./checkpoints/global_step_1/actor \ --target_dir /path/to/merged_hf_model ``` For more details, please refer to the documentation: https://verl.readthedocs.io/en/latest/advance/checkpoint.html#convert-fsdp-and-megatron-checkpoints-to-huggingface-format-model """ from .base_model_merger import generate_config_from_args, parse_args def main(): args = parse_args() config = generate_config_from_args(args) print(f"config: {config}") if config.backend == "fsdp": from .fsdp_model_merger import FSDPModelMerger merger = FSDPModelMerger(config) elif config.backend == "megatron": from .megatron_model_merger import MegatronModelMerger merger = MegatronModelMerger(config) else: raise NotImplementedError(f"Unknown backend: {config.backend}") merger.merge_and_save() merger.cleanup() if __name__ == "__main__": main() ================================================ FILE: verl_rl/verl/model_merger/base_model_merger.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
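# NOTE: an illustrative usage sketch. Besides `merge`, parse_args() below defines
# a `test` subcommand that validates a merged checkpoint against a reference
# Hugging Face model (all paths here are placeholders):
#
#   python -m verl.model_merger test \
#       --backend fsdp \
#       --local_dir checkpoints/my_run/global_step_1/actor \
#       --test_hf_dir /path/to/reference_hf_model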
import argparse import os from abc import ABC, abstractmethod from dataclasses import dataclass, field from typing import Optional import torch from accelerate import init_empty_weights from transformers import ( AutoConfig, AutoModelForCausalLM, AutoModelForTokenClassification, AutoModelForVision2Seq, GenerationConfig, ) from verl.utils import hf_processor, hf_tokenizer def parse_args(): parser = argparse.ArgumentParser(description="verl model merger") subparsers = parser.add_subparsers(dest="operation", required=True, help="Specify 'merge' or 'test' operation.") base_op_parser = argparse.ArgumentParser(add_help=False) base_op_parser.add_argument( "--backend", type=str, required=True, choices=["fsdp", "megatron"], help="The backend of the model" ) base_op_parser.add_argument("--local_dir", type=str, default=None, help="Path to the saved model checkpoints.") base_op_parser.add_argument( "--tie-word-embedding", action="store_true", help="Whether to tie word embedding weights (currently only Megatron supported)", ) base_op_parser.add_argument("--trust-remote-code", action="store_true", help="Whether to trust remote code") base_op_parser.add_argument( "--is-value-model", action="store_true", help="Whether the model is a value model (currently only Megatron supported)", ) base_op_parser.add_argument( "--use_cpu_initialization", action="store_true", help="Whether to use CPU initialization for the model. This is useful for large models that cannot " "fit into GPU memory during initialization.", ) merge_parser = subparsers.add_parser("merge", parents=[base_op_parser], help="Merge model checkpoints and save.") merge_parser.add_argument( "--target_dir", default="tmp", type=str, help="Directory to save the merged huggingface model" ) merge_parser.add_argument( "--hf_upload_path", default=None, type=str, help="Hugging Face repository ID to upload the model" ) merge_parser.add_argument( "--private", action="store_true", help="Whether to upload the model to a private Hugging Face repository" ) test_parser = subparsers.add_parser( "test", parents=[base_op_parser], help="Test merged model against a reference Hugging Face model" ) test_parser.add_argument( "--test_hf_dir", type=str, required=True, help="Path to the reference Hugging Face model directory for testing" ) args = parser.parse_args() return args @dataclass class ModelMergerConfig: """Configuration for model merger operations. Args: operation (str): Operation type - 'merge' or 'test'. backend (str): Backend type for the model ('fsdp' or 'megatron'). target_dir (Optional[str]): Directory to save the merged huggingface model. Defaults to "tmp". hf_upload_path (Optional[str]): Hugging Face repository ID to upload the model. Defaults to None. private (bool): Whether to upload the model to a private Hugging Face repository. Defaults to False. test_hf_dir (Optional[str]): Path to the reference Hugging Face model directory for testing. Defaults to None. tie_word_embedding (bool): Whether to tie word embedding weights (currently only Megatron supported). Defaults to False. trust_remote_code (bool): Whether to trust remote code. Defaults to False. is_value_model (bool): Whether the model is a value model (currently only Megatron supported). Defaults to False. local_dir (Optional[str]): Path to the saved model checkpoints. Defaults to None. hf_model_config_path (Optional[str]): Path to HuggingFace model configuration files. Defaults to None. hf_upload (bool): Whether to upload to HuggingFace (computed automatically). Not for initialization. 
use_cpu_initialization (bool): Whether to use CPU initialization for large models. Defaults to False. """ operation: str # 'merge' or 'test' backend: str target_dir: Optional[str] = "tmp" hf_upload_path: Optional[str] = None private: bool = False test_hf_dir: Optional[str] = None tie_word_embedding: bool = False trust_remote_code: bool = False is_value_model: bool = False local_dir: Optional[str] = None hf_model_config_path: Optional[str] = None hf_upload: bool = field(init=False) use_cpu_initialization: bool = False def __post_init__(self): self.hf_upload = self.operation == "merge" and bool(self.hf_upload_path) if self.operation == "test": self.target_dir = None self.hf_upload_path = None self.private = False def generate_config_from_args(args: argparse.Namespace) -> ModelMergerConfig: common_config_args = { "operation": args.operation, "backend": args.backend, "tie_word_embedding": args.tie_word_embedding, "trust_remote_code": args.trust_remote_code, "is_value_model": args.is_value_model, "local_dir": args.local_dir, "hf_model_config_path": os.path.join(args.local_dir, "huggingface"), "use_cpu_initialization": args.use_cpu_initialization, } if args.operation == "merge": config = ModelMergerConfig( **common_config_args, target_dir=args.target_dir, hf_upload_path=args.hf_upload_path, private=args.private, test_hf_dir=None, ) os.makedirs(config.target_dir, exist_ok=True) elif args.operation == "test": config = ModelMergerConfig( **common_config_args, test_hf_dir=args.test_hf_dir, # the following args are not used by test operation target_dir=None, hf_upload_path=None, private=False, ) else: raise NotImplementedError(f"Unknown operation: {args.operation}") return config class BaseModelMerger(ABC): """ Abstract base class for merging distributed model checkpoints into HuggingFace format. This class provides common functionality for converting model checkpoints from different distributed training backends (FSDP, Megatron) into standard HuggingFace format that can be easily loaded and used for inference or further training. The merger supports two main operations: - merge: Convert and save checkpoints to HuggingFace format - test: Validate merged checkpoints against a reference model Args: config (ModelMergerConfig): Configuration object containing paths, backend type, and operation parameters. Attributes: config (ModelMergerConfig): The configuration object passed during initialization. hf_model_config_path (str): Path to the HuggingFace model configuration files. model_config (PretrainedConfig): Loaded HuggingFace model configuration. 
""" def __init__(self, config: ModelMergerConfig): self.config = config self.hf_model_config_path = config.hf_model_config_path self.model_config = AutoConfig.from_pretrained( self.hf_model_config_path, trust_remote_code=self.config.trust_remote_code ) def get_transformers_auto_model_class(self): if "ForTokenClassification" in self.model_config.architectures[0]: return AutoModelForTokenClassification elif "ForCausalLM" in self.model_config.architectures[0]: return AutoModelForCausalLM elif "ForConditionalGeneration" in self.model_config.architectures[0]: return AutoModelForVision2Seq raise NotImplementedError(f"Unknown architecture {self.model_config.architectures}") def patch_model_generation_config(self, model): """ The generation_config created from model config may be different to the pretrained model, this may lead to error when generating: https://github.com/volcengine/verl/issues/1246 This function patch the generation_config created from model config to the pretrained model. """ if model.can_generate(): try: model.generation_config = GenerationConfig.from_pretrained(self.hf_model_config_path) except OSError: print( f"Warning: Generation config file not found in {self.hf_model_config_path}, using a " f"generation config created from the model config." ) return model def save_lora_adapter(self, state_dict: dict[str, torch.Tensor]): """ Save lora adapter to safetensors. Returns: lora_path: str, the path to the lora adapter. None if no lora adapter found. Note: This function change the 'state_dict' in place. """ lora_params_names = [name for name in state_dict.keys() if "lora_" in name] if len(lora_params_names) == 0: return None import json from typing import OrderedDict import peft from safetensors.torch import save_file lora_params = OrderedDict() target_modules = set() lora_key = None for name in lora_params_names: lora_key = name.replace(".default.weight", ".weight") target_modules.add(lora_key.split(".")[-3]) lora_params[lora_key] = state_dict.pop(name) lora_rank = min(lora_params[lora_key].shape[0], lora_params[lora_key].shape[1]) peft_dict = { "r": lora_rank, "lora_alpha": 0, # lora_alpha is not set. An error should be raised to inform the user to set it manually. 
"target_modules": list(target_modules), } peft_config = peft.LoraConfig(**peft_dict).to_dict() peft_config["task_type"] = peft_config["task_type"].value if peft_config["task_type"] else None peft_config["peft_type"] = peft_config["peft_type"].value if peft_config["peft_type"] else None peft_config["target_modules"] = list(peft_config["target_modules"]) lora_path = os.path.join(self.config.target_dir, "lora_adapter") os.makedirs(lora_path, exist_ok=True) with open(os.path.join(lora_path, "adapter_config.json"), "w", encoding="utf-8") as f: json.dump(peft_config, f, ensure_ascii=False, indent=4) save_file(lora_params, os.path.join(lora_path, "adapter_model.safetensors")) for name in list(state_dict.keys()): key = ( name.replace("base_model.model.", "") .replace(".base_layer.weight", ".weight") .replace(".base_layer.bias", ".bias") ) state_dict[key] = state_dict.pop(name) return lora_path def save_hf_model_and_tokenizer(self, state_dict: dict[str, torch.Tensor]): auto_model_class = self.get_transformers_auto_model_class() with init_empty_weights(): model = auto_model_class.from_config( self.model_config, torch_dtype=torch.bfloat16, trust_remote_code=self.config.trust_remote_code ) model.to_empty(device="cpu") model = self.patch_model_generation_config(model) lora_path = self.save_lora_adapter(state_dict) if lora_path: print(f"Saving lora adapter to {lora_path}") print(f"Saving model to {self.config.target_dir}") model.save_pretrained(self.config.target_dir, state_dict=state_dict) del state_dict del model processor = hf_processor(self.hf_model_config_path, trust_remote_code=self.config.trust_remote_code) tokenizer = hf_tokenizer(self.hf_model_config_path, trust_remote_code=self.config.trust_remote_code) if processor is not None: print(f"Saving processor to {self.config.target_dir}") processor.save_pretrained(self.config.target_dir) if tokenizer is not None: print(f"Saving tokenizer to {self.config.target_dir}") tokenizer.save_pretrained(self.config.target_dir) def upload_to_huggingface(self): import requests from huggingface_hub import HfApi from huggingface_hub.utils import HfHubHTTPError, RepositoryNotFoundError api = HfApi() try: # Attempt to create repository api.create_repo(repo_id=self.config.hf_upload_path, private=self.config.private, exist_ok=True) except HfHubHTTPError as e: # Handle authentication/API errors if e.response.status_code == 401: raise PermissionError( "Hugging Face authentication failed. Verify your token is valid and has write permissions." ) from e elif e.response.status_code == 404: raise RepositoryNotFoundError(f"Repository path not found: {self.config.hf_upload_path}") from e else: raise ConnectionError(f"Failed to create repository ({e.response.status_code}): {e}") from e except requests.exceptions.ConnectionError as e: raise ConnectionError("Network connection failed. Check your internet connection.") from e try: # Attempt folder upload api.upload_folder(folder_path=self.config.target_dir, repo_id=self.config.hf_upload_path, repo_type="model") except HfHubHTTPError as e: if e.response.status_code == 401: raise PermissionError("Authentication failed during upload. Token may have expired.") from e else: raise RuntimeError(f"Upload failed ({e.response.status_code}): {e}") from e except requests.exceptions.ConnectionError as e: raise ConnectionError("Network interruption during upload. 
Try again with stable connection.") from e except OSError as e: raise FileNotFoundError(f"Local folder error: {self.config.target_dir} - {str(e)}") from e except Exception as e: raise RuntimeError(f"Unexpected error during upload: {str(e)}") from e @abstractmethod def merge_and_save(self): raise NotImplementedError("Subclasses should implement this method") @abstractmethod def cleanup(self): raise NotImplementedError("Subclasses should implement this method to clean up resources if needed") ================================================ FILE: verl_rl/verl/model_merger/fsdp_model_merger.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from concurrent.futures import ThreadPoolExecutor from pathlib import Path import numpy as np import torch from torch.distributed._tensor import Placement, Shard try: # for torch 2.5+ from torch.distributed.tensor import DTensor except ImportError: from torch.distributed._tensor import DTensor from tqdm import tqdm from .base_model_merger import BaseModelMerger class FSDPModelMerger(BaseModelMerger): """ Model merger for FSDP (Fully Sharded Data Parallel) checkpoints. This class handles the conversion of FSDP distributed checkpoints into HuggingFace format. FSDP shards model parameters across multiple processes, and this merger reconstructs the full model by loading and concatenating the sharded parameters from all ranks. The merger supports various FSDP configurations including: - Pure FSDP (single dimension sharding) - FSDP + DDP (data parallel + fully sharded data parallel) - DTensor-based sharding with custom device meshes Key features: - Automatic detection of world size from the FSDP config file - Support for DTensor and non-DTensor checkpoints - Parallel loading of checkpoint shards for efficiency - Validation against reference HuggingFace models Example: To merge FSDP checkpoints: ```python config = ModelMergerConfig( operation="merge", backend="fsdp", local_dir="path/to/fsdp/checkpoints", target_dir="path/to/output" ) merger = FSDPModelMerger(config) merger.merge_and_save() ``` """ def _get_world_size(self) -> int: """ Extract the world size from the FSDP JSON config file.
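The file is expected to contain a top-level "world_size" entry, e.g. {"world_size": 8} (illustrative).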
Returns: int: world size """ config_path = Path(self.config.local_dir) / "fsdp_config.json" if not config_path.exists(): raise FileNotFoundError(f"Config file {config_path} does not exist.") with open(config_path) as f: config = json.load(f) # Extract world size from the config world_size = config.get("world_size", None) if world_size is None: raise ValueError("World size not found in the config file.") return world_size def _load_rank_zero_state_dict(self, world_size: int) -> dict: return torch.load( Path(self.config.local_dir) / f"model_world_size_{world_size}_rank_0.pt", map_location="cpu", weights_only=False, ) def _extract_device_mesh_info(self, state_dict: dict, world_size: int) -> tuple[np.ndarray, tuple[str, ...]]: """ Retrieves sharding information (device_mesh, mesh_dim_names) from a DTensor in the state_dict. If no DTensor is found, infers a simple FSDP mesh based on world_size. """ pivot_key = sorted(list(state_dict.keys()))[0] weight = state_dict[pivot_key] if isinstance(weight, DTensor): # get sharding info device_mesh = weight.device_mesh mesh = device_mesh.mesh mesh_dim_names = device_mesh.mesh_dim_names else: # for non-DTensor mesh = np.array([world_size], dtype=np.int64) mesh_dim_names = ("fsdp",) return mesh, mesh_dim_names def _calculate_shard_configuration( self, mesh: np.ndarray, mesh_dim_names: tuple[str, ...] ) -> tuple[int, tuple[int, ...]]: """Calculates the total number of shards and the shape of the device mesh.""" assert mesh_dim_names in (("fsdp",), ("ddp", "fsdp")), f"Unsupported mesh_dim_names {mesh_dim_names}" if "tp" in mesh_dim_names: # TODO: "tp" is not supported yet due to the above assert total_shards = mesh.shape[-1] * mesh.shape[-2] mesh_shape = (mesh.shape[-2], mesh.shape[-1]) else: total_shards = mesh.shape[-1] mesh_shape = (mesh.shape[-1],) return total_shards, mesh_shape def _merge_by_placement(self, tensors: list[torch.Tensor], placement: Placement) -> torch.Tensor: """Merges a list of tensors based on their DTensor placement""" if placement.is_replicate(): return tensors[0] elif placement.is_partial(): raise NotImplementedError("Partial placement is not supported yet") elif placement.is_shard(): return torch.cat(tensors, dim=placement.dim).contiguous() raise NotImplementedError(f"Unsupported placement: {placement}") def _load_and_merge_state_dicts( self, world_size: int, total_shards: int, mesh_shape: tuple[int, ...], mesh_dim_names: tuple[str, ...] 
) -> dict[str, torch.Tensor]: model_state_dict_lst = [None] * total_shards def process_one_shard(rank: int, model_state_dict_lst: list): model_path = Path(self.config.local_dir) / f"model_world_size_{world_size}_rank_{rank}.pt" state_dict = torch.load(model_path, map_location="cpu", weights_only=False) model_state_dict_lst[rank] = state_dict return state_dict with ThreadPoolExecutor(max_workers=min(32, os.cpu_count())) as executor: futures = [executor.submit(process_one_shard, rank, model_state_dict_lst) for rank in range(total_shards)] for future in tqdm(futures, desc=f"Loading {total_shards} FSDP shards", total=total_shards): future.result() # Merge state dicts from all shards state_dict = {} param_placements: dict[str, list] = {} for key in set(model_state_dict_lst[0].keys()): state_dict[key] = [] for model_state_shard in model_state_dict_lst: # add tensor shard in order of rank to state_dict[key] tensor = model_state_shard.pop(key) if isinstance(tensor, DTensor): state_dict[key].append(tensor._local_tensor.bfloat16()) placements = tuple(tensor.placements) # replicated placement at dp dimension can be discarded if mesh_dim_names[0] in ("dp", "ddp"): placements = placements[1:] if key not in param_placements: param_placements[key] = placements else: assert param_placements[key] == placements else: state_dict[key].append(tensor.bfloat16()) del model_state_dict_lst # Merge tensors for key in sorted(state_dict): if not isinstance(state_dict[key], list): print(f"No need to merge key {key}") continue if key in param_placements: # merge shards placements: tuple[Shard] = param_placements[key] if len(mesh_shape) == 1: # 1-D list, FSDP without TP assert len(placements) == 1 shards = state_dict[key] state_dict[key] = self._merge_by_placement(shards, placements[0]) else: # 2-D list, FSDP + TP raise NotImplementedError("FSDP + TP is not supported yet") else: state_dict[key] = torch.cat(state_dict[key], dim=0) return state_dict def merge_and_save(self): world_size = self._get_world_size() rank_zero_state_dict = self._load_rank_zero_state_dict(world_size) mesh, mesh_dim_names = self._extract_device_mesh_info(rank_zero_state_dict, world_size) print(f"Got device mesh {mesh}, mesh_dim_names {mesh_dim_names}") total_shards, mesh_shape = self._calculate_shard_configuration(mesh, mesh_dim_names) print(f"Processing model shards with {total_shards} {mesh_shape} in total") merged_state_dict = self._load_and_merge_state_dicts(world_size, total_shards, mesh_shape, mesh_dim_names) if self.config.operation == "test": if not self.config.test_hf_dir: raise ValueError("test_hf_dir must be provided for test operation") self._validate_state_dict(merged_state_dict) elif self.config.operation == "merge": self.save_hf_model_and_tokenizer(merged_state_dict) if self.config.hf_upload: self.upload_to_huggingface() else: raise ValueError(f"Unknown operation: {self.config.operation}") def _validate_state_dict(self, state_dict: dict[str, torch.Tensor]): auto_model_class = self.get_transformers_auto_model_class() hf_model = auto_model_class.from_pretrained(self.config.test_hf_dir, torch_dtype=torch.bfloat16) hf_state_dict = hf_model.state_dict() del hf_model hf_model_keys = set(hf_state_dict.keys()) collected_keys = set(state_dict.keys()) missing_keys = hf_model_keys - collected_keys assert len(missing_keys) == 0, f"Missing keys in collected state dict: {list(sorted(missing_keys))}" extra_keys = collected_keys - hf_model_keys assert len(extra_keys) == 0, f"Extra keys in collected state dict: {list(sorted(extra_keys))}" for key in 
hf_model_keys: hf_shape = hf_state_dict[key].shape collected_shape = state_dict[key].shape assert hf_shape == collected_shape, ( f"Shape mismatch for key '{key}': original {hf_shape} vs collected {collected_shape}" ) hf_dtype = hf_state_dict[key].dtype collected_dtype = state_dict[key].dtype assert hf_dtype == collected_dtype, ( f"Dtype mismatch for key '{key}': original {hf_dtype} vs collected {collected_dtype}" ) torch.testing.assert_close(hf_state_dict[key], state_dict[key], atol=1e-6, rtol=1e-6) print("FSDP checks passed: The merged state_dict matches the hf model saved by FSDPCheckpointManager.") def cleanup(self): """Clean up temporary files if needed.""" # FSDP merger does not create temporary files, so no cleanup is needed. pass ================================================ FILE: verl_rl/verl/model_merger/megatron_model_merger.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import warnings from contextlib import contextmanager from pathlib import Path from typing import Any, Callable, ContextManager import numpy as np import torch import torch.distributed as dist from accelerate import init_empty_weights from megatron.core import mpu from megatron.core.models.gpt.gpt_model import ModelType from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed from safetensors.torch import load_file from transformers import ( AutoConfig, PretrainedConfig, ) from verl.models.mcore import hf_to_mcore_config from verl.utils.device import get_device_name, get_nccl_backend, get_torch_device from verl.utils.megatron.dist_checkpointing import load_dist_checkpointing from verl.utils.megatron_utils import get_model from verl.utils.tokenizer import hf_processor, hf_tokenizer from .base_model_merger import BaseModelMerger, ModelMergerConfig @contextmanager def noop_context() -> Any: yield def get_dynamic_pipeline_shards(layer_num: int, pp_size: int) -> list[int]: """Calculate the pipeline sharding configuration for Megatron-LM. Args: layer_num: Total number of layers in the model. pp_size: Number of pipeline parallel ranks. Returns: The number of layers assigned to each pipeline-parallel rank, keeping the sharding as uniform as possible.
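Example (illustrative): get_dynamic_pipeline_shards(30, 4) returns [7, 8, 8, 7], assigning 8 layers to each middle rank and 7 to the first and last ranks.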
""" if layer_num < pp_size: raise ValueError(f"layer_num {layer_num} must be greater than pp_size {pp_size}.") if pp_size < 1: raise ValueError(f"pp_size must be at least 1, got {pp_size}.") if pp_size == 1: return [layer_num] if pp_size == 2: return [ layer_num // 2, layer_num - layer_num // 2, ] middle_size = pp_size - 2 shards_strategy = [] for middle_layer_num in range(layer_num): first_last_layer_num = layer_num - middle_layer_num * middle_size first_layer_num = first_last_layer_num // 2 last_layer_num = first_last_layer_num - first_last_layer_num // 2 if 0 < first_layer_num <= middle_layer_num and 0 < last_layer_num <= middle_layer_num: shards_strategy.append( ( [first_layer_num] + [middle_layer_num] * middle_size + [last_layer_num], abs(first_layer_num - middle_layer_num), ) ) # sort by diff of layer_num, to make it as uniform as possible res = sorted(shards_strategy, key=lambda x: x[1])[0][0] assert sum(res) == layer_num, f"sum(res)={sum(res)} != layer_num={layer_num}, pp_size={pp_size}" return res class MegatronModelMerger(BaseModelMerger): """ Model merger for Megatron-LM distributed checkpoints. This class handles the conversion of Megatron-LM distributed checkpoints into HuggingFace format. Megatron-LM uses tensor parallelism, pipeline parallelism, and data parallelism to distribute large language models across multiple GPUs. This merger reconstructs the full model by loading distributed checkpoints and applying the necessary transformations. Key features: - Support for tensor parallel, pipeline parallel, and data parallel configurations - Automatic parameter name mapping from Megatron to HuggingFace conventions - Handling of QKV and gate-up tensor splitting/merging - Support for tied word embeddings and value models - Integration with Megatron's distributed checkpointing system The merger handles various model architectures and configurations: - Standard transformer models (GPT-style) - Models with tied word embeddings - Value models for reinforcement learning - Multi-layer attention (MLA) architectures - Mixture of Experts (MoE) models Args: config (ModelMergerConfig): Configuration object with Megatron-specific settings including tie_word_embedding and is_value_model flags. 
Example: To merge Megatron checkpoints: ```python config = ModelMergerConfig( operation="merge", backend="megatron", local_dir="path/to/megatron/checkpoints", target_dir="path/to/output", tie_word_embedding=True ) merger = MegatronModelMerger(config) merger.merge_and_save() ``` """ def __init__(self, config: ModelMergerConfig): super().__init__(config) # Currently we use only 1 rank to merge the dist_ckpt, we will move to multi-process save shortly afterwards if "WORLD_SIZE" not in os.environ: os.environ["RANK"] = "0" os.environ["LOCAL_RANK"] = "0" os.environ["WORLD_SIZE"] = "1" os.environ["MASTER_ADDR"] = "localhost" os.environ["MASTER_PORT"] = "12355" torch.distributed.init_process_group(get_nccl_backend()) self.rank = torch.distributed.get_rank() self.world_size = torch.distributed.get_world_size() local_rank = os.environ.get("LOCAL_RANK", 0) get_torch_device().set_device(f"{get_device_name()}:{local_rank}") mpu.initialize_model_parallel( tensor_model_parallel_size=1, pipeline_model_parallel_size=self.world_size, virtual_pipeline_model_parallel_size=None, context_parallel_size=1, expert_model_parallel_size=1, ) model_parallel_cuda_manual_seed(0) self.hf_config = AutoConfig.from_pretrained( self.config.hf_model_config_path, trust_remote_code=self.config.trust_remote_code ) print(self.hf_config, flush=True) self.params_mapping = { # megatron core gpt model name, huggingface model name # NOTICE: It's a little bit tricky, when 2 keys have the same prefix, we need to make sure the # longer key within the containing relationship is processed first. "embedding.word_embeddings": "model.embed_tokens", # input layer norm for dpskv3 "input_layernorm.weight": "input_layernorm.weight", "input_layernorm.bias": "input_layernorm.bias", # attn "self_attention.linear_qkv.layer_norm_weight": "input_layernorm.weight", "self_attention.linear_qkv.layer_norm_bias": "input_layernorm.bias", "self_attention.linear_qkv": "self_attn.qkv_proj", "self_attention.q_layernorm": "self_attn.q_norm", "self_attention.k_layernorm": "self_attn.k_norm", "self_attention.linear_proj": "self_attn.o_proj", # mla "self_attention.linear_q_proj": "self_attn.q_proj", "self_attention.linear_q_down_proj": "self_attn.q_a_proj", "self_attention.linear_q_up_proj.layer_norm_weight": "self_attn.q_a_layernorm.weight", "self_attention.linear_q_up_proj": "self_attn.q_b_proj", "self_attention.linear_kv_down_proj": "self_attn.kv_a_proj_with_mqa", "self_attention.linear_kv_up_proj.layer_norm_weight": "self_attn.kv_a_layernorm.weight", "self_attention.linear_kv_up_proj": "self_attn.kv_b_proj", # mlp "pre_mlp_layernorm": "post_attention_layernorm", "mlp.linear_fc1.layer_norm_weight": "post_attention_layernorm.weight", "mlp.linear_fc1.layer_norm_bias": "post_attention_layernorm.bias", "mlp.linear_fc1": "mlp.gate_up_proj", "mlp.linear_fc2": "mlp.down_proj", # moe "mlp.router.expert_bias": "mlp.gate.e_score_correction_bias", "mlp.router": "mlp.gate", "mlp.shared_experts.linear_fc1": "mlp.shared_experts.gate_up_proj", "mlp.shared_experts.linear_fc2": "mlp.shared_experts.down_proj", "linear_fc1": "gate_up_proj", "linear_fc2": "down_proj", # output "final_layernorm": "norm", "output_layer": "lm_head", } if "Qwen2MoeForCausalLM" in self.hf_config.architectures: self.params_mapping["mlp.shared_experts.linear_fc1"] = "mlp.shared_expert.gate_up_proj" self.params_mapping["mlp.shared_experts.linear_fc2"] = "mlp.shared_expert.down_proj" self.params_mapping["mlp.shared_experts.gate_weight"] = "mlp.shared_expert_gate.weight" def _load_state_dicts(self, 
model_ckpt_path: str) -> list[dict[str, Any]]: """ Use Megatron dist_checkpointing to load the model state dicts from the checkpoint directory. Args: model_ckpt_path (str): Path to the model checkpoint directory. Returns: A list of state dicts (one per virtual pipeline stage) containing the model parameters. """ # compute pipeline sharding and build the mcore transformer config self.pipeline_shards = get_dynamic_pipeline_shards(self.hf_config.num_hidden_layers, self.world_size) print(f"Pipeline shards: {self.pipeline_shards}, total layers: {sum(self.pipeline_shards)}") tf_config = hf_to_mcore_config( self.hf_config, torch.bfloat16, num_layers_in_first_pipeline_stage=self.pipeline_shards[0] if len(self.pipeline_shards) > 1 else None, num_layers_in_last_pipeline_stage=self.pipeline_shards[-1] if len(self.pipeline_shards) > 2 else None, ) tf_config.use_cpu_initialization = self.config.use_cpu_initialization tie_word_embeddings = getattr(self.hf_config, "tie_word_embeddings", False) # init megatron model def megatron_model_provider(pre_process, post_process): from verl.models.mcore import init_mcore_model parallel_model = init_mcore_model( tf_config, self.hf_config, pre_process, post_process, share_embeddings_and_output_weights=tie_word_embeddings, value=False, ) return parallel_model context: Callable[..., ContextManager] = ( init_empty_weights if self.config.use_cpu_initialization else noop_context ) with context(): whole_model = get_model( model_provider_func=megatron_model_provider, model_type=ModelType.encoder_or_decoder, wrap_with_ddp=False, transformer_config=tf_config, ) if self.config.use_cpu_initialization: # convert meta device to empty tensor so it can use `copy_` function whole_model[0].module = whole_model[0].module.to_empty(device="cpu") # load state dicts sharded_state_dict = {} for vpp_rank, model in enumerate(whole_model): key = f"model{vpp_rank}" if len(whole_model) > 1 else "model" mpu.set_virtual_pipeline_model_parallel_rank(vpp_rank) sharded_state_dict[key] = model.sharded_state_dict() model_state_dict = load_dist_checkpointing(sharded_state_dict, model_ckpt_path) model_state_dict_list = [] for vpp_rank, model in enumerate(whole_model): key = f"model{vpp_rank}" if len(whole_model) > 1 else "model" mpu.set_virtual_pipeline_model_parallel_rank(vpp_rank) model_state_dict_list.append(model_state_dict[key]) return model_state_dict_list def _check_megatron_state_key(self, key: str) -> None: """ Check that the key is a valid Megatron state key. The model merger currently only supports keys that start with "decoder", "embedding" or "output_layer" in TransformerLayer; keys must not start with "model.". """ if key.startswith("model."): raise ValueError( f"Invalid key {key} in Megatron state_dict. Expected keys to start with " f"'decoder/embedding/output_layer' in TransformerLayer." ) skip_checking_keys = ["embedding.word_embeddings", "output_layer"] for skip_key in skip_checking_keys: if skip_key in key: print(f"skip checking key {key}") return # Exclude extra state keys if not key.startswith("decoder"): raise ValueError( f"Invalid key {key} in Megatron state_dict. Expected keys to start with 'decoder' in TransformerLayer." ) def _split_tensors( self, key: str, tensor: torch.Tensor, config: PretrainedConfig, is_value_model: bool = False ) -> list[torch.Tensor]: """ Splits a tensor into multiple tensors based on the name. This is used to handle qkv and gate_up tensors.
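Worked example (illustrative): with num_attention_heads=8, num_key_value_heads=4 and head_dim d, the fused linear_qkv weight has 16*d rows laid out as 4 query groups of [2 q-heads, 1 k-head, 1 v-head]; chunking into the 4 groups and splitting each chunk with sizes [2*d, d, d] recovers q (8*d rows), k (4*d rows) and v (4*d rows).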
""" if "linear_fc1.weight" in key: # if the tensor is gate and proj gate_lst = [] up_lst = [] gate, up = tensor.chunk(2) gate_lst.append(gate) up_lst.append(up) gate = torch.cat(gate_lst, dim=0) up = torch.cat(up_lst, dim=0) return [gate, up] elif "self_attention.linear_qkv." in key and "layer_norm" not in key: # if the tensor is qkv, for each param on tp, split into q, k, v # concat q, k, v separately. q_lst, k_lst, v_lst = [], [], [] assert config.num_attention_heads % config.num_key_value_heads == 0 num_q_per_kv = config.num_attention_heads // config.num_key_value_heads assert tensor.shape[0] % (num_q_per_kv + 2) == 0, ( f"Tensor shape {tensor.shape} is not divisible by {num_q_per_kv + 2}" ) kv_size = tensor.shape[0] // (num_q_per_kv + 2) split_size = [kv_size * num_q_per_kv, kv_size, kv_size] num_query_groups_per_partition = config.num_key_value_heads for chunk in tensor.chunk(num_query_groups_per_partition): split_size = [ kv_size * num_q_per_kv // num_query_groups_per_partition, kv_size // num_query_groups_per_partition, kv_size // num_query_groups_per_partition, ] q, k, v = chunk.split(split_size) q_lst.append(q) k_lst.append(k) v_lst.append(v) return [torch.cat(q_lst, dim=0), torch.cat(k_lst, dim=0), torch.cat(v_lst, dim=0)] else: return [tensor] def _merge_state_dicts(self, model_state_dict_list: list[dict[str, Any]]) -> dict[str, torch.Tensor]: state_dict = {} layers_cum = 0 if self.world_size > 1: pipeline_cumsum = np.cumsum(self.pipeline_shards) layers_cum = 0 if self.rank == 0 else pipeline_cumsum[self.rank - 1] print(f"{layers_cum=}") for model_state_dict in model_state_dict_list: layers_handled = 0 keys = model_state_dict.keys() for key in keys: if "extra_state" in key: continue if self.config.tie_word_embedding and ("output_layer" in key): print("skip lm_head and reward_head loading because of tie_word_embeddings") continue self._check_megatron_state_key(key) hf_name = self._replace_name(key, self.params_mapping) assert hf_name is not None, f"Failed to convert layer name [{key}] from megatron to huggingface." if "model.layers." in hf_name: local_layer_no = int(hf_name.split(".")[2]) layers_handled = max(local_layer_no, layers_handled) global_layer_no = local_layer_no + layers_cum new_key_list = hf_name.split(".") new_key_list[2] = str(global_layer_no) hf_name = ".".join(new_key_list) else: warnings.warn(f"hf_name {hf_name} will not be fixed with layer number", stacklevel=2) if "mlp.experts." 
in hf_name and ".weight" in hf_name: name_prefix, expert_id = hf_name.split(".weight") for proj in ["gate_up", "down"]: if f"{proj}_proj" in hf_name: hf_name = hf_name.replace( f"mlp.experts.{proj}_proj.weight{expert_id}", f"mlp.experts.{expert_id}.{proj}_proj.weight", ) tensor = model_state_dict[key] split_tensor = self._split_tensors( key, tensor, self.hf_config, is_value_model=self.config.is_value_model ) if len(split_tensor) == 1: state_dict[hf_name] = split_tensor[0] elif len(split_tensor) == 3: # split qkv for n, d in zip(["q", "k", "v"], split_tensor, strict=True): state_dict[hf_name.replace("qkv", n)] = d elif len(split_tensor) == 2: # split gate up state_dict[hf_name.replace("gate_up", "gate")] = split_tensor[0] state_dict[hf_name.replace("gate_up", "up")] = split_tensor[1] shape_info = ( split_tensor.shape if isinstance(split_tensor, torch.Tensor) else [t.shape for t in split_tensor] ) print(f"converted {key} to {hf_name} with shape {shape_info}") layers_cum += layers_handled + 1 # zero based return state_dict def save_hf_model_and_tokenizer(self, merged_state_dict): if self.world_size == 1: return super().save_hf_model_and_tokenizer(merged_state_dict) from safetensors.torch import save_file layer_num = self.hf_config.num_hidden_layers # FIXME: make configurable saves_per_layer = 1 if layer_num < 30 else 2 saves_total = saves_per_layer * layer_num saves_indexes = {} # calculate the layer start index and key chunks layer_this_rank = self.pipeline_shards[self.rank] pipeline_cumsum = np.cumsum(self.pipeline_shards) layer_start = 0 if self.rank == 0 else pipeline_cumsum[self.rank - 1] keys = list(merged_state_dict.keys()) keys_chunk = np.array_split(np.array(keys), layer_this_rank * saves_per_layer) numel = 0 assert len(keys_chunk) == layer_this_rank * saves_per_layer, ( f"Expected {len(keys_chunk)} chunks, but got {layer_this_rank * saves_per_layer} for rank {self.rank}." 
) # save to model shards manually target_dir = Path(self.config.target_dir) for i, keys in enumerate(keys_chunk): sd_to_save = {k: merged_state_dict[k] for k in keys} numel += sum([sd_to_save[i].numel() for i in sd_to_save]) save_idx = layer_start * saves_per_layer + i save_path = target_dir / f"model-{save_idx + 1:05d}-of-{saves_total:05d}.safetensors" save_file(sd_to_save, save_path) for k in keys: saves_indexes[k] = str(save_path.name) tensor = torch.tensor([numel]).to(get_device_name()) dist.all_reduce(tensor, op=dist.ReduceOp.SUM) numel = tensor.cpu().item() all_save_indexes = [{} for _ in range(self.world_size)] dist.all_gather_object(all_save_indexes, saves_indexes) saves_indexes = {k: v for i in all_save_indexes for k, v in i.items()} if self.rank == 0: with open(target_dir / "model.safetensors.index.json", "w") as f: json.dump( { "metadata": { "total_size": numel, }, "weight_map": saves_indexes, }, f, indent=4, ) print(f"model saved to {target_dir} with {numel=}") self.model_config.save_pretrained(self.config.target_dir) processor = hf_processor(self.hf_model_config_path, trust_remote_code=self.config.trust_remote_code) tokenizer = hf_tokenizer(self.hf_model_config_path, trust_remote_code=self.config.trust_remote_code) if processor is not None: print(f"Saving processor to {self.config.target_dir}") processor.save_pretrained(self.config.target_dir) if tokenizer is not None: print(f"Saving tokenizer to {self.config.target_dir}") tokenizer.save_pretrained(self.config.target_dir) def merge_and_save(self): from verl.utils.megatron_utils import get_dist_checkpoint_path model_ckpt_path = get_dist_checkpoint_path(self.config.local_dir) model_state_dict = self._load_state_dicts(model_ckpt_path) merged_state_dict = self._merge_state_dicts(model_state_dict) del model_state_dict if self.config.operation == "test": if not self.config.test_hf_dir: raise ValueError("test_hf_dir must be provided for test operation") self._validate_state_dict(merged_state_dict) elif self.config.operation == "merge": self.save_hf_model_and_tokenizer(merged_state_dict) if self.config.hf_upload: self.upload_to_huggingface() else: raise ValueError(f"Unknown operation: {self.config.operation}") def _validate_state_dict(self, state_dict: dict[str, torch.Tensor]): """ Compares the merged Megatron state_dict against a reference safetensors model. Applies necessary name mappings from Megatron to Hugging Face conventions using _replace_name. 
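For example (illustrative), _replace_name maps the Megatron key "decoder.layers.0.self_attention.linear_proj.weight" to the Hugging Face key "model.layers.0.self_attn.o_proj.weight".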
""" ref_state_dict = load_file(Path(self.config.test_hf_dir) / "model.safetensors") for name, loaded_weight in state_dict.items(): # name = self._replace_name(original_name, self.params_mapping) if not name or name.endswith(".bias") and name not in ref_state_dict: continue if "rotary_emb.inv_freq" in name: continue if "lm_head.weight" in name: if self.config.is_value_model or self.config.tie_word_embedding: continue if name not in ref_state_dict: raise RuntimeError(f"key: {name} not exist in state_dict") param = ref_state_dict[name] assert loaded_weight.dtype == param.dtype torch.testing.assert_close(loaded_weight.to("cpu"), param, atol=1e-2, rtol=5e-2) def _replace_name(self, megatron_name: str, name_mapping: dict[str, str]) -> str: for m_name, v_name in name_mapping.items(): if m_name not in megatron_name: continue megatron_name = megatron_name.replace("decoder", "model") param_name = megatron_name.replace(m_name, v_name) return param_name return None # Return None if no mapping found def cleanup(self): torch.distributed.destroy_process_group() ================================================ FILE: verl_rl/verl/models/README.md ================================================ # Models Common modelzoo such as huggingface/transformers stuggles when using Pytorch native model parallelism. Following the design principle of vLLM, we keep a simple, parallelizable, highly-optimized with packed inputs in verl. ## Adding a New Huggingface Model ### Step 1: Copy the model file from HF to verl - Add a new file under verl/models/hf - Copy ONLY the model file from huggingface/transformers/models to verl/models/hf ### Step 2: Modify the model file to use packed inputs - Remove all the code related to inference (kv cache) - Modify the inputs to include only - input_ids (total_nnz,) - cu_seqlens (total_nnz + 1,) - max_seqlen_in_batch: int - Note that this requires using flash attention with causal mask. ### Step 2.5: Add tests - Add a test to compare this version and the huggingface version - Following the infrastructure and add tests to tests/models/hf ### Step 3: Add a function to apply tensor parallelism - Please follow - https://pytorch.org/docs/stable/distributed.tensor.parallel.html - https://pytorch.org/tutorials/intermediate/TP_tutorial.html - General comments - Tensor Parallelism in native Pytorch is NOT auto-parallelism. The way it works is to specify how model parameters and input/output reshards using configs. These configs are then registered as hooks to perform input/output resharding before/after model forward. ### Step 4: Add a function to apply data parallelism - Please use FSDP2 APIs - See demo here https://github.com/pytorch/torchtitan/blob/main/torchtitan/parallelisms/parallelize_llama.py#L413 ### Step 5: Add a function to apply pipeline parallelism - Comes in Pytorch 2.4 - Currently only in alpha in nightly version - Check torchtitan for more details ================================================ FILE: verl_rl/verl/models/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: verl_rl/verl/models/llama/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: verl_rl/verl/models/llama/megatron/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .modeling_llama_megatron import ( ParallelLlamaForCausalLM, # rmpad with megatron ParallelLlamaForCausalLMRmPad, # rmpad with megatron and pipeline parallelism ParallelLlamaForCausalLMRmPadPP, ParallelLlamaForValueRmPad, ParallelLlamaForValueRmPadPP, # original model with megatron ParallelLlamaModel, ) __all__ = [ "ParallelLlamaForCausalLM", "ParallelLlamaForCausalLMRmPad", "ParallelLlamaForCausalLMRmPadPP", "ParallelLlamaForValueRmPad", "ParallelLlamaForValueRmPadPP", "ParallelLlamaModel", ] ================================================ FILE: verl_rl/verl/models/llama/megatron/checkpoint_utils/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: verl_rl/verl/models/llama/megatron/checkpoint_utils/llama_loader.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. import time import torch import torch.distributed as dist from verl.utils.device import get_device_id, get_torch_device def _megatron_calc_layer_map(config): """Calculate the mapping of global layer_idx to local layer_idx Returns: layer_map (Dict: int -> tuple(int, int, int)): mapping from the global layer index to a tuple of (pp_rank, virtual_pp_rank, layer_idx inside model) """ from megatron.core import mpu print(f"get megatron data parallel size: {mpu.get_data_parallel_world_size()}") pp_size = mpu.get_pipeline_model_parallel_world_size() virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1 layer_map = dict() num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers for pp_rank_idx in range(pp_size): for virtual_pp_rank_idx in range(virtual_pp_size): layer_offset = ( virtual_pp_rank_idx * (config.num_hidden_layers // virtual_pp_size) + pp_rank_idx * num_layers_per_model ) for layer_idx in range(num_layers_per_model): layer_map[layer_offset + layer_idx] = ( pp_rank_idx, virtual_pp_rank_idx, layer_idx, ) return layer_map def load_state_dict_to_megatron_llama( state_dict, wrapped_models, config, params_dtype, is_value_model=False, tie_word_embeddings=False ): """Load merged state_dict to sharded Megatron module in training.""" from megatron.core import DistributedDataParallel as LocalDDP from megatron.core import mpu from megatron.core.transformer.module import Float16Module from torch.nn.parallel import DistributedDataParallel as torchDDP from verl.utils.logger import print_rank_0 from verl.utils.megatron_utils import unwrap_model start_time = time.time() def _get_gpt_model(model): return model def fetch_params(module): # broadcast params from the data-parallel source rank (torch.distributed has no `fetch`) for param in module.parameters(): torch.distributed.broadcast( param.data, src=mpu.get_data_parallel_src_rank(), group=mpu.get_data_parallel_group() ) dp_rank = mpu.get_data_parallel_rank() pp_rank = mpu.get_pipeline_model_parallel_rank() pp_size = mpu.get_pipeline_model_parallel_world_size() virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1 mp_group = mpu.get_model_parallel_group() if torch.distributed.get_rank() == 0: assert mp_group.rank() == 0, f"mp_rank:[{mp_group.rank()}] != 0 on rank #0" assert pp_rank == 0, f"pp_rank:[{pp_rank}] != 0 on rank #0" assert dp_rank == 0, f"dp_rank:[{dp_rank}] != 0 on rank #0" if not isinstance(wrapped_models, list | tuple): wrapped_models = list(wrapped_models) assert len(wrapped_models) == virtual_pp_size num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers, ( f"num_layers_per_model: {num_layers_per_model} * pp_size: {pp_size} * virtual_pp_size " f"{virtual_pp_size} != config.num_hidden_layers: {config.num_hidden_layers}" ) models = [None] * len(wrapped_models) for i, wrapped_model in enumerate(wrapped_models): models[i] = unwrap_model(wrapped_model, (torchDDP, LocalDDP, Float16Module)) gpt_model_module = _get_gpt_model(models[i]) assert len(gpt_model_module.model.layers) == num_layers_per_model def _fetch_tensor(tensor, name) -> torch.Tensor: """fetch tensor""" nonlocal state_dict if tensor is not None: tensor.data.copy_(state_dict[name]) def _fetch_tp_shard_tensor_vocab(tensor, name, chunk_dim=0, mutate_func=None) -> torch.Tensor: """fetch tensor in tp shards""" nonlocal state_dict
tp_rank = mpu.get_tensor_model_parallel_rank() tp_size = mpu.get_tensor_model_parallel_world_size() if name in state_dict: full_weight = state_dict[name] if mutate_func is not None: full_weight = mutate_func(full_weight) tensor_chunk = torch.chunk(full_weight, tp_size, dim=chunk_dim) if tensor is not None: tensor.data.copy_(tensor_chunk[tp_rank]) else: print(f"tp_shard tensor:[{name}] not in state_dict, skip loading") def _fetch_tp_shard_tensor(tensor, name, chunk_dim=0, mutate_func=None) -> torch.Tensor: """fetch tensor in tp shards""" nonlocal state_dict tp_rank = mpu.get_tensor_model_parallel_rank() tp_size = mpu.get_tensor_model_parallel_world_size() if name in state_dict: full_weight = state_dict[name] if mutate_func is not None: full_weight = mutate_func(full_weight) tensor_chunk = torch.chunk(full_weight, tp_size, dim=chunk_dim) if tensor is not None: tensor.data.copy_(tensor_chunk[tp_rank]) else: print(f"tp_shard tensor:[{name}] not in state_dict, skip loading") def _fetch_tp_shard_tensor_gate_up(tensor, gate_name, up_name) -> torch.Tensor: """fetch gate_up tensor in tp shards""" nonlocal state_dict nonlocal mp_group tp_rank = mpu.get_tensor_model_parallel_rank() tp_size = mpu.get_tensor_model_parallel_world_size() if gate_name in state_dict and up_name in state_dict: gate_weight = state_dict[gate_name] up_weight = state_dict[up_name] new_gate_up_weight = torch.empty( config.intermediate_size * 2, config.hidden_size, dtype=params_dtype, device=get_device_id() ) for i in range(tp_size): intermediate_size_tp = config.intermediate_size // tp_size gate_weight_tp = gate_weight[i * intermediate_size_tp : (i + 1) * intermediate_size_tp] up_weight_tp = up_weight[i * intermediate_size_tp : (i + 1) * intermediate_size_tp] new_gate_up_weight[intermediate_size_tp * 2 * i : intermediate_size_tp * 2 * (i + 1)].copy_( torch.cat([gate_weight_tp, up_weight_tp], dim=0) ) tensor_chunk = torch.chunk(new_gate_up_weight, tp_size, dim=0) if tensor is not None: tensor.data.copy_(tensor_chunk[tp_rank]) else: print(f"tp_shard tensor:[{gate_name}, {up_name}] not in state_dict, skip loading") def _fetch_tp_shard_tensor_qkv(tensor, q_name, k_name, v_name) -> torch.Tensor: """fetch tensor in tp shards across mp_group""" nonlocal state_dict nonlocal mp_group tp_rank = mpu.get_tensor_model_parallel_rank() tp_size = mpu.get_tensor_model_parallel_world_size() assert q_name in state_dict and k_name in state_dict and v_name in state_dict full_weight_q = state_dict[q_name] full_weight_k = state_dict[k_name] full_weight_v = state_dict[v_name] hidden_size_per_head = config.hidden_size // config.num_attention_heads if config.num_key_value_heads >= tp_size: q_size_tp = config.hidden_size // tp_size kv_size_tp = hidden_size_per_head * config.num_key_value_heads // tp_size total_size = q_size_tp + 2 * kv_size_tp new_weight_qkv = torch.empty( total_size * tp_size, config.hidden_size, dtype=params_dtype, device=get_device_id() ) for i in range(tp_size): q_part = full_weight_q[i * q_size_tp : (i + 1) * q_size_tp] k_part = full_weight_k[i * kv_size_tp : (i + 1) * kv_size_tp] v_part = full_weight_v[i * kv_size_tp : (i + 1) * kv_size_tp] new_weight_qkv[i * total_size : (i + 1) * total_size].copy_(torch.cat([q_part, k_part, v_part], dim=0)) else: q_size_tp = config.hidden_size // tp_size kv_size_tp = hidden_size_per_head total_size = q_size_tp + 2 * kv_size_tp new_weight_qkv = torch.empty( total_size * tp_size, config.hidden_size, dtype=params_dtype, device=get_device_id() ) for i in range(tp_size): q_part = full_weight_q[i * 
q_size_tp : (i + 1) * q_size_tp]
                start_idx = i * config.num_key_value_heads // tp_size * hidden_size_per_head
                end_idx = (i * config.num_key_value_heads // tp_size + 1) * hidden_size_per_head
                k_part = full_weight_k[start_idx:end_idx]
                v_part = full_weight_v[start_idx:end_idx]
                new_weight_qkv[i * total_size : (i + 1) * total_size].copy_(
                    torch.cat([q_part, k_part, v_part], dim=0)
                )

        tensor_chunk = torch.chunk(new_weight_qkv, tp_size, dim=0)
        if tensor is not None:
            tensor.data.copy_(tensor_chunk[tp_rank])

    # Embeddings
    # -------------------
    print_rank_0("loading embeddings...")
    gpt_model_module = _get_gpt_model(models[0])
    embed_tokens_weight = None
    if pp_rank == 0:
        embed_tokens_weight = gpt_model_module.model.embed_tokens.weight
    _fetch_tp_shard_tensor_vocab(embed_tokens_weight, "model.embed_tokens.weight")

    # Transformer layers
    # -------------------
    layer_map = _megatron_calc_layer_map(config)
    pp_rank = mpu.get_pipeline_model_parallel_rank()
    pp_size = mpu.get_pipeline_model_parallel_world_size()
    num_layer_per_pp = config.num_hidden_layers // pp_size
    vpp_size = mpu.get_virtual_pipeline_model_parallel_world_size()

    layer_list = []
    if vpp_size is not None:
        for vpp_rank in range(vpp_size):
            num_layer_vpp_chunk = num_layer_per_pp // vpp_size
            num_layer_this_model = num_layer_vpp_chunk
            offset = vpp_rank * (config.num_hidden_layers // mpu.get_virtual_pipeline_model_parallel_world_size()) + (
                mpu.get_pipeline_model_parallel_rank() * num_layer_vpp_chunk
            )
            layer_list.extend(list(range(offset, offset + num_layer_this_model)))
    else:
        num_layer_this_model = num_layer_per_pp
        offset = pp_rank * num_layer_per_pp
        layer_list.extend(list(range(offset, offset + num_layer_this_model)))

    for layer in layer_list:
        print_rank_0(f"loading layer #{layer}...")
        layer_name = f"model.layers.{layer}"
        dst_pp_rank, dst_virtual_pp_rank, dst_layer_idx = layer_map[layer]

        gpt_model_module = _get_gpt_model(models[dst_virtual_pp_rank])
        sync_layer = gpt_model_module.model.layers[dst_layer_idx]

        _fetch_tensor(
            sync_layer.input_layernorm.weight if dst_pp_rank == pp_rank else None,
            f"{layer_name}.input_layernorm.weight",
        )

        _fetch_tp_shard_tensor_qkv(
            sync_layer.self_attn.qkv_proj.weight if dst_pp_rank == pp_rank else None,
            f"{layer_name}.self_attn.q_proj.weight",
            f"{layer_name}.self_attn.k_proj.weight",
            f"{layer_name}.self_attn.v_proj.weight",
        )

        _fetch_tp_shard_tensor(
            sync_layer.self_attn.o_proj.weight if dst_pp_rank == pp_rank else None,
            f"{layer_name}.self_attn.o_proj.weight",
            chunk_dim=1,
        )

        _fetch_tensor(
            sync_layer.post_attention_layernorm.weight if dst_pp_rank == pp_rank else None,
            f"{layer_name}.post_attention_layernorm.weight",
        )

        _fetch_tp_shard_tensor_gate_up(
            sync_layer.mlp.gate_up_proj.weight if dst_pp_rank == pp_rank else None,
            f"{layer_name}.mlp.gate_proj.weight",
            f"{layer_name}.mlp.up_proj.weight",
        )

        _fetch_tp_shard_tensor(
            sync_layer.mlp.down_proj.weight if dst_pp_rank == pp_rank else None,
            f"{layer_name}.mlp.down_proj.weight",
            chunk_dim=1,
        )

    # Final Layernorm
    # -------------------
    print_rank_0("loading final layernorm...")
    gpt_model_module = _get_gpt_model(models[-1])
    _fetch_tensor(
        getattr(gpt_model_module.model.norm, "weight", None),
        "model.norm.weight",
    )

    print_rank_0("loading lm_head...")
    if pp_rank + 1 == pp_size:
        lm_head_weight = gpt_model_module.lm_head.weight

        if is_value_model:
            if "lm_head.weight" in state_dict and state_dict["lm_head.weight"].shape[0] == 1:
                _fetch_tensor(lm_head_weight, "lm_head.weight")
                print_rank_0("load lm_head weight")
            elif "reward_head.weight" in state_dict and state_dict["reward_head.weight"].shape[0] == 1:
                _fetch_tensor(lm_head_weight, "reward_head.weight")
                print_rank_0("load lm_head from value_head weight")
            else:
                _fetch_tensor(None, "lm_head.weight")
                print_rank_0("fail to match lm_head in value_model")
        else:
            _fetch_tp_shard_tensor(lm_head_weight, "lm_head.weight")

    dist.barrier()
    get_torch_device().empty_cache()
    print_rank_0(f"loading megatron ckpt done, time elapsed {time.time() - start_time}s")


================================================
FILE: verl_rl/verl/models/llama/megatron/checkpoint_utils/llama_loader_depracated.py
================================================

# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import time

import torch
import torch.distributed as dist

from verl.utils.device import get_device_id, get_torch_device


def _megatron_calc_layer_map(config):
    """Calculate the mapping of global layer_idx to local layer_idx

    Returns:
        layer_map (Dict: int -> tuple(int, int, int)):
            mapping from the global layer index to a tuple of
            (pp_rank, virtual_pp_rank, layer_idx inside model)
    """
    from megatron.core import mpu

    print(f"get megatron data parallel size: {mpu.get_data_parallel_world_size()}")

    pp_size = mpu.get_pipeline_model_parallel_world_size()
    virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1

    layer_map = dict()
    num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size
    assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers

    for pp_rank_idx in range(pp_size):
        for virtual_pp_rank_idx in range(virtual_pp_size):
            layer_offset = (
                virtual_pp_rank_idx * (config.num_hidden_layers // virtual_pp_size)
                + pp_rank_idx * num_layers_per_model
            )
            for layer_idx in range(num_layers_per_model):
                layer_map[layer_offset + layer_idx] = (
                    pp_rank_idx,
                    virtual_pp_rank_idx,
                    layer_idx,
                )
    return layer_map


def load_state_dict_to_megatron_llama(
    state_dict, wrapped_models, config, params_dtype, is_value_model=False, tie_word_embeddings=False
):
    """Load merged state_dict to sharded Megatron module in training."""
    from megatron.core import DistributedDataParallel as LocalDDP
    from megatron.core import mpu
    from megatron.core.transformer.module import Float16Module
    from torch.nn.parallel import DistributedDataParallel as torchDDP

    from verl.utils.logger import print_rank_0
    from verl.utils.megatron_utils import unwrap_model

    start_time = time.time()

    def _get_gpt_model(model):
        return model

    def broadcast_params(module):
        for param in module.parameters():
            torch.distributed.broadcast(
                param.data, src=mpu.get_data_parallel_src_rank(), group=mpu.get_data_parallel_group()
            )

    dp_rank = mpu.get_data_parallel_rank()
    pp_rank = mpu.get_pipeline_model_parallel_rank()
    pp_size = mpu.get_pipeline_model_parallel_world_size()
    virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1
    mp_group = mpu.get_model_parallel_group()

    if torch.distributed.get_rank() == 0:
        assert mp_group.rank() == 0, f"mp_rank:[{mp_group.rank()}] != 0 on rank #0"
        assert pp_rank == 0, f"pp_rank:[{pp_rank}] != 0 on rank #0"
        assert dp_rank == 0, f"dp_rank:[{dp_rank}] != 0 on rank #0"

    if not isinstance(wrapped_models, list | tuple):
        wrapped_models = [wrapped_models]

    assert len(wrapped_models) == virtual_pp_size
    num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size
    assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers, (
        f"num_layers_per_model: {num_layers_per_model} * pp_size: {pp_size} * virtual_pp_size "
        f"{virtual_pp_size} != config.num_hidden_layers: {config.num_hidden_layers}"
    )

    models = [None] * len(wrapped_models)

    for i, wrapped_model in enumerate(wrapped_models):
        models[i] = unwrap_model(wrapped_model, (torchDDP, LocalDDP, Float16Module))
        gpt_model_module = _get_gpt_model(models[i])
        assert len(gpt_model_module.model.layers) == num_layers_per_model

    def _broadcast_tensor(tensor, name) -> torch.Tensor:
        """broadcast tensor from rank0 across mp_group"""
        nonlocal state_dict
        nonlocal mp_group
        if torch.distributed.get_rank() == 0:
            if name in state_dict:
                weight = state_dict[name]
                tensor_shape = weight.shape
            else:
                tensor_shape = None
        else:
            weight = None
            tensor_shape = None

        obj_list = [tensor_shape]
        dist.broadcast_object_list(obj_list, src=0, group=mp_group)
        tensor_shape = obj_list[0]

        if tensor_shape is None:
            # all or none ranks in the mp_group should reach here
            print_rank_0(f"tensor:[{name}] not in state_dict, skip load")
            return

        if tensor is None:
            tensor = torch.empty(
                tensor_shape,
                dtype=params_dtype,
                device=get_device_id(),
                requires_grad=False,
            )
        if torch.distributed.get_rank() == 0:
            tensor.data.copy_(weight)
        dist.broadcast(tensor, src=0, group=mp_group)

    def _broadcast_tp_shard_tensor_vocab(tensor, name, chunk_dim=0, mutate_func=None) -> torch.Tensor:
        """broadcast tensor in tp shards across mp_group"""
        nonlocal state_dict
        nonlocal mp_group
        tp_rank = mpu.get_tensor_model_parallel_rank()
        tp_size = mpu.get_tensor_model_parallel_world_size()

        if torch.distributed.get_rank() == 0:
            if name in state_dict:
                full_weight = state_dict[name]
                if mutate_func is not None:
                    full_weight = mutate_func(full_weight)
                tensor_chunk = torch.chunk(full_weight, tp_size, dim=chunk_dim)
                chunk_shape = tensor_chunk[0].shape
            else:
                chunk_shape = None
        else:
            chunk_shape = None

        obj_list = [chunk_shape]
        dist.broadcast_object_list(obj_list, src=0, group=mp_group)
        chunk_shape = obj_list[0]
        if chunk_shape is None:
            # all or none ranks in the mp_group should reach here
            print_rank_0(f"tp_shard tensor:[{name}] not in state_dict, skip loading")
            return

        if tensor is None:
            sync_tensor = torch.empty(
                chunk_shape,
                dtype=params_dtype,
                device=get_device_id(),
                requires_grad=False,
            )
        else:
            assert tensor.shape == chunk_shape, (
                f"rank #{torch.distributed.get_rank()} tensor {name} shape {tensor.shape} != {chunk_shape}"
            )
            sync_tensor = torch.empty_like(tensor, device=get_device_id(), requires_grad=False)

        for i in range(tp_size):
            if torch.distributed.get_rank() == 0:
                sync_tensor.data.copy_(tensor_chunk[i])
            dist.broadcast(sync_tensor, src=0, group=mp_group)
            if (i == tp_rank) and (tensor is not None):
                tensor.data.copy_(sync_tensor)

    def _broadcast_tp_shard_tensor(tensor, name, chunk_dim=0, mutate_func=None) -> torch.Tensor:
        """broadcast tensor in tp shards across mp_group"""
        nonlocal state_dict
        nonlocal mp_group
        tp_rank = mpu.get_tensor_model_parallel_rank()
        tp_size = mpu.get_tensor_model_parallel_world_size()

        if torch.distributed.get_rank() == 0:
            if name in state_dict:
                full_weight = state_dict[name]
                if mutate_func is not None:
                    full_weight = mutate_func(full_weight)
                tensor_chunk = torch.chunk(full_weight, tp_size, dim=chunk_dim)
                chunk_shape = tensor_chunk[0].shape
            else:
                chunk_shape = None
        else:
            chunk_shape = None

        obj_list = [chunk_shape]
        dist.broadcast_object_list(obj_list, src=0, group=mp_group)
        chunk_shape = obj_list[0]
        if chunk_shape is None:
            # all or none ranks in the mp_group should reach here
            print_rank_0(f"tp_shard tensor:[{name}] not in state_dict, skip loading")
            return

        if tensor is None:
            sync_tensor = torch.empty(
                chunk_shape,
                dtype=params_dtype,
                device=get_device_id(),
                requires_grad=False,
            )
        else:
            assert tensor.shape == chunk_shape, (
                f"rank #{torch.distributed.get_rank()} tensor {name} shape {tensor.shape} != {chunk_shape}"
            )
            sync_tensor = torch.empty_like(tensor, device=get_device_id(), requires_grad=False)

        for i in range(tp_size):
            if torch.distributed.get_rank() == 0:
                sync_tensor.data.copy_(tensor_chunk[i])
            dist.broadcast(sync_tensor, src=0, group=mp_group)
            if (i == tp_rank) and (tensor is not None):
                tensor.data.copy_(sync_tensor)

    def _broadcast_tp_shard_tensor_gate_up(tensor, gate_name, up_name) -> torch.Tensor:
        """broadcast tensor in tp shards across mp_group"""
        nonlocal state_dict
        nonlocal mp_group
        tp_rank = mpu.get_tensor_model_parallel_rank()
        tp_size = mpu.get_tensor_model_parallel_world_size()

        if torch.distributed.get_rank() == 0:
            gate_weight = state_dict[gate_name]
            up_weight = state_dict[up_name]
            new_gate_up_weight = torch.empty(
                config.intermediate_size * 2, config.hidden_size, dtype=params_dtype, device=get_device_id()
            )
            for i in range(tp_size):
                intermediate_size_tp = config.intermediate_size // tp_size
                gate_weight_tp = gate_weight[i * intermediate_size_tp : (i + 1) * intermediate_size_tp]
                up_weight_tp = up_weight[i * intermediate_size_tp : (i + 1) * intermediate_size_tp]
                new_gate_up_weight[intermediate_size_tp * 2 * i : intermediate_size_tp * 2 * (i + 1)].copy_(
                    torch.cat([gate_weight_tp, up_weight_tp], dim=0)
                )

            tensor_chunk = torch.chunk(new_gate_up_weight, tp_size, dim=0)
            chunk_shape = tensor_chunk[0].shape
        else:
            chunk_shape = None

        obj_list = [chunk_shape]
        dist.broadcast_object_list(obj_list, src=0, group=mp_group)
        chunk_shape = obj_list[0]
        if chunk_shape is None:
            # all or none ranks in the mp_group should reach here
            print_rank_0(f"tp_shard tensor:[{gate_name, up_name}] not in state_dict, skip loading")
            return

        if tensor is None:
            sync_tensor = torch.empty(
                chunk_shape,
                dtype=params_dtype,
                device=get_device_id(),
                requires_grad=False,
            )
        else:
            assert tensor.shape == chunk_shape, (
                f"rank #{torch.distributed.get_rank()} tensor {gate_name, up_name} shape "
                f"{tensor.shape} != {chunk_shape}"
            )
            sync_tensor = torch.empty_like(tensor, device=get_device_id(), requires_grad=False)

        for i in range(tp_size):
            if torch.distributed.get_rank() == 0:
                sync_tensor.data.copy_(tensor_chunk[i])
            dist.broadcast(sync_tensor, src=0, group=mp_group)
            if (i == tp_rank) and (tensor is not None):
                tensor.data.copy_(sync_tensor)

    def _broadcast_tp_shard_tensor_qkv(tensor, q_name, k_name, v_name) -> torch.Tensor:
        """broadcast tensor in tp shards across mp_group"""
        nonlocal state_dict
        nonlocal mp_group
        tp_rank = mpu.get_tensor_model_parallel_rank()
        tp_size = mpu.get_tensor_model_parallel_world_size()

        if torch.distributed.get_rank() == 0:
            assert q_name in state_dict and k_name in state_dict and v_name in state_dict
            full_weight_q = state_dict[q_name]
            full_weight_k = state_dict[k_name]
            full_weight_v = state_dict[v_name]

            hidden_size_per_head = config.hidden_size // config.num_attention_heads

            if config.num_key_value_heads >= tp_size:
                q_size_tp = config.hidden_size // tp_size
                kv_size_tp = hidden_size_per_head * config.num_key_value_heads // tp_size
                total_size = q_size_tp + 2 * kv_size_tp
                new_weight_qkv = torch.empty(
                    total_size * tp_size, config.hidden_size, dtype=params_dtype, device=get_device_id()
                )
                for i in range(tp_size):
                    q_part = full_weight_q[i * q_size_tp : (i + 1) * q_size_tp]
                    k_part = full_weight_k[i * kv_size_tp : (i + 1) * kv_size_tp]
                    v_part = full_weight_v[i * kv_size_tp : (i + 1) * kv_size_tp]
                    new_weight_qkv[i * total_size : (i + 1) * total_size].copy_(
                        torch.cat([q_part, k_part, v_part], dim=0)
                    )
            else:
                q_size_tp = config.hidden_size // tp_size
                kv_size_tp = hidden_size_per_head
                total_size = q_size_tp + 2 * kv_size_tp
                new_weight_qkv = torch.empty(
                    total_size * tp_size, config.hidden_size, dtype=params_dtype, device=get_device_id()
                )
                for i in range(tp_size):
                    q_part = full_weight_q[i * q_size_tp : (i + 1) * q_size_tp]
                    start_idx = i * config.num_key_value_heads // tp_size * hidden_size_per_head
                    end_idx = (i * config.num_key_value_heads // tp_size + 1) * hidden_size_per_head
                    k_part = full_weight_k[start_idx:end_idx]
                    v_part = full_weight_v[start_idx:end_idx]
                    new_weight_qkv[i * total_size : (i + 1) * total_size].copy_(
                        torch.cat([q_part, k_part, v_part], dim=0)
                    )

            tensor_chunk = torch.chunk(new_weight_qkv, tp_size, dim=0)
            chunk_shape = tensor_chunk[0].shape
        else:
            chunk_shape = None

        obj_list = [chunk_shape]
        dist.broadcast_object_list(obj_list, src=0, group=mp_group)
        chunk_shape = obj_list[0]
        if chunk_shape is None:
            # all or none ranks in the mp_group should reach here
            print_rank_0(f"tp_shard tensor:[{q_name, k_name, v_name}] not in state_dict, skip loading")
            return

        if tensor is None:
            sync_tensor = torch.empty(
                chunk_shape,
                dtype=params_dtype,
                device=get_device_id(),
                requires_grad=False,
            )
        else:
            assert tensor.shape == chunk_shape, (
                f"rank #{torch.distributed.get_rank()} tensor {q_name} shape {tensor.shape} != {chunk_shape}"
            )
            sync_tensor = torch.empty_like(tensor, device=get_device_id(), requires_grad=False)

        for i in range(tp_size):
            if torch.distributed.get_rank() == 0:
                sync_tensor.data.copy_(tensor_chunk[i])
            dist.broadcast(sync_tensor, src=0, group=mp_group)
            if (i == tp_rank) and (tensor is not None):
                tensor.data.copy_(sync_tensor)

    if dp_rank == 0:
        # Embeddings
        # -------------------
        print_rank_0("loading embeddings...")
        gpt_model_module = _get_gpt_model(models[0])
        embed_tokens_weight = None
        if pp_rank == 0:
            embed_tokens_weight = gpt_model_module.model.embed_tokens.weight
        _broadcast_tp_shard_tensor_vocab(embed_tokens_weight, "model.embed_tokens.weight")

        # Transformer layers
        # -------------------
        layer_map = _megatron_calc_layer_map(config)

        for layer in range(config.num_hidden_layers):
            print_rank_0(f"loading layer #{layer}...")
            layer_name = f"model.layers.{layer}"
            dst_pp_rank, dst_virtual_pp_rank, dst_layer_idx = layer_map[layer]

            gpt_model_module = _get_gpt_model(models[dst_virtual_pp_rank])
            sync_layer = gpt_model_module.model.layers[dst_layer_idx]

            _broadcast_tensor(
                sync_layer.input_layernorm.weight if dst_pp_rank == pp_rank else None,
                f"{layer_name}.input_layernorm.weight",
            )

            _broadcast_tp_shard_tensor_qkv(
                sync_layer.self_attn.qkv_proj.weight if dst_pp_rank == pp_rank else None,
                f"{layer_name}.self_attn.q_proj.weight",
                f"{layer_name}.self_attn.k_proj.weight",
                f"{layer_name}.self_attn.v_proj.weight",
            )

            _broadcast_tp_shard_tensor(
                sync_layer.self_attn.o_proj.weight if dst_pp_rank == pp_rank else None,
                f"{layer_name}.self_attn.o_proj.weight",
                chunk_dim=1,
            )

            _broadcast_tensor(
                sync_layer.post_attention_layernorm.weight if dst_pp_rank == pp_rank else None,
                f"{layer_name}.post_attention_layernorm.weight",
            )

            _broadcast_tp_shard_tensor_gate_up(
                sync_layer.mlp.gate_up_proj.weight if dst_pp_rank == pp_rank else None,
                f"{layer_name}.mlp.gate_proj.weight",
                f"{layer_name}.mlp.up_proj.weight",
            )

            _broadcast_tp_shard_tensor(
                sync_layer.mlp.down_proj.weight if dst_pp_rank == pp_rank else None,
                f"{layer_name}.mlp.down_proj.weight",
                chunk_dim=1,
            )

        # Final Layernorm
        # -------------------
        print_rank_0("loading final layernorm...")
        gpt_model_module = _get_gpt_model(models[-1])
        _broadcast_tensor(
            getattr(gpt_model_module.model.norm, "weight", None),
            "model.norm.weight",
        )

        print_rank_0("loading lm_head...")
        lm_head_weight = None
        if pp_rank + 1 == pp_size:
            lm_head_weight = gpt_model_module.lm_head.weight

        if is_value_model:
            if "lm_head.weight" in state_dict and state_dict["lm_head.weight"].shape[0] == 1:
                _broadcast_tensor(lm_head_weight, "lm_head.weight")
                print_rank_0("load lm_head weight")
            elif "reward_head.weight" in state_dict and state_dict["reward_head.weight"].shape[0] == 1:
                _broadcast_tensor(lm_head_weight, "reward_head.weight")
                print_rank_0("load lm_head from value_head weight")
            else:
                _broadcast_tensor(None, "lm_head.weight")
                print_rank_0("fail to match lm_head in value_model")
        else:
            _broadcast_tp_shard_tensor(lm_head_weight, "lm_head.weight")

    dist.barrier()
    # Broadcast weights inside data parallel groups
    for wrapped_model in wrapped_models:
        broadcast_params(wrapped_model)

    get_torch_device().empty_cache()
    print_rank_0(f"loading megatron ckpt done, time elapsed {time.time() - start_time}s")


================================================
FILE: verl_rl/verl/models/llama/megatron/checkpoint_utils/llama_saver.py
================================================

# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time

import torch
import torch.distributed as dist
from megatron.core import mpu
from megatron.core.distributed import DistributedDataParallel as LocalDDP
from megatron.core.transformer.module import Float16Module
from torch.nn.parallel import DistributedDataParallel as torchDDP

from verl.utils.device import get_device_id, get_torch_device
from verl.utils.logger import print_rank_0
from verl.utils.megatron_utils import unwrap_model


def _megatron_calc_global_rank(tp_rank: int = 0, dp_rank: int = 0, pp_rank: int = 0):
    """Given TP, DP, and PP ranks, compute the corresponding global rank."""

    tp_size = mpu.get_tensor_model_parallel_world_size()
    dp_size = mpu.get_data_parallel_world_size()
    pp_size = mpu.get_pipeline_model_parallel_world_size()
    assert tp_size * dp_size * pp_size == torch.distributed.get_world_size(), (
        f"{tp_size} x {dp_size} x {pp_size} != {torch.distributed.get_world_size()}"
    )
    # We only support TP-DP-PP grouping, for correctness when resharding
    return (pp_rank * dp_size + dp_rank) * tp_size + tp_rank


def _megatron_calc_layer_map(config):
    """Calculate the mapping of global layer_idx to local layer_idx

    Returns:
        layer_map (Dict: int -> tuple(int, int, int)):
            mapping from the global layer index to a tuple of
            (pp_rank, virtual_pp_rank, layer_idx inside model)
    """
    from megatron.core import mpu

    pp_size = mpu.get_pipeline_model_parallel_world_size()
    virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1

    layer_map = dict()
    num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size
    assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers

    for pp_rank_idx in range(pp_size):
        for virtual_pp_rank_idx in range(virtual_pp_size):
            layer_offset = (
                virtual_pp_rank_idx * (config.num_hidden_layers // virtual_pp_size)
                + pp_rank_idx * num_layers_per_model
            )
            for layer_idx in range(num_layers_per_model):
                layer_map[layer_offset + layer_idx] = (
                    pp_rank_idx,
                    virtual_pp_rank_idx,
                    layer_idx,
                )
    return layer_map


def merge_megatron_ckpt_llama(wrapped_models, config, dtype, is_value_model=False, tie_word_embeddings=False):
    """Merge sharded parameters of a Megatron module into a merged checkpoint.

    Args:
        wrapped_models (list of megatron.core.distributed.DistributedDataParallel):
            The local DDP wrapped megatron modules.
        config (str or None):
            HF config for model
        dtype: model params type
        is_value_model: if model is value model
        tie_word_embeddings: tie_word_embeddings, not used in llama, only to keep same interface with qwen2

    Returns:
        state_dict (dict):
            The merged state_dict in rank 0, and an empty dictionary in other ranks.
    """
    start_time = time.time()

    def _get_gpt_model(model):
        return model

    dp_rank = mpu.get_data_parallel_rank()
    pp_size = mpu.get_pipeline_model_parallel_world_size()
    pp_rank = mpu.get_pipeline_model_parallel_rank()
    virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1
    mp_group = mpu.get_model_parallel_group()

    if dist.get_rank() == 0:
        assert mp_group.rank() == 0, f"mp_rank:[{mp_group.rank()}] != 0 on rank #0"
        assert pp_rank == 0, f"pp_rank:[{pp_rank}] != 0 on rank #0"
        assert dp_rank == 0, f"dp_rank:[{dp_rank}] != 0 on rank #0"

    if not isinstance(wrapped_models, list | tuple):
        wrapped_models = [wrapped_models]

    assert len(wrapped_models) == virtual_pp_size
    num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size
    assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers

    models = [None] * len(wrapped_models)

    for i, wrapped_model in enumerate(wrapped_models):
        models[i] = unwrap_model(wrapped_model, (torchDDP, LocalDDP, Float16Module))
        assert len(models[i].model.layers) == num_layers_per_model, (
            "len model layers {} not equal to num_layers_per_model {}".format(
                len(models[i].model.layers), num_layers_per_model
            )
        )

    state_dict = dict()

    def _get_cpu_tensor(tensor: torch.Tensor):
        if tensor is None:
            return None
        if tensor.device == torch.device("cpu"):
            return tensor.detach().clone()
        return tensor.detach().cpu()

    def _broadcast_tensor(tensor, name, src_pp_rank) -> torch.Tensor:
        """broadcast tensor across mp_group"""
        nonlocal state_dict
        nonlocal mp_group
        src_rank = _megatron_calc_global_rank(tp_rank=0, dp_rank=0, pp_rank=src_pp_rank)

        if torch.distributed.get_rank() == src_rank:
            if tensor is None:
                weight = None
                tensor_shape = None
            else:
                weight = tensor
                tensor_shape = weight.shape
        else:
            weight = None
            tensor_shape = None

        obj_list = [tensor_shape]
        dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group)
        tensor_shape = obj_list[0]

        if tensor_shape is None:
            # all or none ranks in the mp_group should reach here
            print_rank_0(f"tensor:[{name}] not exist, skip collect")
            return

        if weight is None:
            weight = torch.empty(
                tensor_shape,
                dtype=dtype,
                device=get_device_id(),
                requires_grad=False,
            )

        dist.broadcast(weight, src=src_rank, group=mp_group)

        if torch.distributed.get_rank() == 0:
            state_dict[name] = _get_cpu_tensor(weight)

    def _broadcast_tp_shard_tensor(tensor, name, src_pp_rank, concat_dim=0, mutate_func=None) -> torch.Tensor:
        """broadcast tensor in tp shards across mp_group"""
        nonlocal state_dict
        nonlocal mp_group
        tp_size = mpu.get_tensor_model_parallel_world_size()
        src_rank = _megatron_calc_global_rank(tp_rank=0, dp_rank=0, pp_rank=src_pp_rank)

        chunk_shape = tensor.shape if torch.distributed.get_rank() == src_rank else None

        obj_list = [chunk_shape]
        dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group)
        chunk_shape = obj_list[0]
        if chunk_shape is None:
            # all or none ranks in the mp_group should reach here
            print_rank_0(f"tp_shard tensor:[{name}] not exist, skip collecting")
            return

        buffer_tensor = torch.empty(
            chunk_shape,
            dtype=dtype,
            device=get_device_id(),
            requires_grad=False,
        )

        chunk_tensors = [None] * tp_size

        for i in range(tp_size):
            cur_src_rank = _megatron_calc_global_rank(tp_rank=i, dp_rank=0, pp_rank=src_pp_rank)
            sync_tensor = tensor if torch.distributed.get_rank() == cur_src_rank else buffer_tensor
            dist.broadcast(sync_tensor, src=cur_src_rank, group=mp_group)

            if torch.distributed.get_rank() == 0:
                chunk_tensors[i] = _get_cpu_tensor(sync_tensor)

        if torch.distributed.get_rank() == 0:
            full_tensor = torch.concat(chunk_tensors, dim=concat_dim)
            if mutate_func is not None:
                full_tensor = mutate_func(full_tensor)
            state_dict[name] = full_tensor

    def _broadcast_tp_shard_tensor_gate_up(tensor, gate_name, up_name, src_pp_rank) -> torch.Tensor:
        """broadcast tensor in tp shards across mp_group"""
        nonlocal state_dict
        nonlocal mp_group
        tp_size = mpu.get_tensor_model_parallel_world_size()
        src_rank = _megatron_calc_global_rank(tp_rank=0, dp_rank=0, pp_rank=src_pp_rank)

        chunk_shape = tensor.shape if torch.distributed.get_rank() == src_rank else None

        obj_list = [chunk_shape]
        dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group)
        chunk_shape = obj_list[0]
        if chunk_shape is None:
            # all or none ranks in the mp_group should reach here
            print_rank_0(f"tp_shard tensor:[{gate_name, up_name}] not exist, skip collecting")
            return

        buffer_tensor = torch.empty(
            chunk_shape,
            dtype=dtype,
            device=get_device_id(),
            requires_grad=False,
        )

        chunk_tensors = [None] * tp_size

        for i in range(tp_size):
            cur_src_rank = _megatron_calc_global_rank(tp_rank=i, dp_rank=0, pp_rank=src_pp_rank)
            sync_tensor = tensor if torch.distributed.get_rank() == cur_src_rank else buffer_tensor
            dist.broadcast(sync_tensor, src=cur_src_rank, group=mp_group)

            if torch.distributed.get_rank() == 0:
                chunk_tensors[i] = _get_cpu_tensor(sync_tensor)

        if torch.distributed.get_rank() == 0:
            full_tensor = torch.concat(chunk_tensors, dim=0)
            intermediate_size_tp = config.intermediate_size // tp_size
            gate_weight_list = []
            up_weight_list = []
            for i in range(tp_size):
                gate_up_weight_tp = full_tensor[intermediate_size_tp * 2 * i : intermediate_size_tp * 2 * (i + 1)]
                gate_weight_tp = gate_up_weight_tp[:intermediate_size_tp]
                up_weight_tp = gate_up_weight_tp[intermediate_size_tp:]
                gate_weight_list.append(gate_weight_tp)
                up_weight_list.append(up_weight_tp)

            state_dict[gate_name] = torch.cat(gate_weight_list, dim=0)
            state_dict[up_name] = torch.cat(up_weight_list, dim=0)

    def _broadcast_tp_shard_tensor_qkv(tensor, q_name, k_name, v_name, src_pp_rank):
        """broadcast tensor in tp shards across mp_group"""
        nonlocal state_dict
        nonlocal mp_group
        tp_size = mpu.get_tensor_model_parallel_world_size()
        src_rank = _megatron_calc_global_rank(tp_rank=0, dp_rank=0, pp_rank=src_pp_rank)

        chunk_shape = tensor.shape if torch.distributed.get_rank() == src_rank else None

        obj_list = [chunk_shape]
        dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group)
        chunk_shape = obj_list[0]
        if chunk_shape is None:
            # all or none ranks in the mp_group should reach here
            print_rank_0(f"tp_shard tensor:[{q_name}] not exist, skip collecting")
            return

        buffer_tensor = torch.empty(
            chunk_shape,
            dtype=dtype,
            device=get_device_id(),
            requires_grad=False,
        )

        chunk_tensors = [None] * tp_size

        for i in range(tp_size):
            cur_src_rank = _megatron_calc_global_rank(tp_rank=i, dp_rank=0, pp_rank=src_pp_rank)
            sync_tensor = tensor if torch.distributed.get_rank() == cur_src_rank else buffer_tensor
            dist.broadcast(sync_tensor, src=cur_src_rank, group=mp_group)

            if torch.distributed.get_rank() == 0:
                chunk_tensors[i] = _get_cpu_tensor(sync_tensor)

        if torch.distributed.get_rank() == 0:
            full_tensor = torch.concat(chunk_tensors, dim=0)
            q_weight_list = []
            k_weight_list = []
            v_weight_list = []
            hidden_size_per_head = config.hidden_size // config.num_attention_heads

            if config.num_key_value_heads >= tp_size:
                q_size_tp = config.hidden_size // tp_size
                kv_size_tp = hidden_size_per_head * config.num_key_value_heads // tp_size
                total_size = q_size_tp + 2 * kv_size_tp
                for i in range(tp_size):
                    qkv_part = full_tensor[i * total_size : (i + 1) * total_size]
                    q_part = qkv_part[:q_size_tp]
                    k_part = qkv_part[q_size_tp : q_size_tp + kv_size_tp]
                    v_part = qkv_part[q_size_tp + kv_size_tp : total_size]
                    q_weight_list.append(q_part)
                    k_weight_list.append(k_part)
                    v_weight_list.append(v_part)
            else:
                q_size_tp = config.hidden_size // tp_size
                kv_size_tp = hidden_size_per_head
                total_size = q_size_tp + 2 * kv_size_tp
                for i in range(tp_size):
                    qkv_part = full_tensor[i * total_size : (i + 1) * total_size]
                    q_part = qkv_part[:q_size_tp]
                    k_part = qkv_part[q_size_tp : q_size_tp + kv_size_tp]
                    v_part = qkv_part[q_size_tp + kv_size_tp : total_size]
                    q_weight_list.append(q_part)
                    if i * config.num_key_value_heads % tp_size == 0:
                        k_weight_list.append(k_part)
                        v_weight_list.append(v_part)

            state_dict[q_name] = torch.cat(q_weight_list, dim=0)
            state_dict[k_name] = torch.cat(k_weight_list, dim=0)
            state_dict[v_name] = torch.cat(v_weight_list, dim=0)

    # empty cache before collecting weights
    get_torch_device().empty_cache()

    if dp_rank == 0:
        # Embeddings
        # -------------------
        print_rank_0("collecting embeddings...")
        gpt_model_module = _get_gpt_model(models[0])
        _broadcast_tp_shard_tensor(
            gpt_model_module.model.embed_tokens.weight if pp_rank == 0 else None,
            "model.embed_tokens.weight",
            src_pp_rank=0,
        )

        # Transformer layers
        # -------------------
        layer_map = _megatron_calc_layer_map(config)
        for layer in range(config.num_hidden_layers):
            print_rank_0(f"collecting layer #{layer}...")
            layer_name = f"model.layers.{layer}"
            src_pp_rank, src_virtual_pp_rank, src_layer_idx = layer_map[layer]

            gpt_model_module = _get_gpt_model(models[src_virtual_pp_rank])
            sync_layer = gpt_model_module.model.layers[src_layer_idx]

            _broadcast_tensor(
                sync_layer.input_layernorm.weight,
                f"{layer_name}.input_layernorm.weight",
                src_pp_rank=src_pp_rank,
            )

            _broadcast_tp_shard_tensor_qkv(
                sync_layer.self_attn.qkv_proj.weight,
                f"{layer_name}.self_attn.q_proj.weight",
                f"{layer_name}.self_attn.k_proj.weight",
                f"{layer_name}.self_attn.v_proj.weight",
                src_pp_rank=src_pp_rank,
            )

            _broadcast_tp_shard_tensor(
                sync_layer.self_attn.o_proj.weight,
                f"{layer_name}.self_attn.o_proj.weight",
                concat_dim=1,
                src_pp_rank=src_pp_rank,
            )

            _broadcast_tensor(
                sync_layer.post_attention_layernorm.weight,
                f"{layer_name}.post_attention_layernorm.weight",
                src_pp_rank=src_pp_rank,
            )

            _broadcast_tp_shard_tensor_gate_up(
                sync_layer.mlp.gate_up_proj.weight,
                f"{layer_name}.mlp.gate_proj.weight",
                f"{layer_name}.mlp.up_proj.weight",
                src_pp_rank=src_pp_rank,
            )

            _broadcast_tp_shard_tensor(
                sync_layer.mlp.down_proj.weight,
                f"{layer_name}.mlp.down_proj.weight",
                concat_dim=1,
                src_pp_rank=src_pp_rank,
            )

        # Final Layernorm
        # -------------------
        print_rank_0("collecting final layernorm...")
        gpt_model_module = _get_gpt_model(models[-1])
        _broadcast_tensor(
            getattr(gpt_model_module.model.norm, "weight", None),
            "model.norm.weight",
            src_pp_rank=pp_size - 1,
        )

        print_rank_0("collecting lm_head...")

        if is_value_model:
            if pp_rank == pp_size - 1:
                print(f"gpt_model_module.lm_head.weight: {gpt_model_module.lm_head.weight.shape}")
            _broadcast_tensor(
                gpt_model_module.lm_head.weight if pp_rank == pp_size - 1 else None,
                "lm_head.weight",
                src_pp_rank=pp_size - 1,
            )
            _broadcast_tensor(
                gpt_model_module.reward_head.weight
                if pp_rank == pp_size - 1 and getattr(gpt_model_module, "reward_head", None) is not None
                else None,
                "reward_head.weight",
                src_pp_rank=pp_size - 1,
            )
        else:
            _broadcast_tp_shard_tensor(
                getattr(gpt_model_module.lm_head, "weight", None) if pp_rank == pp_size - 1 else None,
                "lm_head.weight",
                src_pp_rank=pp_size - 1,
            )

    dist.barrier()

    get_torch_device().empty_cache()
    if torch.distributed.get_rank() == 0:
        if dtype not in [torch.float16, torch.bfloat16, torch.float32]:
            print(f"Unknown/unsupported dtype to save: {dtype}")
            exit(1)
        for k, v in state_dict.items():
            if dtype != v.dtype:
                state_dict[k] = v.to(dtype)

    print_rank_0(f"merge megatron ckpt done, time elapsed {time.time() - start_time}s")
    return state_dict


================================================
FILE: verl_rl/verl/models/llama/megatron/layers/__init__.py
================================================

# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .parallel_attention import ParallelLlamaAttention
from .parallel_decoder import ParallelLlamaDecoderLayer, ParallelLlamaDecoderLayerRmPad
from .parallel_linear import (
    LinearForLastLayer,
    MergedColumnParallelLinear,
    QKVParallelLinear,
)
from .parallel_mlp import ParallelLlamaMLP
from .parallel_rmsnorm import ParallelLlamaRMSNorm

__all__ = [
    "LinearForLastLayer",
    "MergedColumnParallelLinear",
    "QKVParallelLinear",
    "ParallelLlamaAttention",
    "ParallelLlamaDecoderLayer",
    "ParallelLlamaDecoderLayerRmPad",
    "ParallelLlamaMLP",
    "ParallelLlamaRMSNorm",
]


================================================
FILE: verl_rl/verl/models/llama/megatron/layers/parallel_attention.py
================================================

# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
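
# A minimal sketch of the rotary-embedding convention used by the classes below:
# the cos/sin caches duplicate the frequency table along the last dim, and
# `rotate_half` pairs dimension i with i + dim/2. All names below are local to
# the sketch, not part of this module.
#
#     import torch
#     dim, seq_len, base = 8, 4, 10000
#     inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))  # (dim/2,)
#     t = torch.arange(seq_len).float()
#     freqs = torch.einsum("i,j->ij", t, inv_freq)                        # (seq_len, dim/2)
#     emb = torch.cat((freqs, freqs), dim=-1)                             # (seq_len, dim)
#     cos, sin = emb.cos(), emb.sin()  # the cached `cos_cached` / `sin_cached` buffers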
import math
from typing import Optional

import torch
import torch.nn.functional as F
from einops import rearrange
from flash_attn.layers.rotary import apply_rotary_emb
from megatron.core import ModelParallelConfig, tensor_parallel
from megatron.core import parallel_state as mpu
from torch import nn
from transformers import LlamaConfig
from transformers.utils import is_flash_attn_2_available

from verl.models.llama.megatron.layers.parallel_linear import QKVParallelLinear
from verl.utils.megatron import tensor_parallel as tp_utils


class LlamaRotaryEmbedding(nn.Module):
    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
        super().__init__()

        self.dim = dim
        self.max_position_embeddings = max_position_embeddings
        self.base = base
        inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
        self.register_buffer("inv_freq", inv_freq, persistent=False)

        # Build here to make `torch.jit.trace` work.
        self._set_cos_sin_cache(
            seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
        )

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        self.max_seq_len_cached = seq_len
        t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)

        freqs = torch.einsum("i,j->ij", t, self.inv_freq)
        # Different from paper, but it uses a different permutation in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
        self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)

    def forward(self, x, seq_len=None):
        # x: [bs, num_attention_heads, seq_len, head_size]
        if seq_len > self.max_seq_len_cached:
            self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)

        return (
            self.cos_cached[:seq_len].to(dtype=x.dtype),
            self.sin_cached[:seq_len].to(dtype=x.dtype),
        )


class LlamaLinearScalingRotaryEmbedding(LlamaRotaryEmbedding):
    """LlamaRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""

    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
        self.scaling_factor = scaling_factor
        super().__init__(dim, max_position_embeddings, base, device)

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        self.max_seq_len_cached = seq_len
        t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
        t = t / self.scaling_factor

        freqs = torch.einsum("i,j->ij", t, self.inv_freq)
        # Different from paper, but it uses a different permutation in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
        self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)


class LlamaDynamicNTKScalingRotaryEmbedding(LlamaRotaryEmbedding):
    """LlamaRotaryEmbedding extended with Dynamic NTK scaling.
    Credits to the Reddit users /u/bloc97 and /u/emozilla"""

    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
        self.scaling_factor = scaling_factor
        super().__init__(dim, max_position_embeddings, base, device)

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        self.max_seq_len_cached = seq_len

        if seq_len > self.max_position_embeddings:
            base = self.base * (
                (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)
            ) ** (self.dim / (self.dim - 2))
            inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
            self.register_buffer("inv_freq", inv_freq, persistent=False)

        t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)

        freqs = torch.einsum("i,j->ij", t, self.inv_freq)
        # Different from paper, but it uses a different permutation in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
        self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)


class LlamaLlama3ScalingRotaryEmbedding(LlamaRotaryEmbedding):
    def __init__(self, dim, config, max_position_embeddings=2048, base=10000, device=None):
        super().__init__(dim, max_position_embeddings, base, device)
        self.factor = config.rope_scaling["factor"]  # `8` in the original implementation
        self.high_freq_factor = config.rope_scaling["high_freq_factor"]  # `1` in the original implementation
        self.low_freq_factor = config.rope_scaling["low_freq_factor"]  # `4` in the original implementation
        self.old_context_len = config.rope_scaling[
            "original_max_position_embeddings"
        ]  # `8192` in the original implementation

        low_freq_wavelen = self.old_context_len / self.low_freq_factor
        high_freq_wavelen = self.old_context_len / self.high_freq_factor

        wavelen = 2 * math.pi / self.inv_freq
        # wavelen < high_freq_wavelen: do nothing; wavelen > low_freq_wavelen: divide by factor
        inv_freq_llama = torch.where(wavelen > low_freq_wavelen, self.inv_freq / self.factor, self.inv_freq)
        # otherwise: interpolate between the two, using a smooth factor
        smooth_factor = (self.old_context_len / wavelen - self.low_freq_factor) / (
            self.high_freq_factor - self.low_freq_factor
        )
        smoothed_inv_freq = (1 - smooth_factor) * inv_freq_llama / self.factor + smooth_factor * inv_freq_llama
        is_medium_freq = ~(wavelen < high_freq_wavelen) * ~(wavelen > low_freq_wavelen)
        inv_freq = torch.where(is_medium_freq, smoothed_inv_freq, inv_freq_llama)
        self.register_buffer("inv_freq", inv_freq, persistent=False)

        # Build here to make `torch.jit.trace` work.
        self._set_cos_sin_cache(
            seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
        )


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
    cos = cos[position_ids].unsqueeze(1)  # [bs, 1, seq_len, dim]
    sin = sin[position_ids].unsqueeze(1)  # [bs, 1, seq_len, dim]
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep).
    The hidden states go from (batch, num_key_value_heads, seqlen, head_dim)
    to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


class ParallelLlamaAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: LlamaConfig, megatron_config: ModelParallelConfig):
        super().__init__()
        self.config = config
        self.megatron_config = megatron_config
        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.num_key_value_heads = config.num_key_value_heads
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.max_position_embeddings = config.max_position_embeddings
        self.rope_theta = config.rope_theta

        # assign values after tp
        tp_size = mpu.get_tensor_model_parallel_world_size()
        assert self.num_heads % tp_size == 0, (
            f"num_head must be divisible by tp_size. Got num_head={self.num_heads}, tp_size={tp_size}"
        )
        assert self.num_key_value_heads % tp_size == 0, (
            f"num_key_value_heads must be divisible by tp_size. Got num_key_value_heads="
            f"{self.num_key_value_heads}, tp_size={tp_size}"
        )

        self.num_heads_per_tp = self.num_heads // tp_size
        self.num_key_value_heads_per_tp = self.num_key_value_heads // tp_size
        self.hidden_size_per_tp = self.hidden_size // tp_size

        if (self.head_dim * self.num_heads) != self.hidden_size:
            raise ValueError(
                f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size} and "
                f"`num_heads`: {self.num_heads})."
            )

        column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear()
        row_kwargs = tp_utils.get_default_kwargs_for_row_parallel_linear()

        if megatron_config is not None:
            assert column_kwargs.get("config", False), "must have ModelParallelConfig"
            assert row_kwargs.get("config", False), "must have ModelParallelConfig"
            tp_utils.update_kwargs_with_config(column_kwargs, megatron_config)
            tp_utils.update_kwargs_with_config(row_kwargs, megatron_config)

        # [self.q_size, self.k_size, self.v_size]
        self.qkv_proj = QKVParallelLinear(
            input_size=self.hidden_size,
            num_heads=self.num_heads,
            num_key_value_heads=self.num_key_value_heads,
            head_dim=self.head_dim,
            bias=config.attention_bias,
            gather_output=False,
            skip_bias_add=False,
            **column_kwargs,
        )

        self.q_size = self.num_heads_per_tp * self.head_dim
        self.k_size = self.num_key_value_heads_per_tp * self.head_dim
        self.v_size = self.num_key_value_heads_per_tp * self.head_dim

        self.o_proj = tensor_parallel.RowParallelLinear(
            input_size=self.num_heads * self.head_dim,
            output_size=self.hidden_size,
            bias=config.attention_bias,
            input_is_parallel=True,
            skip_bias_add=False,
            **row_kwargs,
        )

        self._init_rope()

    def _init_rope(self):
        if self.config.rope_scaling is None:
            self.rotary_emb = LlamaRotaryEmbedding(
                self.head_dim,
                max_position_embeddings=self.max_position_embeddings,
                base=self.rope_theta,
            )
        else:
            rope_type_key = "type" if "type" in self.config.rope_scaling else "rope_type"
            scaling_type = self.config.rope_scaling[rope_type_key]
            scaling_factor = self.config.rope_scaling["factor"]
            if scaling_type == "linear":
                self.rotary_emb = LlamaLinearScalingRotaryEmbedding(
                    self.head_dim,
                    max_position_embeddings=self.max_position_embeddings,
                    scaling_factor=scaling_factor,
                    base=self.rope_theta,
                )
            elif scaling_type == "dynamic":
                self.rotary_emb = LlamaDynamicNTKScalingRotaryEmbedding(
                    self.head_dim,
                    max_position_embeddings=self.max_position_embeddings,
                    scaling_factor=scaling_factor,
                    base=self.rope_theta,
                )
            elif scaling_type == "llama3":
                self.rotary_emb = LlamaLlama3ScalingRotaryEmbedding(
                    self.head_dim,
                    self.config,
                    max_position_embeddings=self.max_position_embeddings,
                    base=self.rope_theta,
                )
            else:
                raise ValueError(f"Unknown RoPE scaling type {scaling_type}")

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
    ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        bsz, q_len, _ = hidden_states.size()
        qkv = self.qkv_proj(hidden_states)[0]
        query_states, key_states, value_states = qkv.split([self.q_size, self.k_size, self.v_size], dim=-1)

        query_states = query_states.view(bsz, q_len, self.num_heads_per_tp, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads_per_tp, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads_per_tp, self.head_dim).transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)

        if attn_weights.size() != (bsz, self.num_heads_per_tp, q_len, kv_seq_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz, self.num_heads_per_tp, q_len, kv_seq_len)}, "
                f"but is {attn_weights.size()}"
            )

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights + attention_mask

        # upcast attention to fp32
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
        attn_output = torch.matmul(attn_weights, value_states)

        if attn_output.size() != (bsz, self.num_heads_per_tp, q_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads_per_tp, q_len, self.head_dim)}, "
                f"but is {attn_output.size()}"
            )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size_per_tp)

        attn_output = self.o_proj(attn_output)[0]
        return attn_output


"""
Remove padding Attention
- Using Flash-attn 2
- Compatible with sequence parallel
"""

if is_flash_attn_2_available():
    from flash_attn import flash_attn_varlen_func
    from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input  # noqa


def apply_rotary_pos_emb_rmpad(q, k, cos, sin, position_ids, indices, sequence_length):
    batch_size = position_ids.shape[0]

    q = pad_input(q, indices, batch_size, sequence_length)  # (batch_size, seqlen, num_head, head_dim)
    k = pad_input(k, indices, batch_size, sequence_length)
    cos = cos[position_ids].unsqueeze(2)  # [bs, seq_len, 1, dim]
    sin = sin[position_ids].unsqueeze(2)  # [bs, seq_len, 1, dim]
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)

    q_embed = index_first_axis(rearrange(q_embed, "b s ... -> (b s) ..."), indices)
    k_embed = index_first_axis(rearrange(k_embed, "b s ... -> (b s) ..."), indices)

    return q_embed, k_embed


# use flash-attn rotary embeddings with rmpad
# cos/sin should be: (seq_length, rotary_dim / 2)
def apply_rotary_pos_emb_rmpad_flash(q, k, cos, sin, cu_seqlens, max_seqlen):
    q_embed = apply_rotary_emb(
        q, cos, sin, interleaved=False, inplace=False, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen
    )
    k_embed = apply_rotary_emb(
        k, cos, sin, interleaved=False, inplace=False, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen
    )
    return q_embed, k_embed


class ParallelLlamaAttentionRmPad(ParallelLlamaAttention):
    def forward(
        self,
        hidden_states: torch.Tensor,
        position_ids: Optional[torch.LongTensor] = None,
        sequence_length: int = None,
        indices: torch.Tensor = None,
        cu_seqlens: torch.Tensor = None,
        max_seqlen_in_batch: int = None,
    ):
        total_nnz, _, _ = hidden_states.size()  # This is the total_nnz padded after sequence parallel

        if self.megatron_config.sequence_parallel:
            total_nnz = total_nnz * mpu.get_tensor_model_parallel_world_size()

        qkv = self.qkv_proj(hidden_states)[0]
        query_states, key_states, value_states = qkv.split(
            [self.q_size, self.k_size, self.v_size], dim=-1
        )  # (total_nnz, 1, hidden_size)

        if self.megatron_config.sequence_parallel:
            sequence_parallel_pad = total_nnz - cu_seqlens[-1]
            total_nnz = cu_seqlens[-1]  # total_nnz before sp padding
            query_states = query_states[:total_nnz]
            key_states = key_states[:total_nnz]
            value_states = value_states[:total_nnz]

        # Flash attention requires the input to have the shape
        # batch_size x seq_length x head_dim x hidden_dim
        # therefore we just need to keep the original shape
        query_states = query_states.view(total_nnz, self.num_heads_per_tp, self.head_dim)
        key_states = key_states.view(total_nnz, self.num_key_value_heads_per_tp, self.head_dim)
        value_states = value_states.view(total_nnz, self.num_key_value_heads_per_tp, self.head_dim)

        cos, sin = self.rotary_emb(value_states, seq_len=sequence_length)
        cos, sin = cos[:, : cos.shape[1] // 2], sin[:, : sin.shape[1] // 2]  # flash attn only needs half
        query_states, key_states = apply_rotary_pos_emb_rmpad_flash(
            query_states, key_states, cos, sin, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen_in_batch
        )
        # query_states, key_states = apply_rotary_pos_emb_rmpad(query_states, key_states, cos, sin,
        #                                                       position_ids, indices,

        # TODO: llama does not have dropout in the config??
        # It is recommended to use dropout with FA according to the docs
        # when training.
        dropout_rate = 0.0  # if not self.training else self.attn_dropout

        # In PEFT, usually we cast the layer norms in float32 for training stability reasons
        # therefore the input hidden states gets silently cast in float32. Hence, we need
        # cast them back in float16 just to be sure everything works as expected.
        # This might slow down training & inference so it is recommended to not cast the LayerNorms
        # in fp32. (LlamaRMSNorm handles it correctly)
        input_dtype = query_states.dtype
        if input_dtype == torch.float32:
            query_states = query_states.to(torch.float16)
            key_states = key_states.to(torch.float16)
            value_states = value_states.to(torch.float16)

        attn_output_unpad = flash_attn_varlen_func(
            query_states,
            key_states,
            value_states,
            cu_seqlens_q=cu_seqlens,
            cu_seqlens_k=cu_seqlens,
            max_seqlen_q=max_seqlen_in_batch,
            max_seqlen_k=max_seqlen_in_batch,
            dropout_p=dropout_rate,
            softmax_scale=None,
            causal=True,
        )

        attn_output_unpad = attn_output_unpad.to(input_dtype)
        attn_output_unpad = attn_output_unpad.reshape(total_nnz, 1, self.hidden_size_per_tp).contiguous()

        # sequence parallel reduce_scatter is performed inside RowColumnParallel if enabled
        # Here we need to repad
        if self.megatron_config.sequence_parallel:
            attn_output_unpad = F.pad(attn_output_unpad, pad=(0, 0, 0, 0, 0, sequence_parallel_pad))

        attn_output_unpad = self.o_proj(attn_output_unpad)[0]
        return attn_output_unpad


================================================
FILE: verl_rl/verl/models/llama/megatron/layers/parallel_decoder.py
================================================

# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
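
# A minimal sketch of the pre-norm residual pattern both decoder layers below
# implement; `norm1`, `norm2`, `attn`, and `mlp` are placeholder callables for
# illustration, not names from this file.
#
#     def decoder_block(x, norm1, attn, norm2, mlp):
#         x = x + attn(norm1(x))  # normalize first, attend, then add the residual
#         x = x + mlp(norm2(x))   # same pattern for the feed-forward sublayer
#         return x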
from typing import Optional

import torch
from megatron.core import ModelParallelConfig
from torch import nn
from transformers import LlamaConfig

from verl.utils.megatron_utils import TransformerConfig, convert_config

from .parallel_attention import ParallelLlamaAttention, ParallelLlamaAttentionRmPad
from .parallel_mlp import ParallelLlamaMLP
from .parallel_rmsnorm import ParallelLlamaRMSNorm


class ParallelLlamaDecoderLayer(nn.Module):
    def __init__(self, config: LlamaConfig, megatron_config: ModelParallelConfig, layer_idx: int):
        super().__init__()
        self.config: TransformerConfig = convert_config(config, megatron_config)
        self.layer_idx = layer_idx
        self.hidden_size = config.hidden_size
        self.self_attn = ParallelLlamaAttention(config=config, megatron_config=megatron_config)

        self.mlp = ParallelLlamaMLP(config, megatron_config=megatron_config)
        self.input_layernorm = ParallelLlamaRMSNorm(config, megatron_config)
        self.post_attention_layernorm = ParallelLlamaRMSNorm(config, megatron_config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
    ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up
                decoding (see `past_key_values`).
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
        """
        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Note: sequence parallel is hidden inside ColumnParallelLinear
        # reduce scatter is hidden inside RowParallelLinear

        # Self Attention
        hidden_states = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
        )

        # TODO: add sequence parallel operator reduce_scatter here

        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)

        # TODO: add sequence parallel operator all_gather here

        hidden_states = self.mlp(hidden_states)

        # TODO: add sequence parallel operator reduce_scatter here

        hidden_states = residual + hidden_states

        outputs = hidden_states

        return outputs


class ParallelLlamaDecoderLayerRmPad(nn.Module):
    def __init__(self, config: LlamaConfig, megatron_config: ModelParallelConfig, layer_idx: int):
        super().__init__()
        self.config: TransformerConfig = convert_config(config, megatron_config)
        self.layer_idx = layer_idx
        self.hidden_size = config.hidden_size
        self.self_attn = ParallelLlamaAttentionRmPad(config=config, megatron_config=megatron_config)

        self.mlp = ParallelLlamaMLP(config, megatron_config=megatron_config)
        self.input_layernorm = ParallelLlamaRMSNorm(config, megatron_config)
        self.post_attention_layernorm = ParallelLlamaRMSNorm(config, megatron_config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_ids: Optional[torch.LongTensor] = None,
        sequence_length: int = None,
        indices: torch.Tensor = None,
        cu_seqlens: int = None,
        max_seqlen_in_batch: int = None,
    ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
        residual = hidden_states  # (total_nnz // sp, 1, hidden_size)

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        # (total_nnz // sp, 1, hidden_size) -> all-gather (total_nnz, 1, hidden_size)
        # -> col + row -> reduce-scatter -> (total_nnz // sp, 1, hidden_size)
        hidden_states = self.self_attn(
            hidden_states=hidden_states,
            position_ids=position_ids,
            sequence_length=sequence_length,
            indices=indices,
            cu_seqlens=cu_seqlens,
            max_seqlen_in_batch=max_seqlen_in_batch,
        )

        hidden_states = residual + hidden_states

        # Fully Connected
        # shape changes same as attn
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = hidden_states

        return outputs


================================================
FILE: verl_rl/verl/models/llama/megatron/layers/parallel_linear.py
================================================

# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
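
# A worked example of the fused output width that `QKVParallelLinear` below hands
# to ColumnParallelLinear, using illustrative GQA numbers (num_heads=32,
# num_key_value_heads=8, head_dim=128) that are not taken from any config in
# this repository:
#
#     q_width  = 32 * 128            # 4096 output features for Q
#     kv_width = 8 * 128             # 1024 each for K and V
#     fused    = (32 + 2 * 8) * 128  # 6144 = q_width + 2 * kv_width
#
# Column parallelism then shards these 6144 fused output features across the
# tensor-parallel group.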
# Adapted from https://github.com/vllm-project/vllm/blob/main/vllm/model_executor/layers/linear.py

import torch
from megatron.core import tensor_parallel


class QKVParallelLinear(tensor_parallel.ColumnParallelLinear):
    def __init__(
        self,
        input_size,
        num_heads,
        num_key_value_heads,
        head_dim,
        *,
        bias=True,
        gather_output=True,
        skip_bias_add=False,
        **kwargs,
    ):
        # Keep input parameters, and already restrict the head numbers
        self.input_size = input_size
        self.q_output_size = num_heads * head_dim
        self.kv_output_size = num_key_value_heads * head_dim
        self.head_dim = head_dim
        self.gather_output = gather_output
        self.skip_bias_add = skip_bias_add

        input_size = self.input_size
        output_size = (num_heads + 2 * num_key_value_heads) * self.head_dim

        super().__init__(
            input_size=input_size,
            output_size=output_size,
            bias=bias,
            gather_output=gather_output,
            skip_bias_add=skip_bias_add,
            **kwargs,
        )


class MergedColumnParallelLinear(tensor_parallel.ColumnParallelLinear):
    def __init__(
        self,
        input_size,
        gate_ouput_size,
        up_output_size,
        *,
        bias=True,
        gather_output=True,
        skip_bias_add=False,
        **kwargs,
    ):
        # Keep input parameters, and already restrict the head numbers
        self.input_size = input_size
        self.output_size = gate_ouput_size + up_output_size
        self.gather_output = gather_output
        self.skip_bias_add = skip_bias_add

        super().__init__(
            input_size=self.input_size,
            output_size=self.output_size,
            bias=bias,
            gather_output=gather_output,
            skip_bias_add=skip_bias_add,
            **kwargs,
        )


class LinearForLastLayer(torch.nn.Linear):
    def __init__(
        self,
        input_size,
        output_size,
        *,
        config,
        bias=True,
    ):
        super().__init__(in_features=input_size, out_features=output_size, bias=bias)
        self.sequence_parallel = config.sequence_parallel
        if self.sequence_parallel:
            self.weight.sequence_parallel = True

    def forward(
        self,
        input_,
        weight=None,
        runtime_gather_output=None,
    ):
        logits = super().forward(input_)
        logits = logits.float()
        if self.sequence_parallel:
            logits = tensor_parallel.gather_from_sequence_parallel_region(logits, tensor_parallel_output_grad=False)
        return logits, None


================================================
FILE: verl_rl/verl/models/llama/megatron/layers/parallel_mlp.py
================================================

# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
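
# A minimal plain-torch sketch of the gated MLP computed by `ParallelLlamaMLP`
# below, assuming SiLU activation as in standard Llama configs; `w_gate`,
# `w_up`, and `w_down` are placeholder weights for illustration only.
#
#     import torch
#     import torch.nn.functional as F
#     hidden, inter = 16, 32
#     x = torch.randn(2, hidden)
#     w_gate, w_up = torch.randn(inter, hidden), torch.randn(inter, hidden)
#     w_down = torch.randn(hidden, inter)
#     gate_up = x @ torch.cat([w_gate, w_up], dim=0).T  # one fused matmul, like gate_up_proj
#     gate, up = gate_up.split(inter, dim=-1)
#     y = (F.silu(gate) * up) @ w_down.T                # SiLU gating, then the down projection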
from megatron.core import ModelParallelConfig, tensor_parallel from megatron.core import parallel_state as mpu from torch import nn from transformers.activations import ACT2FN from verl.models.llama.megatron.layers.parallel_linear import MergedColumnParallelLinear from verl.utils.megatron import tensor_parallel as tp_utils class ParallelLlamaMLP(nn.Module): def __init__(self, config, megatron_config: ModelParallelConfig = None) -> None: super().__init__() self.config = config self.hidden_size = config.hidden_size self.intermediate_size = config.intermediate_size # The weight is only [hidden_size, intermediate_size // model_parallel_world_size] column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear() row_kwargs = tp_utils.get_default_kwargs_for_row_parallel_linear() if megatron_config is not None: assert column_kwargs.get("config", False), "must have ModelParallelConfig" assert row_kwargs.get("config", False), "must have ModelParallelConfig" tp_utils.update_kwargs_with_config(row_kwargs, megatron_config) tp_utils.update_kwargs_with_config(column_kwargs, megatron_config) tp_size = mpu.get_tensor_model_parallel_world_size() self.gate_up_proj = MergedColumnParallelLinear( input_size=self.hidden_size, gate_ouput_size=self.intermediate_size, up_output_size=self.intermediate_size, bias=False, gather_output=False, skip_bias_add=False, **column_kwargs, ) self.gate_size = self.intermediate_size // tp_size self.down_proj = tensor_parallel.RowParallelLinear( input_size=self.intermediate_size, output_size=self.hidden_size, bias=False, input_is_parallel=True, skip_bias_add=False, **row_kwargs, ) self.act_fn = ACT2FN[config.hidden_act] def forward(self, x): gate_up = self.gate_up_proj(x)[0] gate, up = gate_up.split(self.gate_size, dim=-1) return self.down_proj(self.act_fn(gate) * up)[0] ================================================ FILE: verl_rl/verl/models/llama/megatron/layers/parallel_rmsnorm.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
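# Illustrative sketch, not part of the original file. The ParallelLlamaRMSNorm
# below delegates the actual computation to Apex's fused_rms_norm_affine; a
# plain-PyTorch reference of the same math is:

import torch


def reference_rms_norm(hidden_states: torch.Tensor, weight: torch.Tensor, eps: float) -> torch.Tensor:
    # normalize by the root mean square over the last dimension, then rescale;
    # compute in fp32 for numerical stability, as fused implementations typically do
    input_dtype = hidden_states.dtype
    hidden_states = hidden_states.to(torch.float32)
    variance = hidden_states.pow(2).mean(-1, keepdim=True)
    hidden_states = hidden_states * torch.rsqrt(variance + eps)
    return weight * hidden_states.to(input_dtype)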
import numbers import torch from apex.normalization.fused_layer_norm import fused_rms_norm_affine from megatron.core import ModelParallelConfig from torch import nn from transformers import LlamaConfig from verl.utils.megatron import sequence_parallel as sp_utils class ParallelLlamaRMSNorm(nn.Module): def __init__(self, config: LlamaConfig, megatron_config: ModelParallelConfig): """ LlamaRMSNorm is equivalent to T5LayerNorm """ super().__init__() if isinstance(config.hidden_size, numbers.Integral): normalized_shape = (config.hidden_size,) self.normalized_shape = torch.Size(normalized_shape) self.weight = nn.Parameter(torch.ones(self.normalized_shape)) self.variance_epsilon = config.rms_norm_eps if megatron_config.sequence_parallel: sp_utils.mark_parameter_as_sequence_parallel(self.weight) def forward(self, hidden_states): return fused_rms_norm_affine( input=hidden_states, weight=self.weight, normalized_shape=self.normalized_shape, eps=self.variance_epsilon, memory_efficient=True, ) ================================================ FILE: verl_rl/verl/models/llama/megatron/modeling_llama_megatron.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. # # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX # and OPT implementations in this library. It has been modified from its # original forms to accommodate minor architectural differences compared # to GPT-NeoX and OPT used by the Meta AI team that trained the model. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch LLaMA model with Megatron-style acceleration.""" from typing import Optional import torch import torch.utils.checkpoint from megatron.core import ModelParallelConfig, mpu, tensor_parallel from torch import nn from transformers.modeling_outputs import BaseModelOutputWithPast from transformers.models.llama.configuration_llama import LlamaConfig from transformers.models.llama.modeling_llama import CausalLMOutputWithPast from verl.utils.megatron import sequence_parallel as sp_utils from verl.utils.megatron import tensor_parallel as tp_utils from verl.utils.megatron_utils import TransformerConfig, convert_config from .layers import ParallelLlamaDecoderLayer, ParallelLlamaDecoderLayerRmPad, ParallelLlamaRMSNorm """ TODO: 1. Add weight initialization. Here we need to be careful on TP weight init. 2. Add sequence parallel 3. Load checkpoint from meta LLama pretrained checkpoint """ # Copied from transformers.models.bart.modeling_bart._make_causal_mask def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device): """ Make causal mask used for bi-directional self-attention. 
""" bsz, tgt_len = input_ids_shape mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device) mask_cond = torch.arange(mask.size(-1), device=device) mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) mask = mask.to(dtype) return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len) # Copied from transformers.models.bart.modeling_bart._expand_mask def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. """ bsz, src_len = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) class ParallelLlamaModel(nn.Module): """ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`LlamaDecoderLayer`] Args: config: LlamaConfig """ def __init__(self, config: LlamaConfig, megatron_config: ModelParallelConfig): super().__init__() self.config: TransformerConfig = convert_config(config, megatron_config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size embedding_kwargs = tp_utils.get_default_kwargs_for_parallel_embedding() if megatron_config is not None: assert embedding_kwargs.get("config", False), "must have ModelParallelConfig" tp_utils.update_kwargs_with_config(embedding_kwargs, self.megatron_config) self.embed_tokens = tensor_parallel.VocabParallelEmbedding( num_embeddings=config.vocab_size, embedding_dim=config.hidden_size, **embedding_kwargs ) self.layers = nn.ModuleList( [ParallelLlamaDecoderLayer(config, megatron_config) for _ in range(config.num_hidden_layers)] ) self.norm = ParallelLlamaRMSNorm(config, megatron_config) # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds): # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] combined_attention_mask = None if input_shape[-1] > 1: combined_attention_mask = _make_causal_mask( input_shape, inputs_embeds.dtype, device=inputs_embeds.device, ) if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to( inputs_embeds.device ) combined_attention_mask = ( expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask ) return combined_attention_mask def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, ) -> tuple | BaseModelOutputWithPast: """ Args: input_ids: input ids. shape (batch_size, seq_length) attention_mask: attention_mask. shape (batch_size, seq_length) position_ids: position ids. 
shape (batch_size, seq_length) Returns: """ batch_size, seq_length = input_ids.shape inputs_embeds = self.embed_tokens(input_ids) # embed positions attention_mask = self._prepare_decoder_attention_mask(attention_mask, (batch_size, seq_length), inputs_embeds) hidden_states = inputs_embeds for idx, decoder_layer in enumerate(self.layers): layer_outputs = decoder_layer( hidden_states, attention_mask=attention_mask, position_ids=position_ids, ) hidden_states = layer_outputs hidden_states = self.norm(hidden_states) return hidden_states class ParallelLlamaForCausalLM(nn.Module): def __init__(self, config: LlamaConfig, megatron_config: ModelParallelConfig): super().__init__() self.config: TransformerConfig = convert_config(config, megatron_config) self.model = ParallelLlamaModel(config, megatron_config=megatron_config) self.vocab_size = config.vocab_size column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear() if megatron_config is not None: assert column_kwargs.get("config", False), "must have ModelParallelConfig" tp_utils.update_kwargs_with_config(column_kwargs, self.megatron_config) self.lm_head = tensor_parallel.ColumnParallelLinear( input_size=config.hidden_size, output_size=config.vocab_size, bias=False, gather_output=False, skip_bias_add=False, **column_kwargs, ) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, ) -> tuple | CausalLMOutputWithPast: r""" Args: labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Returns: ```""" # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, ) hidden_states = outputs logits = self.lm_head(hidden_states)[0] logits = tensor_parallel.gather_from_tensor_model_parallel_region(logits) logits = logits.float() return CausalLMOutputWithPast( loss=None, logits=logits, past_key_values=None, hidden_states=None, attentions=None, ) from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa class ParallelLlamaModelRmPad(nn.Module): """ Transformer decoder consisting of *config.num_hidden_layers* layers. 
Each layer is a [`LlamaDecoderLayer`] Args: config: LlamaConfig """ def __init__(self, config: LlamaConfig, megatron_config: ModelParallelConfig): super().__init__() self.config: TransformerConfig = convert_config(config, megatron_config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size embedding_kwargs = tp_utils.get_default_kwargs_for_parallel_embedding() self.megatron_config = megatron_config if megatron_config is not None: assert embedding_kwargs.get("config", False), "must have ModelParallelConfig" tp_utils.update_kwargs_with_config(embedding_kwargs, self.megatron_config) self.embed_tokens = tensor_parallel.VocabParallelEmbedding( num_embeddings=config.vocab_size, embedding_dim=config.hidden_size, **embedding_kwargs ) self.layers = nn.ModuleList( [ParallelLlamaDecoderLayerRmPad(config, megatron_config) for _ in range(config.num_hidden_layers)] ) self.norm = ParallelLlamaRMSNorm(config, megatron_config) def forward( self, input_ids: torch.Tensor, position_ids: Optional[torch.LongTensor] = None, sequence_length: int = None, indices: torch.Tensor = None, cu_seqlens: int = None, max_seqlen_in_batch: int = None, ) -> tuple | BaseModelOutputWithPast: """ Args: input_ids: input ids. shape (1, totol_nnz) position_ids: position ids. shape (batch_size, seq_length) Returns: """ inputs_embeds = self.embed_tokens(input_ids) # (1, total_nnz) -> (1, total_nnz, hidden_size) # (1, total_nnz, hidden_size) -> (total_nnz, 1, hidden_size) -> (total_nnz // sp, 1, hidden_size) inputs_embeds = inputs_embeds.transpose(0, 1) if self.megatron_config.sequence_parallel: inputs_embeds = tensor_parallel.scatter_to_sequence_parallel_region(inputs_embeds) hidden_states = inputs_embeds for idx, decoder_layer in enumerate(self.layers): layer_outputs = decoder_layer( hidden_states, position_ids=position_ids, sequence_length=sequence_length, indices=indices, cu_seqlens=cu_seqlens, max_seqlen_in_batch=max_seqlen_in_batch, ) hidden_states = layer_outputs hidden_states = self.norm(hidden_states) return hidden_states class ParallelLlamaForCausalLMRmPad(nn.Module): def __init__(self, config: LlamaConfig, megatron_config: ModelParallelConfig): super().__init__() self.config: TransformerConfig = convert_config(config, megatron_config) self.megatron_config = megatron_config self.model = ParallelLlamaModelRmPad(config, megatron_config=megatron_config) self.vocab_size = config.vocab_size self._init_head(config) def _init_head(self, config): column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear() if self.megatron_config is not None: assert column_kwargs.get("config", False), "must have ModelParallelConfig" tp_utils.update_kwargs_with_config(column_kwargs, self.megatron_config) self.lm_head = tensor_parallel.ColumnParallelLinear( input_size=config.hidden_size, output_size=config.vocab_size, bias=False, gather_output=False, skip_bias_add=False, **column_kwargs, ) def _forward_head(self, hidden_states): # all_gather from sequence parallel region is performed inside lm_head logits = self.lm_head(hidden_states)[0] logits = logits.float() # (total_nnz_padded, 1, vocab_size // tp) logits = tensor_parallel.gather_from_tensor_model_parallel_region(logits) # (total_nnz_padded, 1, vocab_size) return logits def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, ) -> tuple | CausalLMOutputWithPast: r""" Args: labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for 
                computing the masked language modeling loss. Indices should either be in
                `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100`
                are ignored (masked), the loss is only computed for the tokens with labels in
                `[0, ..., config.vocab_size]`.

        Returns:
        """
        batch_size, sequence_length = input_ids.shape

        # remove padding here
        input_ids, indices, cu_seqlens, max_seqlen_in_batch, *_ = unpad_input(
            input_ids.unsqueeze(dim=-1), attention_mask
        )  # (total_nnz, 1)

        # pad input_ids to multiple of tp for all tp ranks
        # TODO: for better performance, the sp padding should be removed at each layer. Not sure the performance gap
        if self.megatron_config.sequence_parallel:
            input_ids = sp_utils.pad_to_sequence_parallel(input_ids)

        input_ids = input_ids.transpose(0, 1)  # (1, total_nnz+pad)

        outputs = self.model(
            input_ids=input_ids,
            position_ids=position_ids,
            sequence_length=sequence_length,
            indices=indices,
            cu_seqlens=cu_seqlens,
            max_seqlen_in_batch=max_seqlen_in_batch,
        )

        hidden_states = outputs
        logits = self._forward_head(hidden_states)

        # remove padding from sequence parallel
        if self.megatron_config.sequence_parallel:
            total_nnz = cu_seqlens[-1]
            logits = logits[:total_nnz]  # (total_nnz_padded)

        logits = torch.squeeze(logits, dim=1)  # remove the artificial batch dimension

        # add removed padding back
        logits = pad_input(
            logits, indices, batch_size, seqlen=sequence_length
        )  # (batch_size, sequence_length, vocab_size)

        return CausalLMOutputWithPast(
            loss=None,
            logits=logits,
            past_key_values=None,
            hidden_states=None,
            attentions=None,
        )


class ParallelLlamaForValueRmPad(ParallelLlamaForCausalLMRmPad):
    def _init_head(self, config):
        column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear()
        if self.megatron_config is not None:
            assert column_kwargs.get("config", False), "must have ModelParallelConfig"
            tp_utils.update_kwargs_with_config(column_kwargs, self.megatron_config)
        self.lm_head = nn.Linear(in_features=config.hidden_size, out_features=1, bias=False)
        # lm_head is effectively the same as sequence parallel
        sp_utils.mark_parameter_as_sequence_parallel(self.lm_head.weight)

    def _forward_head(self, hidden_states):
        logits = self.lm_head(hidden_states)  # (total_nnz_padded // tp, 1, 1)
        logits = logits.float()
        if self.megatron_config.sequence_parallel:
            logits = tensor_parallel.gather_from_sequence_parallel_region(logits, tensor_parallel_output_grad=False)
        return logits

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
    ) -> tuple | CausalLMOutputWithPast:
        output = super().forward(input_ids, attention_mask, position_ids)
        output.logits = torch.squeeze(output.logits, dim=-1)
        return output


"""
Support pipeline parallelism
"""


class ParallelLlamaModelRmPadPP(nn.Module):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`LlamaDecoderLayer`]
    This model definition supports pipeline parallelism. To support pp and vpp,
    - This model only contains layer in this pp stage and vpp chunk
    - When calling get_model in Megatron, this rank will instantiate all the vpp chunks in this pp.
Args: config: LlamaConfig """ def __init__(self, config: LlamaConfig, megatron_config: ModelParallelConfig, pre_process, post_process): super().__init__() self.config: TransformerConfig = convert_config(config, megatron_config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.pre_process = pre_process self.post_process = post_process self.megatron_config = megatron_config embedding_kwargs = tp_utils.get_default_kwargs_for_parallel_embedding() if megatron_config is not None: assert embedding_kwargs.get("config", False), "must have ModelParallelConfig" tp_utils.update_kwargs_with_config(embedding_kwargs, self.megatron_config) if pre_process: self.embed_tokens = tensor_parallel.VocabParallelEmbedding( num_embeddings=config.vocab_size, embedding_dim=config.hidden_size, **embedding_kwargs ) else: self.embed_tokens = None pp_rank = mpu.get_pipeline_model_parallel_rank() pp_size = megatron_config.pipeline_model_parallel_size self.num_layer_per_pp = config.num_hidden_layers // pp_size vpp_size = megatron_config.virtual_pipeline_model_parallel_size vpp_rank = mpu.get_virtual_pipeline_model_parallel_rank() if vpp_size is not None: self.layers = nn.ModuleList() self.num_layer_vpp_chunk = self.num_layer_per_pp // vpp_size self.num_layer_this_model = self.num_layer_vpp_chunk offset = vpp_rank * (config.num_hidden_layers // vpp_size) + (pp_rank * self.num_layer_vpp_chunk) else: self.num_layer_this_model = self.num_layer_per_pp offset = pp_rank * self.num_layer_per_pp self.layers = nn.ModuleList() for i in range(self.num_layer_this_model): layer = ParallelLlamaDecoderLayerRmPad(config, megatron_config, layer_idx=offset + i) self.layers.add_module(f"{i}", layer) if post_process: self.norm = ParallelLlamaRMSNorm(config, megatron_config) else: self.norm = None def set_input_tensor(self, input_tensor): """Set input tensor to be used instead of forward()'s input. When doing pipeline parallelism the input from the previous stage comes from communication, not from the input, so the model's forward_step_func won't have it. This function is thus used by internal code to bypass the input provided by the forward_step_func""" self.input_tensor = input_tensor def forward( self, input_ids: torch.Tensor, position_ids: Optional[torch.LongTensor] = None, sequence_length: int = None, indices: torch.Tensor = None, cu_seqlens: int = None, max_seqlen_in_batch: int = None, ) -> tuple | BaseModelOutputWithPast: """ Args: input_ids: input ids. shape (1, totol_nnz) position_ids: position ids. 
shape (batch_size, seq_length) Returns: """ if self.pre_process: inputs_embeds = self.embed_tokens(input_ids) # (1, total_nnz) -> (1, total_nnz, hidden_size) # vocab parallel embedding will not do sequence parallel reduce-scatter in open source megatron # so need to deal with it by handle here: # (1, total_nnz, hidden_size) -> (total_nnz, 1, hidden_size) -> (total_nnz // sp, 1, hidden_size) inputs_embeds = inputs_embeds.transpose(0, 1) if self.megatron_config.sequence_parallel: inputs_embeds = tensor_parallel.scatter_to_sequence_parallel_region(inputs_embeds) hidden_states = inputs_embeds else: # self.hidden_states should be passed by Megatron hidden_states = self.input_tensor for idx, decoder_layer in enumerate(self.layers): layer_outputs = decoder_layer( hidden_states, position_ids=position_ids, sequence_length=sequence_length, indices=indices, cu_seqlens=cu_seqlens, max_seqlen_in_batch=max_seqlen_in_batch, ) hidden_states = layer_outputs if self.post_process: hidden_states = self.norm(hidden_states) return hidden_states class ParallelLlamaForCausalLMRmPadPP(nn.Module): def __init__( self, config: LlamaConfig, megatron_config: ModelParallelConfig, pre_process, post_process, share_embeddings_and_output_weights=False, ): super().__init__() self.config: TransformerConfig = convert_config(config, megatron_config) self.megatron_config = megatron_config self.model = ParallelLlamaModelRmPadPP( config, megatron_config=megatron_config, pre_process=pre_process, post_process=post_process ) assert share_embeddings_and_output_weights is False, ( "Llama Model not supports sharing embedding and output weights" ) self.share_embeddings_and_output_weights = share_embeddings_and_output_weights self.vocab_size = config.vocab_size self.pre_process = pre_process self.post_process = post_process if post_process: self._init_head(config) def set_input_tensor(self, input_tensor): """Set input tensor to be used instead of forward()'s input. When doing pipeline parallelism the input from the previous stage comes from communication, not from the input, so the model's forward_step_func won't have it. This function is thus used by internal code to bypass the input provided by the forward_step_func""" assert len(input_tensor) == 1 self.model.set_input_tensor(input_tensor[0]) def _init_head(self, config): column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear() if self.megatron_config is not None: assert column_kwargs.get("config", False), "must have ModelParallelConfig" tp_utils.update_kwargs_with_config(column_kwargs, self.megatron_config) self.lm_head = tensor_parallel.ColumnParallelLinear( input_size=config.hidden_size, output_size=config.vocab_size, bias=False, gather_output=False, skip_bias_add=False, **column_kwargs, ) def _forward_head(self, hidden_states): # all_gather from sequence parallel region is performed inside lm_head # logits shape before forward_head hidden_states.shape: [4, 32, 4096] logits = self.lm_head(hidden_states)[0] # logits shape after forward_head logits.shape: [8, 32, 8] logits = logits.float() # (total_nnz_padded, 1, vocab_size // tp) return logits def forward( self, # original input *, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, ) -> tuple | CausalLMOutputWithPast: r""" Args: labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. 
                Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring).
                Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens
                with labels in `[0, ..., config.vocab_size]`.

        Returns:
        """
        # Note that input_ids, attention_mask and position_ids should be passed to every pp layer.
        # In the first pp, input_ids will be used, in other pp layers hidden_states will be used inside self.model
        batch_size, sequence_length = input_ids.shape

        # remove padding here
        input_ids_rmpad, indices, cu_seqlens, max_seqlen_in_batch, *_ = unpad_input(
            input_ids.unsqueeze(dim=-1), attention_mask
        )  # (total_nnz, 1)

        # pad input_ids to multiple of tp for all tp ranks
        # TODO: for better performance, the sp padding should be removed at each layer. Not sure the performance gap
        if self.megatron_config.sequence_parallel:
            input_ids_rmpad = sp_utils.pad_to_sequence_parallel(input_ids_rmpad)

        input_ids_rmpad = input_ids_rmpad.transpose(0, 1)  # (1, total_nnz+pad)

        outputs = self.model(
            input_ids=input_ids_rmpad,
            position_ids=position_ids,
            sequence_length=sequence_length,
            indices=indices,
            cu_seqlens=cu_seqlens,
            max_seqlen_in_batch=max_seqlen_in_batch,
        )

        if self.post_process:
            hidden_states = outputs
            logits = self._forward_head(hidden_states)
            logits = torch.squeeze(logits, dim=1)  # remove the artificial batch dimension

            # remove padding from sequence parallel
            if self.megatron_config.sequence_parallel:
                total_nnz = cu_seqlens[-1]
                logits = logits[:total_nnz]  # (total_nnz_padded)

            # add removed padding back. If input is already rmpad, we let the caller pad_input
            logits = pad_input(
                logits, indices, batch_size, seqlen=sequence_length
            )  # (batch_size, sequence_length, vocab_size)

            return CausalLMOutputWithPast(
                loss=None,
                logits=logits,
                past_key_values=None,
                hidden_states=None,
                attentions=None,
            )
        else:
            return outputs


class ParallelLlamaForValueRmPadPP(ParallelLlamaForCausalLMRmPadPP):
    def _init_head(self, config):
        column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear()
        if self.megatron_config is not None:
            assert column_kwargs.get("config", False), "must have ModelParallelConfig"
            tp_utils.update_kwargs_with_config(column_kwargs, self.megatron_config)
        self.lm_head = nn.Linear(in_features=config.hidden_size, out_features=1, bias=False)
        # lm_head is effectively the same as sequence parallel
        sp_utils.mark_parameter_as_sequence_parallel(self.lm_head.weight)

    def _forward_head(self, hidden_states):
        logits = self.lm_head(hidden_states)  # (total_nnz_padded // tp, 1, 1)
        logits = logits.float()
        if self.megatron_config.sequence_parallel:
            logits = tensor_parallel.gather_from_sequence_parallel_region(logits, tensor_parallel_output_grad=False)
        return logits

    def forward(
        self,
        *,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
    ) -> tuple | CausalLMOutputWithPast:
        output = super().forward(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids)
        if self.post_process:
            output.logits = torch.squeeze(output.logits, dim=-1)
            return output
        else:
            return output


================================================
FILE: verl_rl/verl/models/mcore/__init__.py
================================================
# Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .registry import ( get_mcore_forward_fn, get_mcore_forward_fused_fn, get_mcore_weight_converter, hf_to_mcore_config, init_mcore_model, ) __all__ = [ "hf_to_mcore_config", "init_mcore_model", "get_mcore_forward_fn", "get_mcore_weight_converter", "get_mcore_forward_fused_fn", ] ================================================ FILE: verl_rl/verl/models/mcore/config_converter.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # convert huggingface config to mcore transformer config import warnings import torch import torch.nn.functional as F from megatron.core import parallel_state as mpu from megatron.core.transformer import MLATransformerConfig, TransformerConfig from transformers import PretrainedConfig def _get_base_transformer_config( hf_config: PretrainedConfig, dtype: torch.dtype, **override_transformer_config_kwargs ) -> dict: """ Create a base TransformerConfig with common parameters across different model architectures. TODO: (ycl) use dataclass or converter config? 
Args: hf_config: HuggingFace model configuration dtype: Data type for the model override_transformer_config_kwargs: Additional parameters to override defaults Returns: TransformerConfig with common parameters """ # Common parallel state parameters overlap_p2p_comm = ( mpu.get_virtual_pipeline_model_parallel_world_size() is not None and mpu.get_virtual_pipeline_model_parallel_world_size() > 1 ) batch_p2p_comm = False # Base configuration with common parameters base_config = { # Model architecture parameters "num_layers": hf_config.num_hidden_layers, "hidden_size": hf_config.hidden_size, "num_attention_heads": hf_config.num_attention_heads, "num_query_groups": hf_config.num_key_value_heads, "ffn_hidden_size": hf_config.intermediate_size, "attention_dropout": hf_config.attention_dropout, "hidden_dropout": getattr(hf_config, "hidden_dropout", 0.0), "kv_channels": getattr(hf_config, "head_dim", None), "layernorm_epsilon": hf_config.rms_norm_eps, "add_bias_linear": True, # Activation and normalization "activation_func": F.silu, "normalization": "RMSNorm", "gated_linear_unit": True, # Data types "pipeline_dtype": dtype, "params_dtype": dtype, "bf16": dtype is torch.bfloat16, # Parallel configuration "tensor_model_parallel_size": mpu.get_tensor_model_parallel_world_size(), "pipeline_model_parallel_size": mpu.get_pipeline_model_parallel_world_size(), "expert_model_parallel_size": mpu.get_expert_model_parallel_world_size(), "expert_tensor_parallel_size": mpu.get_expert_tensor_parallel_world_size(), "virtual_pipeline_model_parallel_size": mpu.get_virtual_pipeline_model_parallel_world_size(), "context_parallel_size": mpu.get_context_parallel_world_size(), "overlap_p2p_comm": overlap_p2p_comm, "batch_p2p_comm": batch_p2p_comm, "sequence_parallel": mpu.get_tensor_model_parallel_world_size() > 1, # Common settings "variable_seq_lengths": True, "masked_softmax_fusion": True, "moe_token_dispatcher_type": "alltoall", } # Update with any provided overrides # override_transformer_config_kwargs as kwargs shall never be none base_config.update(override_transformer_config_kwargs) return base_config def _get_mla_transformer_config( hf_config: PretrainedConfig, mla_rope_config: dict, dtype: torch.dtype, **override_transformer_config_kwargs ) -> dict: """ Create a MLATransformerConfig with common parameters across different model architectures. This is specifically for MLA models like DeepseekV3. 
Args: hf_config: HuggingFace model configuration mla_rope_config: MLA specific RoPE configuration dtype: Data type for the model override_transformer_config_kwargs: Additional parameters to override defaults Returns: MLATransformerConfig with common parameters """ base_config = _get_base_transformer_config(hf_config=hf_config, dtype=dtype, **override_transformer_config_kwargs) mla_config = { # MLA specific parameters "q_lora_rank": hf_config.q_lora_rank, "kv_lora_rank": hf_config.kv_lora_rank, "qk_head_dim": hf_config.qk_nope_head_dim, "qk_pos_emb_head_dim": hf_config.qk_rope_head_dim, "v_head_dim": hf_config.v_head_dim, "rotary_base": hf_config.rope_theta, "rotary_scaling_factor": mla_rope_config["factor"], "rope_type": mla_rope_config["type"], "max_position_embeddings": mla_rope_config["original_max_position_embeddings"], "beta_fast": mla_rope_config["beta_fast"], "beta_slow": mla_rope_config["beta_slow"], "mscale": mla_rope_config["mscale"], "mscale_all_dim": mla_rope_config["mscale_all_dim"], } base_config.update(mla_config) return base_config def check_and_disable_incompatible_configs(original_config: dict) -> dict: """ Check and disable incompatible configurations for older Megatron version. Args: original_config (dict): The original model configuration. Returns: dict: The updated model configuration with incompatible settings disabled. """ removed_keys = [] for key in original_config.keys(): if not hasattr(TransformerConfig, key): removed_keys.append(key) if removed_keys: warnings.warn( f"The following keys are not supported in the current Megatron version and will be removed: {removed_keys}", stacklevel=2, ) for key in removed_keys: original_config.pop(key) return original_config def hf_to_mcore_config_dense( hf_config: PretrainedConfig, dtype: torch.dtype, **override_transformer_config_kwargs ) -> TransformerConfig: # for LlamaForCausalLM or Qwen2ForCausalLM qkv_bias = True if "Qwen2ForCausalLM" in hf_config.architectures else getattr(hf_config, "attention_bias", False) qk_layernorm = True if "Qwen3ForCausalLM" in hf_config.architectures else False args: dict = _get_base_transformer_config( hf_config=hf_config, dtype=dtype, use_cpu_initialization=False, add_bias_linear=False, add_qkv_bias=qkv_bias, qk_layernorm=qk_layernorm, ) # override_transformer_config_kwargs as kwargs shall never be none args.update(override_transformer_config_kwargs) args = check_and_disable_incompatible_configs(args) print(f"Overridden TF init config: {args}") return TransformerConfig(**args) def hf_to_mcore_config_qwen2moe( hf_config: PretrainedConfig, dtype: torch.dtype, **override_transformer_config_kwargs ) -> TransformerConfig: args: dict = _get_base_transformer_config( hf_config=hf_config, dtype=dtype, use_cpu_initialization=False, add_bias_linear=False, layernorm_epsilon=hf_config.rms_norm_eps, # MoE specific moe_ffn_hidden_size=hf_config.moe_intermediate_size, moe_router_bias_update_rate=0.001, moe_router_topk=hf_config.num_experts_per_tok, num_moe_experts=hf_config.num_experts, moe_shared_expert_intermediate_size=hf_config.shared_expert_intermediate_size, moe_aux_loss_coeff=hf_config.router_aux_loss_coef, # moe_aux_loss_coeff=0.0, moe_router_load_balancing_type="none", # turn off aux_loss as it hurts perf in RL moe_shared_expert_overlap=True, moe_grouped_gemm=True, moe_router_score_function="softmax", # Other optimizations persist_layer_norm=True, bias_activation_fusion=True, bias_dropout_fusion=True, # Qwen specific moe_router_pre_softmax=True, add_qkv_bias=True, ) # 
override_transformer_config_kwargs as kwargs shall never be none args.update(override_transformer_config_kwargs) args = check_and_disable_incompatible_configs(args) print(f"Overridden TF init config: {args}") return TransformerConfig(**args) def hf_to_mcore_config_mixtral( hf_config: PretrainedConfig, dtype: torch.dtype, **override_transformer_config_kwargs ) -> TransformerConfig: args: dict = _get_base_transformer_config( hf_config=hf_config, dtype=dtype, use_cpu_initialization=False, add_bias_linear=False, layernorm_epsilon=hf_config.rms_norm_eps, # MoE specific num_moe_experts=hf_config.num_local_experts, moe_aux_loss_coeff=hf_config.router_aux_loss_coef, moe_router_topk=hf_config.num_experts_per_tok, moe_router_pre_softmax=True, moe_router_load_balancing_type="none", # turn off aux_loss as it hurts perf in RL moe_router_score_function="softmax", moe_shared_expert_intermediate_size=None, # mixtral has no shared expert moe_shared_expert_overlap=False, # mixtral has no shared expert moe_ffn_hidden_size=hf_config.intermediate_size, moe_router_bias_update_rate=0.001, # moe_permute_fusion=True, # need TE 2.1+ moe_grouped_gemm=True, # Other optimizations persist_layer_norm=True, apply_rope_fusion=True, bias_activation_fusion=True, bias_dropout_fusion=True, ) # override_transformer_config_kwargs as kwargs shall never be none args.update(override_transformer_config_kwargs) args = check_and_disable_incompatible_configs(args) print(f"Overridden TF init config: {args}") return TransformerConfig(**args) def hf_to_mcore_config_qwen3moe( hf_config: PretrainedConfig, dtype: torch.dtype, **override_transformer_config_kwargs ) -> TransformerConfig: args: dict = _get_base_transformer_config( hf_config=hf_config, dtype=dtype, use_cpu_initialization=False, add_bias_linear=False, layernorm_epsilon=hf_config.rms_norm_eps, # MoE specific moe_ffn_hidden_size=hf_config.moe_intermediate_size, moe_router_bias_update_rate=0.001, moe_router_topk=hf_config.num_experts_per_tok, num_moe_experts=hf_config.num_experts, moe_aux_loss_coeff=hf_config.router_aux_loss_coef, # moe_aux_loss_coeff=0.0, moe_router_load_balancing_type="none", # turn off aux_loss as it hurts perf in RL moe_grouped_gemm=True, moe_router_score_function="softmax", # Other optimizations persist_layer_norm=True, bias_activation_fusion=True, bias_dropout_fusion=True, # Qwen specific moe_router_pre_softmax=False, qk_layernorm=True, ) # override_transformer_config_kwargs as kwargs shall never be none args.update(override_transformer_config_kwargs) args = check_and_disable_incompatible_configs(args) print(f"Overridden TF init config: {args}") return TransformerConfig(**args) def hf_to_mcore_config_dpskv3( hf_config: PretrainedConfig, dtype: torch.dtype, **override_transformer_config_kwargs ) -> MLATransformerConfig: # DeepseekV3ForCausalLM from megatron.core.transformer.enums import AttnBackend from .patch_v012 import apply_patch apply_patch() mla_rope_config = { "beta_fast": 32, "beta_slow": 1, "factor": 1, "mscale": 1.0, "mscale_all_dim": 1.0, "original_max_position_embeddings": 4096, "type": "rope", } if "rope_scaling" in hf_config and hf_config.rope_scaling is not None: mla_rope_config.update(hf_config.rope_scaling) moe_layer_freq = [1] * hf_config.num_hidden_layers for i in range(min(hf_config.first_k_dense_replace, hf_config.num_hidden_layers)): moe_layer_freq[i] = 0 # disable MTP and quantization for now if "num_nextn_predict_layers" in hf_config: assert hf_config.num_nextn_predict_layers == 0, ( "MTP is not supported for now, please modify the 
config.json to set num_nextn_predict_layers to 0" ) assert "quantization_config" not in hf_config or not hf_config.quantization_config, ( "quantization is not supported for now, please modify the config.json to remove quantization_config" ) args: dict = _get_mla_transformer_config( hf_config=hf_config, mla_rope_config=mla_rope_config, dtype=dtype, # Additional parameters use_cpu_initialization=False, add_bias_linear=False, attention_backend=AttnBackend.fused, qk_layernorm=True, # Standard MoE parameters moe_ffn_hidden_size=hf_config.moe_intermediate_size, moe_token_dispatcher_type="alltoall", moe_router_bias_update_rate=0.001, moe_router_enable_expert_bias=True, moe_router_topk=hf_config.num_experts_per_tok, num_moe_experts=hf_config.n_routed_experts, moe_shared_expert_intermediate_size=hf_config.moe_intermediate_size * hf_config.n_shared_experts, moe_aux_loss_coeff=getattr(hf_config, "aux_loss_alpha", 0.001), moe_router_load_balancing_type="seq_aux_loss", moe_shared_expert_overlap=True, # moe_permute_fusion=True, # need TE 2.1+ moe_grouped_gemm=True, moe_router_score_function="sigmoid", moe_router_pre_softmax=True, moe_router_topk_scaling_factor=hf_config.routed_scaling_factor, moe_layer_freq=moe_layer_freq, # mcore 0.12 moe moe_router_dtype="fp64", disable_bf16_reduced_precision_matmul=True, # Other optimizations # deallocate_pipeline_outputs=True, # gradient_accumulation_fusion=True, persist_layer_norm=True, bias_activation_fusion=True, bias_dropout_fusion=True, ) # override_transformer_config_kwargs as kwargs shall never be none args.update(override_transformer_config_kwargs) args = check_and_disable_incompatible_configs(args) transformer_config: MLATransformerConfig = MLATransformerConfig(**args) print(f"Overridden MLA TF init config: {transformer_config}") # MTP if "num_nextn_predict_layers" in hf_config: transformer_config.mtp_num_layers = hf_config.num_nextn_predict_layers transformer_config.mtp_loss_scaling_factor = 0.1 return transformer_config def hf_to_mcore_config_qwen2_5_vl( hf_config: PretrainedConfig, dtype: torch.dtype, **override_transformer_config_kwargs ) -> TransformerConfig: # Qwen2_5_VLForConditionalGeneration args = _get_base_transformer_config( hf_config=hf_config, dtype=dtype, add_bias_linear=False, # qwen specific add_qkv_bias=True, mrope_section=hf_config.rope_scaling["mrope_section"], ) # override_transformer_config_kwargs as kwargs shall never be none args.update(override_transformer_config_kwargs) args = check_and_disable_incompatible_configs(args) print(f"Overridden TF init config: {args}") return TransformerConfig(**args) def hf_to_mcore_config_llama4( hf_config: PretrainedConfig, dtype: torch.dtype, **override_transformer_config_kwargs ) -> TransformerConfig: # Llama4ForConditionalGeneration raise NotImplementedError("Llama4ForConditionalGeneration is not supported yet") ================================================ FILE: verl_rl/verl/models/mcore/loader.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. import time import torch import torch.distributed as dist from verl.utils.device import get_device_id, get_torch_device from .saver import _megatron_calc_global_rank def _megatron_calc_layer_map(config): """Calculate the mapping of global layer_idx to local layer_idx Returns: layer_map (Dict: int -> tuple(int, int, int)): mapping from the global layer index to a tuple of (pp_rank, virtual_pp_rank, layer_idx inside model) """ from megatron.core import mpu pp_size = mpu.get_pipeline_model_parallel_world_size() virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1 layer_map = dict() num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers for pp_rank_idx in range(pp_size): for virtual_pp_rank_idx in range(virtual_pp_size): layer_offset = ( virtual_pp_rank_idx * (config.num_hidden_layers // virtual_pp_size) + pp_rank_idx * num_layers_per_model ) for layer_idx in range(num_layers_per_model): layer_map[layer_offset + layer_idx] = ( pp_rank_idx, virtual_pp_rank_idx, layer_idx, ) return layer_map def load_state_dict_to_megatron_gptmodel(state_dict, wrapped_models, config, params_dtype, is_value_model=False): """Load merged state_dict to sharded Megatron module in training.""" from megatron.core import DistributedDataParallel as LocalDDP from megatron.core import mpu from megatron.core.transformer.module import Float16Module from torch.nn.parallel import DistributedDataParallel as torchDDP from verl.utils.logger import print_rank_0 from verl.utils.megatron_utils import unwrap_model start_time = time.time() def _get_gpt_model(model): return model def broadcast_params(module): for param in module.parameters(): torch.distributed.broadcast( param.data, src=mpu.get_data_parallel_src_rank(), group=mpu.get_data_parallel_group() ) dp_rank = mpu.get_data_parallel_rank() pp_rank = mpu.get_pipeline_model_parallel_rank() cp_rank = mpu.get_context_parallel_rank() src_rank = _megatron_calc_global_rank(tp_rank=0, dp_rank=0, pp_rank=0, cp_rank=cp_rank) pp_size = mpu.get_pipeline_model_parallel_world_size() virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1 mp_group = mpu.get_model_parallel_group() if torch.distributed.get_rank() == src_rank: assert mp_group.rank() == 0, f"mp_rank:[{mp_group.rank}] != 0 on rank #0" assert pp_rank == 0, f"pp_rank:[{pp_rank}] != 0 on rank #0" assert dp_rank == 0, f"dp_rank:[{dp_rank}] != 0 on rank #0" if not isinstance(wrapped_models, list | tuple): wrapped_models = list(wrapped_models) assert len(wrapped_models) == virtual_pp_size num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers models = [None] * len(wrapped_models) for i, wrapped_model in enumerate(wrapped_models): models[i] = unwrap_model(wrapped_model, (torchDDP, LocalDDP, Float16Module)) gpt_model_module = _get_gpt_model(models[i]) assert len(gpt_model_module.decoder.layers) == num_layers_per_model def _broadcast_tensor(tensor, name) -> torch.Tensor: """broadcast tensor from rank0 across mp_group""" nonlocal state_dict nonlocal mp_group if torch.distributed.get_rank() == src_rank: if name in state_dict: weight = state_dict[name] tensor_shape = weight.shape else: tensor_shape = None else: weight = None tensor_shape = None obj_list = [tensor_shape] 
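        # Two-phase handshake: the shape travels first as a Python object so every
        # rank in mp_group learns whether the tensor exists and can allocate a
        # correctly sized receive buffer; only then is the payload itself
        # broadcast with dist.broadcast below.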
dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group) tensor_shape = obj_list[0] if tensor_shape is None: # all or none ranks in the mp_group should reach here print_rank_0(f"tensor:[{name}] not in state_dict, skip load") return if tensor is None: tensor = torch.empty( tensor_shape, dtype=params_dtype, device=get_device_id(), requires_grad=False, ) if torch.distributed.get_rank() == src_rank: tensor.data.copy_(weight) dist.broadcast(tensor, src=src_rank, group=mp_group) def _broadcast_tp_shard_tensor_vocab(tensor, name, chunk_dim=0, mutate_func=None) -> torch.Tensor: """broadcast tensor in tp shards across mp_group""" nonlocal state_dict nonlocal mp_group tp_rank = mpu.get_tensor_model_parallel_rank() tp_size = mpu.get_tensor_model_parallel_world_size() if torch.distributed.get_rank() == src_rank: if name in state_dict: full_weight = state_dict[name] if mutate_func is not None: full_weight = mutate_func(full_weight) tensor_chunk = torch.chunk(full_weight, tp_size, dim=chunk_dim) chunk_shape = tensor_chunk[0].shape else: chunk_shape = None else: chunk_shape = None obj_list = [chunk_shape] dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group) chunk_shape = obj_list[0] if chunk_shape is None: # all or none ranks in the mp_group should reach here print_rank_0(f"tp_shard tensor:[{name}] not in state_dict, skip loading") return if tensor is None: sync_tensor = torch.empty( chunk_shape, dtype=params_dtype, device=get_device_id(), requires_grad=False, ) else: assert tensor.shape == chunk_shape, ( f"rank #{torch.distributed.get_rank()} tensor {name} shape {tensor.shape} != {chunk_shape}" ) sync_tensor = torch.empty_like(tensor, device=get_device_id(), requires_grad=False) for i in range(tp_size): if torch.distributed.get_rank() == src_rank: sync_tensor.data.copy_(tensor_chunk[i]) dist.broadcast(sync_tensor, src=src_rank, group=mp_group) if (i == tp_rank) and (tensor is not None): tensor.data.copy_(sync_tensor) def _broadcast_tp_shard_tensor(tensor, name, chunk_dim=0, mutate_func=None) -> torch.Tensor: """broadcast tensor in tp shards across mp_group""" nonlocal state_dict nonlocal mp_group tp_rank = mpu.get_tensor_model_parallel_rank() tp_size = mpu.get_tensor_model_parallel_world_size() if torch.distributed.get_rank() == src_rank: if name in state_dict: full_weight = state_dict[name] if mutate_func is not None: full_weight = mutate_func(full_weight) tensor_chunk = torch.chunk(full_weight, tp_size, dim=chunk_dim) chunk_shape = tensor_chunk[0].shape else: chunk_shape = None else: chunk_shape = None obj_list = [chunk_shape] dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group) chunk_shape = obj_list[0] if chunk_shape is None: # all or none ranks in the mp_group should reach here print_rank_0(f"tp_shard tensor:[{name}] not in state_dict, skip loading") return if tensor is None: sync_tensor = torch.empty( chunk_shape, dtype=params_dtype, device=get_device_id(), requires_grad=False, ) else: assert tensor.shape == chunk_shape, ( f"rank #{torch.distributed.get_rank()} tensor {name} shape {tensor.shape} != {chunk_shape}" ) sync_tensor = torch.empty_like(tensor, device=get_device_id(), requires_grad=False) for i in range(tp_size): if torch.distributed.get_rank() == src_rank: sync_tensor.data.copy_(tensor_chunk[i]) dist.broadcast(sync_tensor, src=src_rank, group=mp_group) if (i == tp_rank) and (tensor is not None): tensor.data.copy_(sync_tensor) def _broadcast_tp_shard_tensor_gate_up(tensor, gate_name, up_name) -> torch.Tensor: """broadcast tensor in tp shards across 
mp_group""" nonlocal state_dict nonlocal mp_group tp_rank = mpu.get_tensor_model_parallel_rank() tp_size = mpu.get_tensor_model_parallel_world_size() if torch.distributed.get_rank() == src_rank: gate_weight = state_dict[gate_name] up_weight = state_dict[up_name] new_gate_up_weight = torch.empty( config.intermediate_size * 2, config.hidden_size, dtype=params_dtype, device=get_device_id() ) for i in range(tp_size): intermediate_size_tp = config.intermediate_size // tp_size gate_weight_tp = gate_weight[i * intermediate_size_tp : (i + 1) * intermediate_size_tp] up_weight_tp = up_weight[i * intermediate_size_tp : (i + 1) * intermediate_size_tp] new_gate_up_weight[intermediate_size_tp * 2 * i : intermediate_size_tp * 2 * (i + 1)].copy_( torch.cat([gate_weight_tp, up_weight_tp], dim=0) ) tensor_chunk = torch.chunk(new_gate_up_weight, tp_size, dim=0) chunk_shape = tensor_chunk[0].shape else: chunk_shape = None obj_list = [chunk_shape] dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group) chunk_shape = obj_list[0] if chunk_shape is None: # all or none ranks in the mp_group should reach here print_rank_0(f"tp_shard tensor:[{gate_name, up_name}] not in state_dict, skip loading") return if tensor is None: sync_tensor = torch.empty( chunk_shape, dtype=params_dtype, device=get_device_id(), requires_grad=False, ) else: assert tensor.shape == chunk_shape, ( f"rank #{torch.distributed.get_rank() == src_rank:} tensor {gate_name, up_name} shape " f"{tensor.shape} != {chunk_shape}" ) sync_tensor = torch.empty_like(tensor, device=get_device_id(), requires_grad=False) for i in range(tp_size): if torch.distributed.get_rank() == src_rank: sync_tensor.data.copy_(tensor_chunk[i]) dist.broadcast(sync_tensor, src=src_rank, group=mp_group) if (i == tp_rank) and (tensor is not None): tensor.data.copy_(sync_tensor) def _broadcast_tp_shard_tensor_qkv(tensor, q_name, k_name, v_name, bias=False) -> torch.Tensor: """broadcast tensor in tp shards across mp_group""" nonlocal state_dict nonlocal mp_group tp_rank = mpu.get_tensor_model_parallel_rank() tp_size = mpu.get_tensor_model_parallel_world_size() if torch.distributed.get_rank() == src_rank: assert q_name in state_dict and k_name in state_dict and v_name in state_dict full_weight_q = state_dict[q_name] full_weight_k = state_dict[k_name] full_weight_v = state_dict[v_name] hidden_size_per_head = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads) if config.num_key_value_heads >= tp_size: q_size_tp = hidden_size_per_head * config.num_attention_heads // tp_size kv_size_tp = hidden_size_per_head * config.num_key_value_heads // tp_size total_size = q_size_tp + 2 * kv_size_tp sizes = [total_size * tp_size] if not bias: sizes.append(config.hidden_size) new_weight_qkv = torch.empty(*sizes, dtype=params_dtype, device=get_device_id()) for i in range(tp_size): q_part = full_weight_q[i * q_size_tp : (i + 1) * q_size_tp] k_part = full_weight_k[i * kv_size_tp : (i + 1) * kv_size_tp] v_part = full_weight_v[i * kv_size_tp : (i + 1) * kv_size_tp] num_query_groups_per_partition = models[0].config.num_query_groups // tp_size new_weight_qkv_this_tp = new_weight_qkv[i * total_size : (i + 1) * total_size] q_part_per_head = torch.chunk(q_part, num_query_groups_per_partition, dim=0) k_part_per_head = torch.chunk(k_part, num_query_groups_per_partition, dim=0) v_part_per_head = torch.chunk(v_part, num_query_groups_per_partition, dim=0) total_size_per_head = total_size // num_query_groups_per_partition for j in range(num_query_groups_per_partition): 
new_weight_qkv_this_tp[j * total_size_per_head : (j + 1) * total_size_per_head].copy_( torch.cat([q_part_per_head[j], k_part_per_head[j], v_part_per_head[j]], dim=0) ) else: q_size_tp = hidden_size_per_head * config.num_attention_heads // tp_size kv_size_tp = hidden_size_per_head total_size = q_size_tp + 2 * kv_size_tp sizes = [total_size * tp_size] if not bias: sizes.append(config.hidden_size) new_weight_qkv = torch.empty(*sizes, dtype=params_dtype, device=get_device_id()) for i in range(tp_size): q_part = full_weight_q[i * q_size_tp : (i + 1) * q_size_tp] start_idx = i * config.num_key_value_heads // tp_size * hidden_size_per_head end_idx = (i * config.num_key_value_heads // tp_size + 1) * hidden_size_per_head k_part = full_weight_k[start_idx:end_idx] v_part = full_weight_v[start_idx:end_idx] new_weight_qkv_this_tp = new_weight_qkv[i * total_size : (i + 1) * total_size] q_part_per_head = torch.chunk(q_part, config.num_attention_heads, dim=0) k_part_per_head = torch.chunk(k_part, config.num_attention_heads, dim=0) v_part_per_head = torch.chunk(v_part, config.num_attention_heads, dim=0) total_size_per_head = total_size // config.num_attention_heads for j in range(config.num_attention_heads): new_weight_qkv_this_tp[j * total_size_per_head : (j + 1) * total_size_per_head].copy_( torch.cat([q_part_per_head[j], k_part_per_head[j], v_part_per_head[j]], dim=0) ) tensor_chunk = torch.chunk(new_weight_qkv, tp_size, dim=0) chunk_shape = tensor_chunk[0].shape else: chunk_shape = None obj_list = [chunk_shape] dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group) chunk_shape = obj_list[0] if chunk_shape is None: # all or none ranks in the mp_group should reach here print_rank_0(f"tp_shard tensor:[{q_name, k_name, v_name}] not in state_dict, skip loading") return if tensor is None: sync_tensor = torch.empty( chunk_shape, dtype=params_dtype, device=get_device_id(), requires_grad=False, ) else: assert tensor.shape == chunk_shape, ( f"rank #{torch.distributed.get_rank()} tensor {q_name} shape {tensor.shape} != {chunk_shape}" ) sync_tensor = torch.empty_like(tensor, device=get_device_id(), requires_grad=False) for i in range(tp_size): if torch.distributed.get_rank() == src_rank: sync_tensor.data.copy_(tensor_chunk[i]) dist.broadcast(sync_tensor, src=src_rank, group=mp_group) if (i == tp_rank) and (tensor is not None): tensor.data.copy_(sync_tensor) if dp_rank == 0: # Embeddings # ------------------- print_rank_0("loading embeddings...") gpt_model_module = _get_gpt_model(models[0]) embed_tokens_weight = None if pp_rank == 0: embed_tokens_weight = gpt_model_module.embedding.word_embeddings.weight _broadcast_tp_shard_tensor_vocab(embed_tokens_weight, "model.embed_tokens.weight") # Transformer layers # ------------------- layer_map = _megatron_calc_layer_map(config) for layer in range(config.num_hidden_layers): layer_name = f"model.layers.{layer}" print_rank_0(f"loading layer #{layer}, with layer_name model.layers.{layer}...") dst_pp_rank, dst_virtual_pp_rank, dst_layer_idx = layer_map[layer] gpt_model_module = _get_gpt_model(models[dst_virtual_pp_rank]) sync_layer = gpt_model_module.decoder.layers[dst_layer_idx] _broadcast_tensor( sync_layer.self_attention.linear_qkv.layer_norm_weight if dst_pp_rank == pp_rank else None, f"{layer_name}.input_layernorm.weight", ) if f"{layer_name}.self_attn.q_norm.weight" in state_dict: _broadcast_tensor( sync_layer.self_attention.q_layernorm.weight if dst_pp_rank == pp_rank else None, f"{layer_name}.self_attn.q_norm.weight", ) _broadcast_tensor( 
sync_layer.self_attention.k_layernorm.weight if dst_pp_rank == pp_rank else None, f"{layer_name}.self_attn.k_norm.weight", ) _broadcast_tp_shard_tensor_qkv( sync_layer.self_attention.linear_qkv.weight if dst_pp_rank == pp_rank else None, f"{layer_name}.self_attn.q_proj.weight", f"{layer_name}.self_attn.k_proj.weight", f"{layer_name}.self_attn.v_proj.weight", ) if f"{layer_name}.self_attn.q_proj.bias" in state_dict: _broadcast_tp_shard_tensor_qkv( sync_layer.self_attention.linear_qkv.bias if dst_pp_rank == pp_rank else None, f"{layer_name}.self_attn.q_proj.bias", f"{layer_name}.self_attn.k_proj.bias", f"{layer_name}.self_attn.v_proj.bias", bias=True, ) _broadcast_tp_shard_tensor( sync_layer.self_attention.linear_proj.weight if dst_pp_rank == pp_rank else None, f"{layer_name}.self_attn.o_proj.weight", chunk_dim=1, ) _broadcast_tensor( sync_layer.mlp.linear_fc1.layer_norm_weight if dst_pp_rank == pp_rank else None, f"{layer_name}.post_attention_layernorm.weight", ) _broadcast_tp_shard_tensor_gate_up( sync_layer.mlp.linear_fc1.weight if dst_pp_rank == pp_rank else None, f"{layer_name}.mlp.gate_proj.weight", f"{layer_name}.mlp.up_proj.weight", ) _broadcast_tp_shard_tensor( sync_layer.mlp.linear_fc2.weight if dst_pp_rank == pp_rank else None, f"{layer_name}.mlp.down_proj.weight", chunk_dim=1, ) # Final Layernorm # ------------------- print_rank_0("loading final layernorm...") gpt_model_module = _get_gpt_model(models[-1]) _broadcast_tensor( getattr(gpt_model_module.decoder.final_layernorm, "weight", None), "model.norm.weight", ) print_rank_0("loading lm_head...") lm_head_weight = None if pp_rank + 1 == pp_size: lm_head_weight = gpt_model_module.output_layer.weight if is_value_model: # if torch.distributed.get_rank() == src_rank: if "lm_head.weight" in state_dict and state_dict["lm_head.weight"].shape[0] == 1: _broadcast_tensor(lm_head_weight, "lm_head.weight") elif "reward_head.weight" in state_dict and state_dict["reward_head.weight"].shape[0] == 1: _broadcast_tensor(lm_head_weight, "reward_head.weight") print_rank_0("load lm_head from value_head weight") else: _broadcast_tensor(None, "lm_head.weight") print_rank_0("fail to match lm_head in value_model") # else: # _broadcast_tensor(lm_head_weight, "lm_head.weight") else: _broadcast_tp_shard_tensor(lm_head_weight, "lm_head.weight") dist.barrier() # Broadcast weights inside data parallel groups for wrapped_model in wrapped_models: broadcast_params(wrapped_model) pass get_torch_device().empty_cache() print_rank_0(f"loading megatron ckpt done, time elapsed {time.time() - start_time}s") ================================================ FILE: verl_rl/verl/models/mcore/mbridge.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. try: from mbridge import AutoBridge from mbridge.utils.post_creation_callbacks import freeze_moe_router, make_value_model except ImportError: print("mbridge package not found. 
Please install mbridge with `pip install verl[mcore]` or `pip install mbridge`") raise __all__ = ["AutoBridge", "make_value_model", "freeze_moe_router"] ================================================ FILE: verl_rl/verl/models/mcore/model_forward.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from verl.utils.megatron_utils import unwrap_model from .util import postprocess_packed_seqs, preprocess_packed_seqs, recover_left_padding, remove_left_padding def gptmodel_forward( model, input_ids, attention_mask, position_ids, sequence_parallel, value_model=False, pack_seqs=True, logits_processor=None, logits_processor_args: dict = None, **kwargs, ): """Default forward pass for GPT models with optional sequence packing.""" pre_process = unwrap_model(model).pre_process post_process = unwrap_model(model).post_process if pack_seqs: batch_size, seq_len = attention_mask.shape[:2] input_ids_rmpad, packed_seq_params = preprocess_packed_seqs(input_ids, attention_mask, pre_process=pre_process) input_ids_rmpad = input_ids_rmpad.contiguous() output_orig = model( input_ids=input_ids_rmpad, attention_mask=None, position_ids=position_ids, packed_seq_params=packed_seq_params, ) if post_process and logits_processor is not None: args = { k: preprocess_packed_seqs(v, attention_mask, pre_process=True)[0] for k, v in logits_processor_args.items() } output_dict = logits_processor(output_orig, **args) output = { k: postprocess_packed_seqs( v, packed_seq_params, attention_mask, batch_size, seq_len, post_process=post_process ) for k, v in output_dict.items() } else: output = postprocess_packed_seqs( output_orig, packed_seq_params, attention_mask, batch_size, seq_len, post_process=post_process ) else: assert logits_processor is None, "logits_processor is not supported for non-packed sequence" batch_size, sequence_length = attention_mask.shape new_input_ids, new_attention_mask, new_position_ids = remove_left_padding( input_ids, attention_mask, position_ids, sequence_parallel, pre_process=pre_process ) output = model(input_ids=new_input_ids, attention_mask=new_attention_mask, position_ids=new_position_ids) output = recover_left_padding( output, new_attention_mask, attention_mask, sequence_length, post_process=post_process ) if value_model and post_process: output = output[..., 0] return output def gptmodel_forward_qwen2_5_vl( model, input_ids, attention_mask, position_ids, sequence_parallel, value_model=False, pack_seqs=True, multi_modal_inputs=None, logits_processor=None, logits_processor_args: dict = None, **kwargs, ): from megatron.core import parallel_state as mpu assert mpu.get_context_parallel_world_size() == 1, "qwen2_5_vl's context parallel is not accurate yet" pre_process = unwrap_model(model).pre_process post_process = unwrap_model(model).post_process pixel_values = ( 
multi_modal_inputs["pixel_values"].to(input_ids.device) if "pixel_values" in multi_modal_inputs else None ) image_grid_thw = ( multi_modal_inputs["image_grid_thw"].to(input_ids.device) if "image_grid_thw" in multi_modal_inputs else None ) if pack_seqs: batch_size, seq_len = attention_mask.shape[:2] input_ids_rmpad, packed_seq_params = preprocess_packed_seqs(input_ids, attention_mask, pre_process=True) input_ids_rmpad = input_ids_rmpad.contiguous() output_orig = model( input_ids=input_ids_rmpad, attention_mask=None, position_ids=position_ids, packed_seq_params=packed_seq_params, pixel_values=pixel_values, image_grid_thw=image_grid_thw, ) if post_process and logits_processor is not None: args = { k: preprocess_packed_seqs(v, attention_mask, pre_process=True)[0] for k, v in logits_processor_args.items() } output_dict = logits_processor(output_orig, **args) output = { k: postprocess_packed_seqs( v, packed_seq_params, attention_mask, batch_size, seq_len, post_process=post_process ) for k, v in output_dict.items() } else: output = postprocess_packed_seqs( output_orig, packed_seq_params, attention_mask, batch_size, seq_len, post_process=post_process ) else: batch_size, sequence_length = attention_mask.shape new_input_ids, new_attention_mask, new_position_ids = remove_left_padding( input_ids, attention_mask, position_ids, sequence_parallel, pre_process=pre_process ) output = model( input_ids=new_input_ids, position_ids=new_position_ids, attention_mask=new_attention_mask, pixel_values=pixel_values, image_grid_thw=image_grid_thw, ) output = recover_left_padding( output, new_attention_mask, attention_mask, sequence_length, post_process=post_process ) if value_model and post_process: output = output[..., 0] return output ================================================ FILE: verl_rl/verl/models/mcore/model_forward_fused.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from collections import OrderedDict from typing import Optional import torch from megatron.core import parallel_state from megatron.core.config_logger import has_config_logger_enabled, log_config_to_disk from megatron.core.inference.contexts import BaseInferenceContext from megatron.core.models.gpt.gpt_model import GPTModel from megatron.core.packed_seq_params import PackedSeqParams from megatron.core.tensor_parallel.mappings import gather_from_sequence_parallel_region from torch import Tensor from verl.models.mcore.util import preprocess_packed_seqs from verl.utils.kernel.linear_cross_entropy import linear_cross_entropy from verl.utils.megatron_utils import unwrap_model from verl.utils.model import CausalLMOutputForPPO from .qwen2_5_vl.model import Qwen2_5VLModel from .util import postprocess_packed_seqs_for_dict_output def patch_fused_forward(model: torch.nn.Module): model = unwrap_model(model) if isinstance(model, GPTModel): model = model elif isinstance(model, Qwen2_5VLModel): if not hasattr(model, "language_model"): # the qwen2.5vl model might only have vision_model return model = model.language_model else: raise ValueError("Model is not a GPTModel or Qwen2_5VLModel") model.forward_backup = model.forward model.forward = _fused_GPTModel_forward.__get__(model, model.__class__) return def unpatch_fused_forward(model: torch.nn.Module): model = unwrap_model(model) if isinstance(model, GPTModel): model = model elif isinstance(model, Qwen2_5VLModel): model = model.language_model else: raise ValueError("Model is not a GPTModel or Qwen2_5VLModel") model.forward = model.forward_backup return def fused_forward_gptmodel( model: GPTModel, input_ids: Tensor, position_ids: Tensor, attention_mask: Tensor, labels: Tensor, labels_mask: Tensor, **kwargs, ): pre_process: bool = unwrap_model(model).pre_process post_process: bool = unwrap_model(model).post_process batch_size, seq_len = attention_mask.shape[:2] input_ids_rmpad, packed_seq_params = preprocess_packed_seqs(input_ids, attention_mask, pre_process=pre_process) input_ids_rmpad = input_ids_rmpad.contiguous() labels_rmpad, _ = preprocess_packed_seqs(labels, attention_mask, pre_process=True) labels_mask_rmpad, _ = preprocess_packed_seqs(labels_mask, attention_mask, pre_process=True) labels_rmpad = labels_rmpad.contiguous() labels_mask_rmpad = labels_mask_rmpad.contiguous() output_orig: CausalLMOutputForPPO = model( input_ids=input_ids_rmpad, attention_mask=None, position_ids=position_ids, labels=labels_rmpad, packed_seq_params=packed_seq_params, ) if post_process: # output_orig is in type of CausalLMOutputForPPO output = postprocess_packed_seqs_for_dict_output( labels_mask_rmpad, output_orig, packed_seq_params, attention_mask, batch_size, seq_len, post_process=post_process, ) else: output = output_orig return output def fused_forward_qwen2_5_vl( model: Qwen2_5VLModel, input_ids: Tensor, position_ids: Tensor, attention_mask: Tensor, labels: Tensor, labels_mask: Tensor, multi_modal_inputs=None, **kwargs, ): # pre_process = unwrap_model(model).pre_process post_process = unwrap_model(model).post_process pixel_values = ( multi_modal_inputs["pixel_values"].to(input_ids.device) if "pixel_values" in multi_modal_inputs else None ) image_grid_thw = ( multi_modal_inputs["image_grid_thw"].to(input_ids.device) if "image_grid_thw" in multi_modal_inputs else None ) batch_size, seq_len = attention_mask.shape[:2] input_ids_rmpad, packed_seq_params = preprocess_packed_seqs(input_ids, attention_mask, pre_process=True) labels_rmpad, _ = preprocess_packed_seqs(labels, 
attention_mask, pre_process=True)
    labels_mask_rmpad, _ = preprocess_packed_seqs(labels_mask, attention_mask, pre_process=True)
    labels_rmpad = labels_rmpad.contiguous()
    labels_mask_rmpad = labels_mask_rmpad.contiguous()
    input_ids_rmpad = input_ids_rmpad.contiguous()

    output_orig: CausalLMOutputForPPO = model(
        input_ids=input_ids_rmpad,
        attention_mask=None,
        position_ids=position_ids,
        packed_seq_params=packed_seq_params,
        pixel_values=pixel_values,
        image_grid_thw=image_grid_thw,
        labels=labels_rmpad,  # use the packed labels computed above, matching fused_forward_gptmodel
    )

    if post_process:
        # output_orig is in type of CausalLMOutputForPPO
        output = postprocess_packed_seqs_for_dict_output(
            labels_mask_rmpad,
            output_orig,
            packed_seq_params,
            attention_mask,
            batch_size,
            seq_len,
            post_process=post_process,
        )
    else:
        output = output_orig
    return output


def _fused_GPTModel_forward(
    self,
    input_ids: Tensor,
    position_ids: Tensor,
    attention_mask: Tensor,
    decoder_input: Tensor = None,
    labels: Tensor = None,
    inference_context: BaseInferenceContext = None,
    packed_seq_params: PackedSeqParams = None,
    extra_block_kwargs: dict = None,
    runtime_gather_output: Optional[bool] = None,
    *,
    inference_params: Optional[BaseInferenceContext] = None,
    loss_mask: Optional[Tensor] = None,
    temperature: float = 1.0,
) -> CausalLMOutputForPPO:
    """
    Forward pass for GPT models with fused kernel support.

    Patch https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/core/models/gpt/gpt_model.py
    """
    # If decoder_input is provided (not None), then input_ids and position_ids are ignored.
    # Otherwise, apply embedding layer on input_ids and position_ids to get decoder_input.

    # Decoder embedding.
    if decoder_input is not None:
        pass
    elif self.pre_process:
        decoder_input = self.embedding(input_ids=input_ids, position_ids=position_ids)
    else:
        # intermediate stage of pipeline
        # decoder will get hidden_states from encoder.input_tensor
        decoder_input = None

    # Rotary positional embeddings (embedding is None for PP intermediate devices)
    rotary_pos_emb = None
    rotary_pos_cos = None
    rotary_pos_sin = None
    if self.position_embedding_type == "rope" and not self.config.multi_latent_attention:
        if not self.training and self.config.flash_decode and inference_context:
            assert inference_context.is_static_batching(), "GPTModel currently only supports static inference batching."
            # Flash decoding uses precomputed cos and sin for RoPE
            rotary_pos_cos, rotary_pos_sin = self.rotary_pos_emb_cache.setdefault(
                inference_context.max_sequence_length,
                self.rotary_pos_emb.get_cos_sin(inference_context.max_sequence_length),
            )
        else:
            rotary_seq_len = self.rotary_pos_emb.get_rotary_seq_len(
                inference_context, self.decoder, decoder_input, self.config, packed_seq_params
            )
            rotary_pos_emb = self.rotary_pos_emb(
                rotary_seq_len,
                packed_seq=packed_seq_params is not None and packed_seq_params.qkv_format == "thd",
            )
    elif self.position_embedding_type == "mrope" and not self.config.multi_latent_attention:
        if self.training or not self.config.flash_decode:
            rotary_pos_emb = self.rotary_pos_emb(position_ids, self.mrope_section)
        else:
            # Flash decoding uses precomputed cos and sin for RoPE
            raise NotImplementedError(
                "Flash decoding uses precomputed cos and sin for RoPE, not implemented in MultimodalRotaryEmbedding yet."
) if ( (self.config.enable_cuda_graph or self.config.flash_decode) and rotary_pos_cos is not None and inference_context and inference_context.is_static_batching() and not self.training ): sequence_len_offset = torch.tensor( [inference_context.sequence_len_offset] * inference_context.current_batch_size, dtype=torch.int32, device=rotary_pos_cos.device, # Co-locate this with the rotary tensors ) else: sequence_len_offset = None # Wrap decoder_input to allow the decoder (TransformerBlock) to delete the # reference held by this caller function, enabling early garbage collection for # skip inference # Run decoder. hidden_states = self.decoder( hidden_states=decoder_input, attention_mask=attention_mask, inference_context=inference_context, rotary_pos_emb=rotary_pos_emb, rotary_pos_cos=rotary_pos_cos, rotary_pos_sin=rotary_pos_sin, packed_seq_params=packed_seq_params, sequence_len_offset=sequence_len_offset, **(extra_block_kwargs or {}), ) # Process inference output. if inference_context and not inference_context.is_static_batching(): hidden_states = inference_context.last_token_logits(hidden_states.squeeze(1).unsqueeze(0)).unsqueeze(1) # logits and loss output_weight = None if self.share_embeddings_and_output_weights: output_weight = self.shared_embedding_or_output_weight() if self.mtp_process: hidden_states = self.mtp( input_ids=input_ids, position_ids=position_ids, labels=labels, loss_mask=loss_mask, hidden_states=hidden_states, attention_mask=attention_mask, inference_params=inference_params, rotary_pos_emb=rotary_pos_emb, rotary_pos_cos=rotary_pos_cos, rotary_pos_sin=rotary_pos_sin, packed_seq_params=packed_seq_params, sequence_len_offset=sequence_len_offset, embedding=self.embedding, output_layer=self.output_layer, output_weight=output_weight, runtime_gather_output=runtime_gather_output, compute_language_model_loss=self.compute_language_model_loss, **(extra_block_kwargs or {}), ) if not self.post_process: return hidden_states output = CausalLMOutputForPPO( loss=None, logits=None, past_key_values=None, hidden_states=hidden_states, attentions=None, ) if self.config.sequence_parallel: hidden_states = gather_from_sequence_parallel_region(hidden_states) logprobs, entropy = linear_cross_entropy( hidden_states, self.output_layer.weight, labels, temperature, "none", parallel_state.get_tensor_model_parallel_group(), ) if has_config_logger_enabled(self.config): payload = OrderedDict( { "input_ids": input_ids, "position_ids": position_ids, "attention_mask": attention_mask, "decoder_input": decoder_input, "logprobs": logprobs, "entropy": entropy, } ) log_config_to_disk(self.config, payload, prefix="input_and_logits") output.entropy = entropy output.log_probs = logprobs return output ================================================ FILE: verl_rl/verl/models/mcore/model_initializer.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and
# limitations under the License.

# use mcore transformer config to initialize the model
from abc import ABC, abstractmethod

from megatron.core.models.gpt.gpt_layer_specs import get_gpt_decoder_block_spec, get_gpt_mtp_block_spec
from megatron.core.models.gpt.gpt_model import GPTModel

from .config_converter import PretrainedConfig, TransformerConfig


class BaseModelInitializer(ABC):
    """Base class for model initializers."""

    def __init__(self, tfconfig: TransformerConfig, hf_config: PretrainedConfig):
        self.tfconfig = tfconfig
        self.hf_config = hf_config

    @abstractmethod
    def get_transformer_layer_spec(self):
        """Get the transformer layer specification.
        https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/core/models/gpt/gpt_layer_specs.py"""
        pass

    def get_rope_scaling_args(self) -> dict:
        """Get rope scaling args."""
        rope_scaling_args = {}
        if "rope_scaling" in self.hf_config:
            if self.hf_config.rope_scaling is not None:
                # assert self.hf_config.rope_scaling["type"] == "linear", "only linear scaling is supported for now"
                rope_scaling_args["seq_len_interpolation_factor"] = self.hf_config.rope_scaling["factor"]
        return rope_scaling_args

    def initialize(
        self,
        pre_process: bool = True,
        post_process: bool = True,
        share_embeddings_and_output_weights: bool = False,
        value: bool = False,
        **extra_kwargs,
    ) -> GPTModel:
        """Initialize a GPT model with the given configuration.
        https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/core/models/gpt/gpt_model.py

        Args:
            pre_process (bool): include the embedding layer.
            post_process (bool): include an output layer.
            share_embeddings_and_output_weights (bool): share the input embedding and output logit weights.
            value (bool): add an extra linear layer for classification or regression.
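
        Example (illustrative sketch; assumes a dense model and an already
        prepared TransformerConfig/PretrainedConfig pair)::

            initializer = DenseModel(tfconfig, hf_config)
            model = initializer.initialize(pre_process=True, post_process=True)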
Returns: GPTModel: An initialized GPT model instance """ transformer_layer_spec = self.get_transformer_layer_spec() rope_scaling_args = self.get_rope_scaling_args() mtp_block_spec = extra_kwargs.get("mtp_block_spec", None) model = GPTModel( config=self.tfconfig, transformer_layer_spec=transformer_layer_spec, vocab_size=self.hf_config.vocab_size, max_sequence_length=self.hf_config.max_position_embeddings, pre_process=pre_process, post_process=post_process, share_embeddings_and_output_weights=share_embeddings_and_output_weights, position_embedding_type="rope", rotary_base=self.hf_config.rope_theta, **rope_scaling_args, mtp_block_spec=mtp_block_spec, ) if post_process and value: from verl.models.llama.megatron.layers.parallel_linear import LinearForLastLayer model.output_layer = LinearForLastLayer( input_size=self.tfconfig.hidden_size, output_size=1, config=self.tfconfig ) return model class DenseModel(BaseModelInitializer): """Initializer for dense models like Llama and Qwen2.""" def get_transformer_layer_spec(self): assert self.tfconfig.normalization == "RMSNorm", "only RMSNorm is supported for now" return get_gpt_decoder_block_spec(self.tfconfig, use_transformer_engine=True) class Qwen2MoEModel(BaseModelInitializer): """Initializer for Qwen2 MoE models.""" def get_transformer_layer_spec(self): assert self.tfconfig.normalization == "RMSNorm", "only RMSNorm is supported for now" transformer_layer_spec = get_gpt_decoder_block_spec(self.tfconfig, use_transformer_engine=True) # Patch layer spec for shared experts for i in range(len(transformer_layer_spec.layer_specs)): transformer_layer_spec.layer_specs[i].submodules.mlp.submodules.shared_experts.params["gate"] = True return transformer_layer_spec def initialize(self, **kwargs): # Qwen default freeze_moe_router: true model = super().initialize(**kwargs) freeze_moe_router = kwargs.get("freeze_moe_router", True) if freeze_moe_router: for layer in model.decoder.layers: layer.mlp.router.weight.requires_grad = False return model class MixtralModel(BaseModelInitializer): """Initializer for Mixtral models.""" def get_transformer_layer_spec(self): assert self.tfconfig.normalization == "RMSNorm", "only RMSNorm is supported for now" transformer_layer_spec = get_gpt_decoder_block_spec(self.tfconfig, use_transformer_engine=True) return transformer_layer_spec def initialize(self, **kwargs): model = super().initialize(**kwargs) freeze_moe_router = kwargs.get("freeze_moe_router", False) if freeze_moe_router: for layer in model.decoder.layers: layer.mlp.router.weight.requires_grad = False return model class Qwen3MoEModel(BaseModelInitializer): """Initializer for Qwen3 MoE models.""" def get_transformer_layer_spec(self): assert self.tfconfig.normalization == "RMSNorm", "only RMSNorm is supported for now" transformer_layer_spec = get_gpt_decoder_block_spec(self.tfconfig, use_transformer_engine=True) return transformer_layer_spec def initialize(self, **kwargs): # Qwen default freeze_moe_router: true model = super().initialize(**kwargs) freeze_moe_router = kwargs.get("freeze_moe_router", True) if freeze_moe_router: for layer in model.decoder.layers: layer.mlp.router.weight.requires_grad = False return model class DeepseekV3Model(BaseModelInitializer): """Initializer for DeepseekV3 models.""" def get_transformer_layer_spec(self): transformer_layer_spec = get_gpt_decoder_block_spec(self.tfconfig, use_transformer_engine=True) return transformer_layer_spec def get_rope_scaling_args(self) -> dict: """Get rope scaling args.""" rope_scaling_args = {} return 
rope_scaling_args def initialize( self, **kwargs, ): freeze_moe_router = kwargs.get("freeze_moe_router", True) if freeze_moe_router: self.tfconfig.moe_router_load_balancing_type = "none" # MTP if self.tfconfig.mtp_num_layers is not None: transformer_layer_spec = self.get_transformer_layer_spec() mtp_block_spec = get_gpt_mtp_block_spec(self.tfconfig, transformer_layer_spec, use_transformer_engine=True) kwargs["mtp_block_spec"] = mtp_block_spec model = super().initialize(**kwargs) if freeze_moe_router: for layer in model.decoder.layers: if hasattr(layer.mlp, "router"): layer.mlp.router.weight.requires_grad = False return model class Qwen25VLModel(BaseModelInitializer): """Initializer for Qwen2.5 VL models.""" def get_transformer_layer_spec(self): transformer_layer_spec = get_gpt_decoder_block_spec(self.tfconfig, use_transformer_engine=True) return transformer_layer_spec def initialize( self, pre_process=None, post_process=None, share_embeddings_and_output_weights=False, value=False, **extra_kwargs, ): tfconfig = self.tfconfig hf_config = self.hf_config # Qwen2_5_VLForConditionalGeneration from copy import deepcopy transformer_layer_spec = self.get_transformer_layer_spec() from megatron.core.extensions.transformer_engine import TEColumnParallelLinear, TERowParallelLinear from megatron.core.models.gpt.moe_module_specs import MLPSubmodules from megatron.core.models.vision.vit_layer_specs import get_vit_layer_with_transformer_engine_spec from .qwen2_5_vl import Qwen2_5VLModel, get_vision_model_config, get_vision_projection_config vision_transformer_config = get_vision_model_config(deepcopy(tfconfig)) vision_transformer_config.pipeline_model_parallel_size = 1 vision_transformer_config.first_pipeline_num_layers = None vision_projection_config = get_vision_projection_config( deepcopy(tfconfig), vision_transformer_config.hidden_size, spatial_merge_size=hf_config.vision_config.spatial_merge_size, ) vision_projection_layer_spec = MLPSubmodules( linear_fc1=TEColumnParallelLinear, linear_fc2=TERowParallelLinear, ) vision_transformer_layer_spec = get_vit_layer_with_transformer_engine_spec() qwen25_vl_model = Qwen2_5VLModel( language_transformer_config=tfconfig, language_transformer_layer_spec=transformer_layer_spec, language_vocab_size=hf_config.vocab_size, language_max_sequence_length=hf_config.max_position_embeddings, vision_transformer_config=vision_transformer_config, vision_transformer_layer_spec=vision_transformer_layer_spec, vision_projection_config=vision_projection_config, vision_projection_layer_spec=vision_projection_layer_spec, vision_projection_type="mlp", language_rotary_base=hf_config.rope_theta, pre_process=pre_process, post_process=post_process, add_decoder=True, add_encoder=True, parallel_output=True, language_share_embeddings_and_output_weights=share_embeddings_and_output_weights, ) if post_process and value: from verl.models.llama.megatron.layers.parallel_linear import LinearForLastLayer qwen25_vl_model.language_model.output_layer = LinearForLastLayer( input_size=tfconfig.hidden_size, output_size=1, config=tfconfig ) return qwen25_vl_model ================================================ FILE: verl_rl/verl/models/mcore/patch_v012.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # there is some bug in mcore 0.12, so we need to patch it # 1. `get_query_key_value_tensors` in `multi_latent_attention.py` works wrong when packed_seq_params is not None def apply_patch(): import torch from megatron.core import parallel_state, tensor_parallel from megatron.core.transformer.multi_latent_attention import ( MLASelfAttention, apply_rotary_pos_emb, deprecate_inference_params, gather_from_sequence_parallel_region, gather_from_tensor_model_parallel_region, scatter_to_sequence_parallel_region, ) def patch_get_query_key_value_tensors( self, hidden_states, key_value_states=None, position_ids=None, packed_seq_params=None, inference_context=None, *, inference_params=None, ): """ Derives `query`, `key` and `value` tensors from `hidden_states`. """ # s = sequence length, b = batch size, h = hidden size, n = num attention heads # Attention heads [s, b, n*h] assert hidden_states.ndim == 3, f"hidden_states should be 3D, [s, b, n*h], got {hidden_states.ndim}D" inference_context = deprecate_inference_params(inference_context, inference_params) # ========================================= # Prepare RoPE and seqlen related params # ========================================= rotary_seq_len = self.rotary_pos_emb.get_rotary_seq_len( inference_context, None, hidden_states, self.config, packed_seq_params ) # rotary_pos_emb:[s, b, 1, 64] mscale = 1.0 if self.config.rope_type == "rope": packed_seq = packed_seq_params is not None and packed_seq_params.qkv_format == "thd" rotary_pos_emb = self.rotary_pos_emb(rotary_seq_len, packed_seq=packed_seq) else: rotary_pos_emb, mscale = self.rotary_pos_emb(rotary_seq_len) # ========================================= # QKV down projection and layernorm # ========================================= if self.config.q_lora_rank is not None: # if linear_q_down_proj is ColumnParallelLinear: # q_compressed: [s, b, q_lora_rank / TP] # elif linear_q_down_proj is Linear: # q_compressed: [s / TP, b, q_lora_rank] q_compressed, _ = self.linear_q_down_proj(hidden_states) # When output is sharded (ColumnParallelLinear), two things are needed to be # identical to a normal Linear. # 1. Manually gather output to restore output dim q_lora_rank; # 2. Scatter sequence back to s / TP if sequence-parallel since it was # gathered by ColumnParallelLinear. 
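            # Shape walk-through (comment added; the concrete sizes are illustrative):
            # with TP=2 and q_lora_rank=1536, ColumnParallelLinear yields
            # q_compressed[..., 768] per rank; the gather below restores
            # [..., 1536], and with sequence_parallel the sequence dim is then
            # scattered back to s / TP.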
if q_compressed.size(-1) != self.config.q_lora_rank: q_compressed = gather_from_tensor_model_parallel_region(q_compressed) if self.config.sequence_parallel: q_compressed = scatter_to_sequence_parallel_region(q_compressed) q_compressed = self.q_layernorm(q_compressed) else: q_compressed = hidden_states # if linear_kv_down_proj is ColumnParallelLinear: # kv_combined: [s, b, (kv_lora_rank + qk_pos_emb_head_dim) / TP] # elif linear_kv_down_proj is Linear: # kv_combined: [s / TP, b, (kv_lora_rank + qk_pos_emb_head_dim)] kv_combined, _ = self.linear_kv_down_proj(hidden_states) if kv_combined.size(-1) != self.config.kv_lora_rank + self.config.qk_pos_emb_head_dim: # kv_combined: [s, b, (kv_lora_rank + qk_pos_emb_head_dim)] kv_combined = gather_from_tensor_model_parallel_region(kv_combined) # kv_compressed:[s, b, kv_lora_rank], k_pos_emb: [s, b, qk_pos_emb_head_dim] kv_compressed, k_pos_emb = torch.split( kv_combined, [self.config.kv_lora_rank, self.config.qk_pos_emb_head_dim], dim=-1 ) if self.config.sequence_parallel: # kv_compressed:[s / TP, b, kv_lora_rank] kv_compressed = scatter_to_sequence_parallel_region(kv_compressed) else: # kv_compressed:[s / TP, b, kv_lora_rank], k_pos_emb: [s / TP, b, qk_pos_emb_head_dim] kv_compressed, k_pos_emb = torch.split( kv_combined, [self.config.kv_lora_rank, self.config.qk_pos_emb_head_dim], dim=-1 ) if parallel_state.get_tensor_model_parallel_world_size() > 1: # k_pos_emb: [s, b, qk_pos_emb_head_dim] k_pos_emb = gather_from_sequence_parallel_region(k_pos_emb) kv_compressed = self.kv_layernorm(kv_compressed) # ========================================= # QKV up projection and RoPE apply # ========================================= def qkv_up_proj_and_rope_apply(q_compressed, kv_compressed, k_pos_emb, rotary_pos_emb): if self.config.q_lora_rank is not None: q, _ = self.linear_q_up_proj(q_compressed) else: # hidden_states:[s, b, 2048], q: [s, b, n * 192] q, _ = self.linear_q_proj(q_compressed) q_len, bsz, _ = q.size() # q: [s, b, n, 192] q = q.view(q_len, bsz, self.num_attention_heads_per_partition, self.q_head_dim) # kv: [s, b, 2048] kv, _ = self.linear_kv_up_proj(kv_compressed) # kv: [s, b, n, 256] kv = kv.view( q_len, bsz, self.num_attention_heads_per_partition, self.config.qk_head_dim + self.config.v_head_dim, ) if inference_context is not None: # add offset to the sequence start for inference sequence_start = inference_context.sequence_len_offset sequence_end = sequence_start + q_len rotary_pos_emb = rotary_pos_emb[sequence_start:sequence_end] else: # Shorten rotary_pos_emb to the sequence length when inference_params # is not provided. This makes sure we can run forward directly with # any sequence length. During training, the sequence length is always # the full rotary_pos_emb length. 
rotary_pos_emb = rotary_pos_emb[0:q_len] # [s, b, 64] -> [s, b, 1, 64] k_pos_emb = torch.unsqueeze(k_pos_emb, 2) # q: [s, b, n, 128], q_pos_emb: [s, b, n, 64] q_no_pe, q_pos_emb = torch.split(q, [self.config.qk_head_dim, self.config.qk_pos_emb_head_dim], dim=-1) # k_no_pe: [s, b, n, 128], value: [s, b, n, 128] k_no_pe, value = torch.split(kv, [self.config.qk_head_dim, self.config.v_head_dim], dim=-1) if packed_seq_params is not None: cu_seqlens_q = packed_seq_params.cu_seqlens_q cu_seqlens_kv = packed_seq_params.cu_seqlens_kv q_pos_emb = q_pos_emb.squeeze(1) k_pos_emb = k_pos_emb.squeeze(1) q_no_pe = q_no_pe.squeeze(1) k_no_pe = k_no_pe.squeeze(1) value = value.squeeze(1) else: cu_seqlens_q = cu_seqlens_kv = None # q_pos_emb: [s, b, n, 64], k_pos_emb:[s, b, 1, 64] q_pos_emb = apply_rotary_pos_emb( q_pos_emb, rotary_pos_emb, config=self.config, cu_seqlens=cu_seqlens_q, mscale=mscale, ) k_pos_emb = apply_rotary_pos_emb( k_pos_emb, rotary_pos_emb, config=self.config, cu_seqlens=cu_seqlens_kv, mscale=mscale, ) # query: [s, b, n, 192] query = torch.cat([q_no_pe, q_pos_emb], dim=-1) if packed_seq_params is not None: k_pos_emb = k_pos_emb.expand(-1, self.num_attention_heads_per_partition, -1) key = torch.cat([k_no_pe, k_pos_emb], dim=-1) else: # key: [s, b, n, 192] k_pos_emb = k_pos_emb.expand(-1, -1, self.num_attention_heads_per_partition, -1) key = torch.cat([k_no_pe, k_pos_emb], dim=-1) query = query.contiguous() key = key.contiguous() value = value.contiguous() return query, key, value if self.recompute_up_proj: self.qkv_up_checkpoint = tensor_parallel.CheckpointWithoutOutput() query, key, value = self.qkv_up_checkpoint.checkpoint( qkv_up_proj_and_rope_apply, q_compressed, kv_compressed, k_pos_emb, rotary_pos_emb ) else: query, key, value = qkv_up_proj_and_rope_apply(q_compressed, kv_compressed, k_pos_emb, rotary_pos_emb) return query, key, value MLASelfAttention.get_query_key_value_tensors = patch_get_query_key_value_tensors ================================================ FILE: verl_rl/verl/models/mcore/qwen2_5_vl/__init__.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. # Copyright (c) 2024 Alibaba PAI Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .model import Qwen2_5VLModel from .vision_config import get_vision_model_config, get_vision_projection_config __all__ = ["Qwen2_5VLModel", "get_vision_model_config", "get_vision_projection_config"] ================================================ FILE: verl_rl/verl/models/mcore/qwen2_5_vl/attention.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. # Copyright (c) 2024 Alibaba PAI Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from megatron.core.transformer.attention import * from .rope_utils import apply_rotary_pos_emb_absolute class Qwen2_5VLSelfAttention(SelfAttention): """ Overrides the SelfAttention class, the difference is that qwen2_5_vl uses apply_rotary_pos_emb_absolute instead of apply_rotary_pos_emb """ def forward( self, hidden_states: Tensor, attention_mask: Tensor, key_value_states: Optional[Tensor] = None, inference_context: Optional[BaseInferenceContext] = None, rotary_pos_emb: Optional[Union[Tensor, Tuple[Tensor, Tensor]]] = None, rotary_pos_cos: Optional[Tensor] = None, rotary_pos_sin: Optional[Tensor] = None, attention_bias: Optional[Tensor] = None, packed_seq_params: Optional[PackedSeqParams] = None, sequence_len_offset: Optional[int] = None, *, inference_params: Optional[BaseInferenceContext] = None, ) -> Tuple[Tensor, Tensor]: """ Perform a forward pass through the attention module. Args: hidden_states (Tensor): Hidden states. attention_mask (Tensor): Attention mask. key_value_states (Optional[Tensor]): Key/value states (for cross attention). inference_context (Optional[BaseInferenceContext]): Inference context that manages KV cache. rotary_pos_emb (Optional[Union[Tensor, Tuple[Tensor, Tensor]]]): Rotary embedding tensor(s). rotary_pos_cos (Optional[Tensor]): Rotary embedding cosine. rotary_pos_sin (Optional[Tensor]): Rotary embedding sine. attention_bias (Optional[Tensor]): Attention bias. packed_seq_params (Optional[PackedSeqparams]): Parameters used for THD format. sequence_len_offset (Optional[int]): Sequence length offset used for inference CUDA graphs. Return: (Tuple[Tensor, Tensor]) Attention output and bias. """ inference_context = deprecate_inference_params(inference_context, inference_params) if inference_context and inference_context.is_dynamic_batching(): assert flash_decode_and_prefill_kernel is not None, ( "Internal use only: install package `nvidia_chunked_flash_attn`." ) # hidden_states: [sq, b, h] if self.config.flash_decode and not self.training and inference_context is not None: rotary_pos_emb = None else: assert rotary_pos_cos is None and rotary_pos_sin is None # For self attention we just duplicate the rotary_pos_emb if it isn't already if rotary_pos_emb is not None and not isinstance(rotary_pos_emb, tuple): rotary_pos_emb = (rotary_pos_emb,) * 2 # ===================== # Query, Key, and Value # ===================== # Get the query, key and value tensors based on the type of attention - # self or cross attn. query, key, value = self.get_query_key_value_tensors(hidden_states, key_value_states) # =================================================== # Adjust key, value, and rotary_pos_emb for inference # =================================================== # This branch only runs in the decode phase of flash decoding and returns after the linear # projection. This conditional is not used in the prefill phase or non-flash-decoding cases. 
if ( self.config.flash_decode and inference_context is not None and inference_context.is_decode_only() and not self.training and rotary_pos_cos is not None ): assert self.layer_number in inference_context.key_value_memory_dict assert inference_context.sequence_len_offset is not None inference_key_memory, inference_value_memory = inference_context.key_value_memory_dict[self.layer_number] output = self.flash_decode( sequence_len_offset=sequence_len_offset, query_layer=query, key_layer=key, value_layer=value, inference_key_memory=inference_key_memory, inference_value_memory=inference_value_memory, rotary_cos=rotary_pos_cos, rotary_sin=rotary_pos_sin, ) out = output.transpose(0, 1).contiguous() context_layer = out.view(out.size(0), out.size(1), -1) output, bias = self.linear_proj(context_layer) return output, bias query, key, value, rotary_pos_emb, attn_mask_type = self._adjust_key_value_for_inference( inference_context, query, key, value, rotary_pos_emb, rotary_pos_cos, rotary_pos_sin, sequence_len_offset, ) if packed_seq_params is not None: query = query.squeeze(1) key = key.squeeze(1) value = value.squeeze(1) # ================================================ # relative positional embedding (rotary embedding) # ================================================ if rotary_pos_emb is not None and not self.config.flash_decode: q_pos_emb, k_pos_emb = rotary_pos_emb if packed_seq_params is not None: if packed_seq_params.cu_seqlens_q_padded is not None: cu_seqlens_q = packed_seq_params.cu_seqlens_q_padded else: cu_seqlens_q = packed_seq_params.cu_seqlens_q if packed_seq_params.cu_seqlens_kv_padded is not None: cu_seqlens_kv = packed_seq_params.cu_seqlens_kv_padded else: cu_seqlens_kv = packed_seq_params.cu_seqlens_kv else: cu_seqlens_q = cu_seqlens_kv = None if q_pos_emb is not None: # TODO VIJAY: simplify if inference_context is None or inference_context.is_static_batching(): query = apply_rotary_pos_emb_absolute(query, q_pos_emb, config=self.config, cu_seqlens=cu_seqlens_q) else: query = inference_context.apply_rotary_emb_query(query, q_pos_emb, self.config, cu_seqlens_q) if k_pos_emb is not None: key = apply_rotary_pos_emb_absolute(key, k_pos_emb, config=self.config, cu_seqlens=cu_seqlens_kv) # TODO, can apply positional embedding to value_layer so it has # absolute positional embedding. # otherwise, only relative positional embedding takes effect # value_layer = apply_rotary_pos_emb(value_layer, k_pos_emb) # ================================== # core attention computation # ================================== if self.checkpoint_core_attention and self.training: core_attn_out = self._checkpointed_attention_forward( query, key, value, attention_mask, attn_mask_type=attn_mask_type, attention_bias=attention_bias, packed_seq_params=packed_seq_params, ) else: if inference_context is None or inference_context.is_static_batching(): # Static batching attention kernel. core_attn_out = self.core_attention( query, key, value, attention_mask, attn_mask_type=attn_mask_type, attention_bias=attention_bias, packed_seq_params=packed_seq_params, ) else: # Dynamic batching attention kernel. 
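                # (Comment added) cu_query_lengths / cu_kv_lengths are the cumulative
                # sequence lengths consumed by the dynamic-batching decode+prefill kernel.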
q, k, v = (query, key, value) cu_query_lengths, max_seqlen_q = inference_context.cu_query_lengths() cu_kv_lengths, max_seqlen_k = inference_context.cu_kv_lengths() core_attn_out = self.flash_decode_and_prefill( q, k, v, max_seqlen_q, max_seqlen_k, cu_query_lengths, cu_kv_lengths ) core_attn_out = core_attn_out.squeeze(0).unsqueeze(1) core_attn_out = rearrange(core_attn_out, "s b h d -> s b (h d)") if packed_seq_params is not None and packed_seq_params.qkv_format == "thd": # reshape to same output shape as unpacked case # (t, np, hn) -> (t, b=1, h=np*hn) # t is the pack size = sum (sq_i) # note that batch is a dummy dimension in the packed case core_attn_out = core_attn_out.reshape(core_attn_out.size(0), 1, -1) # ================= # Output. [sq, b, h] # ================= output, bias = self.linear_proj(core_attn_out) return output, bias ================================================ FILE: verl_rl/verl/models/mcore/qwen2_5_vl/model.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. # Copyright (c) 2024 Alibaba PAI Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import torch from megatron.core import InferenceParams, tensor_parallel from megatron.core.models.gpt.gpt_model import GPTModel # from .transformer_config import Qwen2VLTransformerConfig from megatron.core.packed_seq_params import PackedSeqParams from megatron.core.transformer import MegatronModule from megatron.core.transformer.spec_utils import ModuleSpec from megatron.core.transformer.transformer_config import TransformerConfig from .attention import Qwen2_5VLSelfAttention from .vision_model import Qwen2_5VisionModel # Note: This is under development and may be missing features. class Qwen2_5VLModel(MegatronModule): """Qwen2.5VL multi-modal model. Args: language_transformer_config (TransformerConfig): Transformer config for the language model. language_transformer_layer_spec (ModuleSpec): Specifies module to use for transformer layers of the language model. language_vocab_size (int): Language model vocabulary size. language_max_sequence_length (int): Language model maximum sequence length. This is used for positional embedding. vision_transformer_config (TransformerConfig): Transformer config for the vision model. vision_transformer_layer_spec (ModuleSpec): Specifies module to use for transformer layers of the vision model. vision_projection_config (TransformerConfig): Config for the projection from vision model outputs to language model inputs. vision_projection_layer_spec (ModuleSpec): Specifies the module to use for the vision projection. vision_projection_type (str): Type of the vision projection to use. Default is a 2-layer MLP. parallel_output (bool): Do not gather the outputs, keep them split across tensor parallel ranks. This is typically True for training and False for inference. language_rotary_percent (float): Percent of rotary dimension to use for rotary position embeddings in the language model. 
Defaults to 1.0. pre_process (bool): Include the embedding layer in the gpt decoder (used with pipeline parallelism). Defaults to True. post_process (bool): Include an output layer and a layernorm in the gpt decoder (used with pipeline parallelism). Defaults to True. add_encoder (bool): Construct the encoder module (used with pipeline parallelism). Defaults to True. When we use pipelining, the encoder will live on only a subset of the pipeline stages (specifically, only the first stage). add_decoder (bool): Construct the decoder module (used with pipeline parallelism). Defaults to True. When we use pipelining, the decoder will live on only a subset of the pipeline stages (specifically, every stage after the first one). img_h (int): The height of each image that the ViT will see. img_w (int): The width of each image that the ViT will see. patch_dim (int): The size of each patch side. img_embedding_idx (int): Index in the language_embeddings tensor where image_embeddings should be inserted. Defaults to 0. """ def __init__( self, language_transformer_config: TransformerConfig, language_transformer_layer_spec: ModuleSpec, language_vocab_size: int, language_max_sequence_length: int, vision_transformer_config: TransformerConfig, vision_transformer_layer_spec: ModuleSpec, vision_projection_config: TransformerConfig, vision_projection_layer_spec: ModuleSpec, vision_projection_type: str = "mlp", parallel_output: bool = True, language_rotary_percent: float = 1.0, pre_process: bool = True, post_process: bool = True, add_encoder: bool = True, add_decoder: bool = True, language_rotary_base: int = 10000, fp16_lm_cross_entropy: bool = False, language_share_embeddings_and_output_weights: bool = False, image_token_id: int = 151655, video_token_id: int = 151656, ) -> None: super().__init__(config=language_transformer_config) # patch self_attention to use qwen2_5_vl attention vision_transformer_layer_spec.submodules.self_attention.module = Qwen2_5VLSelfAttention for layer_spec in language_transformer_layer_spec.layer_specs: layer_spec.submodules.self_attention.module = Qwen2_5VLSelfAttention logging.getLogger(__name__).warning("Qwen2VL model is under development and may be missing features.") self.pre_process = pre_process self.post_process = post_process self.add_encoder = add_encoder self.add_decoder = add_decoder self.encoder_hidden_state = None self.vision_model = None self.vision_projection = None self.language_model = None self.image_token_id = image_token_id self.video_token_id = video_token_id self.square_merge_size = vision_projection_config.ffn_hidden_size // vision_transformer_config.hidden_size # This attribute is needed to check if an all-reduce is required # on the word embeddings inside `finalize_model_grads._allreduce_word_embedding_grads`. 
self.share_embeddings_and_output_weights = False if self.pre_process: self.vision_model = Qwen2_5VisionModel( vision_transformer_config, vision_transformer_layer_spec, vision_projection_config, vision_projection_layer_spec, projection_type=vision_projection_type, pre_process=True, post_process=True, ) self.language_model = GPTModel( config=language_transformer_config, transformer_layer_spec=language_transformer_layer_spec, vocab_size=language_vocab_size, max_sequence_length=language_max_sequence_length, parallel_output=parallel_output, position_embedding_type="mrope", rotary_percent=language_rotary_percent, pre_process=self.pre_process, post_process=self.post_process, rotary_base=language_rotary_base, fp16_lm_cross_entropy=fp16_lm_cross_entropy, share_embeddings_and_output_weights=language_share_embeddings_and_output_weights, scatter_embedding_sequence_parallel=False, ) self.share_embeddings_and_output_weights = self.language_model.share_embeddings_and_output_weights def shared_embedding_or_output_weight(self): """This is a convenience method to surface the language model's word embeddings, which is necessary for `finalize_model_grads._allreduce_word_embedding_grads`.""" if self.add_decoder: return self.language_model.shared_embedding_or_output_weight() return None def set_input_tensor(self, input_tensor) -> None: # This is usually handled in schedules.py but some inference code still # gives us non-lists or None if not isinstance(input_tensor, list): input_tensor = [input_tensor] assert len(input_tensor) == 1, "input_tensor should only be length 1 for Qwen2VL" if self.pre_process: self.encoder_hidden_state = input_tensor[0] else: self.language_model.set_input_tensor(input_tensor[0]) def freeze(self, freeze_language_model: bool, freeze_vision_model: bool, freeze_vision_projection: bool): """Freeze model modules. Make specific modules non-trainable by setting requires_grad to False for the module's parameters. Args: freeze_language_model (bool): Freeze the language model module. freeze_vision_model (bool): Freeze the vision model module. freeze_vision_projection (bool): Freeze the vision projection module. """ modules = [] if freeze_language_model and self.language_model is not None: modules.append(self.language_model) if freeze_vision_model and self.vision_model is not None: modules.append(self.vision_model) if freeze_vision_projection and self.vision_projection is not None: modules.append(self.vision_projection) for module in modules: for param in module.parameters(): param.requires_grad = False def forward( self, input_ids: torch.Tensor, position_ids: torch.Tensor, attention_mask: torch.Tensor = None, labels: torch.Tensor = None, inference_params: InferenceParams = None, packed_seq_params: PackedSeqParams = None, extra_block_kwargs: dict = None, pixel_values: torch.Tensor = None, pixel_values_videos: torch.Tensor = None, image_grid_thw: torch.Tensor = None, video_grid_thw: torch.Tensor = None, ) -> torch.Tensor: """Forward function of the Qwen2VL model. Args: image_data (torch.Tensor): input image of shape [total_thw_size, n_features]. input_ids (torch.Tensor): input text ids [batch, text_seq_len]. position_ids (torch.Tensor): input text position ids [batch, text_seq_len]. attention_mask (torch.Tensor): attention mask for the language model [batch, 1, combined_seq_len, combined_seq_len]. labels (torch.Tensor): Optional target text labels [batch, combined_seq_len]. inference_params (InferenceParams): Inference-time parameters including KV cache. 
        video_start_index:
                0 -- all video
                len(video_seq) -- all image
                others -- mixture
            *_input_mask: should not be None in the first PP stage

        Returns:
            output (torch.Tensor): Loss of shape [b, s] if labels are provided,
                otherwise logits of shape [b, s, vocab_size].
        """
        video_start_index = 0
        vision_grid_thw = None
        vision_data = None
        if image_grid_thw is not None:
            image_mask = input_ids == self.image_token_id
            vision_grid_thw = image_grid_thw
            vision_data = pixel_values
            video_start_index = image_mask.sum().item()
        if video_grid_thw is not None:
            video_mask = input_ids == self.video_token_id
            # Guard against video-only inputs, where no image tensors exist yet.
            vision_grid_thw = (
                torch.cat([vision_grid_thw, video_grid_thw], dim=0) if vision_grid_thw is not None else video_grid_thw
            )
            vision_data = (
                torch.cat([vision_data, pixel_values_videos], dim=0) if vision_data is not None else pixel_values_videos
            )
            video_start_index = video_start_index + video_mask.sum().item()

        use_inference_kv_cache = (
            inference_params is not None and "image_tokens_count" in inference_params.key_value_memory_dict
        )
        if use_inference_kv_cache:
            raise NotImplementedError()

        if self.pre_process:
            vision_embeds = None
            if vision_grid_thw is not None and vision_grid_thw.shape[0] > 0:
                vision_embeds = self.vision_model(
                    vision_data=vision_data,  # If None, vision model should use intermediate outputs (EPP > 1)
                    grid_thw=vision_grid_thw,  # should be provided in each EPP stage
                )

                # If running inference, the language model KV cache will be updated for image token positions.
                # Here we store the image tokens sequence length, which can be used as an offset to the KV cache later.
                if inference_params is not None:
                    raise NotImplementedError()
                    # inference_params.key_value_memory_dict["image_tokens_count"] = (
                    #     vision_embeddings.shape[0]
                    # )

            # If running inference, we can skip image token computation if they were computed already earlier
            # for this sample.
            if use_inference_kv_cache:
                language_embeddings: torch.Tensor = self.language_model.embedding(
                    input_ids=input_ids,
                    position_ids=None,  # NOTE: disable
                )  # [text_seq_len, b, h_language]
                # NOTE: why not concatenate here? Are the combined embeddings unnecessary?
combined_embeddings = language_embeddings elif vision_embeds is not None: if video_start_index == 0: image_embeds = None video_embeds = vision_embeds elif video_start_index == vision_embeds.shape[0]: image_embeds = vision_embeds video_embeds = None elif 0 < video_start_index < vision_embeds.shape[0]: image_embeds = vision_embeds[:video_start_index] video_embeds = vision_embeds[video_start_index:] else: raise ValueError( f"Expect video token start index in range [0, {vision_embeds.shape[0]}], but got " f"{video_start_index}" ) combined_embeddings = self.language_model.embedding( input_ids=input_ids, position_ids=None, # NOTE: disable ) # [text_seq_len, b, h_language] if image_embeds is not None or video_embeds is not None: combined_embeddings = combined_embeddings.transpose(0, 1).contiguous() if image_embeds is not None: image_mask = (input_ids == self.image_token_id).contiguous() if image_mask.sum() > 0: combined_embeddings = combined_embeddings.clone() combined_embeddings[image_mask] = image_embeds.to( dtype=combined_embeddings.dtype, device=combined_embeddings.device ) if video_embeds is not None: video_mask = (input_ids == self.video_token_id).contiguous() if video_mask.sum() > 0: combined_embeddings = combined_embeddings.clone() combined_embeddings[video_mask] = video_embeds.to( dtype=combined_embeddings.dtype, device=combined_embeddings.device ) combined_embeddings = combined_embeddings.transpose(0, 1).contiguous() else: combined_embeddings = self.language_model.embedding( input_ids=input_ids, position_ids=None, # NOTE: disable ) # [text_seq_len, b, h_language] if self.config.sequence_parallel: combined_embeddings = tensor_parallel.scatter_to_sequence_parallel_region(combined_embeddings) combined_embeddings = combined_embeddings.contiguous() else: combined_embeddings = None from .rope_utils import get_rope_index position_ids, _ = get_rope_index( input_ids, image_grid_thw=image_grid_thw, video_grid_thw=video_grid_thw, attention_mask=attention_mask ) output = self.language_model( input_ids=None, position_ids=position_ids, # None in encoder attention_mask=attention_mask, # None in encoder decoder_input=combined_embeddings, # only not None in the first decoder PP stage labels=labels, # only not None in the last decoder PP stage # inference_params=inference_params, # currently always None packed_seq_params=packed_seq_params, # currently always None **(extra_block_kwargs or {}), ) return output ================================================ FILE: verl_rl/verl/models/mcore/qwen2_5_vl/rope_utils.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. # Copyright (c) 2024 Alibaba PAI Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
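
# Illustrative note (added; not part of the original module): for a pure-text
# batch, get_rope_index below reduces to ordinary 1D positions replicated on
# the three mrope axes, e.g.
#
#     position_ids, deltas = get_rope_index(input_ids, attention_mask=mask)
#     # position_ids[0] == position_ids[1] == position_ids[2] == [[0, 1, 2, ...]]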
from __future__ import annotations import logging from typing import Optional import torch from megatron.core.models.common.embeddings.rope_utils import * from megatron.core.models.common.embeddings.rope_utils import _apply_rotary_pos_emb_bshd from torch import Tensor logger = logging.getLogger(__name__) # Slightly modified from Qwen2VLForConditionalGeneration.get_rope_index def get_rope_index( input_ids: Optional[torch.LongTensor] = None, image_grid_thw: Optional[torch.LongTensor] = None, video_grid_thw: Optional[torch.LongTensor] = None, second_per_grid_ts: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, ): """ Calculate the 3D rope index based on image and video's temporal, height and width in LLM. Explanation: Each embedding sequence contains vision embedding and text embedding or just contains text embedding. For a pure text embedding sequence, the rotary position embedding is no different from that of modern LLMs. Examples: input_ids: [T T T T T], here T is for text. temporal position_ids: [0, 1, 2, 3, 4] height position_ids: [0, 1, 2, 3, 4] width position_ids: [0, 1, 2, 3, 4] For vision and text embedding sequence, we calculate 3D rotary position embedding for vision part and 1D rotary position embedding for text part. Examples: Temporal (Time): 3 patches, representing different segments of the video in time. Height: 2 patches, dividing each frame vertically. Width: 2 patches, dividing each frame horizontally. We also have some important parameters: fps (Frames Per Second): The video's frame rate, set to 1. This means one frame is processed each second. tokens_per_second: This is a crucial parameter. It dictates how many "time-steps" or "temporal tokens" are conceptually packed into a one-second interval of the video. In this case, we have 25 tokens per second. So each second of the video will be represented with 25 separate time points. It essentially defines the temporal granularity. temporal_patch_size: The number of frames that compose one temporal patch. Here, it's 2 frames. interval: The step size for the temporal position IDs, calculated as tokens_per_second * temporal_patch_size / fps. In this case, 25 * 2 / 1 = 50. This means that each temporal patch will have a difference of 50 in the temporal position IDs. input_ids: [V V V V V V V V V V V V T T T T T], here V is for vision. vision temporal position_ids: [0, 0, 0, 0, 50, 50, 50, 50, 100, 100, 100, 100] vision height position_ids: [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1] vision width position_ids: [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1] text temporal position_ids: [101, 102, 103, 104, 105] text height position_ids: [101, 102, 103, 104, 105] text width position_ids: [101, 102, 103, 104, 105] Here we calculate the text start position_ids as the max vision position_ids plus 1. Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*): The temporal, height and width of feature shape of each image in LLM. video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*): The temporal, height and width of feature shape of each video in LLM. second_per_grid_ts (`torch.Tensor` of shape `(num_videos)`, *optional*): The time interval (in seconds) for each grid along the temporal dimension in the 3D position IDs.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. Returns: position_ids (`torch.LongTensor` of shape `(3, batch_size, sequence_length)`) mrope_position_deltas (`torch.Tensor` of shape `(batch_size)`) """ spatial_merge_size = 2 tokens_per_second = 2 image_token_id = 151655 video_token_id = 151656 vision_start_token_id = 151652 mrope_position_deltas = [] if input_ids is not None and (image_grid_thw is not None or video_grid_thw is not None): total_input_ids = input_ids if attention_mask is None: attention_mask = torch.ones_like(total_input_ids) position_ids = torch.ones( 3, input_ids.shape[0], input_ids.shape[1], dtype=input_ids.dtype, device=input_ids.device, ) image_index, video_index = 0, 0 attention_mask = attention_mask.to(total_input_ids.device) for i, input_ids in enumerate(total_input_ids): input_ids = input_ids[attention_mask[i] == 1] image_nums, video_nums = 0, 0 vision_start_indices = torch.argwhere(input_ids == vision_start_token_id).squeeze(1) vision_tokens = input_ids[vision_start_indices + 1] image_nums = (vision_tokens == image_token_id).sum() video_nums = (vision_tokens == video_token_id).sum() input_tokens = input_ids.tolist() llm_pos_ids_list: list = [] st = 0 remain_images, remain_videos = image_nums, video_nums for _ in range(image_nums + video_nums): if image_token_id in input_tokens and remain_images > 0: ed_image = input_tokens.index(image_token_id, st) else: ed_image = len(input_tokens) + 1 if video_token_id in input_tokens and remain_videos > 0: ed_video = input_tokens.index(video_token_id, st) else: ed_video = len(input_tokens) + 1 if ed_image < ed_video: t, h, w = ( image_grid_thw[image_index][0], image_grid_thw[image_index][1], image_grid_thw[image_index][2], ) second_per_grid_t = 0 image_index += 1 remain_images -= 1 ed = ed_image else: t, h, w = ( video_grid_thw[video_index][0], video_grid_thw[video_index][1], video_grid_thw[video_index][2], ) if second_per_grid_ts is not None: second_per_grid_t = second_per_grid_ts[video_index] else: second_per_grid_t = 1.0 video_index += 1 remain_videos -= 1 ed = ed_video llm_grid_t, llm_grid_h, llm_grid_w = ( t.item(), h.item() // spatial_merge_size, w.item() // spatial_merge_size, ) text_len = ed - st st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx) range_tensor = torch.arange(llm_grid_t).view(-1, 1) expanded_range = range_tensor.expand(-1, llm_grid_h * llm_grid_w) time_tensor = expanded_range * second_per_grid_t * tokens_per_second time_tensor_long = time_tensor.long() t_index = time_tensor_long.flatten() h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(llm_grid_t, -1, llm_grid_w).flatten() w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(llm_grid_t, llm_grid_h, -1).flatten() llm_pos_ids_list.append(torch.stack([t_index, h_index, w_index]) + text_len + st_idx) st = ed + llm_grid_t * llm_grid_h * llm_grid_w if st < len(input_tokens): st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 text_len = len(input_tokens) - st llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx) llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1) position_ids[..., i, attention_mask[i] == 1] = llm_positions.to(position_ids.device) 
mrope_position_deltas.append(llm_positions.max() + 1 - len(total_input_ids[i])) mrope_position_deltas = torch.tensor(mrope_position_deltas, device=input_ids.device).unsqueeze(1) return position_ids, mrope_position_deltas else: if attention_mask is not None: position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) position_ids = position_ids.unsqueeze(0).expand(3, -1, -1).to(attention_mask.device) max_position_ids = position_ids.max(0, keepdim=False)[0].max(-1, keepdim=True)[0] mrope_position_deltas = max_position_ids + 1 - attention_mask.shape[-1] else: position_ids = ( torch.arange(input_ids.shape[1], device=input_ids.device) .view(1, 1, -1) .expand(3, input_ids.shape[0], -1) ) mrope_position_deltas = torch.zeros( [input_ids.shape[0], 1], device=input_ids.device, dtype=input_ids.dtype, ) return position_ids, mrope_position_deltas def apply_rotary_pos_emb_thd_absolute( t: Tensor, cu_seqlens: Tensor, freqs: Tensor, rotary_interleaved: bool = False ) -> Tensor: """A baseline implementation of applying RoPE for `thd` format. Args: t (Tensor): Input tensor T is of shape [t, h, d] cu_seqlens(Tensor): Cumulative sum of sequence lengths in a batch for `t`, with shape [b + 1] and dtype torch.int32. freqs (Tensor): Rotary Positional embedding tensor freq is of shape [max_s, 1, 1, d] Returns: Tensor: Shape [t, h, d]. The input tensor after applying RoPE. """ return _apply_rotary_pos_emb_bshd(t[:, None], freqs, rotary_interleaved=rotary_interleaved).squeeze(1) def apply_rotary_pos_emb_absolute( t: Tensor, freqs: Tensor, config: TransformerConfig, cu_seqlens: Optional[Tensor] = None, ): """ Reroute to the appropriate apply_rotary_pos_emb function depending on bshd (conventional) / thd (packed seq) format In Qwen2-VL, the shape of freqs is (seq_length, bs, 1, 2 * dim) instead of [max_seqlen, 1, 1, 2 * dim] """ if config.apply_rope_fusion: if cu_seqlens is None: # NOTE: TE backends do not support mRoPE in bshd format when bs > 1 if freqs.shape[1] > 1: return _apply_rotary_pos_emb_bshd(t, freqs, rotary_interleaved=config.rotary_interleaved) else: return fused_apply_rotary_pos_emb(t, freqs) else: # NOTE: as expected, thd format can use bshd return fused_apply_rotary_pos_emb(t[:, None], freqs).squeeze(1) else: if cu_seqlens is None: return _apply_rotary_pos_emb_bshd(t, freqs, rotary_interleaved=config.rotary_interleaved) else: return apply_rotary_pos_emb_thd_absolute(t, cu_seqlens, freqs, rotary_interleaved=config.rotary_interleaved) ================================================ FILE: verl_rl/verl/models/mcore/qwen2_5_vl/vision_config.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. # Copyright (c) 2024 Alibaba PAI Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
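# Illustrative sketch (added for exposition, not part of the original file): the 3D index
# pattern built by get_rope_index above for one vision block of t=3, h=2, w=2 merged
# patches, with a temporal step of 50 (the docstring's tokens_per_second *
# temporal_patch_size / fps = 25 * 2 / 1 example).
def _demo_mrope_vision_indices():
    import torch

    t, h, w, step = 3, 2, 2, 50
    t_index = (torch.arange(t).view(-1, 1) * step).expand(-1, h * w).flatten()
    h_index = torch.arange(h).view(1, -1, 1).expand(t, -1, w).flatten()
    w_index = torch.arange(w).view(1, 1, -1).expand(t, h, -1).flatten()
    # t: [0, 0, 0, 0, 50, 50, 50, 50, 100, 100, 100, 100]
    # h: [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1]
    # w: [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
    # Text tokens that follow the vision block continue 1D from max(vision ids) + 1 on all three axes.
    return torch.stack([t_index, h_index, w_index])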
import torch from megatron.core import parallel_state from megatron.core.transformer import TransformerConfig def get_vision_model_config(config: TransformerConfig) -> TransformerConfig: # Given a Transformer Config from decoder, build vision encoder config # diff: out_hidden_size & intermediate_size # mlp: hidden_size -> intermediate_size -> embed_dim, silu # NOTE: here we provide a workaround to solve the wrong layer amount when VPP of decoder is on if config.num_layers in [28, 36]: config.ffn_hidden_size = 3420 else: config.ffn_hidden_size = 3456 if parallel_state.get_virtual_pipeline_model_parallel_world_size() is not None: config.num_layers = 32 * parallel_state.get_virtual_pipeline_model_parallel_world_size() # depth else: config.num_layers = 32 # depth config.num_attention_heads = 16 # num_heads config.add_bias_linear = True # all nn.Linear has bias (MLP, attn) config.add_qkv_bias = True # qkv_proj in attn has bias config.hidden_size = 1280 # hidden_size config.hidden_dropout = 0.0 config.attention_dropout = 0.0 # config.gated_linear_unit = False # no gated # config.activation_func = quick_gelu # hidden_act config.kv_channels = config.hidden_size // config.num_attention_heads config.num_query_groups = config.num_attention_heads # no GQA config.layernorm_zero_centered_gamma = False # False config.apply_query_key_layer_scaling = False # factor=math.sqrt(head_dim) config.bias_activation_fusion = False # no swiglu, set false config.bias_dropout_fusion = False # no dropout, set false config.attention_softmax_in_fp32 = True # use True # config.normalization = 'LayerNorm' # use RMSNorm config.seq_length = 1 config.tp_comm_overlap = False config.sequence_parallel = False config.temporal_patch_size = 2 config.patch_size = 14 config.in_channels = 3 config.spatial_merge_size = 2 config.fullatt_block_indexes = [7, 15, 23, 31] config._qwen2_5_vl_window_size = 112 return config def get_vision_projection_config( config: TransformerConfig, embed_dim: int, spatial_merge_size: int ) -> TransformerConfig: # merger: # context_dim = hidden_size * merge_size**2 # out_hidden_size = hidden_size # context_dim -> context_dim -> out_hidden_size # MLP: # input_size -> ffn_hidden_size -> hidden_size # spec: LN -> Linear(bias=True) -> GELU -> Linear(bias=True) config.gated_linear_unit = False config.bias_activation_fusion = False config.add_bias_linear = True config.ffn_hidden_size = embed_dim * (spatial_merge_size**2) config.activation_func = torch.nn.functional.gelu config.tp_comm_overlap = False config.sequence_parallel = False return config ================================================ FILE: verl_rl/verl/models/mcore/qwen2_5_vl/vision_model.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. # Copyright (c) 2024 Alibaba PAI Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
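# Illustrative sketch (added for exposition, not part of the original file): the merger
# arithmetic implied by get_vision_projection_config above. With hidden_size 1280 and
# spatial_merge_size 2, every 2x2 block of ViT patch features is concatenated into a
# 1280 * 2**2 = 5120-dim projector input, so an image of h x w patches yields
# (h // 2) * (w // 2) LLM tokens. Patch grid sizes below are toy values.
def _demo_merger_dims(embed_dim: int = 1280, spatial_merge_size: int = 2):
    ffn_hidden_size = embed_dim * spatial_merge_size**2  # 5120, the projector fan-in
    h_patches, w_patches = 16, 24  # toy patch grid
    llm_tokens = (h_patches // spatial_merge_size) * (w_patches // spatial_merge_size)
    return ffn_hidden_size, llm_tokens  # (5120, 96)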
from typing import Optional import torch from megatron.core import InferenceParams from megatron.core.models.common.vision_module.vision_module import VisionModule from megatron.core.models.vision.multimodal_projector import MultimodalProjector from megatron.core.packed_seq_params import PackedSeqParams from megatron.core.transformer.enums import ModelType from megatron.core.transformer.spec_utils import ModuleSpec from megatron.core.transformer.transformer_config import TransformerConfig from torch import nn from torch.nn import functional as F from .vision_transformer_block import Qwen2_5VisionTransformerBlock as TransformerBlock # copied from https://github.com/huggingface/transformers/blob/main/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py class PatchEmbed(nn.Module): def __init__( self, patch_size: int = 14, temporal_patch_size: int = 2, in_channels: int = 3, embed_dim: int = 1152, ) -> None: super().__init__() self.patch_size = patch_size self.temporal_patch_size = temporal_patch_size self.in_channels = in_channels self.embed_dim = embed_dim kernel_size = [temporal_patch_size, patch_size, patch_size] self.proj = nn.Conv3d(in_channels, embed_dim, kernel_size=kernel_size, stride=kernel_size, bias=False) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: target_dtype = self.proj.weight.dtype hidden_states = hidden_states.view( -1, self.in_channels, self.temporal_patch_size, self.patch_size, self.patch_size ) hidden_states = self.proj(hidden_states.to(dtype=target_dtype)).view(-1, self.embed_dim) return hidden_states # copied from https://github.com/huggingface/transformers/blob/main/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py class VisionRotaryEmbedding(nn.Module): def __init__(self, dim: int, theta: float = 10000.0) -> None: super().__init__() inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2, dtype=torch.float) / dim)) self.register_buffer("inv_freq", inv_freq, persistent=False) def forward(self, seqlen: int) -> torch.Tensor: seq = torch.arange(seqlen, device=self.inv_freq.device, dtype=self.inv_freq.dtype) freqs = torch.outer(seq, self.inv_freq) return freqs.float() class Qwen2_5VisionModel(VisionModule): """Qwen2.5 ViT vision model. Args: transformer_config (TransformerConfig): Transformer config for the vision encoder. transformer_layer_spec (ModuleSpec): Specifies module to use for transformer layers. projection_config (TransformerConfig): Config for the multimodal projector (merger MLP). projection_layer_spec (ModuleSpec): Specifies module to use for the projector layers. projection_type (str): Projector type. Defaults to "mlp". pre_process (bool): Include the embedding leg of the model. Defaults to True. post_process (bool): Include the projector. Defaults to False.
""" def __init__( self, transformer_config: TransformerConfig, transformer_layer_spec: ModuleSpec, projection_config: TransformerConfig, projection_layer_spec: ModuleSpec, projection_type: str = "mlp", pre_process: bool = True, post_process: bool = False, ) -> None: super().__init__(config=transformer_config) self.spatial_merge_size = transformer_config.spatial_merge_size embed_dim = transformer_config.hidden_size num_heads = transformer_config.num_attention_heads temporal_patch_size = transformer_config.temporal_patch_size patch_size = transformer_config.patch_size in_channels = transformer_config.in_channels self.patch_size = transformer_config.patch_size self.fullatt_block_indexes = transformer_config.fullatt_block_indexes self.window_size = transformer_config._qwen2_5_vl_window_size self.spatial_merge_unit = self.spatial_merge_size * self.spatial_merge_size self.max_sequence_length = transformer_config.seq_length self.patch_embed = PatchEmbed( patch_size=patch_size, temporal_patch_size=temporal_patch_size, in_channels=in_channels, embed_dim=embed_dim, ) head_dim = embed_dim // num_heads self.rotary_pos_emb = VisionRotaryEmbedding(head_dim // 2) self.model_type = ModelType.encoder_or_decoder self.pre_process = pre_process self.post_process = post_process # Transformer layers. # TODO: Follow-up changes will make pre and post_process configurable. They are needed for supporting # pipeline parallelism. # NOTE: a final layer norm and/or linear layer present in some implementations are omitted here. self.decoder = TransformerBlock( config=transformer_config, spec=transformer_layer_spec, pre_process=self.pre_process, post_process=self.post_process, post_layer_norm=True, ) self.merge_hidden_size = projection_config.ffn_hidden_size self.square_merge_size = self.merge_hidden_size // embed_dim if self.post_process: self.projection = MultimodalProjector( projection_config, projection_layer_spec, projection_type, projection_config.ffn_hidden_size ) else: self.projection = None self.input_tensor = None def set_input_tensor(self, input_tensor: torch.Tensor) -> None: """Sets input tensor to the model. Args: input_tensor (Tensor): Sets the input tensor for the model. 
""" if self.pre_process: # always True self.input_tensor = input_tensor else: raise NotImplementedError() def rot_pos_emb(self, grid_thw): pos_ids = [] for t, h, w in grid_thw: hpos_ids = torch.arange(h).unsqueeze(1).expand(-1, w) hpos_ids = hpos_ids.reshape( h // self.spatial_merge_size, self.spatial_merge_size, w // self.spatial_merge_size, self.spatial_merge_size, ) hpos_ids = hpos_ids.permute(0, 2, 1, 3) hpos_ids = hpos_ids.flatten() wpos_ids = torch.arange(w).unsqueeze(0).expand(h, -1) wpos_ids = wpos_ids.reshape( h // self.spatial_merge_size, self.spatial_merge_size, w // self.spatial_merge_size, self.spatial_merge_size, ) wpos_ids = wpos_ids.permute(0, 2, 1, 3) wpos_ids = wpos_ids.flatten() pos_ids.append(torch.stack([hpos_ids, wpos_ids], dim=-1).repeat(t, 1)) pos_ids = torch.cat(pos_ids, dim=0).to(grid_thw.device) max_grid_size = grid_thw[:, 1:].max() rotary_pos_emb_full = self.rotary_pos_emb(max_grid_size).to(grid_thw.device) rotary_pos_emb = rotary_pos_emb_full[pos_ids].flatten(1) return rotary_pos_emb def get_window_index(self, grid_thw): window_index: list = [] cu_window_seqlens: list = [0] window_index_id = 0 vit_merger_window_size = self.window_size // self.spatial_merge_size // self.patch_size for grid_t, grid_h, grid_w in grid_thw: llm_grid_h, llm_grid_w = ( grid_h // self.spatial_merge_size, grid_w // self.spatial_merge_size, ) index = torch.arange(grid_t * llm_grid_h * llm_grid_w).reshape(grid_t, llm_grid_h, llm_grid_w) pad_h = vit_merger_window_size - llm_grid_h % vit_merger_window_size pad_w = vit_merger_window_size - llm_grid_w % vit_merger_window_size num_windows_h = (llm_grid_h + pad_h) // vit_merger_window_size num_windows_w = (llm_grid_w + pad_w) // vit_merger_window_size index_padded = F.pad(index, (0, pad_w, 0, pad_h), "constant", -100) index_padded = index_padded.reshape( grid_t, num_windows_h, vit_merger_window_size, num_windows_w, vit_merger_window_size, ) index_padded = index_padded.permute(0, 1, 3, 2, 4).reshape( grid_t, num_windows_h * num_windows_w, vit_merger_window_size, vit_merger_window_size, ) seqlens = (index_padded != -100).sum([2, 3]).reshape(-1) index_padded = index_padded.reshape(-1) index_new = index_padded[index_padded != -100] window_index.append(index_new + window_index_id) cu_seqlens_tmp = seqlens.cumsum(0) * self.spatial_merge_unit + cu_window_seqlens[-1] cu_window_seqlens.extend(cu_seqlens_tmp.tolist()) window_index_id += (grid_t * llm_grid_h * llm_grid_w).item() window_index = torch.cat(window_index, dim=0) return window_index, cu_window_seqlens def forward( self, vision_data: Optional[torch.Tensor], grid_thw: torch.Tensor, inference_params: Optional[InferenceParams] = None, extra_block_kwargs: dict = None, ) -> torch.Tensor: """Forward function of the Qwen2 Vision Model. This function passes the input tensors through the embedding layer and then the transformer. Args: x (torch.Tensor): input image/video data of shape [n_tokens, n_dims] grid_thw (torch.Tensor): the size tensor indicates grid size of each image/frame packed_seq_params (PackedSeqParams): parameters to build attention mask in the backend Returns: x (torch.Tensor): output after final transformer block of shape [b, s, h]. 
""" assert grid_thw is not None assert self.input_tensor is None assert inference_params is None # Rotary positional embeddings (embedding is None for PP intermediate devices) vision_data = self.patch_embed(vision_data) window_index, cu_window_seqlens = self.get_window_index(grid_thw) cu_window_seqlens = torch.tensor( cu_window_seqlens, device=vision_data.device, dtype=torch.int32, ) cu_window_seqlens = torch.unique_consecutive(cu_window_seqlens) seq_len, _ = vision_data.size() vision_data = vision_data.reshape(seq_len // self.spatial_merge_unit, self.spatial_merge_unit, -1) vision_data = vision_data[window_index, :, :] vision_data = vision_data.reshape(seq_len, 1, -1) rotary_pos_emb = self.rot_pos_emb(grid_thw) rotary_pos_emb = rotary_pos_emb.reshape(seq_len // self.spatial_merge_unit, self.spatial_merge_unit, -1) rotary_pos_emb = rotary_pos_emb[window_index, :, :] rotary_pos_emb = rotary_pos_emb.reshape(seq_len, 1, 1, -1).repeat(1, 1, 1, 2) hidden_states = self.decoder( hidden_states=vision_data, attention_mask=None, inference_params=inference_params, rotary_pos_emb=rotary_pos_emb, packed_seq_params=self.build_packed_seq_params(None, cu_window_seqlens), packed_seq_params_full=self.build_packed_seq_params(grid_thw), fullatt_block_indexes=self.fullatt_block_indexes, **(extra_block_kwargs or {}), ) hidden_states = self.projection(hidden_states.view(-1, self.merge_hidden_size)) reverse_indices = torch.argsort(window_index) return hidden_states[reverse_indices, :] def build_packed_seq_params( self, grid_thw: Optional[torch.Tensor], cu_seqlens: Optional[torch.Tensor] = None, ) -> PackedSeqParams: # NOTE: each frame is a sequence (rather than each grid) if grid_thw is not None: seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]) cu_seqlens = seqlens.cumsum(dim=0) cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0).int() else: seqlens = cu_seqlens[1:] - cu_seqlens[:-1] max_seqlen_q = seqlens.max() return PackedSeqParams( cu_seqlens_q=cu_seqlens, cu_seqlens_kv=cu_seqlens, qkv_format="thd", max_seqlen_q=max_seqlen_q, max_seqlen_kv=max_seqlen_q, ) ================================================ FILE: verl_rl/verl/models/mcore/qwen2_5_vl/vision_transformer_block.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. # Copyright (c) 2024 Alibaba PAI Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from megatron.core.transformer.transformer_block import * class Qwen2_5VisionTransformerBlock(TransformerBlock): def _checkpointed_forward( self, hidden_states: Tensor, attention_mask: Tensor, context: Tensor, context_mask: Tensor, rotary_pos_emb: Tensor, attention_bias: Tensor, packed_seq_params: PackedSeqParams, packed_seq_params_full: PackedSeqParams, fullatt_block_indexes, ): """Forward method with activation checkpointing.""" def custom(start: int, end: int): def custom_forward(hidden_states, attention_mask, context, context_mask, rotary_pos_emb): for index in range(start, end): if index in fullatt_block_indexes: packed_seq_params_now = packed_seq_params_full else: packed_seq_params_now = packed_seq_params layer = self._get_layer(index) hidden_states, context = layer( hidden_states=hidden_states, attention_mask=attention_mask, context=context, context_mask=context_mask, rotary_pos_emb=rotary_pos_emb, attention_bias=attention_bias, inference_context=None, packed_seq_params=packed_seq_params_now, ) return hidden_states, context return custom_forward def checkpoint_handler(forward_func): """Determines whether to use `te_checkpoint` or `tensor_parallel.checkpoint`""" if self.config.fp8: return te_checkpoint( forward_func, self.config.distribute_saved_activations, tensor_parallel.random.get_cuda_rng_tracker, parallel_state.get_tensor_model_parallel_group(), hidden_states, attention_mask, context, context_mask, rotary_pos_emb, ) else: return tensor_parallel.checkpoint( forward_func, self.config.distribute_saved_activations, hidden_states, attention_mask, context, context_mask, rotary_pos_emb, ) if self.config.recompute_method == "uniform": # Uniformly divide the total number of Transformer layers and checkpoint # the input activation of each divided chunk. # A method to further reduce memory usage by reducing the number of checkpoints. layer_idx = 0 while layer_idx < self.num_layers_per_pipeline_rank: hidden_states, context = checkpoint_handler( custom(layer_idx, layer_idx + self.config.recompute_num_layers) ) layer_idx += self.config.recompute_num_layers elif self.config.recompute_method == "block": # Checkpoint the input activation of only a set number of individual # Transformer layers and skip the rest. # A method to make full use of device memory by removing redundant re-computation. recompute_skip_num_layers = 0 for layer_idx in range(self.num_layers_per_pipeline_rank): # Skip recomputation when input grad computation is not needed. # Need to have at least one input tensor with gradient computation # for the re-entrant autograd engine.
if self.config.fp8 and not hidden_states.requires_grad: recompute_skip_num_layers += 1 if ( layer_idx >= recompute_skip_num_layers and layer_idx < self.config.recompute_num_layers + recompute_skip_num_layers ): hidden_states, context = checkpoint_handler(custom(layer_idx, layer_idx + 1)) else: hidden_states, context = custom(layer_idx, layer_idx + 1)( hidden_states, attention_mask, context, context_mask, rotary_pos_emb ) else: raise ValueError("Invalid activation recompute method.") return hidden_states def forward( self, hidden_states: Union[Tensor, WrappedTensor], attention_mask: Optional[Tensor], context: Optional[Tensor] = None, context_mask: Optional[Tensor] = None, rotary_pos_emb: Optional[Tensor] = None, rotary_pos_cos: Optional[Tensor] = None, rotary_pos_sin: Optional[Tensor] = None, attention_bias: Optional[Tensor] = None, inference_context: Optional[BaseInferenceContext] = None, packed_seq_params: Optional[PackedSeqParams] = None, sequence_len_offset: Optional[Tensor] = None, packed_seq_params_full: PackedSeqParams = None, fullatt_block_indexes=None, *, inference_params: Optional[BaseInferenceContext] = None, ): """ Perform the forward pass through the transformer block. This method handles the core computation of the transformer, including self-attention, optional cross-attention, and feed-forward operations. Args: hidden_states (Union[Tensor, WrappedTensor]): Input tensor of shape [s, b, h] where s is the sequence length, b is the batch size, and h is the hidden size. Can be passed as a WrappedTensor during inference to avoid an obsolete reference in the calling function. attention_mask (Tensor): Boolean tensor of shape [1, 1, s, s] for masking self-attention. context (Tensor, optional): Context tensor for cross-attention. context_mask (Tensor, optional): Mask for cross-attention context. rotary_pos_emb (Tensor, optional): Rotary positional embeddings. attention_bias (Tensor): Bias tensor for Q * K.T, of shape broadcastable to [b, num_head, sq, skv], e.g. [1, 1, sq, skv]. Used as an alternative to the attention mask for TE cuDNN attention. inference_context (BaseInferenceContext, optional): Parameters for inference-time optimizations. packed_seq_params (PackedSeqParams, optional): Parameters for packed sequence processing. Returns: Union[Tensor, Tuple[Tensor, Tensor]]: The output hidden states tensor of shape [s, b, h], and optionally the updated context tensor if cross-attention is used. """ inference_context = deprecate_inference_params(inference_context, inference_params) # Delete the obsolete reference to the initial input tensor if necessary if isinstance(hidden_states, WrappedTensor): hidden_states = hidden_states.unwrap() if not self.pre_process: # See set_input_tensor() hidden_states = self.input_tensor # Update the inference parameters with the current batch size in case it is variable if inference_context and not self.training: inference_context.current_batch_size = hidden_states.size(1) # Viewless tensor. # - We only need to create a viewless tensor in the case of micro batch # size (mbs) == 1, since in this case, 'hidden_states.transpose()' # above creates a view tensor, and '.contiguous()' is a pass-through. # For mbs >= 2, '.contiguous()' creates a new tensor, eliminating # the need to make it viewless. # # However, we don't explicitly check mbs == 1 here because # make_viewless_tensor() has negligible overhead when its input # is already viewless.
# # - For the 'else' case above, calling make_viewless_tensor() here is # likely redundant, since p2p_communication.py (likely originator) # already creates viewless tensors. That said, make_viewless_tensor() # is called here to be future-proof and corner-case-proof. hidden_states = make_viewless_tensor(inp=hidden_states, requires_grad=True, keep_graph=True) if self.config.sequence_parallel: rng_context = tensor_parallel.get_cuda_rng_tracker().fork() else: rng_context = nullcontext() # If fp8_recipe is delayed, wrap the entire pass with get_fp8_context(), # otherwise do nothing extra at the outer level. # If we are using other fp8 recipes, the context manager enter & exit are free, # so we can wrap fp8_context within the for loop over layers for fine-grained # control of which layers run in fp8 or bf16. use_outer_fp8_context = self.config.fp8 and self.config.fp8_recipe == Fp8Recipe.delayed use_inner_fp8_context = self.config.fp8 and self.config.fp8_recipe != Fp8Recipe.delayed outer_fp8_context = get_fp8_context(self.config) if use_outer_fp8_context else nullcontext() with rng_context, outer_fp8_context: # Forward pass. if self.config.recompute_granularity == "full" and self.training: hidden_states = self._checkpointed_forward( hidden_states=hidden_states, attention_mask=attention_mask, context=context, context_mask=context_mask, rotary_pos_emb=rotary_pos_emb, attention_bias=attention_bias, packed_seq_params=packed_seq_params, packed_seq_params_full=packed_seq_params_full, fullatt_block_indexes=fullatt_block_indexes, ) else: for l_no, layer in enumerate(self.layers): inner_fp8_context = ( get_fp8_context(self.config, layer.layer_number - 1) if use_inner_fp8_context else nullcontext() ) if l_no in fullatt_block_indexes: packed_seq_params_now = packed_seq_params_full else: packed_seq_params_now = packed_seq_params with self.offload_context, inner_fp8_context: hidden_states, context = layer( hidden_states=hidden_states, attention_mask=attention_mask, context=context, context_mask=context_mask, rotary_pos_emb=rotary_pos_emb, rotary_pos_cos=rotary_pos_cos, rotary_pos_sin=rotary_pos_sin, attention_bias=attention_bias, inference_context=inference_context, packed_seq_params=packed_seq_params_now, sequence_len_offset=sequence_len_offset, ) if ( torch.is_grad_enabled() and self.config.cpu_offloading and self.group_prefetch_offload_commit_async is not None ): hidden_states = self.group_prefetch_offload_commit_async(hidden_states) # Final layer norm. if self.final_layernorm is not None: hidden_states = self.final_layernorm(hidden_states) # TENorm produces a "viewed" tensor. This will result in schedule.py's # deallocate_output_tensor() throwing an error, so a viewless tensor is # created to prevent this. hidden_states = make_viewless_tensor(inp=hidden_states, requires_grad=True, keep_graph=True) return hidden_states ================================================ FILE: verl_rl/verl/models/mcore/readme.md ================================================ # verl Megatron-Core Models Earlier versions of verl used `Megatron-LM` 0.4 and workaround huggingface model classes. To better use the latest features and speedups of modern Megatron, we are migrating to `Megatron-Core` (mcore) and using the recommended `GPTModel` class for all language models. With mcore `GPTModel`, we can use the latest features like `context parallel`, `expert parallel`, `dist_checkpointing`, etc., and we can update mcore with little effort for new features in the future.
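As a hypothetical sketch of how these pieces compose (the model name is a placeholder, and in practice this runs inside an initialized Megatron parallel state), the registry helpers shown later in this dump tie the steps together:

```python
import torch
from transformers import AutoConfig

from verl.models.mcore.registry import (
    get_mcore_forward_fn,
    hf_to_mcore_config,
    init_mcore_model,
)

hf_config = AutoConfig.from_pretrained("Qwen/Qwen2.5-7B-Instruct")  # placeholder model

# Convert the hf config, build the mcore GPTModel, and look up its forward wrapper.
tfconfig = hf_to_mcore_config(hf_config, torch.bfloat16)
model = init_mcore_model(tfconfig, hf_config, pre_process=True, post_process=True)
forward_fn = get_mcore_forward_fn(hf_config)
```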
The migration has been successful with the help of the mcore team and the community. What we have done is: 1. update `Megatron` version to `0.11.0` 2. migrate `LlamaForCausalLM` and `Qwen2ForCausalLM` to mcore `GPTModel` 3. support sequence packing/thd format. 4. support `tensor parallel`, `pipeline parallel`, `sequence parallel`, `virtual pipeline parallel`, `context parallel`. 5. support the mcore `dist_checkpointing` feature and a basic offline weights conversion script from huggingface to mcore `dist_checkpointing` format. We are working on the following features: - support `Qwen2MoeForCausalLM` - support `MixtralForCausalLM` - support `DeepseekV3ForCausalLM` - support `expert parallel` Features we invite the community to contribute: - better scripts for offline weights conversion from huggingface to mcore `dist_checkpointing` format. - conversion of large models with multiple GPUs - conversion of large models with a single GPU - refactor `megatron_checkpoint_manager.py` to use the `dist_checkpointing` format. - support llama4 - support qwen2.5-vl To track the progress of verl mcore integration, please refer to the [mcore integration issue](https://github.com/volcengine/verl/issues/1033). ## How things work now To engage the community in contributing, here are the key steps in our mcore integration process and the features under development. The huggingface `transformers` library is the de facto standard model zoo, while mcore excels at computation efficiency. The main challenge is the conversion between the two. Main steps: 1. modelling the huggingface model with mcore `GPTModel` - a. convert the huggingface config to mcore `TransformerConfig` - b. init the mcore `GPTModel` with the converted config - c. load the huggingface model weights to the `GPTModel` 2. online weight conversion from mcore to huggingface (because the rollout engine `vLLM` uses the huggingface format) - a. bridge the gap between the mcore and huggingface weight formats and name mapping - b. online resharding of the mcore weights to the rollout engine - this part is very complicated because multiple parallel strategies are composed between mcore and the rollout engine 3. support the mcore features in verl - a. support `tensor parallel`, `pipeline parallel`, `sequence parallel`, `virtual pipeline parallel`, `context parallel` - b. support recompute and other mcore speed-up features 4. checkpointing - a. support resuming verl training. - b. support exporting the mcore checkpoint to huggingface format, for downstream inference. ### Modelling the huggingface model with mcore `GPTModel` The first step is to convert the huggingface config to mcore `TransformerConfig` and init the mcore `GPTModel` with the converted config. See the code in `verl/models/mcore/config_converter.py` and `verl/verl/models/mcore/models/model_initializer.py`. The corresponding model forward code is in `verl/verl/models/mcore/models/model_forward.py`. There are two ways of loading the huggingface model weights into the `GPTModel`: 1. Runtime loading - every rank loads the entire huggingface model weights and then shards and converts them to mcore weights. - speed is slow and memory consumption is high. - this way is deprecated and will not support new models. 2. Offline loading - use an offline script to convert the huggingface model weights to mcore weights and save them in the mcore `dist_checkpointing` format. - online loading and sharding is done automatically by the mcore `dist_checkpointing` format. The speed is fast and memory consumption is low.
- the offline script is in `verl/scripts/converter_hf_to_mcore.py`. ### online weight conversion from mcore to huggingface See the function `convert_megatron_model_to_transformers_model` in `verl/utils/megatron_utils.py` for the details. It should be refactored for extensibility and better performance. ### support the mcore features in verl Most of the features of `GPTModel` are supported out-of-the-box in verl by changing the `TransformerConfig`, except those about parallel strategies, such as `expert parallel`. Features related to parallel strategies require changes to the online weights conversion (especially the resharding part) and verl work dispatching. ### checkpointing The existing checkpointing code is in `verl/utils/checkpoint/megatron_checkpoint_manager.py`. The script to convert a checkpoint to huggingface format is in `verl/scripts/model_merger`. The existing checkpoint format simply saves every rank's weights and optimizer states. It should be refactored to use the `dist_checkpointing` format. ## How to support new models 1. make sure the model is supported by vLLM 2. modelling the huggingface model with mcore `GPTModel` (the [Pai-Megatron-Patch](https://github.com/alibaba/Pai-Megatron-Patch/tree/main) is a good reference) - a. convert the huggingface config to mcore `TransformerConfig` - b. init the mcore `GPTModel` with the converted config - c. load the huggingface model weights to the `GPTModel` - d. for VLMs the interface might be different; it is ok to add a new model class with `GPTModel` as its submodule. 3. offline weights conversion from huggingface to mcore `dist_checkpointing` format 4. support online weights conversion from mcore to huggingface - it is recommended to initialize a vLLM model with the converted mcore weights, and then test whether the generated sequences are correct. ## How to scale up to larger models like deepseek-v3 or other 100B+ models The greatest challenge in scaling up to larger models is memory consumption. The necessary features under development for scaling up are: 1. Training engine part - expert parallel 2. Rollout engine part - pipeline parallel - expert parallel - more efficient and general weight resharding and loading 3. Offline weights conversion - support weights larger than single GPU memory ================================================ FILE: verl_rl/verl/models/mcore/registry.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Registry module for model architecture components.
""" from enum import Enum from typing import Callable import torch import torch.nn as nn from .config_converter import ( PretrainedConfig, TransformerConfig, hf_to_mcore_config_dense, hf_to_mcore_config_dpskv3, hf_to_mcore_config_llama4, hf_to_mcore_config_mixtral, hf_to_mcore_config_qwen2_5_vl, hf_to_mcore_config_qwen2moe, hf_to_mcore_config_qwen3moe, ) from .model_forward import ( gptmodel_forward, gptmodel_forward_qwen2_5_vl, ) from .model_forward_fused import ( fused_forward_gptmodel, fused_forward_qwen2_5_vl, ) from .model_initializer import ( BaseModelInitializer, DeepseekV3Model, DenseModel, MixtralModel, Qwen2MoEModel, Qwen3MoEModel, Qwen25VLModel, ) from .weight_converter import ( McoreToHFWeightConverterDense, McoreToHFWeightConverterDpskv3, McoreToHFWeightConverterMixtral, McoreToHFWeightConverterQwen2_5_VL, McoreToHFWeightConverterQwen2Moe, McoreToHFWeightConverterQwen3Moe, ) class SupportedModel(Enum): LLAMA = "LlamaForCausalLM" # tested QWEN2 = "Qwen2ForCausalLM" # tested QWEN2_MOE = "Qwen2MoeForCausalLM" # pending DEEPSEEK_V3 = "DeepseekV3ForCausalLM" # not tested MIXTRAL = "MixtralForCausalLM" # tested QWEN2_5_VL = "Qwen2_5_VLForConditionalGeneration" # not supported LLAMA4 = "Llama4ForConditionalGeneration" # not tested QWEN3 = "Qwen3ForCausalLM" # tested QWEN3_MOE = "Qwen3MoeForCausalLM" # not tested # Registry for model configuration converters MODEL_CONFIG_CONVERTER_REGISTRY: dict[SupportedModel, Callable[[PretrainedConfig, torch.dtype], TransformerConfig]] = { SupportedModel.LLAMA: hf_to_mcore_config_dense, SupportedModel.QWEN2: hf_to_mcore_config_dense, SupportedModel.QWEN2_MOE: hf_to_mcore_config_qwen2moe, SupportedModel.DEEPSEEK_V3: hf_to_mcore_config_dpskv3, SupportedModel.MIXTRAL: hf_to_mcore_config_mixtral, SupportedModel.QWEN2_5_VL: hf_to_mcore_config_qwen2_5_vl, SupportedModel.LLAMA4: hf_to_mcore_config_llama4, SupportedModel.QWEN3: hf_to_mcore_config_dense, SupportedModel.QWEN3_MOE: hf_to_mcore_config_qwen3moe, SupportedModel.QWEN2_5_VL: hf_to_mcore_config_qwen2_5_vl, } # Registry for model initializers MODEL_INITIALIZER_REGISTRY: dict[SupportedModel, type[BaseModelInitializer]] = { SupportedModel.LLAMA: DenseModel, SupportedModel.QWEN2: DenseModel, SupportedModel.QWEN2_MOE: Qwen2MoEModel, SupportedModel.MIXTRAL: MixtralModel, SupportedModel.DEEPSEEK_V3: DeepseekV3Model, SupportedModel.QWEN2_5_VL: Qwen25VLModel, SupportedModel.LLAMA4: DenseModel, SupportedModel.QWEN3: DenseModel, SupportedModel.QWEN3_MOE: Qwen3MoEModel, SupportedModel.QWEN2_5_VL: Qwen25VLModel, } # Registry for model forward functions MODEL_FORWARD_REGISTRY: dict[SupportedModel, Callable] = { SupportedModel.LLAMA: gptmodel_forward, SupportedModel.QWEN2: gptmodel_forward, SupportedModel.QWEN2_MOE: gptmodel_forward, SupportedModel.MIXTRAL: gptmodel_forward, SupportedModel.DEEPSEEK_V3: gptmodel_forward, SupportedModel.QWEN2_5_VL: gptmodel_forward, SupportedModel.LLAMA4: gptmodel_forward, SupportedModel.QWEN3: gptmodel_forward, SupportedModel.QWEN3_MOE: gptmodel_forward, SupportedModel.QWEN2_5_VL: gptmodel_forward_qwen2_5_vl, SupportedModel.DEEPSEEK_V3: gptmodel_forward, } # Registry for model forward functions MODEL_FORWARD_FUSED_REGISTRY: dict[SupportedModel, Callable] = { SupportedModel.LLAMA: fused_forward_gptmodel, SupportedModel.QWEN2: fused_forward_gptmodel, SupportedModel.QWEN2_MOE: fused_forward_gptmodel, SupportedModel.MIXTRAL: fused_forward_gptmodel, SupportedModel.DEEPSEEK_V3: fused_forward_gptmodel, SupportedModel.QWEN2_5_VL: fused_forward_qwen2_5_vl, SupportedModel.LLAMA4: 
fused_forward_gptmodel, SupportedModel.QWEN3: fused_forward_gptmodel, SupportedModel.QWEN3_MOE: fused_forward_gptmodel, SupportedModel.QWEN2_5_VL: fused_forward_qwen2_5_vl, SupportedModel.DEEPSEEK_V3: fused_forward_gptmodel, } # Registry for model weight converters MODEL_WEIGHT_CONVERTER_REGISTRY: dict[SupportedModel, type] = { SupportedModel.LLAMA: McoreToHFWeightConverterDense, SupportedModel.QWEN2: McoreToHFWeightConverterDense, SupportedModel.QWEN2_MOE: McoreToHFWeightConverterQwen2Moe, SupportedModel.MIXTRAL: McoreToHFWeightConverterMixtral, SupportedModel.DEEPSEEK_V3: McoreToHFWeightConverterDpskv3, SupportedModel.QWEN3: McoreToHFWeightConverterDense, SupportedModel.QWEN3_MOE: McoreToHFWeightConverterQwen3Moe, SupportedModel.QWEN2_5_VL: McoreToHFWeightConverterQwen2_5_VL, } def get_supported_model(model_type: str) -> SupportedModel: try: return SupportedModel(model_type) except ValueError as err: supported_models = [e.value for e in SupportedModel] raise NotImplementedError( f"Model Type: {model_type} not supported. Supported models: {supported_models}" ) from err def hf_to_mcore_config( hf_config: PretrainedConfig, dtype: torch.dtype, **override_transformer_config_kwargs ) -> TransformerConfig: """Convert huggingface PretrainedConfig to mcore TransformerConfig. Args: hf_config: The huggingface PretrainedConfig. dtype: The dtype of the model. **override_transformer_config_kwargs: The kwargs to override the transformer config. Returns: The mcore TransformerConfig. """ assert len(hf_config.architectures) == 1, "Only one architecture is supported for now" model = get_supported_model(hf_config.architectures[0]) return MODEL_CONFIG_CONVERTER_REGISTRY[model](hf_config, dtype, **override_transformer_config_kwargs) def init_mcore_model( tfconfig: TransformerConfig, hf_config: PretrainedConfig, pre_process: bool = True, post_process: bool = None, *, share_embeddings_and_output_weights: bool = False, value: bool = False, **extra_kwargs, # may be used for vlm and moe ) -> nn.Module: """ Initialize a Mcore model. Args: tfconfig: The transformer config. hf_config: The HuggingFace config. pre_process: Optional pre-processing function. post_process: Optional post-processing function. share_embeddings_and_output_weights: Whether to share embeddings and output weights. value: Whether to use value. **extra_kwargs: Additional keyword arguments. Returns: The initialized model. """ assert len(hf_config.architectures) == 1, "Only one architecture is supported for now" model = get_supported_model(hf_config.architectures[0]) initializer_cls = MODEL_INITIALIZER_REGISTRY[model] initializer = initializer_cls(tfconfig, hf_config) return initializer.initialize( pre_process=pre_process, post_process=post_process, share_embeddings_and_output_weights=share_embeddings_and_output_weights, value=value, **extra_kwargs, ) def get_mcore_forward_fn(hf_config: PretrainedConfig) -> Callable: """ Get the forward function for given model architecture. """ assert len(hf_config.architectures) == 1, "Only one architecture is supported for now" model = get_supported_model(hf_config.architectures[0]) return MODEL_FORWARD_REGISTRY[model] def get_mcore_forward_fused_fn(hf_config: PretrainedConfig) -> Callable: """ Get the forward function for given model architecture. 
""" assert len(hf_config.architectures) == 1, "Only one architecture is supported for now" model = get_supported_model(hf_config.architectures[0]) return MODEL_FORWARD_FUSED_REGISTRY[model] def get_mcore_weight_converter(hf_config: PretrainedConfig, dtype: torch.dtype) -> Callable: """ Get the weight converter for given model architecture. """ assert len(hf_config.architectures) == 1, "Only one architecture is supported for now" model = get_supported_model(hf_config.architectures[0]) tfconfig = hf_to_mcore_config(hf_config, dtype) return MODEL_WEIGHT_CONVERTER_REGISTRY[model](hf_config, tfconfig) ================================================ FILE: verl_rl/verl/models/mcore/saver.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import time import torch import torch.distributed as dist from megatron.core import mpu from megatron.core.distributed import DistributedDataParallel as LocalDDP from megatron.core.transformer.module import Float16Module from torch.nn.parallel import DistributedDataParallel as torchDDP from verl.utils.device import get_device_id, get_torch_device from verl.utils.logger import print_rank_0 from verl.utils.megatron_utils import unwrap_model def _megatron_calc_global_rank( tp_rank: int = 0, dp_rank: int = 0, pp_rank: int = 0, cp_rank: int = 0, ep_rank: int = 0 ): """Calculate global rank with support for CP/EP parallelism""" # Get parallel sizes for each dimension tp_size = mpu.get_tensor_model_parallel_world_size() dp_size = mpu.get_data_parallel_world_size() pp_size = mpu.get_pipeline_model_parallel_world_size() cp_size = mpu.get_context_parallel_world_size() # ep_size = mpu.get_expert_model_parallel_world_size() # Verify total GPU count matches (must be consistent with parallel_state.py) total_size = tp_size * dp_size * pp_size * cp_size assert total_size == torch.distributed.get_world_size(), ( f"{tp_size}x{dp_size}x{pp_size}x{cp_size} != {torch.distributed.get_world_size()}" ) # Core calculation logic (corresponds to RankGenerator order parameter) # Assumes default order is "tp-cp-ep-dp-pp" return ((pp_rank * dp_size + dp_rank) * cp_size + cp_rank) * tp_size + tp_rank def _megatron_calc_layer_map(config): """Calculate the mapping of global layer_idx to local layer_idx Returns: layer_map (Dict: int -> tuple(int, int, int)): mapping from the global layer index to a tuple of (pp_rank, virtual_pp_rank, layer_idx inside model) """ from megatron.core import mpu pp_size = mpu.get_pipeline_model_parallel_world_size() virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1 layer_map = dict() num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers for pp_rank_idx in range(pp_size): for virtual_pp_rank_idx in range(virtual_pp_size): layer_offset = ( virtual_pp_rank_idx * (config.num_hidden_layers // 
virtual_pp_size) + pp_rank_idx * num_layers_per_model ) for layer_idx in range(num_layers_per_model): layer_map[layer_offset + layer_idx] = ( pp_rank_idx, virtual_pp_rank_idx, layer_idx, ) return layer_map def merge_megatron_ckpt_gptmodel(wrapped_models, config, dtype, is_value_model=False, tie_word_embeddings=False): """Merge sharded parameters of a Megatron module into a merged checkpoint. Args: wrapped_models (list of megatron.core.distributed.DistributedDataParallel): The local DDP wrapped megatron modules. config (str or None): HF config for model dtype: model params type is_value_model: if model is value model tie_word_embeddings: tie_word_embeddings Returns: state_dict (dict): The merged state_dict in rank 0, and an empty dictionary in other ranks. """ start_time = time.time() def _get_gpt_model(model): return model dp_rank = mpu.get_data_parallel_rank() pp_size = mpu.get_pipeline_model_parallel_world_size() pp_rank = mpu.get_pipeline_model_parallel_rank() cp_rank = mpu.get_context_parallel_rank() virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1 mp_group = mpu.get_model_parallel_group() if dist.get_rank() == 0: assert mp_group.rank() == 0, f"mp_rank:[{mp_group.rank()}] != 0 on rank #0" assert pp_rank == 0, f"pp_rank:[{pp_rank}] != 0 on rank #0" assert dp_rank == 0, f"dp_rank:[{dp_rank}] != 0 on rank #0" if not isinstance(wrapped_models, list | tuple): wrapped_models = [wrapped_models] assert len(wrapped_models) == virtual_pp_size num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers models = [None] * len(wrapped_models) for i, wrapped_model in enumerate(wrapped_models): models[i] = unwrap_model(wrapped_model, (torchDDP, LocalDDP, Float16Module)) assert len(models[i].decoder.layers) == num_layers_per_model, ( "len model layers {} not equal to num_layers_per_model {}".format( len(models[i].decoder.layers), num_layers_per_model ) ) state_dict = dict() def _get_cpu_tensor(tensor: torch.Tensor): if tensor is None: return None if tensor.device == torch.device("cpu"): return tensor.detach().clone() return tensor.detach().cpu() def _broadcast_tensor(tensor, name, src_pp_rank) -> torch.Tensor: """broadcast tensor across mp_group""" nonlocal state_dict nonlocal mp_group src_rank = _megatron_calc_global_rank(tp_rank=0, dp_rank=0, pp_rank=src_pp_rank, cp_rank=cp_rank) if torch.distributed.get_rank() == src_rank: if tensor is None: weight = None tensor_shape = None else: weight = tensor tensor_shape = weight.shape else: weight = None tensor_shape = None obj_list = [tensor_shape] dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group) tensor_shape = obj_list[0] if tensor_shape is None: # all or none ranks in the mp_group should reach here print_rank_0(f"tensor:[{name}] not exist, skip collect") return if weight is None: weight = torch.empty( tensor_shape, dtype=dtype, device=get_device_id(), requires_grad=False, ) dist.broadcast(weight, src=src_rank, group=mp_group) if torch.distributed.get_rank() == 0: state_dict[name] = _get_cpu_tensor(weight) def _broadcast_tp_shard_tensor(tensor, name, src_pp_rank, concat_dim=0, mutate_func=None) -> torch.Tensor: """broadcast tensor in tp shards across mp_group""" nonlocal state_dict nonlocal mp_group # tp_rank = mpu.get_tensor_model_parallel_rank() tp_size = mpu.get_tensor_model_parallel_world_size() src_rank = _megatron_calc_global_rank(tp_rank=0, dp_rank=0, pp_rank=src_pp_rank, cp_rank=cp_rank) chunk_shape =
tensor.shape if torch.distributed.get_rank() == src_rank else None obj_list = [chunk_shape] dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group) chunk_shape = obj_list[0] if chunk_shape is None: # all or none ranks in the mp_group should reach here print_rank_0(f"tp_shard tensor:[{name}] not exist, skip collecting") return buffer_tensor = torch.empty( chunk_shape, dtype=dtype, device=get_device_id(), requires_grad=False, ) chunk_tensors = [None] * tp_size for i in range(tp_size): cur_src_rank = _megatron_calc_global_rank(tp_rank=i, dp_rank=0, pp_rank=src_pp_rank, cp_rank=cp_rank) sync_tensor = tensor if torch.distributed.get_rank() == cur_src_rank else buffer_tensor dist.broadcast(sync_tensor, src=cur_src_rank, group=mp_group) if torch.distributed.get_rank() == 0: chunk_tensors[i] = _get_cpu_tensor(sync_tensor) if torch.distributed.get_rank() == 0: full_tensor = torch.concat(chunk_tensors, dim=concat_dim) if mutate_func is not None: full_tensor = mutate_func(full_tensor) state_dict[name] = full_tensor def _broadcast_tp_shard_tensor_gate_up(tensor, gate_name, up_name, src_pp_rank) -> torch.Tensor: """broadcast tensor in tp shards across mp_group""" nonlocal state_dict nonlocal mp_group # tp_rank = mpu.get_tensor_model_parallel_rank() tp_size = mpu.get_tensor_model_parallel_world_size() src_rank = _megatron_calc_global_rank(tp_rank=0, dp_rank=0, pp_rank=src_pp_rank, cp_rank=cp_rank) chunk_shape = tensor.shape if torch.distributed.get_rank() == src_rank else None obj_list = [chunk_shape] dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group) chunk_shape = obj_list[0] if chunk_shape is None: # all or none ranks in the mp_group should reach here print_rank_0(f"tp_shard tensor:[{gate_name, up_name}] not exist, skip collecting") return buffer_tensor = torch.empty( chunk_shape, dtype=dtype, device=get_device_id(), requires_grad=False, ) chunk_tensors = [None] * tp_size for i in range(tp_size): cur_src_rank = _megatron_calc_global_rank(tp_rank=i, dp_rank=0, pp_rank=src_pp_rank, cp_rank=cp_rank) sync_tensor = tensor if torch.distributed.get_rank() == cur_src_rank else buffer_tensor dist.broadcast(sync_tensor, src=cur_src_rank, group=mp_group) if torch.distributed.get_rank() == 0: chunk_tensors[i] = _get_cpu_tensor(sync_tensor) if torch.distributed.get_rank() == 0: full_tensor = torch.concat(chunk_tensors, dim=0) intermediate_size_tp = config.intermediate_size // tp_size gate_weight_list = [] up_weight_list = [] for i in range(tp_size): gate_up_weight_tp = full_tensor[intermediate_size_tp * 2 * i : intermediate_size_tp * 2 * (i + 1)] gate_weight_tp = gate_up_weight_tp[:intermediate_size_tp] up_weight_tp = gate_up_weight_tp[intermediate_size_tp:] gate_weight_list.append(gate_weight_tp) up_weight_list.append(up_weight_tp) state_dict[gate_name] = torch.cat(gate_weight_list, dim=0) state_dict[up_name] = torch.cat(up_weight_list, dim=0) def _broadcast_tp_shard_tensor_qkv(tensor, q_name, k_name, v_name, src_pp_rank): """broadcast tensor in tp shards across mp_group""" nonlocal state_dict nonlocal mp_group # tp_rank = mpu.get_tensor_model_parallel_rank() tp_size = mpu.get_tensor_model_parallel_world_size() src_rank = _megatron_calc_global_rank(tp_rank=0, dp_rank=0, pp_rank=src_pp_rank, cp_rank=cp_rank) chunk_shape = tensor.shape if torch.distributed.get_rank() == src_rank else None obj_list = [chunk_shape] dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group) chunk_shape = obj_list[0] if chunk_shape is None: # all or none ranks in the mp_group should reach here 
print_rank_0(f"tp_shard tensor:[{q_name}] not exist, skip collecting") return buffer_tensor = torch.empty( chunk_shape, dtype=dtype, device=get_device_id(), requires_grad=False, ) chunk_tensors = [None] * tp_size for i in range(tp_size): cur_src_rank = _megatron_calc_global_rank(tp_rank=i, dp_rank=0, pp_rank=src_pp_rank, cp_rank=cp_rank) sync_tensor = tensor if torch.distributed.get_rank() == cur_src_rank else buffer_tensor dist.broadcast(sync_tensor, src=cur_src_rank, group=mp_group) if torch.distributed.get_rank() == 0: chunk_tensors[i] = _get_cpu_tensor(sync_tensor) if torch.distributed.get_rank() == 0: full_tensor = torch.concat(chunk_tensors, dim=0) q_weight_list = [] k_weight_list = [] v_weight_list = [] hidden_size_per_head = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads) if config.num_key_value_heads >= tp_size: q_size_tp = hidden_size_per_head * config.num_attention_heads // tp_size kv_size_tp = hidden_size_per_head * config.num_key_value_heads // tp_size total_size = q_size_tp + 2 * kv_size_tp for i in range(tp_size): num_query_groups_per_partition = wrapped_models[0].config.num_query_groups // tp_size qkv_part = full_tensor[i * total_size : (i + 1) * total_size] q_size_chunk = q_size_tp // num_query_groups_per_partition kv_size_chunk = kv_size_tp // num_query_groups_per_partition for qkv_part_chunk in qkv_part.chunk(num_query_groups_per_partition): q_part = qkv_part_chunk[:q_size_chunk] k_part = qkv_part_chunk[q_size_chunk : q_size_chunk + kv_size_chunk] v_part = qkv_part_chunk[q_size_chunk + kv_size_chunk :] q_weight_list.append(q_part) k_weight_list.append(k_part) v_weight_list.append(v_part) else: q_size_tp = hidden_size_per_head * config.num_attention_heads // tp_size kv_size_tp = hidden_size_per_head total_size = q_size_tp + 2 * kv_size_tp for i in range(tp_size): num_query_groups_per_partition = wrapped_models[0].config.num_query_groups // tp_size qkv_part = full_tensor[i * total_size : (i + 1) * total_size] q_size_chunk = q_size_tp // num_query_groups_per_partition kv_size_chunk = kv_size_tp // num_query_groups_per_partition for qkv_part_chunk in qkv_part.chunk(num_query_groups_per_partition): q_part = qkv_part_chunk[:q_size_chunk] k_part = qkv_part_chunk[q_size_chunk : q_size_chunk + kv_size_chunk] v_part = qkv_part_chunk[q_size_chunk + kv_size_chunk :] q_weight_list.append(q_part) if i * config.num_key_value_heads % tp_size == 0: k_weight_list.append(k_part) v_weight_list.append(v_part) state_dict[q_name] = torch.cat(q_weight_list, dim=0) state_dict[k_name] = torch.cat(k_weight_list, dim=0) state_dict[v_name] = torch.cat(v_weight_list, dim=0) # empty cache before collecting weights get_torch_device().empty_cache() # Embeddings # ------------------- if dp_rank == 0 and cp_rank == 0: # models are identical across cp ranks # Embeddings # ------------------- print_rank_0("collecting embeddings...") gpt_model_module = _get_gpt_model(models[0]) _broadcast_tp_shard_tensor( gpt_model_module.embedding.word_embeddings.weight if pp_rank == 0 else None, "model.embed_tokens.weight", src_pp_rank=0, ) # Transformer layers # ------------------- layer_map = _megatron_calc_layer_map(config) for layer in range(config.num_hidden_layers): print_rank_0(f"collecting layer #{layer}...") layer_name = f"model.layers.{layer}" src_pp_rank, src_virtual_pp_rank, src_layer_idx = layer_map[layer] gpt_model_module = _get_gpt_model(models[src_virtual_pp_rank]) sync_layer = gpt_model_module.decoder.layers[src_layer_idx] _broadcast_tensor( 
sync_layer.self_attention.linear_qkv.layer_norm_weight, f"{layer_name}.input_layernorm.weight", src_pp_rank=src_pp_rank, ) if gpt_model_module.config.qk_layernorm: _broadcast_tensor( sync_layer.self_attention.q_layernorm.weight, f"{layer_name}.self_attn.q_norm.weight", src_pp_rank=src_pp_rank, ) _broadcast_tensor( sync_layer.self_attention.k_layernorm.weight, f"{layer_name}.self_attn.k_norm.weight", src_pp_rank=src_pp_rank, ) _broadcast_tp_shard_tensor_qkv( sync_layer.self_attention.linear_qkv.weight, f"{layer_name}.self_attn.q_proj.weight", f"{layer_name}.self_attn.k_proj.weight", f"{layer_name}.self_attn.v_proj.weight", src_pp_rank=src_pp_rank, ) if gpt_model_module.config.add_qkv_bias: _broadcast_tp_shard_tensor_qkv( sync_layer.self_attention.linear_qkv.bias, f"{layer_name}.self_attn.q_proj.bias", f"{layer_name}.self_attn.k_proj.bias", f"{layer_name}.self_attn.v_proj.bias", src_pp_rank=src_pp_rank, ) _broadcast_tp_shard_tensor( sync_layer.self_attention.linear_proj.weight, f"{layer_name}.self_attn.o_proj.weight", concat_dim=1, src_pp_rank=src_pp_rank, ) _broadcast_tensor( sync_layer.mlp.linear_fc1.layer_norm_weight, f"{layer_name}.post_attention_layernorm.weight", src_pp_rank=src_pp_rank, ) _broadcast_tp_shard_tensor_gate_up( sync_layer.mlp.linear_fc1.weight, f"{layer_name}.mlp.gate_proj.weight", f"{layer_name}.mlp.up_proj.weight", src_pp_rank=src_pp_rank, ) _broadcast_tp_shard_tensor( sync_layer.mlp.linear_fc2.weight, f"{layer_name}.mlp.down_proj.weight", concat_dim=1, src_pp_rank=src_pp_rank, ) # Final Layernorm # ------------------- print_rank_0("collecting final layernorm...") gpt_model_module = _get_gpt_model(models[-1]) _broadcast_tensor( getattr(gpt_model_module.decoder.final_layernorm, "weight", None), "model.norm.weight", src_pp_rank=pp_size - 1, ) if tie_word_embeddings: print_rank_0("tie word embedding skip load lm_head...") else: print_rank_0("collecting lm_head...") if is_value_model: lm_head_weight = None if pp_rank == pp_size - 1: lm_head_weight = getattr(gpt_model_module.output_layer, "weight", None) _broadcast_tensor(lm_head_weight, "lm_head.weight", src_pp_rank=pp_size - 1) else: _broadcast_tp_shard_tensor( getattr(gpt_model_module.output_layer, "weight", None) if pp_rank == pp_size - 1 else None, "lm_head.weight", src_pp_rank=pp_size - 1, ) dist.barrier() get_torch_device().empty_cache() if torch.distributed.get_rank() == 0: for k, v in state_dict.items(): if dtype != v.dtype: state_dict[k] = v.to(dtype) print_rank_0(f"merge megatron ckpt done, time elapsed {time.time() - start_time}s") return state_dict def merge_megatron_ckpt_gptmodel_qwen_moe( wrapped_models, config, dtype, is_value_model=False, tie_word_embeddings=False ): raise NotImplementedError("merge_megatron_ckpt_gptmodel_qwen_moe is not implemented") def merge_megatron_ckpt_gptmodel_qwen2_5_vl( wrapped_models, config, dtype, is_value_model=False, tie_word_embeddings=False ): raise NotImplementedError("merge_megatron_ckpt_gptmodel_qwen2_5_vl is not implemented") def merge_megatron_ckpt_gptmodel_dpskv3(wrapped_models, config, dtype, is_value_model=False, tie_word_embeddings=False): raise NotImplementedError("merge_megatron_ckpt_gptmodel_dpskv3 is not implemented") def merge_megatron_ckpt_gptmodel_mixtral( wrapped_models, config, dtype, is_value_model=False, tie_word_embeddings=False ): raise NotImplementedError("merge_megatron_ckpt_gptmodel_mixtral is not implemented") ================================================ FILE: verl_rl/verl/models/mcore/util.py ================================================ # 
Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from megatron.core import parallel_state as mpu from megatron.core.packed_seq_params import PackedSeqParams from verl.utils.model import CausalLMOutputForPPO def preprocess_packed_seqs( input_ids: torch.Tensor, attention_mask: torch.Tensor, pre_process: bool = True ) -> tuple[torch.Tensor, PackedSeqParams]: """ Preprocess packed sequences CP splits sequence into CP*2 chunks, and each GPU gets 2 chunks (GPU0 gets first and last chunks, GPU1 gets second and second last chunks, and so on), this is for load balancing with causal masking. See https://github.com/NVIDIA/TransformerEngine/issues/1368 """ batch_size = input_ids.shape[0] seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) tp_size = mpu.get_tensor_model_parallel_world_size() cp_size = mpu.get_context_parallel_world_size() cp_rank = mpu.get_context_parallel_rank() align_size = tp_size * cp_size * 2 if cp_size > 1 else tp_size pad_size = (align_size - seqlens_in_batch % align_size) % align_size seqlens_in_batch_padded = seqlens_in_batch + pad_size cu_seqlens = torch.zeros(batch_size + 1, dtype=torch.int32, device=input_ids.device) cu_seqlens[1:] = torch.cumsum(seqlens_in_batch, dim=0) cu_seqlens_padded = torch.zeros(batch_size + 1, dtype=torch.int32, device=input_ids.device) cu_seqlens_padded[1:] = torch.cumsum(seqlens_in_batch_padded, dim=0) max_seqlen_in_batch = seqlens_in_batch_padded.max().item() shape = list(input_ids.shape[1:]) shape[0] = seqlens_in_batch_padded.sum().item() // cp_size if pre_process: input_ids_rmpad = torch.zeros(shape, dtype=input_ids.dtype, device=input_ids.device) for i in range(batch_size): if cp_size <= 1: seqlen = seqlens_in_batch[i] input_ids_rmpad[cu_seqlens_padded[i] : cu_seqlens_padded[i] + seqlen] = input_ids[i, attention_mask[i]] continue seqlen = seqlens_in_batch_padded[i] // cp_size half_seqlen = seqlen // 2 start_idx = cu_seqlens_padded[i] // cp_size # split to 2 chunks d = input_ids[i, attention_mask[i]] input_ids_rmpad[start_idx : start_idx + half_seqlen] = d[ half_seqlen * cp_rank : half_seqlen * (cp_rank + 1) ] remain_start = seqlens_in_batch_padded[i] - half_seqlen * (cp_rank + 1) remain_end = seqlens_in_batch_padded[i] - half_seqlen * cp_rank remain_end = min(remain_end, d.shape[0]) remain_len = remain_end - remain_start if remain_len > 0: input_ids_rmpad[start_idx + half_seqlen : start_idx + half_seqlen + remain_len] = d[ remain_start:remain_end ] packed_seq_params = PackedSeqParams( qkv_format="thd", cu_seqlens_q=cu_seqlens_padded, max_seqlen_q=max_seqlen_in_batch, cu_seqlens_kv=cu_seqlens_padded, max_seqlen_kv=max_seqlen_in_batch, cu_seqlens_q_padded=cu_seqlens_padded, cu_seqlens_kv_padded=cu_seqlens_padded, ) if pre_process: return input_ids_rmpad.unsqueeze(0), packed_seq_params else: return input_ids, packed_seq_params def postprocess_packed_seqs( output: torch.Tensor, packed_seq_params: PackedSeqParams, 
attention_mask: torch.Tensor, batch_size: int, seq_len: int, post_process: bool = True, ) -> torch.Tensor: """ Postprocess packed sequences """ if not post_process: return output shape = [batch_size, seq_len] + list(output.shape[2:]) # 1,packed, dim -> batch_size, seq_len, dim output_new = torch.zeros(shape, dtype=output.dtype, device=output.device) cp_size = mpu.get_context_parallel_world_size() # all gather output across context parallel group if cp_size > 1: # output shape: [1, packed_len, hidden_dim] # need to gather across cp group and concatenate in sequence dimension output_list = [torch.empty_like(output) for _ in range(cp_size)] torch.distributed.all_gather(output_list, output.detach(), group=mpu.get_context_parallel_group()) output_list[mpu.get_context_parallel_rank()] = output else: output_list = [output] for i in range(batch_size): if cp_size <= 1: s = attention_mask[i].sum().item() output_new[i, attention_mask[i]] = output[0][ packed_seq_params.cu_seqlens_q_padded[i] : packed_seq_params.cu_seqlens_q_padded[i] + s ] continue s_len_padded_chunk = ( packed_seq_params.cu_seqlens_q_padded[i + 1] - packed_seq_params.cu_seqlens_q_padded[i] ) // cp_size half_seqlen = s_len_padded_chunk // 2 s_len = attention_mask[i].sum().item() s_len_padded = s_len_padded_chunk * cp_size tmp = torch.empty(s_len_padded, *output.shape[2:], device=output.device) for j in range(cp_size): o = output_list[j][0] # split to 2 chunks packed_start_idx = packed_seq_params.cu_seqlens_q_padded[i] // cp_size o0, o1 = ( o[packed_start_idx : packed_start_idx + half_seqlen], o[packed_start_idx + half_seqlen : packed_start_idx + s_len_padded_chunk], ) tmp[j * half_seqlen : (j + 1) * half_seqlen] = o0 tmp[s_len_padded - (j + 1) * half_seqlen : s_len_padded - j * half_seqlen] = o1 output_new[i, attention_mask[i]] = tmp[:s_len] return output_new def remove_left_padding( input_ids: torch.Tensor, attention_mask: torch.Tensor, position_ids: torch.Tensor, sequence_parallel: bool = False, pre_process: bool = True, ): """ Remove left padding from input_ids, attention_mask and position_ids return new_input_ids, new_attention_mask, new_position_ids """ assert attention_mask.ndim == 2 assert position_ids.ndim == 2 cp_size = mpu.get_context_parallel_world_size() assert cp_size == 1, "Context parallel size without seq_pack is not supported" batch_size = input_ids.shape[0] shape = list(input_ids.shape) # batch_size, seq_len,... 
seq_lens = attention_mask.sum(dim=1) seq_len = seq_lens.max().item() if sequence_parallel: sp_world_size = mpu.get_tensor_model_parallel_world_size() pad_size = (sp_world_size - seq_len % sp_world_size) % sp_world_size seq_len = seq_len + pad_size shape[1] = seq_len if pre_process: new_input_ids = torch.zeros(dtype=input_ids.dtype, device=input_ids.device, size=shape) new_attention_mask = torch.zeros( dtype=attention_mask.dtype, device=attention_mask.device, size=(batch_size, seq_len) ) new_position_ids = torch.zeros(dtype=position_ids.dtype, device=position_ids.device, size=(batch_size, seq_len)) for i in range(batch_size): if pre_process: new_input_ids[i, : seq_lens[i]] = input_ids[i, attention_mask[i]] new_attention_mask[i, : seq_lens[i]] = attention_mask[i, attention_mask[i]] new_position_ids[i, : seq_lens[i]] = position_ids[i, attention_mask[i]] if pre_process: return new_input_ids, new_attention_mask, new_position_ids else: return input_ids, new_attention_mask, new_position_ids def recover_left_padding( result, attention_mask: torch.Tensor, original_attention_mask: torch.Tensor, origin_seqlen: int, post_process: bool = True, ): """ Recover left padding from result return result """ if not post_process: return result shape = list(result.shape) batch_size = shape[0] shape[1] = origin_seqlen new_result = torch.zeros(dtype=result.dtype, device=result.device, size=shape) for i in range(batch_size): new_result[i, original_attention_mask[i]] = result[i, attention_mask[i]] return new_result def postprocess_packed_seqs_for_dict_output( labels_mask: torch.Tensor, output: CausalLMOutputForPPO, packed_seq_params: PackedSeqParams, attention_mask: torch.Tensor, batch_size: int, seq_len: int, post_process: bool = True, ) -> dict[str, torch.Tensor]: """_summary_ For fused kernels, the output is a dictionary with keys like 'log_probs', 'entropy', etc. This function post-processes each tensor in the output dictionary. Args: output (CausalLMOutputForPPO): _description_ packed_seq_params (PackedSeqParams): _description_ attention_mask (torch.Tensor): _description_ batch_size (int): _description_ seq_len (int): _description_ post_process (bool, optional): _description_. Defaults to True. Returns: CausalLMOutputForPPO: _description_ """ ret = {} output.entropy = output.entropy.view(1, -1) output.log_probs = output.log_probs.view(1, -1) output.log_probs = output.log_probs.masked_fill(~labels_mask, 0.0) ret["entropy"] = postprocess_packed_seqs( output.entropy, packed_seq_params, attention_mask, batch_size, seq_len, post_process=post_process ) ret["log_probs"] = postprocess_packed_seqs( output.log_probs, packed_seq_params, attention_mask, batch_size, seq_len, post_process=post_process ) return ret ================================================ FILE: verl_rl/verl/models/mcore/weight_converter.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. # online convert mcore weight to pure huggingface weight, no any fusion # including format conversion and name mapping # not including resharding import torch from megatron.core.transformer import TransformerConfig from transformers import PretrainedConfig class McoreToHFWeightConverterBase: def __init__(self, hf_config: PretrainedConfig, mcore_config: TransformerConfig): self.hf_config = hf_config self.mcore_config = mcore_config def convert_param(self, name: str, params_one_group: list[torch.Tensor]) -> torch.Tensor: raise NotImplementedError class McoreToHFWeightConverterDense(McoreToHFWeightConverterBase): def _convert_attention_param(self, name: str, params: list[torch.Tensor]) -> tuple[list[str], list[torch.Tensor]]: # 'decoder.layers.0.self_attention.linear_proj.weight' # 'decoder.layers.0.self_attention.linear_qkv.layer_norm_weight' # 'decoder.layers.0.self_attention.linear_qkv.weight' # 'decoder.layers.0.self_attention.linear_qkv.bias' layer_number = name.split(".")[2] convert_names = [] if "self_attention.linear_qkv.bias" in name or "self_attention.linear_qkv.weight" in name: param_type = name.split(".")[-1] assert param_type == "bias" or param_type == "weight" convert_names.append(f"model.layers.{layer_number}.self_attn.q_proj.{param_type}") convert_names.append(f"model.layers.{layer_number}.self_attn.k_proj.{param_type}") convert_names.append(f"model.layers.{layer_number}.self_attn.v_proj.{param_type}") assert len(params) == 3 elif "self_attention.linear_proj.weight" in name: convert_names.append(f"model.layers.{layer_number}.self_attn.o_proj.weight") assert len(params) == 1 elif "self_attention.linear_qkv.layer_norm_weight" in name: convert_names.append(f"model.layers.{layer_number}.input_layernorm.weight") assert len(params) == 1 elif "self_attention.q_layernorm.weight" in name: convert_names.append(f"model.layers.{layer_number}.self_attn.q_norm.weight") assert len(params) == 1 elif "self_attention.k_layernorm.weight" in name: convert_names.append(f"model.layers.{layer_number}.self_attn.k_norm.weight") assert len(params) == 1 else: raise NotImplementedError(f"Unsupported parameter name: {name}") return convert_names, params def _convert_mlp_param(self, name: str, params: list[torch.Tensor]) -> tuple[list[str], list[torch.Tensor]]: # 'decoder.layers.0.mlp.linear_fc1.layer_norm_weight' # 'decoder.layers.0.mlp.linear_fc1.weight' # 'decoder.layers.0.mlp.linear_fc2.weight' layer_number = name.split(".")[2] convert_names = [] if "mlp.linear_fc1.weight" in name: # split gate_proj and up_proj convert_names.append(f"model.layers.{layer_number}.mlp.gate_proj.weight") convert_names.append(f"model.layers.{layer_number}.mlp.up_proj.weight") assert len(params) == 2 elif "mlp.linear_fc1.layer_norm_weight" in name: convert_names.append(f"model.layers.{layer_number}.post_attention_layernorm.weight") assert len(params) == 1 elif "mlp.linear_fc2.weight" in name: convert_names.append(f"model.layers.{layer_number}.mlp.down_proj.weight") assert len(params) == 1 else: raise NotImplementedError(f"Unsupported parameter name: {name}") return convert_names, params def convert_param(self, name: str, params_one_group: list[torch.Tensor]) -> tuple[list[str], list[torch.Tensor]]: direct_name_mapping = { "embedding.word_embeddings.weight": "model.embed_tokens.weight", "decoder.final_layernorm.weight": "model.norm.weight", "output_layer.weight": "lm_head.weight", } if name in direct_name_mapping: return 
[direct_name_mapping[name]], [params_one_group[0]] if "self_attention" in name: return self._convert_attention_param(name, params_one_group) elif "mlp" in name: return self._convert_mlp_param(name, params_one_group) else: raise NotImplementedError(f"Unsupported parameter name: {name}") class McoreToHFWeightConverterQwen2Moe(McoreToHFWeightConverterDense): def _convert_mlp_param(self, name: str, params: list[torch.Tensor]) -> tuple[list[str], list[torch.Tensor]]: # 'decoder.layers.0.pre_mlp_layernorm.weight', # 'decoder.layers.0.mlp.router.weight', # 'decoder.layers.0.mlp.shared_experts.gate_weight', # 'decoder.layers.0.mlp.shared_experts.linear_fc1.weight', # 'decoder.layers.0.mlp.shared_experts.linear_fc2.weight' # moe1 # 'decoder.layers.0.mlp.experts.linear_fc1.weight0', # 'decoder.layers.0.mlp.experts.linear_fc1.weight1', # 'decoder.layers.0.mlp.experts.linear_fc1.weight2', # 'decoder.layers.0.mlp.experts.linear_fc1.weight3', # moe2 # 'decoder.layers.0.mlp.experts.linear_fc2.weight0', # 'decoder.layers.0.mlp.experts.linear_fc2.weight1', layer_number = name.split(".")[2] convert_names = [] if "pre_mlp_layernorm" in name: convert_names.append(f"model.layers.{layer_number}.post_attention_layernorm.weight") assert len(params) == 1 elif "mlp.router.weight" in name: convert_names.append(f"model.layers.{layer_number}.mlp.gate.weight") assert len(params) == 1 elif "shared_experts.gate_weight" in name: convert_names.append(f"model.layers.{layer_number}.mlp.shared_expert_gate.weight") assert len(params) == 1 elif "shared_experts.linear_fc1.weight" in name: # split gate_proj and up_proj convert_names.append(f"model.layers.{layer_number}.mlp.shared_expert.gate_proj.weight") convert_names.append(f"model.layers.{layer_number}.mlp.shared_expert.up_proj.weight") assert len(params) == 2 elif "shared_experts.linear_fc2.weight" in name: convert_names.append(f"model.layers.{layer_number}.mlp.shared_expert.down_proj.weight") assert len(params) == 1 elif "mlp.experts.linear_fc1" in name: # split gate_proj and up_proj expert_id = name.split("weight")[-1] convert_names.append(f"model.layers.{layer_number}.mlp.experts.{expert_id}.gate_proj.weight") convert_names.append(f"model.layers.{layer_number}.mlp.experts.{expert_id}.up_proj.weight") assert len(params) == 2 elif "mlp.experts.linear_fc2" in name: expert_id = name.split("weight")[-1] convert_names.append(f"model.layers.{layer_number}.mlp.experts.{expert_id}.down_proj.weight") assert len(params) == 1 else: raise NotImplementedError(f"Unsupported parameter name: {name}") return convert_names, params class McoreToHFWeightConverterQwen2_5_VL(McoreToHFWeightConverterDense): def convert_param(self, name: str, params_one_group: list[torch.Tensor]) -> tuple[list[str], list[torch.Tensor]]: direct_name_mapping = { "language_model.embedding.word_embeddings.weight": "model.embed_tokens.weight", "language_model.decoder.final_layernorm.weight": "model.norm.weight", "language_model.output_layer.weight": "lm_head.weight", "vision_model.patch_embed.proj.weight": "visual.patch_embed.proj.weight", "vision_model.decoder.final_layernorm.weight": "visual.merger.ln_q.weight", "vision_model.projection.encoder.linear_fc1.weight": "visual.merger.mlp.0.weight", "vision_model.projection.encoder.linear_fc1.bias": "visual.merger.mlp.0.bias", "vision_model.projection.encoder.linear_fc2.weight": "visual.merger.mlp.2.weight", "vision_model.projection.encoder.linear_fc2.bias": "visual.merger.mlp.2.bias", } if name in direct_name_mapping: return [direct_name_mapping[name]], 
[params_one_group[0]] if "self_attention" in name: return self._convert_attention_param(name, params_one_group) elif "mlp" in name: return self._convert_mlp_param(name, params_one_group) else: raise NotImplementedError(f"Unsupported parameter name: {name}") def _convert_attention_param(self, name: str, params: list[torch.Tensor]) -> tuple[list[str], list[torch.Tensor]]: model_type, _, _, layer_number = name.split(".")[:4] convert_names = [] if model_type == "language_model": name_map_after_layer = { "self_attention.linear_qkv.bias": [ "self_attn.q_proj.bias", "self_attn.k_proj.bias", "self_attn.v_proj.bias", ], "self_attention.linear_qkv.weight": [ "self_attn.q_proj.weight", "self_attn.k_proj.weight", "self_attn.v_proj.weight", ], "self_attention.linear_proj.weight": "self_attn.o_proj.weight", "self_attention.linear_qkv.layer_norm_weight": "input_layernorm.weight", } name_after_layer = ".".join(name.split(".")[-3:]) mapped_name = name_map_after_layer.get(name_after_layer) if isinstance(mapped_name, list): assert len(params) == len(mapped_name) for one in mapped_name: convert_names.append(f"model.layers.{layer_number}.{one}") else: assert len(params) == 1 convert_names.append(f"model.layers.{layer_number}.{mapped_name}") elif model_type == "vision_model": name_map_after_layer = { "self_attention.linear_proj.weight": "attn.proj.weight", "self_attention.linear_proj.bias": "attn.proj.bias", "self_attention.linear_qkv.layer_norm_weight": "norm1.weight", } name_after_layer = ".".join(name.split(".")[-3:]) mapped_name = name_map_after_layer.get(name_after_layer, None) if mapped_name is None: assert "linear_qkv" in name_after_layer assert len(params) == 3 new_param = torch.cat(params, dim=0) params = [new_param] if "bias" in name_after_layer: convert_names.append(f"visual.blocks.{layer_number}.attn.qkv.bias") else: convert_names.append(f"visual.blocks.{layer_number}.attn.qkv.weight") else: assert len(params) == 1 convert_names.append(f"visual.blocks.{layer_number}.{mapped_name}") else: raise NotImplementedError(f"Unsupported model type: {model_type}") return convert_names, params def _convert_mlp_param(self, name: str, params: list[torch.Tensor]) -> tuple[list[str], list[torch.Tensor]]: model_type, _, _, layer_number = name.split(".")[:4] convert_names = [] if model_type == "language_model": name_map_after_layer = { "mlp.linear_fc1.weight": ["mlp.gate_proj.weight", "mlp.up_proj.weight"], "mlp.linear_fc1.bias": ["mlp.gate_proj.bias", "mlp.up_proj.bias"], "mlp.linear_fc2.weight": "mlp.down_proj.weight", "mlp.linear_fc2.bias": "mlp.down_proj.bias", "mlp.linear_fc1.layer_norm_weight": "post_attention_layernorm.weight", } name_after_layer = ".".join(name.split(".")[-3:]) mapped_name = name_map_after_layer.get(name_after_layer) if isinstance(mapped_name, list): assert len(params) == len(mapped_name) for one in mapped_name: convert_names.append(f"model.layers.{layer_number}.{one}") else: assert len(params) == 1 convert_names.append(f"model.layers.{layer_number}.{mapped_name}") elif model_type == "vision_model": name_map_after_layer = { "mlp.linear_fc1.weight": ["mlp.gate_proj.weight", "mlp.up_proj.weight"], "mlp.linear_fc1.bias": ["mlp.gate_proj.bias", "mlp.up_proj.bias"], "mlp.linear_fc2.weight": "mlp.down_proj.weight", "mlp.linear_fc2.bias": "mlp.down_proj.bias", "mlp.linear_fc1.layer_norm_weight": "norm2.weight", } name_after_layer = ".".join(name.split(".")[-3:]) mapped_name = name_map_after_layer.get(name_after_layer) if isinstance(mapped_name, list): assert len(params) == len(mapped_name) for one 
in mapped_name: convert_names.append(f"visual.blocks.{layer_number}.{one}") else: assert len(params) == 1 convert_names.append(f"visual.blocks.{layer_number}.{mapped_name}") else: raise NotImplementedError(f"Unsupported model type: {model_type}") return convert_names, params class McoreToHFWeightConverterDpskv3(McoreToHFWeightConverterBase): def _convert_attention_param(self, name: str, params: list[torch.Tensor]) -> tuple[list[str], list[torch.Tensor]]: # mcore # 'decoder.layers.0.input_layernorm.weight' # 'decoder.layers.0.self_attention.linear_proj.weight' # 'decoder.layers.0.self_attention.linear_q_proj.weight' # 'decoder.layers.0.self_attention.linear_kv_down_proj.weight' # 'decoder.layers.0.self_attention.linear_kv_up_proj.layer_norm_weight' # 'decoder.layers.0.self_attention.linear_kv_up_proj.weight' # 'decoder.layers.0.self_attention.linear_q_down_proj.weight' # 'decoder.layers.0.self_attention.linear_q_up_proj.weight' # 'decoder.layers.0.self_attention.linear_q_up_proj.layer_norm_weight' # hf # 'model.layers.0.input_layernorm.weight' # 'model.layers.0.self_attn.o_proj.weight' # 'model.layers.0.self_attn.q_proj.weight' # 'model.layers.0.self_attn.kv_a_proj_with_mqa.weight' # 'model.layers.0.self_attn.kv_a_layernorm.weight' # 'model.layers.0.self_attn.kv_b_proj.weight' # 'model.layers.0.self_attn.q_a_proj.weight' # 'model.layers.0.self_attn.q_b_proj.weight' # 'model.layers.0.self_attn.q_a_layernorm.weight' name_map_after_layer = { "input_layernorm.weight": "input_layernorm.weight", "self_attention.linear_proj.weight": "self_attn.o_proj.weight", "self_attention.linear_q_proj.weight": "self_attn.q_proj.weight", "self_attention.linear_kv_down_proj.weight": "self_attn.kv_a_proj_with_mqa.weight", "self_attention.linear_kv_up_proj.layer_norm_weight": "self_attn.kv_a_layernorm.weight", "self_attention.linear_kv_up_proj.weight": "self_attn.kv_b_proj.weight", "self_attention.linear_q_down_proj.weight": "self_attn.q_a_proj.weight", "self_attention.linear_q_up_proj.weight": "self_attn.q_b_proj.weight", "self_attention.linear_q_up_proj.layer_norm_weight": "self_attn.q_a_layernorm.weight", } assert len(params) == 1 convert_names = [] layer_number = name.split(".")[2] name_after_layer = name.split(f".{layer_number}.")[1] convert_names.append(f"model.layers.{layer_number}.{name_map_after_layer[name_after_layer]}") return convert_names, params def _convert_mlp_param(self, name: str, params: list[torch.Tensor]) -> tuple[list[str], list[torch.Tensor]]: # mcore dense # 'decoder.layers.0.mlp.linear_fc1.layer_norm_weight' # 'decoder.layers.0.mlp.linear_fc2.weight' # 'decoder.layers.0.mlp.linear_fc1.weight' # --- # 'decoder.layers.1.mlp.shared_experts.linear_fc1.weight' # --- # 'decoder.layers.1.mlp.shared_experts.linear_fc2.weight' # hf dense # 'model.layers.0.post_attention_layernorm.weight' # 'model.layers.0.mlp.down_proj.weight' # 'model.layers.0.mlp.gate_proj.weight' # 'model.layers.0.mlp.up_proj.weight' # 'model.layers.1.mlp.shared_experts.gate_proj.weight' # 'model.layers.1.mlp.shared_experts.up_proj.weight' # 'model.layers.1.mlp.shared_experts.down_proj.weight' # mcore moe # 'decoder.layers.1.pre_mlp_layernorm.weight' # 'decoder.layers.1.mlp.router.weight' # 'decoder.layers.1.mlp.router.expert_bias' # 'decoder.layers.1.mlp.experts.linear_fc1.weight0' # --- # 'decoder.layers.1.mlp.experts.linear_fc2.weight0' # hf moe # 'model.layers.1.post_attention_layernorm.weight' # 'model.layers.1.mlp.gate.weight' # 'model.layers.1.mlp.gate.e_score_correction_bias' # 
'model.layers.1.mlp.experts.0.gate_proj.weight' # 'model.layers.1.mlp.experts.0.up_proj.weight' # 'model.layers.1.mlp.experts.0.down_proj.weight' name_map_after_layer = { "mlp.linear_fc1.layer_norm_weight": "post_attention_layernorm.weight", "mlp.linear_fc2.weight": "mlp.down_proj.weight", "mlp.shared_experts.linear_fc2.weight": "mlp.shared_experts.down_proj.weight", "mlp.linear_fc1.weight": ["mlp.gate_proj.weight", "mlp.up_proj.weight"], "mlp.shared_experts.linear_fc1.weight": [ "mlp.shared_experts.gate_proj.weight", "mlp.shared_experts.up_proj.weight", ], "pre_mlp_layernorm.weight": "post_attention_layernorm.weight", "mlp.router.weight": "mlp.gate.weight", "mlp.router.expert_bias": "mlp.gate.e_score_correction_bias", } convert_names = [] layer_number = name.split(".")[2] name_after_layer = name.split(f".{layer_number}.")[1] if name_after_layer in name_map_after_layer: mapped_name = name_map_after_layer[name_after_layer] if isinstance(mapped_name, list): assert len(params) == len(mapped_name) for one in mapped_name: convert_names.append(f"model.layers.{layer_number}.{one}") else: assert len(params) == 1 convert_names.append(f"model.layers.{layer_number}.{mapped_name}") else: if "mlp.experts.linear_fc1.weight" in name: expert_id = name.split("weight")[-1] convert_names.append(f"model.layers.{layer_number}.mlp.experts.{expert_id}.gate_proj.weight") convert_names.append(f"model.layers.{layer_number}.mlp.experts.{expert_id}.up_proj.weight") assert len(params) == 2 elif "mlp.experts.linear_fc2.weight" in name: expert_id = name.split("weight")[-1] convert_names.append(f"model.layers.{layer_number}.mlp.experts.{expert_id}.down_proj.weight") assert len(params) == 1 else: raise NotImplementedError(f"Unsupported parameter name: {name}") return convert_names, params def _convert_mtp_param(self, name: str, params: list[torch.Tensor]) -> tuple[list[str], list[torch.Tensor]]: assert self.mcore_config.mtp_num_layers == 1, "only support one mtp layer for now" assert self.mcore_config.num_layers == 61, "only support 61 layers for now" direct_name_mapping = { "mtp.layers.0.enorm.weight": "model.layers.61.enorm.weight", "mtp.layers.0.hnorm.weight": "model.layers.61.hnorm.weight", "mtp.layers.0.eh_proj.weight": "model.layers.61.eh_proj.weight", "mtp.layers.0.final_layernorm.weight": "model.layers.61.shared_head.norm.weight", } if name in direct_name_mapping: return [direct_name_mapping[name]], [params[0]] assert "mtp.layers.0.transformer_layer" in name, "only support transformer layer for now" # use proxy name to convert proxy_name = name.replace("mtp.layers.0.transformer_layer", "decoder.layers.61") if "self_attention" in proxy_name or "input_layernorm.weight" in proxy_name: convert_names, params = self._convert_attention_param(proxy_name, params) elif "mlp" in proxy_name: convert_names, params = self._convert_mlp_param(proxy_name, params) else: raise NotImplementedError(f"Unsupported parameter name: {name}") return convert_names, params def convert_param(self, name: str, params_one_group: list[torch.Tensor]) -> tuple[list[str], list[torch.Tensor]]: direct_name_mapping = { "embedding.word_embeddings.weight": "model.embed_tokens.weight", "decoder.final_layernorm.weight": "model.norm.weight", "output_layer.weight": "lm_head.weight", } if name in direct_name_mapping: return [direct_name_mapping[name]], [params_one_group[0]] if "mtp" in name: return self._convert_mtp_param(name, params_one_group) elif "self_attention" in name or "input_layernorm.weight" in name: return self._convert_attention_param(name, 
params_one_group) elif "mlp" in name: return self._convert_mlp_param(name, params_one_group) else: raise NotImplementedError(f"Unsupported parameter name: {name}") class McoreToHFWeightConverterMixtral(McoreToHFWeightConverterDense): def _convert_mlp_param(self, name: str, params: list[torch.Tensor]) -> tuple[list[str], list[torch.Tensor]]: # decoder.layers.0.mlp.router.weight # decoder.layers.0.mlp.experts.linear_fc1.weight0 - weight7 # decoder.layers.0.mlp.experts.linear_fc2.weight0 - weight7 layer_number = name.split(".")[2] convert_names = [] if "pre_mlp_layernorm" in name: convert_names.append(f"model.layers.{layer_number}.post_attention_layernorm.weight") elif "mlp.router.weight" in name: convert_names.append(f"model.layers.{layer_number}.block_sparse_moe.gate.weight") elif "mlp.experts.linear_fc1.weight" in name: expert_id = name.split("weight")[-1] convert_names.append(f"model.layers.{layer_number}.block_sparse_moe.experts.{expert_id}.w1.weight") convert_names.append(f"model.layers.{layer_number}.block_sparse_moe.experts.{expert_id}.w3.weight") elif "mlp.experts.linear_fc2.weight" in name: expert_id = name.split("weight")[-1] convert_names.append(f"model.layers.{layer_number}.block_sparse_moe.experts.{expert_id}.w2.weight") else: raise NotImplementedError(f"Unsupported parameter name: {name}") return convert_names, params class McoreToHFWeightConverterQwen3Moe(McoreToHFWeightConverterDense): def _convert_mlp_param(self, name: str, params: list[torch.Tensor]) -> tuple[list[str], list[torch.Tensor]]: # qwen3 moe no share expert # 'decoder.layers.0.pre_mlp_layernorm.weight', # 'decoder.layers.0.mlp.router.weight', # moe1 # 'decoder.layers.0.mlp.experts.linear_fc1.weight0', # 'decoder.layers.0.mlp.experts.linear_fc1.weight1', # 'decoder.layers.0.mlp.experts.linear_fc1.weight2', # 'decoder.layers.0.mlp.experts.linear_fc1.weight3', # moe2 # 'decoder.layers.0.mlp.experts.linear_fc2.weight0', # 'decoder.layers.0.mlp.experts.linear_fc2.weight1', layer_number = name.split(".")[2] convert_names = [] if "pre_mlp_layernorm" in name: convert_names.append(f"model.layers.{layer_number}.post_attention_layernorm.weight") assert len(params) == 1 elif "mlp.router.weight" in name: convert_names.append(f"model.layers.{layer_number}.mlp.gate.weight") assert len(params) == 1 elif "mlp.experts.linear_fc1" in name: # split gate_proj and up_proj expert_id = name.split("weight")[-1] convert_names.append(f"model.layers.{layer_number}.mlp.experts.{expert_id}.gate_proj.weight") convert_names.append(f"model.layers.{layer_number}.mlp.experts.{expert_id}.up_proj.weight") assert len(params) == 2 elif "mlp.experts.linear_fc2" in name: expert_id = name.split("weight")[-1] convert_names.append(f"model.layers.{layer_number}.mlp.experts.{expert_id}.down_proj.weight") assert len(params) == 1 else: raise NotImplementedError(f"Unsupported parameter name: {name}") return convert_names, params ================================================ FILE: verl_rl/verl/models/qwen2/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and
# limitations under the License.


================================================
FILE: verl_rl/verl/models/qwen2/megatron/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .modeling_qwen2_megatron import (
    ParallelQwen2ForCausalLM,
    # rmpad with megatron
    ParallelQwen2ForCausalLMRmPad,
    # rmpad with megatron and pipeline parallelism
    ParallelQwen2ForCausalLMRmPadPP,
    ParallelQwen2ForValueRmPad,
    ParallelQwen2ForValueRmPadPP,
    # original model with megatron
    ParallelQwen2Model,
)

__all__ = [
    "ParallelQwen2ForCausalLM",
    "ParallelQwen2ForCausalLMRmPad",
    "ParallelQwen2ForCausalLMRmPadPP",
    "ParallelQwen2ForValueRmPad",
    "ParallelQwen2ForValueRmPadPP",
    "ParallelQwen2Model",
]


================================================
FILE: verl_rl/verl/models/qwen2/megatron/checkpoint_utils/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


================================================
FILE: verl_rl/verl/models/qwen2/megatron/checkpoint_utils/qwen2_loader.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
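# ---------------------------------------------------------------------------
# Editorial sketch (not part of the original file): a minimal, self-contained
# illustration of the global->local layer mapping that _megatron_calc_layer_map
# below computes. Each virtual-pipeline chunk on every pipeline rank owns a
# contiguous block of num_layers_per_model layers. The toy sizes used here
# (8 layers, pp_size=2, virtual_pp_size=2) are assumptions for illustration.
def _demo_layer_map(num_hidden_layers=8, pp_size=2, virtual_pp_size=2):
    num_layers_per_model = num_hidden_layers // pp_size // virtual_pp_size  # -> 2
    layer_map = {}
    for pp_rank in range(pp_size):
        for vpp_rank in range(virtual_pp_size):
            # same offset formula as _megatron_calc_layer_map below
            offset = vpp_rank * (num_hidden_layers // virtual_pp_size) + pp_rank * num_layers_per_model
            for local_idx in range(num_layers_per_model):
                layer_map[offset + local_idx] = (pp_rank, vpp_rank, local_idx)
    return layer_map


# quick self-check: global layers 0-1 -> (pp0, vpp0), 2-3 -> (pp1, vpp0),
#                                 4-5 -> (pp0, vpp1), 6-7 -> (pp1, vpp1)
assert _demo_layer_map()[4] == (0, 1, 0)
# ---------------------------------------------------------------------------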
import time

import torch
import torch.distributed as dist

from verl.utils.device import get_device_id, get_torch_device


def _megatron_calc_layer_map(config):
    """Calculate the mapping of global layer_idx to local layer_idx

    Returns:
        layer_map (Dict: int -> tuple(int, int, int)):
            mapping from the global layer index to
            a tuple of (pp_rank, virtual_pp_rank, layer_idx inside model)
    """
    from megatron.core import mpu

    pp_size = mpu.get_pipeline_model_parallel_world_size()
    virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1

    layer_map = dict()
    num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size
    assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers

    for pp_rank_idx in range(pp_size):
        for virtual_pp_rank_idx in range(virtual_pp_size):
            layer_offset = (
                virtual_pp_rank_idx * (config.num_hidden_layers // virtual_pp_size)
                + pp_rank_idx * num_layers_per_model
            )
            for layer_idx in range(num_layers_per_model):
                layer_map[layer_offset + layer_idx] = (
                    pp_rank_idx,
                    virtual_pp_rank_idx,
                    layer_idx,
                )
    return layer_map


def load_state_dict_to_megatron_qwen2(
    state_dict, wrapped_models, config, params_dtype, is_value_model=False, tie_word_embeddings=False
):
    """Load merged state_dict to sharded Megatron module in training."""
    from megatron.core import DistributedDataParallel as LocalDDP
    from megatron.core import mpu
    from megatron.core.transformer.module import Float16Module
    from torch.nn.parallel import DistributedDataParallel as torchDDP

    from verl.utils.logger import print_rank_0
    from verl.utils.megatron_utils import unwrap_model

    start_time = time.time()

    def _get_gpt_model(model):
        return model

    def fetch_params(module):
        # `torch.distributed.fetch` does not exist in PyTorch; replicating
        # parameters across the data-parallel group is done with a broadcast
        # from the DP source rank (mirroring broadcast_params in the
        # deprecated loader).
        for param in module.parameters():
            torch.distributed.broadcast(
                param.data, src=mpu.get_data_parallel_src_rank(), group=mpu.get_data_parallel_group()
            )

    dp_rank = mpu.get_data_parallel_rank()
    pp_rank = mpu.get_pipeline_model_parallel_rank()
    pp_size = mpu.get_pipeline_model_parallel_world_size()
    virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1
    mp_group = mpu.get_model_parallel_group()

    if torch.distributed.get_rank() == 0:
        assert mp_group.rank() == 0, f"mp_rank:[{mp_group.rank()}] != 0 on rank #0"
        assert pp_rank == 0, f"pp_rank:[{pp_rank}] != 0 on rank #0"
        assert dp_rank == 0, f"dp_rank:[{dp_rank}] != 0 on rank #0"

    if not isinstance(wrapped_models, list | tuple):
        # wrap a single module in a list; list(module) would try to iterate the module itself
        wrapped_models = [wrapped_models]

    assert len(wrapped_models) == virtual_pp_size
    num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size
    assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers, (
        f"num_layers_per_model: {num_layers_per_model} * pp_size: {pp_size} * virtual_pp_size: "
        f"{virtual_pp_size} != config.num_hidden_layers: {config.num_hidden_layers}"
    )

    models = [None] * len(wrapped_models)

    for i, wrapped_model in enumerate(wrapped_models):
        models[i] = unwrap_model(wrapped_model, (torchDDP, LocalDDP, Float16Module))
        gpt_model_module = _get_gpt_model(models[i])
        assert len(gpt_model_module.model.layers) == num_layers_per_model

    def _fetch_tensor(tensor, name) -> torch.Tensor:
        """fetch tensor"""
        nonlocal state_dict
        if tensor is not None:
            tensor.data.copy_(state_dict[name], non_blocking=True)

    def _fetch_tp_shard_tensor_vocab(tensor, name, chunk_dim=0, mutate_func=None) -> torch.Tensor:
        """fetch tensor in tp shards"""
        nonlocal state_dict
        tp_rank = mpu.get_tensor_model_parallel_rank()
        tp_size = mpu.get_tensor_model_parallel_world_size()
        if name in state_dict:
            full_weight = state_dict[name]
            if mutate_func is not None:
                full_weight = mutate_func(full_weight)
            tensor_chunk = torch.chunk(full_weight, tp_size, dim=chunk_dim)
            if tensor is not None:
                tensor.data.copy_(tensor_chunk[tp_rank], non_blocking=True)
        else:
            print(f"tp_shard tensor:[{name}] not in state_dict, skip loading")

    def _fetch_tp_shard_tensor(tensor, name, chunk_dim=0, mutate_func=None) -> torch.Tensor:
        """fetch tensor in tp shards"""
        nonlocal state_dict
        tp_rank = mpu.get_tensor_model_parallel_rank()
        tp_size = mpu.get_tensor_model_parallel_world_size()
        if name in state_dict:
            full_weight = state_dict[name]
            if mutate_func is not None:
                full_weight = mutate_func(full_weight)
            tensor_chunk = torch.chunk(full_weight, tp_size, dim=chunk_dim)
            if tensor is not None:
                tensor.data.copy_(tensor_chunk[tp_rank], non_blocking=True)
        else:
            print(f"tp_shard tensor:[{name}] not in state_dict, skip loading")

    def _fetch_tp_shard_tensor_gate_up(tensor, gate_name, up_name) -> torch.Tensor:
        """fetch gate_up tensor in tp shards"""
        nonlocal state_dict
        nonlocal mp_group
        tp_rank = mpu.get_tensor_model_parallel_rank()
        tp_size = mpu.get_tensor_model_parallel_world_size()
        if gate_name in state_dict and up_name in state_dict:
            gate_weight = state_dict[gate_name]
            up_weight = state_dict[up_name]
            new_gate_up_weight = torch.empty(
                config.intermediate_size * 2, config.hidden_size, dtype=params_dtype, device=get_device_id()
            )
            for i in range(tp_size):
                intermediate_size_tp = config.intermediate_size // tp_size
                gate_weight_tp = gate_weight[i * intermediate_size_tp : (i + 1) * intermediate_size_tp]
                up_weight_tp = up_weight[i * intermediate_size_tp : (i + 1) * intermediate_size_tp]
                new_gate_up_weight[intermediate_size_tp * 2 * i : intermediate_size_tp * 2 * (i + 1)].copy_(
                    torch.cat([gate_weight_tp, up_weight_tp], dim=0)
                )
            tensor_chunk = torch.chunk(new_gate_up_weight, tp_size, dim=0)
            if tensor is not None:
                tensor.data.copy_(tensor_chunk[tp_rank], non_blocking=True)
        else:
            print(f"tp_shard tensor:[{gate_name}, {up_name}] not in state_dict, skip loading")

    def _fetch_tp_shard_tensor_qkv(tensor, q_name, k_name, v_name, bias=False) -> torch.Tensor:
        """fetch tensor in tp shards across mp_group"""
        nonlocal state_dict
        nonlocal mp_group
        tp_rank = mpu.get_tensor_model_parallel_rank()
        tp_size = mpu.get_tensor_model_parallel_world_size()
        assert q_name in state_dict and k_name in state_dict and v_name in state_dict
        full_weight_q = state_dict[q_name]
        full_weight_k = state_dict[k_name]
        full_weight_v = state_dict[v_name]

        hidden_size_per_head = config.hidden_size // config.num_attention_heads

        if config.num_key_value_heads >= tp_size:
            q_size_tp = config.hidden_size // tp_size
            kv_size_tp = hidden_size_per_head * config.num_key_value_heads // tp_size
            total_size = q_size_tp + 2 * kv_size_tp
            if not bias:
                new_weight_qkv = torch.empty(
                    total_size * tp_size, config.hidden_size, dtype=params_dtype, device=get_device_id()
                )
            else:
                new_weight_qkv = torch.empty(total_size * tp_size, dtype=params_dtype, device=get_device_id())
            for i in range(tp_size):
                q_part = full_weight_q[i * q_size_tp : (i + 1) * q_size_tp]
                k_part = full_weight_k[i * kv_size_tp : (i + 1) * kv_size_tp]
                v_part = full_weight_v[i * kv_size_tp : (i + 1) * kv_size_tp]
                new_weight_qkv[i * total_size : (i + 1) * total_size].copy_(torch.cat([q_part, k_part, v_part], dim=0))
        else:
            q_size_tp = config.hidden_size // tp_size
            kv_size_tp = hidden_size_per_head
            total_size = q_size_tp + 2 * kv_size_tp
            if not bias:
                new_weight_qkv = torch.empty(
                    total_size * tp_size, config.hidden_size, dtype=params_dtype, device=get_device_id()
                )
            else:
                new_weight_qkv = torch.empty(total_size * tp_size, dtype=params_dtype, device=get_device_id())
            for i in range(tp_size):
                q_part = full_weight_q[i * q_size_tp : (i + 1) * q_size_tp]
                start_idx = i * config.num_key_value_heads // tp_size * hidden_size_per_head
                end_idx = (i * config.num_key_value_heads // tp_size + 1) * hidden_size_per_head
                k_part = full_weight_k[start_idx:end_idx]
                v_part = full_weight_v[start_idx:end_idx]
                new_weight_qkv[i * total_size : (i + 1) * total_size].copy_(torch.cat([q_part, k_part, v_part], dim=0))

        tensor_chunk = torch.chunk(new_weight_qkv, tp_size, dim=0)
        if tensor is not None:
            tensor.data.copy_(tensor_chunk[tp_rank], non_blocking=True)

    # Embeddings
    # -------------------
    print_rank_0("loading embeddings...")
    gpt_model_module = _get_gpt_model(models[0])
    if pp_rank == 0:
        embed_tokens_weight = gpt_model_module.model.embed_tokens.weight
        _fetch_tp_shard_tensor_vocab(embed_tokens_weight, "model.embed_tokens.weight")

    # Transformer layers
    # -------------------
    layer_map = _megatron_calc_layer_map(config)
    pp_rank = mpu.get_pipeline_model_parallel_rank()
    pp_size = mpu.get_pipeline_model_parallel_world_size()
    num_layer_per_pp = config.num_hidden_layers // pp_size
    vpp_size = mpu.get_virtual_pipeline_model_parallel_world_size()

    layer_list = []
    if vpp_size is not None:
        for vpp_rank in range(vpp_size):
            num_layer_vpp_chunk = num_layer_per_pp // vpp_size
            num_layer_this_model = num_layer_vpp_chunk
            offset = vpp_rank * (config.num_hidden_layers // mpu.get_virtual_pipeline_model_parallel_world_size()) + (
                mpu.get_pipeline_model_parallel_rank() * num_layer_vpp_chunk
            )
            layer_list.extend(list(range(offset, offset + num_layer_this_model)))
    else:
        num_layer_this_model = num_layer_per_pp
        offset = pp_rank * num_layer_per_pp
        layer_list.extend(list(range(offset, offset + num_layer_this_model)))

    for layer in layer_list:
        print(f"{torch.distributed.get_rank()} loading layer #{layer}...")
        layer_name = f"model.layers.{layer}"
        dst_pp_rank, dst_virtual_pp_rank, dst_layer_idx = layer_map[layer]
        print(
            f"{torch.distributed.get_rank()} offset: {offset}, num_layer_this_model: {num_layer_this_model}, "
            f"layer_name: {layer_name}, layer_map[layer]: {layer_map[layer]}"
        )

        gpt_model_module = _get_gpt_model(models[dst_virtual_pp_rank])
        sync_layer = gpt_model_module.model.layers[dst_layer_idx]

        _fetch_tensor(
            sync_layer.input_layernorm.weight if dst_pp_rank == pp_rank else None,
            f"{layer_name}.input_layernorm.weight",
        )

        _fetch_tp_shard_tensor_qkv(
            sync_layer.self_attn.qkv_proj.weight if dst_pp_rank == pp_rank else None,
            f"{layer_name}.self_attn.q_proj.weight",
            f"{layer_name}.self_attn.k_proj.weight",
            f"{layer_name}.self_attn.v_proj.weight",
        )

        _fetch_tp_shard_tensor_qkv(
            sync_layer.self_attn.qkv_proj.bias if dst_pp_rank == pp_rank else None,
            f"{layer_name}.self_attn.q_proj.bias",
            f"{layer_name}.self_attn.k_proj.bias",
            f"{layer_name}.self_attn.v_proj.bias",
            bias=True,
        )

        _fetch_tp_shard_tensor(
            sync_layer.self_attn.o_proj.weight if dst_pp_rank == pp_rank else None,
            f"{layer_name}.self_attn.o_proj.weight",
            chunk_dim=1,
        )

        _fetch_tensor(
            sync_layer.post_attention_layernorm.weight if dst_pp_rank == pp_rank else None,
            f"{layer_name}.post_attention_layernorm.weight",
        )

        _fetch_tp_shard_tensor_gate_up(
            sync_layer.mlp.gate_up_proj.weight if dst_pp_rank == pp_rank else None,
            f"{layer_name}.mlp.gate_proj.weight",
            f"{layer_name}.mlp.up_proj.weight",
        )

        _fetch_tp_shard_tensor(
            sync_layer.mlp.down_proj.weight if dst_pp_rank == pp_rank else None,
            f"{layer_name}.mlp.down_proj.weight",
            chunk_dim=1,
        )

    # Final Layernorm
    # -------------------
    print_rank_0("loading final layernorm...")
    gpt_model_module = _get_gpt_model(models[-1])
    _fetch_tensor(
        getattr(gpt_model_module.model.norm, "weight", None),
        "model.norm.weight",
    )

    if tie_word_embeddings:
        print_rank_0("tie_word_embeddings skip load lm_head")
    else:
        print_rank_0("loading lm_head...")
        if pp_rank + 1 == pp_size:
            lm_head_weight = gpt_model_module.lm_head.weight
            if is_value_model:
                if "lm_head.weight" in state_dict and state_dict["lm_head.weight"].shape[0] == 1:
                    _fetch_tensor(lm_head_weight, "lm_head.weight")
                    print_rank_0("load lm_head from value_head weight")
                elif "reward_head.weight" in state_dict and state_dict["reward_head.weight"].shape[0] == 1:
                    _fetch_tensor(lm_head_weight, "reward_head.weight")
                    print_rank_0("load lm_head from value_head weight")
                else:
                    _fetch_tensor(None, "lm_head.weight")
                    print_rank_0("fail to match lm_head in value_model")
            else:
                _fetch_tp_shard_tensor(lm_head_weight, "lm_head.weight")

    dist.barrier()
    get_torch_device().empty_cache()
    print_rank_0(f"loading megatron ckpt done, time elapsed {time.time() - start_time}s")


================================================
FILE: verl_rl/verl/models/qwen2/megatron/checkpoint_utils/qwen2_loader_depracated.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
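# ---------------------------------------------------------------------------
# Editorial sketch (not part of the original file): the fused QKV layout that
# the _*_tp_shard_tensor_qkv helpers in both loaders build. For each tensor
# parallel rank i they pack [q_shard_i, k_shard_i, v_shard_i] contiguously, so
# torch.chunk(fused, tp_size)[i] hands rank i its q/k/v slices in a single
# copy. The sizes below (hidden=8, 4 attention heads, 2 KV heads, tp_size=2)
# are illustrative assumptions covering the num_key_value_heads >= tp_size case.
import torch


def _demo_fuse_qkv(q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, tp_size: int) -> torch.Tensor:
    """Pack per-rank [q_i, k_i, v_i] blocks the way the qwen2 loaders do."""
    q_shards = torch.chunk(q, tp_size, dim=0)
    k_shards = torch.chunk(k, tp_size, dim=0)
    v_shards = torch.chunk(v, tp_size, dim=0)
    return torch.cat(
        [torch.cat([q_shards[i], k_shards[i], v_shards[i]], dim=0) for i in range(tp_size)], dim=0
    )


_hidden, _n_kv_heads, _tp = 8, 2, 2
_head_dim = _hidden // 4  # 4 attention heads -> head_dim of 2
_q = torch.randn(_hidden, _hidden)  # [num_heads * head_dim, hidden]
_k = torch.randn(_n_kv_heads * _head_dim, _hidden)
_v = torch.randn(_n_kv_heads * _head_dim, _hidden)
_fused = _demo_fuse_qkv(_q, _k, _v, _tp)
_rank0 = torch.chunk(_fused, _tp, dim=0)[0]
# quick self-check: the first q_size_tp rows of rank 0's chunk are exactly
# rank 0's q shard
assert torch.equal(_rank0[: _hidden // _tp], torch.chunk(_q, _tp, dim=0)[0])
# ---------------------------------------------------------------------------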
import time import torch import torch.distributed as dist from verl.utils.device import get_device_id, get_torch_device def _megatron_calc_layer_map(config): """Calculate the mapping of global layer_idx to local layer_idx Returns: layer_map (Dict: int -> tuple(int, int, int)): mapping from the global layer index to a tuple of (pp_rank, virtual_pp_rank, layer_idx inside model) """ from megatron.core import mpu pp_size = mpu.get_pipeline_model_parallel_world_size() virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1 layer_map = dict() num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers for pp_rank_idx in range(pp_size): for virtual_pp_rank_idx in range(virtual_pp_size): layer_offset = ( virtual_pp_rank_idx * (config.num_hidden_layers // virtual_pp_size) + pp_rank_idx * num_layers_per_model ) for layer_idx in range(num_layers_per_model): layer_map[layer_offset + layer_idx] = ( pp_rank_idx, virtual_pp_rank_idx, layer_idx, ) return layer_map def load_state_dict_to_megatron_qwen2( state_dict, wrapped_models, config, params_dtype, is_value_model=False, tie_word_embeddings=False ): """Load merged state_dict to sharded Megatron module in training.""" from megatron.core import DistributedDataParallel as LocalDDP from megatron.core import mpu from megatron.core.transformer.module import Float16Module from torch.nn.parallel import DistributedDataParallel as torchDDP from verl.utils.logger import print_rank_0 from verl.utils.megatron_utils import unwrap_model start_time = time.time() def _get_gpt_model(model): return model def broadcast_params(module): for param in module.parameters(): torch.distributed.broadcast( param.data, src=mpu.get_data_parallel_src_rank(), group=mpu.get_data_parallel_group() ) dp_rank = mpu.get_data_parallel_rank() pp_rank = mpu.get_pipeline_model_parallel_rank() pp_size = mpu.get_pipeline_model_parallel_world_size() virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1 mp_group = mpu.get_model_parallel_group() if torch.distributed.get_rank() == 0: assert mp_group.rank() == 0, f"mp_rank:[{mp_group.rank}] != 0 on rank #0" assert pp_rank == 0, f"pp_rank:[{pp_rank}] != 0 on rank #0" assert dp_rank == 0, f"dp_rank:[{dp_rank}] != 0 on rank #0" if not isinstance(wrapped_models, list | tuple): wrapped_models = list(wrapped_models) assert len(wrapped_models) == virtual_pp_size num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers, ( f"num_layers_per_model: {num_layers_per_model} * pp_size: {pp_size} * virtual_pp_size: " f"{virtual_pp_size} != config.num_hidden_layers: {config.num_hidden_layers}" ) models = [None] * len(wrapped_models) for i, wrapped_model in enumerate(wrapped_models): models[i] = unwrap_model(wrapped_model, (torchDDP, LocalDDP, Float16Module)) gpt_model_module = _get_gpt_model(models[i]) assert len(gpt_model_module.model.layers) == num_layers_per_model def _broadcast_tensor(tensor, name) -> torch.Tensor: """broadcast tensor from rank0 across mp_group""" nonlocal state_dict nonlocal mp_group if torch.distributed.get_rank() == 0: if name in state_dict: weight = state_dict[name] tensor_shape = weight.shape else: tensor_shape = None else: weight = None tensor_shape = None obj_list = [tensor_shape] dist.broadcast_object_list(obj_list, src=0, group=mp_group) tensor_shape = obj_list[0] if tensor_shape is None: # all 
or none ranks in the mp_group should reach here print_rank_0(f"tensor:[{name}] not in state_dict, skip load") return if tensor is None: tensor = torch.empty( tensor_shape, dtype=params_dtype, device=get_device_id(), requires_grad=False, ) if torch.distributed.get_rank() == 0: tensor.data.copy_(weight) dist.broadcast(tensor, src=0, group=mp_group) def _broadcast_tp_shard_tensor_vocab(tensor, name, chunk_dim=0, mutate_func=None) -> torch.Tensor: """broadcast tensor in tp shards across mp_group""" nonlocal state_dict nonlocal mp_group tp_rank = mpu.get_tensor_model_parallel_rank() tp_size = mpu.get_tensor_model_parallel_world_size() if torch.distributed.get_rank() == 0: if name in state_dict: full_weight = state_dict[name] if mutate_func is not None: full_weight = mutate_func(full_weight) tensor_chunk = torch.chunk(full_weight, tp_size, dim=chunk_dim) chunk_shape = tensor_chunk[0].shape else: chunk_shape = None else: chunk_shape = None obj_list = [chunk_shape] dist.broadcast_object_list(obj_list, src=0, group=mp_group) chunk_shape = obj_list[0] if chunk_shape is None: # all or none ranks in the mp_group should reach here print_rank_0(f"tp_shard tensor:[{name}] not in state_dict, skip loading") return if tensor is None: sync_tensor = torch.empty( chunk_shape, dtype=params_dtype, device=get_device_id(), requires_grad=False, ) else: assert tensor.shape == chunk_shape, ( f"rank #{torch.distributed.get_rank()} tensor {name} shape {tensor.shape} != {chunk_shape}" ) sync_tensor = torch.empty_like(tensor, device=get_device_id(), requires_grad=False) for i in range(tp_size): if torch.distributed.get_rank() == 0: sync_tensor.data.copy_(tensor_chunk[i]) dist.broadcast(sync_tensor, src=0, group=mp_group) if (i == tp_rank) and (tensor is not None): tensor.data.copy_(sync_tensor) def _broadcast_tp_shard_tensor(tensor, name, chunk_dim=0, mutate_func=None) -> torch.Tensor: """broadcast tensor in tp shards across mp_group""" nonlocal state_dict nonlocal mp_group tp_rank = mpu.get_tensor_model_parallel_rank() tp_size = mpu.get_tensor_model_parallel_world_size() if torch.distributed.get_rank() == 0: if name in state_dict: full_weight = state_dict[name] if mutate_func is not None: full_weight = mutate_func(full_weight) tensor_chunk = torch.chunk(full_weight, tp_size, dim=chunk_dim) chunk_shape = tensor_chunk[0].shape else: chunk_shape = None else: chunk_shape = None obj_list = [chunk_shape] dist.broadcast_object_list(obj_list, src=0, group=mp_group) chunk_shape = obj_list[0] if chunk_shape is None: # all or none ranks in the mp_group should reach here print_rank_0(f"tp_shard tensor:[{name}] not in state_dict, skip loading") return if tensor is None: sync_tensor = torch.empty( chunk_shape, dtype=params_dtype, device=get_device_id(), requires_grad=False, ) else: assert tensor.shape == chunk_shape, ( f"rank #{torch.distributed.get_rank()} tensor {name} shape {tensor.shape} != {chunk_shape}" ) sync_tensor = torch.empty_like(tensor, device=get_device_id(), requires_grad=False) for i in range(tp_size): if torch.distributed.get_rank() == 0: sync_tensor.data.copy_(tensor_chunk[i]) dist.broadcast(sync_tensor, src=0, group=mp_group) if (i == tp_rank) and (tensor is not None): tensor.data.copy_(sync_tensor) def _broadcast_tp_shard_tensor_gate_up(tensor, gate_name, up_name) -> torch.Tensor: """broadcast tensor in tp shards across mp_group""" nonlocal state_dict nonlocal mp_group tp_rank = mpu.get_tensor_model_parallel_rank() tp_size = mpu.get_tensor_model_parallel_world_size() if torch.distributed.get_rank() == 0: 
            gate_weight = state_dict[gate_name]
            up_weight = state_dict[up_name]
            new_gate_up_weight = torch.empty(
                config.intermediate_size * 2, config.hidden_size, dtype=params_dtype, device=get_device_id()
            )
            for i in range(tp_size):
                intermediate_size_tp = config.intermediate_size // tp_size
                gate_weight_tp = gate_weight[i * intermediate_size_tp : (i + 1) * intermediate_size_tp]
                up_weight_tp = up_weight[i * intermediate_size_tp : (i + 1) * intermediate_size_tp]
                new_gate_up_weight[intermediate_size_tp * 2 * i : intermediate_size_tp * 2 * (i + 1)].copy_(
                    torch.cat([gate_weight_tp, up_weight_tp], dim=0)
                )
            tensor_chunk = torch.chunk(new_gate_up_weight, tp_size, dim=0)
            chunk_shape = tensor_chunk[0].shape
        else:
            chunk_shape = None

        obj_list = [chunk_shape]
        dist.broadcast_object_list(obj_list, src=0, group=mp_group)
        chunk_shape = obj_list[0]
        if chunk_shape is None:
            # all or none ranks in the mp_group should reach here
            print_rank_0(f"tp_shard tensor:[{gate_name, up_name}] not in state_dict, skip loading")
            return

        if tensor is None:
            sync_tensor = torch.empty(
                chunk_shape,
                dtype=params_dtype,
                device=get_device_id(),
                requires_grad=False,
            )
        else:
            assert tensor.shape == chunk_shape, (
                f"rank #{torch.distributed.get_rank()} tensor {gate_name, up_name} shape "
                f"{tensor.shape} != {chunk_shape}"
            )
            sync_tensor = torch.empty_like(tensor, device=get_device_id(), requires_grad=False)

        for i in range(tp_size):
            if torch.distributed.get_rank() == 0:
                sync_tensor.data.copy_(tensor_chunk[i])
            dist.broadcast(sync_tensor, src=0, group=mp_group)
            if (i == tp_rank) and (tensor is not None):
                tensor.data.copy_(sync_tensor)

    def _broadcast_tp_shard_tensor_qkv(tensor, q_name, k_name, v_name, bias=False) -> torch.Tensor:
        """broadcast tensor in tp shards across mp_group"""
        nonlocal state_dict
        nonlocal mp_group
        tp_rank = mpu.get_tensor_model_parallel_rank()
        tp_size = mpu.get_tensor_model_parallel_world_size()

        if torch.distributed.get_rank() == 0:
            assert q_name in state_dict and k_name in state_dict and v_name in state_dict
            full_weight_q = state_dict[q_name]
            full_weight_k = state_dict[k_name]
            full_weight_v = state_dict[v_name]

            hidden_size_per_head = config.hidden_size // config.num_attention_heads

            if config.num_key_value_heads >= tp_size:
                q_size_tp = config.hidden_size // tp_size
                kv_size_tp = hidden_size_per_head * config.num_key_value_heads // tp_size
                total_size = q_size_tp + 2 * kv_size_tp
                if not bias:
                    new_weight_qkv = torch.empty(
                        total_size * tp_size, config.hidden_size, dtype=params_dtype, device=get_device_id()
                    )
                else:
                    new_weight_qkv = torch.empty(total_size * tp_size, dtype=params_dtype, device=get_device_id())
                for i in range(tp_size):
                    q_part = full_weight_q[i * q_size_tp : (i + 1) * q_size_tp]
                    k_part = full_weight_k[i * kv_size_tp : (i + 1) * kv_size_tp]
                    v_part = full_weight_v[i * kv_size_tp : (i + 1) * kv_size_tp]
                    new_weight_qkv[i * total_size : (i + 1) * total_size].copy_(
                        torch.cat([q_part, k_part, v_part], dim=0)
                    )
            else:
                q_size_tp = config.hidden_size // tp_size
                kv_size_tp = hidden_size_per_head
                total_size = q_size_tp + 2 * kv_size_tp
                if not bias:
                    new_weight_qkv = torch.empty(
                        total_size * tp_size, config.hidden_size, dtype=params_dtype, device=get_device_id()
                    )
                else:
                    new_weight_qkv = torch.empty(total_size * tp_size, dtype=params_dtype, device=get_device_id())
                for i in range(tp_size):
                    q_part = full_weight_q[i * q_size_tp : (i + 1) * q_size_tp]
                    start_idx = i * config.num_key_value_heads // tp_size * hidden_size_per_head
                    end_idx = (i * config.num_key_value_heads // tp_size + 1) * hidden_size_per_head
                    k_part =
full_weight_k[start_idx:end_idx] v_part = full_weight_v[start_idx:end_idx] new_weight_qkv[i * total_size : (i + 1) * total_size].copy_( torch.cat([q_part, k_part, v_part], dim=0) ) tensor_chunk = torch.chunk(new_weight_qkv, tp_size, dim=0) chunk_shape = tensor_chunk[0].shape else: chunk_shape = None obj_list = [chunk_shape] dist.broadcast_object_list(obj_list, src=0, group=mp_group) chunk_shape = obj_list[0] if chunk_shape is None: # all or none ranks in the mp_group should reach here print_rank_0(f"tp_shard tensor:[{q_name, k_name, v_name}] not in state_dict, skip loading") return if tensor is None: sync_tensor = torch.empty( chunk_shape, dtype=params_dtype, device=get_device_id(), requires_grad=False, ) else: assert tensor.shape == chunk_shape, ( f"rank #{torch.distributed.get_rank()} tensor {q_name} shape {tensor.shape} != {chunk_shape}" ) sync_tensor = torch.empty_like(tensor, device=get_device_id(), requires_grad=False) for i in range(tp_size): if torch.distributed.get_rank() == 0: sync_tensor.data.copy_(tensor_chunk[i]) dist.broadcast(sync_tensor, src=0, group=mp_group) if (i == tp_rank) and (tensor is not None): tensor.data.copy_(sync_tensor) if dp_rank == 0: # Embeddings # ------------------- print_rank_0("loading embeddings...") gpt_model_module = _get_gpt_model(models[0]) embed_tokens_weight = None if pp_rank == 0: embed_tokens_weight = gpt_model_module.model.embed_tokens.weight _broadcast_tp_shard_tensor_vocab(embed_tokens_weight, "model.embed_tokens.weight") # Transformer layers # ------------------- layer_map = _megatron_calc_layer_map(config) for layer in range(config.num_hidden_layers): print_rank_0(f"loading layer #{layer}...") layer_name = f"model.layers.{layer}" dst_pp_rank, dst_virtual_pp_rank, dst_layer_idx = layer_map[layer] gpt_model_module = _get_gpt_model(models[dst_virtual_pp_rank]) sync_layer = gpt_model_module.model.layers[dst_layer_idx] _broadcast_tensor( sync_layer.input_layernorm.weight if dst_pp_rank == pp_rank else None, f"{layer_name}.input_layernorm.weight", ) _broadcast_tp_shard_tensor_qkv( sync_layer.self_attn.qkv_proj.weight if dst_pp_rank == pp_rank else None, f"{layer_name}.self_attn.q_proj.weight", f"{layer_name}.self_attn.k_proj.weight", f"{layer_name}.self_attn.v_proj.weight", ) _broadcast_tp_shard_tensor_qkv( sync_layer.self_attn.qkv_proj.bias if dst_pp_rank == pp_rank else None, f"{layer_name}.self_attn.q_proj.bias", f"{layer_name}.self_attn.k_proj.bias", f"{layer_name}.self_attn.v_proj.bias", bias=True, ) _broadcast_tp_shard_tensor( sync_layer.self_attn.o_proj.weight if dst_pp_rank == pp_rank else None, f"{layer_name}.self_attn.o_proj.weight", chunk_dim=1, ) _broadcast_tensor( sync_layer.post_attention_layernorm.weight if dst_pp_rank == pp_rank else None, f"{layer_name}.post_attention_layernorm.weight", ) _broadcast_tp_shard_tensor_gate_up( sync_layer.mlp.gate_up_proj.weight if dst_pp_rank == pp_rank else None, f"{layer_name}.mlp.gate_proj.weight", f"{layer_name}.mlp.up_proj.weight", ) _broadcast_tp_shard_tensor( sync_layer.mlp.down_proj.weight if dst_pp_rank == pp_rank else None, f"{layer_name}.mlp.down_proj.weight", chunk_dim=1, ) # Final Layernorm # ------------------- print_rank_0("loading final layernorm...") gpt_model_module = _get_gpt_model(models[-1]) _broadcast_tensor( getattr(gpt_model_module.model.norm, "weight", None), "model.norm.weight", ) if tie_word_embeddings: print_rank_0("tie_word_embeddings skip load lm_head") else: print_rank_0("loading lm_head...") lm_head_weight = None if pp_rank + 1 == pp_size: lm_head_weight = 
gpt_model_module.lm_head.weight if is_value_model: if "lm_head.weight" in state_dict and state_dict["lm_head.weight"].shape[0] == 1: _broadcast_tensor(lm_head_weight, "lm_head.weight") print_rank_0("load lm_head from value_head weight") elif "reward_head.weight" in state_dict and state_dict["reward_head.weight"].shape[0] == 1: _broadcast_tensor(lm_head_weight, "reward_head.weight") print_rank_0("load lm_head from value_head weight") else: _broadcast_tensor(None, "lm_head.weight") print_rank_0("fail to match lm_head in value_model") else: _broadcast_tp_shard_tensor(lm_head_weight, "lm_head.weight") dist.barrier() # Broadcast weights inside data parallel groups for wrapped_model in wrapped_models: broadcast_params(wrapped_model) get_torch_device().empty_cache() print_rank_0(f"loading megatron ckpt done, time elapsed {time.time() - start_time}s") ================================================ FILE: verl_rl/verl/models/qwen2/megatron/checkpoint_utils/qwen2_saver.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import time import torch import torch.distributed as dist from megatron.core import mpu from megatron.core.distributed import DistributedDataParallel as LocalDDP from megatron.core.transformer.module import Float16Module from torch.nn.parallel import DistributedDataParallel as torchDDP from verl.utils.device import get_device_id, get_torch_device from verl.utils.logger import print_rank_0 from verl.utils.megatron_utils import unwrap_model def _megatron_calc_global_rank(tp_rank: int = 0, dp_rank: int = 0, pp_rank: int = 0): """given TP,DP,PP rank to get the global rank.""" tp_size = mpu.get_tensor_model_parallel_world_size() dp_size = mpu.get_data_parallel_world_size() pp_size = mpu.get_pipeline_model_parallel_world_size() assert tp_size * dp_size * pp_size == torch.distributed.get_world_size(), ( f"{tp_size} x {dp_size} x {pp_size} != {torch.distributed.get_world_size()}" ) # We only support TP-DP-PP grouping, for correctness when resharding return (pp_rank * dp_size + dp_rank) * tp_size + tp_rank def _megatron_calc_layer_map(config): """Calculate the mapping of global layer_idx to local layer_idx Returns: layer_map (Dict: int -> tuple(int, int, int)): mapping from the global layer index to a tuple of (pp_rank, virtual_pp_rank, layer_idx inside model) """ from megatron.core import mpu pp_size = mpu.get_pipeline_model_parallel_world_size() virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1 layer_map = dict() num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers for pp_rank_idx in range(pp_size): for virtual_pp_rank_idx in range(virtual_pp_size): layer_offset = ( virtual_pp_rank_idx * (config.num_hidden_layers // virtual_pp_size) + pp_rank_idx * num_layers_per_model ) for layer_idx in range(num_layers_per_model): layer_map[layer_offset + layer_idx] = ( pp_rank_idx, 
virtual_pp_rank_idx, layer_idx, ) return layer_map def merge_megatron_ckpt_qwen2(wrapped_models, config, dtype, is_value_model=False, tie_word_embeddings=False): """Merge sharded parameters of a Megatron module into a merged checkpoint. Args: wrapped_models (list of megatron.core.distributed.DistributedDataParallel): The local DDP wrapped megatron modules. config (str or None): HF config for model dtype: model params type is_value_model: if model is value model tie_word_embeddings: tie_word_embeddings Returns: state_dict (dict): The merged state_dict in rank 0, and an empty dictionary in other ranks. """ start_time = time.time() def _get_gpt_model(model): return model dp_rank = mpu.get_data_parallel_rank() pp_size = mpu.get_pipeline_model_parallel_world_size() pp_rank = mpu.get_pipeline_model_parallel_rank() virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1 mp_group = mpu.get_model_parallel_group() if dist.get_rank() == 0: assert mp_group.rank() == 0, f"mp_rank:[{mp_group.rank}] != 0 on rank #0" assert pp_rank == 0, f"pp_rank:[{pp_rank}] != 0 on rank #0" assert dp_rank == 0, f"dp_rank:[{dp_rank}] != 0 on rank #0" if not isinstance(wrapped_models, list | tuple): wrapped_models = list(wrapped_models) assert len(wrapped_models) == virtual_pp_size num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers models = [None] * len(wrapped_models) for i, wrapped_model in enumerate(wrapped_models): models[i] = unwrap_model(wrapped_model, (torchDDP, LocalDDP, Float16Module)) assert len(models[i].model.layers) == num_layers_per_model, ( "len model layers {} not equal to num_layers_per_model {}".format( len(models[i].model.layers), num_layers_per_model ) ) state_dict = dict() def _get_cpu_tensor(tensor: torch.Tensor): if tensor is None: return None if tensor.device == torch.device("cpu"): return tensor.detach().clone() return tensor.detach().cpu() def _broadcast_tensor(tensor, name, src_pp_rank) -> torch.Tensor: """broadcast tensor across mp_group""" nonlocal state_dict nonlocal mp_group src_rank = _megatron_calc_global_rank(tp_rank=0, dp_rank=0, pp_rank=src_pp_rank) if torch.distributed.get_rank() == src_rank: if tensor is None: weight = None tensor_shape = None else: weight = tensor tensor_shape = weight.shape else: weight = None tensor_shape = None obj_list = [tensor_shape] dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group) tensor_shape = obj_list[0] if tensor_shape is None: # all or none ranks in the mp_group should reach here print_rank_0(f"tensor:[{name}] not exist, skip collect") return if weight is None: weight = torch.empty( tensor_shape, dtype=dtype, device=get_device_id(), requires_grad=False, ) dist.broadcast(weight, src=src_rank, group=mp_group) if torch.distributed.get_rank() == 0: state_dict[name] = _get_cpu_tensor(weight) def _broadcast_tp_shard_tensor(tensor, name, src_pp_rank, concat_dim=0, mutate_func=None) -> torch.Tensor: """broadcast tensor in tp shards across mp_group""" nonlocal state_dict nonlocal mp_group tp_size = mpu.get_tensor_model_parallel_world_size() src_rank = _megatron_calc_global_rank(tp_rank=0, dp_rank=0, pp_rank=src_pp_rank) chunk_shape = tensor.shape if torch.distributed.get_rank() == src_rank else None obj_list = [chunk_shape] dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group) chunk_shape = obj_list[0] if chunk_shape is None: # all or none ranks in the mp_group should reach here print_rank_0(f"tp_shard 
tensor:[{name}] not exist, skip collecting") return buffer_tensor = torch.empty( chunk_shape, dtype=dtype, device=get_device_id(), requires_grad=False, ) chunk_tensors = [None] * tp_size for i in range(tp_size): cur_src_rank = _megatron_calc_global_rank(tp_rank=i, dp_rank=0, pp_rank=src_pp_rank) sync_tensor = tensor if torch.distributed.get_rank() == cur_src_rank else buffer_tensor dist.broadcast(sync_tensor, src=cur_src_rank, group=mp_group) if torch.distributed.get_rank() == 0: chunk_tensors[i] = _get_cpu_tensor(sync_tensor) if torch.distributed.get_rank() == 0: full_tensor = torch.concat(chunk_tensors, dim=concat_dim) if mutate_func is not None: full_tensor = mutate_func(full_tensor) state_dict[name] = full_tensor def _broadcast_tp_shard_tensor_gate_up(tensor, gate_name, up_name, src_pp_rank) -> torch.Tensor: """broadcast tensor in tp shards across mp_group""" nonlocal state_dict nonlocal mp_group tp_size = mpu.get_tensor_model_parallel_world_size() src_rank = _megatron_calc_global_rank(tp_rank=0, dp_rank=0, pp_rank=src_pp_rank) chunk_shape = tensor.shape if torch.distributed.get_rank() == src_rank else None obj_list = [chunk_shape] dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group) chunk_shape = obj_list[0] if chunk_shape is None: # all or none ranks in the mp_group should reach here print_rank_0(f"tp_shard tensor:[{gate_name, up_name}] not exist, skip collecting") return buffer_tensor = torch.empty( chunk_shape, dtype=dtype, device=get_device_id(), requires_grad=False, ) chunk_tensors = [None] * tp_size for i in range(tp_size): cur_src_rank = _megatron_calc_global_rank(tp_rank=i, dp_rank=0, pp_rank=src_pp_rank) sync_tensor = tensor if torch.distributed.get_rank() == cur_src_rank else buffer_tensor dist.broadcast(sync_tensor, src=cur_src_rank, group=mp_group) if torch.distributed.get_rank() == 0: chunk_tensors[i] = _get_cpu_tensor(sync_tensor) if torch.distributed.get_rank() == 0: full_tensor = torch.concat(chunk_tensors, dim=0) intermediate_size_tp = config.intermediate_size // tp_size gate_weight_list = [] up_weight_list = [] for i in range(tp_size): gate_up_weight_tp = full_tensor[intermediate_size_tp * 2 * i : intermediate_size_tp * 2 * (i + 1)] gate_weight_tp = gate_up_weight_tp[:intermediate_size_tp] up_weight_tp = gate_up_weight_tp[intermediate_size_tp:] gate_weight_list.append(gate_weight_tp) up_weight_list.append(up_weight_tp) state_dict[gate_name] = torch.cat(gate_weight_list, dim=0) state_dict[up_name] = torch.cat(up_weight_list, dim=0) def _broadcast_tp_shard_tensor_qkv(tensor, q_name, k_name, v_name, src_pp_rank): """broadcast tensor in tp shards across mp_group""" nonlocal state_dict nonlocal mp_group tp_size = mpu.get_tensor_model_parallel_world_size() src_rank = _megatron_calc_global_rank(tp_rank=0, dp_rank=0, pp_rank=src_pp_rank) chunk_shape = tensor.shape if torch.distributed.get_rank() == src_rank else None obj_list = [chunk_shape] dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group) chunk_shape = obj_list[0] if chunk_shape is None: # all or none ranks in the mp_group should reach here print_rank_0(f"tp_shard tensor:[{q_name}] not exist, skip collecting") return buffer_tensor = torch.empty( chunk_shape, dtype=dtype, device=get_device_id(), requires_grad=False, ) chunk_tensors = [None] * tp_size for i in range(tp_size): cur_src_rank = _megatron_calc_global_rank(tp_rank=i, dp_rank=0, pp_rank=src_pp_rank) sync_tensor = tensor if torch.distributed.get_rank() == cur_src_rank else buffer_tensor dist.broadcast(sync_tensor, src=cur_src_rank, 
group=mp_group) if torch.distributed.get_rank() == 0: chunk_tensors[i] = _get_cpu_tensor(sync_tensor) if torch.distributed.get_rank() == 0: full_tensor = torch.concat(chunk_tensors, dim=0) q_weight_list = [] k_weight_list = [] v_weight_list = [] hidden_size_per_head = config.hidden_size // config.num_attention_heads if config.num_key_value_heads >= tp_size: q_size_tp = config.hidden_size // tp_size kv_size_tp = hidden_size_per_head * config.num_key_value_heads // tp_size total_size = q_size_tp + 2 * kv_size_tp for i in range(tp_size): qkv_part = full_tensor[i * total_size : (i + 1) * total_size] q_part = qkv_part[:q_size_tp] k_part = qkv_part[q_size_tp : q_size_tp + kv_size_tp] v_part = qkv_part[q_size_tp + kv_size_tp : total_size] q_weight_list.append(q_part) k_weight_list.append(k_part) v_weight_list.append(v_part) else: q_size_tp = config.hidden_size // tp_size kv_size_tp = hidden_size_per_head total_size = q_size_tp + 2 * kv_size_tp for i in range(tp_size): qkv_part = full_tensor[i * total_size : (i + 1) * total_size] q_part = qkv_part[:q_size_tp] k_part = qkv_part[q_size_tp : q_size_tp + kv_size_tp] v_part = qkv_part[q_size_tp + kv_size_tp : total_size] q_weight_list.append(q_part) if i * config.num_key_value_heads % tp_size == 0: k_weight_list.append(k_part) v_weight_list.append(v_part) state_dict[q_name] = torch.cat(q_weight_list, dim=0) state_dict[k_name] = torch.cat(k_weight_list, dim=0) state_dict[v_name] = torch.cat(v_weight_list, dim=0) # empty cache before collecting weights get_torch_device().empty_cache() # Embeddings # ------------------- if dp_rank == 0: # Embeddings # ------------------- print_rank_0("collecting embeddings...") gpt_model_module = _get_gpt_model(models[0]) _broadcast_tp_shard_tensor( gpt_model_module.model.embed_tokens.weight if pp_rank == 0 else None, "model.embed_tokens.weight", src_pp_rank=0, ) # Transformer layers # ------------------- layer_map = _megatron_calc_layer_map(config) for layer in range(config.num_hidden_layers): print_rank_0(f"collecting layer #{layer}...") layer_name = f"model.layers.{layer}" src_pp_rank, src_virtual_pp_rank, src_layer_idx = layer_map[layer] gpt_model_module = _get_gpt_model(models[src_virtual_pp_rank]) sync_layer = gpt_model_module.model.layers[src_layer_idx] _broadcast_tensor( sync_layer.input_layernorm.weight, f"{layer_name}.input_layernorm.weight", src_pp_rank=src_pp_rank, ) _broadcast_tp_shard_tensor_qkv( sync_layer.self_attn.qkv_proj.weight, f"{layer_name}.self_attn.q_proj.weight", f"{layer_name}.self_attn.k_proj.weight", f"{layer_name}.self_attn.v_proj.weight", src_pp_rank=src_pp_rank, ) _broadcast_tp_shard_tensor_qkv( sync_layer.self_attn.qkv_proj.bias, f"{layer_name}.self_attn.q_proj.bias", f"{layer_name}.self_attn.k_proj.bias", f"{layer_name}.self_attn.v_proj.bias", src_pp_rank=src_pp_rank, ) _broadcast_tp_shard_tensor( sync_layer.self_attn.o_proj.weight, f"{layer_name}.self_attn.o_proj.weight", concat_dim=1, src_pp_rank=src_pp_rank, ) _broadcast_tensor( sync_layer.post_attention_layernorm.weight, f"{layer_name}.post_attention_layernorm.weight", src_pp_rank=src_pp_rank, ) _broadcast_tp_shard_tensor_gate_up( sync_layer.mlp.gate_up_proj.weight, f"{layer_name}.mlp.gate_proj.weight", f"{layer_name}.mlp.up_proj.weight", src_pp_rank=src_pp_rank, ) _broadcast_tp_shard_tensor( sync_layer.mlp.down_proj.weight, f"{layer_name}.mlp.down_proj.weight", concat_dim=1, src_pp_rank=src_pp_rank, ) # Final Layernorm # ------------------- print_rank_0("collecting final layernorm...") gpt_model_module = _get_gpt_model(models[-1]) 
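        # Illustrative sketch (hypothetical sizes, not from the original verl sources):
        # `_megatron_calc_layer_map` used above interleaves virtual-pipeline chunks.
        # With num_hidden_layers=8, pp_size=2 and virtual_pp_size=2, each chunk owns
        # 8 // 2 // 2 = 2 layers, so pp rank 0 holds global layers [0, 1] (vpp chunk 0)
        # and [4, 5] (vpp chunk 1). A standalone check of the same arithmetic:
        def _toy_layer_map(num_layers: int = 8, pp: int = 2, vpp: int = 2) -> dict:
            per_model = num_layers // pp // vpp
            return {
                v * (num_layers // vpp) + p * per_model + i: (p, v, i)
                for v in range(vpp)
                for p in range(pp)
                for i in range(per_model)
            }

        assert _toy_layer_map()[2] == (1, 0, 0)  # second pp stage, first vpp chunk
        assert _toy_layer_map()[4] == (0, 1, 0)  # first pp stage, second vpp chunk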
        _broadcast_tensor(
            getattr(gpt_model_module.model.norm, "weight", None),
            "model.norm.weight",
            src_pp_rank=pp_size - 1,
        )

        if tie_word_embeddings:
            print_rank_0("tie_word_embeddings, skip collecting lm_head...")
        else:
            print_rank_0("collecting lm_head...")

            if is_value_model:
                _broadcast_tensor(
                    gpt_model_module.lm_head.weight if pp_rank == pp_size - 1 else None,
                    "lm_head.weight",
                    src_pp_rank=pp_size - 1,
                )
                _broadcast_tensor(
                    gpt_model_module.reward_head.weight
                    if pp_rank == pp_size - 1 and getattr(gpt_model_module, "reward_head", None) is not None
                    else None,
                    "reward_head.weight",
                    src_pp_rank=pp_size - 1,
                )
            else:
                _broadcast_tp_shard_tensor(
                    getattr(gpt_model_module.lm_head, "weight", None) if pp_rank == pp_size - 1 else None,
                    "lm_head.weight",
                    src_pp_rank=pp_size - 1,
                )

    dist.barrier()

    get_torch_device().empty_cache()
    if torch.distributed.get_rank() == 0:
        for k, v in state_dict.items():
            if dtype != v.dtype:
                state_dict[k] = v.to(dtype)

    print_rank_0(f"merge megatron ckpt done, time elapsed {time.time() - start_time}s")
    return state_dict


================================================
FILE: verl_rl/verl/models/qwen2/megatron/layers/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .parallel_attention import ParallelQwen2Attention
from .parallel_decoder import ParallelQwen2DecoderLayer, ParallelQwen2DecoderLayerRmPad
from .parallel_mlp import ParallelQwen2MLP
from .parallel_rmsnorm import ParallelQwen2RMSNorm

__all__ = [
    "ParallelQwen2Attention",
    "ParallelQwen2DecoderLayer",
    "ParallelQwen2DecoderLayerRmPad",
    "ParallelQwen2MLP",
    "ParallelQwen2RMSNorm",
]


================================================
FILE: verl_rl/verl/models/qwen2/megatron/layers/parallel_attention.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
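# Illustrative sketch (hypothetical sizes, not from the original verl sources): the
# checkpoint utils above store the fused QKV weight as per-TP-rank [q_i; k_i; v_i]
# blocks rather than [all-Q; all-K; all-V], which is also the layout the fused
# projection in this file expects. A minimal single-process round-trip
# (hidden=8, kv rows=4, tp=2):
def _demo_qkv_round_trip():
    import torch

    hidden, kv_rows, tp = 8, 4, 2
    q = torch.arange(hidden * hidden, dtype=torch.float32).reshape(hidden, hidden)
    k = torch.arange(kv_rows * hidden, dtype=torch.float32).reshape(kv_rows, hidden) + 100.0
    v = k + 1000.0
    q_tp, kv_tp = hidden // tp, kv_rows // tp  # rows owned by each rank

    # merge: concat per-rank [q_i; k_i; v_i] blocks, as in _broadcast_tp_shard_tensor_qkv
    fused = torch.cat(
        [
            torch.cat([q[i * q_tp : (i + 1) * q_tp], k[i * kv_tp : (i + 1) * kv_tp], v[i * kv_tp : (i + 1) * kv_tp]])
            for i in range(tp)
        ]
    )

    # split: invert the layout, as in the saver's qkv un-merge above
    total = q_tp + 2 * kv_tp
    q2 = torch.cat([fused[i * total : i * total + q_tp] for i in range(tp)])
    k2 = torch.cat([fused[i * total + q_tp : i * total + q_tp + kv_tp] for i in range(tp)])
    v2 = torch.cat([fused[i * total + q_tp + kv_tp : (i + 1) * total] for i in range(tp)])
    assert torch.equal(q, q2) and torch.equal(k, k2) and torch.equal(v, v2)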
import math from typing import Optional import torch.nn.functional as F from einops import rearrange from transformers.utils import is_flash_attn_2_available if is_flash_attn_2_available(): from flash_attn import flash_attn_varlen_func from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa import torch from flash_attn.layers.rotary import apply_rotary_emb from megatron.core import ModelParallelConfig, tensor_parallel from megatron.core import parallel_state as mpu from torch import nn from transformers import Qwen2Config from verl.models.qwen2.megatron.layers.parallel_linear import QKVParallelLinear from verl.utils.megatron import tensor_parallel as tp_utils class Qwen2RotaryEmbedding(nn.Module): def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): super().__init__() self.dim = dim self.max_position_embeddings = max_position_embeddings self.base = base inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim)) self.register_buffer("inv_freq", inv_freq, persistent=False) # Build here to make `torch.jit.trace` work. self._set_cos_sin_cache( seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype() ) def _set_cos_sin_cache(self, seq_len, device, dtype): self.max_seq_len_cached = seq_len t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) freqs = torch.einsum("i,j->ij", t, self.inv_freq) # Different from paper, but it uses a different permutation in order to obtain the same calculation emb = torch.cat((freqs, freqs), dim=-1) self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) def forward(self, x, seq_len=None): # x: [bs, num_attention_heads, seq_len, head_size] if seq_len > self.max_seq_len_cached: self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype) return ( self.cos_cached[:seq_len].to(dtype=x.dtype), self.sin_cached[:seq_len].to(dtype=x.dtype), ) class Qwen2LinearScalingRotaryEmbedding(Qwen2RotaryEmbedding): """Qwen2RotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev""" def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0): self.scaling_factor = scaling_factor super().__init__(dim, max_position_embeddings, base, device) def _set_cos_sin_cache(self, seq_len, device, dtype): self.max_seq_len_cached = seq_len t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) t = t / self.scaling_factor freqs = torch.einsum("i,j->ij", t, self.inv_freq) # Different from paper, but it uses a different permutation in order to obtain the same calculation emb = torch.cat((freqs, freqs), dim=-1) self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) class Qwen2DynamicNTKScalingRotaryEmbedding(Qwen2RotaryEmbedding): """Qwen2RotaryEmbedding extended with Dynamic NTK scaling. 
Credits to the Reddit users /u/bloc97 and /u/emozilla""" def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0): self.scaling_factor = scaling_factor super().__init__(dim, max_position_embeddings, base, device) def _set_cos_sin_cache(self, seq_len, device, dtype): self.max_seq_len_cached = seq_len if seq_len > self.max_position_embeddings: base = self.base * ( (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1) ) ** (self.dim / (self.dim - 2)) inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim)) self.register_buffer("inv_freq", inv_freq, persistent=False) t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) freqs = torch.einsum("i,j->ij", t, self.inv_freq) # Different from paper, but it uses a different permutation in order to obtain the same calculation emb = torch.cat((freqs, freqs), dim=-1) self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1) def apply_rotary_pos_emb(q, k, cos, sin, position_ids): cos = cos[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim] sin = sin[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim] q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: """ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) """ batch, num_key_value_heads, slen, head_dim = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) class ParallelQwen2Attention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: Qwen2Config, megatron_config: ModelParallelConfig): super().__init__() self.config = config self.megatron_config = megatron_config self.hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.hidden_size // self.num_heads self.num_key_value_heads = config.num_key_value_heads self.num_key_value_groups = self.num_heads // self.num_key_value_heads self.max_position_embeddings = config.max_position_embeddings self.rope_theta = config.rope_theta # assign values after tp tp_size = mpu.get_tensor_model_parallel_world_size() assert self.num_heads % tp_size == 0, ( f"num_head must be divisible by tp_size. Got num_head={self.num_heads}, tp_size={tp_size}" ) assert self.num_key_value_heads % tp_size == 0, ( f"num_key_value_heads must be divisible by tp_size. Got num_key_value_heads=" f"{self.num_key_value_heads}, tp_size={tp_size}" ) self.num_heads_per_tp = self.num_heads // tp_size self.num_key_value_heads_per_tp = self.num_key_value_heads // tp_size self.hidden_size_per_tp = self.hidden_size // tp_size if (self.head_dim * self.num_heads) != self.hidden_size: raise ValueError( f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size} and " f"`num_heads`: {self.num_heads})." 
) column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear() row_kwargs = tp_utils.get_default_kwargs_for_row_parallel_linear() if megatron_config is not None: assert column_kwargs.get("config", False), "must have ModelParallelConfig" assert row_kwargs.get("config", False), "must have ModelParallelConfig" tp_utils.update_kwargs_with_config(column_kwargs, megatron_config) tp_utils.update_kwargs_with_config(row_kwargs, megatron_config) # [self.q_size, self.k_size, self.v_size] self.qkv_proj = QKVParallelLinear( input_size=self.hidden_size, num_heads=self.num_heads, num_key_value_heads=self.num_key_value_heads, head_dim=self.head_dim, # bias=config.attention_bias, bias=True, gather_output=False, skip_bias_add=False, **column_kwargs, ) self.q_size = self.num_heads_per_tp * self.head_dim self.k_size = self.num_key_value_heads_per_tp * self.head_dim self.v_size = self.num_key_value_heads_per_tp * self.head_dim self.o_proj = tensor_parallel.RowParallelLinear( input_size=self.num_heads * self.head_dim, output_size=self.hidden_size, # bias=config.attention_bias, bias=False, input_is_parallel=True, skip_bias_add=False, **row_kwargs, ) self._init_rope() def _init_rope(self): self.rotary_emb = Qwen2RotaryEmbedding( self.head_dim, max_position_embeddings=self.max_position_embeddings, base=self.rope_theta, ) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: bsz, q_len, _ = hidden_states.size() qkv = self.qkv_proj(hidden_states)[0] query_states, key_states, value_states = qkv.split([self.q_size, self.k_size, self.v_size], dim=-1) query_states = query_states.view(bsz, q_len, self.num_heads_per_tp, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads_per_tp, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads_per_tp, self.head_dim).transpose(1, 2) kv_seq_len = key_states.shape[-2] cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) if attn_weights.size() != (bsz, self.num_heads_per_tp, q_len, kv_seq_len): raise ValueError( f"Attention weights should be of size {(bsz, self.num_heads_per_tp, q_len, kv_seq_len)}, " f"but is {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights + attention_mask # upcast attention to fp32 attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (bsz, self.num_heads_per_tp, q_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads_per_tp, q_len, self.head_dim)}, " f"but is {attn_output.size()}" ) attn_output = attn_output.transpose(1, 2).contiguous() 
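        # Illustrative note (kept as comments so as not to alter the forward pass):
        # the eager path above is plain scaled-dot-product attention and, for q/k/v of
        # shape (bsz, n_heads, seq, head_dim) with an additive float mask, it can be
        # cross-checked against PyTorch's fused kernel:
        #
        #     ref = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=mask)
        #     manual = torch.softmax(
        #         q @ k.transpose(-2, -1) / math.sqrt(q.shape[-1]) + mask, dim=-1
        #     ) @ v
        #     torch.testing.assert_close(ref, manual, rtol=1e-4, atol=1e-4)
        #
        # (hypothetical names; `mask` plays the role of `attention_mask` above, and the
        # reference softmax runs in the compute dtype rather than the fp32 upcast used here)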
attn_output = attn_output.reshape(bsz, q_len, self.hidden_size_per_tp) attn_output = self.o_proj(attn_output)[0] return attn_output """ Remove padding Attention - Using Flash-attn 2 - Compatible with sequence parallel """ def apply_rotary_pos_emb_rmpad(q, k, cos, sin, position_ids, indices, sequence_length): batch_size = position_ids.shape[0] q = pad_input(q, indices, batch_size, sequence_length) # (batch_size, seqlen, num_head, head_dim) k = pad_input(k, indices, batch_size, sequence_length) cos = cos[position_ids].unsqueeze(2) # [bs, seq_len, 1, dim] sin = sin[position_ids].unsqueeze(2) # [bs, seq_len, 1, dim] q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) q_embed = index_first_axis(rearrange(q_embed, "b s ... -> (b s) ..."), indices) k_embed = index_first_axis(rearrange(k_embed, "b s ... -> (b s) ..."), indices) return q_embed, k_embed # use flash-attn rotary embeddings with rmpad # cos/sin shoudl be: (seq_length, rotary_dim / 2) def apply_rotary_pos_emb_rmpad_flash(q, k, cos, sin, cu_seqlens, max_seqlen): q_embed = apply_rotary_emb( q, cos, sin, interleaved=False, inplace=False, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen ) k_embed = apply_rotary_emb( k, cos, sin, interleaved=False, inplace=False, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen ) return q_embed, k_embed class ParallelQwen2AttentionRmPad(ParallelQwen2Attention): def forward( self, hidden_states: torch.Tensor, position_ids: Optional[torch.LongTensor] = None, sequence_length: int = None, indices: torch.Tensor = None, cu_seqlens: torch.Tensor = None, max_seqlen_in_batch: int = None, ): total_nnz, _, _ = hidden_states.size() # This is the total_nnz padded after sequence parallel if self.megatron_config.sequence_parallel: total_nnz = total_nnz * mpu.get_tensor_model_parallel_world_size() qkv = self.qkv_proj(hidden_states)[0] query_states, key_states, value_states = qkv.split( [self.q_size, self.k_size, self.v_size], dim=-1 ) # (total_nnz, 1, hidden_size) if self.megatron_config.sequence_parallel: sequence_parallel_pad = total_nnz - cu_seqlens[-1] total_nnz = cu_seqlens[-1] # total_nnz before sp padding query_states = query_states[:total_nnz] key_states = key_states[:total_nnz] value_states = value_states[:total_nnz] # Flash attention requires the input to have the shape # batch_size x seq_length x head_dime x hidden_dim # therefore we just need to keep the original shape query_states = query_states.view(total_nnz, self.num_heads_per_tp, self.head_dim) key_states = key_states.view(total_nnz, self.num_key_value_heads_per_tp, self.head_dim) value_states = value_states.view(total_nnz, self.num_key_value_heads_per_tp, self.head_dim) cos, sin = self.rotary_emb(value_states, seq_len=sequence_length) cos, sin = cos[:, : cos.shape[1] // 2], sin[:, : sin.shape[1] // 2] # flash attn only needs half query_states, key_states = apply_rotary_pos_emb_rmpad_flash( query_states, key_states, cos, sin, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen_in_batch ) # query_states, key_states = apply_rotary_pos_emb_rmpad(query_states, key_states, cos, sin, # position_ids, indices, # It is recommended to use dropout with FA according to the docs # when training. dropout_rate = 0.0 # if not self.training else self.attn_dropout # In PEFT, usually we cast the layer norms in float32 for training stability reasons # therefore the input hidden states gets silently casted in float32. Hence, we need # cast them back in float16 just to be sure everything works as expected. 
# This might slowdown training & inference so it is recommended to not cast the LayerNorms # in fp32. (Qwen2RMSNorm handles it correctly) input_dtype = query_states.dtype if input_dtype == torch.float32: query_states = query_states.to(torch.float16) key_states = key_states.to(torch.float16) value_states = value_states.to(torch.float16) attn_output_unpad = flash_attn_varlen_func( query_states, key_states, value_states, cu_seqlens_q=cu_seqlens, cu_seqlens_k=cu_seqlens, max_seqlen_q=max_seqlen_in_batch, max_seqlen_k=max_seqlen_in_batch, dropout_p=dropout_rate, softmax_scale=None, causal=True, ) attn_output_unpad = attn_output_unpad.to(input_dtype) attn_output_unpad = attn_output_unpad.reshape(total_nnz, 1, self.hidden_size_per_tp).contiguous() # sequence parallel reduce_scatter is performed inside RowColumnParallel if enabled # Here we need to repad if self.megatron_config.sequence_parallel: attn_output_unpad = F.pad(attn_output_unpad, pad=(0, 0, 0, 0, 0, sequence_parallel_pad)) attn_output_unpad = self.o_proj(attn_output_unpad)[0] return attn_output_unpad ================================================ FILE: verl_rl/verl/models/qwen2/megatron/layers/parallel_decoder.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. # # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX # and OPT implementations in this library. It has been modified from its # original forms to accommodate minor architectural differences compared # to GPT-NeoX and OPT used by the Meta AI team that trained the model. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
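# Illustrative sketch (not from the original verl sources): both decoder layers below
# follow the standard pre-norm residual pattern; stripped of tensor/sequence
# parallelism it reduces to two residual branches around normalized sub-blocks:
def _pre_norm_block(x, attn, mlp, norm1, norm2):
    # x = x + attn(input_layernorm(x)); x = x + mlp(post_attention_layernorm(x))
    x = x + attn(norm1(x))
    x = x + mlp(norm2(x))
    return x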
from typing import Optional import torch from megatron.core import ModelParallelConfig from torch import nn from transformers import Qwen2Config from verl.utils.megatron_utils import TransformerConfig, convert_config from .parallel_attention import ParallelQwen2Attention, ParallelQwen2AttentionRmPad from .parallel_mlp import ParallelQwen2MLP from .parallel_rmsnorm import ParallelQwen2RMSNorm class ParallelQwen2DecoderLayer(nn.Module): def __init__(self, config: Qwen2Config, megatron_config: ModelParallelConfig, layer_idx: int): super().__init__() self.config: TransformerConfig = convert_config(config, megatron_config) self.layer_idx = layer_idx self.hidden_size = config.hidden_size self.self_attn = ParallelQwen2Attention(config=config, megatron_config=megatron_config) self.mlp = ParallelQwen2MLP(config, megatron_config=megatron_config) self.input_layernorm = ParallelQwen2RMSNorm(config, megatron_config) self.post_attention_layernorm = ParallelQwen2RMSNorm(config, megatron_config) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). 
past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states """ residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # Note: sequence parallel is hidden inside ColumnParallelLinear # reduce scatter is hidden inside RowParallelLinear # Self Attention hidden_states = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, ) # TODO: add sequence parallel operator reduce_scatter here hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) # TODO: add sequence parallel operator all_gather here hidden_states = self.mlp(hidden_states) # TODO: add sequence parallel operator reduce_scatter here hidden_states = residual + hidden_states outputs = hidden_states return outputs class ParallelQwen2DecoderLayerRmPad(nn.Module): def __init__(self, config: Qwen2Config, megatron_config: ModelParallelConfig, layer_idx: int): super().__init__() self.config: TransformerConfig = convert_config(config, megatron_config) self.hidden_size = config.hidden_size self.layer_idx = layer_idx self.self_attn = ParallelQwen2AttentionRmPad(config=config, megatron_config=megatron_config) self.mlp = ParallelQwen2MLP(config, megatron_config=megatron_config) self.input_layernorm = ParallelQwen2RMSNorm(config, megatron_config) self.post_attention_layernorm = ParallelQwen2RMSNorm(config, megatron_config) def forward( self, hidden_states: torch.Tensor, position_ids: Optional[torch.LongTensor] = None, sequence_length: int = None, indices: torch.Tensor = None, cu_seqlens: int = None, max_seqlen_in_batch: int = None, ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: residual = hidden_states # (total_nnz // sp, 1, hidden_size) hidden_states = self.input_layernorm(hidden_states) # Self Attention # (total_nnz // sp, 1, hidden_size) -> all-gather (total_nnz, 1, hidden_size) # -> col + row -> reduce-scatter -> (total_nnz // sp, 1, hidden_size) hidden_states = self.self_attn( hidden_states=hidden_states, position_ids=position_ids, sequence_length=sequence_length, indices=indices, cu_seqlens=cu_seqlens, max_seqlen_in_batch=max_seqlen_in_batch, ) hidden_states = residual + hidden_states # Fully Connected # shape changes same as attn residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = hidden_states return outputs ================================================ FILE: verl_rl/verl/models/qwen2/megatron/layers/parallel_linear.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2023 The vLLM team. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
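# Illustrative sketch (not from the original verl sources): the classes below subclass
# Megatron's ColumnParallelLinear, which shards the *output* features across TP ranks
# (outputs are concatenated), whereas RowParallelLinear shards the *input* features
# (partial outputs are summed). A single-process illustration with two simulated ranks:
def _demo_tp_linear_split():
    import torch

    x = torch.randn(3, 8)
    w = torch.randn(6, 8)  # (out_features, in_features)

    # column parallel: split rows of W (output features), concatenate the outputs
    w0, w1 = torch.chunk(w, 2, dim=0)
    y_col = torch.cat([x @ w0.t(), x @ w1.t()], dim=-1)

    # row parallel: split columns of W (input features), sum the partial outputs
    wa, wb = torch.chunk(w, 2, dim=1)
    xa, xb = torch.chunk(x, 2, dim=1)
    y_row = xa @ wa.t() + xb @ wb.t()

    torch.testing.assert_close(y_col, x @ w.t())
    torch.testing.assert_close(y_row, x @ w.t())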
# Adapted from https://github.com/vllm-project/vllm/blob/main/vllm/model_executor/layers/linear.py from megatron.core import tensor_parallel class QKVParallelLinear(tensor_parallel.ColumnParallelLinear): def __init__( self, input_size, num_heads, num_key_value_heads, head_dim, *, bias=True, gather_output=True, skip_bias_add=False, **kwargs, ): # Keep input parameters, and already restrict the head numbers self.input_size = input_size self.q_output_size = num_heads * head_dim self.kv_output_size = num_key_value_heads * head_dim self.head_dim = head_dim self.gather_output = gather_output self.skip_bias_add = skip_bias_add input_size = self.input_size output_size = (num_heads + 2 * num_key_value_heads) * self.head_dim super().__init__( input_size=input_size, output_size=output_size, bias=bias, gather_output=gather_output, skip_bias_add=skip_bias_add, **kwargs, ) class MergedColumnParallelLinear(tensor_parallel.ColumnParallelLinear): def __init__( self, input_size, gate_ouput_size, up_output_size, *, bias=True, gather_output=True, skip_bias_add=False, **kwargs, ): # Keep input parameters, and already restrict the head numbers self.input_size = input_size self.output_size = gate_ouput_size + up_output_size self.gather_output = gather_output self.skip_bias_add = skip_bias_add super().__init__( input_size=self.input_size, output_size=self.output_size, bias=bias, gather_output=gather_output, skip_bias_add=skip_bias_add, **kwargs, ) ================================================ FILE: verl_rl/verl/models/qwen2/megatron/layers/parallel_mlp.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. # # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX # and OPT implementations in this library. It has been modified from its # original forms to accommodate minor architectural differences compared # to GPT-NeoX and OPT used by the Meta AI team that trained the model. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
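# Illustrative sketch (not from the original verl sources): ParallelQwen2MLP below is
# the standard gated (SwiGLU-style) MLP, down(silu(gate(x)) * up(x)), with gate and up
# fused into a single column-parallel projection and split afterwards. A plain
# nn.Linear reference with hypothetical sizes:
def _reference_gated_mlp(hidden_size: int = 8, intermediate_size: int = 16):
    import torch
    from torch import nn

    gate_up = nn.Linear(hidden_size, 2 * intermediate_size, bias=False)
    down = nn.Linear(intermediate_size, hidden_size, bias=False)

    def forward(x: torch.Tensor) -> torch.Tensor:
        gate, up = gate_up(x).split(intermediate_size, dim=-1)
        return down(nn.functional.silu(gate) * up)

    return forward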
from megatron.core import ModelParallelConfig, tensor_parallel from megatron.core import parallel_state as mpu from torch import nn from transformers.activations import ACT2FN from verl.models.qwen2.megatron.layers.parallel_linear import MergedColumnParallelLinear from verl.utils.megatron import tensor_parallel as tp_utils class ParallelQwen2MLP(nn.Module): def __init__(self, config, megatron_config: ModelParallelConfig = None) -> None: super().__init__() self.config = config self.hidden_size = config.hidden_size self.intermediate_size = config.intermediate_size # The weight is only [hidden_size, intermediate_size // model_parallel_world_size] column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear() row_kwargs = tp_utils.get_default_kwargs_for_row_parallel_linear() if megatron_config is not None: assert column_kwargs.get("config", False), "must have ModelParallelConfig" assert row_kwargs.get("config", False), "must have ModelParallelConfig" tp_utils.update_kwargs_with_config(row_kwargs, megatron_config) tp_utils.update_kwargs_with_config(column_kwargs, megatron_config) tp_size = mpu.get_tensor_model_parallel_world_size() self.gate_up_proj = MergedColumnParallelLinear( input_size=self.hidden_size, gate_ouput_size=self.intermediate_size, up_output_size=self.intermediate_size, bias=False, gather_output=False, skip_bias_add=False, **column_kwargs, ) self.gate_size = self.intermediate_size // tp_size self.down_proj = tensor_parallel.RowParallelLinear( input_size=self.intermediate_size, output_size=self.hidden_size, bias=False, input_is_parallel=True, skip_bias_add=False, **row_kwargs, ) self.act_fn = ACT2FN[config.hidden_act] def forward(self, x): gate_up = self.gate_up_proj(x)[0] gate, up = gate_up.split(self.gate_size, dim=-1) return self.down_proj(self.act_fn(gate) * up)[0] ================================================ FILE: verl_rl/verl/models/qwen2/megatron/layers/parallel_rmsnorm.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
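# Illustrative sketch (not from the original verl sources): fused_rms_norm_affine used
# below computes RMSNorm, y = x / sqrt(mean(x^2, dim=-1) + eps) * weight, i.e.
# LayerNorm without mean-centering. An unfused reference implementation:
def _reference_rms_norm(x, weight, eps: float = 1e-6):
    import torch

    hidden = x.to(torch.float32)  # compute in fp32 for stability, then cast back
    hidden = hidden * torch.rsqrt(hidden.pow(2).mean(dim=-1, keepdim=True) + eps)
    return weight * hidden.to(x.dtype)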
import numbers import torch from apex.normalization.fused_layer_norm import fused_rms_norm_affine from megatron.core import ModelParallelConfig from torch import nn from transformers import Qwen2Config from verl.utils.megatron import sequence_parallel as sp_utils class ParallelQwen2RMSNorm(nn.Module): def __init__(self, config: Qwen2Config, megatron_config: ModelParallelConfig): """ Qwen2RMSNorm is equivalent to T5LayerNorm """ super().__init__() if isinstance(config.hidden_size, numbers.Integral): normalized_shape = (config.hidden_size,) self.normalized_shape = torch.Size(normalized_shape) self.weight = nn.Parameter(torch.ones(self.normalized_shape)) self.variance_epsilon = config.rms_norm_eps if megatron_config.sequence_parallel: sp_utils.mark_parameter_as_sequence_parallel(self.weight) def forward(self, hidden_states): return fused_rms_norm_affine( input=hidden_states, weight=self.weight, normalized_shape=self.normalized_shape, eps=self.variance_epsilon, memory_efficient=True, ) ================================================ FILE: verl_rl/verl/models/qwen2/megatron/modeling_qwen2_megatron.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. # # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX # and OPT implementations in this library. It has been modified from its # original forms to accommodate minor architectural differences compared # to GPT-NeoX and OPT used by the Meta AI team that trained the model. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch Qwen2 model.""" from typing import Optional import torch import torch.utils.checkpoint from megatron.core import ModelParallelConfig, mpu, parallel_state, tensor_parallel from torch import nn from transformers.modeling_outputs import BaseModelOutputWithPast from transformers.models.qwen2.configuration_qwen2 import Qwen2Config from transformers.models.qwen2.modeling_qwen2 import CausalLMOutputWithPast from verl.utils.device import get_device_name from verl.utils.megatron import sequence_parallel as sp_utils from verl.utils.megatron import tensor_parallel as tp_utils from verl.utils.megatron_utils import TransformerConfig, convert_config from .layers import ParallelQwen2DecoderLayer, ParallelQwen2DecoderLayerRmPad, ParallelQwen2RMSNorm """ TODO: 1. Add weight initialization. Here we need to be careful on TP weight init. 2. Add sequence parallel 3. Load checkpoint from Qwen2 pretrained checkpoint """ # Copied from transformers.models.bart.modeling_bart._make_causal_mask def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device): """ Make causal mask used for bi-directional self-attention. 
""" bsz, tgt_len = input_ids_shape mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device) mask_cond = torch.arange(mask.size(-1), device=device) mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) mask = mask.to(dtype) return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len) # Copied from transformers.models.bart.modeling_bart._expand_mask def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. """ bsz, src_len = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) class ParallelQwen2Model(nn.Module): """ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Qwen2DecoderLayer`] Args: config: Qwen2Config """ def __init__(self, config: Qwen2Config, megatron_config: ModelParallelConfig): super().__init__() self.config: TransformerConfig = convert_config(config, megatron_config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size embedding_kwargs = tp_utils.get_default_kwargs_for_parallel_embedding() if megatron_config is not None: assert embedding_kwargs.get("config", False), "must have ModelParallelConfig" tp_utils.update_kwargs_with_config(embedding_kwargs, megatron_config) self.embed_tokens = tensor_parallel.VocabParallelEmbedding( num_embeddings=config.vocab_size, embedding_dim=config.hidden_size, **embedding_kwargs ) self.layers = nn.ModuleList( [ParallelQwen2DecoderLayer(config, megatron_config) for _ in range(config.num_hidden_layers)] ) self.norm = ParallelQwen2RMSNorm(config, megatron_config) # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds): # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] combined_attention_mask = None if input_shape[-1] > 1: combined_attention_mask = _make_causal_mask( input_shape, inputs_embeds.dtype, device=inputs_embeds.device, ) if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to( inputs_embeds.device ) combined_attention_mask = ( expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask ) return combined_attention_mask def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, ) -> tuple | BaseModelOutputWithPast: """ Args: input_ids: input ids. shape (batch_size, seq_length) attention_mask: attention_mask. shape (batch_size, seq_length) position_ids: position ids. 
                shape (batch_size, seq_length)

        Returns:
        """
        batch_size, seq_length = input_ids.shape
        inputs_embeds = self.embed_tokens(input_ids)
        # embed positions
        attention_mask = self._prepare_decoder_attention_mask(attention_mask, (batch_size, seq_length), inputs_embeds)

        hidden_states = inputs_embeds
        for idx, decoder_layer in enumerate(self.layers):
            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=attention_mask,
                position_ids=position_ids,
            )
            hidden_states = layer_outputs

        hidden_states = self.norm(hidden_states)
        return hidden_states


class ParallelQwen2ForCausalLM(nn.Module):
    def __init__(self, config: Qwen2Config, megatron_config: ModelParallelConfig):
        super().__init__()
        self.config: TransformerConfig = convert_config(config, megatron_config)
        self.megatron_config = megatron_config
        self.model = ParallelQwen2Model(config, megatron_config=megatron_config)
        self.vocab_size = config.vocab_size

        column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear()
        if megatron_config is not None:
            assert column_kwargs.get("config", False), "must have ModelParallelConfig"
            tp_utils.update_kwargs_with_config(column_kwargs, self.megatron_config)

        self.lm_head = tensor_parallel.ColumnParallelLinear(
            input_size=config.hidden_size,
            output_size=config.vocab_size,
            bias=False,
            gather_output=False,
            skip_bias_add=False,
            **column_kwargs,
        )

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
    ) -> tuple | CausalLMOutputWithPast:
        r"""
        Args:
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in
                `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100`
                are ignored (masked); the loss is only computed for the tokens with labels in
                `[0, ..., config.vocab_size]`.

        Returns:
        """
        # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
        )

        hidden_states = outputs
        logits = self.lm_head(hidden_states)[0]

        logits = tensor_parallel.gather_from_tensor_model_parallel_region(logits)
        logits = logits.float()
        return CausalLMOutputWithPast(
            loss=None,
            logits=logits,
            past_key_values=None,
            hidden_states=None,
            attentions=None,
        )


from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input  # noqa


class ParallelQwen2ModelRmPad(nn.Module):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers.
Each layer is a [`Qwen2DecoderLayer`] Args: config: Qwen2Config """ def __init__(self, config: Qwen2Config, megatron_config: ModelParallelConfig): super().__init__() self.config: TransformerConfig = convert_config(config, megatron_config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size embedding_kwargs = tp_utils.get_default_kwargs_for_parallel_embedding() self.megatron_config = megatron_config if megatron_config is not None: assert embedding_kwargs.get("config", False), "must have ModelParallelConfig" tp_utils.update_kwargs_with_config(embedding_kwargs, self.megatron_config) self.embed_tokens = tensor_parallel.VocabParallelEmbedding( num_embeddings=config.vocab_size, embedding_dim=config.hidden_size, **embedding_kwargs ) self.layers = nn.ModuleList( [ParallelQwen2DecoderLayerRmPad(config, megatron_config) for _ in range(config.num_hidden_layers)] ) self.norm = ParallelQwen2RMSNorm(config, megatron_config) def forward( self, input_ids: torch.Tensor, position_ids: Optional[torch.LongTensor] = None, sequence_length: int = None, indices: torch.Tensor = None, cu_seqlens: int = None, max_seqlen_in_batch: int = None, ) -> tuple | BaseModelOutputWithPast: """ Args: input_ids: input ids. shape (1, totol_nnz) position_ids: position ids. shape (batch_size, seq_length) Returns: """ inputs_embeds = self.embed_tokens(input_ids) # (1, total_nnz) -> (1, total_nnz, hidden_size) # (1, total_nnz, hidden_size) -> (total_nnz, 1, hidden_size) -> (total_nnz // sp, 1, hidden_size) inputs_embeds = inputs_embeds.transpose(0, 1) if self.megatron_config.sequence_parallel: inputs_embeds = tensor_parallel.scatter_to_sequence_parallel_region(inputs_embeds) hidden_states = inputs_embeds for idx, decoder_layer in enumerate(self.layers): layer_outputs = decoder_layer( hidden_states, position_ids=position_ids, sequence_length=sequence_length, indices=indices, cu_seqlens=cu_seqlens, max_seqlen_in_batch=max_seqlen_in_batch, ) hidden_states = layer_outputs hidden_states = self.norm(hidden_states) return hidden_states class ParallelQwen2ForCausalLMRmPad(nn.Module): def __init__(self, config: Qwen2Config, megatron_config: ModelParallelConfig): super().__init__() self.config: TransformerConfig = convert_config(config, megatron_config) self.megatron_config = megatron_config self.model = ParallelQwen2ModelRmPad(config, megatron_config=megatron_config) self.vocab_size = config.vocab_size self._init_head(config) def _init_head(self, config: Qwen2Config): column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear() if self.megatron_config is not None: assert column_kwargs.get("config", False), "must have ModelParallelConfig" tp_utils.update_kwargs_with_config(column_kwargs, self.megatron_config) self.lm_head = tensor_parallel.ColumnParallelLinear( input_size=config.hidden_size, output_size=config.vocab_size, bias=False, gather_output=False, skip_bias_add=False, **column_kwargs, ) def _forward_head(self, hidden_states): # all_gather from sequence parallel region is performed inside lm_head logits = self.lm_head(hidden_states)[0] logits = logits.float() # (total_nnz_padded, 1, vocab_size // tp) logits = tensor_parallel.gather_from_tensor_model_parallel_region(logits) # (total_nnz_padded, 1, vocab_size) return logits def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, ) -> tuple | CausalLMOutputWithPast: r""" Args: labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): 
                Labels for computing the masked language modeling loss. Indices should either be in
                `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100`
                are ignored (masked); the loss is only computed for the tokens with labels in
                `[0, ..., config.vocab_size]`.

        Returns:
        """
        batch_size, sequence_length = input_ids.shape

        # remove padding here
        input_ids, indices, cu_seqlens, max_seqlen_in_batch, *_ = unpad_input(
            input_ids.unsqueeze(dim=-1), attention_mask
        )  # (total_nnz, 1)

        # pad input_ids to multiple of tp for all tp ranks
        # TODO: for better performance, the sp padding should be removed at each layer; not sure of the performance gap
        if self.megatron_config.sequence_parallel:
            input_ids = sp_utils.pad_to_sequence_parallel(input_ids)

        input_ids = input_ids.transpose(0, 1)  # (1, total_nnz + pad)

        outputs = self.model(
            input_ids=input_ids,
            position_ids=position_ids,
            sequence_length=sequence_length,
            indices=indices,
            cu_seqlens=cu_seqlens,
            max_seqlen_in_batch=max_seqlen_in_batch,
        )

        hidden_states = outputs

        logits = self._forward_head(hidden_states)

        # remove padding from sequence parallel
        if self.megatron_config.sequence_parallel:
            total_nnz = cu_seqlens[-1]
            logits = logits[:total_nnz]  # (total_nnz_padded)

        logits = torch.squeeze(logits, dim=1)  # remove the artificial batch dimension

        # add removed padding back
        logits = pad_input(
            logits, indices, batch_size, seqlen=sequence_length
        )  # (batch_size, sequence_length, vocab_size)

        return CausalLMOutputWithPast(
            loss=None,
            logits=logits,
            past_key_values=None,
            hidden_states=None,
            attentions=None,
        )


class ParallelQwen2ForValueRmPad(ParallelQwen2ForCausalLMRmPad):
    def _init_head(self, config):
        column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear()
        if self.megatron_config is not None:
            assert column_kwargs.get("config", False), "must have ModelParallelConfig"
            tp_utils.update_kwargs_with_config(column_kwargs, self.megatron_config)
        self.lm_head = nn.Linear(in_features=config.hidden_size, out_features=1, bias=False)
        # lm_head is effectively the same as sequence parallel
        sp_utils.mark_parameter_as_sequence_parallel(self.lm_head.weight)

    def _forward_head(self, hidden_states):
        logits = self.lm_head(hidden_states)  # (total_nnz_padded // tp, 1, 1)
        logits = logits.float()
        if self.megatron_config.sequence_parallel:
            logits = tensor_parallel.gather_from_sequence_parallel_region(logits, tensor_parallel_output_grad=False)
        return logits

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
    ) -> tuple | CausalLMOutputWithPast:
        output = super().forward(input_ids, attention_mask, position_ids)
        output.logits = torch.squeeze(output.logits, dim=-1)
        return output


"""
Support pipeline parallelism
"""


class ParallelQwen2ModelRmPadPP(nn.Module):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers.
    Each layer is a [`Qwen2DecoderLayer`].

    This model definition supports pipeline parallelism. To support pp and vpp:
    - This model only contains the layers in this pp stage and vpp chunk.
    - When calling get_model in Megatron, this rank will instantiate all the vpp chunks in this pp.
Args: config: Qwen2Config """ def __init__(self, config: Qwen2Config, megatron_config: ModelParallelConfig, pre_process, post_process): super().__init__() self.config: TransformerConfig = convert_config(config, megatron_config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.pre_process = pre_process self.post_process = post_process self.megatron_config = megatron_config embedding_kwargs = tp_utils.get_default_kwargs_for_parallel_embedding() if megatron_config is not None: assert embedding_kwargs.get("config", False), "must have ModelParallelConfig" tp_utils.update_kwargs_with_config(embedding_kwargs, self.megatron_config) if pre_process: self.embed_tokens = tensor_parallel.VocabParallelEmbedding( num_embeddings=config.vocab_size, embedding_dim=config.hidden_size, **embedding_kwargs ) else: self.embed_tokens = None pp_rank = mpu.get_pipeline_model_parallel_rank() pp_size = megatron_config.pipeline_model_parallel_size self.num_layer_per_pp = config.num_hidden_layers // pp_size vpp_size = megatron_config.virtual_pipeline_model_parallel_size vpp_rank = mpu.get_virtual_pipeline_model_parallel_rank() if vpp_size is not None: self.num_layer_vpp_chunk = self.num_layer_per_pp // vpp_size self.num_layer_this_model = self.num_layer_vpp_chunk offset = vpp_rank * (config.num_hidden_layers // vpp_size) + (pp_rank * self.num_layer_vpp_chunk) else: self.num_layer_this_model = self.num_layer_per_pp offset = pp_rank * self.num_layer_per_pp self.layers = nn.ModuleList() for i in range(self.num_layer_this_model): layer = ParallelQwen2DecoderLayerRmPad(config, megatron_config, layer_idx=i + offset) self.layers.add_module(f"{i}", layer) if post_process: self.norm = ParallelQwen2RMSNorm(config, megatron_config) else: self.norm = None def set_input_tensor(self, input_tensor): """Set input tensor to be used instead of forward()'s input. When doing pipeline parallelism the input from the previous stage comes from communication, not from the input, so the model's forward_step_func won't have it. This function is thus used by internal code to bypass the input provided by the forward_step_func""" self.input_tensor = input_tensor def forward( self, input_ids: torch.Tensor, position_ids: Optional[torch.LongTensor] = None, sequence_length: int = None, indices: torch.Tensor = None, cu_seqlens: int = None, max_seqlen_in_batch: int = None, ) -> tuple | BaseModelOutputWithPast: """ Args: input_ids: input ids. shape (1, totol_nnz) position_ids: position ids. 
shape (batch_size, seq_length) Returns: """ if self.pre_process: inputs_embeds = self.embed_tokens(input_ids) # (1, total_nnz) -> (1, total_nnz, hidden_size) # vocab parallel embedding will not do sequence parallel reduce-scatter in open source megatron # so need to deal with it by handle here: # (1, total_nnz, hidden_size) -> (total_nnz, 1, hidden_size) -> (total_nnz // sp, 1, hidden_size) inputs_embeds = inputs_embeds.transpose(0, 1) if self.megatron_config.sequence_parallel: inputs_embeds = tensor_parallel.scatter_to_sequence_parallel_region(inputs_embeds) hidden_states = inputs_embeds else: # self.hidden_states should be passed by Megatron hidden_states = self.input_tensor for idx, decoder_layer in enumerate(self.layers): layer_outputs = decoder_layer( hidden_states, position_ids=position_ids, sequence_length=sequence_length, indices=indices, cu_seqlens=cu_seqlens, max_seqlen_in_batch=max_seqlen_in_batch, ) hidden_states = layer_outputs if self.post_process: hidden_states = self.norm(hidden_states) return hidden_states class ParallelQwen2ForCausalLMRmPadPP(nn.Module): def __init__( self, config: Qwen2Config, megatron_config: ModelParallelConfig, pre_process, post_process, share_embeddings_and_output_weights, ): super().__init__() self.config: TransformerConfig = convert_config(config, megatron_config) self.megatron_config = megatron_config self.model = ParallelQwen2ModelRmPadPP( config, megatron_config=megatron_config, pre_process=pre_process, post_process=post_process ) self.share_embeddings_and_output_weights = share_embeddings_and_output_weights self.vocab_size = config.vocab_size self.pre_process = pre_process self.post_process = post_process if post_process: self._init_head(config) if pre_process or post_process: self.setup_embeddings_and_output_layer() def set_input_tensor(self, input_tensor): """Set input tensor to be used instead of forward()'s input. When doing pipeline parallelism the input from the previous stage comes from communication, not from the input, so the model's forward_step_func won't have it. This function is thus used by internal code to bypass the input provided by the forward_step_func""" assert len(input_tensor) == 1 self.model.set_input_tensor(input_tensor[0]) def _init_head(self, config): column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear() if self.megatron_config is not None: assert column_kwargs.get("config", False), "must have ModelParallelConfig" tp_utils.update_kwargs_with_config(column_kwargs, self.megatron_config) self.lm_head = tensor_parallel.ColumnParallelLinear( input_size=config.hidden_size, output_size=config.vocab_size, bias=False, gather_output=False, skip_bias_add=False, skip_weight_param_allocation=self.pre_process and self.share_embeddings_and_output_weights, **column_kwargs, ) def setup_embeddings_and_output_layer(self) -> None: """Sets up embedding layer in first stage and output layer in last stage. This function initalizes word embeddings in the final stage when we are using pipeline parallelism and sharing word embeddings, and sets up param attributes on the embedding and output layers. """ # Set `is_embedding_or_output_parameter` attribute. 
if self.pre_process: self.model.embed_tokens.weight.is_embedding_or_output_parameter = True if self.post_process and self.lm_head.weight is not None: self.lm_head.weight.is_embedding_or_output_parameter = True if not self.share_embeddings_and_output_weights: return if parallel_state.get_pipeline_model_parallel_world_size() == 1: # Zero out wgrad if sharing embeddings between two layers on same # pipeline stage to make sure grad accumulation into main_grad is # correct and does not include garbage values (e.g., from torch.empty). self.shared_embedding_or_output_weight().zero_out_wgrad = True return if parallel_state.is_pipeline_first_stage() and self.pre_process and not self.post_process: self.shared_embedding_or_output_weight().shared_embedding = True if self.post_process and not self.pre_process: assert not parallel_state.is_pipeline_first_stage() # set word_embeddings weights to 0 here, then copy first # stage's weights using all_reduce below. self.lm_head.weight.data.fill_(0) self.lm_head.weight.shared = True self.lm_head.weight.shared_embedding = True if torch.distributed.is_initialized() and parallel_state.is_rank_in_embedding_group(): weight = self.shared_embedding_or_output_weight() weight.data = weight.data.to(get_device_name()) torch.distributed.all_reduce(weight.data, group=parallel_state.get_embedding_group()) def shared_embedding_or_output_weight(self) -> torch.Tensor: if self.pre_process: return self.model.embed_tokens.weight elif self.post_process: return self.lm_head.weight return None def _forward_head(self, hidden_states): # all_gather from sequence parallel region is performed inside lm_head # print(f'logits shape before forward_head: {hidden_states.shape}, vocab_size = ' # f'{self.config.vocab_size}') # [4, 32, 4096] output_weight = None if self.share_embeddings_and_output_weights: output_weight = self.shared_embedding_or_output_weight() logits = self.lm_head(hidden_states, weight=output_weight)[0] # print(f'logits shape after forward_head: {logits.shape}') # [8, 32, 8] logits = logits.float() # (total_nnz_padded, 1, vocab_size // tp) return logits def forward( self, # original input *, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, ) -> tuple | CausalLMOutputWithPast: r""" Args: labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Returns: ```""" # Note that input_ids, attention_mask and position_ids should be passed to every pp layer. # In the first pp, input_ids will be used, in other pp layers hidden_states will be used inside self.model batch_size, sequence_length = input_ids.shape # remove padding here input_ids_rmpad, indices, cu_seqlens, max_seqlen_in_batch, *_ = unpad_input( input_ids.unsqueeze(dim=-1), attention_mask ) # (total_nnz, 1) # pad input_ids to multiple of tp for all tp ranks # TODO: for better performance, the sp padding should be removed at each layer. 
Not sure the performance gap if self.megatron_config.sequence_parallel: input_ids_rmpad = sp_utils.pad_to_sequence_parallel(input_ids_rmpad) input_ids_rmpad = input_ids_rmpad.transpose(0, 1) # (1, total_nnz+pad) outputs = self.model( input_ids=input_ids_rmpad, position_ids=position_ids, sequence_length=sequence_length, indices=indices, cu_seqlens=cu_seqlens, max_seqlen_in_batch=max_seqlen_in_batch, ) if self.post_process: hidden_states = outputs logits = self._forward_head(hidden_states) logits = torch.squeeze(logits, dim=1) # remove the artificial batch dimension # torch.Size([8, 32, 16]) # remove padding from sequence parallel if self.megatron_config.sequence_parallel: totol_nnz = cu_seqlens[-1] logits = logits[:totol_nnz] # (total_nnz_padded) # add removed padding back. If input is already rmpad, we let the caller pad_input logits = pad_input( logits, indices, batch_size, seqlen=sequence_length ) # (batch_size, sequence_length, vocab_size) return CausalLMOutputWithPast( loss=None, logits=logits, past_key_values=None, hidden_states=None, attentions=None, ) else: return outputs class ParallelQwen2ForValueRmPadPP(ParallelQwen2ForCausalLMRmPadPP): def _init_head(self, config): column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear() if self.megatron_config is not None: assert column_kwargs.get("config", False), "must have ModelParallelConfig" tp_utils.update_kwargs_with_config(column_kwargs, self.megatron_config) self.lm_head = nn.Linear(in_features=config.hidden_size, out_features=1, bias=False) # lm_head is effectively the same as sequence parallel sp_utils.mark_parameter_as_sequence_parallel(self.lm_head.weight) def _forward_head(self, hidden_states): logits = self.lm_head(hidden_states) # (total_nnz_padded // tp, 1, 1) logits = logits.float() if self.megatron_config.sequence_parallel: logits = tensor_parallel.gather_from_sequence_parallel_region(logits, tensor_parallel_output_grad=False) return logits def forward( self, *, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, ) -> tuple | CausalLMOutputWithPast: output = super().forward(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids) if self.post_process: output.logits = torch.squeeze(output.logits, dim=-1) return output else: return output ================================================ FILE: verl_rl/verl/models/registry.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib from typing import Optional import torch.nn as nn # Supported models in Megatron-LM # Architecture -> (module, class). 
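# Each entry maps an HF architecture name to (megatron module name, class names), where
#   index 0 -> actor/ref causal-LM head (pipeline-parallel, rmpad),
#   index 1 -> critic/reward value head (pipeline-parallel, rmpad),
#   index 2 -> the non-pipeline rmpad causal-LM variant.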
_MODELS = { "LlamaForCausalLM": ( "llama", ("ParallelLlamaForCausalLMRmPadPP", "ParallelLlamaForValueRmPadPP", "ParallelLlamaForCausalLMRmPad"), ), "Qwen2ForCausalLM": ( "qwen2", ("ParallelQwen2ForCausalLMRmPadPP", "ParallelQwen2ForValueRmPadPP", "ParallelQwen2ForCausalLMRmPad"), ), "MistralForCausalLM": ( "mistral", ("ParallelMistralForCausalLMRmPadPP", "ParallelMistralForValueRmPadPP", "ParallelMistralForCausalLMRmPad"), ), } # return model class class ModelRegistry: @staticmethod def load_model_cls(model_arch: str, value=False) -> Optional[type[nn.Module]]: if model_arch not in _MODELS: return None megatron = "megatron" module_name, model_cls_name = _MODELS[model_arch] if not value: # actor/ref model_cls_name = model_cls_name[0] elif value: # critic/rm model_cls_name = model_cls_name[1] module = importlib.import_module(f"verl.models.{module_name}.{megatron}.modeling_{module_name}_megatron") return getattr(module, model_cls_name, None) @staticmethod def get_supported_archs() -> list[str]: return list(_MODELS.keys()) ================================================ FILE: verl_rl/verl/models/transformers/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: verl_rl/verl/models/transformers/dense_common.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
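# The ModelRegistry above resolves a HuggingFace architecture name to the matching
# Megatron implementation. A minimal sketch of how it is typically queried
# (illustrative only; the hypothetical helper name is not part of the library,
# but the class selection mirrors the load_model_cls logic shown in registry.py):
def _registry_usage_sketch():
    from verl.models.registry import ModelRegistry

    # Actor/reference policy: first entry of the tuple, e.g. ParallelQwen2ForCausalLMRmPadPP
    actor_cls = ModelRegistry.load_model_cls("Qwen2ForCausalLM", value=False)
    # Critic/reward model: second entry, e.g. ParallelQwen2ForValueRmPadPP
    critic_cls = ModelRegistry.load_model_cls("Qwen2ForCausalLM", value=True)
    assert "Qwen2ForCausalLM" in ModelRegistry.get_supported_archs()
    return actor_cls, critic_cls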
from dataclasses import dataclass from typing import Optional, Union import torch from transformers.cache_utils import Cache from transformers.modeling_outputs import CausalLMOutputWithPast @dataclass class CausalLMOutputForPPO(CausalLMOutputWithPast): log_probs: Optional[torch.FloatTensor] = None entropy: Optional[torch.FloatTensor] = None def forward_base_model( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, ) -> CausalLMOutputWithPast: r""" Copy paste LLaMa's forward https://github.com/linkedin/Liger-Kernel/blob/main/src/liger_kernel/transformers/model/llama.py This function should be generic enough for all pure text models. ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, ) return outputs def forward_with_torch_backend( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union["Cache", list[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: int | torch.Tensor = 0, temperature: float = 1.0, **loss_kwargs, ) -> tuple | CausalLMOutputForPPO: from verl.utils.experimental.torch_functional import FusedLinearForPPO outputs = forward_base_model( self, input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, cache_position=cache_position, ) hidden_states = outputs[0] if not return_dict: raise NotImplementedError("forward_with_torch_backend has to return_dict") # Loss calculations if labels is not None: rolled_labels = torch.roll(labels, shifts=-1, dims=-1) elif input_ids is not None: rolled_labels = torch.roll(input_ids, shifts=-1, dims=-1) else: raise RuntimeError("To use forward_with_torch_backend, either labels or input_ids must be provided.") fused_linear_for_ppo = FusedLinearForPPO() log_probs, entropy = fused_linear_for_ppo.forward( hidden_states=hidden_states, vocab_weights=self.lm_head.weight, input_ids=rolled_labels, temperature=temperature, ) return CausalLMOutputForPPO( log_probs=log_probs, entropy=entropy, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def 
forward_with_triton_backend( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union["Cache", list[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: int | torch.Tensor = 0, temperature: float = 1.0, **loss_kwargs, ) -> tuple | CausalLMOutputForPPO: from verl.utils.kernel.linear_cross_entropy import linear_cross_entropy outputs = forward_base_model( self, input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, ) hidden_states = outputs[0] if not return_dict: raise NotImplementedError("forward_with_triton_backend has to return_dict") # Loss calculations if labels is not None: rolled_labels = torch.roll(labels, shifts=-1, dims=-1) elif input_ids is not None: rolled_labels = torch.roll(input_ids, shifts=-1, dims=-1) else: raise RuntimeError("To use forward_with_triton_backend, either labels or input_ids must be provided.") log_probs, entropy = linear_cross_entropy( hidden_states, self.lm_head.weight, rolled_labels, temperature, "none", ) return CausalLMOutputForPPO( log_probs=log_probs, entropy=entropy, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) ================================================ FILE: verl_rl/verl/models/transformers/kimi_vl.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional import torch import torch.nn.functional as F from transformers.cache_utils import Cache from transformers.modeling_flash_attention_utils import _flash_attention_forward from verl.utils.ulysses import ( gather_heads_scatter_seq, gather_seq_scatter_heads, get_ulysses_sequence_parallel_world_size, validate_ulysses_config, ) # Copied from transformers.models.llama.modeling_llama.rotate_half def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1) # Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1): """Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. 
position_ids (`torch.Tensor`): The position indices of the tokens corresponding to the query and key tensors. For example, this can be used to pass offsetted position ids when working with a KV-cache. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. """ cos = cos[position_ids].unsqueeze(unsqueeze_dim) sin = sin[position_ids].unsqueeze(unsqueeze_dim) b, h, s, d = q.shape q = q.view(b, h, s, d // 2, 2).transpose(4, 3).reshape(b, h, s, d) b, h, s, d = k.shape k = k.view(b, h, s, d // 2, 2).transpose(4, 3).reshape(b, h, s, d) q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed # Copied from transformers.models.llama.modeling_llama.repeat_kv def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: """ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) """ batch, num_key_value_heads, slen, head_dim = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) def _ulysses_flash_attn_forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, output_attentions: bool = False, use_cache: bool = False, **kwargs, ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: bsz, q_len, _ = hidden_states.size() if self.q_lora_rank is None: q = self.q_proj(hidden_states) else: q = self.q_b_proj(self.q_a_layernorm(self.q_a_proj(hidden_states))) q = q.view(bsz, q_len, self.num_heads, self.q_head_dim).transpose(1, 2) # Flash attention requires the input to have the shape # batch_size x seq_length x head_dim x hidden_dim # therefore we just need to keep the original shape compressed_kv = self.kv_a_proj_with_mqa(hidden_states) compressed_kv, k_pe = torch.split(compressed_kv, [self.kv_lora_rank, self.qk_rope_head_dim], dim=-1) k_pe = k_pe.view(bsz, q_len, 1, self.qk_rope_head_dim).transpose(1, 2) kv = ( self.kv_b_proj(self.kv_a_layernorm(compressed_kv)) .view(bsz, q_len, self.num_heads, self.qk_nope_head_dim + self.v_head_dim) .transpose(1, 2) ) k_nope, value_states = torch.split(kv, [self.qk_nope_head_dim, self.v_head_dim], dim=-1) # patch ulysses_sp_size = get_ulysses_sequence_parallel_world_size() if ulysses_sp_size > 1: validate_ulysses_config(self.num_heads, ulysses_sp_size) num_key_value_groups = self.config.num_attention_heads // self.config.num_key_value_heads k_pe = repeat_kv(k_pe, ulysses_sp_size) # to keep heads=1 after a2a k_nope = repeat_kv(k_nope, 
num_key_value_groups) value_states = repeat_kv(value_states, num_key_value_groups) q = gather_seq_scatter_heads(q, seq_dim=2, head_dim=1) k_pe = gather_seq_scatter_heads(k_pe, seq_dim=2, head_dim=1) k_nope = gather_seq_scatter_heads(k_nope, seq_dim=2, head_dim=1) value_states = gather_seq_scatter_heads(value_states, seq_dim=2, head_dim=1) # (batch_size, num_head / sp_size, seq_length, head_size) full_q_len = q.size(2) # full_q_len = seq_length else: full_q_len = q_len q_nope, q_pe = torch.split(q, [self.qk_nope_head_dim, self.qk_rope_head_dim], dim=-1) cos, sin = self.rotary_emb(value_states, seq_len=full_q_len) q_pe, k_pe = apply_rotary_pos_emb(q_pe, k_pe, cos, sin, position_ids) query_states = k_pe.new_empty(bsz, self.num_heads // ulysses_sp_size, full_q_len, self.q_head_dim) query_states[:, :, :, : self.qk_nope_head_dim] = q_nope query_states[:, :, :, self.qk_nope_head_dim :] = q_pe key_states = k_pe.new_empty(bsz, self.num_heads // ulysses_sp_size, full_q_len, self.q_head_dim) key_states[:, :, :, : self.qk_nope_head_dim] = k_nope key_states[:, :, :, self.qk_nope_head_dim :] = k_pe if self.q_head_dim != self.v_head_dim: value_states = F.pad(value_states, [0, self.q_head_dim - self.v_head_dim]) # TODO: These transpose are quite inefficient but Flash Attention requires the layout # [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache # to be able to avoid many of these transpose/reshape/view. query_states = query_states.transpose(1, 2) key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) dropout_rate = self.attention_dropout if self.training else 0.0 attn_output = _flash_attention_forward( query_states, key_states, value_states, attention_mask, full_q_len, dropout=dropout_rate, sliding_window=None, is_causal=self.is_causal, use_top_left_mask=self._flash_attn_uses_top_left_mask, position_ids=position_ids, # important: pass position ids softmax_scale=self.softmax_scale, ) if ulysses_sp_size > 1: attn_output = gather_heads_scatter_seq(attn_output, head_dim=2, seq_dim=1) if self.q_head_dim != self.v_head_dim: attn_output = attn_output[:, :, :, : self.v_head_dim] attn_output = attn_output.reshape(bsz, q_len, self.num_heads * self.v_head_dim).contiguous() attn_output = self.o_proj(attn_output) return attn_output, None, None ================================================ FILE: verl_rl/verl/models/transformers/llama.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
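# The repeat_kv helper above expands grouped kv heads so the Ulysses all-to-all has
# enough heads to scatter across ranks. A self-contained shape check (sketch only;
# the tensor sizes below are made up for illustration):
def _repeat_kv_shape_sketch():
    import torch

    batch, nheads_k, seqlen, head_dim = 2, 4, 16, 8  # made-up sizes
    kv = torch.randn(batch, nheads_k, seqlen, head_dim)
    n_rep = 2  # e.g. sp_size=8 with nheads_k=4 -> each kv head repeated twice
    # Same expand/reshape sequence as repeat_kv in kimi_vl.py:
    expanded = kv[:, :, None, :, :].expand(batch, nheads_k, n_rep, seqlen, head_dim)
    out = expanded.reshape(batch, nheads_k * n_rep, seqlen, head_dim)
    assert out.shape == (2, 8, 16, 8)
    return out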
import sys from typing import Callable, Optional import torch if sys.version_info >= (3, 11): pass else: pass from transformers.cache_utils import Cache from transformers.modeling_flash_attention_utils import _flash_attention_forward from transformers.models.llama.modeling_llama import apply_rotary_pos_emb from transformers.utils import logging from verl.utils.ulysses import ( gather_heads_scatter_seq, gather_seq_scatter_heads, get_ulysses_sequence_parallel_world_size, validate_ulysses_config, ) logger = logging.get_logger(__name__) def llama_flash_attn_forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, output_attentions: bool = False, use_cache: bool = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, # will become mandatory in v4.46 **kwargs, ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: """ Adapted from transformers 4.47.1 to support Ulysses sequence parallelism. NOTE: This function is used for transformers versions in the range [4.45.0, 4.47.1]. """ output_attentions = False bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) # Flash attention requires the input to have the shape # batch_size x seq_length x head_dim x hidden_dim # therefore we just need to keep the original shape query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) # trade off: repeat first and then all to all # key_states = repeat_kv(key_states, self.num_key_value_groups) # value_states = repeat_kv(value_states, self.num_key_value_groups) ########## AlltoAll for Ulysses ########## ulysses_sp_size = get_ulysses_sequence_parallel_world_size() if ulysses_sp_size > 1: validate_ulysses_config(self.num_heads, ulysses_sp_size) # (bsz, n_head, seq_len/n, head_dim) -> (bsz, n_head/n, seq_len, head_dim) query_states = gather_seq_scatter_heads(query_states, seq_dim=2, head_dim=1) key_states = gather_seq_scatter_heads(key_states, seq_dim=2, head_dim=1) value_states = gather_seq_scatter_heads(value_states, seq_dim=2, head_dim=1) full_q_len = query_states.size(2) # full seq length if position_embeddings is None: logger.warning_once( "The attention layers in this model are transitioning from computing the RoPE embeddings internally " "through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed " "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.46 `position_ids` will be " "removed and `position_embeddings` will be mandatory." 
) cos, sin = self.rotary_emb(value_states, position_ids) else: cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_value is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) # TODO: These transpose are quite inefficient but Flash Attention requires the layout # [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache # to be able to avoid many of these transpose/reshape/view. query_states = query_states.transpose(1, 2) key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) dropout_rate = self.attention_dropout if self.training else 0.0 # In PEFT, usually we cast the layer norms in float32 for training stability reasons # therefore the input hidden states gets silently casted in float32. Hence, we need # cast them back in the correct dtype just to be sure everything works as expected. # This might slowdown training & inference so it is recommended to not cast the LayerNorms # in fp32. (LlamaRMSNorm handles it correctly) input_dtype = query_states.dtype if input_dtype == torch.float32: if torch.is_autocast_enabled(): target_dtype = torch.get_autocast_gpu_dtype() # Handle the case where the model is quantized elif hasattr(self.config, "_pre_quantization_dtype"): target_dtype = self.config._pre_quantization_dtype else: target_dtype = self.q_proj.weight.dtype logger.warning_once( f"The input hidden states seems to be silently casted in float32, this might be related to " f"the fact you have upcasted embedding or layer norm layers in float32. We will cast back the " f"input in {target_dtype}." ) query_states = query_states.to(target_dtype) key_states = key_states.to(target_dtype) value_states = value_states.to(target_dtype) attn_output = _flash_attention_forward( query_states, key_states, value_states, attention_mask, full_q_len, position_ids=position_ids, dropout=dropout_rate, sliding_window=getattr(self, "sliding_window", None), use_top_left_mask=self._flash_attn_uses_top_left_mask, is_causal=self.is_causal, **kwargs, ) attn_output = attn_output.reshape(bsz, full_q_len, -1, self.head_dim).contiguous() ########## AlltoAll for Ulysses ########## if ulysses_sp_size > 1: attn_output = gather_heads_scatter_seq(attn_output, seq_dim=1, head_dim=2) attn_output = attn_output.reshape(bsz, q_len, -1).contiguous() attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights, past_key_value def llama_attn_forward( self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_value: Optional[Cache] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs, ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: """ Adapted from transformers 4.49.0 to support Ulysses sequence parallelism for transformers >= 4.48.0. NOTE: This function has been tested only on transformers versions between 4.48.0 and 4.50.0. 
""" from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS from transformers.models.llama.modeling_llama import eager_attention_forward bsz, q_len, _ = hidden_states.shape query_states = self.q_proj(hidden_states).view(bsz, q_len, -1, self.head_dim).transpose(1, 2) key_states = self.k_proj(hidden_states).view(bsz, q_len, -1, self.head_dim).transpose(1, 2) value_states = self.v_proj(hidden_states).view(bsz, q_len, -1, self.head_dim).transpose(1, 2) ########## AlltoAll for Ulysses ########## ulysses_sp_size = get_ulysses_sequence_parallel_world_size() if ulysses_sp_size > 1: validate_ulysses_config(self.config.num_attention_heads, ulysses_sp_size) query_states = gather_seq_scatter_heads(query_states, seq_dim=2, head_dim=1) key_states = gather_seq_scatter_heads(key_states, seq_dim=2, head_dim=1) value_states = gather_seq_scatter_heads(value_states, seq_dim=2, head_dim=1) full_q_len = query_states.size(2) cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_value is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False): logger.warning_once( "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. " "Falling back to eager attention. This warning can be removed using the argument " '`attn_implementation="eager"` when loading the model.' ) else: attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs, ) attn_output = attn_output.reshape(bsz, full_q_len, -1, self.head_dim).contiguous() ########## AlltoAll for Ulysses ########## if ulysses_sp_size > 1: attn_output = gather_heads_scatter_seq(attn_output, seq_dim=1, head_dim=2) attn_output = attn_output.reshape(bsz, q_len, -1).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights ================================================ FILE: verl_rl/verl/models/transformers/monkey_patch.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" Apply monkey-patch function to models """ import importlib.metadata import sys from functools import lru_cache from typing import Optional import torch from packaging import version from transformers.modeling_flash_attention_utils import _flash_attention_forward from transformers.modeling_utils import PreTrainedModel from verl.utils.import_utils import is_trl_available from verl.utils.ulysses import ( gather_heads_scatter_seq, gather_seq_scatter_heads, get_ulysses_sequence_parallel_group, get_ulysses_sequence_parallel_world_size, slice_input_tensor, ) def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: """ This is the equivalent of torch.repeat_interleave(x, dim=2, repeats=n_rep). The hidden states go from (batch, seqlen, num_key_value_heads, head_dim) to (batch, seqlen, num_attention_heads, head_dim) """ batch, slen, num_key_value_heads, head_dim = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, :, None, :].expand(batch, slen, num_key_value_heads, n_rep, head_dim) return hidden_states.reshape(batch, slen, num_key_value_heads * n_rep, head_dim) def _ulysses_flash_attention_forward( query_states: torch.Tensor, key_states: torch.Tensor, value_states: torch.Tensor, *args, position_ids: Optional[torch.Tensor] = None, **kwargs, ): """Insert all-to-all before and after flash attention. DeepSpeed-Ulysses: https://arxiv.org/pdf/2309.14509 Args: query_states (torch.Tensor): (batch_size, seqlen/sp_size, nheads, head_dim) key_states (torch.Tensor): (batch_size, seqlen/sp_size, nheads_k, head_dim) value_states (torch.Tensor): (batch_size, seqlen/sp_size, nheads_k, head_dim) position_ids (torch.Tensor, optional): (batch_size, seqlen/sp_size) Returns: torch.Tensor: (batch_size, seqlen/sp_size, nheads, head_dim) """ ulysses_sp_size = get_ulysses_sequence_parallel_world_size() ########## AlltoAll for Ulysses ########## if ulysses_sp_size > 1: assert position_ids is not None, "position_ids is required for Ulysses sequence parallelism" # NOTE: repeat kv heads to be divided by sequence parallel. Instead of repeating nheads_q//nheads_k, # we choose to repeat sp_size//nheads_k, since flash_attention supports MQA/GQA. # For example: # - nheads_k=4, sp=8, repeats=2 # - nheads_k=8, sp=8, repeats=1 # - nheads_k=16, sp=8, repeats=1 repeats = max(ulysses_sp_size // key_states.size(2), 1) key_states = repeat_kv(key_states, repeats) value_states = repeat_kv(value_states, repeats) # (bsz, seq_len/n, n_head, head_dim) -> (bsz, seq_len, n_head/n, head_dim) query_states = gather_seq_scatter_heads(query_states, seq_dim=1, head_dim=2) key_states = gather_seq_scatter_heads(key_states, seq_dim=1, head_dim=2) value_states = gather_seq_scatter_heads(value_states, seq_dim=1, head_dim=2) # TODO: all_gather position_ids because `prepare_fa2_from_position_ids` needs it, we can eliminate # this all_gather by passing cu_seq_lens_q, cu_seq_lens_k, max_length_k, max_length_q explicitly. 
# https://github.com/huggingface/transformers/pull/33932 # (bsz, seq_len/n) -> (bsz, seq_len) position_ids_list = [torch.empty_like(position_ids) for _ in range(ulysses_sp_size)] torch.distributed.all_gather(position_ids_list, position_ids, group=get_ulysses_sequence_parallel_group()) position_ids = torch.concat(position_ids_list, dim=-1) # (bsz, seq_len, n_head/n, head_dim) attn_output = _flash_attention_forward( query_states, key_states, value_states, *args, position_ids=position_ids, **kwargs ) ########## AlltoAll for Ulysses ########## if ulysses_sp_size > 1: # (bsz, seq_len, n_head/n, head_dim) -> (bsz, seq_len/n, n_head, head_dim) attn_output = gather_heads_scatter_seq(attn_output, seq_dim=1, head_dim=2) return attn_output def patch_vlm_for_ulysses_input_slicing(model_class: type): """ Applies a monkey patch to the forward method of a given model class to enable Ulysses sequence parallelism input slicing. """ def _create_ulysses_wrapped_decoder_forward(original_forward): def ulysses_wrapped_decoder_forward(self, *args, **kwargs): inputs_embeds = kwargs.get("inputs_embeds") call_kwargs = kwargs.copy() current_ulysses_sp_size = get_ulysses_sequence_parallel_world_size() slice_now = ( inputs_embeds is not None and current_ulysses_sp_size > 1 and getattr(self, "_needs_initial_slice", True) ) if slice_now: call_kwargs["inputs_embeds"] = slice_input_tensor(inputs_embeds, dim=1, padding=False) self._needs_initial_slice = False try: return original_forward(self, *args, **call_kwargs) finally: if slice_now: self._needs_initial_slice = True return ulysses_wrapped_decoder_forward original_forward = model_class.forward wrapped_forward = _create_ulysses_wrapped_decoder_forward(original_forward) model_class.forward = wrapped_forward print(f"Monkey patch {model_class.__name__}.forward for Ulysses SP input slicing.") def patch_forward_with_backends( model: PreTrainedModel, use_fused_kernels: bool = False, fused_kernels_backend: str = None, ): """ Choose the forward function based on the model and backend. Args: model (PreTrainedModel): The model to apply the monkey patch. use_fused_kernels (bool): Whether to use fused kernels. fused_kernels_backend (str): The backend to use for fused kernels. 
""" if not use_fused_kernels or fused_kernels_backend not in ["triton", "torch"]: print( f"Skipping monkey patch for {model.__class__.__name__} as use_fused_kernels is " f"{use_fused_kernels} or fused_kernels_backend is {fused_kernels_backend}" ) return forward_with_torch_backend_function = model.__class__.forward forward_with_triton_backend_function = model.__class__.forward if model.config.model_type == "qwen2_5_vl": from verl.models.transformers.qwen2_5_vl import forward_with_torch_backend, forward_with_triton_backend forward_with_torch_backend_function = forward_with_torch_backend forward_with_triton_backend_function = forward_with_triton_backend elif model.config.model_type == "qwen2_vl": from verl.models.transformers.qwen2_vl import forward_with_torch_backend, forward_with_triton_backend forward_with_torch_backend_function = forward_with_torch_backend forward_with_triton_backend_function = forward_with_triton_backend else: from verl.models.transformers.dense_common import forward_with_torch_backend, forward_with_triton_backend forward_with_torch_backend_function = forward_with_torch_backend forward_with_triton_backend_function = forward_with_triton_backend if fused_kernels_backend == "triton": model.__class__.forward = forward_with_triton_backend_function print(f"Using Triton backend for fused kernels in {model.__class__.__name__}") elif fused_kernels_backend == "torch": model.__class__.forward = forward_with_torch_backend_function print(f"Using Torch backend for fused kernels in {model.__class__.__name__}") else: raise ValueError(f"Unsupported fused_kernels_backend: {fused_kernels_backend}. Choose 'triton' or 'torch'.") def apply_monkey_patch( model: PreTrainedModel, ulysses_sp_size: int = 1, use_remove_padding: bool = True, use_fused_kernels: bool = False, fused_kernels_backend: str = None, ): """ Apply monkey patch to the models for ulysses sequence parallel and fused kernel. In the end of this function forward function of the model is patched for fused kernel. If the model is not supported with fused kernel, please return after patch. """ """Replace _flash_attention_forward to _ulysses_flash_attention_forward""" module = sys.modules[model.__module__] try: num_attention_heads, num_key_value_heads = model.config.num_attention_heads, model.config.num_key_value_heads except AttributeError: num_attention_heads, num_key_value_heads = ( model.config.text_config.num_attention_heads, model.config.text_config.num_key_value_heads, ) assert num_attention_heads % ulysses_sp_size == 0, ( f"num_attention_heads {num_attention_heads} must be divisible by ulysses_sp_size {ulysses_sp_size}" ) assert num_key_value_heads % ulysses_sp_size == 0 or ulysses_sp_size % num_key_value_heads == 0, ( f"num_key_value_heads {num_key_value_heads} must be divisible by ulysses_sp_size " f"{ulysses_sp_size}or vise versa. Upon ulysses_sp_size % num_key_value_heads == 0," f"kv heads are repeated to ensure correctness." ) if is_trl_available(): from trl import AutoModelForCausalLMWithValueHead # type: ignore def state_dict(self, *args, **kwargs): return torch.nn.Module.state_dict(self, *args, **kwargs) AutoModelForCausalLMWithValueHead.state_dict = state_dict print("Monkey patch state_dict in AutoModelForCausalLMWithValueHead. ") # TODO: VLM models only, unify monkey patch to LLM models. 
if model.config.model_type == "qwen2_5_vl": if is_transformers_version_in_range(min_version="4.53.0"): from transformers.models.qwen2_5_vl.modeling_qwen2_5_vl import Qwen2_5_VLAttention # TODO: Support transformers 4.53 raise ValueError("Transformers 4.53 is not supported") else: from transformers.models.qwen2_5_vl.modeling_qwen2_5_vl import ( Qwen2_5_VLFlashAttention2 as Qwen2_5_VLAttention, ) if use_remove_padding or ulysses_sp_size > 1: from verl.models.transformers.qwen2_vl import ulysses_flash_attn_forward Qwen2_5_VLAttention.forward = ulysses_flash_attn_forward print("Monkey patch FlashAttention2.forward in Qwen2.5VL") if ulysses_sp_size > 1: if is_transformers_version_in_range(min_version="4.52.0"): from transformers.models.qwen2_5_vl.modeling_qwen2_5_vl import Qwen2_5_VLTextModel patch_vlm_for_ulysses_input_slicing(Qwen2_5_VLTextModel) else: from transformers.models.qwen2_5_vl.modeling_qwen2_5_vl import Qwen2_5_VLModel patch_vlm_for_ulysses_input_slicing(Qwen2_5_VLModel) elif model.config.model_type == "qwen2_vl": if is_transformers_version_in_range(min_version="4.53.0"): from transformers.models.qwen2_vl.modeling_qwen2_vl import Qwen2VLAttention # TODO: Support transformers 4.53 raise ValueError("Transformers 4.53 is not supported") else: from transformers.models.qwen2_vl.modeling_qwen2_vl import Qwen2VLFlashAttention2 as Qwen2VLAttention if use_remove_padding or ulysses_sp_size > 1: from verl.models.transformers.qwen2_vl import ulysses_flash_attn_forward Qwen2VLAttention.forward = ulysses_flash_attn_forward print("Monkey patch FlashAttention2.forward in Qwen2VL") if ulysses_sp_size > 1: if is_transformers_version_in_range(min_version="4.52.0"): from transformers.models.qwen2_vl.modeling_qwen2_vl import Qwen2VLTextModel patch_vlm_for_ulysses_input_slicing(Qwen2VLTextModel) else: from transformers.models.qwen2_vl.modeling_qwen2_vl import Qwen2VLModel patch_vlm_for_ulysses_input_slicing(Qwen2VLModel) elif model.config.model_type == "kimi_vl": if use_remove_padding or ulysses_sp_size > 1: # TODO: Changes need to be made when transformers are adapted. 
            from verl.models.transformers.kimi_vl import _ulysses_flash_attn_forward

            module.DeepseekV3FlashAttention2.forward = _ulysses_flash_attn_forward
            print("Monkey patch FlashAttention2.forward in KimiVL")
        if ulysses_sp_size > 1:
            patch_vlm_for_ulysses_input_slicing(module.DeepseekV3ForCausalLM)

        if use_fused_kernels:
            print("Fused kernels are not supported for KimiVL")

        return

    # transformers<=4.47.1
    if use_remove_padding or ulysses_sp_size > 1:
        if hasattr(module, "_flash_attention_forward"):
            module._flash_attention_forward = _ulysses_flash_attention_forward
            print(f"Monkey patch _flash_attention_forward in {model.__module__}")
        else:
            # transformers>=4.48.0
            from transformers.integrations import flash_attention

            flash_attention._flash_attention_forward = _ulysses_flash_attention_forward
            print(f"Monkey patch _flash_attention_forward in {flash_attention.__name__}")

    patch_forward_with_backends(model, use_fused_kernels=use_fused_kernels, fused_kernels_backend=fused_kernels_backend)


@lru_cache
def is_transformers_version_in_range(min_version: Optional[str] = None, max_version: Optional[str] = None) -> bool:
    try:
        # Get the installed version of the transformers library
        transformers_version_str = importlib.metadata.version("transformers")
    except importlib.metadata.PackageNotFoundError as e:
        raise ModuleNotFoundError("The `transformers` package is not installed.") from e

    transformers_version = version.parse(transformers_version_str)

    lower_bound_check = True
    if min_version is not None:
        lower_bound_check = version.parse(min_version) <= transformers_version

    upper_bound_check = True
    if max_version is not None:
        upper_bound_check = transformers_version <= version.parse(max_version)

    return lower_bound_check and upper_bound_check


================================================
FILE: verl_rl/verl/models/transformers/npu_patch.py
================================================
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Copyright 2025 The Qwen Team and The HuggingFace Inc. team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
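# Editor's note: a hedged shape sketch of the rotary patch below, assuming cos/sin enter as
# (seq_len, head_dim) with the two rotated halves concatenated on the last dim:
#
#     cos = cos.chunk(2, dim=-1)[0].contiguous()  # keep one half: (seq_len, head_dim // 2)
#     cos = cos.repeat(1, 2)                      # duplicate it back to (seq_len, head_dim)
#     # npu_rotary_mul then broadcasts cos/sin as (1, seq_len, 1, head_dim) over q/k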
import torch import torch_npu from torch_npu import npu_rotary_mul as apply_rotary_emb from transformers.models.qwen2_5_vl import modeling_qwen2_5_vl from transformers.models.qwen2_5_vl.modeling_qwen2_5_vl import Qwen2RMSNorm # This patch takes effect when using apply_rotary_pos_emb_flashatt on qwen2_5_vl and will be removed in # subsequent versions # https://github.com/huggingface/transformers/pull/38491 def apply_rotary_pos_emb_flashatt_npu( q: torch.Tensor, k: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor ) -> tuple[torch.Tensor, torch.Tensor]: cos = cos.chunk(2, dim=-1)[0].contiguous() sin = sin.chunk(2, dim=-1)[0].contiguous() cos = cos.repeat(1, 2) sin = sin.repeat(1, 2) q_embed = apply_rotary_emb( q.float(), cos.unsqueeze(0).unsqueeze(2).float(), sin.unsqueeze(0).unsqueeze(2).float() ).type_as(q) k_embed = apply_rotary_emb( k.float(), cos.unsqueeze(0).unsqueeze(2).float(), sin.unsqueeze(0).unsqueeze(2).float() ).type_as(k) return q_embed, k_embed # This api can improve performance on ASCEND NPU def rms_norm_forward(self, x): return torch_npu.npu_rms_norm(x, self.weight, epsilon=self.variance_epsilon)[0] Qwen2RMSNorm.forward = rms_norm_forward modeling_qwen2_5_vl.apply_rotary_pos_emb_flashatt = apply_rotary_pos_emb_flashatt_npu ================================================ FILE: verl_rl/verl/models/transformers/qwen2.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Callable, Optional import torch from transformers.cache_utils import Cache from transformers.modeling_flash_attention_utils import _flash_attention_forward from transformers.models.llama.modeling_llama import apply_rotary_pos_emb, repeat_kv from transformers.utils import logging from verl.utils.ulysses import ( gather_heads_scatter_seq, gather_seq_scatter_heads, get_ulysses_sequence_parallel_world_size, validate_ulysses_config, ) logger = logging.get_logger(__name__) def qwen2_flash_attn_forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, output_attentions: bool = False, use_cache: bool = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, # will become mandatory in v4.46 ): """ Adapted from transformers 4.47.1 to support Ulysses sequence parallelism. NOTE: This function is only tested on transformers versions between 4.45.0 and 4.47.1. 
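    Shape sketch (editor's addition; S denotes the full sequence length and n = ulysses_sp_size):
    each rank enters with hidden_states of shape (bsz, S/n, hidden); gather_seq_scatter_heads
    turns q/k/v into (bsz, n_head/n, S, head_dim) so attention sees the whole sequence, and
    gather_heads_scatter_seq restores (bsz, S/n, hidden) before o_proj.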
""" bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) ########## AlltoAll for Ulysses ########## ulysses_sp_size = get_ulysses_sequence_parallel_world_size() if ulysses_sp_size > 1: validate_ulysses_config(self.num_heads, ulysses_sp_size) # (bsz, n_head, seq_len/n, head_dim) -> (bsz, n_head/n, seq_len, head_dim) query_states = gather_seq_scatter_heads(query_states, seq_dim=2, head_dim=1) key_states = gather_seq_scatter_heads(key_states, seq_dim=2, head_dim=1) value_states = gather_seq_scatter_heads(value_states, seq_dim=2, head_dim=1) full_q_len = query_states.size(2) # full seq length if position_embeddings is None: logger.warning_once( "The attention layers in this model are transitioning from computing the RoPE embeddings internally " "through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed " "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.46 `position_ids` will be " "removed and `position_embeddings` will be mandatory." ) cos, sin = self.rotary_emb(value_states, position_ids) else: cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_value is not None: cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} # Specific to RoPE models key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) # repeat k/v heads if n_kv_heads < n_heads key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) dropout_rate = 0.0 if not self.training else self.attention_dropout # In PEFT, usually we cast the layer norms in float32 for training stability reasons # therefore the input hidden states gets silently casted in float32. Hence, we need # cast them back in float16 just to be sure everything works as expected. input_dtype = query_states.dtype if input_dtype == torch.float32: if torch.is_autocast_enabled(): target_dtype = torch.get_autocast_gpu_dtype() # Handle the case where the model is quantized elif hasattr(self.config, "_pre_quantization_dtype"): target_dtype = self.config._pre_quantization_dtype else: target_dtype = self.q_proj.weight.dtype logger.warning_once( f"The input hidden states seems to be silently casted in float32, this might be related to " f"the fact you have upcasted embedding or layer norm layers in float32. We will cast back the " f"input in {target_dtype}." 
        )

        query_states = query_states.to(target_dtype)
        key_states = key_states.to(target_dtype)
        value_states = value_states.to(target_dtype)

    # Reshape to the expected shape for Flash Attention
    query_states = query_states.transpose(1, 2)
    key_states = key_states.transpose(1, 2)
    value_states = value_states.transpose(1, 2)

    if (
        self.config.use_sliding_window
        and getattr(self.config, "sliding_window", None) is not None
        and self.layer_idx >= self.config.max_window_layers
    ):
        sliding_window = self.config.sliding_window
    else:
        sliding_window = None

    attn_output = _flash_attention_forward(
        query_states,
        key_states,
        value_states,
        attention_mask,
        full_q_len,
        position_ids=position_ids,
        dropout=dropout_rate,
        sliding_window=sliding_window,
        is_causal=self.is_causal,
        use_top_left_mask=self._flash_attn_uses_top_left_mask,
    )

    # use full_q_len to reshape
    attn_output = attn_output.reshape(bsz, full_q_len, -1, self.head_dim).contiguous()

    ########## AlltoAll for Ulysses ##########
    if ulysses_sp_size > 1:
        attn_output = gather_heads_scatter_seq(attn_output, seq_dim=1, head_dim=2)
    attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
    attn_output = self.o_proj(attn_output)

    if not output_attentions:
        attn_weights = None

    return attn_output, attn_weights, past_key_value


def qwen2_attn_forward(
    self,
    hidden_states: torch.Tensor,
    position_embeddings: tuple[torch.Tensor, torch.Tensor],
    attention_mask: Optional[torch.Tensor],
    past_key_value: Optional[Cache] = None,
    cache_position: Optional[torch.LongTensor] = None,
    **kwargs,
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
    """
    Adapted from transformers 4.49.0 to support Ulysses sequence parallelism for transformers >= 4.48.0.

    NOTE: This function has been tested only on transformers versions between 4.48.0 and 4.50.0.
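    Note (editor's addition): unlike the <=4.47 variant above, this path dispatches through
    transformers' ALL_ATTENTION_FUNCTIONS registry, so the Ulysses all-to-all wrapping applies
    uniformly to the eager, SDPA, and flash-attention backends chosen via
    config._attn_implementation.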
""" from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS bsz, q_len, _ = hidden_states.shape hidden_shape = (bsz, q_len, -1, self.head_dim) query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2) key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2) value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2) ########## AlltoAll for Ulysses ########## ulysses_sp_size = get_ulysses_sequence_parallel_world_size() if ulysses_sp_size > 1: validate_ulysses_config(self.config.num_attention_heads, ulysses_sp_size) # (bsz, n_head, seq_len/n, head_dim) -> (bsz, n_head/n, seq_len, head_dim) query_states = gather_seq_scatter_heads(query_states, seq_dim=2, head_dim=1) key_states = gather_seq_scatter_heads(key_states, seq_dim=2, head_dim=1) value_states = gather_seq_scatter_heads(value_states, seq_dim=2, head_dim=1) full_q_len = query_states.size(2) cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_value is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) sliding_window = None if ( self.config.use_sliding_window and getattr(self.config, "sliding_window", None) is not None and self.layer_idx >= self.config.max_window_layers ): sliding_window = self.config.sliding_window from transformers.models.qwen2.modeling_qwen2 import eager_attention_forward attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False): logger.warning_once( "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. " "Falling back to eager attention. This warning can be removed using the argument " '`attn_implementation="eager"` when loading the model.' ) else: attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, sliding_window=sliding_window, # main diff with Llama **kwargs, ) attn_output = attn_output.reshape(bsz, full_q_len, -1, self.head_dim).contiguous() ########## AlltoAll for Ulysses ########## if ulysses_sp_size > 1: # (bsz, seq_len, n_head/n, head_dim) -> (bsz, seq_len/n, n_head, head_dim) attn_output = gather_heads_scatter_seq(attn_output, seq_dim=1, head_dim=2) attn_output = attn_output.reshape(bsz, q_len, -1).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights ================================================ FILE: verl_rl/verl/models/transformers/qwen2_5_vl.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from typing import Optional import torch from transformers.models.qwen2_5_vl.modeling_qwen2_5_vl import ( Qwen2_5_VLCausalLMOutputWithPast, Qwen2_5_VLForConditionalGeneration, ) @dataclass class Qwen2_5_VLCausalLMOutputForPPO(Qwen2_5_VLCausalLMOutputWithPast): log_probs: Optional[torch.FloatTensor] = None entropy: Optional[torch.FloatTensor] = None def forward_base_model( self: Qwen2_5_VLForConditionalGeneration, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[list[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, pixel_values: Optional[torch.Tensor] = None, pixel_values_videos: Optional[torch.FloatTensor] = None, image_grid_thw: Optional[torch.LongTensor] = None, video_grid_thw: Optional[torch.LongTensor] = None, rope_deltas: Optional[torch.LongTensor] = None, cache_position: Optional[torch.LongTensor] = None, second_per_grid_ts: Optional[torch.Tensor] = None, ) -> tuple | Qwen2_5_VLCausalLMOutputWithPast: r""" Copy paste Qwen2_5_VL's forward https://github.com/linkedin/Liger-Kernel/blob/main/src/liger_kernel/transformers/model/qwen2_5_vl.py ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if inputs_embeds is None: inputs_embeds = self.model.embed_tokens(input_ids) if pixel_values is not None: pixel_values = pixel_values.type(self.visual.dtype) image_embeds = self.visual(pixel_values, grid_thw=image_grid_thw) n_image_tokens = (input_ids == self.config.image_token_id).sum().item() n_image_features = image_embeds.shape[0] if n_image_tokens != n_image_features: raise ValueError( f"Image features and image tokens do not match: tokens: {n_image_tokens}, " f"features {n_image_features}" ) mask = input_ids == self.config.image_token_id mask_unsqueezed = mask.unsqueeze(-1) mask_expanded = mask_unsqueezed.expand_as(inputs_embeds) image_mask = mask_expanded.to(inputs_embeds.device) image_embeds = image_embeds.to(inputs_embeds.device, inputs_embeds.dtype) inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds) if pixel_values_videos is not None: pixel_values_videos = pixel_values_videos.type(self.visual.dtype) video_embeds = self.visual(pixel_values_videos, grid_thw=video_grid_thw) n_video_tokens = (input_ids == self.config.video_token_id).sum().item() n_video_features = video_embeds.shape[0] if n_video_tokens != n_video_features: raise ValueError( f"Video features and video tokens do not match: tokens: {n_video_tokens}, " f"features {n_video_features}" ) mask = input_ids == self.config.video_token_id mask_unsqueezed = mask.unsqueeze(-1) mask_expanded = mask_unsqueezed.expand_as(inputs_embeds) video_mask = mask_expanded.to(inputs_embeds.device) video_embeds = video_embeds.to(inputs_embeds.device, inputs_embeds.dtype) inputs_embeds = inputs_embeds.masked_scatter(video_mask, video_embeds) if attention_mask is not None: attention_mask = attention_mask.to(inputs_embeds.device) # if we get 4D 
attention mask we cannot calculate rope deltas anymore. TODO @raushan fixme if position_ids is None and (attention_mask is None or attention_mask.ndim == 2): # calculate RoPE index once per generation in the pre-fill stage only if (cache_position is not None and cache_position[0] == 0) or self.rope_deltas is None: position_ids, rope_deltas = self.get_rope_index( input_ids, image_grid_thw, video_grid_thw, second_per_grid_ts, attention_mask, ) self.rope_deltas = rope_deltas # then use the prev pre-calculated rope-deltas to get the correct position ids else: batch_size, seq_length, _ = inputs_embeds.shape delta = (cache_position[0] + self.rope_deltas).to(inputs_embeds.device) if cache_position is not None else 0 position_ids = torch.arange(seq_length, device=inputs_embeds.device) position_ids = position_ids.view(1, -1).expand(batch_size, -1) if cache_position is not None: # otherwise `deltas` is an int `0` delta = delta.repeat_interleave(batch_size // delta.shape[0], dim=0) position_ids = position_ids.add(delta) position_ids = position_ids.unsqueeze(0).expand(3, -1, -1) outputs = self.model( input_ids=None, position_ids=position_ids, attention_mask=attention_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, ) return outputs def forward_with_torch_backend( self: Qwen2_5_VLForConditionalGeneration, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[list[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, pixel_values: Optional[torch.Tensor] = None, pixel_values_videos: Optional[torch.FloatTensor] = None, image_grid_thw: Optional[torch.LongTensor] = None, video_grid_thw: Optional[torch.LongTensor] = None, rope_deltas: Optional[torch.LongTensor] = None, cache_position: Optional[torch.LongTensor] = None, second_per_grid_ts: Optional[torch.Tensor] = None, temperature: float = 1.0, **loss_kwargs, ) -> tuple | Qwen2_5_VLCausalLMOutputForPPO: from verl.utils.experimental.torch_functional import FusedLinearForPPO outputs = forward_base_model( self, input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, pixel_values=pixel_values, pixel_values_videos=pixel_values_videos, image_grid_thw=image_grid_thw, video_grid_thw=video_grid_thw, rope_deltas=rope_deltas, cache_position=cache_position, second_per_grid_ts=second_per_grid_ts, ) hidden_states = outputs[0] if not return_dict: raise NotImplementedError("forward_with_torch_backend has to return_dict") # Loss calculations if labels is not None: rolled_labels = torch.roll(labels, shifts=-1, dims=-1) elif input_ids is not None: rolled_labels = torch.roll(input_ids, shifts=-1, dims=-1) else: raise RuntimeError("To use forward_with_torch_backend, either labels or input_ids must be provided.") fused_linear_for_ppo = FusedLinearForPPO() log_probs, entropy = fused_linear_for_ppo.forward( hidden_states=hidden_states, vocab_weights=self.lm_head.weight, 
input_ids=rolled_labels, temperature=temperature, ) return Qwen2_5_VLCausalLMOutputForPPO( log_probs=log_probs, entropy=entropy, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, rope_deltas=rope_deltas, ) def forward_with_triton_backend( self: Qwen2_5_VLForConditionalGeneration, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[list[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, pixel_values: Optional[torch.Tensor] = None, pixel_values_videos: Optional[torch.FloatTensor] = None, image_grid_thw: Optional[torch.LongTensor] = None, video_grid_thw: Optional[torch.LongTensor] = None, rope_deltas: Optional[torch.LongTensor] = None, cache_position: Optional[torch.LongTensor] = None, second_per_grid_ts: Optional[torch.Tensor] = None, temperature: float = 1.0, **loss_kwargs, ) -> tuple | Qwen2_5_VLCausalLMOutputForPPO: from verl.utils.kernel.linear_cross_entropy import linear_cross_entropy outputs = forward_base_model( self, input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, pixel_values=pixel_values, pixel_values_videos=pixel_values_videos, image_grid_thw=image_grid_thw, video_grid_thw=video_grid_thw, rope_deltas=rope_deltas, cache_position=cache_position, second_per_grid_ts=second_per_grid_ts, ) hidden_states = outputs[0] if not return_dict: raise NotImplementedError("forward_with_triton_backend has to return_dict") # Loss calculations if labels is not None: rolled_labels = torch.roll(labels, shifts=-1, dims=-1) elif input_ids is not None: rolled_labels = torch.roll(input_ids, shifts=-1, dims=-1) else: raise RuntimeError("To use forward_with_triton_backend, either labels or input_ids must be provided.") log_probs, entropy = linear_cross_entropy( hidden_states, self.lm_head.weight, rolled_labels, temperature, "none", ) return Qwen2_5_VLCausalLMOutputForPPO( log_probs=log_probs, entropy=entropy, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, rope_deltas=rope_deltas, ) ================================================ FILE: verl_rl/verl/models/transformers/qwen2_vl.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
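# Editor's note: a hedged usage sketch of get_rope_index() below (argument values are
# illustrative, not from this repository):
#
#     position_ids = get_rope_index(
#         processor,                      # HF processor for Qwen2-VL
#         input_ids,                      # 1D LongTensor; the batch dim is already removed
#         image_grid_thw=image_grid_thw,  # (n_images, 3) temporal/height/width grid sizes
#         attention_mask=attention_mask,
#     )                                   # -> (3, seq_len) t/h/w position ids for mrope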
import inspect import os from dataclasses import dataclass from typing import Optional import torch from transformers.modeling_flash_attention_utils import _flash_attention_forward from transformers.models.qwen2_vl.modeling_qwen2_vl import ( Qwen2VLCausalLMOutputWithPast, Qwen2VLForConditionalGeneration, ) from transformers.utils import is_flash_attn_greater_or_equal from verl.utils.ulysses import ( gather_heads_scatter_seq, gather_seq_scatter_heads, get_ulysses_sequence_parallel_world_size, validate_ulysses_config, ) try: from transformers.modeling_flash_attention_utils import flash_attn_func, flash_attn_varlen_func _flash_supports_window_size = "window_size" in list(inspect.signature(flash_attn_func).parameters) except ImportError: flash_attn_varlen_func = None def get_rope_index( processor, input_ids: torch.Tensor, image_grid_thw: Optional[torch.Tensor] = None, video_grid_thw: Optional[torch.Tensor] = None, second_per_grid_ts: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, ) -> torch.Tensor: """ Gets the position ids for Qwen2-VL, it should be generated before sharding the sequence. The batch dim has been removed and the input_ids should be a 1D tensor representing a single example. https://github.com/huggingface/transformers/blob/v4.49.0/src/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py#L1546 """ spatial_merge_size = processor.image_processor.merge_size tokens_per_second = 2 image_token_id = processor.tokenizer.convert_tokens_to_ids("<|image_pad|>") video_token_id = processor.tokenizer.convert_tokens_to_ids("<|video_pad|>") vision_start_token_id = processor.tokenizer.convert_tokens_to_ids("<|vision_start|>") if input_ids is not None and (image_grid_thw is not None or video_grid_thw is not None): if attention_mask is None: attention_mask = torch.ones_like(input_ids) position_ids = torch.ones(3, input_ids.size(0), dtype=input_ids.dtype, device=input_ids.device) # (3, seqlen) image_index, video_index = 0, 0 input_ids = input_ids[attention_mask == 1] image_nums, video_nums = 0, 0 vision_start_indices = torch.argwhere(input_ids == vision_start_token_id) vision_tokens = input_ids[vision_start_indices + 1] image_nums = (vision_tokens == image_token_id).sum() video_nums = (vision_tokens == video_token_id).sum() input_tokens = input_ids.tolist() llm_pos_ids_list: list = [] st = 0 remain_images, remain_videos = image_nums, video_nums for _ in range(image_nums + video_nums): if image_token_id in input_tokens and remain_images > 0: ed_image = input_tokens.index(image_token_id, st) else: ed_image = len(input_tokens) + 1 if video_token_id in input_tokens and remain_videos > 0: ed_video = input_tokens.index(video_token_id, st) else: ed_video = len(input_tokens) + 1 if ed_image < ed_video: t, h, w = ( image_grid_thw[image_index][0], image_grid_thw[image_index][1], image_grid_thw[image_index][2], ) second_per_grid_t = 0 image_index += 1 remain_images -= 1 ed = ed_image else: t, h, w = ( video_grid_thw[video_index][0], video_grid_thw[video_index][1], video_grid_thw[video_index][2], ) second_per_grid_t = second_per_grid_ts[video_index] if second_per_grid_ts is not None else 1.0 video_index += 1 remain_videos -= 1 ed = ed_video llm_grid_t, llm_grid_h, llm_grid_w = ( t.item(), h.item() // spatial_merge_size, w.item() // spatial_merge_size, ) text_len = ed - st st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx) t_index = torch.arange(llm_grid_t).view(-1, 
1).expand(-1, llm_grid_h * llm_grid_w) t_index = (t_index * second_per_grid_t * tokens_per_second).long().flatten() h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(llm_grid_t, -1, llm_grid_w).flatten() w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(llm_grid_t, llm_grid_h, -1).flatten() llm_pos_ids_list.append(torch.stack([t_index, h_index, w_index]) + text_len + st_idx) st = ed + llm_grid_t * llm_grid_h * llm_grid_w if st < len(input_tokens): st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 text_len = len(input_tokens) - st llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx) llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1) position_ids[..., attention_mask == 1] = llm_positions.to(position_ids.device) else: if attention_mask is not None: position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) position_ids = position_ids.unsqueeze(0).expand(3, -1).to(input_ids.device) else: position_ids = torch.arange(input_ids.shape[1], device=input_ids.device).view(1, -1).expand(3, -1) return position_ids def prepare_fa2_from_position_ids( query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, position_ids: torch.Tensor ): query = query.view(-1, query.size(-2), query.size(-1)) key = key.view(-1, key.size(-2), key.size(-1)) value = value.view(-1, value.size(-2), value.size(-1)) position_ids = position_ids.flatten() indices_q = torch.arange(position_ids.size(0), device=position_ids.device, dtype=torch.int32) cu_seqlens = torch.cat( ( indices_q[position_ids == 0], torch.tensor(position_ids.size(), device=position_ids.device, dtype=torch.int32), ) ) max_length = cu_seqlens.diff().max() # use cu_seqlens to infer max_length for qwen2vl mrope return (query, key, value, indices_q, (cu_seqlens, cu_seqlens), (max_length, max_length)) def flash_attention_forward( query_states: torch.Tensor, key_states: torch.Tensor, value_states: torch.Tensor, attention_mask: torch.Tensor, query_length: int, is_causal: bool = True, position_ids: Optional[torch.Tensor] = None, sliding_window: Optional[int] = None, use_top_left_mask: bool = False, deterministic: Optional[bool] = None, **kwargs, ): """ Patches flash attention forward to handle 3D position ids in mrope. (3, batch_size, seq_length) """ causal = is_causal if not use_top_left_mask else is_causal and query_length != 1 # Assuming 4D tensors, key_states.shape[1] is the key/value sequence length (source length). 
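    # Editor's worked example (illustrative): for packed sequences with
    # position_ids[0] = [0, 1, 2, 0, 1], prepare_fa2_from_position_ids() yields
    # cu_seqlens = [0, 3, 5] (a sequence starts wherever the position id resets to 0,
    # plus the total length) and max_length = 3, which is exactly what
    # flash_attn_varlen_func consumes in the branch below.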
use_sliding_windows = ( _flash_supports_window_size and sliding_window is not None and key_states.shape[1] > sliding_window ) flash_kwargs = {"window_size": (sliding_window, sliding_window)} if use_sliding_windows else {} if is_flash_attn_greater_or_equal("2.4.1"): if deterministic is None: deterministic = os.environ.get("FLASH_ATTENTION_DETERMINISTIC", "0") == "1" flash_kwargs["deterministic"] = deterministic if position_ids is not None and query_length != 1 and not (torch.diff(position_ids[0], dim=-1) >= 0).all(): batch_size = query_states.size(0) query_states, key_states, value_states, _, cu_seq_lens, max_seq_lens = prepare_fa2_from_position_ids( query_states, key_states, value_states, position_ids[0] ) # remove channel dimension cu_seqlens_q, cu_seqlens_k = cu_seq_lens max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens attn_output = flash_attn_varlen_func( query_states, key_states, value_states, cu_seqlens_q=cu_seqlens_q, cu_seqlens_k=cu_seqlens_k, max_seqlen_q=max_seqlen_in_batch_q, max_seqlen_k=max_seqlen_in_batch_k, dropout_p=kwargs.pop("dropout", 0.0), softmax_scale=kwargs.pop("softmax_scale", None), causal=causal, **flash_kwargs, ) attn_output = attn_output.view(batch_size, -1, attn_output.size(-2), attn_output.size(-1)) else: attn_output = _flash_attention_forward( query_states, key_states, value_states, attention_mask, query_length, is_causal=is_causal, sliding_window=sliding_window, use_top_left_mask=use_top_left_mask, deterministic=deterministic, **kwargs, ) # do not pass position_ids to old flash_attention_forward return attn_output def ulysses_flash_attn_forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, # will become mandatory in v4.46 **kwargs, ) -> tuple[torch.Tensor, None, None]: from transformers.models.qwen2_vl.modeling_qwen2_vl import apply_multimodal_rotary_pos_emb, repeat_kv bsz, q_len, _ = hidden_states.size() # q_len = seq_length / sp_size query_states = self.q_proj(hidden_states) # (batch_size, seq_length / sp_size, num_heads * head_size) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) ulysses_sp_size = get_ulysses_sequence_parallel_world_size() if ulysses_sp_size > 1: validate_ulysses_config(self.num_heads, ulysses_sp_size) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) query_states = gather_seq_scatter_heads(query_states, seq_dim=2, head_dim=1) key_states = gather_seq_scatter_heads(key_states, seq_dim=2, head_dim=1) value_states = gather_seq_scatter_heads(value_states, seq_dim=2, head_dim=1) # (batch_size, num_head / sp_size, seq_length, head_size) full_q_len = query_states.size(2) # full_q_len = seq_length else: full_q_len = q_len # Because the input can be padded, the absolute sequence length depends on the max position id. 
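    # Editor's note (illustrative): with sp_size = n, q enters the all-to-all as
    # (bsz, n_head, S/n, head_dim) and leaves as (bsz, n_head/n, S, head_dim). k/v are
    # repeated to n_head groups first (repeat_kv above) so the head dimension stays
    # divisible by n even when num_key_value_heads < n.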
    if position_embeddings is None:
        cos, sin = self.rotary_emb(value_states, position_ids)
    else:
        cos, sin = position_embeddings
    query_states, key_states = apply_multimodal_rotary_pos_emb(
        query_states, key_states, cos, sin, self.rope_scaling["mrope_section"]
    )
    dropout_rate = 0.0 if not self.training else self.attention_dropout

    # Reshape to the expected shape for Flash Attention
    query_states = query_states.transpose(1, 2)
    key_states = key_states.transpose(1, 2)
    value_states = value_states.transpose(1, 2)

    if (
        self.config.use_sliding_window
        and getattr(self.config, "sliding_window", None) is not None
        and self.layer_idx >= self.config.max_window_layers
    ):
        sliding_window = self.config.sliding_window
    else:
        sliding_window = None

    attn_output = flash_attention_forward(
        query_states,
        key_states,
        value_states,
        attention_mask,
        full_q_len,
        dropout=dropout_rate,
        sliding_window=sliding_window,
        is_causal=self.is_causal,
        use_top_left_mask=self._flash_attn_uses_top_left_mask,
        position_ids=position_ids,  # important: pass position ids
    )
    # (batch_size, seq_length, num_head / sp_size, head_size)
    if ulysses_sp_size > 1:
        attn_output = gather_heads_scatter_seq(attn_output, head_dim=2, seq_dim=1)
    attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
    attn_output = self.o_proj(attn_output)
    return attn_output, None, None


@dataclass
class Qwen2VLCausalLMOutputForPPO(Qwen2VLCausalLMOutputWithPast):
    log_probs: Optional[torch.FloatTensor] = None
    entropy: Optional[torch.FloatTensor] = None


def forward_base_model(
    self: Qwen2VLForConditionalGeneration,
    input_ids: torch.LongTensor = None,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    past_key_values: Optional[list[torch.FloatTensor]] = None,
    inputs_embeds: Optional[torch.FloatTensor] = None,
    use_cache: Optional[bool] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    return_dict: Optional[bool] = None,
    pixel_values: Optional[torch.Tensor] = None,
    pixel_values_videos: Optional[torch.FloatTensor] = None,
    image_grid_thw: Optional[torch.LongTensor] = None,
    video_grid_thw: Optional[torch.LongTensor] = None,
    rope_deltas: Optional[torch.LongTensor] = None,
    cache_position: Optional[torch.LongTensor] = None,
) -> tuple | Qwen2VLCausalLMOutputWithPast:
    r"""
    Copy paste Qwen2VL's forward
    https://github.com/linkedin/Liger-Kernel/blob/main/src/liger_kernel/transformers/model/qwen2_vl.py
    """
    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
    output_hidden_states = (
        output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
    )
    return_dict = return_dict if return_dict is not None else self.config.use_return_dict

    if inputs_embeds is None:
        inputs_embeds = self.model.embed_tokens(input_ids)
        if pixel_values is not None:
            pixel_values = pixel_values.type(self.visual.get_dtype())
            image_embeds = self.visual(pixel_values, grid_thw=image_grid_thw)
            n_image_tokens = (input_ids == self.config.image_token_id).sum().item()
            n_image_features = image_embeds.shape[0]
            if n_image_tokens != n_image_features:
                raise ValueError(
                    f"Image features and image tokens do not match: tokens: {n_image_tokens}, "
                    f"features {n_image_features}"
                )
            image_mask = (
                (input_ids == self.config.image_token_id)
                .unsqueeze(-1)
                .expand_as(inputs_embeds)
                .to(inputs_embeds.device)
            )
            image_embeds = image_embeds.to(inputs_embeds.device, inputs_embeds.dtype)
            inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds)

        if
pixel_values_videos is not None: pixel_values_videos = pixel_values_videos.type(self.visual.get_dtype()) video_embeds = self.visual(pixel_values_videos, grid_thw=video_grid_thw) n_video_tokens = (input_ids == self.config.video_token_id).sum().item() n_video_features = video_embeds.shape[0] if n_video_tokens != n_video_features: raise ValueError( f"Video features and video tokens do not match: tokens: {n_video_tokens}, " f"features {n_video_features}" ) video_mask = ( (input_ids == self.config.video_token_id) .unsqueeze(-1) .expand_as(inputs_embeds) .to(inputs_embeds.device) ) video_embeds = video_embeds.to(inputs_embeds.device, inputs_embeds.dtype) inputs_embeds = inputs_embeds.masked_scatter(video_mask, video_embeds) if attention_mask is not None: attention_mask = attention_mask.to(inputs_embeds.device) if position_ids is None and (attention_mask is None or attention_mask.ndim == 2): # calculate RoPE index once per generation in the pre-fill stage only if (cache_position is not None and cache_position[0] == 0) or self.rope_deltas is None: position_ids, rope_deltas = self.get_rope_index(input_ids, image_grid_thw, video_grid_thw, attention_mask) self.rope_deltas = rope_deltas # then use the prev pre-calculated rope-deltas to get the correct position ids else: batch_size, seq_length, _ = inputs_embeds.shape delta = cache_position[0] + self.rope_deltas if cache_position is not None else 0 position_ids = torch.arange(seq_length, device=inputs_embeds.device) position_ids = position_ids.view(1, -1).expand(batch_size, -1) if cache_position is not None: # otherwise `deltas` is an int `0` delta = delta.repeat_interleave(batch_size // delta.shape[0], dim=0) position_ids = position_ids.add(delta) position_ids = position_ids.unsqueeze(0).expand(3, -1, -1) outputs = self.model( input_ids=None, position_ids=position_ids, attention_mask=attention_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, ) return outputs def forward_with_torch_backend( self: Qwen2VLForConditionalGeneration, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[list[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, pixel_values: Optional[torch.Tensor] = None, pixel_values_videos: Optional[torch.FloatTensor] = None, image_grid_thw: Optional[torch.LongTensor] = None, video_grid_thw: Optional[torch.LongTensor] = None, rope_deltas: Optional[torch.LongTensor] = None, cache_position: Optional[torch.LongTensor] = None, temperature: float = 1.0, **loss_kwargs, ) -> tuple | Qwen2VLCausalLMOutputForPPO: from verl.utils.experimental.torch_functional import FusedLinearForPPO outputs = forward_base_model( self, input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, pixel_values=pixel_values, pixel_values_videos=pixel_values_videos, image_grid_thw=image_grid_thw, video_grid_thw=video_grid_thw, rope_deltas=rope_deltas, 
cache_position=cache_position, ) hidden_states = outputs[0] if not return_dict: raise NotImplementedError("forward_with_torch_backend has to return_dict") # Loss calculations if labels is not None: rolled_labels = torch.roll(labels, shifts=-1, dims=-1) elif input_ids is not None: rolled_labels = torch.roll(input_ids, shifts=-1, dims=-1) else: raise RuntimeError("To use forward_with_torch_backend, either labels or input_ids must be provided.") fused_linear_for_ppo = FusedLinearForPPO() log_probs, entropy = fused_linear_for_ppo.forward( hidden_states=hidden_states, vocab_weights=self.lm_head.weight, input_ids=rolled_labels, temperature=temperature, ) return Qwen2VLCausalLMOutputForPPO( log_probs=log_probs, entropy=entropy, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, rope_deltas=rope_deltas, ) def forward_with_triton_backend( self: Qwen2VLForConditionalGeneration, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[list[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, pixel_values: Optional[torch.Tensor] = None, pixel_values_videos: Optional[torch.FloatTensor] = None, image_grid_thw: Optional[torch.LongTensor] = None, video_grid_thw: Optional[torch.LongTensor] = None, rope_deltas: Optional[torch.LongTensor] = None, cache_position: Optional[torch.LongTensor] = None, temperature: float = 1.0, **loss_kwargs, ) -> tuple | Qwen2VLCausalLMOutputForPPO: from verl.utils.kernel.linear_cross_entropy import linear_cross_entropy outputs = forward_base_model( self, input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, pixel_values=pixel_values, pixel_values_videos=pixel_values_videos, image_grid_thw=image_grid_thw, video_grid_thw=video_grid_thw, rope_deltas=rope_deltas, cache_position=cache_position, ) hidden_states = outputs[0] if not return_dict: raise NotImplementedError("forward_with_triton_backend has to return_dict") # Loss calculations if labels is not None: rolled_labels = torch.roll(labels, shifts=-1, dims=-1) elif input_ids is not None: rolled_labels = torch.roll(input_ids, shifts=-1, dims=-1) else: raise RuntimeError("To use forward_with_triton_backend, either labels or input_ids must be provided.") log_probs, entropy = linear_cross_entropy( hidden_states, self.lm_head.weight, rolled_labels, temperature, "none", ) return Qwen2VLCausalLMOutputForPPO( log_probs=log_probs, entropy=entropy, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, rope_deltas=rope_deltas, ) ================================================ FILE: verl_rl/verl/models/weight_loader_registry.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


def get_weight_loader(arch: str):
    from verl.models.mcore.loader import load_state_dict_to_megatron_gptmodel

    _MODEL_WEIGHT_MEGATRON_LOADER_REGISTRY = {
        "LlamaForCausalLM": load_state_dict_to_megatron_gptmodel,
        "Qwen2ForCausalLM": load_state_dict_to_megatron_gptmodel,
    }

    if arch in _MODEL_WEIGHT_MEGATRON_LOADER_REGISTRY:
        return _MODEL_WEIGHT_MEGATRON_LOADER_REGISTRY[arch]
    raise ValueError(
        f"Model architecture {arch} loader is not supported for now. Supported architectures: "
        f"{_MODEL_WEIGHT_MEGATRON_LOADER_REGISTRY.keys()}"
    )


def get_weight_saver(arch: str):
    from verl.models.mcore.saver import (
        merge_megatron_ckpt_gptmodel,
        merge_megatron_ckpt_gptmodel_dpskv3,
        merge_megatron_ckpt_gptmodel_mixtral,
        merge_megatron_ckpt_gptmodel_qwen2_5_vl,
        merge_megatron_ckpt_gptmodel_qwen_moe,
    )

    _MODEL_WEIGHT_MEGATRON_SAVER_REGISTRY = {
        "LlamaForCausalLM": merge_megatron_ckpt_gptmodel,
        "Qwen2ForCausalLM": merge_megatron_ckpt_gptmodel,
        "MixtralForCausalLM": merge_megatron_ckpt_gptmodel_mixtral,
        "Qwen2MoeForCausalLM": merge_megatron_ckpt_gptmodel_qwen_moe,
        "Qwen2_5_VLForConditionalGeneration": merge_megatron_ckpt_gptmodel_qwen2_5_vl,
        "DeepseekV3ForCausalLM": merge_megatron_ckpt_gptmodel_dpskv3,
        "Qwen3ForCausalLM": merge_megatron_ckpt_gptmodel,
        "Qwen3MoeForCausalLM": merge_megatron_ckpt_gptmodel_qwen_moe,
    }
    if arch in _MODEL_WEIGHT_MEGATRON_SAVER_REGISTRY:
        return _MODEL_WEIGHT_MEGATRON_SAVER_REGISTRY[arch]
    raise ValueError(
        f"Model architecture {arch} saver is not supported for now. Supported architectures: "
        f"{_MODEL_WEIGHT_MEGATRON_SAVER_REGISTRY.keys()}"
    )


================================================
FILE: verl_rl/verl/protocol.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Implement base data transfer protocol between any two functions, modules.
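A minimal usage sketch (editor's addition; values are illustrative):

    import numpy as np
    import torch
    from verl.protocol import DataProto

    data = DataProto.from_dict(
        tensors={"input_ids": torch.zeros(4, 8, dtype=torch.long)},
        non_tensors={"uid": np.array(["a", "b", "c", "d"], dtype=object)},
    )
    first_two = data[:2]   # slicing returns a new DataProto
    one_item = data[0]     # integer indexing returns a DataProtoItem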
We can subclass Protocol to define more detailed batch info with specific keys
"""

import contextlib
import copy
import logging
import os
import pickle
from dataclasses import dataclass, field
from typing import Callable, Optional

import numpy as np
import pandas as pd
import ray
import tensordict
import torch
import torch.distributed
from packaging import version
from tensordict import TensorDict
from torch.utils.data import DataLoader

from verl.utils.device import get_device_id, get_torch_device
from verl.utils.py_functional import union_two_dict
from verl.utils.torch_functional import allgather_dict_tensors

__all__ = ["DataProto", "union_tensor_dict"]

with contextlib.suppress(Exception):
    tensordict.set_lazy_legacy(False).set()


class _DataProtoConfigMeta(type):
    _config = {}

    auto_padding_key = "_verl_auto_padding"

    @property
    def auto_padding(cls):
        enabled_by_env = os.getenv("VERL_AUTO_PADDING", "FALSE").upper() in ["TRUE", "1"]
        return enabled_by_env or cls._config.get(cls.auto_padding_key, False)

    @auto_padding.setter
    def auto_padding(cls, enabled: bool):
        assert isinstance(enabled, bool), f"enabled must be a boolean, got {enabled} as {type(enabled)}"
        cls._config[cls.auto_padding_key] = enabled


class DataProtoConfig(metaclass=_DataProtoConfigMeta):
    pass


_padding_size_key = "_padding_size_key_x123d"


def pad_dataproto_to_divisor(data: "DataProto", size_divisor: int):
    """Pad a DataProto so its length is divisible by size_divisor.

    Args:
        data (DataProto): the DataProto to pad
        size_divisor (int): size divisor

    Returns:
        data_padded (DataProto): the padded DataProto
        pad_size (int): the number of padded items
    """
    assert isinstance(data, DataProto), "data must be a DataProto"
    if len(data) % size_divisor != 0:
        pad_size = size_divisor - len(data) % size_divisor
        padding_protos = []
        remaining_pad = pad_size
        while remaining_pad > 0:
            take_size = min(remaining_pad, len(data))
            padding_protos.append(data[:take_size])
            remaining_pad -= take_size
        data_padded = DataProto.concat([data] + padding_protos)
    else:
        if len(data) == 0:
            logging.warning("padding a DataProto with no item, no change made")
        pad_size = 0
        data_padded = data
    return data_padded, pad_size


def unpad_dataproto(data: "DataProto", pad_size):
    """Unpad the data proto with pad_size, i.e. `data[:-pad_size]`."""
    if pad_size != 0:
        data = data[:-pad_size]
    return data


def union_tensor_dict(tensor_dict1: TensorDict, tensor_dict2: TensorDict) -> TensorDict:
    """Union two tensordicts."""
    assert tensor_dict1.batch_size == tensor_dict2.batch_size, (
        f"Two tensor dicts must have identical batch sizes.
Got {tensor_dict1.batch_size} and {tensor_dict2.batch_size}" ) for key in tensor_dict2.keys(): if key not in tensor_dict1.keys(): tensor_dict1[key] = tensor_dict2[key] else: assert tensor_dict1[key].equal(tensor_dict2[key]), ( f"{key} in tensor_dict1 and tensor_dict2 are not the same object" ) return tensor_dict1 def union_numpy_dict(tensor_dict1: dict[str, np.ndarray], tensor_dict2: dict[str, np.ndarray]) -> dict[str, np.ndarray]: for key, val in tensor_dict2.items(): if key in tensor_dict1: assert isinstance(tensor_dict2[key], np.ndarray) assert isinstance(tensor_dict1[key], np.ndarray) # to properly deal with nan and object type assert pd.DataFrame(tensor_dict2[key]).equals(pd.DataFrame(tensor_dict1[key])), ( f"{key} in tensor_dict1 and tensor_dict2 are not the same object" ) tensor_dict1[key] = val return tensor_dict1 def list_of_dict_to_dict_of_list(list_of_dict: list[dict]): if len(list_of_dict) == 0: return {} keys = list_of_dict[0].keys() output = {key: [] for key in keys} for data in list_of_dict: for key, item in data.items(): assert key in output output[key].append(item) return output def fold_batch_dim(data: "DataProto", new_batch_size): """ Fold a batch dim from [bsz, xxx] into [new_bsz, bsz // new_bsz, xxx] """ batch_size = data.batch.batch_size[0] assert batch_size % new_batch_size == 0 tensor: TensorDict = data.batch non_tensor = data.non_tensor_batch tensor = tensor.view(new_batch_size, -1) tensor.auto_batch_size_(batch_dims=1) for key, val in non_tensor.items(): non_tensor[key] = np.reshape(val, newshape=(new_batch_size, -1, *val.shape[1:])) return type(data)(batch=tensor, non_tensor_batch=non_tensor, meta_info=data.meta_info) def unfold_batch_dim(data: "DataProto", batch_dims=2): """ Unfold the first n dims as new batch dim """ tensor: TensorDict = data.batch non_tensor = data.non_tensor_batch tensor.auto_batch_size_(batch_dims=batch_dims) tensor = tensor.view(-1) batch_size = tensor.batch_size[0] non_tensor_new = {} for key, val in non_tensor.items(): non_tensor_new[key] = np.reshape(val, newshape=(batch_size, *val.shape[batch_dims:])) return type(data)(batch=tensor, non_tensor_batch=non_tensor_new, meta_info=data.meta_info) def collate_fn(x: list["DataProtoItem"]): batch = [] non_tensor_batch = [] for data in x: batch.append(data.batch) non_tensor_batch.append(data.non_tensor_batch) batch = torch.stack(batch).contiguous() non_tensor_batch = list_of_dict_to_dict_of_list(non_tensor_batch) for key, val in non_tensor_batch.items(): non_tensor_batch[key] = np.array(val, dtype=object) return DataProto(batch=batch, non_tensor_batch=non_tensor_batch) @dataclass class DataProtoItem: # TODO(zhangchi.usc1992) add consistency check batch: TensorDict = None non_tensor_batch: dict = field(default_factory=dict) meta_info: dict = field(default_factory=dict) @dataclass class DataProto: """ A DataProto is a data structure that aims to provide a standard protocol for data exchange between functions. It contains a batch (TensorDict) and a meta_info (Dict). The batch is a TensorDict https://pytorch.org/tensordict/. TensorDict allows you to manipulate a dictionary of Tensors like a single Tensor. Ideally, the tensors with the same batch size should be put inside batch. 
""" batch: TensorDict = None non_tensor_batch: dict = field(default_factory=dict) meta_info: dict = field(default_factory=dict) def __post_init__(self): # perform necessary checking self.check_consistency() def __len__(self): if self.batch is not None: return self.batch.batch_size[0] elif self.non_tensor_batch is not None and len(self.non_tensor_batch) > 0: random_key = list(self.non_tensor_batch.keys())[0] return self.non_tensor_batch[random_key].shape[0] else: return 0 def __getitem__(self, item): """ Enhanced indexing for DataProto objects. Args: item: Can be one of: - int: A single index - slice: A slice object (start:stop:step) - list: A list of indices - numpy.ndarray: An array of indices - torch.Tensor: A tensor of indices Returns: DataProto: For all indexing types except single integers DataProtoItem: Only for single integer indices """ # Case 1: Slice object - use the slice method if isinstance(item, slice): return self.slice(item.start, item.stop, item.step) # Case 2: List, numpy array, or torch tensor - use sel_idxs elif isinstance(item, list | np.ndarray | torch.Tensor): return self.select_idxs(item) # Case 3: Single integer - return DataProtoItem for backward compatibility elif isinstance(item, int | np.integer): tensor_data = self.batch[item] if self.batch is not None else None non_tensor_data = {key: val[item] for key, val in self.non_tensor_batch.items()} return DataProtoItem(batch=tensor_data, non_tensor_batch=non_tensor_data, meta_info=self.meta_info) # # Case 4: Unsupported type else: raise TypeError(f"Indexing with {type(item)} is not supported") def __getstate__(self): import io buffer = io.BytesIO() if version.parse(tensordict.__version__) >= version.parse("0.5.0") and self.batch is not None: self.batch = self.batch.contiguous() self.batch = self.batch.consolidate() torch.save(self.batch, buffer) buffer_bytes = buffer.getvalue() return buffer_bytes, self.non_tensor_batch, self.meta_info def __setstate__(self, data): import io batch_deserialized_bytes, non_tensor_batch, meta_info = data batch_deserialized = io.BytesIO(initial_bytes=batch_deserialized_bytes) batch = torch.load( batch_deserialized, weights_only=False, map_location="cpu" if not get_torch_device().is_available() else None, ) self.batch = batch self.non_tensor_batch = non_tensor_batch self.meta_info = meta_info def save_to_disk(self, filepath): with open(filepath, "wb") as f: pickle.dump(self, f) @staticmethod def load_from_disk(filepath) -> "DataProto": with open(filepath, "rb") as f: data = pickle.load(f) return data def print_size(self, prefix=""): size_of_tensordict = 0 if self.batch is not None: for _, tensor in self.batch.items(): size_of_tensordict += tensor.element_size() * tensor.numel() size_of_numpy_array = 0 for _, numpy_array in self.non_tensor_batch.items(): size_of_numpy_array += numpy_array.nbytes size_of_numpy_array /= 1024**3 size_of_tensordict /= 1024**3 message = f"Size of tensordict: {size_of_tensordict} GB, size of non_tensor_batch: {size_of_numpy_array} GB" if prefix: message = f"{prefix}, " + message print(message) def check_consistency(self): """Check the consistency of the DataProto. 
        Mainly for batch and non_tensor_batch.

        We expose this function as a public one so that users can call it directly.
        """
        if self.batch is not None:
            assert len(self.batch.batch_size) == 1, "only support num_batch_dims=1"

        if self.non_tensor_batch is not None:
            for key, val in self.non_tensor_batch.items():
                assert isinstance(val, np.ndarray)

        if self.batch is not None and self.non_tensor_batch is not None and len(self.non_tensor_batch) != 0:
            # TODO: we can actually lift this restriction if needed
            assert len(self.batch.batch_size) == 1, "only support num_batch_dims=1 when non_tensor_batch is not empty."

            batch_size = self.batch.batch_size[0]
            for key, val in self.non_tensor_batch.items():
                assert isinstance(val, np.ndarray), (
                    f"data in the non_tensor_batch must be a numpy.array with dtype=object, but for "
                    f"{key=}, got {type(val)=}"
                )
                assert val.shape[0] == batch_size, (
                    f"key {key} length {len(val)} is not equal to batch size {batch_size}"
                )

    @classmethod
    def from_single_dict(cls, data: dict[str, torch.Tensor | np.ndarray], meta_info=None, auto_padding=False):
        """Create a DataProto from a dict of tensors and non_tensors"""
        tensors = {}
        non_tensors = {}

        for key, val in data.items():
            if isinstance(val, torch.Tensor):
                tensors[key] = val
            elif isinstance(val, np.ndarray):
                non_tensors[key] = val
            else:
                raise ValueError(f"Unsupported type in data {type(val)}")

        return cls.from_dict(tensors=tensors, non_tensors=non_tensors, meta_info=meta_info, auto_padding=auto_padding)

    @classmethod
    def from_dict(
        cls,
        tensors: Optional[dict[str, torch.Tensor]] = None,
        non_tensors=None,
        meta_info=None,
        num_batch_dims=1,
        auto_padding=False,
    ):
        """Create a DataProto from a dict of tensors. This assumes that
        1. All the tensors in `tensors` have the same dim0
        2. Only dim0 is the batch dim
        """

        assert num_batch_dims > 0, "num_batch_dims must be greater than zero"
        if non_tensors is not None:
            assert num_batch_dims == 1, "only support num_batch_dims=1 when non_tensors is not None."

        if tensors is None:
            tensors = {}
        if meta_info is None:
            meta_info = {}
        if non_tensors is None:
            non_tensors = {}

        assert isinstance(non_tensors, dict)

        # get and check batch size
        batch_size = None
        pivot_key = None
        for key, tensor in tensors.items():
            if batch_size is None:
                batch_size = tensor.shape[:num_batch_dims]
                pivot_key = key
            else:
                current_batch = tensor.shape[:num_batch_dims]
                assert batch_size == current_batch, (
                    f"Not all the tensors in tensors have the same batch size with batch_dims={num_batch_dims}.
" f"Got {pivot_key} has {batch_size}, {key} has {current_batch}" ) for key, val in non_tensors.items(): if not isinstance(val, np.ndarray): non_tensors[key] = np.array(val, dtype=object) tensor_dict = TensorDict(source=tensors, batch_size=batch_size) if tensors else None if auto_padding: meta_info[DataProtoConfig.auto_padding_key] = True return cls(batch=tensor_dict, non_tensor_batch=non_tensors, meta_info=meta_info) def to(self, device) -> "DataProto": """move the batch to device Args: device (torch.device, str): torch device Returns: DataProto: the current DataProto """ if self.batch is not None: self.batch = self.batch.to(device) return self def select(self, batch_keys=None, non_tensor_batch_keys=None, meta_info_keys=None, deepcopy=False) -> "DataProto": """Select a subset of the DataProto via batch_keys and meta_info_keys Args: batch_keys (list, optional): a list of strings indicating the keys in batch to select meta_info_keys (list, optional): a list of keys indicating the meta info to select Returns: DataProto: the DataProto with the selected batch_keys and meta_info_keys """ # TODO (zhangchi.usc1992) whether to copy if batch_keys is not None: batch_keys = tuple(batch_keys) sub_batch = self.batch.select(*batch_keys) else: sub_batch = self.batch if non_tensor_batch_keys is not None: non_tensor_batch = {key: val for key, val in self.non_tensor_batch.items() if key in non_tensor_batch_keys} else: non_tensor_batch = self.non_tensor_batch if deepcopy: non_tensor_batch = copy.deepcopy(non_tensor_batch) if meta_info_keys is not None: sub_meta_info = {key: val for key, val in self.meta_info.items() if key in meta_info_keys} else: sub_meta_info = self.meta_info if deepcopy: sub_meta_info = copy.deepcopy(sub_meta_info) return type(self)(batch=sub_batch, non_tensor_batch=non_tensor_batch, meta_info=sub_meta_info) def select_idxs(self, idxs): """ Select specific indices from the DataProto. Args: idxs (torch.Tensor or numpy.ndarray or list): Indices to select Returns: DataProto: A new DataProto containing only the selected indices """ if isinstance(idxs, list): idxs = torch.tensor(idxs) if idxs.dtype != torch.bool: idxs = idxs.type(torch.int32) if isinstance(idxs, np.ndarray): idxs_np = idxs idxs_torch = torch.from_numpy(idxs) else: # torch.Tensor idxs_torch = idxs idxs_np = idxs.detach().cpu().numpy() batch_size = int(idxs_np.sum()) if idxs_np.dtype == bool else idxs_np.shape[0] if self.batch is not None: # Use TensorDict's built-in indexing capabilities selected_batch = TensorDict( source={key: tensor[idxs_torch] for key, tensor in self.batch.items()}, batch_size=(batch_size,), device=self.batch.device, ) else: selected_batch = None selected_non_tensor = {} for key, val in self.non_tensor_batch.items(): selected_non_tensor[key] = val[idxs_np] return type(self)(batch=selected_batch, non_tensor_batch=selected_non_tensor, meta_info=self.meta_info) def slice(self, start=None, end=None, step=None): """ Slice the DataProto and return a new DataProto object. This is an improved version of direct slicing which returns a DataProtoItem. Args: start (int, optional): Start index. Defaults to None (start from beginning). end (int, optional): End index (exclusive). Defaults to None (go to end). step (int, optional): Step size. Defaults to None (step=1). 
Returns: DataProto: A new DataProto containing the sliced data Examples: # Using the slice method directly sliced_data = data_proto.slice(10, 20) # Using enhanced indexing (returns DataProto) sliced_data = data_proto[10:20] sliced_data = data_proto[::2] # Every other element # Using list indexing (returns DataProto) indices = [1, 5, 10] selected_data = data_proto[indices] # Single index still returns DataProtoItem single_item = data_proto[5] """ # Create a slice object slice_obj = slice(start, end, step) # Handle the batch data if self.batch is not None: # Use TensorDict's built-in slicing capabilities sliced_batch = self.batch[slice_obj] else: sliced_batch = None # Handle the non-tensor batch data sliced_non_tensor = {} for key, val in self.non_tensor_batch.items(): sliced_non_tensor[key] = val[slice_obj] # Return a new DataProto object return type(self)(batch=sliced_batch, non_tensor_batch=sliced_non_tensor, meta_info=self.meta_info) def pop(self, batch_keys=None, non_tensor_batch_keys=None, meta_info_keys=None) -> "DataProto": """Pop a subset of the DataProto via `batch_keys`, `non_tensor_batch_keys` and `meta_info_keys` Args: batch_keys (list, optional): a list of strings indicating the keys in batch to pop meta_info_keys (list, optional): a list of keys indicating the meta info to pop Returns: DataProto: the DataProto with the popped batch_keys and meta_info_keys """ if batch_keys is None: batch_keys = [] if meta_info_keys is None: meta_info_keys = [] if non_tensor_batch_keys is None: non_tensor_batch_keys = [] tensors = {} # tensor batch for key in batch_keys: assert key in self.batch.keys() tensors[key] = self.batch.pop(key) non_tensors = {} # non tensor batch for key in non_tensor_batch_keys: assert key in self.non_tensor_batch.keys() non_tensors[key] = self.non_tensor_batch.pop(key) meta_info = {} for key in meta_info_keys: assert key in self.meta_info.keys() meta_info[key] = self.meta_info.pop(key) return DataProto.from_dict(tensors=tensors, non_tensors=non_tensors, meta_info=meta_info) def rename(self, old_keys=None, new_keys=None) -> "DataProto": """ Note that this function only renames keys in the batch """ def validate_input(keys): if keys is not None: if isinstance(keys, str): keys = [keys] elif isinstance(keys, list): pass else: raise TypeError(f"keys must be a list or a string, but got {type(keys)}") return keys old_keys = validate_input(old_keys) new_keys = validate_input(new_keys) if len(new_keys) != len(old_keys): raise ValueError( f"new_keys and old_keys must have the same length, but got {len(new_keys)} and {len(old_keys)}" ) self.batch.rename_key_(tuple(old_keys), tuple(new_keys)) return self def union(self, other: "DataProto") -> "DataProto": """Union with another DataProto. Union batch and meta_info separately. Throws an error if - there are conflicting keys in batch and they are not equal - the batch sizes of the two DataProtos are not the same - there are conflicting keys in meta_info and they are not the same. Args: other (DataProto): another DataProto to union Returns: DataProto: the DataProto after union """ self.batch = union_tensor_dict(self.batch, other.batch) self.non_tensor_batch = union_numpy_dict(self.non_tensor_batch, other.non_tensor_batch) self.meta_info = union_two_dict(self.meta_info, other.meta_info) return self def make_iterator(self, mini_batch_size, epochs, seed=None, dataloader_kwargs=None): r"""Make an iterator from the DataProto. This builds on the fact that a TensorDict can be used as a normal PyTorch dataset.
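Each yielded mini-batch is itself a DataProto, and the parent's meta_info is attached to every mini-batch before it is yielded.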
See https://pytorch.org/tensordict/tutorials/data_fashion for more details. Args: mini_batch_size (int): mini-batch size when iterating the dataset. We require that ``batch.batch_size[0] % mini_batch_size == 0``. epochs (int): number of epochs when iterating the dataset. dataloader_kwargs (Any): internally, it returns a DataLoader over the batch. The dataloader_kwargs is the kwargs passed to the DataLoader. Returns: Iterator: an iterator that yields a mini-batch data at a time. The total number of iteration steps is ``self.batch.batch_size * epochs // mini_batch_size`` """ assert self.batch.batch_size[0] % mini_batch_size == 0, f"{self.batch.batch_size[0]} % {mini_batch_size} != 0" # we can directly create a dataloader from TensorDict if dataloader_kwargs is None: dataloader_kwargs = {} if seed is not None: generator = torch.Generator() generator.manual_seed(seed) else: generator = None assert isinstance(dataloader_kwargs, dict) train_dataloader = DataLoader( dataset=self, batch_size=mini_batch_size, collate_fn=collate_fn, generator=generator, **dataloader_kwargs ) def get_data(): for _ in range(epochs): for d in train_dataloader: d.meta_info = self.meta_info yield d return iter(get_data()) def is_padding_enabled(self): """ Check if padding is enabled for the DataProto. Returns: bool: True if padding is enabled, False otherwise. """ dataproto_specific_padding = self.meta_info.get(DataProtoConfig.auto_padding_key, False) return dataproto_specific_padding or DataProtoConfig.auto_padding def padding(self, padding_size, padding_candidate=""): """Pad the DataProto by concating with padding_candidate.repeat(padding_size) Args: padding_size (int): the number of repeated padding_candidate padding_candidate: the item to be repeated and appended to the DataProto, only supporting ["first", "last"] """ if padding_size == 0: return padding_candidate = self.select_idxs([0 if padding_candidate == "first" else len(self) - 1]) padding_part = padding_candidate.repeat(padding_size) padded_dp = DataProto.concat([self, padding_part]) self.batch = padded_dp.batch self.non_tensor_batch = padded_dp.non_tensor_batch def chunk(self, chunks: int) -> list["DataProto"]: """Split the batch among dim=0 into chunks. The meta_info is passed to each DataProto after split. Args: chunks (int): the number of chunks to split on dim=0 Returns: List[DataProto]: a list of DataProto after splitting """ if not self.is_padding_enabled(): assert len(self) % chunks == 0, ( f"only support equal chunk. Got size of DataProto {len(self)} and chunk {chunks}." ) bsz_in_batch = None if self.batch is not None: batch_lst = self.batch.chunk(chunks=chunks, dim=0) bsz_in_batch = np.array([batch.batch_size[0] for batch in batch_lst]) chunk_indices = np.cumsum(bsz_in_batch)[:-1] else: batch_lst = [None for _ in range(chunks)] non_tensor_batch_lst = [{} for _ in range(chunks)] for key, val in self.non_tensor_batch.items(): assert isinstance(val, np.ndarray) if bsz_in_batch is not None: non_tensor_lst = np.array_split(val, chunk_indices.tolist()) else: non_tensor_lst = np.array_split(val, chunks) assert len(non_tensor_lst) == chunks for i in range(chunks): non_tensor_batch_lst[i][key] = non_tensor_lst[i] output = [] for i in range(chunks): output.append( type(self)(batch=batch_lst[i], non_tensor_batch=non_tensor_batch_lst[i], meta_info=self.meta_info) ) return output def split(self, split_size: int) -> list["DataProto"]: """Split the batch among dim=0 into chunks. The meta_info is passed to each DataProto after split. 
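Unlike chunk, the last piece may be smaller when the length is not divisible (e.g., a DataProto of length 10 with split_size=3 yields pieces of sizes 3, 3, 3 and 1).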
Args: split_size (int): the size of each split Returns: List[DataProto]: a list of DataProto after splitting """ return [self[i : i + split_size] for i in range(0, len(self), split_size)] @staticmethod def concat(data: list["DataProto"]) -> "DataProto": """Concat a list of DataProto. The batch is concatenated among dim=0. The meta_info is assumed to be identical and will use the first one. Args: data (List[DataProto]): list of DataProto Returns: DataProto: concatenated DataProto """ batch_lst = [] for batch in data: batch_lst.append(batch.batch) new_batch = torch.cat(batch_lst, dim=0) if batch_lst[0] is not None else None non_tensor_batch = list_of_dict_to_dict_of_list(list_of_dict=[d.non_tensor_batch for d in data]) for key, val in non_tensor_batch.items(): non_tensor_batch[key] = np.concatenate(val, axis=0) cls = type(data[0]) if len(data) > 0 else DataProto return cls(batch=new_batch, non_tensor_batch=non_tensor_batch, meta_info=data[0].meta_info) def reorder(self, indices): """ Note that this operation is in-place """ indices_np = indices.detach().numpy() self.batch = self.batch[indices] self.non_tensor_batch = {key: val[indices_np] for key, val in self.non_tensor_batch.items()} def repeat(self, repeat_times=2, interleave=True): """ Repeat the batch data a specified number of times. Args: repeat_times (int): Number of times to repeat the data. interleave (bool): Whether to interleave the repeated data. Returns: DataProto: A new DataProto with repeated data. """ if self.batch is not None: if interleave: # Interleave the data repeated_tensors = { key: tensor.repeat_interleave(repeat_times, dim=0) for key, tensor in self.batch.items() } else: # Stack the data repeated_tensors = { key: tensor.unsqueeze(0).expand(repeat_times, *tensor.shape).reshape(-1, *tensor.shape[1:]) for key, tensor in self.batch.items() } repeated_batch = TensorDict( source=repeated_tensors, batch_size=(self.batch.batch_size[0] * repeat_times,), ) else: repeated_batch = None repeated_non_tensor_batch = {} for key, val in self.non_tensor_batch.items(): if interleave: repeated_non_tensor_batch[key] = np.repeat(val, repeat_times, axis=0) else: repeated_non_tensor_batch[key] = np.tile(val, (repeat_times,) + (1,) * (val.ndim - 1)) return type(self)( batch=repeated_batch, non_tensor_batch=repeated_non_tensor_batch, meta_info=self.meta_info, ) def unfold_column_chunks(self, n_split: int, split_keys: Optional[list[str]] = None): """Split along the second dim into `n_split`, unfold it to the first dim (batch dim) Useful in passing grouped tensors that doesn't want to be shuffled in dataset. keys not in split_keys are repeated to match the shape Note that if the `split_keys` is not provided, it will repeat all the keys in the second dim. 
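Illustrative example (hypothetical shapes): with n_split=2, a tensor in split_keys of shape (B, 2k, ...) is reshaped to (2B, k, ...), while every other key is repeat-interleaved along dim 0 to length 2B.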
""" if self.batch is not None: unfolded_batch = {} for key in self.batch.keys(): if key in split_keys if split_keys is not None else False: shape = list(self.batch[key].shape) shape[0] = self.batch[key].shape[0] * n_split shape[1] = self.batch[key].shape[1] // n_split unfolded_batch[key] = self.batch[key].reshape(*shape) else: unfolded_batch[key] = torch.repeat_interleave(self.batch[key], n_split, dim=0) # locate the `unfolded_batch` as a TensorDict on the same device as the original batch unfolded_batch = TensorDict( source=unfolded_batch, batch_size=(self.batch.batch_size[0] * n_split,), device=self.batch.device ) else: unfolded_batch = None repeated_non_tensor_batch = {} for key, val in self.non_tensor_batch.items(): if key in split_keys: shape = list(val.shape) shape[0] = val.shape[0] * n_split shape[1] = val.shape[1] // n_split repeated_non_tensor_batch[key] = val.reshape(*shape) else: repeated_non_tensor_batch[key] = np.repeat(val, n_split, axis=0) return type(self)( batch=unfolded_batch, non_tensor_batch=repeated_non_tensor_batch, meta_info=self.meta_info, ) def sample_level_repeat(self, repeat_times): """ Repeat each row of the batch data a specified number of times. Args: repeat_times (torch.tensor, list, tuple, ndarray): Number of times to repeat the data. Returns: DataProto: A new DataProto with repeated data. """ if isinstance(repeat_times, tuple): repeat_times = list(repeat_times) elif isinstance(repeat_times, torch.Tensor): assert len(repeat_times.shape) == 1 repeat_times = repeat_times.tolist() elif isinstance(repeat_times, np.ndarray): assert len(repeat_times.shape) == 1 repeat_times = repeat_times.tolist() else: assert isinstance(repeat_times, list), ( f"repeat_times type must be in [list, torch.Tensor, np.ndarray, tuple], got {type(repeat_times)}" ) repeat_times = torch.tensor(repeat_times) if self.batch is not None: # Interleave the data repeated_tensors = { key: tensor.repeat_interleave(repeat_times, dim=0) for key, tensor in self.batch.items() } repeated_batch = TensorDict( source=repeated_tensors, batch_size=(repeat_times.sum().item(),), device=self.batch.device, ) else: repeated_batch = None repeated_non_tensor_batch = {} for key, val in self.non_tensor_batch.items(): repeated_non_tensor_batch[key] = np.repeat(val, repeat_times, axis=0) return type(self)( batch=repeated_batch, non_tensor_batch=repeated_non_tensor_batch, meta_info=self.meta_info, ) @dataclass class DataProtoFuture: """ DataProtoFuture aims to eliminate actual data fetching on driver. By doing so, the driver doesn't have to wait for data so that asynchronous execution becomes possible. DataProtoFuture contains a list of futures from another WorkerGroup of size world_size. - collect_fn is a Callable that reduces the list of futures to a DataProto - dispatch_fn is a Callable that partitions the DataProto into a list of DataProto of size world_size and then select Potential issue: we can optimize dispatch_fn(collect_fn) such that only needed data is fetched on destination - DataProtoFuture only supports directly passing from the output of a method to another input. You can't perform any operation on the DataProtoFuture in driver. 
""" collect_fn: Callable futures: list[ray.ObjectRef] dispatch_fn: Callable = None @staticmethod def concat(data: list[ray.ObjectRef]) -> "DataProtoFuture": output = DataProtoFuture(collect_fn=DataProto.concat, futures=data) return output def chunk(self, chunks: int) -> list["DataProtoFuture"]: from functools import partial arg_future_lst = [] for i in range(chunks): # note that we can't directly pass i and chunks def dispatch_fn(x, i, chunks): return x.chunk(chunks=chunks)[i] arg_future = DataProtoFuture( collect_fn=self.collect_fn, dispatch_fn=partial(dispatch_fn, i=i, chunks=chunks), futures=self.futures ) arg_future_lst.append(arg_future) return arg_future_lst def get(self): output = ray.get(self.futures) # dp_size. for o in output: assert isinstance(o, DataProto) output = self.collect_fn(output) # select dp, concat if self.dispatch_fn is not None: output = self.dispatch_fn(output) # split in batch dim, select using dp return output def all_gather_data_proto(data: DataProto, process_group): # Note that this is an inplace operator just like torch.distributed.all_gather group_size = torch.distributed.get_world_size(group=process_group) assert isinstance(data, DataProto) prev_device = data.batch.device data.batch = data.batch.to(get_device_id()) data.batch = allgather_dict_tensors(data.batch.contiguous(), size=group_size, group=process_group, dim=0) data.batch = data.batch.to(prev_device) # all gather non_tensor_batch all_non_tensor_batch = [None for _ in range(group_size)] torch.distributed.all_gather_object(all_non_tensor_batch, data.non_tensor_batch, group=process_group) data.non_tensor_batch = {k: np.concatenate([d[k] for d in all_non_tensor_batch]) for k in data.non_tensor_batch} ================================================ FILE: verl_rl/verl/py.typed ================================================ ================================================ FILE: verl_rl/verl/single_controller/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from . import base from .base import * version_folder = os.path.dirname(os.path.join(os.path.abspath(__file__))) # Note(haibin.lin): single_controller.__version__ is deprecated with open(os.path.join(os.path.join(version_folder, os.pardir), "version/version")) as f: __version__ = f.read().strip() __all__ = base.__all__ ================================================ FILE: verl_rl/verl/single_controller/base/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .worker import Worker from .worker_group import ClassWithInitArgs, ResourcePool, WorkerGroup __all__ = ["Worker", "WorkerGroup", "ClassWithInitArgs", "ResourcePool"] ================================================ FILE: verl_rl/verl/single_controller/base/decorator.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from functools import wraps from types import FunctionType from verl.protocol import DataProtoFuture, _padding_size_key from verl.utils.py_functional import DynamicEnum # here we use a magic number to avoid colliding with an attribute that a user-defined function may already have MAGIC_ATTR = "attrs_3141562937" class Dispatch(DynamicEnum): """Enum class defining different dispatch modes for distributed computation. Each mode represents a specific strategy for distributing data across different ranks in a distributed system. The modes are used to control how data is partitioned and processed across different worker groups. """ _registry = {} _next_value = 0 def init_predefined_dispatch_mode(): Dispatch.register("RANK_ZERO") Dispatch.register("ONE_TO_ALL") Dispatch.register("ALL_TO_ALL") Dispatch.register("MEGATRON_COMPUTE") Dispatch.register("MEGATRON_PP_AS_DP") Dispatch.register("MEGATRON_PP_ONLY") Dispatch.register("MEGATRON_COMPUTE_PROTO") Dispatch.register("MEGATRON_PP_AS_DP_PROTO") Dispatch.register("DP_COMPUTE") Dispatch.register("DP_COMPUTE_PROTO") Dispatch.register("DP_COMPUTE_PROTO_WITH_FUNC") Dispatch.register("DP_COMPUTE_METRIC") # This is a special dispatch mode for vllm ExternalRayDistributedExecutor Dispatch.register("DIRECT_ROLLOUT_METHOD") class Execute(DynamicEnum): """Enum class defining different execution modes for distributed computation. These modes control how a function should be executed across different ranks in a distributed system.
""" _registry = {} _next_value = 0 def init_predefined_execute_mode(): Execute.register("ALL") Execute.register("RANK_ZERO") # Initialize the two Dynamic Enum Classes init_predefined_dispatch_mode() init_predefined_execute_mode() def _split_args_kwargs_data_proto(chunks, *args, **kwargs): from verl.protocol import DataProto, DataProtoFuture splitted_args = [] for arg in args: assert isinstance(arg, DataProto | DataProtoFuture) splitted_args.append(arg.chunk(chunks=chunks)) splitted_kwargs = {} for key, val in kwargs.items(): assert isinstance(val, DataProto | DataProtoFuture) splitted_kwargs[key] = val.chunk(chunks=chunks) return splitted_args, splitted_kwargs def _split_args_kwargs_data_proto_with_auto_padding(chunks, *args, **kwargs): from verl.protocol import DataProto, DataProtoFuture data_proto_len = None padding_size = None def _padding_and_split_data(obj, chunks): nonlocal data_proto_len, padding_size assert isinstance(obj, DataProto | DataProtoFuture) if isinstance(obj, DataProto) and obj.is_padding_enabled(): # for padding, we only support DataProto with same length if data_proto_len is None: data_proto_len = len(obj) padding_size = (chunks - (data_proto_len % chunks)) if (data_proto_len % chunks > 0) else 0 else: assert data_proto_len == len(obj), ( f"expecting all arg share same length of {data_proto_len}, but got {len(obj)}" ) obj.padding(padding_size=padding_size) return obj.chunk(chunks=chunks) splitted_args = [_padding_and_split_data(arg, chunks) for arg in args] splitted_kwargs = {key: _padding_and_split_data(val, chunks) for key, val in kwargs.items()} if padding_size is not None: splitted_kwargs[_padding_size_key] = padding_size return splitted_args, splitted_kwargs def dispatch_one_to_all(worker_group, *args, **kwargs): args = tuple([arg] * worker_group.world_size for arg in args) kwargs = {k: [v] * worker_group.world_size for k, v in kwargs.items()} return args, kwargs def dummy_direct_rollout_call(worker_group, *args, **kwargs): raise NotImplementedError("Direct rollout call is forbidden.") def dispatch_all_to_all(worker_group, *args, **kwargs): return args, kwargs def collect_all_to_all(worker_group, output): return output def dispatch_megatron_compute(worker_group, *args, **kwargs): """ User passes in dp data. 
The data is dispatched to all tp/pp ranks with the same dp """ from verl.single_controller.base.megatron.worker_group import MegatronWorkerGroup assert isinstance(worker_group, MegatronWorkerGroup), ( f"worker_group must be MegatronWorkerGroup, Got {type(worker_group)}" ) # ray put all the args in advance to avoid duplicate serialization cost import ray args = [[ray.put(dp_arg) for dp_arg in arg] for arg in args] kwargs = {k: [ray.put(dp_v) for dp_v in v] for k, v in kwargs.items()} def _transform_data(obj_list, worker_group): assert isinstance(obj_list, tuple | list) and len(obj_list) == worker_group.dp_size transformed_data = [] for i in range(worker_group.world_size): local_dp_rank = worker_group.get_megatron_rank_info(rank=i).dp_rank transformed_data.append(obj_list[local_dp_rank]) return transformed_data all_args = tuple([_transform_data(arg, worker_group) for arg in args]) all_kwargs = {key: _transform_data(val, worker_group) for key, val in kwargs.items()} return all_args, all_kwargs def collect_megatron_compute(worker_group, output): """ Only collect the data from the tp=0 and pp=last and every dp ranks """ from verl.single_controller.base.megatron.worker_group import MegatronWorkerGroup assert isinstance(worker_group, MegatronWorkerGroup) output_in_dp = [] pp_size = worker_group.get_megatron_global_info().pp_size for global_rank in range(worker_group.world_size): local_rank_info = worker_group.get_megatron_rank_info(rank=global_rank) if local_rank_info.tp_rank == 0 and local_rank_info.pp_rank == pp_size - 1 and local_rank_info.cp_rank == 0: output_in_dp.append(output[global_rank]) return output_in_dp def dispatch_megatron_compute_data_proto(worker_group, *args, **kwargs): """ All the args and kwargs must be DataProto. The batch will be chunked by dp_size and passed to each rank """ from verl.single_controller.base.megatron.worker_group import MegatronWorkerGroup assert isinstance(worker_group, MegatronWorkerGroup) splitted_args, splitted_kwargs = _split_args_kwargs_data_proto(worker_group.dp_size, *args, **kwargs) return dispatch_megatron_compute(worker_group, *splitted_args, **splitted_kwargs) def _concat_data_proto_or_future(output: list): import ray from verl.protocol import DataProto, DataProtoFuture # make sure all the elements in output has the same type for o in output: assert type(o) is type(output[0]) o = output[0] if isinstance(o, DataProto): return DataProto.concat(output) elif isinstance(o, ray.ObjectRef): return DataProtoFuture.concat(output) else: raise NotImplementedError def collect_megatron_compute_data_proto(worker_group, output): """ Each output must be a DataProto. We concat the dim=0 of output """ import ray from verl.protocol import DataProto output = collect_megatron_compute(worker_group, output) for o in output: assert isinstance(o, DataProto | ray.ObjectRef), f"expecting {o} to be DataProto, but got {type(o)}" return _concat_data_proto_or_future(output) def dispatch_megatron_pp_as_dp(worker_group, *args, **kwargs): """ treat pp as dp. 
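Concretely, the input is pre-split into pp_size * dp_size * cp_size chunks, and the worker with ranks (dp, cp, pp) receives chunk index (cp * dp_size + dp) * pp_size + pp.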
""" from verl.single_controller.base.megatron.worker_group import MegatronWorkerGroup assert isinstance(worker_group, MegatronWorkerGroup) pp_size = worker_group.pp_size dp_size = worker_group.dp_size cp_size = worker_group.cp_size pp_dp_cp_size = pp_size * dp_size * cp_size def _transform_data(obj_list, worker_group): assert isinstance(obj_list, list | tuple) and len(obj_list) == pp_dp_cp_size transformed_data = [] for i in range(worker_group.world_size): local_dp_rank = worker_group.get_megatron_rank_info(rank=i).dp_rank local_pp_rank = worker_group.get_megatron_rank_info(rank=i).pp_rank local_cp_rank = worker_group.get_megatron_rank_info(rank=i).cp_rank # compute the rank in obj_list. Note that the order is dp then cp then pp # Also note that the outputs within a pp group will be firstly allgathered, then only the # output of pp0 will be collected. # For pp=2 dp=4, a batch of data "ABCDEFGH" should be dispatched and collected in below order: # dispatch: pp_allgther: collect: # dp 0 1 2 3 dp 0 1 2 3 # pp +---------+ pp +-------------+ # 0 | A C E G | 0 | AB CD EF GH | ABCDEFGH # 1 | B D F H | 1 | AB CD EF GH | # +---------+ +-------------+ dp_cp_rank = local_cp_rank * dp_size + local_dp_rank arg_rank = dp_cp_rank * pp_size + local_pp_rank transformed_data.append(obj_list[arg_rank]) return transformed_data all_args = tuple([_transform_data(arg, worker_group) for arg in args]) all_kwargs = {key: _transform_data(val, worker_group) for key, val in kwargs.items()} return all_args, all_kwargs def collect_megatron_pp_as_dp(worker_group, output): """ treat pp as dp. Only collect data on tp=0 """ from verl.single_controller.base.megatron.worker_group import MegatronWorkerGroup assert isinstance(worker_group, MegatronWorkerGroup) output_in_dp = [] for global_rank in range(worker_group.world_size): local_rank_info = worker_group.get_megatron_rank_info(rank=global_rank) if local_rank_info.tp_rank == 0: output_in_dp.append(output[global_rank]) return output_in_dp def collect_megatron_pp_only(worker_group, output): """ Only collect output of megatron pp. 
This is useful when examining weight names, as they are identical in tp/dp """ from verl.single_controller.base.megatron.worker_group import MegatronWorkerGroup assert isinstance(worker_group, MegatronWorkerGroup) output_in_pp = [] for global_rank in range(worker_group.world_size): local_rank_info = worker_group.get_megatron_rank_info(rank=global_rank) if local_rank_info.tp_rank == 0 and local_rank_info.dp_rank == 0: output_in_pp.append(output[global_rank]) return output_in_pp def dispatch_megatron_pp_as_dp_data_proto(worker_group, *args, **kwargs): from verl.single_controller.base.megatron.worker_group import MegatronWorkerGroup assert isinstance(worker_group, MegatronWorkerGroup) pp_dp_cp_size = worker_group.dp_size * worker_group.pp_size * worker_group.cp_size splitted_args, splitted_kwargs = _split_args_kwargs_data_proto(pp_dp_cp_size, *args, **kwargs) ret = dispatch_megatron_pp_as_dp(worker_group, *splitted_args, **splitted_kwargs) return ret def collect_megatron_pp_as_dp_data_proto(worker_group, output): from verl.single_controller.base.megatron.worker_group import MegatronWorkerGroup assert isinstance(worker_group, MegatronWorkerGroup) output = collect_megatron_pp_as_dp(worker_group, output) return _concat_data_proto_or_future(output) def dispatch_dp_compute(worker_group, *args, **kwargs): from verl.single_controller.base.worker_group import WorkerGroup assert isinstance(worker_group, WorkerGroup) for arg in args: assert isinstance(arg, tuple | list) and len(arg) == worker_group.world_size for k, v in kwargs.items(): assert isinstance(v, tuple | list) and len(v) == worker_group.world_size return args, kwargs def collect_dp_compute(worker_group, output): from verl.single_controller.base.worker_group import WorkerGroup assert isinstance(worker_group, WorkerGroup) assert len(output) == worker_group.world_size return output def dispatch_dp_compute_data_proto(worker_group, *args, **kwargs): from verl.single_controller.base.worker_group import WorkerGroup assert isinstance(worker_group, WorkerGroup) # Note: enable auto padding for dp compute DataProto splitted_args, splitted_kwargs = _split_args_kwargs_data_proto_with_auto_padding( worker_group.world_size, *args, **kwargs, ) return splitted_args, splitted_kwargs def dispatch_dp_compute_data_proto_with_func(worker_group, *args, **kwargs): from verl.single_controller.base.worker_group import WorkerGroup assert isinstance(worker_group, WorkerGroup) assert isinstance(args[0], FunctionType) # NOTE: the first positional arg is a function! splitted_args, splitted_kwargs = _split_args_kwargs_data_proto(worker_group.world_size, *args[1:], **kwargs) splitted_args_with_func = [[args[0]] * worker_group.world_size] + splitted_args return splitted_args_with_func, splitted_kwargs def collect_dp_compute_data_proto(worker_group, output): import ray from verl.protocol import DataProto for o in output: assert isinstance(o, DataProto | ray.ObjectRef), f"expecting {o} to be DataProto, but got {type(o)}" output = collect_dp_compute(worker_group, output) return _concat_data_proto_or_future(output) # Global registry for dispatch mode.
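# A hypothetical extension sketch: custom modes can be added at runtime with
# register_dispatch_mode (defined below). For example:
#   def my_dispatch_fn(worker_group, *args, **kwargs): ...  # returns per-rank args/kwargs lists of length world_size
#   def my_collect_fn(worker_group, output): ...            # reduces the per-rank outputs
#   register_dispatch_mode("MY_CUSTOM_MODE", my_dispatch_fn, my_collect_fn)
# The names above are illustrative, not part of this module.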
DISPATCH_MODE_FN_REGISTRY = { Dispatch.ONE_TO_ALL: { "dispatch_fn": dispatch_one_to_all, "collect_fn": collect_all_to_all, }, Dispatch.ALL_TO_ALL: { "dispatch_fn": dispatch_all_to_all, "collect_fn": collect_all_to_all, }, Dispatch.MEGATRON_COMPUTE: { "dispatch_fn": dispatch_megatron_compute, "collect_fn": collect_megatron_compute, }, Dispatch.MEGATRON_PP_AS_DP: { "dispatch_fn": dispatch_megatron_pp_as_dp, "collect_fn": collect_megatron_pp_as_dp, }, Dispatch.MEGATRON_PP_ONLY: {"dispatch_fn": dispatch_one_to_all, "collect_fn": collect_megatron_pp_only}, Dispatch.MEGATRON_COMPUTE_PROTO: { "dispatch_fn": dispatch_megatron_compute_data_proto, "collect_fn": collect_megatron_compute_data_proto, }, Dispatch.MEGATRON_PP_AS_DP_PROTO: { "dispatch_fn": dispatch_megatron_pp_as_dp_data_proto, "collect_fn": collect_megatron_pp_as_dp_data_proto, }, Dispatch.DP_COMPUTE: {"dispatch_fn": dispatch_dp_compute, "collect_fn": collect_dp_compute}, Dispatch.DP_COMPUTE_PROTO: { "dispatch_fn": dispatch_dp_compute_data_proto, "collect_fn": collect_dp_compute_data_proto, }, Dispatch.DP_COMPUTE_PROTO_WITH_FUNC: { "dispatch_fn": dispatch_dp_compute_data_proto_with_func, "collect_fn": collect_dp_compute_data_proto, }, Dispatch.DP_COMPUTE_METRIC: {"dispatch_fn": dispatch_dp_compute_data_proto, "collect_fn": collect_dp_compute}, Dispatch.DIRECT_ROLLOUT_METHOD: { "dispatch_fn": dummy_direct_rollout_call, "collect_fn": dummy_direct_rollout_call, }, } def get_predefined_dispatch_fn(dispatch_mode): return DISPATCH_MODE_FN_REGISTRY[dispatch_mode] def register_dispatch_mode(dispatch_mode_name, dispatch_fn, collect_fn): """ Register a new dispatch mode. """ dispatch_mode = Dispatch.register(dispatch_mode_name) _check_dispatch_mode(dispatch_mode) assert dispatch_mode not in DISPATCH_MODE_FN_REGISTRY, f"dispatch_mode_name {dispatch_mode_name} already exists" DISPATCH_MODE_FN_REGISTRY[dispatch_mode] = {"dispatch_fn": dispatch_fn, "collect_fn": collect_fn} def update_dispatch_mode(dispatch_mode, dispatch_fn, collect_fn): """ Update the dispatch mode. """ _check_dispatch_mode(dispatch_mode) assert dispatch_mode in DISPATCH_MODE_FN_REGISTRY, f"dispatch_mode {dispatch_mode} not found" DISPATCH_MODE_FN_REGISTRY[dispatch_mode] = {"dispatch_fn": dispatch_fn, "collect_fn": collect_fn} def get_predefined_execute_fn(execute_mode): """ Note that here we only ask execute_all and execute_rank_zero to be implemented. We leave the choice of how these two functions handle the 'blocking' argument to users. """ predefined_execute_mode_fn = { Execute.ALL: {"execute_fn_name": "execute_all"}, Execute.RANK_ZERO: {"execute_fn_name": "execute_rank_zero"}, } return predefined_execute_mode_fn[execute_mode] def _check_dispatch_mode(dispatch_mode): assert isinstance(dispatch_mode, Dispatch | dict), ( f"dispatch_mode must be a Dispatch or a Dict. Got {dispatch_mode}" ) if isinstance(dispatch_mode, dict): necessary_keys = ["dispatch_fn", "collect_fn"] for key in necessary_keys: assert key in dispatch_mode, f"key {key} should be in dispatch_mode if it is a dictionary" def _check_execute_mode(execute_mode): assert isinstance(execute_mode, Execute), f"execute_mode must be an Execute. 
Got {execute_mode}" def _materialize_futures(*args, **kwargs): new_args = [] for arg in args: if isinstance(arg, DataProtoFuture): arg = arg.get() # add more type to materialize new_args.append(arg) for k, v in kwargs.items(): if isinstance(v, DataProtoFuture): kwargs[k] = v.get() new_args = tuple(new_args) return new_args, kwargs def register(dispatch_mode=Dispatch.ALL_TO_ALL, execute_mode=Execute.ALL, blocking=True, materialize_futures=True): """Register a function with distributed execution configuration. This decorator registers a function with specific dispatch and execution modes for distributed computation. It handles both synchronous and asynchronous functions, and optionally materializes futures before execution. Args: dispatch_mode: Dispatch mode for computation distribution. Default: Dispatch.ALL_TO_ALL. execute_mode: Execute mode for computation distribution. Default: Execute.ALL. blocking: Whether the execution should be blocking. Defaults to True. materialize_futures: Whether to materialize the data before dispatching. Defaults to True. Returns: A decorator that wraps the original function with distributed execution configuration. """ _check_dispatch_mode(dispatch_mode=dispatch_mode) _check_execute_mode(execute_mode=execute_mode) def decorator(func): @wraps(func) def inner(*args, **kwargs): if materialize_futures: args, kwargs = _materialize_futures(*args, **kwargs) return func(*args, **kwargs) @wraps(func) async def async_inner(*args, **kwargs): if materialize_futures: args, kwargs = _materialize_futures(*args, **kwargs) return await func(*args, **kwargs) wrapper = async_inner if inspect.iscoroutinefunction(func) else inner attrs = {"dispatch_mode": dispatch_mode, "execute_mode": execute_mode, "blocking": blocking} setattr(wrapper, MAGIC_ATTR, attrs) return wrapper return decorator ================================================ FILE: verl_rl/verl/single_controller/base/megatron/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: verl_rl/verl/single_controller/base/megatron/worker.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from verl.single_controller.base.worker import DistGlobalInfo, DistRankInfo, Worker class MegatronWorker(Worker): def __init__(self, cuda_visible_devices=None) -> None: super().__init__(cuda_visible_devices) def get_megatron_global_info(self): from megatron.core import parallel_state as mpu tp_size = mpu.get_tensor_model_parallel_world_size() dp_size = mpu.get_data_parallel_world_size() pp_size = mpu.get_pipeline_model_parallel_world_size() cp_size = mpu.get_context_parallel_world_size() info = DistGlobalInfo(tp_size=tp_size, dp_size=dp_size, pp_size=pp_size, cp_size=cp_size) return info def get_megatron_rank_info(self): from megatron.core import parallel_state as mpu tp_rank = mpu.get_tensor_model_parallel_rank() dp_rank = mpu.get_data_parallel_rank() pp_rank = mpu.get_pipeline_model_parallel_rank() cp_rank = mpu.get_context_parallel_rank() info = DistRankInfo(tp_rank=tp_rank, dp_rank=dp_rank, pp_rank=pp_rank, cp_rank=cp_rank) return info def _init_hf_config_and_tf_config( self, model_path, tokenizer_or_path, dtype, override_model_config, override_transformer_config, trust_remote_code=False, use_mbridge=False, ): from transformers import AutoConfig from verl.models.mcore import hf_to_mcore_config from verl.utils import hf_processor, hf_tokenizer from verl.utils.fs import copy_to_local from verl.utils.model import update_model_config # Step 1: initialize the tokenizer self.local_path = copy_to_local(model_path) if tokenizer_or_path is None: self.tokenizer = hf_tokenizer(self.local_path, trust_remote_code=trust_remote_code) self.processor = hf_processor(self.local_path, trust_remote_code=trust_remote_code) elif isinstance(tokenizer_or_path, str): self.tokenizer = hf_tokenizer(copy_to_local(tokenizer_or_path), trust_remote_code=trust_remote_code) self.processor = hf_processor(copy_to_local(tokenizer_or_path), trust_remote_code=trust_remote_code) else: self.tokenizer = tokenizer_or_path self.processor = tokenizer_or_path if self.config.model.get("custom_chat_template", None) is not None: if self.processor is not None: self.processor.chat_template = self.config.model.custom_chat_template else: self.tokenizer.chat_template = self.config.model.custom_chat_template # Step 2: get the hf hf_config = AutoConfig.from_pretrained(self.local_path, trust_remote_code=trust_remote_code) # Step 3: override the hf config override_config_kwargs = { "bos_token_id": self.tokenizer.bos_token_id, "eos_token_id": self.tokenizer.eos_token_id, "pad_token_id": self.tokenizer.pad_token_id, } override_config_kwargs.update(override_model_config.get("model_config", {})) self.share_embeddings_and_output_weights = getattr(hf_config, "tie_word_embeddings", False) update_model_config(hf_config, override_config_kwargs=override_config_kwargs) self.architectures = getattr(hf_config, "architectures", None) if self.rank == 0: print(f"Model config after override: {hf_config}") tf_config = hf_to_mcore_config(hf_config, dtype, **override_transformer_config) if use_mbridge: from verl.models.mcore.mbridge import AutoBridge bridge = AutoBridge.from_config(hf_config) bridge.set_extra_args(**override_transformer_config) tf_config = bridge.config self.bridge = bridge else: self.bridge = None print(f"TF config: {tf_config}") self.hf_config = hf_config self.tf_config = tf_config ================================================ FILE: verl_rl/verl/single_controller/base/megatron/worker_group.py ================================================ # Copyright 2024 Bytedance Ltd. 
and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from verl.single_controller.base import ResourcePool, WorkerGroup from .worker import DistGlobalInfo, DistRankInfo class MegatronWorkerGroup(WorkerGroup): def __init__(self, resource_pool: ResourcePool, **kwargs): super().__init__(resource_pool=resource_pool, **kwargs) self._megatron_rank_info = None self._megatron_global_info: DistGlobalInfo = None def init_megatron(self, default_megatron_kwargs: dict = None): raise NotImplementedError("MegatronWorkerGroup.init_megatron should be overwritten") def get_megatron_rank_info(self, rank: int) -> DistRankInfo: assert 0 <= rank < self.world_size, f"rank must be from [0, world_size), Got {rank}" return self._megatron_rank_info[rank] @property def tp_size(self): assert self._megatron_global_info is not None, "MegatronWorkerGroup._megatron_global_info must be initialized" return self._megatron_global_info.tp_size @property def dp_size(self): assert self._megatron_global_info is not None, "MegatronWorkerGroup._megatron_global_info must be initialized" return self._megatron_global_info.dp_size @property def pp_size(self): assert self._megatron_global_info is not None, "MegatronWorkerGroup._megatron_global_info must be initialized" return self._megatron_global_info.pp_size @property def cp_size(self): assert self._megatron_global_info is not None, "MegatronWorkerGroup._megatron_global_info must be initialized" return self._megatron_global_info.cp_size def get_megatron_global_info(self): return self._megatron_global_info ================================================ FILE: verl_rl/verl/single_controller/base/register_center/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: verl_rl/verl/single_controller/base/register_center/ray.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. import ray @ray.remote class WorkerGroupRegisterCenter: def __init__(self, rank_zero_info): self.rank_zero_info = rank_zero_info # rank -> node_id self.workers_info: dict[int, str] = {} def get_rank_zero_info(self): return self.rank_zero_info def set_worker_info(self, rank, node_id) -> None: self.workers_info[rank] = node_id def get_worker_info(self) -> dict[int, str]: return self.workers_info def create_worker_group_register_center(name, info): return WorkerGroupRegisterCenter.options(name=name).remote(info) ================================================ FILE: verl_rl/verl/single_controller/base/worker.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ the class for Worker """ import os import socket from dataclasses import dataclass import ray from verl.utils.device import get_torch_device, get_visible_devices_keyword from .decorator import Dispatch, Execute, register @dataclass class DistRankInfo: tp_rank: int dp_rank: int pp_rank: int cp_rank: int @dataclass class DistGlobalInfo: tp_size: int dp_size: int pp_size: int cp_size: int class WorkerHelper: @staticmethod def _get_node_ip(): if os.getenv("WG_BACKEND", None) == "ray": return ray.util.get_node_ip_address() else: raise NotImplementedError("WG_BACKEND now just support ray mode.") @staticmethod def _get_free_port(): with socket.socket() as sock: sock.bind(("", 0)) return sock.getsockname()[1] def get_availale_master_addr_port(self): return self._get_node_ip().strip("[]"), str(self._get_free_port()) # we assume that in each WorkerGroup, there is a Master Worker class Worker(WorkerHelper): """A distributed worker that handles initialization and configuration for distributed training. This class manages worker initialization, configuration, and provides methods for executing distributed operations. It handles communication settings, device configuration, and worker metadata management. """ fused_worker_attr_name = "fused_worker_dict" def __new__(cls, *args, **kwargs): """Create a new Worker instance with proper initialization based on environment settings.""" instance = super().__new__(cls) # note that here we use int to distinguish disable_worker_init = int(os.environ.get("DISABLE_WORKER_INIT", 0)) if disable_worker_init: return instance rank = os.environ.get("RANK", None) worker_group_prefix = os.environ.get("WG_PREFIX", None) # when decorator @ray.remote applies, __new__ will be called while we don't want to apply _configure_before_init if None not in [rank, worker_group_prefix] and "ActorClass(" not in cls.__name__: instance._configure_before_init(f"{worker_group_prefix}_register_center", int(rank)) return instance def _configure_before_init(self, register_center_name: str, rank: int): """Configure worker settings before initialization. 
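Rank 0 creates the register-center actor and publishes MASTER_ADDR/MASTER_PORT through it; other ranks look the actor up, and every rank then reports its node id for affinity scheduling.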
Args: register_center_name (str): Name of the register center Ray actor for worker coordination rank (int): Rank of the worker in the distributed setup """ assert isinstance(rank, int), f"rank must be int, instead of {type(rank)}" if rank == 0: master_addr, master_port = self.get_availale_master_addr_port() rank_zero_info = { "MASTER_ADDR": master_addr, "MASTER_PORT": master_port, } if os.getenv("WG_BACKEND", None) == "ray": from verl.single_controller.base.register_center.ray import create_worker_group_register_center self.register_center = create_worker_group_register_center( name=register_center_name, info=rank_zero_info ) os.environ.update(rank_zero_info) else: self.register_center = ray.get_actor(register_center_name) # set worker info for node affinity scheduling ray.get(self.register_center.set_worker_info.remote(rank, ray.get_runtime_context().get_node_id())) @classmethod def env_keys(cls): """The keys of the environment variables that are used to configure the Worker.""" return [ "WORLD_SIZE", "RANK", "LOCAL_WORLD_SIZE", "LOCAL_RANK", "MASTER_ADDR", "MASTER_PORT", get_visible_devices_keyword().upper(), ] def __init__(self, cuda_visible_devices=None) -> None: """Initialize the worker with environment settings and device configuration. Args: cuda_visible_devices (str, optional): CUDA visible devices configuration. Defaults to None. """ # construct a meta from environment variable. Note that the import must be inside the class because # it is executed remotely import os self._setup_env_cuda_visible_devices() world_size = int(os.environ["WORLD_SIZE"]) rank = int(os.environ["RANK"]) self._rank = rank self._world_size = world_size master_addr = os.environ["MASTER_ADDR"] master_port = os.environ["MASTER_PORT"] local_world_size = int(os.getenv("LOCAL_WORLD_SIZE", "1")) local_rank = int(os.getenv("LOCAL_RANK", "0")) store = { "_world_size": world_size, "_rank": rank, "_local_world_size": local_world_size, "_local_rank": local_rank, "_master_addr": master_addr, "_master_port": master_port, } if cuda_visible_devices is not None: store[f"_{get_visible_devices_keyword()}".lower()] = cuda_visible_devices self._configure_with_store(store=store) self.fused_worker_dict = {} def get_fused_worker_by_name(self, worker_name: str): """Get a fused worker by its name. Args: worker_name (str): Name of the worker to retrieve """ return self.fused_worker_dict.get(worker_name, None) def _setup_env_cuda_visible_devices(self): from verl.utils.ray_utils import ray_noset_visible_devices is_ray_noset_visible_devices = ray_noset_visible_devices() # Prevent use of clashing `{CUDA/HIP/ROCR}_VISIBLE_DEVICES`` rocr_val = os.environ.get("ROCR_VISIBLE_DEVICES", None) hip_val = os.environ.get("HIP_VISIBLE_DEVICES", None) cuda_val = os.environ.get("CUDA_VISIBLE_DEVICES", None) if hip_val: # Switch the use of HIP_VISIBLE_DEVICES to CUDA_VISIBLE_DEVICES for consistency. # Make sure that the HIP_VISIBLE_DEVICES is set to the same value as CUDA_VISIBLE_DEVICES # at this point. val = os.environ.pop("HIP_VISIBLE_DEVICES") hip_val = None if cuda_val: assert val == cuda_val, ( f"Please use the same HIP_VISIBLE_DEVICES or CUDA_VISIBLE_DEVICES, inconsistant values " f"found: {val} and {cuda_val}." ) else: cuda_val = val os.environ["CUDA_VISIBLE_DEVICES"] = val # os.environ["HIP_VISIBLE_DEVICES"] = val if rocr_val: # You must take care if both HIP/CUDA and ROCR env vars are set as they have # different meanings. Both env vars accept either a list of ints or a # list of UUIDs. 
The ROCR env var is processed first which then reduces # the number of GPUs that HIP can select from. # https://github.com/pytorch/pytorch/pull/144026 # To avoid the complexity of this, we simply gives out error if both are set # (Also to keep consistency with ray's practice with 2.45.0). # Otherwise, we will set ROCR_VISIBLE_DEVICES to CUDA_VISIBLE_DEVICES # and remove ROCR_VISIBLE_DEVICES. if cuda_val: raise ValueError("Please don't set ROCR_VISIBLE_DEVICES when HIP/CUDA_VISIBLE_DEVICES is set.") cuda_val = os.environ.pop("ROCR_VISIBLE_DEVICES") os.environ["CUDA_VISIBLE_DEVICES"] = cuda_val rocr_val = None if is_ray_noset_visible_devices: # NOTE: Ray will automatically set the *_VISIBLE_DEVICES # environment variable for each actor, unless # RAY_EXPERIMENTAL_NOSET_*_VISIBLE_DEVICES is set, # so we need to set local rank when the flag is set. local_rank = os.environ.get("RAY_LOCAL_RANK") os.environ["LOCAL_RANK"] = local_rank get_torch_device().set_device(int(local_rank)) def _configure_with_store(self, store: dict): """ This function should only be called inside by WorkerGroup """ store_env_dict = {f"_{key.lower()}": store.get(f"_{key.lower()}", None) for key in type(self).env_keys()} self.__dict__.update(store_env_dict) # this is hacky # print(f"__dict__: {self.__dict__}") for key in type(self).env_keys(): val = self.__dict__.get(f"_{key.lower()}", None) if val is not None: # print(f"set {key} to {val}") os.environ[key] = str(val) os.environ["REDIS_STORE_SERVER_HOST"] = ( str(self._master_addr).replace("[", "").replace("]", "") if self._master_addr else "" ) def get_master_addr_port(self): """Get the master address and port for distributed communication.""" return self._master_addr, self._master_port def get_cuda_visible_devices(self): """Get the CUDA visible devices configuration.""" import os visible_devices = os.environ.get(get_visible_devices_keyword().upper(), "not set") return visible_devices @property def world_size(self): """Get the total number of workers in the distributed setup.""" return self._world_size @property def rank(self): """Get the rank of this worker in the distributed setup.""" return self._rank @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO_WITH_FUNC) def execute_with_func_generator(self, func, *args, **kwargs): """Execute a function with function generator dispatch mode. Args: func: Function to execute *args: Positional arguments for the function **kwargs: Keyword arguments for the function """ ret_proto = func(self, *args, **kwargs) return ret_proto @register(dispatch_mode=Dispatch.ALL_TO_ALL, execute_mode=Execute.RANK_ZERO) def execute_func_rank_zero(self, func, *args, **kwargs): """Execute a function in rank zero execution mode. Args: func: Function to execute *args: Positional arguments for the function **kwargs: Keyword arguments for the function """ result = func(*args, **kwargs) return result ================================================ FILE: verl_rl/verl/single_controller/base/worker_group.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. """ the class of WorkerGroup """ import logging import signal import threading import time from typing import Any, Callable from .decorator import MAGIC_ATTR, Dispatch, get_predefined_dispatch_fn, get_predefined_execute_fn class ResourcePool: """ Manages a pool of resources across multiple nodes, tracking process counts and GPU allocations. The class provides methods to calculate world size, local world sizes, and local ranks across all nodes in the pool. """ def __init__(self, process_on_nodes=None, max_colocate_count: int = 10, n_gpus_per_node=8) -> None: """Initialize the ResourcePool with node processes and GPU configuration. Args: process_on_nodes (List[int], optional): List of process counts per node. Defaults to empty list. max_colocate_count (int, optional): Maximum number of processes that can be colocated. Defaults to 10. n_gpus_per_node (int, optional): Number of GPUs available per node. Defaults to 8. """ if process_on_nodes is None: process_on_nodes = [] self._store = process_on_nodes self.max_colocate_count = max_colocate_count self.n_gpus_per_node = n_gpus_per_node # this is left for future huawei GPU that contains 16 GPUs per node def add_node(self, process_count): self._store.append(process_count) @property def world_size(self): """Total number of processes across all nodes in the pool.""" return sum(self._store) def __call__(self) -> Any: return self._store @property def store(self): return self._store def local_world_size_list(self) -> list[int]: """Returns a flat list where each process has its local world size.""" nested_local_world_size_list = [ [local_world_size for _ in range(local_world_size)] for local_world_size in self._store ] return [item for row in nested_local_world_size_list for item in row] def local_rank_list(self) -> list[int]: """Returns a flat list of local ranks for all processes across all nodes.""" nested_local_rank_list = [[i for i in range(local_world_size)] for local_world_size in self._store] return [item for row in nested_local_rank_list for item in row] class ClassWithInitArgs: """ Wrapper class that stores constructor arguments for deferred instantiation. This class is particularly useful for remote class instantiation where the actual construction needs to happen at a different time or location. """ def __init__(self, cls, *args, **kwargs) -> None: """Initialize the ClassWithInitArgs instance. Args: cls: The class to be instantiated later *args: Positional arguments for the class constructor **kwargs: Keyword arguments for the class constructor """ self.cls = cls self.args = args self.kwargs = kwargs self.fused_worker_used = False def __call__(self) -> Any: """Instantiate the stored class with the stored arguments.""" return self.cls(*self.args, **self.kwargs) def check_workers_alive(workers: list, is_alive: Callable, gap_time: float = 1) -> None: """Continuously monitors worker processes and raises SIGABRT if any worker dies. Args: workers (List): List of worker objects to monitor is_alive (Callable): Function to check if a worker is alive gap_time (float): Time interval between checks """ import time while True: for worker in workers: if not is_alive(worker): logging.warning(f"worker {worker} is not alive sending signal to main thread") signal.raise_signal(signal.SIGABRT) time.sleep(gap_time) class WorkerGroup: """ Base class for managing a group of workers in a distributed system. 
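Backend-specific subclasses (for example, the Ray-based RayWorkerGroup) supply worker creation and implement _is_worker_alive.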
The class provides methods for worker management, aliveness checking, and method binding. """ fused_worker_execute_fn_name = "_fuw_execute" def __init__(self, resource_pool: ResourcePool, **kwargs) -> None: self._is_init_with_detached_workers = resource_pool is None self.fused_worker_used = False if resource_pool is not None: # handle the case when WorkGroup is attached to an existing one self._procecss_dispatch_config = resource_pool() else: self._procecss_dispatch_config = None self._workers = [] self._worker_names = [] self._master_addr = None self._master_port = None self._checker_thread: threading.Thread = None def _is_worker_alive(self, worker): """Check if a worker is alive. Must be implemented by derived classes.""" raise NotImplementedError("WorkerGroup._is_worker_alive called, should be implemented in derived class.") def _block_until_all_workers_alive(self) -> None: """Blocks until all workers in the group are alive.""" while True: all_state = [self._is_worker_alive(worker) for worker in self._workers] if False in all_state: time.sleep(1) else: break def start_worker_aliveness_check(self, every_n_seconds=1) -> None: """Starts a background thread to monitor worker aliveness. Args: every_n_seconds (int): Interval between aliveness checks """ # before starting checking worker aliveness, make sure all workers are already alive self._block_until_all_workers_alive() self._checker_thread = threading.Thread( target=check_workers_alive, args=(self._workers, self._is_worker_alive, every_n_seconds) ) self._checker_thread.start() @property def world_size(self): """Number of workers in the group.""" return len(self._workers) def _bind_worker_method(self, user_defined_cls, func_generator): """Binds worker methods to the WorkerGroup based on registered attributes. Args: user_defined_cls (type): The class containing methods to bind func_generator (Callable): Function that generates the bound method Returns: List[str]: List of method names that were successfully bound """ method_names = [] for method_name in dir(user_defined_cls): try: method = getattr(user_defined_cls, method_name) assert callable(method), f"{method_name} in {user_defined_cls} is not callable" except Exception: # if it is a property, it will fail because Class doesn't have instance property continue if hasattr(method, MAGIC_ATTR): # this method is decorated by register attribute = getattr(method, MAGIC_ATTR) assert isinstance(attribute, dict), f"attribute must be a dictionary. 
Got {type(attribute)}" assert "dispatch_mode" in attribute, "attribute must contain dispatch_mode in its key" dispatch_mode = attribute["dispatch_mode"] execute_mode = attribute["execute_mode"] blocking = attribute["blocking"] # get dispatch fn if isinstance(dispatch_mode, Dispatch): # get default dispatch fn fn = get_predefined_dispatch_fn(dispatch_mode=dispatch_mode) dispatch_fn = fn["dispatch_fn"] collect_fn = fn["collect_fn"] else: assert isinstance(dispatch_mode, dict) assert "dispatch_fn" in dispatch_mode assert "collect_fn" in dispatch_mode dispatch_fn = dispatch_mode["dispatch_fn"] collect_fn = dispatch_mode["collect_fn"] # get execute_fn_name execute_mode = get_predefined_execute_fn(execute_mode=execute_mode) wg_execute_fn_name = execute_mode["execute_fn_name"] # get execute_fn from string try: execute_fn = getattr(self, wg_execute_fn_name) assert callable(execute_fn), "execute_fn must be callable" except Exception: print(f"execute_fn {wg_execute_fn_name} is invalid") raise # bind a new method to the RayWorkerGroup func = func_generator( self, method_name, dispatch_fn=dispatch_fn, collect_fn=collect_fn, execute_fn=execute_fn, blocking=blocking, ) try: setattr(self, method_name, func) method_names.append(method_name) except Exception as e: raise ValueError(f"Fail to set method_name {method_name}") from e return method_names ================================================ FILE: verl_rl/verl/single_controller/ray/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .base import ( RayClassWithInitArgs, RayResourcePool, RayWorkerGroup, create_colocated_worker_cls, create_colocated_worker_cls_fused, ) __all__ = [ "RayClassWithInitArgs", "RayResourcePool", "RayWorkerGroup", "create_colocated_worker_cls", "create_colocated_worker_cls_fused", ] ================================================ FILE: verl_rl/verl/single_controller/ray/base.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
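# NOTE (editorial sketch, not part of the upstream file): func_generator below is the glue
# between WorkerGroup._bind_worker_method and Ray. For each @register-decorated method it
# builds a callable that runs, in order:
#     args, kwargs = dispatch_fn(worker_group, *args, **kwargs)  # shard/broadcast inputs
#     output = execute_fn(method_name, *args, **kwargs)          # issue the remote calls
#     output = ray.get(output) if blocking else output           # optionally wait
#     output = collect_fn(worker_group, output)                  # merge per-worker outputs
# and finally trims any rows that dispatch_fn added as padding.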
import inspect import logging import time from copy import deepcopy from typing import Any, Optional import ray from ray.experimental.state.api import get_actor from ray.util import list_named_actors from ray.util.placement_group import PlacementGroup, placement_group from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy, PlacementGroupSchedulingStrategy from verl.protocol import DataProto, _padding_size_key from verl.single_controller.base import ClassWithInitArgs, ResourcePool, Worker, WorkerGroup from verl.single_controller.base.decorator import MAGIC_ATTR, Dispatch from verl.utils.py_functional import temp_env_var __all__ = ["Worker"] def get_random_string(length: int) -> str: import random import string letters_digits = string.ascii_letters + string.digits return "".join(random.choice(letters_digits) for _ in range(length)) def func_generator(self, method_name, dispatch_fn, collect_fn, execute_fn, blocking): class Functor: def __call__(this, *args, **kwargs): args, kwargs = dispatch_fn(self, *args, **kwargs) padding_count = kwargs.pop(_padding_size_key, 0) output = execute_fn(method_name, *args, **kwargs) if blocking: output = ray.get(output) output = collect_fn(self, output) if padding_count > 0: if isinstance(output, DataProto): indices = [i for i in range(len(output))][:-padding_count] output = output.select_idxs(indices) elif isinstance(output, list): output = output[:-padding_count] return output # use class type to pass the method_name to get a better observability return type(method_name, (Functor,), {})() def sort_placement_group_by_node_ip(pgs: list[PlacementGroup]) -> list[PlacementGroup]: """ Sort the placement groups by node ip, all bundles in a single placement group should be on the same node. FSDPCheckpointManager saves sharded model states and optimizer states in local storage, which requires RANK to be consistent across nodes when resume from checkpoint. With this function, if there's only one resource pool and there's no node change, RANK should be consistent across nodes in multiple ray jobs, even if the whole ray cluster is restarted. 
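For example (illustrative): given one placement group on node 10.0.0.2 and one on node 10.0.0.1, the returned order is [pg on 10.0.0.1, pg on 10.0.0.2], because the groups are sorted by the lexicographic order of their node IP strings.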
""" node_ip = {node["NodeID"]: node["NodeManagerAddress"] for node in ray.nodes()} pg_ip = {} for pg in pgs: specs = ray._private.state.state.placement_group_table(pg.id) # all bunles should be on the same node node_id = specs["bundles_to_node_id"][0] pg_ip[pg.id] = node_ip[node_id] return sorted(pgs, key=lambda pg: pg_ip[pg.id]) class RayResourcePool(ResourcePool): def __init__( self, process_on_nodes: Optional[list[int]] = None, use_gpu: bool = True, name_prefix: str = None, max_colocate_count: int = 10, detached=False, accelerator_type: Optional[str] = None, ) -> None: super().__init__(process_on_nodes, max_colocate_count) self.use_gpu = use_gpu # print(f"in RayProcessDispatchConfiguration: name_prefix = {name_prefix}") self.name_prefix = get_random_string(length=6) if name_prefix is None else name_prefix self.pgs = None self.detached = detached self.accelerator_type = accelerator_type def get_placement_groups(self, strategy="STRICT_PACK", name=None, device_name="cuda"): if self.pgs is not None: return self.pgs pg_name_prefix = ( name if name else f"{self.name_prefix}verl_group_{'_'.join([str(count) for count in self._store])}:" ) # print(f"pg_name_prefix = {pg_name_prefix}") if device_name == "npu": device_name = "NPU" elif device_name == "cuda": device_name = "GPU" bundle = {"CPU": self.max_colocate_count} if self.use_gpu: bundle[device_name] = 1 if self.accelerator_type is not None: bundle[self.accelerator_type] = 1e-4 pg_scheme = [[bundle.copy() for _ in range(process_count)] for process_count in self._store] lifetime = "detached" if self.detached else None pgs = [ placement_group(bundles=bundles, strategy=strategy, name=pg_name_prefix + str(idx), lifetime=lifetime) for idx, bundles in enumerate(pg_scheme) ] ray.get([pg.ready() for pg in pgs]) self.pgs = pgs return pgs def extract_pg_from_exist( resource_pools: dict[str, RayResourcePool], src_role_names: list[str], resource_pool: RayResourcePool ) -> list: src_pgs = [ pg for role_name, resource_pool in resource_pools.items() for pg in resource_pool.get_placement_groups() if role_name in src_role_names ] sorted_src_pgs = sorted(src_pgs, key=lambda pg: pg.bundle_count, reverse=True) sorted_process_on_nodes = sorted([(val, idx) for idx, val in enumerate(resource_pool.store)], reverse=True) unsorted_pgs: list[tuple[int, PlacementGroup]] = [] searching_idx = 0 for request_process, original_idx in sorted_process_on_nodes: assert searching_idx < len(sorted_src_pgs), f"no enough nodes for request: searching {searching_idx} th node" assert request_process <= sorted_src_pgs[searching_idx].bundle_count, ( f"requesting {request_process} processes, bundle count cannot satisfy" ) unsorted_pgs.append((original_idx, sorted_src_pgs[searching_idx])) searching_idx += 1 return [pg for _, pg in sorted(unsorted_pgs)] def merge_resource_pool(rp1: RayResourcePool, rp2: RayResourcePool) -> RayResourcePool: assert rp1.use_gpu == rp2.use_gpu, "Both RayResourcePool must either use_gpu or not" assert rp1.max_colocate_count == rp2.max_colocate_count, "Both RayResourcePool must has the same max_colocate_count" assert rp1.n_gpus_per_node == rp2.n_gpus_per_node, "Both RayResourcePool must has the same n_gpus_per_node" assert rp1.detached == rp2.detached, "Detached ResourcePool cannot be merged with non-detached ResourcePool" new_store = rp1.store + rp2.store merged = type(rp1)(new_store, rp1.use_gpu, f"{rp1.name_prefix}_{rp2.name_prefix}") merged.pgs = rp1.get_placement_groups() + rp2.get_placement_groups() return merged class 
RayClassWithInitArgs(ClassWithInitArgs): """A wrapper class for Ray actors with initialization arguments. This class extends ClassWithInitArgs to provide additional functionality for configuring and creating Ray actors with specific resource requirements and scheduling strategies. """ def __init__(self, cls, *args, **kwargs) -> None: # self._options = kwargs.pop('options', dict()) super().__init__(cls, *args, **kwargs) self._options = {} self._additional_resource = {} def set_additional_resource(self, additional_resource): """Set additional resource requirements for the actor. Args: additional_resource: Dictionary specifying additional resource requirements """ self._additional_resource = additional_resource def update_options(self, options: dict): """Update the Ray actor creation options. Args: options: Dictionary of options to update """ self._options.update(options) def __call__( self, placement_group, placement_group_bundle_idx, use_gpu: bool = True, num_gpus=1, sharing_with=None, device_name="cuda", ) -> Any: """Create and return a Ray actor with the configured options. Args: placement_group: Ray placement group for scheduling placement_group_bundle_idx: Index of the bundle in the placement group use_gpu: Whether to use GPU resources num_gpus: Number of GPUs to allocate sharing_with: Actor to share resources with device_name: Device for training Returns: A Ray actor handle with the configured options """ if sharing_with is not None: target_node_id = ray.get(sharing_with.get_node_id.remote()) visible_devices = ray.get(sharing_with.get_cuda_visible_devices.remote()) options = {"scheduling_strategy": NodeAffinitySchedulingStrategy(node_id=target_node_id, soft=False)} return self.cls.options(**options).remote(*self.args, cuda_visible_devices=visible_devices, **self.kwargs) options = { "scheduling_strategy": PlacementGroupSchedulingStrategy( placement_group=placement_group, placement_group_bundle_index=placement_group_bundle_idx ) } options.update(self._options) if use_gpu and device_name == "cuda": options["num_gpus"] = num_gpus if use_gpu and device_name == "npu": options["resources"] = {"NPU": num_gpus} if len(self._additional_resource) > 1: for k, v in self._additional_resource.items(): options[k] = v # print("cls:", self.cls) # print("args: ", self.args) # print("kwargs: ", self.kwargs) return self.cls.options(**options).remote(*self.args, **self.kwargs) class RayWorkerGroup(WorkerGroup): """A group of Ray workers that can be managed collectively. This class extends WorkerGroup to provide Ray-specific functionality for creating and managing groups of Ray actors with specific resource requirements and scheduling strategies. """ def __init__( self, resource_pool: RayResourcePool = None, ray_cls_with_init: RayClassWithInitArgs = None, bin_pack: bool = True, name_prefix: str = None, detached=False, worker_names=None, worker_handles: list[ray.actor.ActorHandle] = None, ray_wait_register_center_timeout: int = 300, **kwargs, ) -> None: """Initialize a RayWorkerGroup. 
Args: resource_pool: Resource pool for worker allocation ray_cls_with_init: Class with initialization arguments for workers bin_pack: Whether to use strict bin packing for resource allocation name_prefix: Prefix for worker names detached: Whether workers should be detached worker_names: Names of existing workers to attach to worker_handles: Handles of existing workers to attach to ray_wait_register_center_timeout: Timeout for waiting on register center **kwargs: Additional keyword arguments """ super().__init__(resource_pool=resource_pool, **kwargs) self.ray_cls_with_init = ray_cls_with_init self.name_prefix = get_random_string(length=6) if name_prefix is None else name_prefix self._ray_wait_register_center_timeout = ray_wait_register_center_timeout # Whether the WorkerGroup is a Colocate WorkerGroup created by FusedWorker. self.fused_worker_used = ray_cls_with_init.fused_worker_used # if a WorkerGroup is spawned from a Colocate WorkerGroup, this indicates which sub-class is bound to # this WorkerGroup. self.sub_cls_name = "" self.device_name = kwargs.get("device_name", "cuda") self.profile_steps = kwargs.get("profile_steps", None) self.worker_nsight_options = kwargs.get("worker_nsight_options", None) if self.worker_nsight_options is not None and self.worker_nsight_options["capture-range-end"] is None: self.worker_nsight_options["capture-range-end"] = f"repeat-shutdown:{6 * len(self.profile_steps)}" if worker_names is not None and (not self.fused_worker_used): assert self._is_init_with_detached_workers self._worker_names = worker_names if self._is_init_with_detached_workers: self._init_with_detached_workers(worker_names=worker_names, worker_handles=worker_handles) else: self._init_with_resource_pool( resource_pool=resource_pool, ray_cls_with_init=ray_cls_with_init, bin_pack=bin_pack, detached=detached ) if ray_cls_with_init is not None: self._bind_worker_method(self.ray_cls_with_init.cls, func_generator) self.wg_dict = None self.method_names = [] def _is_worker_alive(self, worker: ray.actor.ActorHandle): """Check if a worker actor is still alive. Args: worker: Ray actor handle to check Returns: bool: True if the worker is alive, False otherwise """ worker_state_dict = get_actor(worker._actor_id.hex()) return worker_state_dict.get("state", "undefined") == "ALIVE" if worker_state_dict is not None else False def _init_with_detached_workers(self, worker_names, worker_handles): # ray.get_actor holds a weak reference to the actor, which causes actors to be garbage collected unexpectedly # if we only hold the spawned RayWorkerGroup. By passing actor handles explicitly, the spawned RayWorkerGroup # holds strong references to these actors. # https://github.com/ray-project/ray/pull/45699 workers = worker_handles if worker_handles else [ray.get_actor(name=name) for name in worker_names] self._workers = workers self._world_size = len(worker_names) def _init_with_resource_pool(self, resource_pool, ray_cls_with_init, bin_pack, detached): """Initialize the worker group by creating new workers from a resource pool.
Args: resource_pool: Resource pool for worker allocation ray_cls_with_init: Class with initialization arguments for workers bin_pack: Whether to use strict bin packing for resource allocation detached: Whether workers should be detached """ use_gpu = resource_pool.use_gpu strategy = "PACK" if bin_pack: strategy = "STRICT_PACK" pgs = resource_pool.get_placement_groups(strategy=strategy, device_name=self.device_name) world_size = resource_pool.world_size self._world_size = world_size # cia.add_kwarg("_world_size", world_size) num_gpus = 1 / resource_pool.max_colocate_count rank = -1 local_world_size = resource_pool.store[0] for pg_idx, pg in enumerate(sort_placement_group_by_node_ip(pgs)): assert local_world_size <= pg.bundle_count, f"when generating for {self.name_prefix}, for the " for local_rank in range(local_world_size): rank += 1 # we pass in environment variable at option so that Worker can use environment variable to set env_vars = { "WORLD_SIZE": str(world_size), "RANK": str(rank), "WG_PREFIX": self.name_prefix, "WG_BACKEND": "ray", "RAY_LOCAL_WORLD_SIZE": str(local_world_size), "RAY_LOCAL_RANK": str(local_rank), } if rank != 0: env_vars["MASTER_ADDR"] = self._master_addr env_vars["MASTER_PORT"] = self._master_port import re cia_name = type(ray_cls_with_init.cls).__name__ match = re.search(r"ActorClass\(([^)]+)\)", cia_name) # ray.remote(Obj) -> "ActorClass(Obj)" cia_name = match.group(1) if match else cia_name # "ActorClass(Obj)" -> "Obj" name = f"{self.name_prefix}{cia_name}_{pg_idx}:{local_rank}" # e.g. Worker_2:5 if self.profile_steps and self.device_name == "cuda": ray_cls_with_init.update_options( { "runtime_env": { "env_vars": env_vars, "nsight": self.worker_nsight_options, }, "name": name, } ) else: ray_cls_with_init.update_options({"runtime_env": {"env_vars": env_vars}, "name": name}) if detached: ray_cls_with_init.update_options({"lifetime": "detached"}) # create a worker worker = ray_cls_with_init( placement_group=pg, placement_group_bundle_idx=local_rank, use_gpu=use_gpu, num_gpus=num_gpus, device_name=self.device_name, ) self._workers.append(worker) self._worker_names.append(name) if rank == 0: register_center_actor = None actor_name = f"{self.name_prefix}_register_center" start_time = time.time() while time.time() - start_time < self._ray_wait_register_center_timeout: if actor_name in list_named_actors(): register_center_actor = ray.get_actor(actor_name) break elapsed = int(time.time() - start_time) if elapsed % 30 == 0: logging.warning( "Waiting for register center actor %s to be ready. Elapsed time: %s seconds out of " "%s seconds.", actor_name, elapsed, self._ray_wait_register_center_timeout, ) time.sleep(1) if register_center_actor is None: raise TimeoutError( f"Failed to get register_center_actor {actor_name} " f"in {list_named_actors(all_namespaces=True)} " f"for {self._ray_wait_register_center_timeout} seconds. " "Ensure that any lingering Ray resources from previous " "runs are cleaned up (e.g., by restarting the Ray cluster), " "or adjust the waiting time by modifying the config " "`trainer.ray_wait_register_center_timeout`." 
) rank_zero_info = ray.get(register_center_actor.get_rank_zero_info.remote()) self._master_addr, self._master_port = rank_zero_info["MASTER_ADDR"], rank_zero_info["MASTER_PORT"] # print(f"rank_zero_info: {rank_zero_info}") # print(f"master_addr: {self._master_addr}, master_port: {self._master_port}") @property def worker_names(self): return self._worker_names @classmethod def from_detached( cls, name_prefix=None, worker_names=None, worker_handles=None, ray_cls_with_init=None, **kwargs, ): """Create a worker group from existing detached workers. Args: name_prefix: Prefix for worker names worker_names: Names of existing workers to attach to ray_cls_with_init: Class with initialization arguments for workers Returns: A new RayWorkerGroup instance """ worker_group = cls( resource_pool=None, ray_cls_with_init=ray_cls_with_init, name_prefix=name_prefix, worker_names=worker_names, worker_handles=worker_handles, **kwargs, ) return worker_group def spawn(self, prefix_set): """Spawn to a dictionary of worker groups, each with a subset of method with prefix. Args: prefix_set: Set of prefixes to create worker groups for Returns: Dictionary of worker groups keyed by prefix """ if self.fused_worker_used: return self.spawn_fused(prefix_set) def _rebind_actor_methods(worker_group, actor_name): prefix: str = actor_name + "_" for method_name in dir(worker_group): if method_name.startswith(prefix): original_method_name = method_name.removeprefix(prefix) method = getattr(worker_group, method_name) setattr(worker_group, original_method_name, method) new_worker_group_dict = {} for prefix in prefix_set: new_worker_group = self.from_detached( name_prefix=self.name_prefix, worker_names=self._worker_names, worker_handles=self._workers, ray_cls_with_init=self.ray_cls_with_init, profile_steps=self.profile_steps, worker_nsight_options=self.worker_nsight_options, ) _rebind_actor_methods(new_worker_group, prefix) new_worker_group_dict[prefix] = new_worker_group return new_worker_group_dict def spawn_fused(self, prefix_set): """Create a dictionary of worker groups for fused workers. Args: prefix_set: Set of prefixes to create worker groups for Returns: Dictionary of worker groups keyed by prefix """ wg_dict = dict() for key in prefix_set: new_wg = deepcopy(self) new_wg._bind_worker_method(self.ray_cls_with_init.cls.raw_cls_dict[key], func_generator) new_wg.sub_cls_name = key wg_dict[key] = new_wg return wg_dict def fuse(self, prefix_set): """Fuse multiple worker groups into the current worker group. Args: prefix_set: Set of prefixes to fuse into the worker group """ if self.wg_dict is None: self.wg_dict = self.spawn(prefix_set) for role_name, role_wg in self.wg_dict.items(): setattr(self, role_name, role_wg) self.method_names = self._bind_worker_method(self.ray_cls_with_init.cls, func_generator) def _execute_remote_single_worker(self, worker, method_name: str, *args, **kwargs): """Execute a method on a single worker remotely. 
Args: worker: The worker actor handle method_name: Name of the method to execute *args: Positional arguments for the method **kwargs: Keyword arguments for the method Returns: Remote object reference to the method execution """ if self.fused_worker_used and method_name not in self.method_names: remote_call = getattr(worker, self.fused_worker_execute_fn_name) return remote_call.remote(f"{self.sub_cls_name}_fwmn_{method_name}", *args, **kwargs) # fused worker not used remote_call = getattr(worker, method_name) return remote_call.remote(*args, **kwargs) def execute_rank_zero_sync(self, method_name: str, *args, **kwargs): """Execute a method on rank zero worker synchronously. Args: method_name: Name of the method to execute *args: Positional arguments for the method **kwargs: Keyword arguments for the method Returns: Result of the method execution """ return ray.get(self.execute_rank_zero_async(method_name, *args, **kwargs)) def execute_rank_zero_async(self, method_name: str, *args, **kwargs): """Execute a method on rank zero worker asynchronously. Args: method_name: Name of the method to execute *args: Positional arguments for the method **kwargs: Keyword arguments for the method Returns: Remote object reference to the method execution """ return self._execute_remote_single_worker(self._workers[0], method_name, *args, **kwargs) def execute_rank_zero(self, method_name: str, *args, **kwargs): """Alias for execute_rank_zero_async. Args: method_name: Name of the method to execute *args: Positional arguments for the method **kwargs: Keyword arguments for the method Returns: Remote object reference to the method execution """ return self.execute_rank_zero_async(method_name, *args, **kwargs) def execute_all(self, method_name: str, *args, **kwargs): """Alias for execute_all_async. Args: method_name: Name of the method to execute *args: Positional arguments for the method **kwargs: Keyword arguments for the method Returns: List of remote object references to the method executions """ return self.execute_all_async(method_name, *args, **kwargs) def execute_all_sync(self, method_name: str, *args, **kwargs): """Execute a method on all workers synchronously. Args: method_name: Name of the method to execute *args: Positional arguments for the method **kwargs: Keyword arguments for the method Returns: List of results from all workers """ return ray.get(self.execute_all_async(method_name, *args, **kwargs)) def execute_all_async(self, method_name: str, *args, **kwargs): """Execute a method on all workers asynchronously. 
Args: method_name: Name of the method to execute *args: Positional arguments for the method **kwargs: Keyword arguments for the method Returns: List of remote object references to the method executions """ # Here, we assume that if all arguments in args and kwargs are lists, # and their lengths match len(self._workers), we'll distribute each # element in these lists to the corresponding worker # print(f"execute_all_async: method {method_name}({args}, {kwargs})") length = len(self._workers) if all(isinstance(arg, list) for arg in args) and all(isinstance(kwarg, list) for kwarg in kwargs.values()): if all(len(arg) == length for arg in args) and all(len(kwarg) == length for kwarg in kwargs.values()): # print(f"splitting args and kwargs into {length} shards") result = [] for i in range(length): sliced_args = tuple(arg[i] for arg in args) sliced_kwargs = {k: v[i] for k, v in kwargs.items()} result.append( self._execute_remote_single_worker(self._workers[i], method_name, *sliced_args, **sliced_kwargs) ) return result return [self._execute_remote_single_worker(worker, method_name, *args, **kwargs) for worker in self._workers] @property def master_address(self): return self._master_addr @property def master_port(self): return self._master_port @property def workers(self): return self._workers @property def world_size(self): return self._world_size """ Utilities that enables creating workers inside the same ray.Actor, with code written in separate ray.Actors. """ # deprecated, switching to FusedWorker def _bind_workers_method_to_parent(cls, key, user_defined_cls): """ Binds the methods of each worker to the WorkerDict. Note that we only bind public methods that are decorated by register """ for method_name in dir(user_defined_cls): try: method = getattr(user_defined_cls, method_name) assert callable(method), f"{method_name} in {user_defined_cls} is not callable" except Exception: # if it is a property, it will fail because Class doesn't have instance property continue if hasattr(method, MAGIC_ATTR): def generate_function(name, key=key): def func(self, *args, **kwargs): # dispatch to the actual worker return getattr(self.worker_dict[key], name)(*args, **kwargs) async def async_func(self, *args, **kwargs): # dispatch to the actual worker return await getattr(self.worker_dict[key], name)(*args, **kwargs) wrapper = async_func if inspect.iscoroutinefunction(method) else func # noqa: B023 return wrapper func = generate_function(method_name) # pass MAGIC_ATTR for outer worker group attrs = getattr(method, MAGIC_ATTR) setattr(func, MAGIC_ATTR, attrs) try: # bind direct rollout method to class without prefix if attrs["dispatch_mode"] == Dispatch.DIRECT_ROLLOUT_METHOD and "rollout" in key: assert not hasattr(cls, method_name), ( f"conflict direct rollout method {method_name} with role {key}" ) setattr(cls, method_name, func) print(f"bind role {key} method {method_name} to class {cls}") else: method_name_with_prefix = key + "_" + method_name setattr(cls, method_name_with_prefix, func) except Exception as e: raise ValueError(f"Fail to set method_name {method_name}") from e def _unwrap_ray_remote(cls): if hasattr(cls, "__ray_actor_class__"): cls = cls.__ray_actor_class__ return cls def _determine_fsdp_megatron_base_class(mros: list): """ - megatron: base class should be MegatronWorker - fsdp: base class should be Worker """ for cls in mros[0]: if cls.__name__ == "MegatronWorker": return cls if cls.__name__ == "Worker": return cls raise ValueError(f"Cannot determine base class for {mros}") # deprecated, 
switching to FusedWorker def create_colocated_worker_cls(class_dict: dict[str, RayClassWithInitArgs]): """ This function should return a class instance that delegates the calls to every cls in cls_dict """ cls_dict = {} init_args_dict = {} worker_cls = _determine_fsdp_megatron_base_class( [cls.cls.__ray_actor_class__.__mro__ for cls in class_dict.values()] ) assert issubclass(worker_cls, Worker), f"worker_cls {worker_cls} should be a subclass of Worker" print(f"colocated worker base class {worker_cls}") for key, cls in class_dict.items(): cls_dict[key] = cls.cls init_args_dict[key] = {"args": cls.args, "kwargs": cls.kwargs} assert cls_dict.keys() == init_args_dict.keys() # TODO: create a class with customizable name class WorkerDict(worker_cls): def __init__(self): super().__init__() self.worker_dict = {} for key, user_defined_cls in cls_dict.items(): user_defined_cls = _unwrap_ray_remote(user_defined_cls) # directly instantiate the class without remote # in worker class, e.g. # when DISABLE_WORKER_INIT == 1 it will return immediately with temp_env_var("DISABLE_WORKER_INIT", "1"): self.worker_dict[key] = user_defined_cls( *init_args_dict[key].get("args", ()), **init_args_dict[key].get("kwargs", {}) ) # now monkey-patch the methods from inner class to WorkerDict for key, user_defined_cls in cls_dict.items(): user_defined_cls = _unwrap_ray_remote(user_defined_cls) _bind_workers_method_to_parent(WorkerDict, key, user_defined_cls) remote_cls = ray.remote(WorkerDict) remote_cls = RayClassWithInitArgs(cls=remote_cls) return remote_cls FusedWorkerCLSName = "FusedWorker" def create_colocated_worker_raw_cls(class_dict: dict[str, RayClassWithInitArgs]): """ This function returns a FusedWorker class. `FusedWorker.{class_name}` -> FusedClass Use `class_name` as a param to directly access the underlying class. `FusedWorker._fuw_execute("{class_name}_fwmn_{method_name}", *args, **kwargs)` First param must be "{class_name}_fwmn_{method_name}" in order to access `method_name` of underlying class `{class_name}`. `FusedWorker.fused_worker_dict` -> {"class_name": FusedClass} Stores all underlying classes. `FusedClass.fused_worker_dict` -> {"class_name": FusedClass} The same as `FusedWorker.fused_worker_dict`, enables underlying class to access other underlying classes. 
""" raw_cls_dict = {cls_name: _unwrap_ray_remote(cia.cls) for cls_name, cia in class_dict.items()} init_args_dict = {cls_name: cia.args for cls_name, cia in class_dict.items()} init_kwargs_dict = {cls_name: cia.kwargs for cls_name, cia in class_dict.items()} cls_names = list(class_dict.keys()) # FusedWorker_Actor_Critic class_name_renamed = "_".join([FusedWorkerCLSName] + cls_names) class FusedWorker(Worker): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.cls_names = cls_names self.raw_cls_dict = raw_cls_dict self.init_args_dict = init_args_dict self.init_kwargs_dict = init_kwargs_dict for cls_name, udc, ud_args, ud_kwargs in zip( self.cls_names, self.raw_cls_dict.values(), self.init_args_dict.values(), self.init_kwargs_dict.values(), strict=True, ): with temp_env_var("DISABLE_WORKER_INIT", "1"): udc._get_ray_actor_cls_name = lambda x, name_renamed=class_name_renamed: name_renamed udc._get_ray_method_prefix = lambda x, name_prefixed=cls_name: f"{name_prefixed}_" # cls_name = "actor", "critic", udc = ActorWorker, CriticWorker self.fused_worker_dict[cls_name] = udc(*ud_args, **ud_kwargs) setattr(self, cls_name, self.fused_worker_dict[cls_name]) # injecting fused_worker to each sub worker so they can be aware of existence of each other for _, worker in self.fused_worker_dict.items(): setattr(worker, Worker.fused_worker_attr_name, self.fused_worker_dict) def _fuw_execute(self, method_name: str, *args, **kwargs): # for fused_worker, method_name is in a form of "{cls_name}_fwmn_{method_name}" # where fwmn stands "fused worker method name" names = method_name.split("_fwmn_") cls_name = names[0] method_name = names[1] assert cls_name in self.fused_worker_dict, ( f"calling {cls_name}'s {method_name}, but {cls_name} not in fused_worker_dict" ) udc_method = getattr(self.fused_worker_dict[cls_name], method_name) return udc_method(*args, **kwargs) renamed_fused_worker_cls = type(class_name_renamed, (FusedWorker,), {}) renamed_fused_worker_cls.is_fused_worker = True renamed_fused_worker_cls.raw_cls_dict = raw_cls_dict return renamed_fused_worker_cls def create_colocated_worker_cls_fused(class_dict: dict[str, RayClassWithInitArgs]): """ This function returns a RayClassWithInitArgs instance of FusedWorker, which is an replacement of `create_colocated_worker_cls`. WorkerGroup constructed using this class will be a colocated WorkerGroup, which will be referenced as `ColocateWorkerGroup` below. `ColocateWorkerGroup.spawn(prefix_set)` returns a dict of WorkerGroup {"class_name": WorkerGroup}, WorkerGroup in this dict will have methods of underlying class `class_name` attached. `ColocateWorkerGroup.fuse(prefix_set)` After executing this function, `ColocateWorkerGroup.{class_name}` will return WorkerGroup with methods of underlying class `class_name` attached. """ raw_colocated_worker_cls = create_colocated_worker_raw_cls(class_dict) remote_cls = ray.remote(raw_colocated_worker_cls) cia = RayClassWithInitArgs(cls=remote_cls) cia.fused_worker_used = True return cia ================================================ FILE: verl_rl/verl/single_controller/ray/megatron.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional import ray from verl.single_controller.base.megatron.worker import DistGlobalInfo, DistRankInfo from verl.single_controller.base.megatron.worker_group import MegatronWorkerGroup from .base import RayClassWithInitArgs, RayResourcePool, RayWorkerGroup # NOTE(sgm): for open-source megatron-core class NVMegatronRayWorkerGroup(RayWorkerGroup, MegatronWorkerGroup): """ MegatronWorkerGroup will query each worker of its megatron rank info and store it inside the WorkerGroup so that the dispatcher can use it to dispatch data. """ def __init__(self, resource_pool: RayResourcePool, ray_cls_with_init: RayClassWithInitArgs, **kwargs): """ Initialize the NVMegatronRayWorkerGroup. Args: resource_pool (RayResourcePool): The resource pool containing worker resources ray_cls_with_init (RayClassWithInitArgs): The Ray class with initialization arguments **kwargs: Additional keyword arguments to pass to the parent class """ super().__init__(resource_pool=resource_pool, ray_cls_with_init=ray_cls_with_init, **kwargs) self._megatron_rank_info: DistRankInfo = self.execute_all_sync(method_name="get_megatron_rank_info") self._megatron_global_info: DistGlobalInfo = ray.get( self.execute_rank_zero_async(method_name="get_megatron_global_info") ) class MegatronRayWorkerGroup(RayWorkerGroup, MegatronWorkerGroup): """ MegatronWorkerGroup will query each worker of its megatron rank info and store it inside the WorkerGroup so that the dispatcher can use it to dispatch data. """ def __init__( self, resource_pool: RayResourcePool, ray_cls_with_init: RayClassWithInitArgs, default_megatron_kwargs: dict = None, **kwargs, ): super().__init__( resource_pool=resource_pool, ray_cls_with_init=ray_cls_with_init, default_megatron_kwargs=default_megatron_kwargs, **kwargs, ) self.init_megatron(default_megatron_kwargs=default_megatron_kwargs) self._megatron_rank_info: DistRankInfo = self.execute_all_sync(method_name="get_megatron_rank_info") self._megatron_global_info: DistGlobalInfo = ray.get( self.execute_rank_zero_async(method_name="get_megatron_global_info") ) def init_megatron(self, default_megatron_kwargs: Optional[dict] = None): # after super, we will call init of each worker if not self._is_init_with_detached_workers: # only init_megatron if the WorkerGroup is created from scratch self.execute_all_sync(method_name="init_megatron", default_megatron_kwargs=default_megatron_kwargs) ================================================ FILE: verl_rl/verl/third_party/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
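Taken together, the single_controller pieces above form a compact API: RayResourcePool declares how many workers to place per node, RayClassWithInitArgs defers actor construction, and RayWorkerGroup instantiates the actors and binds every @register-decorated method onto itself. A minimal CPU-only sketch of that flow (the EchoWorker class is hypothetical, and a local Ray cluster is assumed):

import ray
from verl.single_controller.base import Worker
from verl.single_controller.base.decorator import Dispatch, register
from verl.single_controller.ray import RayClassWithInitArgs, RayResourcePool, RayWorkerGroup

@ray.remote
class EchoWorker(Worker):
    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def echo(self, msg):
        # every worker receives the same broadcast argument
        return f"rank {self.rank}/{self.world_size}: {msg}"

ray.init()
pool = RayResourcePool(process_on_nodes=[2], use_gpu=False)  # two workers on one node
wg = RayWorkerGroup(resource_pool=pool, ray_cls_with_init=RayClassWithInitArgs(cls=EchoWorker))
print(wg.echo("hello"))  # blocking by default; collects one result per worker

Because execute_all_async shards list arguments whose length equals the worker count, a call such as wg.execute_all_sync("echo", msg=["a", "b"]) would send "a" to the first worker and "b" to the second.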
================================================ FILE: verl_rl/verl/third_party/sglang/__init__.py ================================================ # Copyright 2023-2024 SGLang Team # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: verl_rl/verl/third_party/sglang/parallel_state.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2023 The SGlang team. # Adapted from # https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/core/parallel_state.py # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. """Model and data parallel groups.""" import os from typing import Optional import sglang.srt.distributed.parallel_state as ps import torch import torch.distributed from sglang.srt.distributed.parallel_state import ( get_pp_group, get_world_group, init_distributed_environment, init_model_parallel_group, ) """ This version is strongly tied with Megatron to implement HybridEngine and weight sharing between vllm and Megatron. - We assume the Megatron tp+dp+pp world is already established before calling this function. """ # Device mesh for using DTensor _DEVICE_MESH = None # Tensor model parallel group that the current rank belongs to. _TP = None # Pipeline model parallel group that the current rank belongs to. _PP = None # This method is for initializing the ParallelGroup when using HybridEngine # NOTE(linjunrong): this function is for megatron def initialize_parallel_state( distributed_init_method: str = "env://", backend: str = "nccl", tensor_model_parallel_size: int = 1, num_tp_per_train_tp: int = 1, pipeline_model_parallel_size: int = 1, ): # torch.distributed.all_reduce does not free the input tensor until # the synchronization point. This causes the memory usage to grow # as the number of all_reduce calls increases. This env var disables # this behavior. # Related issue: # https://discuss.pytorch.org/t/cuda-allocation-lifetime-for-inputs-to-distributed-all-reduce/191573 os.environ["TORCH_NCCL_AVOID_RECORD_STREAMS"] = "1" # NOTE(sgm): Modify for verl, Env vars will be set by TORCHRUN. 
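# For example (illustrative): a launch such as `torchrun --nproc-per-node=8 train.py`
# exports RANK, LOCAL_RANK and WORLD_SIZE for every process, which is all the bootstrap
# below relies on; without such a launcher, WORLD_SIZE stays -1 and the assert fires.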
rank = int(os.getenv("RANK", "-1")) local_rank = int(os.getenv("LOCAL_RANK", "0")) # Use the world_size set by TORCHRUN world_size = int(os.getenv("WORLD_SIZE", "-1")) assert world_size != -1, "The world_size is set to -1, not initialized by TORCHRUN" init_distributed_environment(world_size, rank, distributed_init_method, local_rank, backend) if torch.distributed.get_world_size() > 1: # NOTE: build a separate inference group with infer tp & micro dp initialize_model_parallel_for_sglang( tensor_model_parallel_size=tensor_model_parallel_size, num_tensor_model_parallel_groups_per_train_tp=num_tp_per_train_tp, ) else: initialize_model_parallel(tensor_model_parallel_size, pipeline_model_parallel_size, backend) # NOTE(linjunrong): After initializing the SGLang rollout via the EngineFragment class, the user should always remember to call # this function to sync the _TP and _PP defined at the beginning of this file. Otherwise, only the counterparts # inside sglang.srt.distributed are initialized as ProcessGroups, while the symbols defined in this file remain None. # It may seem odd to maintain two copies of _TP and _PP; an extra copy is kept for # verl itself in the same way as in verl.third_party.vllm.parallel_state. Note that the process is a little # bit different def ensure_model_parallel_initialized( tensor_model_parallel_size: int, pipeline_model_parallel_size: int = 1, backend: Optional[str] = None, ) -> None: """Helper to initialize model parallel groups if they are not initialized, or ensure tensor-parallel and pipeline-parallel sizes are equal to expected values if the model parallel groups are initialized. """ # get the backend of _DEVICE_WORLD_GROUP backend = backend or torch.distributed.get_backend(get_world_group().device_group) if not model_parallel_is_initialized(): initialize_model_parallel(tensor_model_parallel_size, pipeline_model_parallel_size, backend) return assert get_tensor_model_parallel_world_size() == tensor_model_parallel_size, ( f"tensor parallel group already initialized, but of unexpected size: " f"{get_tensor_model_parallel_world_size()=} vs. {tensor_model_parallel_size=}" ) pp_world_size = get_pp_group().world_size assert pp_world_size == pipeline_model_parallel_size, ( f"pipeline parallel group already initialized, but of unexpected size: {pp_world_size=} vs. " f"{pipeline_model_parallel_size=}" ) # TODO(sgm): deviates from v0.5.4; no pp for now # NOTE(linjunrong): the SGLang version uses _TP instead of ps._TP def model_parallel_is_initialized(): """Check if tensor and pipeline parallel groups are initialized.""" return _TP is not None # and _PIPELINE_MODEL_PARALLEL_GROUP is not None) def initialize_model_parallel_for_sglang( tensor_model_parallel_size: int, num_tensor_model_parallel_groups_per_train_tp: int = 1, pipeline_model_parallel_size: int = 1, ) -> None: # Get world size and rank. Ensure some consistencies. assert torch.distributed.is_initialized() assert isinstance(tensor_model_parallel_size, int) # assert num_tensor_model_parallel_groups_per_train_tp == 1 and not different_tp_group # assert num_tensor_model_parallel_groups_per_train_tp > 1 and different_tp_group # Build the tensor model-parallel groups.
assert ps._TP is None, "tensor model parallel group is already initialized" global _TP world_size: int = torch.distributed.get_world_size() backend = torch.distributed.get_backend() num_tensor_model_parallel_groups = world_size // tensor_model_parallel_size if num_tensor_model_parallel_groups_per_train_tp == 1: # if tensor_model_parallel_size == train_tensor_parallel_size: # using the same tp group as Megatron/vllm assert _TP is None, "tensor model parallel group is already initialized" group_ranks = [] for i in range(num_tensor_model_parallel_groups): ranks = range(i * tensor_model_parallel_size, (i + 1) * tensor_model_parallel_size) group_ranks.append(ranks) _TP = init_model_parallel_group( group_ranks=group_ranks, local_rank=get_world_group().local_rank, backend=backend, use_custom_allreduce=False, # TODO: check why True does not work in Ray trainer use_message_queue_broadcaster=True, ) ps._TP = _TP # _MICRO_DATA_PARALLEL_GROUP is moved to the hybrid engine else: # initialize a micro_dp group and a tp group # assume training tp=4, infer tp=2, then, weight is partitioned as # [1], [2], [3], [4] for training and [1,2], [1,2], [3,4], [3,4] for inference # Build the inference tp groups # train_tp = train_tensor_parallel_size train_tp = num_tensor_model_parallel_groups_per_train_tp * tensor_model_parallel_size # num_tensor_model_parallel_groups_per_train_tp = train_tp // tensor_model_parallel_size assert _TP is None, "tensor model parallel group is already initialized" group_ranks = [] for i in range(num_tensor_model_parallel_groups // num_tensor_model_parallel_groups_per_train_tp): start = train_tp * i end = train_tp * (i + 1) for j in range(num_tensor_model_parallel_groups_per_train_tp): ranks = list(range(start, end, num_tensor_model_parallel_groups_per_train_tp)) for i in range(len(ranks)): ranks[i] += j group_ranks.append(ranks) _TP = init_model_parallel_group( group_ranks=group_ranks, local_rank=get_world_group().local_rank, backend=backend, use_custom_allreduce=False, # TODO: check why True does not work in Ray trainer use_message_queue_broadcaster=True, ) ps._TP = _TP # Build the pipeline model-parallel groups. # global _PIPELINE_MODEL_PARALLEL_GROUP # global _PIPELINE_GLOBAL_RANKS # assert ps._PIPELINE_MODEL_PARALLEL_GROUP is None, ("pipeline model parallel group is already initialized") # ps._PIPELINE_MODEL_PARALLEL_GROUP = mpu.get_pipeline_model_parallel_group() # ps._PIPELINE_GLOBAL_RANKS = mpu.get_pipeline_model_parallel_ranks() # TODO: init using device mesh (not support hybrid engine now) # Build the pipeline model-parallel groups. num_pipeline_model_parallel_groups: int = world_size // pipeline_model_parallel_size global _PP assert _PP is None, "pipeline model parallel group is already initialized" group_ranks = [] for i in range(num_pipeline_model_parallel_groups): ranks = list(range(i, world_size, num_pipeline_model_parallel_groups)) group_ranks.append(ranks) # pipeline parallel does not need custom allreduce _PP = init_model_parallel_group(group_ranks, get_world_group().local_rank, backend, use_custom_allreduce=False) ps._PP = _PP # for verl def initialize_model_parallel( tensor_model_parallel_size: int = 1, pipeline_model_parallel_size: int = 1, backend: Optional[str] = None, ) -> None: """ NOTE: This method is a hack on the open-sourced version that drops the assertion world_size == tp * pp Initialize model parallel groups. Arguments: tensor_model_parallel_size: number of GPUs used for tensor model parallelism.
pipeline_model_parallel_size: number of GPUs used for pipeline model parallelism. Let's say we have a total of 8 GPUs denoted by g0 ... g7 and we use 2 GPUs to parallelize the model tensor, and 4 GPUs to parallelize the model pipeline. The present function will create 4 tensor model-parallel groups and 2 pipeline model-parallel groups: 4 tensor model-parallel groups: [g0, g1], [g2, g3], [g4, g5], [g6, g7] 2 pipeline model-parallel groups: [g0, g2, g4, g6], [g1, g3, g5, g7] Note that for efficiency, the caller should make sure adjacent ranks are on the same DGX box. For example, if we are using 2 DGX-1 boxes with a total of 16 GPUs, ranks 0 to 7 belong to the first box and ranks 8 to 15 belong to the second box. """ # Get world size and rank. Ensure some consistencies. assert torch.distributed.is_initialized() world_size: int = torch.distributed.get_world_size() backend = backend or torch.distributed.get_backend(ps.get_world_group().device_group) # NOTE(sgm) we don't assert world_size == tp * pp # DP is not managed by vllm but by the VeRL WorkerGroup # if (world_size != # tensor_model_parallel_size * pipeline_model_parallel_size): # raise RuntimeError( # f"world_size ({world_size}) is not equal to " # f"tensor_model_parallel_size ({tensor_model_parallel_size}) x " # f"pipeline_model_parallel_size ({pipeline_model_parallel_size})") num_tensor_model_parallel_groups: int = world_size // tensor_model_parallel_size global _TP assert _TP is None, "tensor model parallel group is already initialized" group_ranks = [] for i in range(num_tensor_model_parallel_groups): ranks = list(range(i * tensor_model_parallel_size, (i + 1) * tensor_model_parallel_size)) group_ranks.append(ranks) # message queue broadcaster is only used in tensor model parallel group if ps._TP is not None: _TP = ps._TP else: _TP = init_model_parallel_group( group_ranks, get_world_group().local_rank, backend, use_custom_allreduce=False, # TODO: check why True does not work in Ray trainer use_message_queue_broadcaster=True, ) ps._TP = _TP # TODO: init using device mesh (not support hybrid engine now) # Build the pipeline model-parallel groups. num_pipeline_model_parallel_groups: int = world_size // pipeline_model_parallel_size global _PP assert _PP is None, "pipeline model parallel group is already initialized" group_ranks = [] for i in range(num_pipeline_model_parallel_groups): ranks = list(range(i, world_size, num_pipeline_model_parallel_groups)) group_ranks.append(ranks) # pipeline parallel does not need custom allreduce if ps._PP is not None: _PP = ps._PP else: _PP = init_model_parallel_group(group_ranks, get_world_group().local_rank, backend, use_custom_allreduce=False) ps._PP = _PP """ Device mesh utilities """ def get_device_mesh(): assert _DEVICE_MESH is not None, "device mesh is not initialized" return _DEVICE_MESH """ Tensor model parallel utilities """ # NOTE(linjunrong): In the vllm version of parallel_state.py, verl created its own _TP and _PP because verl wants to use # the process groups for some extra purposes. Under the hood, there is no difference between them and the originals # in vllm.distributed.parallel_state. However, that implementation needs to hack the init process of the inference # engine; as we do not maintain another SGLang here, the original _TP and _PP are used directly.
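# A minimal sketch (illustrative helper, not part of the upstream file) of the group
# layout initialize_model_parallel builds. With world_size=8, tp=2, pp=4 it reproduces
# the docstring example: TP groups [0,1],[2,3],[4,5],[6,7] and PP groups [0,2,4,6],[1,3,5,7].
def _sketch_parallel_group_layout(world_size: int = 8, tp: int = 2, pp: int = 4):
    tp_groups = [list(range(i * tp, (i + 1) * tp)) for i in range(world_size // tp)]
    num_pp_groups = world_size // pp
    pp_groups = [list(range(i, world_size, num_pp_groups)) for i in range(num_pp_groups)]
    return tp_groups, pp_groups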
def get_tensor_model_parallel_group():
    """Get the tensor model parallel group the caller rank belongs to."""
    assert _TP is not None, "tensor model parallel group is not initialized"
    return _TP.device_group


def get_tensor_model_parallel_world_size():
    """Return world size for the tensor model parallel group."""
    return torch.distributed.get_world_size(group=get_tensor_model_parallel_group())


def get_tensor_model_parallel_rank():
    """Return my rank for the tensor model parallel group."""
    return torch.distributed.get_rank(group=get_tensor_model_parallel_group())


def get_tensor_model_parallel_src_rank():
    """Calculate the global rank corresponding to the first local rank
    in the tensor model parallel group."""
    # Assumes TP groups are contiguous blocks of global ranks, e.g. with a TP
    # world size of 4, global ranks 0-3 map to src rank 0 and ranks 4-7 to 4.
    global_rank = torch.distributed.get_rank()
    local_world_size = get_tensor_model_parallel_world_size()
    return (global_rank // local_world_size) * local_world_size


================================================
FILE: verl_rl/verl/third_party/torch/__init__.py
================================================
# official torch 2.6.0 set_model_state_dict API leads to OOM
# this is a copy of torch/distributed/checkpoint from torch 2.7.0

# From PyTorch:
# Copyright (c) 2016- Facebook, Inc (Adam Paszke)
# Copyright (c) 2014- Facebook, Inc (Soumith Chintala)
# Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)
# Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
# Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
# Copyright (c) 2011-2013 NYU (Clement Farabet)
# Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)
# Copyright (c) 2006 Idiap Research Institute (Samy Bengio)
# Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)

# From Caffe2:
# Copyright (c) 2016-present, Facebook Inc. All rights reserved.
# All contributions by Facebook:
# Copyright (c) 2016 Facebook Inc.
# All contributions by Google:
# Copyright (c) 2015 Google Inc.
# All rights reserved.
# All contributions by Yangqing Jia:
# Copyright (c) 2015 Yangqing Jia
# All rights reserved.
# All contributions by Kakao Brain:
# Copyright 2019-2020 Kakao Brain
# All contributions by Cruise LLC:
# Copyright (c) 2022 Cruise LLC.
# All rights reserved.
# All contributions by Tri Dao:
# Copyright (c) 2024 Tri Dao.
# All rights reserved.
# All contributions by Arm:
# Copyright (c) 2021, 2023-2024 Arm Limited and/or its affiliates
# All contributions from Caffe:
# Copyright(c) 2013, 2014, 2015, the respective contributors
# All rights reserved.
# All other contributions:
# Copyright(c) 2015, 2016 the respective contributors
# All rights reserved.

# Caffe2 uses a copyright model similar to Caffe: each contributor holds
# copyright over their contributions to Caffe2. The project versioning records
# all such contribution and copyright details. If a contributor wants to further
# mark their specific copyright on a particular contribution, they should
# indicate their copyright solely in the commit message of the change when it is
# committed.

# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
# 3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America
#    and IDIAP Research Institute nor the names of its contributors may be
#    used to endorse or promote products derived from this software without
#    specific prior written permission.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.


================================================
FILE: verl_rl/verl/third_party/torch/distributed/__init__.py
================================================
# official torch 2.6.0 set_model_state_dict API leads to OOM
# this is a copy of torch/distributed/checkpoint from torch 2.7.0
# (PyTorch/Caffe2 BSD license header identical to verl_rl/verl/third_party/torch/__init__.py above)


================================================
FILE: verl_rl/verl/third_party/torch/distributed/_state_dict_utils.py
================================================
# official torch 2.6.0 set_model_state_dict API leads to OOM
# this is a copy of torch/distributed/checkpoint from torch 2.7.0
# (PyTorch/Caffe2 BSD license header identical to verl_rl/verl/third_party/torch/__init__.py above)
# ruff: noqa: B028, UP038, UP007, E721, E501
# mypy: allow-untyped-defs

import copy
import io
import math
import weakref
from collections.abc import Mapping, MutableMapping
from typing import TYPE_CHECKING, Any, Callable, NamedTuple, Optional, Union, cast

import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch.distributed._functional_collectives import AsyncCollectiveTensor

if dist.is_available() or TYPE_CHECKING:
    from torch.distributed import distributed_c10d
    from torch.distributed._shard.sharded_tensor import ShardedTensor
    from torch.distributed.tensor import DTensor, Replicate, distribute_tensor
    from torch.distributed.tensor._utils import compute_local_shape_and_global_offset


def _identity_func(
    obj: torch.Tensor,
    pg: Optional[dist.ProcessGroup],
    device: Optional[torch.device],
    companion_obj: Any,
) -> torch.Tensor:
    return obj


def _all_gather_sharded_tensor(
    sharded_tensor: "ShardedTensor",
    pg: Optional[dist.ProcessGroup] = None,
    device: Optional[torch.device] = None,
) -> torch.Tensor:
    if pg is None:
        pg = distributed_c10d._get_default_group()
    world_size = dist.get_world_size(pg)
    shards = sharded_tensor.local_shards()
    dim_0_size = sharded_tensor.size()[0]  # type: ignore[index]
    tensor_numel = sharded_tensor.size().numel()  # type: ignore[union-attr]
    chunk_size = math.ceil(dim_0_size / world_size) * tensor_numel // dim_0_size
    pg_device = distributed_c10d._get_pg_default_device(pg) if device is None else device
    if shards:
        local_tensor = shards[0].tensor.flatten()
        if local_tensor.device.type != pg_device.type:
            local_tensor = local_tensor.to(pg_device)
        num_padding = chunk_size - local_tensor.numel()
        if num_padding > 0:
            local_tensor = F.pad(local_tensor, [0, num_padding])
    else:
        local_tensor = torch.zeros(chunk_size, dtype=sharded_tensor.dtype, device=pg_device)

    tensor = torch.empty(
        chunk_size * world_size,
        dtype=local_tensor.dtype,
        device=pg_device,
    )
    dist.all_gather_into_tensor(tensor, local_tensor, group=pg)
    tensor = tensor.narrow(0, 0, tensor_numel).reshape(sharded_tensor.size())
    return tensor


class CompanionMismatch(Exception):
    pass
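
# Illustrative sketch (added for exposition, not part of the upstream copy; the
# demo name below is hypothetical): the chunk_size arithmetic above pads every
# rank to the same number of elements so one all_gather_into_tensor call suffices.
def _editor_demo_chunk_size():
    # A (10, 4) ShardedTensor gathered across 4 ranks: ceil(10 / 4) = 3 rows,
    # i.e. 3 * 4 = 12 elements per rank; the gathered buffer holds 48 elements
    # and is narrowed back down to the true 40 afterwards.
    dim_0_size, world_size, tensor_numel = 10, 4, 40
    chunk_size = math.ceil(dim_0_size / world_size) * tensor_numel // dim_0_size
    assert chunk_size == 12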
def _iterate_state_dict(
    iter_object: Any,
    sharded_tensor_func: Callable,
    dtensor_func: Callable,
    tensor_func: Callable,
    *,
    pg: Optional[dist.ProcessGroup] = None,
    device: Optional[torch.device] = None,
    cpu_offload: bool = False,
    companion_obj: Any = None,
    ranks_only: tuple[int, ...] = (),
    type_check: bool = True,
    non_blocking: bool = True,
) -> dict[str, Any]:
    """Iterate through the state dict, applying the given functions to each tensor type.

    Args:
        iter_object (Any): the target state_dict.
        sharded_tensor_func (Callable): the function to apply to ShardedTensor
        dtensor_func (Callable): the function to apply to DTensor
        tensor_func (Callable): the function to apply to Tensor
        pg (Optional[dist.ProcessGroup]): process group passed to tensor functions
        device (Optional[torch.device]): device passed to tensor functions
        cpu_offload (bool): whether to offload the tensors to CPU memory. This
            option is ignored if a companion_obj is supplied.
        companion_obj (Any): A companion object to the state dict. If this object
            is supplied, we attempt to copy the tensor to the companion object.
        ranks_only (Tuple[int, ...]): if this tuple is empty, all ranks will have
            the same state_dicts. Otherwise only ranks that are in ``ranks_only``
            have the same state_dicts. Other ranks will get empty state_dicts.
        type_check (bool): check if the instance data type is a supported type
            that can be saved by DCP. The current supported data types are
            torch.Tensor, DTensor, int, float, str, list, dict, None.
        non_blocking (bool): whether to use non-blocking copy when copying to
            the companion object.
    """
    # TODO: should we use pytree?
    cpu_device = torch.device("cpu")
    if isinstance(iter_object, ShardedTensor):
        ret = sharded_tensor_func(iter_object, pg, device, companion_obj)
    elif isinstance(iter_object, DTensor):
        ret = dtensor_func(iter_object, pg, device, companion_obj)
    elif isinstance(iter_object, torch.Tensor):
        ret = tensor_func(iter_object, pg, device, companion_obj)
    elif isinstance(iter_object, (int, float, str, bytes, io.BytesIO)) or iter_object is None:
        ret = iter_object
    elif isinstance(iter_object, dict):
        if companion_obj is not None and (
            not isinstance(companion_obj, dict) or set(companion_obj.keys()) != set(iter_object.keys())
        ):
            msg = "" if isinstance(companion_obj, dict) else f"{set(companion_obj.keys())=} {set(iter_object.keys())=}"
            raise CompanionMismatch(msg)

        ret = {
            key: _iterate_state_dict(
                value,
                sharded_tensor_func,
                dtensor_func,
                tensor_func,
                pg=pg,
                device=device,
                cpu_offload=cpu_offload,
                companion_obj=companion_obj[key] if companion_obj is not None else None,
                ranks_only=ranks_only,
                type_check=type_check,
                non_blocking=non_blocking,
            )
            for key, value in iter_object.items()
        }
    elif isinstance(iter_object, (list, tuple)):
        if companion_obj is not None and (
            not isinstance(companion_obj, (list, tuple)) or len(companion_obj) != len(iter_object)
        ):
            raise CompanionMismatch

        ret = [
            _iterate_state_dict(
                v,
                sharded_tensor_func,
                dtensor_func,
                tensor_func,
                pg=pg,
                device=device,
                cpu_offload=cpu_offload,
                companion_obj=companion_obj[idx] if companion_obj is not None else None,
                ranks_only=ranks_only,
                type_check=type_check,
                non_blocking=non_blocking,
            )
            for idx, v in enumerate(iter_object)
        ]
        if isinstance(iter_object, tuple):
            ret = tuple(ret)
    elif not type_check:
        ret = copy.deepcopy(iter_object)
    else:
        raise ValueError(f"Unexpected value type {type(iter_object)}")

    if not ranks_only or dist.get_rank(pg) in ranks_only:
        if isinstance(ret, torch.Tensor):
            if cpu_offload and companion_obj is None:
                ret = ret.to(cpu_device)

            if companion_obj is not None:
                if isinstance(companion_obj, DTensor):
                    assert isinstance(ret, DTensor)
                    companion_obj._local_tensor.copy_(ret._local_tensor, non_blocking=non_blocking)
                else:
                    companion_obj.copy_(ret, non_blocking=non_blocking)
                ret = companion_obj
    else:
        ret = {} if isinstance(ret, dict) else None

    return ret
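
# Illustrative sketch (added for exposition; the demo name is hypothetical): with
# the identity functions, _iterate_state_dict simply walks a nested state dict.
# Runs single-process since no ShardedTensor/DTensor or ranks_only is involved.
def _editor_demo_iterate_state_dict():
    sd = {"w": torch.ones(2, 2), "meta": {"step": 3, "tags": ["a", "b"]}}
    out = _iterate_state_dict(sd, _identity_func, _identity_func, _identity_func, cpu_offload=True)
    assert out["meta"]["step"] == 3 and out["w"].device.type == "cpu"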
def _gather_state_dict(
    state_dict: dict[str, Any],
    *,
    pg: Optional[dist.ProcessGroup] = None,
    device: Optional[torch.device] = None,
    cpu_offload: bool = False,
    ranks_only: tuple[int, ...] = (),
    type_check: bool = True,
) -> dict[str, Any]:
    """
    Given a state_dict, this API gathers all the ShardedTensors or DTensors in
    the state_dict.

    Args:
        state_dict (Dict[str, Any]): the target sharded state_dict.
        pg (Optional[dist.ProcessGroup]): the process group that is used to
            gather ShardedTensor. Note that gathering a DTensor will use the
            DeviceMesh. So this argument will be ignored when gathering a DTensor.
        device: (Optional[torch.device]): the device that is used to perform
            allgather for ShardedTensor. Note that gathering a DTensor will use
            the DeviceMesh. So this argument will be ignored when gathering a
            DTensor.
        cpu_offload (bool): whether to offload the tensors to CPU memory. The
            default value is False.
        ranks_only: (Tuple[int, ...]): if this tuple is empty, all ranks will
            have the same state_dicts. Otherwise only ranks that are in
            ``ranks_only`` have the same state_dicts. Other ranks will get
            empty state_dicts.
        type_check: (bool): check if the instance data type is a supported type
            that can be saved by DCP. The current supported data types are
            torch.Tensor, DTensor, int, float, str, list, dict, None.

    Returns:
        The gathered state dictionary.
    """

    def sharded_tensor_func(value, pg, device, companion_obj):
        # ShardedTensor does not seem to record the original device type.
        # So if the tensor is moved to CPU, we won't know the original type.
        # As a result, we have to rely on the user to tell us the correct one.
        cpu_device = torch.device("cpu")
        output_tensor = _all_gather_sharded_tensor(value, pg, device)
        local_shard_device = value.local_shards()[0].tensor.device if value.local_shards() else cpu_device
        if output_tensor.device != local_shard_device:
            value = output_tensor.to(local_shard_device)
        else:
            value = output_tensor
        return value

    def dtensor_func(value, pg, device, companion_obj):
        if value.device != value.device_mesh.device_type:
            value = value.to(value.device_mesh.device_type)
        # FSDP all_gather: [Shard(0)] -> [Replicate()]
        # HSDP all_gather: [Replicate(), Shard(0)] -> [Replicate(), Replicate()]
        # 2D FSDP + TP all_gather:
        # - [Shard(0), Shard(n)] -> [Replicate(), Replicate()]
        # - [Shard(0), Replicate()] -> [Replicate(), Replicate()]
        placements = [Replicate() for _ in value.placements]
        value = value.redistribute(
            device_mesh=value.device_mesh,
            placements=placements,
        )
        # Call `wait()` to force the tensor to be synchronous with respect
        # to the main stream.
        # See the discussion in https://github.com/pytorch/pytorch/pull/117799.
        value = value.to_local()
        if isinstance(value, AsyncCollectiveTensor):
            value = value.wait()
        return value

    return _iterate_state_dict(
        state_dict,
        sharded_tensor_func,
        dtensor_func,
        _identity_func,
        pg=pg,
        device=device,
        cpu_offload=cpu_offload,
        ranks_only=ranks_only,
        type_check=type_check,
    )


def _offload_state_dict_to_cpu(
    state_dict: dict[str, Any],
    *,
    ranks_only: tuple[int, ...] = (),
    type_check: bool = True,
) -> dict[str, Any]:
    """
    Given a state_dict, this API offloads all the tensors to CPU memory.

    Args:
        state_dict (Dict[str, Any]): the target state_dict.
        ranks_only: (Tuple[int, ...]): if this tuple is empty, all ranks will
            have the same state_dicts. Otherwise only ranks that are in
            ``ranks_only`` have the same state_dicts. Other ranks will get
            empty state_dicts.
        type_check: (bool): check if the instance data type is a supported type
            that can be saved by DCP. The current supported data types are
            torch.Tensor, DTensor, int, float, str, list, dict, None.

    Returns:
        The offloaded state dictionary.
    """
    ret = _iterate_state_dict(
        state_dict,
        _identity_func,
        _identity_func,
        _identity_func,
        pg=None,
        device=None,
        cpu_offload=True,
        ranks_only=ranks_only,
        type_check=type_check,
    )
    return ret
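
# Illustrative sketch (added for exposition; hypothetical name): on a state dict
# of plain tensors, _gather_state_dict reduces to a recursive CPU offload, so it
# can be exercised single-process. Its real value is gathering ShardedTensor /
# DTensor shards, which requires an initialized process group.
def _editor_demo_gather_plain():
    sd = {"w": torch.arange(4.0), "nested": {"b": torch.zeros(2)}}
    gathered = _gather_state_dict(sd, cpu_offload=True)
    assert gathered["nested"]["b"].device.type == "cpu"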
@torch.no_grad()
def _copy_state_dict(
    state_dict: dict[str, Any],
    copy_state_dict: dict[str, Any],
    non_blocking: bool = False,
    type_check: bool = True,
) -> dict[str, Any]:
    """
    Copies all tensors in a given state dict into a different state_dict with the
    same structure. Additionally, a copied state dict with the same value
    references is returned. Editing the keys on this state dict will not affect
    the passed in copy_state_dict (but the value references are the same).

    .. warning::
        It is expected by this function that state_dict and copy_state_dict
        share the same structure and data types.

    .. warning::
        The current supported data types are
        torch.Tensor, DTensor, int, float, str, list, dict, None.

    Args:
        state_dict (Dict[str, Any]): the target state_dict.
        copy_state_dict (Dict[str, Any]):
            The state dict we are copying into. This state_dict must have exactly
            the same structure as the source `state_dict`.
        non_blocking: (bool): Whether copy ops should be performed asynchronously
        type_check (bool): check if the instance data type is a supported type
            that can be saved by DCP. The current supported data types are
            torch.Tensor, DTensor, int, float, str, list, dict, None.

    Returns:
        State Dict copy
    """
    return _iterate_state_dict(
        state_dict,
        _identity_func,
        _identity_func,
        _identity_func,
        pg=None,
        device=None,
        cpu_offload=False,
        ranks_only=(),
        companion_obj=copy_state_dict,
        type_check=type_check,
        non_blocking=non_blocking,
    )


@torch.no_grad()
def _create_cpu_state_dict(
    state_dict: dict[str, Any], pin_memory: bool = False, share_memory: bool = False
) -> dict[str, Any]:
    """
    Given a state_dict, create another state_dict with the same structure and
    elements. However, all tensors in the returned state_dict are new tensors
    on CPU. These tensors can be placed on pin_memory or share_memory based on
    the provided arguments.

    .. warning::
        Setting both `pin_memory` and `share_memory` to True significantly
        increases the latency of this method because of the nuances which
        require us to register memory as pinned directly as opposed to relying
        on the pin_memory cache allocator. This option should only be used for
        long lived tensors which are required to be shared. This is not the
        case as long as at least one of `pin_memory` or `share_memory` is set
        to False.
    """

    def tensor_func(
        obj: torch.Tensor,
        pg: Optional[dist.ProcessGroup],
        device: Optional[torch.device],
        _: Any,
    ) -> torch.Tensor:
        if len(obj.size()) == 0:
            return torch.tensor(0, dtype=obj.dtype)

        if share_memory:
            t = torch.empty(*tuple(obj.size()), dtype=obj.dtype)
            t = t.share_memory_()
            if pin_memory:

                def unpin_memory(t):
                    succ = int(torch.cuda.cudart().cudaHostUnregister(t.data_ptr()))
                    assert succ == 0, f"Unpinning shared memory failed with error-code: {succ}"

                weakref.finalize(t, unpin_memory, t)
                succ = int(
                    torch.cuda.cudart().cudaHostRegister(
                        t.data_ptr(),
                        t.numel() * t.element_size(),
                        1,  # lines up with 'cudaHostRegisterPortable'
                    )
                )
                assert succ == 0, f"Pinning shared memory failed with error-code: {succ}"
            return t
        elif pin_memory:
            return torch.empty(*tuple(obj.size()), dtype=obj.dtype).pin_memory()
        else:
            return torch.empty(*tuple(obj.size()), dtype=obj.dtype)

    def dtensor_func(
        obj: DTensor,
        pg: Optional[dist.ProcessGroup],
        device: Optional[torch.device],
        _: Any,
    ) -> DTensor:
        if len(obj.size()) == 0:
            return obj

        if obj.device != torch.device("cpu"):
            ret = cast(DTensor, obj.to(device="cpu"))
        else:
            ret = copy.deepcopy(obj)
        ret._local_tensor = tensor_func(ret._local_tensor, pg, device, None)
        return ret

    ret = _iterate_state_dict(
        state_dict,
        _identity_func,
        dtensor_func,
        tensor_func,
        pg=None,
        device=None,
        cpu_offload=False,
        ranks_only=(),
        type_check=False,
    )
    return ret
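
# Illustrative sketch (added for exposition; hypothetical name): the usual
# staging pattern is to build a CPU skeleton once with _create_cpu_state_dict
# and then repeatedly _copy_state_dict into it, e.g. for async checkpointing.
def _editor_demo_staging():
    src = {"w": torch.randn(4, 4), "b": torch.zeros(4)}
    staging = _create_cpu_state_dict(src)  # same structure, fresh CPU tensors
    _copy_state_dict(src, staging, non_blocking=False)
    assert torch.equal(staging["w"], src["w"])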
def _check_state_dict_similarity(
    state_dict: dict[str, Any],
    compared_state_dict: dict[str, Any],
) -> bool:
    """
    Given two state_dicts, check if the structures are the same. And if a
    [key, tensor] pair exists in one state_dict there must be a corresponding
    pair, [key, other_tensor], in the other state_dict, where tensor and
    other_tensor have the same size and dtype.

    Return the check result.
    """

    def tensor_func(
        obj: torch.Tensor,
        pg: Optional[dist.ProcessGroup],
        device: Optional[torch.device],
        companion_obj: Any,
    ) -> torch.Tensor:
        if companion_obj.dtype != obj.dtype or companion_obj.size() != obj.size():
            raise CompanionMismatch
        return obj

    try:
        _iterate_state_dict(
            state_dict,
            _identity_func,
            _identity_func,
            tensor_func,
            pg=None,
            device=None,
            cpu_offload=False,
            ranks_only=(),
            companion_obj=compared_state_dict,
            type_check=False,
        )
    except CompanionMismatch:
        return False

    return True


class _TensorInfo(NamedTuple):
    size: torch.Size
    dtype: torch.dtype


def _broadcast_tensors(
    full_state_dict: dict[str, Any],
    local_state_dict: dict[str, Any],
    keys: list[str],
    device: torch.device,
    pg: Optional[dist.ProcessGroup] = None,
) -> None:
    tensors = []
    for key in keys:
        if dist.get_rank() == 0:
            full_state = full_state_dict[key]
            assert isinstance(full_state, torch.Tensor)
            full_tensor = full_state.detach().to(device)
        else:
            tensor_info = full_state_dict[key]
            full_tensor = torch.empty(
                size=tensor_info.size,
                device=device,
                dtype=tensor_info.dtype,
            )

        tensors.append(full_tensor)
        local_state = local_state_dict.get(key, None)
        if local_state is None:
            continue
        elif isinstance(local_state, DTensor):
            local_state_dict[key] = (local_state, full_tensor)
        else:
            local_state_dict[key] = full_tensor

    if pg is None:
        pg = dist.distributed_c10d._get_default_group()

    if len(tensors) > 1:
        dist._broadcast_coalesced(pg, tensors, 500, 0)
    else:
        dist.broadcast(tensors[0], src=0, group=pg)

    _distribute_tensors(local_state_dict, keys, device, pg)


def _distribute_tensors(
    local_state_dict: dict[str, Any],
    keys: list[str],
    device: torch.device,
    pg: Optional[dist.ProcessGroup] = None,
) -> None:
    if pg is None:
        pg = dist.distributed_c10d._get_default_group()
    for key in keys:
        _local_state = local_state_dict.get(key, None)
        if _local_state is None or torch.is_tensor(_local_state):
            continue

        local_state = _local_state[0]
        full_tensor = _local_state[1]

        shape, offset = compute_local_shape_and_global_offset(
            full_tensor.shape, local_state.device_mesh, local_state.placements
        )
        slices = [
            slice(cur_offset, cur_offset + cur_shape)
            for cur_shape, cur_offset in zip(shape, offset, strict=False)
        ]
        if local_state.is_meta:
            # Use .clone() here rather than view to clone and return only the
            # sliced portion, minimizing memory access and cost.
            local_tensor = full_tensor[slices].detach().clone()
            # TODO: currently, we cannot handle strided sharding if the dp dimension is not even. For example,
            # one of the cases that is not yet supported is when placements = (Shard(0), _StridedShard(0, sf=2)).
            ret = DTensor.from_local(
                local_tensor,
                local_state.device_mesh,
                local_state.placements,
                shape=local_state.shape,
                stride=local_state.stride(),
            )
        else:
            ret = local_state
            # Copy full_tensor[slices] into local_state.to_local() to reduce memory footprint.
            ret.to_local().copy_(full_tensor[slices])
        local_state_dict[key] = ret
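
# Illustrative sketch (added for exposition; hypothetical name): the similarity
# check passes when shapes and dtypes line up, regardless of tensor values, and
# fails on any size/dtype mismatch.
def _editor_demo_similarity():
    a = {"w": torch.zeros(2, 3)}
    assert _check_state_dict_similarity(a, {"w": torch.ones(2, 3)})  # same shape/dtype
    assert not _check_state_dict_similarity(a, {"w": torch.zeros(2, 3, dtype=torch.float64)})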
def _broadcast_state_dict(
    full_state_dict: dict[str, Any],
    local_state_dict: dict[str, Any],
    device: torch.device,
    pg: Optional[dist.ProcessGroup] = None,
    strict: bool = False,
    cpu_offload: bool = False,
) -> None:
    # Broadcast from rank0's `full_state_dict` to all ranks' `local_state_dict`.
    # If strict is True, any keys in `local_state_dict` but not in
    # `full_state_dict` will be removed from `local_state_dict`.
    ret = {}
    if dist.get_rank() == 0:
        for key, value in full_state_dict.items():
            if not torch.is_tensor(value):
                ret[key] = value
            elif value.dim() == 0:
                ret[key] = value.cpu()
            else:
                ret[key] = _TensorInfo(value.size(), value.dtype)

    broadcast_list = [ret]
    dist.broadcast_object_list(broadcast_list, src=0, group=pg)
    ret = broadcast_list[0]

    # Gather values
    keys = []
    local_state_dict_keys = set(local_state_dict.keys())
    global_keys = set()
    for key, value in ret.items():
        global_keys.add(key)
        if not isinstance(value, _TensorInfo):
            if key in local_state_dict:
                local_state_dict[key] = value
            continue

        if dist.get_rank() == 0:
            ret[key] = full_state_dict[key]

        keys.append(key)
        # Broadcast every tensor to avoid OOM for now.
        if len(keys) >= 1:
            _broadcast_tensors(ret, local_state_dict, keys, device, pg)
            if cpu_offload:
                for key in keys:
                    local_state_dict[key] = local_state_dict[key].cpu()
            keys.clear()

    if strict:
        if missing_keys := (local_state_dict_keys - global_keys):
            for key in missing_keys:
                local_state_dict.pop(key)

    if keys:
        _broadcast_tensors(ret, local_state_dict, keys, device, pg)
        if cpu_offload:
            for key in keys:
                local_state_dict[key] = local_state_dict[key].cpu()


def _distribute_state_dict(
    full_state_dict: dict[str, Any],
    local_state_dict: dict[str, Any],
    device: torch.device,
    pg: Optional[dist.ProcessGroup] = None,
) -> None:
    # Full_state_dict = True, broadcast_from_rank0 = False here. Each rank has
    # full_state_dict. Skip the broadcast in ``_broadcast_state_dict`` and
    # distribute tensors in each rank
    for key, value in full_state_dict.items():
        if key not in full_state_dict:
            continue
        if not torch.is_tensor(value):
            local_state_dict[key] = value
        elif value.dim() == 0:
            local_state_dict[key] = value.cpu()
        else:
            assert isinstance(value, torch.Tensor)
            local_state = local_state_dict.get(key, None)
            if local_state is None:
                continue
            elif isinstance(local_state, DTensor):
                local_state_dict[key] = distribute_tensor(
                    value.detach().to(device),
                    local_state.device_mesh,
                    local_state.placements,
                )
            else:
                local_state_dict[key] = value.detach().to(device)


# These APIs are from torch.distributed.checkpoint.
# TODO: We should consolidate the code here as not all modules can depend on DCP.
PATH_ITEM = Union[str, int]
OBJ_PATH = tuple[PATH_ITEM, ...]
FLATTEN_MAPPING = dict[str, OBJ_PATH]
STATE_DICT_TYPE = dict[str, Any]
CONTAINER_TYPE = MutableMapping[PATH_ITEM, Any]


def _traverse_state_dict(
    state_dict: STATE_DICT_TYPE,
    visitor: Callable[[OBJ_PATH, Any], None],
) -> None:
    """
    Invoke ``visitor`` for each value recursively in ``state_dict``. Mapping,
    list, and tuple will be flattened and other value types are treated as the
    terminal values and will invoke ``visitor``.
    """

    def _traverse_obj(path: OBJ_PATH, value: Any) -> None:
        if isinstance(value, Mapping):
            for k, v in value.items():
                _traverse_obj(path + (str(k),), v)
        elif isinstance(value, (list, tuple)):
            for i, v in enumerate(value):
                _traverse_obj(path + (i,), v)
        else:
            visitor(path, value)

    for key, value in state_dict.items():
        _traverse_obj((str(key),), value)


def _flatten_state_dict(
    state_dict: STATE_DICT_TYPE,
) -> tuple[STATE_DICT_TYPE, FLATTEN_MAPPING]:
    """
    Flatten ``state_dict`` made of nested dicts and lists into a top level dictionary.

    Use ``unflatten_state_dict`` to revert this process.

    Returns:
        A tuple with the flattened state_dict and a mapping from original to new state_dict.
        N.B. The new keys are derived from the object paths, joined by dot.
        For example: ``{ 'a': {'b':...}}`` results in the key `a.b`.
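
    Editor's note inside this docstring would be misplaced; see the traversal
    sketch right after this function group.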
""" flattened: STATE_DICT_TYPE = {} mappings: FLATTEN_MAPPING = {} def flat_copy(path: OBJ_PATH, value: Any) -> None: new_fqn = ".".join(map(str, path)) if new_fqn in flattened: raise ValueError(f"duplicated flatten key {new_fqn}") flattened[new_fqn] = value mappings[new_fqn] = path _traverse_state_dict(state_dict, flat_copy) return flattened, mappings def _set_element(root_dict: STATE_DICT_TYPE, path: OBJ_PATH, value: Any) -> None: """Set ``value`` in ``root_dict`` along the ``path`` object path.""" cur_container = cast(CONTAINER_TYPE, root_dict) def extend_list(lst: list[Any], idx: int) -> None: while len(lst) <= idx: lst.append(None) for i in range(1, len(path)): prev_key = path[i - 1] key = path[i] def_val: CONTAINER_TYPE | list[Any] = {} if type(key) == str else [] if isinstance(cur_container, Mapping): cur_container = cast(CONTAINER_TYPE, cur_container.setdefault(prev_key, def_val)) else: extend_list(cur_container, prev_key) if cur_container[prev_key] is None: cur_container[prev_key] = def_val cur_container = cur_container[prev_key] key = path[-1] if type(key) == int: extend_list(cast(list[Any], cur_container), key) cur_container[key] = value def _unflatten_state_dict(state_dict: STATE_DICT_TYPE, mapping: FLATTEN_MAPPING) -> STATE_DICT_TYPE: """Restore the original nested state_dict according to ``mapping`` and the flattened ``state_dict``.""" nested: STATE_DICT_TYPE = {} for key, value in state_dict.items(): _set_element(nested, mapping[key], value) return nested ================================================ FILE: verl_rl/verl/third_party/torch/distributed/checkpoint/__init__.py ================================================ # official torch 2.6.0 set_model_state_dict API leads to OOM # this is a copy of torch/distributed/checkpoint from torch 2.7.0 # From PyTorch: # Copyright (c) 2016- Facebook, Inc (Adam Paszke) # Copyright (c) 2014- Facebook, Inc (Soumith Chintala) # Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert) # Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu) # Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu) # Copyright (c) 2011-2013 NYU (Clement Farabet) # Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston) # Copyright (c) 2006 Idiap Research Institute (Samy Bengio) # Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz) # From Caffe2: # Copyright (c) 2016-present, Facebook Inc. All rights reserved. # All contributions by Facebook: # Copyright (c) 2016 Facebook Inc. # All contributions by Google: # Copyright (c) 2015 Google Inc. # All rights reserved. # All contributions by Yangqing Jia: # Copyright (c) 2015 Yangqing Jia # All rights reserved. # All contributions by Kakao Brain: # Copyright 2019-2020 Kakao Brain # All contributions by Cruise LLC: # Copyright (c) 2022 Cruise LLC. # All rights reserved. # All contributions by Tri Dao: # Copyright (c) 2024 Tri Dao. # All rights reserved. # All contributions by Arm: # Copyright (c) 2021, 2023-2024 Arm Limited and/or its affiliates # All contributions from Caffe: # Copyright(c) 2013, 2014, 2015, the respective contributors # All rights reserved. # All other contributions: # Copyright(c) 2015, 2016 the respective contributors # All rights reserved. # Caffe2 uses a copyright model similar to Caffe: each contributor holds # copyright over their contributions to Caffe2. The project versioning records # all such contribution and copyright details. 
================================================
FILE: verl_rl/verl/third_party/torch/distributed/checkpoint/__init__.py
================================================
# official torch 2.6.0 set_model_state_dict API leads to OOM
# this is a copy of torch/distributed/checkpoint from torch 2.7.0
# (PyTorch/Caffe2 BSD license header identical to verl_rl/verl/third_party/torch/__init__.py above)


================================================
FILE: verl_rl/verl/third_party/torch/distributed/checkpoint/state_dict.py
================================================
# official torch 2.6.0 set_model_state_dict API leads to OOM
# this is a copy of torch/distributed/checkpoint from torch 2.7.0
# (PyTorch/Caffe2 BSD license header identical to verl_rl/verl/third_party/torch/__init__.py above)
# ruff: noqa: B028, UP038, UP007, E721
# mypy: allow-untyped-defs

import contextlib
import functools
import gc
import warnings
from collections.abc import Generator, Iterable
from dataclasses import asdict, dataclass, field
from itertools import chain
from typing import Any, Callable, Optional, Union, cast, no_type_check

import torch
import torch.distributed as dist
import torch.nn as nn
from torch.distributed._shard.sharded_tensor import ShardedTensor
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
    _CHECKPOINT_PREFIX,
)
from torch.distributed.fsdp import (
    FullOptimStateDictConfig,
    FullStateDictConfig,
    OptimStateDictConfig,
    ShardedOptimStateDictConfig,
    ShardedStateDictConfig,
    StateDictConfig,
    StateDictType,
)
from torch.distributed.fsdp import (
    FullyShardedDataParallel as FSDP,
)
from torch.distributed.fsdp._common_utils import (
    FSDP_WRAPPED_MODULE,
    _get_module_fsdp_state_if_fully_sharded_module,
)
from torch.distributed.tensor import DTensor
from torch.nn.modules.module import _IncompatibleKeys
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils._pytree import tree_map_only

from verl.third_party.torch.distributed._state_dict_utils import (
    _broadcast_state_dict,
    _distribute_state_dict,
    _flatten_state_dict,
    _gather_state_dict,
    _offload_state_dict_to_cpu,
    _unflatten_state_dict,
)

__all__ = [
    "FQNS_T",
    "PrimitiveType",
    "ValueType",
    "DictValueType",
    "ListDictValueType",
    "OptimizerStateType",
    "StateDictOptions",
    "get_model_state_dict",
    "get_optimizer_state_dict",
    "get_state_dict",
    "set_model_state_dict",
    "set_optimizer_state_dict",
    "set_state_dict",
]

_FLAT_PARAM = "_flat_param"
_PG = "param_groups"
_PARAMS = "params"
_STATE = "state"

FQNS_T = set[str]
PrimitiveType = Union[DTensor, ShardedTensor, torch.Tensor, int, float, str]
ValueType = Union[PrimitiveType, list[PrimitiveType], tuple[PrimitiveType], dict[str, "ValueType"]]
DictValueType = dict[str, ValueType]
ListDictValueType = list[DictValueType]
OptimizerStateType = dict[str, DictValueType | ListDictValueType]

_patched_state_dict: set[Callable] = set()


@contextlib.contextmanager
def _gc_context():
    is_enabled = gc.isenabled()
    gc.disable()
    try:
        yield
    finally:
        if is_enabled:
            gc.enable()
@dataclass
class StateDictOptions:
    """
    This dataclass specifies how get_state_dict/set_state_dict will work.

    - ``full_state_dict``: if this is set to True, all the tensors in the
      returned state_dict will be gathered. No ShardedTensor and DTensor
      will be in the returned state_dict.

    - ``cpu_offload``: offload all the tensors to cpu. To prevent CPU OOM, if
      ``full_state_dict`` is also true, then only the rank0 will get the
      state_dict and all other ranks will get empty state_dict.

    - ``ignore_frozen_params``: if the value is True, the returned state_dict
      won't contain any frozen parameters -- the ``requires_grad`` is False.
      The default value is False.

    - ``keep_submodule_prefixes`` (deprecated): when ``submodules`` is not None,
      this option indicates whether to keep the submodule prefixes from the
      state_dict keys. For example, if the submodule is ``module.pretrain`` and
      the full FQN of the parameter is ``pretrain.layer1.weight``. When this
      option is True, the parameter's key in the returned state_dict will be
      ``pretrain.layer1.weight``. If the option is False, the key will be
      ``layer1.weight``. Note that if ``keep_submodule_prefixes`` is False,
      there may be conflicted FQNs, hence there should be only one submodule
      in ``submodules``.

    - ``strict``: the ``strict`` option when ``set_state_dict`` calls
      model.load_state_dict().

    - ``broadcast_from_rank0``: when the option is True, rank0 should receive a
      full state_dict and will broadcast the tensors in the state_dict/
      optim_state_dict one by one to other ranks. Other ranks will receive
      the tensors and shard according to the local shards in the model and
      optimizer. ``full_state_dict`` must be set to True when using this option.
      This option currently only supports DTensor, not the legacy ShardedTensor.
    """

    full_state_dict: bool = False
    cpu_offload: bool = False
    ignore_frozen_params: bool = False
    keep_submodule_prefixes: bool = True
    strict: bool = True
    broadcast_from_rank0: bool = False
    flatten_optimizer_state_dict: bool = False
    dsd_fqn_modifiers: str = "_fqn_modifiers"


@dataclass
class _StateDictInfo(StateDictOptions):
    fqn_param_mapping: dict[
        str | torch.Tensor,
        FQNS_T | torch.Tensor,
    ] = field(default_factory=dict)
    shared_params_mapping: dict[
        str | torch.Tensor,
        FQNS_T | torch.Tensor,
    ] = field(default_factory=dict)
    submodule_prefixes: set[str] = field(default_factory=set)
    handle_model: bool = True
    handle_optim: bool = True
    fsdp_context: Callable = contextlib.nullcontext
    fsdp_modules: list[nn.Module] = field(default_factory=list)
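
# Illustrative sketch (added for exposition; hypothetical name): the option
# combination used by the OOM-safe load path is full_state_dict together with
# broadcast_from_rank0; the remaining fields keep their defaults.
def _editor_demo_options() -> StateDictOptions:
    opts = StateDictOptions(full_state_dict=True, broadcast_from_rank0=True)
    assert opts.strict and not opts.ignore_frozen_params
    return opts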
@functools.cache
def _get_fqns(
    model: nn.Module,
    name: str,
    dsd_fqn_modifiers: str = "_fqn_modifiers",
    skip_ddp_prefix: bool = True,
    skip_compiler_prefix: bool = True,
) -> FQNS_T:
    """
    This API is used to convert the name of a parameter to the FQNs. For FSDP
    without `use_orig_params`, the name of FlatParameter can be mapped to
    multiple original parameters. As a result, the return type of this function
    is `set[str]`.

    Args:
        module (nn.Module): the root model.
        name (str): the name
        skip_ddp_prefix (bool): whether to skip DDP's `module` prefix

    Returns:
        The canonical FQNs based on the model traversal.
    """
    # Remove the checkpoint prefix, if it exists.
    name = name.replace(_CHECKPOINT_PREFIX, "")
    if "." not in name:
        return {name}

    obj_names = name.split(".")
    fqn_obj_names = []
    curr_obj = model
    for i, curr_obj_name in enumerate(obj_names):
        if isinstance(curr_obj, DDP):
            assert curr_obj_name == "module"
            curr_obj = curr_obj.module
            if not skip_ddp_prefix:
                fqn_obj_names.append(curr_obj_name)
        elif isinstance(curr_obj, FSDP):
            if i < len(obj_names) - 1 and obj_names[i + 1] == _FLAT_PARAM:
                prefix = ".".join(fqn_obj_names)
                flat_param = getattr(curr_obj, _FLAT_PARAM)
                if prefix:
                    prefix = f"{prefix}."
                return {f"{prefix}{fqn}" for fqn in flat_param._fqns}
            curr_obj = getattr(curr_obj, FSDP_WRAPPED_MODULE)
            if curr_obj_name != FSDP_WRAPPED_MODULE:
                fqn_obj_names.append(curr_obj_name)
                curr_obj = getattr(curr_obj, curr_obj_name)
        elif isinstance(curr_obj, torch._dynamo.eval_frame.OptimizedModule):
            assert curr_obj_name == "_orig_mod"
            curr_obj = curr_obj._orig_mod
            if not skip_compiler_prefix:
                fqn_obj_names.append(curr_obj_name)
        else:
            # In some modules, _fqn_modifiers would not be shown in the state_dict
            # keys; skip them in the FQN to ensure load_state_dict succeeds for them.
            if hasattr(curr_obj, dsd_fqn_modifiers):
                if removed_fqn := getattr(curr_obj, dsd_fqn_modifiers)().get(curr_obj_name):
                    if hasattr(curr_obj, removed_fqn):
                        curr_obj = getattr(curr_obj, removed_fqn)
            fqn_obj_names.append(curr_obj_name)
            if curr_obj_name == nn.modules.module._EXTRA_STATE_KEY_SUFFIX:
                if i != len(obj_names) - 1:
                    raise RuntimeError("Expect `_extra_state` to be the last obj name")
            else:
                curr_obj = getattr(curr_obj, curr_obj_name)

    return {".".join(fqn_obj_names).replace(_CHECKPOINT_PREFIX, "")}


class _EXTRA_STATE:
    pass


def _iterate_valid_model_state(model, dsd_fqn_modifiers="_fqn_modifiers"):
    visited_modules: set[nn.Module] = set()

    def recurse(module: nn.Module, curr_fqn: str) -> Generator:
        visited_modules.add(module)

        curr_fqn = f"{curr_fqn}." if curr_fqn else ""
        for name, submodule in module.named_children():
            if submodule in visited_modules:
                continue
            # if user have state_dict_hooks in their model, they can add the state_dict key changes
            # at dsd_fqn_modifiers in input to align with the function of state_dict_hook
            if hasattr(module, dsd_fqn_modifiers) and name in getattr(module, dsd_fqn_modifiers)().values():
                # skip _fqn_modifiers here thus remove the last `.` added
                new_fqn = curr_fqn[:-1]
            else:
                new_fqn = f"{curr_fqn}{name}"
            yield from recurse(submodule, new_fqn)

        for name, obj in chain(module.named_buffers(recurse=False), module.named_parameters(recurse=False)):
            if name in module._non_persistent_buffers_set:
                continue
            new_fqn = f"{curr_fqn}{name}"
            yield new_fqn, obj

        if getattr(module.__class__, "get_extra_state", nn.Module.get_extra_state) != nn.Module.get_extra_state:
            new_fqn = f"{curr_fqn}{nn.modules.module._EXTRA_STATE_KEY_SUFFIX}"
            yield new_fqn, _EXTRA_STATE()

    yield from recurse(model, "")
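
# Illustrative sketch (added for exposition; hypothetical name): for a plain,
# unwrapped module the FQN is the parameter name itself; the DDP/FSDP/compile
# branches above only matter when the model is wrapped.
def _editor_demo_get_fqns():
    model = nn.Sequential(nn.Linear(2, 2))
    assert _get_fqns(model, "0.weight") == {"0.weight"}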
def _verify_options(
    model: nn.Module,
    optims: tuple[torch.optim.Optimizer, ...],
    optim_only: bool,
    *,
    submodules: Optional[set[nn.Module]] = None,
    options: Optional[StateDictOptions] = None,
) -> _StateDictInfo:
    """
    Verify the model and options passed by the user and generate _StateDictInfo.
    """
    if submodules:
        warnings.warn(
            "Getting submodules only model/optim state_dict is deprecated and "
            "will be removed in 2.5. This feature can be achieved by manually "
            "filtering out the state_dict returned from get_state_dict.",
            FutureWarning,
        )
    if optim_only and not optims:
        raise RuntimeError("Optimizers are not passed in but optim_only is set to True.")

    options = options or StateDictOptions()

    fqn_param_mapping: dict[str | torch.Tensor, set[str] | torch.Tensor] = {}
    shared_params_mapping: dict[str | torch.Tensor, set[str] | torch.Tensor] = {}
    for name, param in _iterate_valid_model_state(model):
        if isinstance(param, _EXTRA_STATE):
            continue

        fqns = _get_fqns(model, name)
        fqn = fqn_param_mapping.get(param, None)
        if fqn is not None:
            cast(set[str], fqn_param_mapping[param]).update(fqns)
            shared_params_mapping[param] = fqn_param_mapping[param]
        else:
            # We need to do copy as _get_fqns is lru_cached
            fqn_param_mapping[param] = fqns.copy()
        for fqn in fqns:
            if not isinstance(param, _EXTRA_STATE):
                fqn_param_mapping[fqn] = param

    for param_, fqns_ in list(shared_params_mapping.items()):
        for fqn in fqns_:
            shared_params_mapping[fqn] = cast(torch.Tensor, param_)

    submodule_prefixes: set[str] = set()
    if submodules:
        submodules = set(submodules)
        for name, module in model.named_modules():
            if module not in submodules:
                continue
            fqns = _get_fqns(model, name)
            assert len(fqns) == 1, "Submodule FQN should only have 1 instance"
            submodule_prefixes.update(f"{fqn}." for fqn in fqns)

    if options.broadcast_from_rank0 and not options.full_state_dict:
        raise ValueError("full_state_dict must be True when broadcast_from_rank0 is True.")

    fsdp_modules = FSDP.fsdp_modules(model)
    state_dict_config: StateDictConfig
    optim_state_dict_config: OptimStateDictConfig
    fsdp_context: Callable
    if fsdp_modules:
        # FSDP API only work if at least one FSDP instance exists.
        if options.full_state_dict:
            state_dict_config = FullStateDictConfig(offload_to_cpu=options.cpu_offload, rank0_only=options.cpu_offload)
            optim_state_dict_config = FullOptimStateDictConfig(
                offload_to_cpu=options.cpu_offload,
                rank0_only=(options.cpu_offload or options.broadcast_from_rank0),
            )
            state_dict_type = StateDictType.FULL_STATE_DICT
        else:
            state_dict_config = ShardedStateDictConfig(
                offload_to_cpu=options.cpu_offload,
            )
            optim_state_dict_config = ShardedOptimStateDictConfig(
                offload_to_cpu=options.cpu_offload,
            )
            state_dict_type = StateDictType.SHARDED_STATE_DICT

        @contextlib.contextmanager
        def fsdp_state_dict_type_without_warning(
            module,
            state_dict_type,
            state_dict_config,
            optim_state_dict_config,
        ):
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", message="FSDP.state_dict_type", category=FutureWarning)
                with FSDP.state_dict_type(
                    module=module,
                    state_dict_type=state_dict_type,
                    state_dict_config=state_dict_config,
                    optim_state_dict_config=optim_state_dict_config,
                ):
                    yield

        fsdp_context = functools.partial(
            fsdp_state_dict_type_without_warning,
            module=model,
            state_dict_type=state_dict_type,
            state_dict_config=state_dict_config,
            optim_state_dict_config=optim_state_dict_config,
        )
    else:
        fsdp_context = contextlib.nullcontext

    return _StateDictInfo(
        **asdict(options),
        fqn_param_mapping=fqn_param_mapping,
        shared_params_mapping=shared_params_mapping,
        submodule_prefixes=submodule_prefixes,
        fsdp_context=fsdp_context,
        fsdp_modules=cast(list[nn.Module], fsdp_modules),
        handle_model=not optim_only,
        handle_optim=(len(optims) > 0),
    )


def _verify_state_dict(
    model_state_dict: dict[str, ValueType],
    optim_state_dict: OptimizerStateType,
    info: _StateDictInfo,
) -> None:
    for module in info.fsdp_modules:
        fsdp_state = _get_module_fsdp_state_if_fully_sharded_module(module)
        assert fsdp_state is not None, "Expected a fsdp_state with a fsdp module."

    # Verify if the model_state_dict and optim_state_dict are valid. This API
    # should give the users an explicit error message to debug or report.
    if (
        info.handle_model
        and not model_state_dict
        and not info.submodule_prefixes
        and not info.ignore_frozen_params
        and not (info.cpu_offload and info.full_state_dict)
        and info.strict
        and not info.broadcast_from_rank0
    ):
        raise RuntimeError(
            "The option indicates that model state_dict is required to save "
            "or load, but model state_dict is empty."
            f"rank = {dist.get_rank()=}."
        )

    if info.handle_optim:
        if not optim_state_dict and not (info.cpu_offload and info.full_state_dict) and (not info.broadcast_from_rank0):
            raise RuntimeError(
                "The option indicates that optim state_dict is required to save "
                f"or load, but optim state_dict is empty. {optim_state_dict}"
            )

    for key in model_state_dict.keys():
        if _FLAT_PARAM in key:
            raise RuntimeError(f"{key} contains {_FLAT_PARAM}. This can happen if the model is not the root module.")
def _state_dict_fn(obj: nn.Module | torch.optim.Optimizer, api: str) -> Callable:
    call = getattr(obj, api)
    if call in _patched_state_dict:
        call = functools.partial(getattr(obj.__class__, api), self=obj)
    return call


def _maybe_full_or_cpu_state_dict(state_dict: dict[str, Any], info: _StateDictInfo) -> dict[str, Any]:
    if info.full_state_dict:
        ranks_only = () if (not info.cpu_offload or not torch.distributed.is_initialized()) else (0,)
        return _gather_state_dict(state_dict, cpu_offload=info.cpu_offload, ranks_only=ranks_only)
    elif info.cpu_offload:
        return _offload_state_dict_to_cpu(state_dict)
    else:
        return state_dict


@torch.no_grad()
def _get_model_state_dict(model: nn.Module, info: _StateDictInfo) -> dict[str, ValueType]:
    if not info.handle_model:
        return {}

    with info.fsdp_context():
        state_dict = _state_dict_fn(model, "state_dict")()

    for key in list(state_dict.keys()):
        fqns = _get_fqns(model, key)
        assert len(fqns) == 1, (key, fqns)
        fqn = next(iter(fqns))
        if fqn != key:
            # As we only support FSDP, DDP, and TP, the only cases are
            # wrapper-based DDP and compiler. Verify if the assumption
            # is correct.

            def verify(key, fqn) -> bool:
                if len(fqn) >= len(key):
                    return False
                fqn_split = fqn.split(".")
                key_split = key.split(".")
                fqn_idx = 0
                for key_idx, key_name in enumerate(key_split):
                    if key_name == fqn_split[fqn_idx]:
                        fqn_idx += 1
                        if fqn_idx == len(fqn_split):
                            return key_idx == len(key_split) - 1
                    elif key_name in ("module", "_orig_mod"):
                        continue
                    else:
                        return False
                return True

            if not verify(key, fqn):
                raise RuntimeError(f"An unexpected key, {key}, exists. FQN is {fqn}")
            state_dict[fqn] = state_dict.pop(key)

    if info.submodule_prefixes:
        new_state_dict: dict[str, ValueType] = {}
        # TODO: make this faster.
        for fqn in state_dict.keys():
            for prefix in info.submodule_prefixes:
                if not fqn.startswith(prefix):
                    continue
                if info.keep_submodule_prefixes:
                    new_state_dict[fqn] = state_dict[fqn]
                else:
                    new_fqn = fqn[len(prefix) :]
                    new_state_dict[new_fqn] = state_dict[fqn]
        state_dict = new_state_dict

    if info.ignore_frozen_params:
        for key, param in model.named_parameters():
            if param.requires_grad:
                continue
            fqns = _get_fqns(model, key)
            for fqn in fqns:
                state_dict.pop(fqn)

    for key, p in list(state_dict.items()):
        if torch.is_tensor(p) and p.is_meta:
            state_dict.pop(key)

    return _maybe_full_or_cpu_state_dict(state_dict, info)
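
# Illustrative sketch (added for exposition; hypothetical name): the internal
# pipeline for reading a model state dict is _verify_options followed by
# _get_model_state_dict. For a plain, unwrapped module this runs single-process
# (a torch build with distributed support is assumed for the FSDP imports).
def _editor_demo_model_state_dict():
    model = nn.Linear(4, 4)
    info = _verify_options(model, (), optim_only=False, options=StateDictOptions())
    sd = _get_model_state_dict(model, info)
    assert set(sd) == {"weight", "bias"}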
@torch.no_grad()
def _load_model_state_dict(
    model: nn.Module,
    state_dict: dict[str, ValueType],
    info: _StateDictInfo,
) -> _IncompatibleKeys:
    if not info.handle_model or (not state_dict and not info.broadcast_from_rank0):
        return _IncompatibleKeys({}, {})

    local_state_dict = {}
    for key, value in _iterate_valid_model_state(model, info.dsd_fqn_modifiers):
        fqns = _get_fqns(model, key, info.dsd_fqn_modifiers)
        fqns_with_prefix = _get_fqns(
            model,
            key,
            info.dsd_fqn_modifiers,
            skip_ddp_prefix=False,
            skip_compiler_prefix=False,
        )

        for fqn, fqn_with_prefix in zip(fqns, fqns_with_prefix, strict=False):
            if (not info.broadcast_from_rank0 or dist.get_rank() == 0) and fqn != fqn_with_prefix:
                load_value = state_dict.pop(fqn, None)
                if load_value is None:
                    if info.strict:
                        raise RuntimeError(f"Missing key: {fqn}.")
                else:
                    state_dict[fqn_with_prefix] = load_value
            local_state_dict[fqn_with_prefix] = value

    assign = False
    if info.broadcast_from_rank0 or info.full_state_dict:
        devices = set()
        for key, value in local_state_dict.items():
            if torch.is_tensor(value) and value.dim() > 0:
                devices.add(value.device)
        # In lora state_dict, there could be multiple devices, with meta device inside.
        # Take the other device in the broadcast/distribute, and set assign to True
        if torch.device("meta") in devices:
            devices.remove(torch.device("meta"))
            assign = True
        if len(devices) == 0:
            devices.add(dist.distributed_c10d._get_pg_default_device())
        elif len(devices) > 1:
            raise ValueError("Multiple devices found")

        if info.broadcast_from_rank0:
            _broadcast_state_dict(
                state_dict,
                local_state_dict,
                device=devices.pop(),
                strict=info.strict,
                cpu_offload=info.cpu_offload,
            )
        elif info.full_state_dict:
            _distribute_state_dict(state_dict, local_state_dict, device=devices.pop())
        for fqn, local_state in local_state_dict.items():
            state_dict[fqn] = local_state

    with info.fsdp_context():
        return cast(
            _IncompatibleKeys,
            _state_dict_fn(model, "load_state_dict")(state_dict=state_dict, strict=info.strict, assign=assign),
        )


def _init_optim_state(optim: torch.optim.Optimizer) -> None:
    """
    Initialize optim states by calling the step() with zero grads.
    """
    if optim.state:
        # The optimizer state is initialized.
        return

    # There are some stateless optimizers like SGD. These optimizers will
    # not return in the above condition. So if gradients exist, we should also
    # return. If gradients do not exist, the following initialization should
    # not disturb SGD because the gradients and lr are both zero.
    for param_group in optim.param_groups:
        for param in param_group[_PARAMS]:
            if param.grad is not None:
                return

    for param_group in optim.param_groups:
        for param in param_group[_PARAMS]:
            if param.requires_grad:
                param.grad = torch.zeros_like(param)

    # Some optimizers will update parameters regardless of grads due to lr, so
    # make lr to zero when calling `step()`.
    lrs = []
    for param_group in optim.param_groups:
        if "lr" in param_group:
            lrs.append(param_group["lr"])
            param_group["lr"] = torch.tensor(0.0) if isinstance(param_group["lr"], torch.Tensor) else 0.0
    optim.step(closure=None)
    # Whether to recover the "lr" should not matter too much as we will
    # restore checkpointing later.
    for param_group in optim.param_groups:
        if "lr" in param_group:
            param_group["lr"] = lrs.pop(0)
    optim.zero_grad(set_to_none=True)
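
# Illustrative sketch (added for exposition; hypothetical name): _init_optim_state
# materializes optimizer state (e.g. Adam's exp_avg/exp_avg_sq) by stepping once
# with zero gradients and a temporarily zeroed lr, so parameters are unchanged.
def _editor_demo_init_optim_state():
    model = nn.Linear(2, 2)
    optim = torch.optim.Adam(model.parameters(), lr=0.1)
    _init_optim_state(optim)
    assert all("exp_avg" in s for s in optim.state.values())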
def _flatten_optim_state_dict(state_dict: OptimizerStateType) -> dict[str, ValueType]:
    """
    This API flattens the optimizer state_dict to support optimizer resharding
    for MPMD, e.g., pipeline parallelism.

    Without the API, the original optimizer state_dict looks like:
    {
        "state": {
            "layer1.weight": {"step": 10, "exp_avg": SomeTensor, "exp_avg_sq": SomeTensor},
            "layer2.weight": {"step": 10, "exp_avg": SomeTensor, "exp_avg_sq": SomeTensor},
        },
        "param_group": [
            {"lr": 0.0, "betas": (0.9, 0.95), ..., "params": ["layer1.weight", "layer2.weight"]}
        ]
    }

    With this API, the optimizer state_dict looks like:
    {
        "state.layer1.weight.step": 10,
        "state.layer2.weight.step": 10,
        "state.layer1.weight.exp_avg": SomeTensor,
        "state.layer2.weight.exp_avg": SomeTensor,
        "state.layer1.weight.exp_avg_sq": SomeTensor,
        "state.layer2.weight.exp_avg_sq": SomeTensor,
        "param_group.layer1.weight.lr": 0.1,
        "param_group.layer2.weight.lr": 0.1,
        "param_group.layer1.weight.betas": (0.9, 0.95),
        "param_group.layer2.weight.betas": (0.9, 0.95),
    }

    Note that if any of the value is a container, like the betas in the example,
    this API won't flatten it.
    """

    def _raise_if_type_not_supported(v):
        if not isinstance(v, (torch.Tensor, int, float)):
            raise NotImplementedError(
                f"Flattening optimizer state_dict only supports tensor, int, float states now. Type is {type(v)}."
            )

    ret: dict[str, ValueType] = {}
    for fqn, state in cast(DictValueType, state_dict[_STATE]).items():
        for k, v in cast(DictValueType, state).items():
            _raise_if_type_not_supported(v)
            ret[f"{_STATE}.{fqn}.{k}"] = v

    for param_group in cast(ListDictValueType, state_dict[_PG]):
        fqns = param_group.pop(_PARAMS)
        for fqn in cast(list[str], fqns):
            for k, v in param_group.items():
                ret[f"{_PG}.{fqn}.{k}"] = v
    return ret


def _unflatten_optim_state_dict(
    optim: torch.optim.Optimizer,
    state_dict: dict[str, ValueType],
    info: _StateDictInfo,
) -> OptimizerStateType:
    """
    This API unflattens the state_dict generated by _flatten_optim_state_dict().
    See the docstring of _flatten_optim_state_dict() for more detail.
    """
    state: DictValueType = {}
    pg_state: ListDictValueType = []
    return_osd: OptimizerStateType = {_STATE: state, _PG: pg_state}

    for param_group in optim.param_groups:
        pg_state.append({_PARAMS: []})
        for param in param_group[_PARAMS]:
            for fqn in info.fqn_param_mapping[param]:
                # If a parameter is shared, only one of the FQNs will be used.
                # So we need to verify whether this fqn is actually used in
                # the state_dict.
                if fqn in info.shared_params_mapping:
                    in_params = False
                    for k in param_group.keys():
                        if k == _PARAMS:
                            continue
                        flatten_key = f"{_PG}.{fqn}.{k}"
                        if flatten_key in state_dict:
                            in_params = True
                            break
                else:
                    in_params = True
                if not in_params:
                    continue

                params = pg_state[-1][_PARAMS]
                assert isinstance(params, list)  # typing
                params.append(fqn)
                if not param.requires_grad:
                    continue
                state[fqn] = {}
                for state_name in optim.state[param].keys():
                    cast(DictValueType, state[fqn])[state_name] = state_dict[f"{_STATE}.{fqn}.{state_name}"]

        first_param_fqn = cast(list[str], pg_state[-1][_PARAMS])[0]
        for k in param_group.keys():
            if k == _PARAMS:
                continue
            value = state_dict[f"{_PG}.{first_param_fqn}.{k}"]
            if k not in pg_state[-1]:
                pg_state[-1][k] = value
            elif pg_state[-1][k] != value:
                raise RuntimeError(
                    "All the parameters in the same parameter group should have "
                    f"the same saved param_group value. But {first_param_fqn}.{k} "
                    f"is {value} while other(s) is {pg_state[-1][k]}."
                )
    return return_osd
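
# Illustrative sketch (added for exposition; hypothetical name): flattening a
# hand-built optimizer state_dict produces dotted "state.*" and "param_groups.*"
# keys, as described in the docstring above.
def _editor_demo_flatten_osd():
    osd = {
        _STATE: {"layer.weight": {"step": 10, "exp_avg": torch.zeros(2)}},
        _PG: [{"lr": 0.1, _PARAMS: ["layer.weight"]}],
    }
    flat = _flatten_optim_state_dict(osd)
    assert flat["state.layer.weight.step"] == 10
    assert flat["param_groups.layer.weight.lr"] == 0.1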
if not osd: continue for k in list(osd[_STATE].keys()): if "_orig_mod" in k: osd[_STATE][k.replace("_orig_mod.", "")] = osd[_STATE].pop(k) for g in osd[_PG]: params = [k.replace("_orig_mod.", "") for k in g[_PARAMS]] g[_PARAMS] = params else: params = list(chain.from_iterable(g[_PARAMS] for g in optim.param_groups)) param_pid_mapping = dict(zip(params, range(len(params)), strict=False)) fqn_pid_mapping = {} for key, param in model.named_parameters(): fqns = _get_fqns(model, key) assert len(fqns) == 1 fqn = next(iter(fqns)) if param not in param_pid_mapping: continue pid = param_pid_mapping[param] fqn_pid_mapping[fqn] = pid fqn_pid_mapping[pid] = fqn for key in list(osd[_STATE].keys()): fqn = fqn_pid_mapping[key] osd[_STATE][fqn] = osd[_STATE].pop(key) for group in osd[_PG]: group[_PARAMS] = [fqn_pid_mapping[pid] for pid in group[_PARAMS]] if not osd: continue cast(DictValueType, optim_state_dict[_STATE]).update(osd[_STATE]) cast(ListDictValueType, optim_state_dict[_PG]).extend(osd[_PG]) if info.flatten_optimizer_state_dict: optim_state_dict = cast(OptimizerStateType, _flatten_optim_state_dict(optim_state_dict)) return _maybe_full_or_cpu_state_dict(optim_state_dict, info) def _split_optim_state_dict( model: nn.Module, optim: torch.optim.Optimizer, optim_state_dict: OptimizerStateType, info: _StateDictInfo, ) -> OptimizerStateType: """ Extract the corresponding optim state_dict from ``optim_state_dict`` for ``optim`` and return the result optim state_dict. Args: model (nn.Module): the root model. optim (torch.optim.Optimizer): the optimizer. optim_state_dict (Dict[str, ValueType]): the superset optim state_dict that contains the optim state_dict of ``optim``. info (_StateDictInfo): state dict information. Returns: The optim state_dict of ``optim``. """ state: DictValueType = {} pg_state: ListDictValueType = [] return_osd: OptimizerStateType = {_STATE: state, _PG: pg_state} pg_mapping: dict[int, int] = {} if all(isinstance(k, int) for k in cast(DictValueType, optim_state_dict[_STATE]).keys()): return optim_state_dict for param_group in optim.param_groups: pg_state.append({_PARAMS: []}) for param in param_group[_PARAMS]: for fqn in info.fqn_param_mapping[param]: if fqn in info.shared_params_mapping: in_params = False for loaded_param_group in cast(ListDictValueType, optim_state_dict[_PG]): if fqn in cast(list[str], loaded_param_group[_PARAMS]): in_params = True break else: in_params = True if not in_params: continue params = pg_state[-1][_PARAMS] assert isinstance(params, list) params.append(fqn) if param.requires_grad: state[fqn] = cast(DictValueType, optim_state_dict[_STATE])[fqn] for loaded_param_group in cast(ListDictValueType, optim_state_dict[_PG]): if fqn in cast(list[str], loaded_param_group[_PARAMS]): pg_mapping[id(loaded_param_group)] = len(return_osd[_PG]) - 1 if len(param_group[_PARAMS]) == 0: # Param_group with empty params. ret = [] for loaded_param_group in cast(ListDictValueType, optim_state_dict[_PG]): if len(cast(list[str], loaded_param_group[_PARAMS])) == 0: ret.append(loaded_param_group) if len(ret) != 1: raise ValueError( "There are param groups that have zero parameters. " "In such a case, DSD only support exactly one param group " "with zero parameters." "But the loaded state_dict has zero or more than one param groups " "that have zero parameters." ) if len(optim_state_dict[_PG]) != len(optim.param_groups): raise ValueError( "When there is a parameter group that has zero parameters, multiple optimizers are not supported." 
) pg_mapping[id(loaded_param_group)] = len(return_osd[_PG]) - 1 for param_group in cast(ListDictValueType, optim_state_dict[_PG]): pg_idx = pg_mapping.get(id(param_group), -1) if pg_idx == -1: continue for key, value in param_group.items(): if key == _PARAMS: continue # TODO: check if value is the same if exists. pg_state[pg_idx][key] = value return return_osd @torch.no_grad() def _load_optim_state_dict( model: nn.Module, optimizers: tuple[torch.optim.Optimizer, ...], state_dict: OptimizerStateType, info: _StateDictInfo, ) -> None: if not info.handle_optim: return for optim in optimizers: _init_optim_state(optim) if state_dict: if _STATE in state_dict: optim_state_dict = _split_optim_state_dict(model, optim, state_dict, info) else: optim_state_dict = _unflatten_optim_state_dict(optim, cast(dict[str, ValueType], state_dict), info) else: optim_state_dict = {} if info.fsdp_modules: # We need to specially handle FlatParameter FSDP as # FlatParameter FSDP converts the FQNs. for original_fqn, _ in model.named_parameters(): fqns = _get_fqns(model, original_fqn) fqns_with_compiler = _get_fqns(model, original_fqn, skip_compiler_prefix=False) if fqns == fqns_with_compiler: continue assert len(fqns) == 1 fqn = fqns.pop() fqn_with_compiler = fqns_with_compiler.pop() for g in optim_state_dict[_PG]: val = cast(dict[str, Any], g) params = [key.replace(fqn, fqn_with_compiler) for key in val[_PARAMS]] val[_PARAMS] = params osd_state = cast(DictValueType, optim_state_dict[_STATE]) for k in list(osd_state.keys()): if fqn in k: osd_state[k.replace(fqn, fqn_with_compiler)] = osd_state.pop(k) with info.fsdp_context(): optim_state_dict = FSDP.optim_state_dict_to_load(model, optim, optim_state_dict) elif info.full_state_dict: info.full_state_dict = False local_state_dict = _get_optim_state_dict(model, (optim,), info) info.full_state_dict = True device = None def _device(t): if t.dim() > 0: nonlocal device if device is None: device = t.device elif device != t.device: raise ValueError("Device mismatch") return t _ = tree_map_only(torch.Tensor, _device, local_state_dict) assert device is not None flatten_osd, osd_mapping = _flatten_state_dict(optim_state_dict) flatten_local_osd, local_osd_mapping = _flatten_state_dict(local_state_dict) if info.broadcast_from_rank0: _broadcast_state_dict(flatten_osd, flatten_local_osd, device=device) else: _distribute_state_dict(flatten_osd, flatten_local_osd, device=device) # The modifications listed seek to address the problem where optim might possess # dissimilar parameters in comparison to optim_state_dict. This is achieved by # incorporating differential parameters within local, which may result in optim # having additional parameters ultimately. for optim_key in flatten_osd.keys(): if optim_key not in flatten_local_osd: assert optim_key in osd_mapping flatten_local_osd[optim_key] = flatten_osd[optim_key] local_osd_mapping[optim_key] = osd_mapping[optim_key] optim_state_dict = _unflatten_state_dict(flatten_local_osd, local_osd_mapping) for pg in optim_state_dict[_PG]: if _PARAMS not in pg: cast(dict[str, ValueType], pg)[_PARAMS] = [] # Note that we do not have to convert the FQN back to param id here if # order in optim.param_groups[idx][_PARAMS] is the same as the one in # optim_state_dict[_PG][idx][_PARAMS]. 
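As an aside before the final `load_state_dict` call below: the `_init_optim_state` trick used throughout this file (zero grads, lr forced to 0, a single `step()`) can be reproduced in isolation. A minimal sketch with a plain Adam optimizer, assuming nothing beyond stock PyTorch:

```python
import torch

model = torch.nn.Linear(4, 4)
optim = torch.optim.Adam(model.parameters(), lr=1e-3)
assert len(optim.state) == 0  # no optimizer state exists before the first step()

# Zero grads plus lr=0 materializes exp_avg/exp_avg_sq without moving the weights.
for group in optim.param_groups:
    for p in group["params"]:
        p.grad = torch.zeros_like(p)
saved_lrs = [g["lr"] for g in optim.param_groups]
for g in optim.param_groups:
    g["lr"] = 0.0
optim.step()
for g, lr in zip(optim.param_groups, saved_lrs):
    g["lr"] = lr
optim.zero_grad(set_to_none=True)

assert all("exp_avg" in s for s in optim.state.values())  # state is now loadable
```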
_state_dict_fn(optim, "load_state_dict")(state_dict=optim_state_dict) def get_model_state_dict( model: nn.Module, *, submodules: Optional[set[nn.Module]] = None, options: Optional[StateDictOptions] = None, ) -> dict[str, ValueType]: """ Return the model state_dict of ``model``. See ``get_state_dict`` for the detail usage. Args: model (nn.Module): the nn.Module to the model. submodules (deprecated): Optional[set[nn.Module]]: only return the model parameters that belong to the submodules. options (StateDictOptions): the options to control how model state_dict and optimizer state_dict should be returned. See `StateDictOptions` for the details. Returns: The state_dict for ``model``. :rtype: typing.Dict[str, ValueType] """ with _gc_context(): info = _verify_options( model, (), optim_only=False, submodules=submodules, options=options, ) model_state_dict = _get_model_state_dict(model, info) _verify_state_dict(model_state_dict, {}, info) return model_state_dict def get_optimizer_state_dict( model: nn.Module, optimizers: torch.optim.Optimizer | Iterable[torch.optim.Optimizer], *, submodules: Optional[set[nn.Module]] = None, options: Optional[StateDictOptions] = None, ) -> OptimizerStateType: """ Return the combined state_dict for optimizers. See ``get_state_dict`` for the detail usage. Args: model (nn.Module): the nn.Module to the model. optimizers (Union[None, Optimizer, Iterable[Optimizer]]): The optimizers that are used to optimize ``model``. submodules (deprecated): Optional[set[nn.Module]]: only return the model parameters that belong to the submodules. options (StateDictOptions): the options to control how model state_dict and optimizer state_dict should be returned. See `StateDictOptions` for the details. Returns: The state_dict for ``optimizers``. :rtype: OptimizerStateType """ with _gc_context(): optimizers = (optimizers,) if isinstance(optimizers, torch.optim.Optimizer) else tuple(optimizers) info = _verify_options( model, optimizers, optim_only=True, submodules=submodules, options=options, ) optim_state_dict = _get_optim_state_dict(model, optimizers, info) _verify_state_dict({}, optim_state_dict, info) return optim_state_dict def get_state_dict( model: nn.Module, optimizers: torch.optim.Optimizer | Iterable[torch.optim.Optimizer], *, submodules: Optional[set[nn.Module]] = None, options: Optional[StateDictOptions] = None, ) -> tuple[dict[str, ValueType], OptimizerStateType]: """ Return the model state_dict and optimizers state_dict. ``get_state_dict`` can process any module that is parallelized by PyTorch FSDP/fully_shard, DDP/replicate, tensor_parallel/parallelize_module, and any combination of these parallelisms. The main functions of ``get_state_dict`` are: 1.) returning a model and optimizer state_dict that can be resharded with a different number of trainers and/or different parallelisms. 2.) hiding the parallelism-specific state_dict APIs. Users don't have to call these APIs. 3.) sanity checking the result state_dict. The keys of the result state dictionary are the canonical FQNs (Fully Qualified Names). A canonical FQN refers to the FQN based on a parameter's position in an nn.Module hierarchy. More specifically, a canonical FQN to a parameter is the FQN returned by ``module.named_parameters()`` or ``module.named_buffers()`` when the module is not distributed by any parallelisms. Since the optimizer internally uses parameter IDs to represent a parameter, there will be a conversion from the parameter IDs to the canonical FQNs when calling this API. 
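To see the parameter-ID-to-FQN conversion on an unparallelized module, a single-process sketch (assuming a recent PyTorch, where this API ships as `torch.distributed.checkpoint.state_dict`):

```python
import torch
from torch.distributed.checkpoint.state_dict import get_state_dict

model = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.ReLU())
optim = torch.optim.Adam(model.parameters(), lr=1e-3)
model(torch.randn(2, 4)).sum().backward()
optim.step()

# The raw optimizer state_dict keys states by opaque integer parameter ids ...
assert all(isinstance(k, int) for k in optim.state_dict()["state"])

# ... while get_state_dict rewrites them to canonical FQNs such as "0.weight".
_, osd = get_state_dict(model, optim)
assert "0.weight" in osd["state"]
```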
``get_state_dict`` can also process a module that is not parallelized. In such a case, ``get_state_dict`` only performs one function -- converting the optimizer parameter IDs to the canonical FQNs. Example: >>> # xdoctest: +SKIP >>> import torch >>> from torch.distributed.fsdp import FullyShardedDataParallel as FSDP >>> from torch.nn.parallel import DistributedDataParallel as DDP >>> from torch.distributed.checkpoint.state_dict import get_state_dict >>> fsdp_model = FSDP(copy.deepcopy(model)) >>> fsdp_optim = torch.optim.Adam(model.parameters(), lr=1e-3) >>> ddp_model = DDP(copy.deepcopy(model)) >>> ddp_optim = torch.optim.Adam(model.parameters(), lr=1e-3) >>> ddp_state_dict, ddp_optim_state_dict = get_state_dict(ddp_model, ddp_optim) >>> fsdp_state_dict, fsdp_optim_state_dict = get_state_dict( ... fsdp_model, fsdp_optim ... ) >>> # if we simply call ddp_model.state_dict() and fsdp_model.state_dict(), >>> # the asserts will fail. >>> assert ddp_state_dict == fsdp_state_dict >>> assert ddp_optim_state == fsdp_optim_state_dict Args: model (nn.Module): the nn.Module to the model. optimizers (Union[None, Optimizer, Iterable[Optimizer]]): The optimizers that are used to optimize ``model``. submodules (deprecated): Optional[set[nn.Module]]: only return the model parameters that belong to the submodules. options (StateDictOptions): the options to control how model state_dict and optimizer state_dict should be returned. See `StateDictOptions` for the details. Returns: ``Tuple`` that contain model state_dict and optimizer state_dict. :rtype: typing.Tuple[typing.Dict[str, ValueType], OptimizerStateType] """ with _gc_context(): optimizers = (optimizers,) if isinstance(optimizers, torch.optim.Optimizer) else tuple(optimizers) info = _verify_options( model, optimizers, optim_only=False, submodules=submodules, options=options, ) model_state_dict = _get_model_state_dict(model, info) optim_state_dict = _get_optim_state_dict(model, optimizers, info) _verify_state_dict(model_state_dict, optim_state_dict, info) return model_state_dict, optim_state_dict def _unflatten_model_state_dict( model: nn.Module, state_dict: dict[nn.Module, dict[str, ValueType]] | dict[str, ValueType], ) -> dict[str, ValueType]: if not state_dict: return {} if isinstance(next(iter(state_dict.keys())), nn.Module): warnings.warn( "Passing model_state_dict as a ``Dict[nn.Module, Dict[str, Any]]``" "is deprecated and will be removed in 2.5. If you need this " "feature, please preprocessing the model_state_dict to achieve the " "same functionality.", FutureWarning, ) cast_state_dict = cast(dict[nn.Module, dict[str, ValueType]], state_dict) new_state_dict: dict[str, ValueType] = {} for submodule, sub_state_dict in cast_state_dict.items(): for name, m in model.named_modules(): if m != submodule: continue fqns = _get_fqns(model, name) assert len(fqns) == 1, "FQNs for a submodule should only have 1 element" prefix = f"{next(iter(fqns))}." new_state_dict.update({prefix + subfqn: value for subfqn, value in sub_state_dict.items()}) return new_state_dict else: return cast(dict[str, ValueType], state_dict) def set_model_state_dict( model: nn.Module, model_state_dict: dict[str, ValueType], *, options: Optional[StateDictOptions] = None, ) -> _IncompatibleKeys: """Load the model state_dict. The counterpart of ``get_model_state_dict`` to set the state_dict to the model. See ``set_state_dict`` for the detail usage. Args: model (nn.Module): the nn.Module to the model. model_state_dict: (Dict[str, ValueType]): the model state_dict to load. 
If the key of the ``model_state_dict`` is nn.Module, the key is a submodule of ``model`` and the value should be the state_dict of the submodule. When loading the state_dict, the prefix of the submodule will be append to the state_dict. options (StateDictOptions): the options to control how model state_dict and optimizer state_dict should be loaded. See `StateDictOptions` for the details. Returns: ``NamedTuple`` with ``missing_keys`` and ``unexpected_keys`` fields: * **missing_keys** is a list of str containing the missing keys * **unexpected_keys** is a list of str containing the unexpected keys :type model_state_dict: typing.Dict[str, ValueType] """ model_state_dict: dict[str, ValueType] = _unflatten_model_state_dict(model, model_state_dict) with _gc_context(): info = _verify_options(model, (), optim_only=False, options=options) _verify_state_dict(model_state_dict, {}, info) return _load_model_state_dict(model, model_state_dict, info) def set_optimizer_state_dict( model: nn.Module, optimizers: torch.optim.Optimizer | Iterable[torch.optim.Optimizer], optim_state_dict: OptimizerStateType, *, options: Optional[StateDictOptions] = None, ) -> None: """Load the optimizers state_dict. The counterpart of ``get_optimizer_state_dict`` to set the state_dict to the optimizers. See ``set_state_dict`` for the detail usage. WARN: ``set_optimizer_state_dict`` can only be called before ``backward()`` or after ``step()`` is called on the optimizers. Otherwise, the optimizer states won't be initialized correctly. Args: model (nn.Module): the nn.Module to the model. optimizers (Union[Optimizer, Iterable[Optimizer]]): The optimizers that are used to optimize ``model``. optim_state_dict: OptimizerStateType: the optimizer state_dict to load. options (StateDictOptions): the options to control how model state_dict and optimizer state_dict should be loaded. See `StateDictOptions` for the details. Returns: None :type optim_state_dict: typing.OptimizerStateType """ with _gc_context(): optimizers = (optimizers,) if isinstance(optimizers, torch.optim.Optimizer) else tuple(optimizers) info = _verify_options(model, optimizers, optim_only=True, options=options) _verify_state_dict({}, optim_state_dict, info) _load_optim_state_dict(model, optimizers, optim_state_dict, info) def set_state_dict( model: nn.Module, optimizers: torch.optim.Optimizer | Iterable[torch.optim.Optimizer], *, model_state_dict: dict[str, ValueType], optim_state_dict: OptimizerStateType, options: Optional[StateDictOptions] = None, ) -> _IncompatibleKeys: """Load the model state_dict and optimizers state_dict. The counterpart of ``get_state_dict`` to set the state_dict to the model and optimizers. The given ``model_state_dict`` and ``optim_state_dict`` do not have to be returned by ``get_state_dict`` but must meet the following requirements: 1) all FQNs are canonical FQNs as defined in ``get_state_dict``, 2) if a tensor is sharded, it must be either a ShardedTensor or DTensor, 3) optimizer state_dict cannot contain the parameter IDs; the keys should be the canonical FQNs. WARN: ``set_state_dict`` can only be called before ``backward()`` or after ``step()`` is called on the optimizers. Otherwise, the optimizer states won't be initialized correctly. Args: model (nn.Module): the nn.Module to the model. optimizers (Union[Optimizer, Iterable[Optimizer]]): The optimizers that are used to optimize ``model``. model_state_dict: (Union[Dict[nn.Module, Dict[str, ValueType]], Dict[str, ValueType]]): the model state_dict to load. 
If the key of the ``model_state_dict`` is nn.Module, the key is a submodule of ``model`` and the value should be the state_dict of the submodule. When loading the state_dict, the prefix of the submodule will be append to the state_dict. optim_state_dict: OptimizerStateType: the optimizer state_dict to load. options (StateDictOptions): the options to control how model state_dict and optimizer state_dict should be loaded. See `StateDictOptions` for the details. Returns: ``NamedTuple`` with ``missing_keys`` and ``unexpected_keys`` fields: * **missing_keys** is a list of str containing the missing keys of the model state_dict. * **unexpected_keys** is a list of str containing the unexpected keys of the model state_dict. :type model_state_dict: typing.Dict[str, ValueType] :type optim_state_dict: typing.OptimizerStateType """ model_state_dict: dict[str, ValueType] = _unflatten_model_state_dict(model, model_state_dict) with _gc_context(): optimizers = (optimizers,) if isinstance(optimizers, torch.optim.Optimizer) else tuple(optimizers) info = _verify_options(model, optimizers, optim_only=not model_state_dict, options=options) _verify_state_dict(model_state_dict, optim_state_dict, info) _load_optim_state_dict(model, optimizers, optim_state_dict, info) return _load_model_state_dict(model, model_state_dict, info) # TODO: correct the state_dict function signature. # TODO: this API is not yet fully tested. Make it private @no_type_check def _patch_model_state_dict( model: nn.Module, *, options: Optional[StateDictOptions] = None, ) -> None: """Patch the ``state_dict`` and ``load_state_dict`` attributes of ``model``. Patch the ``state_dict`` and ``load_state_dict`` attributes of ``model`` to be a partial function to call ``get_state_dict`` and ``set_state_dict``. Example: from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.checkpoint.state_dict import patch_model_state_dict model = fsdp(model) patch_model_state_dict(model) Args: model (nn.Module): the nn.Module to the model. options (StateDictOptions): the options to control how model state_dict and optimizer state_dict should be loaded. See `StateDictOptions` for the details. Returns: None """ _state_dict_call = functools.partial( get_model_state_dict, model=model, options=options, ) def state_dict_call(): return _state_dict_call() model.state_dict = state_dict_call _load_state_dict_call = functools.partial( set_model_state_dict, model=model, options=options, ) def load_state_dict_call(state_dict: dict[str, Any]): _load_state_dict_call(model_state_dict=state_dict) model.load_state_dict = load_state_dict_call _patched_state_dict.add(state_dict_call) _patched_state_dict.add(load_state_dict_call) # TODO: correct the load_state_dict function signature. # TODO: this API is not yet fully tested. Make it private @no_type_check def _patch_optimizer_state_dict( model: nn.Module, *, optimizers: tuple[torch.optim.Optimizer, ...], options: Optional[StateDictOptions] = None, ) -> None: """Patch the ``state_dict`` and ``load_state_dict`` attributes of ``optimizers``. Patch the ``state_dict`` and ``load_state_dict`` attributes of ``optimizers`` to be a partial function to call ``get_state_dict`` and ``set_state_dict``. Note that if there are multiple optimizers, all of the optimizers will be patched. So users only need to call one of the state_dict() to get the full result. 
Example: from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.checkpoint.state_dict import patch_model_state_dict model = fsdp(model) patch_model_state_dict(model) Args: model (nn.Module): the nn.Module to the model. options (StateDictOptions): the options to control how model state_dict and optimizer state_dict should be loaded. See `StateDictOptions` for the details. Returns: None """ _state_dict_call = functools.partial( get_optimizer_state_dict, model=model, optimizers=optimizers, options=options, ) def state_dict_call(): return _state_dict_call() _load_state_dict_call = functools.partial( set_optimizer_state_dict, model=model, optimizers=optimizers, options=options, ) def load_state_dict_call(state_dict: dict[str, Any]): _load_state_dict_call(optim_state_dict=state_dict) _patched_state_dict.add(state_dict_call) _patched_state_dict.add(load_state_dict_call) optimizers = (optimizers,) if isinstance(optimizers, torch.optim.Optimizer) else tuple(optimizers) for optim in optimizers: optim.state_dict = state_dict_call optim.load_state_dict = load_state_dict_call ================================================ FILE: verl_rl/verl/third_party/vllm/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from importlib.metadata import PackageNotFoundError, version from packaging import version as vs from verl.utils.import_utils import is_sglang_available def get_version(pkg): try: return version(pkg) except PackageNotFoundError: return None package_name = "vllm" package_version = get_version(package_name) vllm_version = None if package_version is None: if not is_sglang_available(): raise ValueError( f"vllm version {package_version} not supported and SGLang also not Found. Currently supported " f"vllm versions are 0.7.0+" ) elif vs.parse(package_version) >= vs.parse("0.7.0"): vllm_version = package_version from vllm import LLM from vllm.distributed import parallel_state else: if vs.parse(package_version) in [vs.parse("0.5.4"), vs.parse("0.6.3")]: raise ValueError( f"vLLM version {package_version} support has been removed. vLLM 0.5.4 and 0.6.3 are no longer " f"supported. Please use vLLM 0.7.0 or later." ) if not is_sglang_available(): raise ValueError( f"vllm version {package_version} not supported and SGLang also not Found. Currently supported " f"vllm versions are 0.7.0+" ) __all__ = ["LLM", "parallel_state"] ================================================ FILE: verl_rl/verl/tools/__init__.py ================================================ # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: verl_rl/verl/tools/base_tool.py ================================================ # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json from typing import Any, Optional from uuid import uuid4 from verl.utils.rollout_trace import rollout_trace_op from .schemas import OpenAIFunctionToolSchema class BaseTool: """Base class for tools. A tool should support the following methods: - `to_openai_function_tool_schema`: return the tool schema in OpenAI format. - `create`: create a tool instance for a trajectory. - `execute`: execute the tool. - `calc_reward`: calculate the reward respect to tool state. - `release`: release the tool instance. """ def __init__(self, config: dict, tool_schema: OpenAIFunctionToolSchema): self.config = config self.tool_schema = tool_schema or self.get_openai_tool_schema() assert self.tool_schema is not None, "Tool schema is not set!" self.name = self.tool_schema.function.name print(json.dumps(self.tool_schema.model_dump(exclude_unset=True, exclude_none=True), indent=2)) def get_openai_tool_schema(self) -> OpenAIFunctionToolSchema: return self.tool_schema async def create(self, instance_id: Optional[str] = None, **kwargs) -> str: """Create a tool instance. Args: instance_id: The instance id of the tool. Returns: The instance id of the tool. """ if instance_id is None: return str(uuid4()) else: return instance_id @rollout_trace_op async def execute(self, instance_id: str, parameters: dict[str, Any], **kwargs) -> tuple[str, float, dict]: """Execute the tool. Args: instance_id: The instance id of the tool. parameters: The json string of the parameters of the tool. Returns: tool_response, tool_reward_score, tool_metrics tool_response: The response str of the tool. tool_reward_score: The step reward score of the tool. tool_metrics: The metrics of the tool. """ return "Updated the tool state.", 0.0, {} async def calc_reward(self, instance_id: str, **kwargs) -> float: """Calculate the reward of the tool. Args: instance_id: The instance id of the tool. Returns: The reward of the tool. """ return 0.0 async def release(self, instance_id: str, **kwargs) -> None: """Release the tool instance. Args: instance_id: The instance id of the tool. """ pass ================================================ FILE: verl_rl/verl/tools/geo3k_tool.py ================================================ # Copyright 2023-2025 SGLang Team # Copyright Amazon.com, Inc. or its affiliates. # Copyright 2025 ModelBest Inc. 
and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os from typing import Any, Optional from uuid import uuid4 from verl.utils.reward_score import geo3k from verl.utils.rollout_trace import rollout_trace_op from .base_tool import BaseTool from .schemas import OpenAIFunctionToolSchema logger = logging.getLogger(__name__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) class Geo3kTool(BaseTool): """A demo tool for calculating the reward of geo3k. - `to_openai_function_tool_schema`: return the tool schema in OpenAI format. - `create`: create a tool instance for a trajectory. - `execute`: execute the tool. - `calc_reward`: calculate the reward with respect to the tool state. - `release`: release the tool instance. """ def __init__(self, config: dict, tool_schema: OpenAIFunctionToolSchema): """ _tool_schema = OpenAIFunctionToolSchema.model_validate({ "type": "function", "function": { "name": "calc_geo3k_reward", "description": "A tool for calculating the reward of geo3k", "parameters": { "type": "object", "properties": { "answer": { "type": "string", "description": "The answer to the question, enclosed in \\boxed{}", }, }, "required": ["answer"], }, } }) """ super().__init__(config, tool_schema) self._instance_dict = {} def get_openai_tool_schema(self) -> OpenAIFunctionToolSchema: return self.tool_schema async def create(self, instance_id: Optional[str] = None, ground_truth: Optional[str] = None, **kwargs) -> str: if instance_id is None: instance_id = str(uuid4()) self._instance_dict[instance_id] = { "response": "", "ground_truth": ground_truth, "reward": 0.0, } return instance_id @rollout_trace_op async def execute(self, instance_id: str, parameters: dict[str, Any], **kwargs) -> tuple[str, float, dict]: answer = parameters.get("answer", "") if not isinstance(answer, str): answer = str(answer) self._instance_dict[instance_id]["response"] = answer reward = await self.calc_reward(instance_id) # penalty for a non-improved answer submission tool_reward = 0.0 if reward > self._instance_dict[instance_id]["reward"] else -0.05 # update the reward self._instance_dict[instance_id]["reward"] = reward return f"Current parsed {answer=} {reward=}", tool_reward, {} async def calc_reward(self, instance_id: str, **kwargs) -> float: return geo3k.compute_score( self._instance_dict[instance_id]["response"], self._instance_dict[instance_id]["ground_truth"], use_boxed=False, format_score=0.0, ) async def release(self, instance_id: str, **kwargs) -> None: del self._instance_dict[instance_id] ================================================ FILE: verl_rl/verl/tools/gsm8k_tool.py ================================================ # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os from typing import Any, Optional from uuid import uuid4 from verl.utils.reward_score import gsm8k from verl.utils.rollout_trace import rollout_trace_op from .base_tool import BaseTool from .schemas import OpenAIFunctionToolSchema logger = logging.getLogger(__name__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) class Gsm8kTool(BaseTool): """A demo tool for calculating the reward of gsm8k. - `to_openai_function_tool_schema`: return the tool schema in OpenAI format. - `create`: create a tool instance for a trajectory. - `execute`: execute the tool. - `calc_reward`: calculate the reward respect to tool state. - `release`: release the tool instance. """ def __init__(self, config: dict, tool_schema: OpenAIFunctionToolSchema): """ _tool_schema = OpenAIFunctionToolSchema.model_validate({ "type": "function", "function": { "name": "calc_gsm8k_reward", "description": "A tool for calculating the reward of gsm8k", "parameters": { "type": "object", "properties": { "answer": { "type": "string", "description": "The answer to the question", }, }, "required": ["answer"], }, } }) """ super().__init__(config, tool_schema) self._instance_dict = {} def get_openai_tool_schema(self) -> OpenAIFunctionToolSchema: return self.tool_schema async def create(self, instance_id: Optional[str] = None, ground_truth: Optional[str] = None, **kwargs) -> str: if instance_id is None: instance_id = str(uuid4()) self._instance_dict[instance_id] = { "response": "", "ground_truth": ground_truth, "reward": 0.0, } return instance_id @rollout_trace_op async def execute(self, instance_id: str, parameters: dict[str, Any], **kwargs) -> tuple[str, float, dict]: answer = parameters.get("answer", "") if not isinstance(answer, str): answer = str(answer) if answer.startswith("#### "): self._instance_dict[instance_id]["response"] = answer else: self._instance_dict[instance_id]["response"] = "#### " + answer reward = await self.calc_reward(instance_id) # penalty for non improved answer submission tool_reward = 0.0 if reward > self._instance_dict[instance_id]["reward"] else -0.05 # update the reward self._instance_dict[instance_id]["reward"] = reward return f"Current parsed {answer=} {reward=}", tool_reward, {} async def calc_reward(self, instance_id: str, **kwargs) -> float: return gsm8k.compute_score( self._instance_dict[instance_id]["response"], self._instance_dict[instance_id]["ground_truth"], method="flexible", format_score=0.0, score=1.0, ) async def release(self, instance_id: str, **kwargs) -> None: del self._instance_dict[instance_id] ================================================ FILE: verl_rl/verl/tools/mcp_base_tool.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import logging import os from typing import Any, Optional from uuid import uuid4 from fastmcp.exceptions import ClientError from verl.tools.utils.mcp_clients.McpClientManager import ClientManager from verl.utils.rollout_trace import rollout_trace_op from .base_tool import BaseTool from .schemas import OpenAIFunctionToolSchema logger = logging.getLogger(__name__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) class MCPBaseTool(BaseTool): def __init__(self, config: dict, tool_schema: OpenAIFunctionToolSchema): super().__init__(config, tool_schema) self._instance_dict = {} self.timeout = config.get("timeout", 30) # TODO(hechanghao): create a global client manager to manage the rate limit, client and pool logger.info(f"Initialized MCPBaseTool with config: {config}") def get_openai_tool_schema(self) -> OpenAIFunctionToolSchema: """Return the OpenAI tool schema.""" return self.tool_schema async def create(self, instance_id: Optional[str] = None, **kwargs) -> str: """Create a tool instance. Args: instance_id: The instance id of the tool. Returns: The instance id of the tool. """ if instance_id is None: instance_id = str(uuid4()) self._instance_dict[instance_id] = { "response": "", "reward": [], } return instance_id async def _call_tool(self, instance_id, parameters) -> tuple[str, dict]: err_msg = "" call_tool_result = None try: call_tool_result = await ClientManager.call_tool(self.name, parameters, self.timeout) except ClientError as e: err_msg = f"\n Tool call failed: {e}" except ConnectionError as e: err_msg = f"\n Connection failed: {e}" except Exception as e: err_msg = f"\n An unexpected error occurred: {e}" if call_tool_result is None: # The call failed before producing a result; surface the error instead of touching an unbound variable. return err_msg, {"api_request_error": err_msg} logger.debug(f"Tool result for instance {instance_id} with tool {self.name}: {call_tool_result.content}") result, metadata = self._parse_tool_result(call_tool_result.content) # The base _parse_tool_result returns empty metadata, so the key may be absent. metadata["api_request_error"] = metadata.get("api_request_error", "") + err_msg return result, metadata @rollout_trace_op async def execute(self, instance_id: str, parameters: dict[str, Any], **kwargs) -> tuple[str, float, dict]: if self.name == "" or self.name is None or parameters is None: error_msg = "Error: 'parameters' is missing or empty."
logger.error(f"[MCPTool] {error_msg} Received tool name: {self.name}, parameters: {parameters}") return json.dumps({"result": error_msg}), 0.0, {} try: result_text, metadata = await self._call_tool(instance_id, parameters) # Store results in instance dictionary self._instance_dict[instance_id]["reward"].append(result_text.strip()) # Convert metadata to metrics metrics = { "query_count": metadata.get("query_count", 0), "status": metadata.get("status", "unknown"), "total_results": metadata.get("total_results", 0), "api_request_error": metadata.get("api_request_error"), } return result_text, 0.0, metrics except Exception as e: error_result = json.dumps({"result": f"Tool execution failed: {e}"}) logger.error(f"[MCPBaseTool] Execution failed: {e}") return error_result, 0.0, {"error": str(e)} async def calc_reward(self, instance_id: str, **kwargs) -> str: return self._instance_dict[instance_id]["reward"] async def release(self, instance_id: str, **kwargs) -> None: if instance_id in self._instance_dict: del self._instance_dict[instance_id] def _parse_tool_result(self, content: list) -> tuple[str, dict]: tools_content = [part.text for part in filter(lambda x: x.type == "text", content)] return " ".join(tools_content), {} ================================================ FILE: verl_rl/verl/tools/mcp_search_tool.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import logging import os import re from verl.tools.mcp_base_tool import MCPBaseTool from .schemas import OpenAIFunctionToolSchema logger = logging.getLogger(__name__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) class MCPSearchTool(MCPBaseTool): def __init__(self, config: dict, tool_schema: OpenAIFunctionToolSchema): super().__init__(config, tool_schema) def _parse_tool_result(self, content: list) -> tuple[str, dict]: res = "" res_cnt = 0 query_list = [] metadata = { "api_request_error": "", "status": "unknown", "total_results": 0, } try: for part in content: if part.type != "text": continue text = part.text.replace("'", '"') query_match = re.search(r'query"\s*:\s*"([^"]+)"', text) query = query_match.group(1) if query_match else "" query_list.append(query) title_matches = re.findall(r'"title"\s*:', text) title_count = len(title_matches) results_match = re.search(r'"results"\s*:\s*(\[.*?\])', text, re.DOTALL) results_content = results_match.group(1) if results_match else "" res += results_content res_cnt += title_count except json.JSONDecodeError: err_msg = "json parse error." logger.error(err_msg) metadata["api_request_error"] = err_msg metadata["status"] = "error" # update metadata metadata["status"] = "success" metadata["queries"] = query_list metadata["query_count"] = len(query_list) metadata["total_results"] = res_cnt return res, metadata ================================================ FILE: verl_rl/verl/tools/sandbox_fusion_tools.py ================================================ # Copyright 2025 Bytedance Ltd. 
and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os import threading from contextlib import ExitStack from enum import Enum from typing import Any, Callable, Optional, TypeVar from uuid import uuid4 import ray from verl.tools.base_tool import BaseTool from verl.utils.reward_score.sandbox_fusion.utils import _process_single_case from verl.utils.rollout_trace import rollout_trace_op from .schemas import OpenAIFunctionToolSchema logger = logging.getLogger(__name__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) T = TypeVar("T") class PoolMode(Enum): ThreadMode = 1 ProcessMode = 2 @ray.remote(concurrency_groups={"acquire": 1, "release": 10}) class TokenBucketWorker: def __init__(self, rate_limit: int): self.rate_limit = rate_limit # this is only used for observability self.current_count = 0 self._semaphore = threading.Semaphore(rate_limit) @ray.method(concurrency_group="acquire") def acquire(self): self._semaphore.acquire() self.current_count += 1 @ray.method(concurrency_group="release") def release(self): self._semaphore.release() self.current_count -= 1 def get_current_count(self): return self.current_count class ExecutionWorker: def __init__(self, enable_global_rate_limit=True, rate_limit=10): self.rate_limit_worker = self._init_rate_limit(rate_limit) if enable_global_rate_limit else None def _init_rate_limit(self, rate_limit): # TODO validation for rate_limit # A singleton rate limiter return TokenBucketWorker.options(name="rate-limiter", get_if_exists=True).remote(rate_limit) def ping(self): return True def execute(self, fn: Callable[..., T], *fn_args, **fn_kwargs) -> T: if self.rate_limit_worker is None: # Rate limiting is disabled; run the function directly. return fn(*fn_args, **fn_kwargs) with ExitStack() as stack: stack.callback(self.rate_limit_worker.release.remote) ray.get(self.rate_limit_worker.acquire.remote()) try: return fn(*fn_args, **fn_kwargs) except Exception as e: # TODO we should make this available to the tool caller logger.warning(f"Error when executing code: {e}") def init_execution_pool( num_workers: int, enable_global_rate_limit=True, rate_limit=10, mode: PoolMode = PoolMode.ThreadMode ): if mode == PoolMode.ThreadMode: return ( ray.remote(ExecutionWorker) .options(max_concurrency=num_workers) .remote(enable_global_rate_limit=enable_global_rate_limit, rate_limit=rate_limit) ) else: raise NotImplementedError("Process mode is not implemented yet") # return ray.util.multiprocessing.Pool(processes=num_workers) class SandboxFusionTool(BaseTool): """A tool for executing code using the sandbox fusion image. - `to_openai_function_tool_schema`: return the tool schema in OpenAI format. - `create`: create a tool instance for a trajectory. - `execute`: execute the tool. - `calc_reward`: calculate the reward with respect to the tool state. - `release`: release the tool instance.
""" def __init__(self, config: dict, tool_schema: OpenAIFunctionToolSchema): """ _tool_schema = OpenAIFunctionToolSchema.model_validate({ "type": "function", "function": { "name": "code_interpreter", "description": "A tool for execute code", "parameters": { "type": "object", "properties": { "code": { "type": "string", "description": "code needs to be execute and grad", }, }, "required": ["code"], }, } }) """ super().__init__(config, tool_schema) self._instance_dict = {} # TODO: better documentation for the config self.num_workers = config.get("num_workers", 10) self.rate_limit = config.get("rate_limit", 10) self.default_timeout = config.get("default_timeout", 30) self.default_language = config.get("default_language", "python") self.enable_global_rate_limit = config.get("enable_global_rate_limit", True) self.execution_pool = init_execution_pool( num_workers=self.num_workers, enable_global_rate_limit=self.enable_global_rate_limit, rate_limit=self.rate_limit, mode=PoolMode.ThreadMode, ) self.sandbox_fusion_url = config.get("sandbox_fusion_url", "") self.memory_limit_mb = config.get("memory_limit_mb", 1024) if self.sandbox_fusion_url == "": raise ValueError("sandbox_fusion_url is not set") log_msg = f"Init SandboxFusionTool with config: {config}" logger.info(log_msg) def get_openai_tool_schema(self) -> OpenAIFunctionToolSchema: return self.tool_schema async def create(self, instance_id: Optional[str] = None, ground_truth: Optional[str] = None, **kwargs) -> str: if instance_id is None: instance_id = str(uuid4()) self._instance_dict[instance_id] = { "response": "", "ground_truth": ground_truth, "reward": [], } return instance_id @rollout_trace_op async def execute(self, instance_id: str, parameters: dict[str, Any], **kwargs) -> tuple[str, float, dict]: code = parameters.get("code", "") timeout = parameters.get("timeout", self.default_timeout) language = parameters.get("language", self.default_language) if not isinstance(code, str): code = str(code) result = await self.execution_pool.execute.remote(self.execute_code, instance_id, code, timeout, language) # sandbox has no score or metrics, use Nones return result, None, None def execute_code(self, instance_id, code, timeout=30, language="python"): result_status, metadata = _process_single_case( 0, None, None, self.sandbox_fusion_url, code, timeout, self.memory_limit_mb, language ) # we should always expect this since we don't have correct answer if metadata["run_status"] == "Finished": actual_output = metadata["stdout"] + metadata["stderr"] logger.debug(f"actual_output from sandbox fusion: {actual_output},{instance_id}") return actual_output else: return "no stdout here" async def calc_reward(self, instance_id: str, **kwargs) -> str: return self._instance_dict[instance_id]["reward"] async def release(self, instance_id: str, **kwargs) -> None: del self._instance_dict[instance_id] ================================================ FILE: verl_rl/verl/tools/schemas.py ================================================ # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. import json from typing import Any, Literal from pydantic import BaseModel class OpenAIFunctionPropertySchema(BaseModel): """The schema of a parameter in OpenAI format.""" type: str description: str | None = None enum: list[str] | None = None class OpenAIFunctionParametersSchema(BaseModel): """The schema of parameters in OpenAI format.""" type: str properties: dict[str, OpenAIFunctionPropertySchema] required: list[str] class OpenAIFunctionSchema(BaseModel): """The schema of a function in OpenAI format.""" name: str description: str parameters: OpenAIFunctionParametersSchema strict: bool = False class OpenAIFunctionToolSchema(BaseModel): """The schema of a tool in OpenAI format.""" type: str function: OpenAIFunctionSchema class OpenAIFunctionParsedSchema(BaseModel): """The parsed schema of a tool in OpenAI format.""" name: str arguments: str # JSON string class OpenAIFunctionCallSchema(BaseModel): """The parsed schema of a tool in OpenAI format.""" name: str arguments: dict[str, Any] @staticmethod def from_openai_function_parsed_schema( parsed_schema: OpenAIFunctionParsedSchema, ) -> tuple["OpenAIFunctionCallSchema", bool]: has_decode_error = False try: arguments = json.loads(parsed_schema.arguments) except json.JSONDecodeError: arguments = {} has_decode_error = True # If the arguments is not a dict, it means the arguments is not a valid JSON string if not isinstance(arguments, dict): arguments = {} has_decode_error = True return OpenAIFunctionCallSchema(name=parsed_schema.name, arguments=arguments), has_decode_error class OpenAIFunctionToolCall(BaseModel): """The tool call in OpenAI format.""" id: str type: Literal["function"] = "function" function: OpenAIFunctionCallSchema ================================================ FILE: verl_rl/verl/tools/search_tool.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2023-2024 SGLang Team # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
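The schemas file above is small but load-bearing; before the search tool below, here is a short usage sketch for the argument-parsing helper (import path assumed from this repo's layout):

```python
from verl.tools.schemas import OpenAIFunctionCallSchema, OpenAIFunctionParsedSchema

# Well-formed JSON arguments decode into a dict:
ok, err = OpenAIFunctionCallSchema.from_openai_function_parsed_schema(
    OpenAIFunctionParsedSchema(name="search", arguments='{"query_list": ["a"]}')
)
assert not err and ok.arguments == {"query_list": ["a"]}

# Malformed JSON (or JSON that is not a dict) yields empty arguments plus a decode-error flag:
bad, err = OpenAIFunctionCallSchema.from_openai_function_parsed_schema(
    OpenAIFunctionParsedSchema(name="search", arguments="[1, 2")
)
assert err and bad.arguments == {}
```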
import json import logging import os import threading from contextlib import ExitStack from enum import Enum from typing import Any, Callable, Optional, TypeVar from uuid import uuid4 import ray import ray.actor from verl.tools.utils.search_r1_like_utils import perform_single_search_batch from verl.utils.rollout_trace import rollout_trace_op from .base_tool import BaseTool from .schemas import OpenAIFunctionToolSchema logger = logging.getLogger(__name__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) T = TypeVar("T") # Adapted from verl/tools/sandbox_fusion_tools.py class PoolMode(Enum): """Execution pool mode enumeration.""" ThreadMode = 1 ProcessMode = 2 @ray.remote(concurrency_groups={"acquire": 1, "release": 10}) class TokenBucketWorker: """Ray actor for rate limiting using token bucket algorithm.""" def __init__(self, rate_limit: int): self.rate_limit = rate_limit self.current_count = 0 # For observability self._semaphore = threading.Semaphore(rate_limit) @ray.method(concurrency_group="acquire") def acquire(self): """Acquire a token from the bucket.""" self._semaphore.acquire() self.current_count += 1 @ray.method(concurrency_group="release") def release(self): """Release a token back to the bucket.""" self._semaphore.release() self.current_count -= 1 def get_current_count(self): """Get current number of acquired tokens.""" return self.current_count class SearchExecutionWorker: """Worker for executing search operations with optional rate limiting.""" def __init__(self, enable_global_rate_limit=True, rate_limit=10): self.rate_limit_worker = self._init_rate_limit(rate_limit) if enable_global_rate_limit else None def _init_rate_limit(self, rate_limit): """Initialize singleton rate limiter.""" return TokenBucketWorker.options(name="rate-limiter", get_if_exists=True).remote(rate_limit) def ping(self): """Health check method.""" return True def execute(self, fn: Callable[..., T], *fn_args, **fn_kwargs) -> T: """Execute function with optional rate limiting.""" if self.rate_limit_worker: with ExitStack() as stack: stack.callback(self.rate_limit_worker.release.remote) ray.get(self.rate_limit_worker.acquire.remote()) try: return fn(*fn_args, **fn_kwargs) except Exception as e: # TODO we should make this available to the tool caller logger.warning(f"Error when executing search: {e}") else: return fn(*fn_args, **fn_kwargs) def init_search_execution_pool( num_workers: int, enable_global_rate_limit=True, rate_limit=10, mode: PoolMode = PoolMode.ThreadMode ): """Initialize search execution pool.""" if mode == PoolMode.ThreadMode: return ( ray.remote(SearchExecutionWorker) .options(max_concurrency=num_workers) .remote(enable_global_rate_limit=enable_global_rate_limit, rate_limit=rate_limit) ) else: raise NotImplementedError("Process mode is not implemented yet") class SearchTool(BaseTool): """Search tool for retrieving information using external retrieval services. This tool provides search functionality with rate limiting and concurrent execution support through Ray. It integrates with external retrieval services to perform semantic search operations. Methods: get_openai_tool_schema: Return the tool schema in OpenAI format create: Create a tool instance for a trajectory execute: Execute the search tool calc_reward: Calculate the reward with respect to tool state release: Release the tool instance """ def __init__(self, config: dict, tool_schema: OpenAIFunctionToolSchema): """Initialize SearchTool with configuration and schema. 
Args: config: Configuration dictionary containing tool settings tool_schema: OpenAI function tool schema definition Example tool_schema: { "type": "function", "function": { "name": "search", "description": "Searches for relevant information based on queries.", "parameters": { "type": "object", "properties": { "query_list": { "type": "array", "items": {"type": "string"}, "description": "List of search queries" } }, "required": ["query_list"] } } } """ super().__init__(config, tool_schema) self._instance_dict = {} # Worker and rate limiting configuration self.num_workers = config.get("num_workers", 120) self.rate_limit = config.get("rate_limit", 120) self.timeout = config.get("timeout", 30) self.enable_global_rate_limit = config.get("enable_global_rate_limit", True) self.execution_pool = init_search_execution_pool( num_workers=self.num_workers, enable_global_rate_limit=self.enable_global_rate_limit, rate_limit=self.rate_limit, mode=PoolMode.ThreadMode, ) # Retrieval service configuration self.retrieval_service_url = config.get("retrieval_service_url") assert self.retrieval_service_url, "Configuration must include 'retrieval_service_url'" self.topk = config.get("topk", 3) if self.retrieval_service_url == "": raise ValueError("retrieval_service_url is not set") logger.info(f"Initialized SearchTool with config: {config}") def get_openai_tool_schema(self) -> OpenAIFunctionToolSchema: """Return the OpenAI tool schema.""" return self.tool_schema async def create(self, instance_id: Optional[str] = None, **kwargs) -> str: """Create a tool instance. Args: instance_id: The instance id of the tool. Returns: The instance id of the tool. """ if instance_id is None: instance_id = str(uuid4()) self._instance_dict[instance_id] = { "response": "", "reward": [], } return instance_id def execute_search(self, instance_id: str, query_list: list, retrieval_service_url: str, topk: int, timeout: int): """Execute search operation using retrieval service. Args: instance_id: Tool instance ID query_list: List of search queries retrieval_service_url: URL of the retrieval service topk: Number of top results to return timeout: Request timeout in seconds Returns: Tuple of (result_text, metadata) """ result_text, metadata = perform_single_search_batch( retrieval_service_url=retrieval_service_url, query_list=query_list, topk=topk, concurrent_semaphore=None, # Ray handles concurrency control timeout=timeout, ) logger.debug(f"Search result for instance {instance_id}: {result_text}") return result_text, metadata @rollout_trace_op async def execute(self, instance_id: str, parameters: dict[str, Any], **kwargs) -> tuple[str, float, dict]: """Execute the search tool. Args: instance_id: The instance ID of the tool parameters: Tool parameters containing query_list and optional timeout Returns: tool_response, tool_reward_score, tool_metrics tool_response: The response str of the tool. tool_reward_score: The step reward score of the tool. tool_metrics: The metrics of the tool. """ timeout = self.timeout query_list_from_params = parameters.get("query_list") if not query_list_from_params or not isinstance(query_list_from_params, list): error_msg = "Error: 'query_list' is missing, empty, or not a list in parameters." 
logger.error(f"[SearchTool] {error_msg} Received parameters: {parameters}") return json.dumps({"result": error_msg}), 0.0, {} # Execute search using Ray execution pool try: result_text, metadata = await self.execution_pool.execute.remote( self.execute_search, instance_id, query_list_from_params, self.retrieval_service_url, self.topk, timeout ) # Store results in instance dictionary self._instance_dict[instance_id]["reward"].append(result_text.strip()) # Convert metadata to metrics metrics = { "query_count": metadata.get("query_count", 0), "status": metadata.get("status", "unknown"), "total_results": metadata.get("total_results", 0), "api_request_error": metadata.get("api_request_error"), } return result_text, 0.0, metrics except Exception as e: error_result = json.dumps({"result": f"Search execution failed: {e}"}) logger.error(f"[SearchTool] Execution failed: {e}") return error_result, 0.0, {"error": str(e)} async def calc_reward(self, instance_id: str, **kwargs) -> str: return self._instance_dict[instance_id]["reward"] async def release(self, instance_id: str, **kwargs) -> None: if instance_id in self._instance_dict: del self._instance_dict[instance_id] ================================================ FILE: verl_rl/verl/tools/utils/__init__.py ================================================ # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: verl_rl/verl/tools/utils/mcp_clients/McpClientManager.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import asyncio
import json
import logging
from typing import Any

from fastmcp import Client
from fastmcp.client.transports import SSETransport

from verl.tools.utils.mcp_clients.utils import TokenBucket, mcp2openai

logger = logging.getLogger(__name__)


class MCPClientManager:
    rootServerName = "mcpServers"
    initialized = False
    clients = []
    tool_client_mapping = {}
    rate_limiter = None

    async def initialize(self, config_path, rate_limit: float = 10.0):
        """Initialize the MCP Client Manager and start all clients."""
        if self.initialized:
            return
        result = self._load_config(config_path)
        servers = result[self.rootServerName]
        exclude_sse_servers = {self.rootServerName: {}}
        for server_name in servers.keys():
            server = servers[server_name]
            if "auth_token" in server:
                transport = SSETransport(url=server["url"], headers={"Authorization": f"Bearer {server['auth_token']}"})
                client = Client(transport)
                self.clients.append(client)
            else:
                exclude_sse_servers[self.rootServerName][server_name] = server

        if exclude_sse_servers[self.rootServerName]:
            self.clients.append(Client(exclude_sse_servers))

        # Initialize rate limiter
        self.rate_limiter = TokenBucket(rate_limit)
        self.initialized = True

    async def call_tool(self, tool_name, parameters, timeout):
        # Apply rate limiting
        while not self.rate_limiter.acquire():
            await asyncio.sleep(0.1)

        client = self.get_client_with_tool_name(tool_name)
        async with client:
            return await client.call_tool_mcp(tool_name, parameters)

    async def fetch_tool_schemas(self, tool_selected_list: list[str]) -> list[dict]:
        tool_schemas = []
        for client in self.clients:
            async with client:
                tools = await client.list_tools_mcp()
                for tool in tools.tools:
                    if not tool_selected_list:
                        self.tool_client_mapping[tool.name] = client
                        tool_schemas.append(mcp2openai(tool))
                    elif tool.name in tool_selected_list:
                        self.tool_client_mapping[tool.name] = client
                        tool_schemas.append(mcp2openai(tool))
        return tool_schemas

    def get_client_with_tool_name(self, tool_name: str):
        return self.tool_client_mapping[tool_name]

    def _load_config(self, file: str) -> dict[str, Any]:
        try:
            with open(file) as f:
                return json.load(f)
        except FileNotFoundError:
            logger.warning(f'the "{file}" file was not found')
        except Exception:
            logger.error(f'there was an error reading the "{file}" file')
        return {}


ClientManager = MCPClientManager()


================================================
FILE: verl_rl/verl/tools/utils/mcp_clients/utils.py
================================================
# Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
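# The TokenBucket below is non-blocking: acquire() returns False when no token
# is available, so callers are expected to poll (MCPClientManager.call_tool
# sleeps 0.1s between attempts). A minimal sketch of that pattern:
#
#     bucket = TokenBucket(rate_limit=5.0)  # refills at 5 tokens/sec, capped at 5
#     while not bucket.acquire():
#         time.sleep(0.05)  # or `await asyncio.sleep(0.05)` in async code
#     ...  # proceed with the rate-limited call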
import logging import threading import time from mcp import Tool logger = logging.getLogger(__file__) class TokenBucket: def __init__(self, rate_limit: float): self.rate_limit = rate_limit # tokens per second self.tokens = rate_limit self.last_update = time.time() self.lock = threading.Lock() def acquire(self) -> bool: with self.lock: now = time.time() # Add new tokens based on time elapsed new_tokens = (now - self.last_update) * self.rate_limit self.tokens = min(self.rate_limit, self.tokens + new_tokens) self.last_update = now if self.tokens >= 1: self.tokens -= 1 return True return False def mcp2openai(mcp_tool: Tool) -> dict: """Convert a MCP Tool to an OpenAI ChatCompletionTool.""" openai_format = { "type": "function", "function": { "name": mcp_tool.name, "description": mcp_tool.description, "parameters": mcp_tool.inputSchema, "strict": False, }, } if not openai_format["function"]["parameters"].get("required", None): openai_format["function"]["parameters"]["required"] = [] return openai_format ================================================ FILE: verl_rl/verl/tools/utils/search_r1_like_utils.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2023-2024 SGLang Team # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import logging import threading import time import traceback import uuid from typing import Any, Optional import requests DEFAULT_TIMEOUT = 30 # Default search request timeout MAX_RETRIES = 10 INITIAL_RETRY_DELAY = 1 API_TIMEOUT = 10 logger = logging.getLogger(__name__) def call_search_api( retrieval_service_url: str, query_list: list[str], topk: int = 3, return_scores: bool = True, timeout: int = DEFAULT_TIMEOUT, ) -> tuple[Optional[dict[str, Any]], Optional[str]]: """ Calls the remote search API to perform retrieval with retry logic for various errors, using increasing delay between retries. Logs internal calls with a unique ID. Args: retrieval_service_url: The URL of the retrieval service API. query_list: List of search queries. topk: Number of top results to return. return_scores: Whether to return scores. timeout: Request timeout in seconds. Returns: A tuple (response_json, error_message). If successful, response_json is the API's returned JSON object, error_message is None. If failed after retries, response_json is None, error_message contains the error information. 
""" request_id = str(uuid.uuid4()) log_prefix = f"[Search Request ID: {request_id}] " payload = {"queries": query_list, "topk": topk, "return_scores": return_scores} headers = {"Content-Type": "application/json", "Accept": "application/json"} last_error = None for attempt in range(MAX_RETRIES): try: logger.info( f"{log_prefix}Attempt {attempt + 1}/{MAX_RETRIES}: Calling search API at {retrieval_service_url}" ) response = requests.post( retrieval_service_url, headers=headers, json=payload, timeout=timeout, ) # Check for Gateway Timeout (504) and other server errors for retrying if response.status_code in [500, 502, 503, 504]: last_error = ( f"{log_prefix}API Request Error: Server Error ({response.status_code}) on attempt " f"{attempt + 1}/{MAX_RETRIES}" ) logger.warning(last_error) if attempt < MAX_RETRIES - 1: delay = INITIAL_RETRY_DELAY * (attempt + 1) logger.info(f"{log_prefix}Retrying after {delay} seconds...") time.sleep(delay) continue # Check for other HTTP errors (e.g., 4xx) response.raise_for_status() # If successful (status code 2xx) logger.info(f"{log_prefix}Search API call successful on attempt {attempt + 1}") return response.json(), None except requests.exceptions.ConnectionError as e: last_error = f"{log_prefix}Connection Error: {e}" logger.warning(last_error) if attempt < MAX_RETRIES - 1: delay = INITIAL_RETRY_DELAY * (attempt + 1) logger.info(f"{log_prefix}Retrying after {delay} seconds...") time.sleep(delay) continue except requests.exceptions.Timeout as e: last_error = f"{log_prefix}Timeout Error: {e}" logger.warning(last_error) if attempt < MAX_RETRIES - 1: delay = INITIAL_RETRY_DELAY * (attempt + 1) logger.info(f"{log_prefix}Retrying after {delay} seconds...") time.sleep(delay) continue except requests.exceptions.RequestException as e: last_error = f"{log_prefix}API Request Error: {e}" break # Exit retry loop on other request errors except json.JSONDecodeError as e: raw_response_text = response.text if "response" in locals() else "N/A" last_error = f"{log_prefix}API Response JSON Decode Error: {e}, Response: {raw_response_text[:200]}" break # Exit retry loop on JSON decode errors except Exception as e: last_error = f"{log_prefix}Unexpected Error: {e}" break # Exit retry loop on other unexpected errors # If loop finishes without returning success, return the last recorded error logger.error(f"{log_prefix}Search API call failed. Last error: {last_error}") return None, last_error.replace(log_prefix, "API Call Failed: ") if last_error else "API Call Failed after retries" def _passages2string(retrieval_result): """Convert retrieval results to formatted string.""" format_reference = "" for idx, doc_item in enumerate(retrieval_result): content = doc_item["document"]["contents"] title = content.split("\n")[0] text = "\n".join(content.split("\n")[1:]) format_reference += f"Doc {idx + 1} (Title: {title})\n{text}\n\n" return format_reference.strip() def perform_single_search_batch( retrieval_service_url: str, query_list: list[str], topk: int = 3, concurrent_semaphore: Optional[threading.Semaphore] = None, timeout: int = DEFAULT_TIMEOUT, ) -> tuple[str, dict[str, Any]]: """ Performs a single batch search for multiple queries (original search tool behavior). Args: retrieval_service_url: The URL of the retrieval service API. query_list: List of search queries. topk: Number of top results to return. concurrent_semaphore: Optional semaphore for concurrency control. timeout: Request timeout in seconds. Returns: A tuple (result_text, metadata). result_text: The search result JSON string. 
metadata: Metadata dictionary for the batch search. """ logger.info(f"Starting batch search for {len(query_list)} queries.") api_response = None error_msg = None try: if concurrent_semaphore: with concurrent_semaphore: api_response, error_msg = call_search_api( retrieval_service_url=retrieval_service_url, query_list=query_list, topk=topk, return_scores=True, timeout=timeout, ) else: api_response, error_msg = call_search_api( retrieval_service_url=retrieval_service_url, query_list=query_list, topk=topk, return_scores=True, timeout=timeout, ) except Exception as e: error_msg = f"API Request Exception during batch search: {e}" logger.error(f"Batch search: {error_msg}") traceback.print_exc() metadata = { "query_count": len(query_list), "queries": query_list, "api_request_error": error_msg, "api_response": None, "status": "unknown", "total_results": 0, "formatted_result": None, } result_text = json.dumps({"result": "Search request failed or timed out after retries."}) if error_msg: metadata["status"] = "api_error" result_text = json.dumps({"result": f"Search error: {error_msg}"}) logger.error(f"Batch search: API error occurred: {error_msg}") elif api_response: logger.debug(f"Batch search: API Response: {api_response}") metadata["api_response"] = api_response try: raw_results = api_response.get("result", []) if raw_results: pretty_results = [] total_results = 0 for retrieval in raw_results: formatted = _passages2string(retrieval) pretty_results.append(formatted) total_results += len(retrieval) if isinstance(retrieval, list) else 1 final_result = "\n---\n".join(pretty_results) result_text = json.dumps({"result": final_result}) metadata["status"] = "success" metadata["total_results"] = total_results metadata["formatted_result"] = final_result logger.info(f"Batch search: Successful, got {total_results} total results") else: result_text = json.dumps({"result": "No search results found."}) metadata["status"] = "no_results" metadata["total_results"] = 0 logger.info("Batch search: No results found") except Exception as e: error_msg = f"Error processing search results: {e}" result_text = json.dumps({"result": error_msg}) metadata["status"] = "processing_error" logger.error(f"Batch search: {error_msg}") else: metadata["status"] = "unknown_api_state" result_text = json.dumps({"result": "Unknown API state (no response and no error message)."}) logger.error("Batch search: Unknown API state.") return result_text, metadata ================================================ FILE: verl_rl/verl/tools/utils/tool_registry.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
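# initialize_tools_from_config (defined below) expects an OmegaConf-loadable
# file with a top-level `tools` list; each entry carries `class_name`,
# `config.type` ("native" or "mcp"), and an optional `tool_schema`. A hedged
# sketch of such a file (the class path and URL are illustrative):
#
#     tools:
#       - class_name: verl.tools.search_tool.SearchTool
#         config:
#           type: native
#           retrieval_service_url: http://127.0.0.1:8000/retrieve
#           topk: 3
#         tool_schema:
#           type: function
#           function:
#             name: search
#             description: Searches for relevant information based on queries.
#             parameters:
#               type: object
#               properties:
#                 query_list: {type: array, items: {type: string}}
#               required: [query_list]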
import asyncio
import importlib.util
import logging
import os
import sys
from enum import Enum

from omegaconf import OmegaConf

from verl.tools.schemas import OpenAIFunctionToolSchema

logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))


class ToolType(Enum):
    NATIVE = "native"
    MCP = "mcp"


async def initialize_mcp_tool(tool_cls, tool_config) -> list:
    from verl.tools.utils.mcp_clients.McpClientManager import ClientManager

    tool_list = []
    mcp_servers_config_path = tool_config.mcp.mcp_servers_config_path
    tool_selected_list = tool_config.mcp.tool_selected_list if "tool_selected_list" in tool_config.mcp else None
    await ClientManager.initialize(mcp_servers_config_path, tool_config.config.rate_limit)

    # Wait for MCP client to be ready
    max_retries = 10
    retry_interval = 2  # seconds
    for i in range(max_retries):
        tool_schemas = await ClientManager.fetch_tool_schemas(tool_selected_list)
        if tool_schemas:
            break
        if i < max_retries - 1:
            logger.debug(f"Waiting for MCP client to be ready, attempt {i + 1}/{max_retries}")
            await asyncio.sleep(retry_interval)
        else:
            raise RuntimeError("Failed to initialize MCP tools after maximum retries")

    # mcp registry
    assert len(tool_schemas), "mcp tool is empty"
    for tool_schema_dict in tool_schemas:
        logger.debug(f"tool_schema_dict: {tool_schema_dict}")
        tool_schema = OpenAIFunctionToolSchema.model_validate(tool_schema_dict)
        tool = tool_cls(
            config=OmegaConf.to_container(tool_config.config, resolve=True),
            tool_schema=tool_schema,
        )
        tool_list.append(tool)
    return tool_list


def get_tool_class(cls_name):
    module_name, class_name = cls_name.rsplit(".", 1)

    if module_name not in sys.modules:
        spec = importlib.util.find_spec(module_name)
        module = importlib.util.module_from_spec(spec)
        sys.modules[module_name] = module
        spec.loader.exec_module(module)
    else:
        module = sys.modules[module_name]

    tool_cls = getattr(module, class_name)
    return tool_cls


def initialize_tools_from_config(tools_config_file):
    tools_config = OmegaConf.load(tools_config_file)
    tool_list = []

    for tool_config in tools_config.tools:
        cls_name = tool_config.class_name
        tool_type = ToolType(tool_config.config.type)
        tool_cls = get_tool_class(cls_name)

        match tool_type:
            case ToolType.NATIVE:
                if tool_config.get("tool_schema", None) is None:
                    tool_schema = None
                else:
                    tool_schema_dict = OmegaConf.to_container(tool_config.tool_schema, resolve=True)
                    tool_schema = OpenAIFunctionToolSchema.model_validate(tool_schema_dict)
                tool = tool_cls(
                    config=OmegaConf.to_container(tool_config.config, resolve=True),
                    tool_schema=tool_schema,
                )
                tool_list.append(tool)
            case ToolType.MCP:
                loop = asyncio.get_event_loop()
                mcp_tools = loop.run_until_complete(initialize_mcp_tool(tool_cls, tool_config))
                tool_list.extend(mcp_tools)
            case _:
                raise NotImplementedError

    return tool_list


================================================
FILE: verl_rl/verl/trainer/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
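The trainer configs that follow lean heavily on OmegaConf's `oc.select` resolver: `${oc.select:key,default}` looks `key` up from the config root and falls back to the literal default when the key is absent, which is how ref and critic fields mirror actor settings. A minimal, self-contained illustration (the keys are invented for the demo):

    from omegaconf import OmegaConf

    cfg = OmegaConf.create({
        "actor": {"use_dynamic_bsz": True},
        "ref": {"log_prob_use_dynamic_bsz": "${oc.select:actor.use_dynamic_bsz,false}"},
        "critic": {"use_dynamic_bsz": "${oc.select:missing.key,false}"},
    })
    assert cfg.ref.log_prob_use_dynamic_bsz is True   # selected from actor
    assert cfg.critic.use_dynamic_bsz is False        # key absent, default used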
================================================ FILE: verl_rl/verl/trainer/config/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .algorithm import AlgoConfig, FilterGroupsConfig, KLControlConfig, PFPPOConfig from .config import CriticConfig, FSDPCriticConfig, MegatronCriticConfig __all__ = [ "AlgoConfig", "CriticConfig", "FilterGroupsConfig", "FSDPCriticConfig", "KLControlConfig", "MegatronCriticConfig", "PFPPOConfig", ] ================================================ FILE: verl_rl/verl/trainer/config/_generated_ppo_megatron_trainer.yaml ================================================ # This reference configration yaml is automatically generated via 'scripts/generate_trainer_config.sh' # in which it invokes 'python3 scripts/print_cfg.py --cfg job --config-name=ppo_megatron_trainer.yaml' to flatten the 'verl/trainer/config/ppo_megatron_trainer.yaml' config fields into a single file. # Do not modify this file directly. # The file is usually only for reference and never used. actor_rollout_ref: actor: strategy: megatron ppo_mini_batch_size: 256 ppo_micro_batch_size: null ppo_micro_batch_size_per_gpu: null use_dynamic_bsz: false ppo_max_token_len_per_gpu: 16384 clip_ratio: 0.2 clip_ratio_low: 0.2 clip_ratio_high: 0.2 policy_loss: loss_mode: vanilla clip_cov_ratio: 0.0002 clip_cov_lb: 1.0 clip_cov_ub: 5.0 kl_cov_ratio: 0.0002 ppo_kl_coef: 0.1 clip_ratio_c: 3.0 loss_agg_mode: token-mean entropy_coeff: 0 use_kl_loss: false use_torch_compile: true kl_loss_coef: 0.001 kl_loss_type: low_var_kl ppo_epochs: 1 shuffle: false checkpoint: save_contents: - model - optimizer - extra load_contents: ${.save_contents} async_save: false optim: lr: 1.0e-06 lr_warmup_steps_ratio: 0.0 total_training_steps: -1 weight_decay: 0.01 optimizer: adam clip_grad: 1.0 lr_warmup_init: 0.0 lr_warmup_steps: null lr_decay_steps: null lr_decay_style: constant min_lr: 0.0 weight_decay_incr_style: constant lr_wsd_decay_style: exponential lr_wsd_decay_steps: null use_checkpoint_opt_param_scheduler: false data_loader_seed: null load_weight: true megatron: param_offload: false grad_offload: false optimizer_offload: false tensor_model_parallel_size: 1 expert_model_parallel_size: 1 expert_tensor_parallel_size: null pipeline_model_parallel_size: 1 virtual_pipeline_model_parallel_size: null context_parallel_size: 1 sequence_parallel: true use_distributed_optimizer: true use_dist_checkpointing: false dist_checkpointing_path: null seed: 42 override_ddp_config: {} override_transformer_config: recompute_granularity: null recompute_modules: - core_attn recompute_method: null recompute_num_layers: null use_mbridge: false profile: use_profile: false profile_ranks: null step_start: -1 step_end: -1 save_path: null ref: strategy: megatron use_torch_compile: ${oc.select:actor_rollout_ref.actor.use_torch_compile,true} log_prob_micro_batch_size: null log_prob_micro_batch_size_per_gpu: null log_prob_use_dynamic_bsz: 
${oc.select:actor_rollout_ref.actor.use_dynamic_bsz,false} log_prob_max_token_len_per_gpu: ${oc.select:actor_rollout_ref.actor.ppo_max_token_len_per_gpu,16384} megatron: param_offload: false tensor_model_parallel_size: 1 expert_model_parallel_size: 1 expert_tensor_parallel_size: None pipeline_model_parallel_size: 1 virtual_pipeline_model_parallel_size: null context_parallel_size: 1 sequence_parallel: true use_distributed_optimizer: false use_dist_checkpointing: false dist_checkpointing_path: null seed: ${oc.select:actor_rollout_ref.actor.megatron.seed,42} override_transformer_config: ${oc.select:actor_rollout_ref.actor.megatron.override_transformer_config,{}} use_mbridge: ${oc.select:actor_rollout_ref.actor.megatron.use_mbridge,False} profile: use_profile: false profile_ranks: null step_start: -1 step_end: -1 save_path: null load_weight: true rollout: name: vllm mode: sync temperature: 1.0 top_k: -1 top_p: 1 prompt_length: ${oc.select:data.max_prompt_length,512} response_length: ${oc.select:data.max_response_length,512} dtype: bfloat16 gpu_memory_utilization: 0.5 ignore_eos: false enforce_eager: true free_cache_engine: true tensor_model_parallel_size: 1 max_num_batched_tokens: 8192 max_model_len: null max_num_seqs: 1024 log_prob_micro_batch_size: null log_prob_micro_batch_size_per_gpu: null log_prob_use_dynamic_bsz: ${oc.select:actor_rollout_ref.actor.use_dynamic_bsz,false} log_prob_max_token_len_per_gpu: ${oc.select:actor_rollout_ref.actor.ppo_max_token_len_per_gpu,16384} disable_log_stats: true do_sample: true 'n': 1 multi_stage_wake_up: false engine_kwargs: vllm: swap_space: null disable_mm_preprocessor_cache: false sglang: attention_backend: null val_kwargs: top_k: -1 top_p: 1.0 temperature: 0 'n': 1 do_sample: false multi_turn: enable: false max_assistant_turns: null tool_config_path: null max_user_turns: null max_parallel_calls: 1 max_tool_response_length: 256 tool_response_truncate_side: middle interaction_config_path: null completion_callback: null use_inference_chat_template: false tokenization_sanity_check_mode: strict format: hermes calculate_log_probs: false agent: num_workers: 8 agent_loop_config_path: null custom_async_server: path: null name: null update_weights_bucket_megabytes: 512 trace: backend: null token2text: false enable_chunked_prefill: false load_format: dummy_megatron layer_name_map: qkv_layer_name: qkv gate_proj_layer_name: gate_up hybrid_engine: true nccl_timeout: 600 model: path: ~/models/deepseek-llm-7b-chat custom_chat_template: null external_lib: null override_config: model_config: {} moe_config: freeze_moe_router: false use_fused_kernels: false trust_remote_code: false profiler: _target_: verl.utils.profiler.ProfilerConfig discrete: false all_ranks: false ranks: [] trainer: npu_profile: options: save_path: ./profiler_data level: level1 with_memory: false record_shapes: false with_npu: true with_cpu: true with_module: false with_stack: false analysis: true balance_batch: true total_epochs: 30 total_training_steps: null profile_steps: null project_name: verl_examples experiment_name: gsm8k logger: - console - wandb log_val_generations: 0 nnodes: 1 n_gpus_per_node: 8 save_freq: -1 esi_redundant_time: 0 resume_mode: auto resume_from_path: null del_local_ckpt_after_load: false val_before_train: true test_freq: -1 critic_warmup: 0 default_hdfs_dir: null default_local_dir: checkpoints/${trainer.project_name}/${trainer.experiment_name} max_actor_ckpt_to_keep: null max_critic_ckpt_to_keep: null ray_wait_register_center_timeout: 300 device: cuda 
controller_nsight_options: trace: cuda,nvtx,cublas,ucx cuda-memory-usage: 'true' cuda-graph-trace: graph worker_nsight_options: trace: cuda,nvtx,cublas,ucx cuda-memory-usage: 'true' cuda-graph-trace: graph capture-range: cudaProfilerApi capture-range-end: null kill: none data: tokenizer: null use_shm: false train_files: ~/data/rlhf/gsm8k/train.parquet val_files: ~/data/rlhf/gsm8k/test.parquet prompt_key: prompt reward_fn_key: data_source max_prompt_length: 512 max_response_length: 512 train_batch_size: 1024 val_batch_size: null return_raw_input_ids: false return_raw_chat: false return_full_prompt: false shuffle: true dataloader_num_workers: 8 validation_shuffle: false filter_overlong_prompts: false filter_overlong_prompts_workers: 1 truncation: error image_key: images video_key: videos trust_remote_code: false custom_cls: path: null name: null return_multi_modal_inputs: true sampler: class_path: null class_name: null datagen: path: null name: null critic: rollout_n: ${oc.select:actor_rollout_ref.rollout.n,1} strategy: megatron optim: lr_warmup_steps_ratio: 0.0 total_training_steps: -1 weight_decay: 0.01 optimizer: adam lr: 1.0e-06 clip_grad: 1.0 lr_warmup_init: 0.0 lr_warmup_steps: null lr_decay_steps: null lr_decay_style: linear min_lr: 0.0 weight_decay_incr_style: constant lr_wsd_decay_style: exponential lr_wsd_decay_steps: null use_checkpoint_opt_param_scheduler: false model: path: ~/models/deepseek-llm-7b-chat tokenizer_path: ${oc.select:actor_rollout_ref.model.path,"~/models/deepseek-llm-7b-chat"} override_config: model_config: {} moe_config: freeze_moe_router: false external_lib: ${oc.select:actor_rollout_ref.model.external_lib,null} trust_remote_code: ${oc.select:actor_rollout_ref.model.trust_remote_code,false} ppo_mini_batch_size: ${oc.select:actor_rollout_ref.actor.ppo_mini_batch_size,256} ppo_micro_batch_size: null ppo_micro_batch_size_per_gpu: ${oc.select:.ppo_micro_batch_size,null} use_dynamic_bsz: ${oc.select:actor_rollout_ref.actor.use_dynamic_bsz,false} ppo_max_token_len_per_gpu: 32768 forward_max_token_len_per_gpu: ${.ppo_max_token_len_per_gpu} ppo_epochs: ${oc.select:actor_rollout_ref.actor.ppo_epochs,1} shuffle: ${oc.select:actor_rollout_ref.actor.shuffle,false} cliprange_value: 0.5 loss_agg_mode: ${oc.select:actor_rollout_ref.actor.loss_agg_mode,token-mean} checkpoint: save_contents: - model - optimizer - extra load_contents: ${.save_contents} async_save: false profiler: _target_: verl.utils.profiler.ProfilerConfig discrete: false all_ranks: false ranks: [] _target_: verl.trainer.config.MegatronCriticConfig nccl_timeout: 600 megatron: param_offload: false grad_offload: false optimizer_offload: false tensor_model_parallel_size: 1 expert_model_parallel_size: 1 expert_tensor_parallel_size: null pipeline_model_parallel_size: 1 virtual_pipeline_model_parallel_size: null context_parallel_size: 1 sequence_parallel: true use_distributed_optimizer: true use_dist_checkpointing: false dist_checkpointing_path: null seed: ${oc.select:actor_rollout_ref.actor.megatron.seed,42} override_ddp_config: ${oc.select:actor_rollout_ref.actor.megatron.override_ddp_config,{}} override_transformer_config: ${oc.select:actor_rollout_ref.actor.megatron.override_transformer_config,{}} use_mbridge: ${oc.select:actor_rollout_ref.actor.megatron.use_mbridge,False} load_weight: true data_loader_seed: ${oc.select:actor_rollout_ref.actor.data_loader_seed,null} reward_model: enable: false strategy: megatron model: input_tokenizer: ${actor_rollout_ref.model.path} path: ~/models/FsfairX-LLaMA3-RM-v0.1 
external_lib: ${actor_rollout_ref.model.external_lib} trust_remote_code: false micro_batch_size: null micro_batch_size_per_gpu: null max_length: null use_dynamic_bsz: ${critic.use_dynamic_bsz} forward_max_token_len_per_gpu: ${critic.forward_max_token_len_per_gpu} reward_manager: naive launch_reward_fn_async: false sandbox_fusion: url: null max_concurrent: 64 memory_limit_mb: 1024 profiler: _target_: verl.utils.profiler.ProfilerConfig discrete: false all_ranks: false ranks: [] nccl_timeout: 600 megatron: param_offload: false tensor_model_parallel_size: 1 expert_model_parallel_size: 1 expert_tensor_parallel_size: null pipeline_model_parallel_size: 1 virtual_pipeline_model_parallel_size: null context_parallel_size: 1 sequence_parallel: true use_distributed_optimizer: false use_dist_checkpointing: false dist_checkpointing_path: null seed: ${oc.select:actor_rollout_ref.actor.megatron.seed,42} override_transformer_config: ${oc.select:actor_rollout_ref.actor.megatron.override_transformer_config,{}} use_mbridge: ${oc.select:actor_rollout_ref.actor.megatron.use_mbridge,False} load_weight: true custom_reward_function: path: null name: compute_score algorithm: _target_: verl.trainer.config.AlgoConfig gamma: 1.0 lam: 1.0 adv_estimator: gae norm_adv_by_std_in_grpo: true use_kl_in_reward: false kl_penalty: kl kl_ctrl: _target_: verl.trainer.config.KLControlConfig type: fixed kl_coef: 0.001 horizon: 10000 target_kl: 0.1 use_pf_ppo: false pf_ppo: _target_: verl.trainer.config.PFPPOConfig reweight_method: pow weight_pow: 2.0 ray_init: num_cpus: null timeline_json_file: null ================================================ FILE: verl_rl/verl/trainer/config/_generated_ppo_trainer.yaml ================================================ # This reference configration yaml is automatically generated via 'scripts/generate_trainer_config.sh' # in which it invokes 'python3 scripts/print_cfg.py --cfg job ' to flatten the 'verl/trainer/config/ppo_trainer.yaml' config fields into a single file. # Do not modify this file directly. # The file is usually only for reference and never used. 
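# (Structurally this mirrors _generated_ppo_megatron_trainer.yaml above; the
# main deltas are strategy: fsdp, fsdp_config blocks in place of the megatron
# blocks, and rollout.load_format: dummy_dtensor instead of dummy_megatron.)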
actor_rollout_ref: actor: strategy: fsdp ppo_mini_batch_size: 256 ppo_micro_batch_size: null ppo_micro_batch_size_per_gpu: null use_dynamic_bsz: false ppo_max_token_len_per_gpu: 16384 clip_ratio: 0.2 clip_ratio_low: 0.2 clip_ratio_high: 0.2 policy_loss: loss_mode: vanilla clip_cov_ratio: 0.0002 clip_cov_lb: 1.0 clip_cov_ub: 5.0 kl_cov_ratio: 0.0002 ppo_kl_coef: 0.1 clip_ratio_c: 3.0 loss_agg_mode: token-mean entropy_coeff: 0 use_kl_loss: false use_torch_compile: true kl_loss_coef: 0.001 kl_loss_type: low_var_kl ppo_epochs: 1 shuffle: false checkpoint: save_contents: - model - optimizer - extra load_contents: ${.save_contents} optim: lr: 1.0e-06 lr_warmup_steps_ratio: 0.0 total_training_steps: -1 weight_decay: 0.01 lr_warmup_steps: -1 min_lr_ratio: 0.0 num_cycles: 0.5 warmup_style: constant grad_clip: 1.0 ulysses_sequence_parallel_size: 1 entropy_from_logits_with_chunking: false entropy_checkpointing: false fsdp_config: wrap_policy: min_num_params: 0 param_offload: false optimizer_offload: false offload_policy: false reshard_after_forward: true fsdp_size: -1 forward_prefetch: false ref: strategy: ${actor_rollout_ref.actor.strategy} use_torch_compile: ${oc.select:actor_rollout_ref.actor.use_torch_compile,true} log_prob_micro_batch_size: null log_prob_micro_batch_size_per_gpu: null log_prob_use_dynamic_bsz: ${oc.select:actor_rollout_ref.actor.use_dynamic_bsz,false} log_prob_max_token_len_per_gpu: ${oc.select:actor_rollout_ref.actor.ppo_max_token_len_per_gpu,16384} fsdp_config: param_offload: false reshard_after_forward: true forward_prefetch: false wrap_policy: min_num_params: 0 ulysses_sequence_parallel_size: ${oc.select:actor_rollout_ref.actor.ulysses_sequence_parallel_size,1} entropy_from_logits_with_chunking: false entropy_checkpointing: false rollout: name: vllm mode: sync temperature: 1.0 top_k: -1 top_p: 1 prompt_length: ${oc.select:data.max_prompt_length,512} response_length: ${oc.select:data.max_response_length,512} dtype: bfloat16 gpu_memory_utilization: 0.5 ignore_eos: false enforce_eager: true free_cache_engine: true tensor_model_parallel_size: 2 max_num_batched_tokens: 8192 max_model_len: null max_num_seqs: 1024 log_prob_micro_batch_size: null log_prob_micro_batch_size_per_gpu: null log_prob_use_dynamic_bsz: ${oc.select:actor_rollout_ref.actor.use_dynamic_bsz,false} log_prob_max_token_len_per_gpu: ${oc.select:actor_rollout_ref.actor.ppo_max_token_len_per_gpu,16384} disable_log_stats: true do_sample: true 'n': 1 multi_stage_wake_up: false engine_kwargs: vllm: swap_space: null disable_mm_preprocessor_cache: false sglang: attention_backend: null val_kwargs: top_k: -1 top_p: 1.0 temperature: 0 'n': 1 do_sample: false multi_turn: enable: false max_assistant_turns: null tool_config_path: null max_user_turns: null max_parallel_calls: 1 max_tool_response_length: 256 tool_response_truncate_side: middle interaction_config_path: null completion_callback: null use_inference_chat_template: false tokenization_sanity_check_mode: strict format: hermes calculate_log_probs: false agent: num_workers: 8 agent_loop_config_path: null custom_async_server: path: null name: null update_weights_bucket_megabytes: 512 trace: backend: null token2text: false enable_chunked_prefill: true load_format: dummy_dtensor layered_summon: false hybrid_engine: true model: path: ~/models/deepseek-llm-7b-chat custom_chat_template: null use_shm: false external_lib: null override_config: {} enable_gradient_checkpointing: true enable_activation_offload: false use_remove_padding: false lora_rank: 0 lora_alpha: 16 
target_modules: all-linear exclude_modules: null use_liger: false use_fused_kernels: false fused_kernel_options: impl_backend: torch trust_remote_code: false profiler: _target_: verl.utils.profiler.ProfilerConfig discrete: false all_ranks: false ranks: [] trainer: npu_profile: options: save_path: ./profiler_data level: level1 with_memory: false record_shapes: false with_npu: true with_cpu: true with_module: false with_stack: false analysis: true balance_batch: true total_epochs: 30 total_training_steps: null profile_steps: null controller_nsight_options: trace: cuda,nvtx,cublas,ucx cuda-memory-usage: 'true' cuda-graph-trace: graph worker_nsight_options: trace: cuda,nvtx,cublas,ucx cuda-memory-usage: 'true' cuda-graph-trace: graph capture-range: cudaProfilerApi capture-range-end: null kill: none project_name: verl_examples experiment_name: gsm8k logger: - console - wandb log_val_generations: 0 rollout_data_dir: null validation_data_dir: null nnodes: 1 n_gpus_per_node: 8 save_freq: -1 esi_redundant_time: 0 resume_mode: auto resume_from_path: null val_before_train: true val_only: false test_freq: -1 critic_warmup: 0 default_hdfs_dir: null del_local_ckpt_after_load: false default_local_dir: checkpoints/${trainer.project_name}/${trainer.experiment_name} max_actor_ckpt_to_keep: null max_critic_ckpt_to_keep: null ray_wait_register_center_timeout: 300 device: cuda use_legacy_worker_impl: auto data: tokenizer: null use_shm: false train_files: ~/data/rlhf/gsm8k/train.parquet val_files: ~/data/rlhf/gsm8k/test.parquet prompt_key: prompt reward_fn_key: data_source max_prompt_length: 512 max_response_length: 512 train_batch_size: 1024 val_batch_size: null return_raw_input_ids: false return_raw_chat: false return_full_prompt: false shuffle: true dataloader_num_workers: 8 validation_shuffle: false filter_overlong_prompts: false filter_overlong_prompts_workers: 1 truncation: error image_key: images video_key: videos trust_remote_code: false custom_cls: path: null name: null return_multi_modal_inputs: true sampler: class_path: null class_name: null datagen: path: null name: null critic: rollout_n: ${oc.select:actor_rollout_ref.rollout.n,1} strategy: fsdp optim: lr_warmup_steps_ratio: 0.0 total_training_steps: -1 weight_decay: 0.01 lr: 1.0e-05 min_lr_ratio: null warmup_style: constant model: path: ~/models/deepseek-llm-7b-chat tokenizer_path: ${oc.select:actor_rollout_ref.model.path,"~/models/deepseek-llm-7b-chat"} override_config: {} external_lib: ${oc.select:actor_rollout_ref.model.external_lib,null} trust_remote_code: ${oc.select:actor_rollout_ref.model.trust_remote_code,false} use_shm: false enable_gradient_checkpointing: true enable_activation_offload: false use_remove_padding: false fsdp_config: param_offload: false optimizer_offload: false offload_policy: false reshard_after_forward: true wrap_policy: min_num_params: 0 fsdp_size: -1 forward_prefetch: false lora_rank: 0 lora_alpha: 16 target_modules: all-linear ppo_mini_batch_size: ${oc.select:actor_rollout_ref.actor.ppo_mini_batch_size,256} ppo_micro_batch_size: null ppo_micro_batch_size_per_gpu: ${oc.select:.ppo_micro_batch_size,null} use_dynamic_bsz: ${oc.select:actor_rollout_ref.actor.use_dynamic_bsz,false} ppo_max_token_len_per_gpu: 32768 forward_max_token_len_per_gpu: ${.ppo_max_token_len_per_gpu} ppo_epochs: ${oc.select:actor_rollout_ref.actor.ppo_epochs,1} shuffle: ${oc.select:actor_rollout_ref.actor.shuffle,false} cliprange_value: 0.5 loss_agg_mode: ${oc.select:actor_rollout_ref.actor.loss_agg_mode,token-mean} checkpoint: save_contents: - 
model - optimizer - extra load_contents: ${.save_contents} profiler: _target_: verl.utils.profiler.ProfilerConfig discrete: false all_ranks: false ranks: [] _target_: verl.trainer.config.FSDPCriticConfig forward_micro_batch_size: ${oc.select:.ppo_micro_batch_size,null} forward_micro_batch_size_per_gpu: ${oc.select:.ppo_micro_batch_size_per_gpu,null} ulysses_sequence_parallel_size: 1 grad_clip: 1.0 reward_model: enable: false strategy: fsdp model: input_tokenizer: ${actor_rollout_ref.model.path} path: ~/models/FsfairX-LLaMA3-RM-v0.1 external_lib: ${actor_rollout_ref.model.external_lib} trust_remote_code: false use_shm: false use_remove_padding: false use_fused_kernels: ${actor_rollout_ref.model.use_fused_kernels} fsdp_config: wrap_policy: min_num_params: 0 param_offload: false reshard_after_forward: true fsdp_size: -1 forward_prefetch: false micro_batch_size: null micro_batch_size_per_gpu: null max_length: null use_dynamic_bsz: ${critic.use_dynamic_bsz} forward_max_token_len_per_gpu: ${critic.forward_max_token_len_per_gpu} reward_manager: naive launch_reward_fn_async: false sandbox_fusion: url: null max_concurrent: 64 memory_limit_mb: 1024 profiler: _target_: verl.utils.profiler.ProfilerConfig discrete: false all_ranks: false ranks: [] ulysses_sequence_parallel_size: 1 custom_reward_function: path: null name: compute_score algorithm: _target_: verl.trainer.config.AlgoConfig gamma: 1.0 lam: 1.0 adv_estimator: gae norm_adv_by_std_in_grpo: true use_kl_in_reward: false kl_penalty: kl kl_ctrl: _target_: verl.trainer.config.KLControlConfig type: fixed kl_coef: 0.001 horizon: 10000 target_kl: 0.1 use_pf_ppo: false pf_ppo: _target_: verl.trainer.config.PFPPOConfig reweight_method: pow weight_pow: 2.0 ray_init: num_cpus: null timeline_json_file: null ================================================ FILE: verl_rl/verl/trainer/config/actor/actor.yaml ================================================ # Format checks enforced on CI: # 1. Comments must appear above each field. # 2. There must be a blank line between each field. # 3. Inline comments (after a field on the same line) are not allowed. # 4. Indentation level is respected for nested fields. # the abstract actor configs # fsdp, fsdp2 or megatron. must be set. strategy: ??? 
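# `???` is OmegaConf's mandatory-value marker: accessing the field raises
# MissingMandatoryValue until a concrete value is supplied, e.g. through a
# command-line override such as `actor_rollout_ref.actor.strategy=fsdp2`
# (illustrative override path, assuming the standard ppo trainer entrypoint).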
# Split each sample into sub-batches of this size for PPO ppo_mini_batch_size: 256 # [Deprecated] Global micro batch size ppo_micro_batch_size: null # Local per-GPU micro batch size ppo_micro_batch_size_per_gpu: null # Whether to automatically adjust batch size at runtime # oc.select: the default val for ref.log_prob_use_dynamic_bsz use_dynamic_bsz: false # Max tokens per GPU in one PPO batch; affects gradient accumulation # Typically it should be: n * ${data.max_prompt_length} + ${data.max_response_length} # oc.select: the default val for ref.log_prob_max_token_len_per_gpu ppo_max_token_len_per_gpu: 16384 # PPO clip ratio clip_ratio: 0.2 # Lower bound for asymmetric clipping (used in dual-clip PPO) clip_ratio_low: 0.2 # Upper bound for asymmetric clipping (used in dual-clip PPO) clip_ratio_high: 0.2 # policy loss config policy_loss: # Loss function mode: vanilla / clip-cov / kl-cov /gpg from https://arxiv.org/abs/2505.22617 loss_mode: "vanilla" # Ratio of tokens to be clipped for clip-cov loss clip_cov_ratio: 0.0002 # Lower bound for clip-cov loss clip_cov_lb: 1.0 # Upper bound for clip-cov loss clip_cov_ub: 5.0 # Ratio of tokens to be applied kl penalty for kl-cov loss kl_cov_ratio: 0.0002 # KL divergence penalty coefficient ppo_kl_coef: 0.1 # Constant C in Dual-clip PPO; clips when advantage < 0 and ratio > C clip_ratio_c: 3.0 # Loss aggregation mode: "token-mean", "seq-mean-token-sum", or "seq-mean-token-mean" loss_agg_mode: token-mean # Entropy regularization coefficient in PPO loss entropy_coeff: 0 # Whether to use KL loss instead of KL reward penalty. True for GRPO use_kl_loss: false # Whether to use torch.compile() # oc.select: the default val for ref.use_torch_compile use_torch_compile: true # KL loss coefficient when use_kl_loss is enabled. For GRPO kl_loss_coef: 0.001 # Type of KL divergence loss. Options: "kl"(k1), "abs", "mse"(k2), "low_var_kl"(k3), "full" kl_loss_type: low_var_kl # Number of PPO epochs per batch ppo_epochs: 1 # Shuffle training data across PPO epochs shuffle: false # checkpoint configs checkpoint: # What to include in saved checkpoints # with 'hf_model' you can save whole model as hf format, now only use sharded model checkpoint to save space save_contents: ['model', 'optimizer', 'extra'] # For more flexibility, you can specify the contents to load from the checkpoint. # .xxx refers to the local variable xxx from the same level of hierarchy similar to python pkg load_contents: ${.save_contents} # optimizer configs optim: # Learning rate lr: 1e-6 # Warmup steps ratio (used if lr_warmup_steps is negative) lr_warmup_steps_ratio: 0.0 # Total training steps (must be overridden at runtime) total_training_steps: -1 # Weight decay weight_decay: 0.01 ================================================ FILE: verl_rl/verl/trainer/config/actor/dp_actor.yaml ================================================ # Format checks enforced on CI: # 1. Comments must appear above each field. # 2. There must be a blank line between each field. # 3. Inline comments (after a field on the same line) are not allowed. # 4. Indentation level is respected for nested fields. # defaults specify the default config from each component defaults: # dp actor config, inheriting from trainer/config/actor/actor.yaml - actor # load the reference default config, then apply the fields in the current yaml - _self_ # TODO(haibin.lin): switch to fsdp2 strategy: fsdp # Gradient clipping for actor updates, specific to the strategy. 
grad_clip: 1.0 # Sequence parallelism size for Ulysses-style model parallelism # oc.select: the default val for ref.ulysses_sequence_parallel_size ulysses_sequence_parallel_size: 1 # calculate entropy with chunking to reduce memory peak entropy_from_logits_with_chunking: False # recompute entropy entropy_checkpointing: False # optimizer configs optim: # Warmup steps; negative value delegates to lr_warmup_steps_ratio lr_warmup_steps: -1 # Minimum LR ratio for cosine schedule min_lr_ratio: 0.0 # Number of cosine cycles in LR schedule num_cycles: 0.5 # LR warmup style: "constant" or "cosine" warmup_style: constant # configs for FSDP fsdp_config: # policy for wrapping the model wrap_policy: # Minimum number of parameters to trigger wrapping a layer with FSDP min_num_params: 0 # Whether to offload model parameters to CPU (trades speed for memory) param_offload: false # Whether to offload optimizer state to CPU optimizer_offload: false # Only for FSDP2: offload param/grad/optimizer during train offload_policy: false # Only for FSDP2: Reshard after forward pass to reduce memory footprint reshard_after_forward: true # Number of GPUs in each FSDP shard group; -1 means auto fsdp_size: -1 # Only for FSDP1: FSDP1 configuration, prefetch the next forward-pass all-gather # before the current forward computation. forward_prefetch: False ================================================ FILE: verl_rl/verl/trainer/config/actor/megatron_actor.yaml ================================================ # megatron actor config, inheriting from trainer/config/actor/actor.yaml defaults: - actor # load the reference default config, then apply the fields in the current yaml - _self_ strategy: megatron data_loader_seed: null load_weight: True checkpoint: async_save: False optim: optimizer: adam clip_grad: 1.0 # initial learning rate for warmup, default to 0.0 lr_warmup_init: 0.0 # Prioritized. None, 0 or Negative values mean delegating to lr_warmup_steps_ratio. 
  lr_warmup_steps: null
  lr_decay_steps: null

  # select from constant/linear/cosine/inverse_square_root
  lr_decay_style: constant

  # minimum learning rate, default to 0.0
  min_lr: 0.0

  # select from constant/linear/cosine
  weight_decay_incr_style: constant

  # select from constant/exponential/cosine
  lr_wsd_decay_style: exponential
  lr_wsd_decay_steps: null

  # use checkpoint optimizer parameter scheduler
  use_checkpoint_opt_param_scheduler: False

megatron:
  param_offload: False
  grad_offload: False
  optimizer_offload: False
  tensor_model_parallel_size: 1
  expert_model_parallel_size: 1
  expert_tensor_parallel_size: null
  pipeline_model_parallel_size: 1
  virtual_pipeline_model_parallel_size: null
  context_parallel_size: 1
  sequence_parallel: True
  use_distributed_optimizer: True
  use_dist_checkpointing: False
  dist_checkpointing_path: null

  # oc.select: default val for ref.megatron.seed
  seed: 42

  # Allow to override Distributed Data Parallel (DDP) config
  override_ddp_config: {}

  # additional transformer config like: num_layers_in_first(/last)_pipeline_stage
  # oc.select: default val for ref.megatron.override_transformer_config
  override_transformer_config:
    # Recompute configuration, same as in megatron.training.arguments
    # default use minimal performance-interference recompute methods

    # Recompute granularity, choices: ["full", "selective"]
    recompute_granularity: null

    # Recompute modules, multiple choices: ["core_attn", "moe_act", "layernorm", "mla_up_proj", "mlp", "moe"]
    # Please use correct module in matched model
    recompute_modules: ["core_attn"]

    # 'uniform', 'block'
    # 'uniform' divides the total number of transformer layers and checkpoints the input activation of each chunk
    # 'block' checkpoints the specified number of layers per pipeline stage at the specified granularity
    recompute_method: null

    # 'full' will checkpoint the entire transformer layer and 'selective' only checkpoints memory intensive part of attention
    recompute_num_layers: null

  # oc.select: default val for ref.megatron.use_mbridge
  use_mbridge: False

# profile the actor model in `update_policy`
profile:
  # turn it on when you want to profile the actor model
  use_profile: False

  # list, you can specify the ranks to profile
  profile_ranks: null

  # start step in update_policy
  step_start: -1

  # end step
  step_end: -1

  # the path to save the profile result
  save_path: null


================================================
FILE: verl_rl/verl/trainer/config/algorithm.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from dataclasses import dataclass, field
from typing import Optional

from verl.base_config import BaseConfig


@dataclass
class KLControlConfig(BaseConfig):
    """Configuration for KL control.

    The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config.

    Args:
        type (str): Type of KL control. Can be "fixed" or "adaptive".
        kl_coef (float): Initial coefficient for KL penalty.
        horizon (int): Horizon value for adaptive controller.
target_kl (float): Target KL divergence for adaptive controller. """ _frozen_fields = ["type", "kl_coef", "horizon", "target_kl"] type: str = "fixed" kl_coef: float = 0.001 horizon: int = 10000 target_kl: float = 0.1 @dataclass class PFPPOConfig(BaseConfig): """Configuration for preference feedback PPO. The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config. Args: reweight_method (str): Method for reweighting samples. Can be "pow", "max_min", or "max_random". weight_pow (float): Power used for weight scaling in "pow" method. """ _frozen_fields = ["reweight_method", "weight_pow"] reweight_method: str = "pow" weight_pow: float = 2.0 @dataclass class FilterGroupsConfig(BaseConfig): """Configuration for filter groups (used in DAPO and Entropy). The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config. Args: enable (bool): Whether to enable filter groups. metric (Optional[str]): Metric to use for filtering: "acc", "score", "seq_reward", "seq_final_reward", etc. max_num_gen_batches (int): Non-positive values mean no upper limit. """ _frozen_fields = ["enable", "metric", "max_num_gen_batches"] enable: bool = False metric: Optional[str] = None max_num_gen_batches: int = 0 @dataclass class AlgoConfig(BaseConfig): """Configuration for the algorithm. The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config. Args: gamma (float): Discount factor for future rewards. lam (float): Trade-off between bias and variance in the GAE estimator. adv_estimator (str): Advantage estimator type: "gae", "grpo", "reinforce_plus_plus", etc. norm_adv_by_std_in_grpo (bool): Whether to normalize advantages by std (specific to GRPO). use_kl_in_reward (bool): Whether to enable in-reward KL penalty. kl_penalty (str): How to estimate KL divergence: "kl", "abs", "mse", "low_var_kl", or "full". kl_ctrl (KLControlConfig): KL control configuration. use_pf_ppo (bool): Whether to enable preference feedback PPO. pf_ppo (Optional[PFPPOConfig]): Preference feedback PPO settings. filter_groups (Optional[FilterGroupsConfig]): Filter groups configuration, used in DAPO and Entropy """ _frozen_fields = [ "gamma", "lam", "adv_estimator", "norm_adv_by_std_in_grpo", "use_kl_in_reward", "kl_penalty", "use_pf_ppo", ] gamma: float = 1.0 lam: float = 1.0 adv_estimator: str = "gae" norm_adv_by_std_in_grpo: bool = True use_kl_in_reward: bool = False kl_penalty: str = "kl" kl_ctrl: KLControlConfig = field(default_factory=KLControlConfig) use_pf_ppo: bool = False pf_ppo: Optional[PFPPOConfig] = None filter_groups: Optional[FilterGroupsConfig] = None ================================================ FILE: verl_rl/verl/trainer/config/config.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
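# A hedged construction sketch for the dataclass configs in this package
# (field names match algorithm.py above; the GRPO-style values are illustrative):
#
#     from verl.trainer.config import AlgoConfig, KLControlConfig
#
#     algo = AlgoConfig(
#         adv_estimator="grpo",
#         use_kl_in_reward=False,
#         kl_ctrl=KLControlConfig(type="fixed", kl_coef=0.001),
#     )
#     assert algo.gamma == 1.0  # unspecified fields keep their defaults
#     # names listed in _frozen_fields are intended to be immutable after init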
from dataclasses import dataclass, field from typing import Any, Optional from verl.base_config import BaseConfig @dataclass class CriticConfig(BaseConfig): """Configuration for critic model training. The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config. Args: rollout_n (int): Number of rollouts per update (mirrors actor rollout_n). strategy (str): Strategy used for critic model training (fsdp, fsdp2, megatron). optim (Dict[str, Any]): Optimizer configuration including lr, weight_decay, etc. model (Dict[str, Any]): Model configuration including path, tokenizer_path, etc. ppo_mini_batch_size (int): PPO mini-batch size per update. ppo_micro_batch_size (Optional[int]): Global micro batch size (deprecated). ppo_micro_batch_size_per_gpu (Optional[int]): Local per-GPU micro batch size. use_dynamic_bsz (bool): Whether to automatically adjust batch size at runtime. ppo_max_token_len_per_gpu (int): Max tokens per GPU in one PPO batch. forward_max_token_len_per_gpu (int): Max token length per GPU in forward pass. ppo_epochs (int): Number of PPO epochs per batch. shuffle (bool): Shuffle training data across PPO epochs. cliprange_value (float): PPO value function clipping range. loss_agg_mode (str): Loss aggregation mode. checkpoint (Dict[str, Any]): Checkpoint configuration. profiler (Dict[str, Any]): Profiler configuration. """ # For legacy reason configs related to batch_size are mutated in each role # In the future they will be added to frozen fields instead _frozen_fields = [ "rollout_n", "strategy", "use_dynamic_bsz", "ppo_max_token_len_per_gpu", "forward_max_token_len_per_gpu", "ppo_epochs", "shuffle", "cliprange_value", "loss_agg_mode", ] rollout_n: int = 1 strategy: str = "fsdp" optim: dict[str, Any] = field(default_factory=dict) model: dict[str, Any] = field(default_factory=dict) ppo_mini_batch_size: int = 1 ppo_micro_batch_size: Optional[int] = None ppo_micro_batch_size_per_gpu: Optional[int] = None use_dynamic_bsz: bool = False ppo_max_token_len_per_gpu: int = 32768 forward_max_token_len_per_gpu: int = 32768 ppo_epochs: int = 1 shuffle: bool = True cliprange_value: float = 0.5 loss_agg_mode: str = "token-mean" checkpoint: dict[str, Any] = field(default_factory=dict) profiler: dict[str, Any] = field(default_factory=dict) @dataclass class MegatronCriticConfig(CriticConfig): """Configuration for Megatron-based critic model training. The inheritance from CriticConfig provides all base critic configuration plus Megatron-specific settings. Args: nccl_timeout (int): NCCL timeout in seconds for distributed operations. megatron (Dict[str, Any]): Megatron-specific parallelism settings. load_weight (bool): Whether to load initial weights. data_loader_seed (Optional[int]): Seed for data loader. """ _frozen_fields = CriticConfig._frozen_fields + [ "nccl_timeout", "load_weight", "data_loader_seed", ] strategy: str = "megatron" nccl_timeout: int = 600 megatron: dict[str, Any] = field(default_factory=dict) load_weight: bool = True data_loader_seed: Optional[int] = None @dataclass class FSDPCriticConfig(CriticConfig): """Configuration for FSDP-based critic model training. The inheritance from CriticConfig provides all base critic configuration plus FSDP-specific settings. Args: forward_micro_batch_size (int): Forward-only batch size during inference (global). forward_micro_batch_size_per_gpu (int): Forward-only batch size during inference (per GPU). ulysses_sequence_parallel_size (int): Sequence parallelism size for Ulysses-style model parallelism. 
grad_clip (float): Gradient clipping for critic updates. """ _frozen_fields = CriticConfig._frozen_fields + [ "ulysses_sequence_parallel_size", "grad_clip", ] strategy: str = "fsdp" forward_micro_batch_size: int = 1 forward_micro_batch_size_per_gpu: int = 1 ulysses_sequence_parallel_size: int = 1 grad_clip: float = 1.0 ================================================ FILE: verl_rl/verl/trainer/config/critic/critic.yaml ================================================ # Number of rollouts per update (mirrors actor rollout_n) rollout_n: ${oc.select:actor_rollout_ref.rollout.n,1} # fsdp or fsdp2 strategy used for critic model training strategy: ??? # optimizer configs optim: # Warmup steps ratio; total steps will be injected at runtime lr_warmup_steps_ratio: 0.0 # Total training steps (must be overridden at runtime) total_training_steps: -1 # Weight decay weight_decay: 0.01 # model config for the critic model: # Path to pretrained model weights path: ~/models/deepseek-llm-7b-chat # Tokenizer path (defaults to actor's model path) tokenizer_path: ${oc.select:actor_rollout_ref.model.path,"~/models/deepseek-llm-7b-chat"} # Hugging Face config override override_config: {} # External model implementation (optional) external_lib: ${oc.select:actor_rollout_ref.model.external_lib,null} # Whether to trust remote code from Hugging Face models trust_remote_code: ${oc.select:actor_rollout_ref.model.trust_remote_code,false} # PPO mini-batch size per update ppo_mini_batch_size: ${oc.select:actor_rollout_ref.actor.ppo_mini_batch_size,256} # [Deprecated] Global micro batch size ppo_micro_batch_size: null # Local per-GPU micro batch size ppo_micro_batch_size_per_gpu: ${oc.select:.ppo_micro_batch_size,null} # Whether to automatically adjust batch size at runtime use_dynamic_bsz: ${oc.select:actor_rollout_ref.actor.use_dynamic_bsz,false} # Max tokens per GPU in one PPO batch (doubled for critic) ppo_max_token_len_per_gpu: 32768 # Max token length per GPU in forward pass forward_max_token_len_per_gpu: ${.ppo_max_token_len_per_gpu} # Number of PPO epochs per batch ppo_epochs: ${oc.select:actor_rollout_ref.actor.ppo_epochs,1} # Shuffle training data across PPO epochs shuffle: ${oc.select:actor_rollout_ref.actor.shuffle,false} # PPO value function clipping range cliprange_value: 0.5 # Loss aggregation mode: "token-mean", "seq-mean-token-sum", or "seq-mean-token-mean" loss_agg_mode: ${oc.select:actor_rollout_ref.actor.loss_agg_mode,token-mean} # checkpoint configs checkpoint: # What to include in saved checkpoints # with 'hf_model' you can save whole model as hf format, now only use sharded model checkpoint to save space save_contents: ['model', 'optimizer', 'extra'] # What to include when loading checkpoints load_contents: ${.save_contents} # profiler configs # the corresponding dataclass is verl.utils.profiler.ProfilerConfig. profiler: # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs in the entrypoint _target_: verl.utils.profiler.ProfilerConfig # True for each task has its own database, False for all tasks in one training step share one database. discrete: False # Whether to profile all ranks. all_ranks: False # The ranks that will be profiled. [] or [0,1,...] 
ranks: [] # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs in the entrypoint _target_: verl.trainer.config.CriticConfig ================================================ FILE: verl_rl/verl/trainer/config/critic/dp_critic.yaml ================================================ # Format checks enforced on CI: # 1. Comments must appear above each field. # 2. There must be a blank line between each field. # 3. Inline comments (after a field on the same line) are not allowed. # 4. Indentation level is respected for nested fields. # defaults specify the default config from each component defaults: # dp critic config, inheriting from trainer/config/critic/critic.yaml - critic # load the reference default config, then apply the fields in the current yaml - _self_ strategy: fsdp # optimizer configs optim: # Learning rate lr: 1e-5 # Minimum LR ratio for cosine schedule min_lr_ratio: null # LR warmup style: "constant" or "cosine" warmup_style: constant # model config for the critic model: # Whether to use shared memory for loading the model use_shm: False # Enable gradient checkpointing to save memory enable_gradient_checkpointing: True # Offload activations to CPU to reduce GPU memory usage enable_activation_offload: False # Use remove padding optimization (saves compute) use_remove_padding: False # FSDP-specific config fsdp_config: # Whether to offload model parameters to CPU param_offload: False # Whether to offload optimizer state to CPU optimizer_offload: False # Only for FSDP2: offload param/grad/optimizer during train offload_policy: False # Only for FSDP2: Reshard after forward pass to reduce memory footprint reshard_after_forward: True # Policy for wrapping layers with FSDP wrap_policy: # Minimum number of parameters to trigger wrapping min_num_params: 0 # Number of GPUs in each FSDP shard group; -1 means auto fsdp_size: -1 # Only for FSDP1: FSDP1 configuration, prefetch the next forward-pass all-gather # before the current forward computation.
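# Prefetching overlaps the next all-gather with the current compute at the cost of extra GPU memory; leave it False when memory is tight.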
forward_prefetch: False # Set to positive value to enable LoRA (e.g., 32) lora_rank: 0 # LoRA scaling factor lora_alpha: 16 # LoRA target modules: "all-linear" or list of linear projection layers target_modules: all-linear # Forward-only batch size during inference (global) forward_micro_batch_size: ${oc.select:.ppo_micro_batch_size,null} # Forward-only batch size during inference (per GPU) forward_micro_batch_size_per_gpu: ${oc.select:.ppo_micro_batch_size_per_gpu,null} # Sequence parallelism size for Ulysses-style model parallelism ulysses_sequence_parallel_size: 1 # Gradient clipping for critic updates grad_clip: 1.0 # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs in the entrypoint _target_: verl.trainer.config.FSDPCriticConfig ================================================ FILE: verl_rl/verl/trainer/config/critic/megatron_critic.yaml ================================================ # defaults specify the default config from each component defaults: # megatron critic config, inheriting from trainer/config/critic/critic.yaml - critic # load the reference default config, then apply the fields in the current yaml - _self_ strategy: megatron # seconds; torch's default is 10 minutes. Set a larger value if you have long-running operations, e.g. a 32B or 72B model using megatron nccl_timeout: 600 # optimizer configs optim: # select optimizer, default is Adam optimizer: adam # Learning rate lr: 1e-6 # Clip gradients norm clip_grad: 1.0 # initial learning rate for warmup, default to 0.0 lr_warmup_init: 0.0 # Takes priority; null, 0 or negative values delegate to lr_warmup_steps_ratio. lr_warmup_steps: null lr_decay_steps: null # select from constant/linear/cosine/inverse_square_root lr_decay_style: linear # minimum learning rate, default to 0.0 min_lr: 0.0 # select from constant/linear/cosine weight_decay_incr_style: constant # select from constant/exponential/cosine lr_wsd_decay_style: exponential # number of steps for the WSD (warmup-stable-decay) decay stage lr_wsd_decay_steps: null # whether to use the optimizer parameter scheduler saved in the checkpoint use_checkpoint_opt_param_scheduler: False # model config for the critic model: # override default empty mapping override_config: model_config: {} moe_config: freeze_moe_router: False # megatron-specific parallelism settings megatron: # Whether to offload model parameters to CPU param_offload: False # Whether to offload gradients to CPU grad_offload: False # Whether to offload optimizer state to CPU optimizer_offload: False # size of tensor model parallel group tensor_model_parallel_size: 1 # size of expert model parallel group expert_model_parallel_size: 1 # size of expert tensor parallel group expert_tensor_parallel_size: null # size of pipeline model parallel group pipeline_model_parallel_size: 1 # size of virtual pipeline model parallel group virtual_pipeline_model_parallel_size: null # size of context parallel group context_parallel_size: 1 # Whether to use sequence parallelism sequence_parallel: True # Whether to use distributed optimizer use_distributed_optimizer: True # Whether to use distributed checkpointing use_dist_checkpointing: False # Path for distributed checkpointing dist_checkpointing_path: null # Random seed for Megatron seed: ${oc.select:actor_rollout_ref.actor.megatron.seed,42} # Allow overriding the Distributed Data Parallel (DDP) config override_ddp_config: ${oc.select:actor_rollout_ref.actor.megatron.override_ddp_config,{}} # Transformer config overrides for Megatron override_transformer_config:
${oc.select:actor_rollout_ref.actor.megatron.override_transformer_config,{}} # Whether to use mBridge communications use_mbridge: ${oc.select:actor_rollout_ref.actor.megatron.use_mbridge,False} # Whether to load initial weights load_weight: True # seed for data loader data_loader_seed: ${oc.select:actor_rollout_ref.actor.data_loader_seed,null} # Asynchronous checkpoint saving checkpoint: async_save: False # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs in the entrypoint _target_: verl.trainer.config.MegatronCriticConfig ================================================ FILE: verl_rl/verl/trainer/config/data/legacy_data.yaml ================================================ # Tokenizer class or path. If null, it will be inferred from the model. tokenizer: null # Whether to use shared memory for data loading. use_shm: False # Training set parquet. Can be a list or a single file. # The program will read all files into memory, so it can't be too large (< 100GB). # The path can be either a local path or an HDFS path. # For HDFS path, we provide utils to download it to DRAM and convert it to a local path. train_files: ~/data/rlhf/gsm8k/train.parquet # Validation parquet. Can be a list or a single file. val_files: ~/data/rlhf/gsm8k/test.parquet # The field in the dataset where the prompt is located. Default is 'prompt'. prompt_key: prompt # The field used to select the reward function (if using different ones per example). reward_fn_key: data_source # Maximum prompt length. All prompts will be left-padded to this length. # An error will be reported if the length is too long. # oc.select: default val for rollout.prompt_length max_prompt_length: 512 # Maximum response length. Rollout in RL algorithms (e.g. PPO) generates up to this length. # oc.select: default val for rollout.response_length max_response_length: 512 # Batch size sampled for one training iteration of different RL algorithms. train_batch_size: 1024 # Batch size used during validation. Can be null. val_batch_size: null # Whether to return the original input_ids without adding chat template. # This is used when the reward model's chat template differs from the policy. # If using a model-based RM with different templates, this should be True. return_raw_input_ids: False # Whether to return the original chat (prompt) without applying chat template. return_raw_chat: False # Whether to return the full prompt with chat template. return_full_prompt: False # Whether to shuffle the data in the dataloader. shuffle: True # num dataloader workers dataloader_num_workers: 8 # Whether to shuffle the validation set. validation_shuffle: False # Whether to filter overlong prompts. filter_overlong_prompts: False # Number of workers for filtering overlong prompts. # For large-scale datasets, filtering can be time-consuming. # Use multiprocessing to speed up. Default is 1. filter_overlong_prompts_workers: 1 # Truncate the input_ids or prompt if they exceed max_prompt_length. # Options: 'error', 'left', 'right', 'middle'. Default is 'error'. truncation: error # The field in the multi-modal dataset where the image is located. Default is 'images'. image_key: images # The field in the multi-modal dataset where the video is located. video_key: videos # If the remote tokenizer has a Python file, this flag determines whether to allow using it. trust_remote_code: False # Optional: specify a custom dataset class path and name if overriding default loading behavior. 
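# A minimal sketch of what such a class could look like (the constructor
# signature below is an illustrative assumption, not the exact verl interface):
#
#   from torch.utils.data import Dataset
#
#   class MyCustomDataset(Dataset):
#       def __init__(self, data_files, tokenizer, config):
#           ...  # read the parquet files and keep a tokenized view
#       def __getitem__(self, idx):
#           ...  # return a dict containing at least the prompt_key field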
custom_cls: # The path to the file containing your customized dataset class. If not specified, the pre-implemented dataset will be used. path: null # The name of the dataset class within the specified file. name: null # Whether to return multi-modal inputs in the dataset. Set to False if rollout generates new multi-modal inputs. return_multi_modal_inputs: True # settings related to data sampler sampler: # the path to the module containing a curriculum class which implements the # AbstractSampler interface class_path: null # the name of the curriculum class like `MySampler` class_name: null # Data generation configuration for augmenting the dataset. datagen: # The path to the file containing your customized data generation class. # E.g. 'pkg://verl.experimental.dynamic_dataset.dynamicgen_dataset' path: null # The class name of the data generation class within the specified file. # E.g. 'MockDataGenerator' name: null ================================================ FILE: verl_rl/verl/trainer/config/evaluation.yaml ================================================ data: path: /tmp/math_Qwen2-7B-Instruct.parquet prompt_key: prompt response_key: responses data_source_key: data_source reward_model_key: reward_model custom_reward_function: path: null name: compute_score ray_init: num_cpus: null # `null` means using all CPUs, which might cause a hang if CPUs are limited in systems like SLURM; in that case, set it to the allowed number. timeline_json_file: null ================================================ FILE: verl_rl/verl/trainer/config/generation.yaml ================================================ trainer: nnodes: 1 n_gpus_per_node: 8 device: cuda data: path: ~/data/rlhf/math/test.parquet prompt_key: prompt n_samples: 5 output_path: /opt/tiger/math_Qwen2-7B-Instruct.parquet batch_size: 128 model: path: ~/models/Qwen2-7B-Instruct external_lib: null rollout: name: vllm mode: sync # sync: LLM, async: AsyncLLM temperature: 1.0 top_k: 50 # 0 for hf rollout, -1 for vllm rollout top_p: 0.7 prompt_length: 1536 response_length: 512 # for vllm rollout dtype: bfloat16 # should align with FSDP gpu_memory_utilization: 0.5 ignore_eos: False enforce_eager: True free_cache_engine: True load_format: dummy_dtensor tensor_model_parallel_size: 1 max_num_batched_tokens: 8192 max_model_len: null max_num_seqs: 1024 log_prob_micro_batch_size: null # will be deprecated, use log_prob_micro_batch_size_per_gpu log_prob_micro_batch_size_per_gpu: 8 # for hf rollout do_sample: True disable_log_stats: True enable_chunked_prefill: True n: 1 # support logging rollout prob for debugging purpose calculate_log_probs: False actor: strategy: fsdp # This is for backward-compatibility ulysses_sequence_parallel_size: 1 # sp size entropy_from_logits_with_chunking: False # calculate entropy with chunking to reduce memory peak entropy_checkpointing: False # recompute entropy fsdp_config: fsdp_size: -1 forward_prefetch: False # FSDP1 forward_prefetch configuration ray_init: num_cpus: null # `null` means using all CPUs, which might cause a hang if CPUs are limited in systems like SLURM; in that case, set it to the allowed number. timeline_json_file: null ================================================ FILE: verl_rl/verl/trainer/config/npu_profile/npu_profile.yaml ================================================ # Options for the npu profiler options: # Storage path of collected data. save_path: ./profiler_data # Collection level, optional values: level_none, level0, level1, level2. level: level1 # Whether to enable memory analysis.
with_memory: False # Whether to record tensor shape. record_shapes: False # Whether to record Device-side performance data. with_npu: True # Whether to record Host-side performance data. with_cpu: True # Whether to record Python call stack information. with_module: False # Whether to record operator call stack information. with_stack: False # Whether to automatically parse the data. analysis: True ================================================ FILE: verl_rl/verl/trainer/config/ppo_megatron_trainer.yaml ================================================ # specify the default per-component configs defaults: # @.: # actor_rollout_ref.actor: trainer/config/actor/megatron_actor.yaml - actor@actor_rollout_ref.actor: megatron_actor # trainer.npu_profile: trainer/config/npu_profile/npu_profile.yaml - npu_profile@trainer.npu_profile: npu_profile # data: trainer/config/data/legacy_data.yaml - data@data: legacy_data # load the reference default config, then apply the fields in the current yaml # Reference model config. # Reference model will be enabled when actor.use_kl_loss and/or algorithm.use_kl_in_reward is True. - ref@actor_rollout_ref.ref: megatron_ref # Rollout model config. - rollout@actor_rollout_ref.rollout: rollout # Critic model config. - critic@critic: megatron_critic # Reward model config. - reward_model@reward_model: megatron_reward_model - _self_ actor_rollout_ref: hybrid_engine: True nccl_timeout: 600 # seconds; torch's default is 10 minutes. Set a larger value if you have long-running operations, e.g. a 32B or 72B model using megatron model: path: ~/models/deepseek-llm-7b-chat custom_chat_template: null external_lib: null override_config: model_config: {} moe_config: freeze_moe_router: False use_fused_kernels: False # Whether to use custom fused kernels (PostProcessing, for memory efficiency) trust_remote_code: False rollout: # May get higher throughput when set to True. When activated, please increase max_num_batched_tokens or decrease max_model_len. enable_chunked_prefill: False load_format: dummy_megatron tensor_model_parallel_size: 1 layer_name_map: qkv_layer_name: qkv gate_proj_layer_name: gate_up profiler: _target_: verl.utils.profiler.ProfilerConfig discrete: False all_ranks: False ranks: [] custom_reward_function: path: null name: compute_score algorithm: # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs in the entrypoint _target_: verl.trainer.config.AlgoConfig gamma: 1.0 lam: 1.0 adv_estimator: gae norm_adv_by_std_in_grpo: True use_kl_in_reward: False kl_penalty: kl # how to estimate kl divergence kl_ctrl: # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs in the entrypoint _target_: verl.trainer.config.KLControlConfig type: fixed kl_coef: 0.001 horizon: 10000 target_kl: 0.1 use_pf_ppo: False pf_ppo: # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs in the entrypoint _target_: verl.trainer.config.PFPPOConfig reweight_method: pow # ["pow", "max_min", "max_random"] weight_pow: 2.0 trainer: balance_batch: True total_epochs: 30 total_training_steps: null profile_steps: null # [1,2,5] or [] or null project_name: verl_examples experiment_name: gsm8k logger: ['console', 'wandb'] log_val_generations: 0 nnodes: 1 n_gpus_per_node: 8 save_freq: -1 esi_redundant_time: 0 # auto: find the last ckpt to resume.
If none is found, start from scratch resume_mode: auto # or disable, or resume_path if resume_from_path is set resume_from_path: null del_local_ckpt_after_load: False val_before_train: True test_freq: -1 critic_warmup: 0 default_hdfs_dir: null default_local_dir: checkpoints/${trainer.project_name}/${trainer.experiment_name} max_actor_ckpt_to_keep: null max_critic_ckpt_to_keep: null # The timeout for ray worker group to wait for the register center to be ready ray_wait_register_center_timeout: 300 device: cuda # see ppo_trainer.yaml for more details controller_nsight_options: trace: "cuda,nvtx,cublas,ucx" cuda-memory-usage: "true" cuda-graph-trace: "graph" worker_nsight_options: trace: "cuda,nvtx,cublas,ucx" cuda-memory-usage: "true" cuda-graph-trace: "graph" capture-range: "cudaProfilerApi" capture-range-end: null kill: none ray_init: num_cpus: null # `null` means using all CPUs, which might cause a hang if CPUs are limited in systems like SLURM; in that case, set it to the allowed number. timeline_json_file: null ================================================ FILE: verl_rl/verl/trainer/config/ppo_trainer.yaml ================================================ # Format checks enforced on CI: # 1. Comments must appear above each field. # 2. There must be a blank line between each field. # 3. Inline comments (after a field on the same line) are not allowed. # 4. Indentation level is respected for nested fields. # specify the default per-component configs defaults: # @.: # actor_rollout_ref.actor: trainer/config/actor/dp_actor.yaml - actor@actor_rollout_ref.actor: dp_actor # trainer.npu_profile: trainer/config/npu_profile/npu_profile.yaml - npu_profile@trainer.npu_profile: npu_profile # data: trainer/config/data/legacy_data.yaml - data@data: legacy_data # Reference model config. # Reference model will be enabled when actor.use_kl_loss and/or algorithm.use_kl_in_reward is True. - ref@actor_rollout_ref.ref: dp_ref # Rollout model config. - rollout@actor_rollout_ref.rollout: rollout # Critic model config. - critic@critic: dp_critic # Reward model config. - reward_model@reward_model: dp_reward_model # load the reference default config, then apply the fields in the current yaml # the self config overrides anything above - _self_ # config for actor, rollout and reference model actor_rollout_ref: # Whether it's a hybrid engine, currently only supports hybrid engine hybrid_engine: true # common configs for the model model: # Huggingface model path. This can be either local path or HDFS path. path: ~/models/deepseek-llm-7b-chat # Custom chat template for the model. custom_chat_template: null # Whether to use shared memory (SHM) for accelerating the loading of model weights use_shm: false # Additional Python packages to register huggingface models/tokenizers. external_lib: null # Used to override model's original configurations, mainly dropout override_config: {} # Enable gradient checkpointing for actor enable_gradient_checkpointing: true # Enable activation offloading for actor enable_activation_offload: false # Whether to remove padding tokens in inputs during training use_remove_padding: false # Set to positive value to enable LoRA (e.g., 32) lora_rank: 0 # LoRA scaling factor lora_alpha: 16 # Target modules to apply LoRA. Options: "all-linear" (not recommended for VLMs) or # [q_proj,k_proj,v_proj,o_proj,gate_proj,up_proj,down_proj] target_modules: all-linear # Exclude modules from applying LoRA. Similar usage to target_modules in PEFT.
# Example: '.*visual.*' for excluding the ViT in Qwen2.5-VL, as currently vllm does not support ViT Lora. exclude_modules: null # Whether to use Liger for linear layer fusion use_liger: false # Whether to use custom fused kernels (e.g., FlashAttention, fused MLP) use_fused_kernels: false # Options for fused kernels. If use_fused_kernels is true, this will be used. fused_kernel_options: # Implementation backend for fused kernels. Options: "triton" or "torch". impl_backend: torch # Whether to enable loading a remote code model trust_remote_code: false # Rollout model config. rollout: # May get higher throughput when set to True. When activated, please increase max_num_batched_tokens or decrease max_model_len. enable_chunked_prefill: True # Which loader to use for rollout model weights: dummy_dtensor, hf, megatron, etc. # safetensors (for huge models; also set use_shm=True); dummy_dtensor: randomly initialize model weights load_format: dummy_dtensor # for huge models, layered summon can save memory (prevents OOM) but makes it slower layered_summon: False # profiler configs profiler: # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs in the entrypoint _target_: verl.utils.profiler.ProfilerConfig # True: each task has its own database; False: all tasks in one training step share one database. discrete: False # Whether to profile all ranks. all_ranks: False # The ranks that will be profiled. [] or [0,1,...] ranks: [] # custom reward function definition custom_reward_function: # The path to the file containing your customized reward function. # If not specified, pre-implemented reward functions will be used. path: null # The name of the reward function within the specified file. Default is 'compute_score'. name: compute_score # config for the algorithm algorithm: # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs in the entrypoint _target_: verl.trainer.config.AlgoConfig # Discount factor for future rewards gamma: 1.0 # Trade-off between bias and variance in the GAE estimator lam: 1.0 # Advantage estimator type: "gae", "grpo", "reinforce_plus_plus", etc.
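# Note: grpo computes group-relative advantages over multiple samples per prompt, so pair it with actor_rollout_ref.rollout.n > 1 (see rollout.yaml); gae relies on the critic's value estimates instead.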
adv_estimator: gae # Whether to normalize advantages by std (specific to GRPO) norm_adv_by_std_in_grpo: True # Whether to enable in-reward KL penalty use_kl_in_reward: False # How to estimate KL divergence: "kl", "abs", "mse", "low_var_kl", or "full" kl_penalty: kl # KL control configuration kl_ctrl: # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs in the entrypoint _target_: verl.trainer.config.KLControlConfig # KL control type: "fixed" or "adaptive" type: fixed # Initial coefficient for KL penalty kl_coef: 0.001 # Horizon value for adaptive controller (if enabled) horizon: 10000 # Target KL divergence (used for adaptive controller) target_kl: 0.1 # Whether to enable preference feedback PPO use_pf_ppo: False # Preference feedback PPO settings pf_ppo: # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs in the entrypoint _target_: verl.trainer.config.PFPPOConfig # Method for reweighting samples: "pow", "max_min", or "max_random" reweight_method: pow # Power used for weight scaling in "pow" method weight_pow: 2.0 # config for the trainer trainer: # Whether to balance batch sizes across distributed workers balance_batch: True # Number of epochs in training total_epochs: 30 # Total training steps (can be set explicitly or derived from epochs) total_training_steps: null # The steps that will be profiled. null means no profiling. null or [1,2,5,...] profile_steps: null # controller Nvidia Nsight Systems Options. Must be set when profile_steps is not null. ## reference https://docs.nvidia.com/nsight-systems/UserGuide/index.html ## reference https://docs.ray.io/en/latest/ray-observability/user-guides/profiling.html controller_nsight_options: # Select the API(s) to be traced. trace: "cuda,nvtx,cublas,ucx" # Track the GPU memory usage by CUDA kernels. Must be string type "true" or "false". cuda-memory-usage: "true" # CUDA graphs will be traced as a whole cuda-graph-trace: "graph" # worker Nvidia Nsight Systems Options. Must be set when profile_steps is not null. worker_nsight_options: # Select the API(s) to be traced. trace: "cuda,nvtx,cublas,ucx" # Track the GPU memory usage by CUDA kernels. Must be string type "true" or "false". cuda-memory-usage: "true" # CUDA graphs will be traced as a whole cuda-graph-trace: "graph" # Profiling happens only in the range between torch.cuda.profiler.start and stop. Do not change this config. capture-range: "cudaProfilerApi" # Specify the desired behavior when a capture range ends. # In verl we need the torch.cuda.profiler.start/stop pair to repeat n times. # valid values are "repeat-shutdown:n" or null. # For normal whole step profiling, n = len(profile_steps); # but for discrete profiling, n = len(profile_steps) * Number(subtasks). # Or you can just leave it null and the program will use n = len(profile_steps) * 6; capture-range-end: null # Send signal to the target application's process group. We let the program exit by itself. kill: none # Project name for experiment tracking (e.g., wandb) project_name: verl_examples # Experiment name for run identification in tracking tools experiment_name: gsm8k # Logging backends to use: "console", "wandb", etc.
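# For example, logger: [ 'console' ] disables wandb tracking for quick local runs.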
logger: [ 'console', 'wandb' ] # Number of generations to log during validation log_val_generations: 0 # Directory for logging rollout data; no dump if null rollout_data_dir: null # Directory for logging validation data; no dump if null validation_data_dir: null # Number of nodes used in the training nnodes: 1 # Number of GPUs per node n_gpus_per_node: 8 # Save frequency (by iteration) for model checkpoints save_freq: -1 # ESI refers to the elastic server instance used during training, similar to the training plan. For example, # if you purchase 10 hours of computing power, the ESI will automatically shut down after 10 hours of training. # To ensure a checkpoint is saved before ESI shuts down, the system will start saving a checkpoint in advance. # The advance time is calculated as: Advance Time = Longest historical step duration + Checkpoint save duration + esi_redundant_time. # Here, esi_redundant_time is a user-defined value that further extends the advance time for added safety. esi_redundant_time: 0 # Resume mode: "auto", "disable", or "resume_path" # "auto": resume from last checkpoint if available # "disable": start from scratch # "resume_path": resume from a user-defined path resume_mode: auto # Path to resume training from (only used when resume_mode is "resume_path") resume_from_path: null # Whether to run validation before training begins val_before_train: True # Whether to run validation only val_only: False # Validation frequency (in training iterations) test_freq: -1 # Number of iterations to warm up the critic before updating policy critic_warmup: 0 # Default path to distributed filesystem for saving checkpoints default_hdfs_dir: null # Whether to delete local checkpoints after loading del_local_ckpt_after_load: False # Default local directory for saving checkpoints default_local_dir: checkpoints/${trainer.project_name}/${trainer.experiment_name} # Maximum number of actor checkpoints to keep max_actor_ckpt_to_keep: null # Maximum number of critic checkpoints to keep max_critic_ckpt_to_keep: null # Timeout (in seconds) for Ray worker to wait for registration ray_wait_register_center_timeout: 300 # Device to run training on (e.g., "cuda", "cpu") device: cuda # whether to use legacy worker implementation # mode: "auto", "enable", or "disable" use_legacy_worker_impl: auto # configs related to ray initialization ray_init: # Number of CPUs for Ray. Use a fixed number instead of null when using SLURM. num_cpus: null # Path to save Ray timeline JSON for performance profiling timeline_json_file: null ================================================ FILE: verl_rl/verl/trainer/config/ref/dp_ref.yaml ================================================ # defaults specify the default config from each component defaults: # dp ref config, inheriting from trainer/config/ref/ref.yaml - ref # load the reference default config, then apply the fields in the current yaml - _self_ # config for FSDP strategy fsdp_config: # whether to offload parameters in FSDP param_offload: False # whether to perform reshard after model forward to save memory. # only for fsdp2, [True, False, int between 1 and fsdp_size] reshard_after_forward: True # Only for FSDP1: FSDP1 configuration, prefetch the next forward-pass all-gather # before the current forward computation. 
forward_prefetch: False # the wrap policy for FSDP model wrap_policy: # minimum number of params in a wrapped module min_num_params: 0 # sequence parallel size # same as actor_rollout_ref.actor.ulysses_sequence_parallel_size if it exists, otherwise 1 ulysses_sequence_parallel_size: ${oc.select:actor_rollout_ref.actor.ulysses_sequence_parallel_size,1} # calculate entropy with chunking to reduce memory peak entropy_from_logits_with_chunking: False # recompute entropy entropy_checkpointing: False ================================================ FILE: verl_rl/verl/trainer/config/ref/megatron_ref.yaml ================================================ # megatron ref config, inheriting from trainer/config/ref/ref.yaml defaults: - ref # load the reference default config, then apply the fields in the current yaml - _self_ strategy: megatron megatron: param_offload: False tensor_model_parallel_size: 1 expert_model_parallel_size: 1 expert_tensor_parallel_size: null pipeline_model_parallel_size: 1 virtual_pipeline_model_parallel_size: null # change VPP interface for parallelism tests context_parallel_size: 1 sequence_parallel: True use_distributed_optimizer: False use_dist_checkpointing: False dist_checkpointing_path: null seed: ${oc.select:actor_rollout_ref.actor.megatron.seed,42} override_transformer_config: ${oc.select:actor_rollout_ref.actor.megatron.override_transformer_config,{}} use_mbridge: ${oc.select:actor_rollout_ref.actor.megatron.use_mbridge,False} profile: use_profile: False profile_ranks: null step_start: -1 step_end: -1 save_path: null load_weight: True ================================================ FILE: verl_rl/verl/trainer/config/ref/ref.yaml ================================================ # actor_rollout_ref.ref: FSDP config same as actor. For models larger than 7B, it’s recommended to turn on offload for ref by default strategy: ${actor_rollout_ref.actor.strategy} # whether to enable torch.compile # same as actor_rollout_ref.actor.use_torch_compile if it exists, otherwise true use_torch_compile: ${oc.select:actor_rollout_ref.actor.use_torch_compile,true} # [Will be deprecated, use log_prob_micro_batch_size_per_gpu] # The batch size for one forward pass in the computation of log_prob. Global batch size. log_prob_micro_batch_size: null # The batch size for one forward pass in the computation of log_prob. Local batch size per GPU. log_prob_micro_batch_size_per_gpu: null # enable dynamic batch size (sequence packing) for log_prob computation # same as actor_rollout_ref.actor.use_dynamic_bsz if it exists, otherwise false log_prob_use_dynamic_bsz: ${oc.select:actor_rollout_ref.actor.use_dynamic_bsz,false} # the max token length per GPU # same as actor_rollout_ref.actor.ppo_max_token_len_per_gpu if it exists, otherwise 16384 log_prob_max_token_len_per_gpu: ${oc.select:actor_rollout_ref.actor.ppo_max_token_len_per_gpu,16384} ================================================ FILE: verl_rl/verl/trainer/config/reward_model/dp_reward_model.yaml ================================================ # Format checks enforced on CI: # 1. Comments must appear above each field. # 2. There must be a blank line between each field. # 3. Inline comments (after a field on the same line) are not allowed. # 4. Indentation level is respected for nested fields.
# defaults specify the default config from each component defaults: # dp reward model config, inheriting from trainer/config/reward_model/reward_model.yaml - reward_model # load the reference default config, then apply the fields in the current yaml - _self_ strategy: fsdp model: # Whether to use shared memory for loading the model use_shm: False # Use remove padding optimization (saves compute) use_remove_padding: False # Whether to use fused reward kernels for speedup use_fused_kernels: ${actor_rollout_ref.model.use_fused_kernels} # FSDP-specific config fsdp_config: # Policy for wrapping layers with FSDP wrap_policy: # Minimum number of parameters to trigger wrapping min_num_params: 0 # Whether to offload model parameters to CPU param_offload: False # Only for FSDP2: Reshard after forward pass to reduce memory footprint reshard_after_forward: True # Number of GPUs in each FSDP shard group; -1 means auto fsdp_size: -1 # Only for FSDP1: FSDP1 configuration, prefetch the next forward-pass all-gather # before the current forward computation. forward_prefetch: False # Sequence parallelism size for Ulysses-style model parallelism ulysses_sequence_parallel_size: 1 ================================================ FILE: verl_rl/verl/trainer/config/reward_model/megatron_reward_model.yaml ================================================ # defaults specify the default config from each component defaults: # megatron reward model config, inheriting from trainer/config/reward_model/reward_model.yaml - reward_model # load the reference default config, then apply the fields in the current yaml - _self_ strategy: megatron # seconds; torch's default is 10 minutes. Set a larger value # if you have long-running operations, e.g. a 32B or 72B model using megatron nccl_timeout: 600 # Megatron parallelism & checkpointing config megatron: # Whether to offload model parameters to CPU param_offload: False # Number of GPUs in tensor model parallel group tensor_model_parallel_size: 1 # Number of GPUs in expert model parallel group expert_model_parallel_size: 1 # Expert tensor parallel size expert_tensor_parallel_size: null # Number of pipeline model parallel stages pipeline_model_parallel_size: 1 # change VPP interface for parallelism tests virtual_pipeline_model_parallel_size: null # Context parallel size context_parallel_size: 1 # Whether to use sequence parallelism sequence_parallel: True # Whether to use distributed optimizer use_distributed_optimizer: False # Whether to enable distributed checkpointing use_dist_checkpointing: False # Path for distributed checkpoints dist_checkpointing_path: null # RNG seed for megatron seed: ${oc.select:actor_rollout_ref.actor.megatron.seed,42} # Any overrides to transformer config override_transformer_config: ${oc.select:actor_rollout_ref.actor.megatron.override_transformer_config,{}} # Whether to use mbridge for faster comms use_mbridge: ${oc.select:actor_rollout_ref.actor.megatron.use_mbridge,False} # Whether to load weights (default True) load_weight: True ================================================ FILE: verl_rl/verl/trainer/config/reward_model/reward_model.yaml ================================================ # configs for the reward model # Whether to enable reward model. If False, we compute the reward only with the user-defined reward functions. # In GSM8K and Math examples, we disable reward model. # For RLHF alignment example using full_hh_rlhf, we utilize reward model to assess the responses.
# If False, the following parameters are not effective enable: False # FSDP strategy: "fsdp" or "fsdp2" strategy: ??? # model config for reward scoring model: # Input tokenizer. If the reward model’s chat template is inconsistent with the policy, # we need to first decode to plaintext, then apply the rm’s chat_template, # then score with RM. If the chat templates are identical, this can be set to null. input_tokenizer: ${actor_rollout_ref.model.path} # RM’s HDFS path or local path. Note that RM only supports AutoModelForSequenceClassification. # Other model types need to define their own RewardModelWorker and pass it from the code. path: ~/models/FsfairX-LLaMA3-RM-v0.1 # External model implementation (optional) external_lib: ${actor_rollout_ref.model.external_lib} # Whether to enable loading a remote code model, default to False trust_remote_code: False # [Deprecated] Global micro batch size # will be deprecated, use micro_batch_size_per_gpu micro_batch_size: null # Local per-GPU micro batch size micro_batch_size_per_gpu: null # Maximum sequence length to process for scoring max_length: null # Whether to dynamically adjust batch size at runtime use_dynamic_bsz: ${critic.use_dynamic_bsz} # Maximum number of tokens per GPU in one forward pass forward_max_token_len_per_gpu: ${critic.forward_max_token_len_per_gpu} # Reward Manager. This defines the mechanism of computing rule-based reward and handling different reward sources. # Default is naive. If all verification functions are multiprocessing-safe, # the reward manager can be set to prime for parallel verification. reward_manager: naive # Whether to launch the custom reward function asynchronously during log_prob computation # (the custom reward function is executed async on CPU) launch_reward_fn_async: False # Cloud/local sandbox fusion configuration for custom reward logic sandbox_fusion: # Cloud/local function URL for sandbox execution url: null # Max concurrent requests allowed to sandbox max_concurrent: 64 # Max memory limit for each sandbox process in MB memory_limit_mb: 1024 # profiler configs profiler: # Required when using verl.utils.omega_conf_to_dataclass to instantiate dataclass configs in the entrypoint _target_: verl.utils.profiler.ProfilerConfig # True: each task has its own database; False: all tasks in one training step share one database. discrete: False # Whether to profile all ranks. all_ranks: False # The ranks that will be profiled. [] or [0,1,...] ranks: [] ================================================ FILE: verl_rl/verl/trainer/config/rollout/rollout.yaml ================================================ # actor_rollout_ref.rollout.name: hf/vllm/sglang. The default value will be removed in the future name: vllm # sync: LLM, async: AsyncLLM mode: sync # Sampling temperature for rollout. temperature: 1.0 # Top-k sampling parameter. -1 for vLLM rollout, 0 for HF rollout. top_k: -1 # Top-p sampling parameter. Default 1.0. top_p: 1 # typically the same as data max prompt length # same as data.max_prompt_length if it exists prompt_length: ${oc.select:data.max_prompt_length,512} # typically the same as data max response length # same as data.max_response_length if it exists response_length: ${oc.select:data.max_response_length,512} # for vllm rollout # Rollout model parameters type. Align with actor model's FSDP/Megatron type. dtype: bfloat16 # Fraction of GPU memory used by vLLM/SGLang for KV cache.
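# With the colocated hybrid engine, the memory not reserved here must still hold the training state, so raising this value too far can cause OOM during training steps.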
gpu_memory_utilization: 0.5 # Whether to ignore EOS and continue generating after EOS is hit. ignore_eos: False # Whether to disable CUDA graph. Default True to allow cache freeing. enforce_eager: True # Whether to free engine KVCache after generation. Set enforce_eager=True when enabled. free_cache_engine: True # TP size for rollout. Not effective for hf tensor_model_parallel_size: 2 # max number of tokens in a batch max_num_batched_tokens: 8192 # max length for rollout max_model_len: null # max number of concurrent sequences max_num_seqs: 1024 # [Will be deprecated, use log_prob_micro_batch_size_per_gpu] The batch size for one forward pass in the computation of log_prob. Global batch size. log_prob_micro_batch_size: null # The batch size for one forward pass in the computation of log_prob. Local batch size per GPU. log_prob_micro_batch_size_per_gpu: null # enable dynamic batch size (sequence packing) for log_prob computation # same as actor_rollout_ref.actor.use_dynamic_bsz if it exists, otherwise false log_prob_use_dynamic_bsz: ${oc.select:actor_rollout_ref.actor.use_dynamic_bsz,false} # max token length for log_prob computation # same as actor_rollout_ref.actor.ppo_max_token_len_per_gpu if it exists, otherwise 16384 log_prob_max_token_len_per_gpu: ${oc.select:actor_rollout_ref.actor.ppo_max_token_len_per_gpu,16384} # disable logging statistics disable_log_stats: True # for hf rollout # Whether to sample during training rollout. False uses greedy sampling. do_sample: True # number of responses (i.e. num sample times). > 1 for grpo n: 1 # Whether to wake up the inference engine in multiple stages (wake up model weights first, then resume the kv cache) multi_stage_wake_up: false # Extra inference engine arguments (vllm, sglang). engine_kwargs: # for vllm vllm: # Swap space (in GB) used by inference engine. null uses default (e.g., 4 GB). swap_space: null # Whether to disable the preprocessor cache for multimodal models. disable_mm_preprocessor_cache: False # for sglang sglang: # The attention backend for sglang engine. Options: flashinfer, triton, flashmla, null for default. attention_backend: null # Sampling parameters used during validation. val_kwargs: # sampling parameters for validation # Top-k sampling parameter. -1 for vLLM rollout, 0 for HF rollout. top_k: -1 # Top-p sampling parameter. Default 1.0. top_p: 1.0 # Sampling temperature for rollout. temperature: 0 # whether to repeat n times for validation n: 1 # Whether to sample during training rollout. False uses greedy sampling. do_sample: False # Multi-turn interaction config for tools or chat. multi_turn: # set to True for multi-turn tool interaction tasks; rollout.name should be set to sglang as well enable: False # null for no limit (default max_length // 3) max_assistant_turns: null # null for no tool tool_config_path: null # null for no limit (default max_length // 3) max_user_turns: null # max parallel call for tools in single turn max_parallel_calls: 1 # max length of tool response max_tool_response_length: 256 # truncate side of tool response: left, middle, right tool_response_truncate_side: middle # null for no interaction interaction_config_path: null # null for default callback completion_callback: null # - When set to True, the model's default chat template is used for multi-turn rollout, which typically matches production behavior.
# - When set to False, the token ids recorded for training are used instead; unlike the default chat template, these always include the model's full output, # which may contain additional content such as reasoning content. This maintains consistency between training and rollout, but it will lead to longer prompts. use_inference_chat_template: False # Tokenization is performed turn by turn and the resulting token ids are concatenated to form the full conversation. # To ensure this matches the result of tokenizing the entire conversation at once, a sanity check is run at the end of each multi-turn rollout to compare the two sets of token ids. # Some models are known to produce different tokenization results when tokenizing turn by turn vs. all at once. This behavior has already been validated for them. # To reduce excessive warnings, you can turn off the sanity check for these models if you are using their default chat template: # Qwen/QwQ-32B, Qwen/Qwen3-xxB # - disable: disable tokenization sanity check # - strict: enable strict tokenization sanity check (default) # - ignore_strippable: ignore strippable tokens when checking tokenization sanity tokenization_sanity_check_mode: strict # Format of the multi-turn interaction. Options: hermes, llama3_json, ... format: hermes # support logging rollout prob for debugging purpose calculate_log_probs: False # [Experimental] agent loop based rollout configs agent: # Number of agent loop workers num_workers: 8 # custom agent loop config path, which should contain a list of configs to initialize AgentLoop instances. # https://hydra.cc/docs/advanced/instantiate_objects/overview/ # # - name: react_agent # _target_: recipe.langgraph_agent.react_agent_loop.ReactAgentLoop # tools: ["get_current_temperature"] # - name: math_expression # _target_: recipe.langgraph_agent.example.math_expression.MathExpressionReactAgentLoop # min_terms: 2 # max_terms: 6 agent_loop_config_path: null # custom async server configs custom_async_server: # Path to the custom async server implementation path: null # Class name of the custom async server class (e.g. AsyncvLLMServer) name: null # Specifies the tensor bucket size (in megabytes) for batch weight updates during rollout operations. # This parameter controls the maximum payload size for a single weight update request. # Reference: https://github.com/volcengine/verl/pull/2418 # Currently only supported in SGLang rollout implementations # Larger values may improve throughput but increase memory overhead # Detailed performance comparison: # https://github.com/zhaochenyang20/Awesome-ML-SYS-Tutorial/issues/169#issuecomment-3070686720 # Default value (512MB) is optimized for typical GPU memory configurations # For the best performance of `rebuild_cuda_tensor`, it is recommended to: # 1. Enable `RAY_EXPERIMENTAL_NOSET_CUDA_VISIBLE_DEVICES` # 2. Manually set `CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7` # when using Tensor Parallelism (TP) >= 8.
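# For example, raising this to 1024 halves the number of weight-update requests at the cost of a larger per-request payload.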
update_weights_bucket_megabytes: 512 # trace rollout data trace: # trace backend; supports mlflow, weave backend: null # whether to translate token ids to text in the output token2text: False ================================================ FILE: verl_rl/verl/trainer/config/sft_trainer.yaml ================================================ data: train_batch_size: 256 micro_batch_size: null # will be deprecated, use micro_batch_size_per_gpu micro_batch_size_per_gpu: 4 # this is also val batch size train_files: ~/data/gsm8k/train.parquet val_files: ~/data/gsm8k/test.parquet # Single-turn settings prompt_key: question response_key: answer prompt_dict_keys: null response_dict_keys: null # Multi-turn settings multiturn: enable: false # Set to true to use multi-turn dataset messages_key: messages # Key for messages list in multi-turn mode tools_key: tools # Key for tools list in multi-turn mode enable_thinking_key: enable_thinking # Whether to enable thinking in multi-turn mode max_length: 1024 truncation: error balance_dp_token: False chat_template: null custom_cls: path: null name: null use_shm: False model: partial_pretrain: ~/models/gemma-1.1-7b-it use_shm: False fsdp_config: model_dtype: fp32 wrap_policy: min_num_params: 0 cpu_offload: False offload_params: False external_lib: null enable_gradient_checkpointing: True trust_remote_code: False lora_rank: 0 # Set to positive value to enable LoRA (e.g., 32) lora_alpha: 16 # LoRA scaling factor target_modules: all-linear # Target modules for LoRA adaptation use_liger: False strategy: fsdp2 optim: lr: 1e-5 betas: [0.9, 0.95] weight_decay: 0.01 warmup_steps_ratio: 0.1 clip_grad: 1.0 lr_scheduler: cosine ulysses_sequence_parallel_size: 1 use_remove_padding: False trainer: default_local_dir: checkpoints/${trainer.project_name}/${trainer.experiment_name} default_hdfs_dir: null project_name: gsm8k-sft experiment_name: test total_epochs: 4 total_training_steps: null logger: [ 'console', 'wandb' ] seed: 1 save_freq: -1 test_freq: -1 nnodes: 1 n_gpus_per_node: 8 max_ckpt_to_keep: null # Maximum number of checkpoints to keep, set to null to keep all # Resume mode: "auto", "disable", or "resume_path" # "auto": resume from last checkpoint if available # "disable": start from scratch # "resume_path": resume from a user-defined path resume_mode: auto # Path to resume training from (used when resume_mode is "resume_path" or "auto") resume_from_path: null # Checkpoint configuration checkpoint: # What to include in saved checkpoints # With 'hf_model' you can save the whole model in HF format; by default only the sharded model checkpoint is saved to save space save_contents: ["model", "optimizer", "extra"] # For more flexibility, you can specify the contents to load from the checkpoint. load_contents: ${trainer.checkpoint.save_contents} device: cuda ================================================ FILE: verl_rl/verl/trainer/constants_ppo.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
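# Default environment variables injected into the Ray runtime env of PPO workers.
# get_ppo_ray_runtime_env() below drops any variable that is already set in the
# launching process, so user-supplied values take precedence. An illustrative
# usage sketch (not from this file): ray.init(runtime_env=get_ppo_ray_runtime_env())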
import os PPO_RAY_RUNTIME_ENV = { "env_vars": { "TOKENIZERS_PARALLELISM": "true", "NCCL_DEBUG": "WARN", "VLLM_LOGGING_LEVEL": "WARN", "VLLM_ALLOW_RUNTIME_LORA_UPDATING": "true", "CUDA_DEVICE_MAX_CONNECTIONS": "1", "WANDB_MODE": "offline", "WANDB_DISABLE_SERVICE": "true", }, } def get_ppo_ray_runtime_env(): """ Return the PPO Ray runtime environment, filtering out environment variables that are already set in the current process so they are not overridden. """ runtime_env = {"env_vars": PPO_RAY_RUNTIME_ENV["env_vars"].copy()} for key in list(runtime_env["env_vars"].keys()): if os.environ.get(key) is not None: runtime_env["env_vars"].pop(key, None) return runtime_env ================================================ FILE: verl_rl/verl/trainer/fsdp_sft_trainer.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ A lightweight one-file FSDP SFT Trainer TODO(zhangchi.usc1992) - Add calculation of mfu - Add validation """ import os os.environ["NCCL_DEBUG"] = "WARN" os.environ["TOKENIZERS_PARALLELISM"] = "true" import logging import re from contextlib import nullcontext import hydra import torch import torch.distributed from omegaconf import DictConfig from peft import LoraConfig, TaskType, get_peft_model from tensordict import TensorDict from torch import nn, optim from torch.distributed.device_mesh import DeviceMesh, init_device_mesh from torch.distributed.fsdp import CPUOffload, MixedPrecision, ShardingStrategy from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.utils.data import Dataset, DistributedSampler from torchdata.stateful_dataloader import StatefulDataLoader from tqdm import tqdm from transformers import AutoConfig, AutoModelForCausalLM, PreTrainedModel import verl.utils.hdfs_io as hdfs_io from verl.utils.checkpoint.checkpoint_manager import find_latest_ckpt_path, get_checkpoint_tracker_filename from verl.utils.checkpoint.fsdp_checkpoint_manager import FSDPCheckpointManager from verl.utils.dataset import SFTDataset from verl.utils.dataset.multiturn_sft_dataset import MultiTurnSFTDataset from verl.utils.device import get_device_id, get_device_name, is_cuda_available, is_npu_available from verl.utils.distributed import destroy_global_process_group, initialize_global_process_group from verl.utils.fs import copy_to_local from verl.utils.fsdp_utils import ( CPUOffloadPolicy, MixedPrecisionPolicy, apply_fsdp2, fsdp2_clip_grad_norm_, fsdp2_load_full_state_dict, get_fsdp_wrap_policy, get_init_weight_context_manager, init_fn, ) from verl.utils.logger import log_with_rank from verl.utils.profiler import log_gpu_memory_usage from verl.utils.py_functional import convert_to_regular_types from verl.utils.torch_dtypes import PrecisionType from verl.utils.torch_functional import get_cosine_schedule_with_warmup, get_wsd_schedule_with_warmup from verl.utils.tracking import Tracking from verl.utils.ulysses import ( gather_outputs_and_unpad, get_ulysses_sequence_parallel_world_size,
ulysses_pad_and_slice_inputs, ) from verl.workers.sharding_manager.fsdp_ulysses import FSDPUlyssesShardingManager if is_cuda_available: from flash_attn.bert_padding import index_first_axis, pad_input, rearrange, unpad_input elif is_npu_available: from transformers.integrations.npu_flash_attention import index_first_axis, pad_input, rearrange, unpad_input logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_SFT_LOGGING_LEVEL", "WARN")) def extract_step(path): match = re.search(r"global_step_(\d+)", path) if match: return int(match.group(1)) return None class FSDPSFTTrainer: def __init__( self, config, device_mesh: DeviceMesh, ulysses_device_mesh: DeviceMesh, tokenizer, train_dataset: Dataset, val_dataset: Dataset, ): self.config = config self.device_mesh = device_mesh self.ulysses_device_mesh = ulysses_device_mesh self.sharding_manager = FSDPUlyssesShardingManager(self.ulysses_device_mesh) self.tokenizer = tokenizer if self.config.data.chat_template is not None: raise ValueError("Applying a chat template from the config is not supported yet.") # normalize dp size self._normalize_config_bsz() # Set sequence parallel size self.config.ulysses_sequence_parallel_size = getattr(self.config, "ulysses_sequence_parallel_size", 1) self.use_remove_padding = getattr(self.config, "use_remove_padding", False) if self.device_mesh.get_rank() == 0: print(f"Using sequence parallel size: {self.config.ulysses_sequence_parallel_size}") print(f"Using remove padding: {self.use_remove_padding}") self._build_dataloader(train_dataset, val_dataset) # Initialize resume-related variables self.resume_global_step = 0 # build model self._build_model_optimizer() # Initialize checkpoint manager self._init_checkpoint_manager() self.load_checkpoint() if self.device_mesh.get_rank() == 0: print(self.config) self.device_name = self.config.trainer.device def _normalize_config_bsz(self): dp_size = self.device_mesh.size(0) if not self.ulysses_device_mesh else self.ulysses_device_mesh.size(0) if self.device_mesh.get_rank() == 0: print(f"Normalize batch size by dp {dp_size}") assert self.config.data.train_batch_size % dp_size == 0, ( f"Global batch size {self.config.data.train_batch_size} is not divisible by dp size {dp_size}" ) self.config.data.train_batch_size //= dp_size assert self.config.data.train_batch_size % self.config.data.micro_batch_size_per_gpu == 0 def _build_dataloader(self, train_dataset, val_dataset): # build dataset config = self.config self.train_dataset, self.val_dataset = train_dataset, val_dataset # build dataloader # Use data parallel rank and size instead of global rank and world size # If doing SP, we need to use the local rank and size if self.config.ulysses_sequence_parallel_size > 1: rank = self.ulysses_device_mesh.get_local_rank("dp") world_size = self.ulysses_device_mesh.size(0) if self.ulysses_device_mesh.get_rank() == 0: print(f"Using SP rank {rank} and size {world_size} for data distribution") print("Each DP rank gets different data, but ranks within the same SP group get the same data") else: rank = self.device_mesh.get_rank() world_size = self.device_mesh.size() if self.device_mesh.get_rank() == 0: print(f"Using FSDP rank {rank} and size {world_size} for data distribution") self.train_sampler = DistributedSampler( self.train_dataset, shuffle=True, num_replicas=world_size, rank=rank, drop_last=True ) self.train_dataloader = StatefulDataLoader( dataset=self.train_dataset, batch_size=config.data.train_batch_size, sampler=self.train_sampler, num_workers=8, pin_memory=True, drop_last=True, ) self.val_sampler =
DistributedSampler( self.val_dataset, shuffle=False, num_replicas=world_size, rank=rank, drop_last=True ) self.val_dataloader = StatefulDataLoader( dataset=self.val_dataset, batch_size=config.data.micro_batch_size_per_gpu, sampler=self.val_sampler, num_workers=8, pin_memory=True, drop_last=True, ) def _build_model_optimizer(self): # TODO (zhangchi.usc1992): # 1. support pretrain from random weights # 2. support init directly from sharded weights local_model_path = copy_to_local(src=self.config.model.partial_pretrain, verbose=True) if self.config.model.get("external_lib", None) is not None: # This is used to import external_lib into the huggingface systems import importlib importlib.import_module(self.config.model.external_lib) log_gpu_memory_usage("Before model allocation", logger=logger) trust_remote_code = self.config.model.trust_remote_code torch_dtype = self.config.model.fsdp_config.get("model_dtype", "fp32") torch_dtype = PrecisionType.to_dtype(torch_dtype) # load config first config = AutoConfig.from_pretrained(local_model_path, trust_remote_code=trust_remote_code) self.model_config = config if hasattr(self.model_config, "max_position_embeddings"): self.model_config.max_position_embeddings = max( self.model_config.max_position_embeddings, self.config.data.max_length ) if self.config.ulysses_sequence_parallel_size > 1: assert self.use_remove_padding, "Sequence parallel is only supported when remove_padding is enabled" # This may be very large init_context = get_init_weight_context_manager( use_meta_tensor=not config.tie_word_embeddings, mesh=self.device_mesh ) with init_context(): self.model: PreTrainedModel = AutoModelForCausalLM.from_pretrained( local_model_path, config=config, torch_dtype=torch_dtype, attn_implementation="flash_attention_2", trust_remote_code=trust_remote_code, ) if self.use_remove_padding or self.config.ulysses_sequence_parallel_size > 1: from verl.models.transformers.monkey_patch import apply_monkey_patch apply_monkey_patch(model=self.model, ulysses_sp_size=self.config.ulysses_sequence_parallel_size) # Apply Liger kernel if use_liger is enabled if self.config.model.get("use_liger", False): from liger_kernel.transformers.monkey_patch import _apply_liger_kernel_to_instance _apply_liger_kernel_to_instance(model=self.model) if self.config.model.get("lora_rank", 0) > 0: self.model.enable_input_require_grads() # Convert config to regular Python types before creating PEFT model lora_config = { "task_type": TaskType.CAUSAL_LM, "r": self.config.model.lora_rank, "lora_alpha": self.config.model.lora_alpha, "target_modules": convert_to_regular_types(self.config.model.target_modules), "bias": "none", } self.model = get_peft_model(self.model, LoraConfig(**lora_config)) self.model = self.model.to(torch_dtype) if self.config.model.enable_gradient_checkpointing: self.model.gradient_checkpointing_enable(gradient_checkpointing_kwargs={"use_reentrant": False}) log_gpu_memory_usage("After model allocation", logger=logger) mixed_precision = MixedPrecision( param_dtype=torch.bfloat16, reduce_dtype=torch.float32, buffer_dtype=torch.float32 ) auto_wrap_policy = get_fsdp_wrap_policy( self.model, config=self.config.model.fsdp_config.wrap_policy, is_lora=self.config.model.get("lora_rank", 0) > 0, ) if self.device_mesh.get_rank() == 0: print(auto_wrap_policy) if not self.config.model.fsdp_config.cpu_offload: cpu_offload = None else: cpu_offload = CPUOffload(offload_params=self.config.model.fsdp_config.offload_params) fsdp_strategy = self.config.model.strategy if fsdp_strategy == "fsdp": 
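# FSDP1 path: fully shard the model across the device mesh (FULL_SHARD), keeping bf16 parameters with fp32 gradient reduction for numerically stable updates.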
self.fsdp_model = FSDP( self.model, cpu_offload=cpu_offload, param_init_fn=init_fn, use_orig_params=False, auto_wrap_policy=auto_wrap_policy, device_id=get_device_id(), sharding_strategy=ShardingStrategy.FULL_SHARD, mixed_precision=mixed_precision, sync_module_states=True, device_mesh=self.device_mesh, forward_prefetch=False, ) elif fsdp_strategy == "fsdp2": assert CPUOffloadPolicy is not None, "PyTorch version >= 2.4 is required for using fully_shard API (FSDP2)" mp_policy = MixedPrecisionPolicy( param_dtype=torch.bfloat16, reduce_dtype=torch.float32, cast_forward_inputs=True ) fsdp_kwargs = { "mesh": self.device_mesh, "mp_policy": mp_policy, "offload_policy": cpu_offload, "reshard_after_forward": True, } full_state = self.model.state_dict() apply_fsdp2(self.model, fsdp_kwargs, self.config.model.fsdp_config) fsdp2_load_full_state_dict(self.model, full_state, self.device_mesh, cpu_offload) self.fsdp_model = self.model else: raise NotImplementedError(f"not implement {fsdp_strategy}") log_gpu_memory_usage("After FSDP wrapping", logger=logger) self.optimizer = optim.AdamW( self.fsdp_model.parameters(), lr=self.config.optim.lr, betas=self.config.optim.betas, weight_decay=self.config.optim.weight_decay, ) log_gpu_memory_usage("After initialize optimizer", logger=logger) self.steps_per_epoch = len(self.train_dataloader) self.total_steps = self.steps_per_epoch * self.config.trainer.total_epochs if self.device_mesh.get_rank() == 0: print( f"Number of steps/epoch {self.steps_per_epoch}, number of epochs " f"{self.config.trainer.total_epochs}, total number of steps {self.total_steps}" ) num_warmup_steps = int(self.total_steps * self.config.optim.warmup_steps_ratio) if not hasattr(self.config.optim, "lr_scheduler") or self.config.optim.lr_scheduler == "cosine": self.lr_scheduler = get_cosine_schedule_with_warmup( optimizer=self.optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=self.total_steps ) elif self.config.optim.lr_scheduler == "wsd": self.lr_scheduler = get_wsd_schedule_with_warmup( optimizer=self.optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=self.total_steps ) else: raise ValueError(f"Unknown lr scheduler: {self.config.optim.lr_scheduler}") def _compute_loss_and_backward(self, batch, do_backward=True): """Compute loss with optional sequence parallelism and remove padding features""" use_sp = self.use_remove_padding and self.config.ulysses_sequence_parallel_size > 1 # Move inputs to GPU and prepare loss mask input_ids = batch["input_ids"].to(self.device_name) attention_mask = batch["attention_mask"].to(self.device_name) position_ids = batch["position_ids"].to(self.device_name) loss_mask = batch.pop("loss_mask")[:, :-1].reshape(-1).to(self.device_name) loss_fct = nn.CrossEntropyLoss(reduction="none") # Context manager for sequence parallel if needed context = self.sharding_manager if use_sp else nullcontext() with context, torch.autocast(device_type=self.device_name, dtype=torch.bfloat16): if not use_sp: # Standard forward pass without sequence parallel labels = input_ids[:, 1:].contiguous() output = self.fsdp_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, use_cache=False ) logits = output.logits shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels.contiguous() # Flatten the tokens shift_logits = shift_logits.view(-1, self.model.config.vocab_size) shift_labels = shift_labels.view(-1) # Enable model parallelism shift_labels = shift_labels.to(shift_logits.device) loss = loss_fct(shift_logits, shift_labels) 
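# A minimal standalone sketch (illustrative, not part of this trainer) of the
# shift-by-one masked cross-entropy used above: logits at position t are paired
# with the token at t + 1, and loss_mask (assumed here to zero out prompt
# positions) restricts the objective to response tokens:
#
#   import torch
#   import torch.nn as nn
#
#   bs, seqlen, vocab = 2, 5, 11
#   logits = torch.randn(bs, seqlen, vocab)
#   input_ids = torch.randint(0, vocab, (bs, seqlen))
#   loss_mask = torch.ones(bs, seqlen - 1)  # one entry per predicted token
#
#   ce = nn.CrossEntropyLoss(reduction="none")
#   per_token = ce(logits[:, :-1].reshape(-1, vocab), input_ids[:, 1:].reshape(-1))
#   loss = (per_token * loss_mask.reshape(-1)).sum() / loss_mask.sum()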
loss = loss * loss_mask.to(loss.device) else: # IMPORTANT: We have a big assumption here, so we can shard the SAME sequence across SP ranks # i.e., each GPU has <1 sequence, and each SP group has 1 sequence # 1. All SP ranks will receive the *SAME* batch # 2. Different SP groups will receive *DIFFERENT* batches # This is implemented by the DistributedSampler batch_size, seqlen = input_ids.shape # Remove padding input_ids_rmpad, indices, *_ = unpad_input( input_ids.unsqueeze(-1), attention_mask ) # input_ids_rmpad (total_nnz, ...) input_ids_rmpad = input_ids_rmpad.transpose(0, 1) # (1, total_nnz) # Unpad position_ids to align rotary position_ids_rmpad = index_first_axis( rearrange(position_ids.unsqueeze(-1), "b s ... -> (b s) ..."), indices ).transpose(0, 1) # Pad and slice inputs for sequence parallelism input_ids_rmpad_sliced, position_ids_rmpad_padded, pad_size = ulysses_pad_and_slice_inputs( input_ids_rmpad, position_ids_rmpad, sp_size=get_ulysses_sequence_parallel_world_size() ) # For computing loss input_ids_rmpad_rolled = torch.roll(input_ids_rmpad, shifts=-1, dims=1) # (1, total_nnz) input_ids_rmpad_rolled, _, _ = ulysses_pad_and_slice_inputs( input_ids_rmpad_rolled, None, get_ulysses_sequence_parallel_world_size() ) input_ids_rmpad_rolled = input_ids_rmpad_rolled.squeeze(0) # ((total_nnz / sp) + pad) # Forward pass output = self.fsdp_model( input_ids=input_ids_rmpad_sliced, attention_mask=None, # Not needed with flash attention varlen position_ids=position_ids_rmpad_padded, use_cache=False, ) # Compute loss locally then aggregate logits_rmpad = output.logits.squeeze(0) input_ids_rmpad_rolled = input_ids_rmpad_rolled.to(logits_rmpad.device) loss = loss_fct(logits_rmpad, input_ids_rmpad_rolled) # Gather and unpad for sequence parallelism loss = gather_outputs_and_unpad(loss, gather_dim=0, unpad_dim=0, padding_size=pad_size) # This is the loss collected from all ulysses ranks full_loss = pad_input( hidden_states=loss.unsqueeze(-1), indices=indices, batch=batch_size, seqlen=seqlen ) full_loss = full_loss.squeeze(-1)[:, :-1] # Remove last token's loss full_loss = full_loss.reshape(-1) loss_mask = loss_mask.to(full_loss.device) loss = full_loss * loss_mask valid_token_this_rank = torch.sum(loss_mask) if self.config.data.balance_dp_token: torch.distributed.all_reduce(valid_token_this_rank) dp_size = self.ulysses_device_mesh.size("dp") if use_sp else torch.distributed.get_world_size() else: dp_size = 1 loss = torch.sum(loss) / (valid_token_this_rank + 1e-8) * dp_size if do_backward: loss.backward() return loss def training_step(self, batch: TensorDict): self.fsdp_model.train() log_gpu_memory_usage("Before optimizer zero_grad", logger=logger) self.optimizer.zero_grad() log_gpu_memory_usage("After optimizer zero_grad", logger=logger) micro_batches = batch.split(self.config.data.micro_batch_size_per_gpu) n_micro_batches = len(micro_batches) step_loss = 0 for micro_batch in micro_batches: loss = self._compute_loss_and_backward(batch=micro_batch) / n_micro_batches step_loss += loss.item() if self.config.model.strategy == "fsdp": grad_norm = self.fsdp_model.clip_grad_norm_(max_norm=self.config.optim.clip_grad) elif self.config.model.strategy == "fsdp2": grad_norm = fsdp2_clip_grad_norm_(self.fsdp_model.parameters(), max_norm=self.config.optim.clip_grad) else: raise NotImplementedError(f"not implement {self.config.model.strategy}") log_gpu_memory_usage("Before optimizer step", logger=logger) # if grad_norm is not finite, skip the update if not torch.isfinite(grad_norm): print(f"WARN: grad_norm is 
not finite: {grad_norm}") self.optimizer.zero_grad() else: self.optimizer.step() log_gpu_memory_usage("After optimizer step", logger=logger) self.lr_scheduler.step() # reduce loss across dp ranks lr = self.lr_scheduler.get_last_lr()[0] log_gpu_memory_usage("After offload weights", logger=logger) step_loss = torch.tensor(step_loss).to(self.device_name) if is_cuda_available: torch.distributed.all_reduce(step_loss, op=torch.distributed.ReduceOp.AVG) elif is_npu_available: torch.distributed.all_reduce(step_loss) step_loss /= self.device_mesh.size(0) return {"train/loss": step_loss.detach().item(), "train/lr(1e-3)": lr * 1e3} def validation_step(self, batch: TensorDict): self.fsdp_model.eval() with torch.no_grad(): loss = self._compute_loss_and_backward(batch, do_backward=False) if is_cuda_available: torch.distributed.all_reduce(loss, op=torch.distributed.ReduceOp.AVG) elif is_npu_available: torch.distributed.all_reduce(loss) loss /= self.device_mesh.size(0) return loss def save_checkpoint(self, step): """Save checkpoint using FSDPCheckpointManager with improved tracking""" from verl.utils.fs import local_mkdir_safe # Determine checkpoint path local_global_step_folder = os.path.join(self.config.trainer.default_local_dir, f"global_step_{step}") if self.device_mesh.get_rank() == 0: print(f"Saving checkpoint to: {local_global_step_folder}") # Get max checkpoints to keep max_ckpt_to_keep = getattr(self.config.trainer, "max_ckpt_to_keep", None) # Use checkpoint manager to save self.checkpoint_manager.save_checkpoint( local_path=local_global_step_folder, global_step=step, max_ckpt_to_keep=max_ckpt_to_keep ) # Save dataloader state if self.device_mesh.get_rank() == 0: local_mkdir_safe(local_global_step_folder) dataloader_local_path = os.path.join(local_global_step_folder, "data.pt") # Use StatefulDataLoader's built-in state dict functionality dataloader_state_dict = self.train_dataloader.state_dict() torch.save(dataloader_state_dict, dataloader_local_path) print(f"Saved dataloader state to: {dataloader_local_path}") # Update latest checkpoint tracker (atomic write) tracker_file = get_checkpoint_tracker_filename(self.config.trainer.default_local_dir) temp_tracker_file = tracker_file + ".tmp" with open(temp_tracker_file, "w") as f: f.write(str(step)) os.rename(temp_tracker_file, tracker_file) print(f"Updated checkpoint tracker: {tracker_file}") # Copy to HDFS if configured if self.device_mesh.get_rank() == 0 and getattr(self.config.trainer, "default_hdfs_dir", None): hdfs_io.makedirs(self.config.trainer.default_hdfs_dir, exist_ok=True) hdfs_io.copy(src=local_global_step_folder, dst=self.config.trainer.default_hdfs_dir, dirs_exist_ok=True) torch.distributed.barrier() def _init_checkpoint_manager(self): """Initialize checkpoint manager with proper configuration""" # Get checkpoint configuration from config, with defaults checkpoint_config = getattr(self.config.trainer, "checkpoint", {}) # Set default values if not specified save_contents = checkpoint_config.get("save_contents", ["model", "optimizer", "extra"]) load_contents = checkpoint_config.get("load_contents", save_contents) # Create checkpoint config dict checkpoint_config_dict = { "load_contents": load_contents, "save_contents": save_contents, } # Convert to DictConfig for compatibility checkpoint_config_dict = DictConfig(checkpoint_config_dict) # Initialize checkpoint manager self.checkpoint_manager = FSDPCheckpointManager( model=self.fsdp_model, optimizer=self.optimizer, lr_scheduler=self.lr_scheduler, processing_class=self.tokenizer, 
checkpoint_config=checkpoint_config_dict, ) def load_checkpoint(self): # Determine resume path based on configuration checkpoint_path = self._determine_resume_path() if checkpoint_path is None: return 0 # extract resume step from checkpoint path resume_step = extract_step(checkpoint_path) if resume_step is None: log_with_rank( f"Warning: Could not extract step number from {checkpoint_path}, starting from step 0", logger=logger, rank=self.device_mesh.get_rank(), level=logging.WARNING, log_only_rank_0=True, ) return 0 self.resume_global_step = resume_step # Use checkpoint manager to load model state self.checkpoint_manager.load_checkpoint(checkpoint_path) log_with_rank( f"Successfully loaded model checkpoint from {checkpoint_path} (step {resume_step})", logger=logger, rank=self.device_mesh.get_rank(), log_only_rank_0=True, ) # Always load dataloader state for StatefulDataLoader self._load_dataloader_state(checkpoint_path) return resume_step def _load_dataloader_state(self, checkpoint_path: str): """Load dataloader state from checkpoint""" dataloader_path = os.path.join(checkpoint_path, "data.pt") if os.path.exists(dataloader_path): # Use StatefulDataLoader's built-in state dict functionality dataloader_state_dict = torch.load(dataloader_path, map_location="cpu", weights_only=False) self.train_dataloader.load_state_dict(dataloader_state_dict) log_with_rank( f"Successfully loaded dataloader state from {dataloader_path}", logger=logger, rank=self.device_mesh.get_rank(), log_only_rank_0=True, ) else: log_with_rank( f"Warning: No dataloader state found at {dataloader_path}, will start from scratch", logger=logger, rank=self.device_mesh.get_rank(), level=logging.WARNING, log_only_rank_0=True, ) def _determine_resume_path(self): """Determine the path to resume from based on resume_mode configuration""" resume_mode = getattr(self.config.trainer, "resume_mode", "auto") resume_from_path = getattr(self.config.trainer, "resume_from_path", None) if resume_mode == "disable": return None elif resume_mode == "auto": if resume_from_path is not None: assert os.path.exists(resume_from_path), ( "resume_from_path must be null or an existing path when resume_mode is 'auto'" ) assert "global_step_" in resume_from_path, "resume_from_path must specify the global_steps" return resume_from_path # Try to find the latest checkpoint in the default directory return self._find_latest_checkpoint() elif resume_mode == "resume_path": assert os.path.exists(resume_from_path), ( "resume_from_path must be an existing path when resume_mode is 'resume_path'" ) assert "global_step_" in resume_from_path, "resume_from_path must specify the global_steps" return resume_from_path else: raise ValueError(f"Invalid resume_mode: {resume_mode}. 
Must be 'auto', 'disable', or 'resume_path'") def _find_latest_checkpoint(self): """Find the latest checkpoint in the default local directory""" checkpoint_dir = self.config.trainer.default_local_dir if not os.path.exists(checkpoint_dir): return None latest_checkpoint = find_latest_ckpt_path(checkpoint_dir) if latest_checkpoint and self.device_mesh.get_rank() == 0: step_num = extract_step(latest_checkpoint) print(f"Found latest checkpoint: {latest_checkpoint} (step {step_num})") return latest_checkpoint def fit(self): rank = self.device_mesh.get_rank() # TODO: add a unified tracking if rank == 0: tracking = Tracking( project_name=self.config.trainer.project_name, experiment_name=self.config.trainer.experiment_name, default_backend=self.config.trainer.logger, ) global_step = self.resume_global_step # Start from resumed step last_valid_metric = None # compute the total training steps. # the total training steps in SFT is mainly for early exit total_training_steps = len(self.train_dataloader) * self.config.trainer.total_epochs if self.config.trainer.total_training_steps is not None: total_training_steps = self.config.trainer.total_training_steps self.total_training_steps = total_training_steps log_with_rank( f"Total training steps: {self.total_training_steps},", logger=logger, rank=self.device_mesh.get_rank(), log_only_rank_0=True, ) # With StatefulDataLoader, we don't need to manually calculate epochs and steps # The dataloader will automatically resume from where it left off if global_step > 0: log_with_rank( f"StatefulDataLoader will automatically resume from global step: {global_step}", logger=logger, rank=self.device_mesh.get_rank(), log_only_rank_0=True, ) # Calculate which epoch we're starting from for sampler.set_epoch() start_epoch = global_step // self.steps_per_epoch for epoch in range(start_epoch, self.config.trainer.total_epochs): self.train_sampler.set_epoch(epoch=epoch) for step_in_epoch, data in enumerate( tqdm( self.train_dataloader, initial=global_step % self.steps_per_epoch if epoch == start_epoch else 0, total=self.steps_per_epoch, desc=f"Epoch {epoch + 1}/{self.config.trainer.total_epochs}", disable=rank != 0, ) ): global_step += 1 data = TensorDict(data, batch_size=self.config.data.train_batch_size).to(self.device_name) metric = self.training_step(data) if rank == 0: tracking.log(data=metric, step=global_step) is_last_step = global_step >= self.total_training_steps is_valid_step = global_step % self.config.trainer.test_freq == 0 is_save_step = global_step % self.config.trainer.save_freq == 0 # early exit or validation step if is_last_step or (self.config.trainer.test_freq > 0 and is_valid_step): # Perform validation val_losses = [] for val_data in self.val_dataloader: val_data = TensorDict(val_data, batch_size=self.config.data.micro_batch_size_per_gpu).to( self.device_name ) val_loss = self.validation_step(val_data) val_losses.append(val_loss) if rank == 0: val_loss = torch.mean(torch.stack(val_losses)) metric = {"val/loss": val_loss.detach().item()} tracking.log(data=metric, step=global_step) last_valid_metric = metric torch.distributed.barrier() if is_last_step or (self.config.trainer.save_freq > 0 and is_save_step): self.save_checkpoint(step=global_step) if is_last_step: if rank == 0: print(f"Final validation metrics: {last_valid_metric}") return def run_sft(config): device_name = get_device_name() local_rank, rank, world_size = initialize_global_process_group() device_mesh = init_device_mesh(device_type=device_name, mesh_shape=(world_size,), mesh_dim_names=("fsdp",)) 
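# The 1-D "fsdp" mesh above spans all ranks for parameter sharding; the 2-D
# (dp, sp) mesh built next partitions the same ranks between data parallelism
# and Ulysses sequence parallelism. Illustrative layout (assuming world_size=8,
# ulysses_sequence_parallel_size=2, and init_device_mesh's row-major rank
# ordering):
#
#   dp=0: ranks (0, 1)    dp=1: ranks (2, 3)
#   dp=2: ranks (4, 5)    dp=3: ranks (6, 7)
#
# Each dp group reads a distinct data shard, while the sp ranks within a group
# split every sequence between them.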
dp_size = world_size // config.ulysses_sequence_parallel_size ulysses_device_mesh = init_device_mesh( device_type=device_name, mesh_shape=(dp_size, config.ulysses_sequence_parallel_size), mesh_dim_names=("dp", "sp"), ) # build tokenizer and datasets first from verl.utils import hf_tokenizer local_model_path = copy_to_local(src=config.model.partial_pretrain, verbose=True) tokenizer = hf_tokenizer(local_model_path, trust_remote_code=config.model.trust_remote_code) train_dataset = create_sft_dataset(config.data.train_files, config.data, tokenizer) val_dataset = create_sft_dataset(config.data.val_files, config.data, tokenizer) trainer = FSDPSFTTrainer( config=config, device_mesh=device_mesh, ulysses_device_mesh=ulysses_device_mesh, tokenizer=tokenizer, train_dataset=train_dataset, val_dataset=val_dataset, ) trainer.fit() destroy_global_process_group() @hydra.main(config_path="config", config_name="sft_trainer", version_base=None) def main(config): run_sft(config) def create_sft_dataset(data_paths, data_config, tokenizer): """Create a dataset.""" # build dataset # First check if a custom dataset class is specified if data_config.custom_cls.get("path", None): from verl.utils.import_utils import load_extern_type dataset_cls = load_extern_type(data_config.custom_cls.path, data_config.custom_cls.name) # Then check if multi-turn dataset should be used elif data_config.get("multiturn", {}).get("enable", False): dataset_cls = MultiTurnSFTDataset # Default to single-turn dataset else: dataset_cls = SFTDataset # Create datasets based on the selected class dataset = dataset_cls(parquet_files=data_paths, tokenizer=tokenizer, config=data_config) return dataset if __name__ == "__main__": main() ================================================ FILE: verl_rl/verl/trainer/main_eval.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Offline evaluate the performance of a generated file using reward model and ground truth verifier. The input is a parquet file that contains N generated sequences and (optional) the ground truth. 
""" from collections import defaultdict import hydra import numpy as np import pandas as pd import ray from tqdm import tqdm from verl.trainer.ppo.reward import get_custom_reward_fn from verl.utils.fs import copy_to_local @ray.remote def process_item(reward_fn, data_source, response_lst, reward_data): ground_truth = reward_data["ground_truth"] score_lst = [reward_fn(data_source, r, ground_truth) for r in response_lst] return data_source, np.mean(score_lst) @hydra.main(config_path="config", config_name="evaluation", version_base=None) def main(config): local_path = copy_to_local(config.data.path, use_shm=config.data.get("use_shm", False)) dataset = pd.read_parquet(local_path) responses = dataset[config.data.response_key] data_sources = dataset[config.data.data_source_key] reward_model_data = dataset[config.data.reward_model_key] total = len(dataset) # Initialize Ray if not ray.is_initialized(): ray.init(num_cpus=config.ray_init.num_cpus) # evaluate test_score based on data source data_source_reward = defaultdict(list) compute_score = get_custom_reward_fn(config) # Create remote tasks remote_tasks = [ process_item.remote(compute_score, data_sources[i], responses[i], reward_model_data[i]) for i in range(total) ] # Process results as they come in with tqdm(total=total) as pbar: while len(remote_tasks) > 0: # Use ray.wait to get completed tasks done_ids, remote_tasks = ray.wait(remote_tasks) for result_id in done_ids: data_source, score = ray.get(result_id) data_source_reward[data_source].append(score) pbar.update(1) metric_dict = {} for data_source, rewards in data_source_reward.items(): metric_dict[f"test_score/{data_source}"] = np.mean(rewards) print(metric_dict) if __name__ == "__main__": main() ================================================ FILE: verl_rl/verl/trainer/main_generation.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" Generate responses given a dataset of prompts """ import os import hydra import numpy as np import ray os.environ["NCCL_DEBUG"] = "WARN" os.environ["TOKENIZERS_PARALLELISM"] = "true" # os.environ['TORCH_COMPILE_DISABLE'] = '1' from pprint import pprint import pandas as pd from omegaconf import OmegaConf from verl import DataProto from verl.protocol import pad_dataproto_to_divisor, unpad_dataproto from verl.single_controller.ray import RayClassWithInitArgs, RayResourcePool, RayWorkerGroup from verl.utils import hf_tokenizer from verl.utils.fs import copy_to_local from verl.utils.hdfs_io import makedirs from verl.utils.model import compute_position_id_with_mask from verl.workers.fsdp_workers import ActorRolloutRefWorker @hydra.main(config_path="config", config_name="generation", version_base=None) def main(config): run_generation(config) def run_generation(config) -> None: if not ray.is_initialized(): # this is for local ray cluster ray.init( runtime_env={"env_vars": {"TOKENIZERS_PARALLELISM": "true", "NCCL_DEBUG": "WARN"}}, num_cpus=config.ray_init.num_cpus, ) ray.get(main_task.remote(config)) @ray.remote(num_cpus=1) def main_task(config): pprint(OmegaConf.to_container(config, resolve=True)) # resolve=True will eval symbol values OmegaConf.resolve(config) local_path = copy_to_local(config.model.path) trust_remote_code = config.data.get("trust_remote_code", False) tokenizer = hf_tokenizer(local_path, trust_remote_code=trust_remote_code) if config.rollout.temperature == 0.0: assert config.data.n_samples == 1, "When temperature=0, n_samples must be 1." assert config.data.n_samples >= 1, "n_samples should always >= 1" # read dataset. Note that the dataset should directly contain chat template format (e.g., a list of dictionary) dataset = pd.read_parquet(config.data.path) chat_lst = dataset[config.data.prompt_key].tolist() chat_lst = [chat.tolist() for chat in chat_lst] tokenizer.padding_side = "left" if tokenizer.pad_token is None: tokenizer.pad_token = tokenizer.eos_token ray_cls_with_init = RayClassWithInitArgs(cls=ray.remote(ActorRolloutRefWorker), config=config, role="rollout") resource_pool = RayResourcePool(process_on_nodes=[config.trainer.n_gpus_per_node] * config.trainer.nnodes) wg = RayWorkerGroup( resource_pool=resource_pool, ray_cls_with_init=ray_cls_with_init, device_name=config.trainer.device, ) wg.init_model() total_samples = len(dataset) config_batch_size = config.data.batch_size num_batch = -(-total_samples // config_batch_size) output_lst = [[] for _ in range(config.data.n_samples)] for batch_idx in range(num_batch): print(f"[{batch_idx + 1}/{num_batch}] Start to process.") batch_chat_lst = chat_lst[batch_idx * config_batch_size : (batch_idx + 1) * config_batch_size] inputs = tokenizer.apply_chat_template( batch_chat_lst, add_generation_prompt=True, padding=True, truncation=True, max_length=config.rollout.prompt_length, return_tensors="pt", return_dict=True, tokenize=True, ) input_ids = inputs["input_ids"] attention_mask = inputs["attention_mask"] position_ids = compute_position_id_with_mask(attention_mask) batch_dict = {"input_ids": input_ids, "attention_mask": attention_mask, "position_ids": position_ids} data = DataProto.from_dict(batch_dict) data_padded, pad_size = pad_dataproto_to_divisor(data, wg.world_size) # START TO GENERATE FOR n_samples TIMES print(f"[{batch_idx + 1}/{num_batch}] Start to generate.") for n_sample in range(config.data.n_samples): output_padded = wg.generate_sequences(data_padded) output = unpad_dataproto(output_padded, pad_size=pad_size) 
output_texts = []
for i in range(len(output)):
    data_item = output[i]
    prompt_length = data_item.batch["prompts"].shape[-1]
    valid_response_length = data_item.batch["attention_mask"][prompt_length:].sum()
    valid_response_ids = data_item.batch["responses"][:valid_response_length]
    response_str = tokenizer.decode(valid_response_ids, skip_special_tokens=True)
    output_texts.append(response_str)

output_lst[n_sample].extend(output_texts)

# convert output_lst from (n_samples, n_data) to (n_data, n_samples)
output_lst = np.array(output_lst, dtype=object)
output_lst = np.transpose(output_lst, axes=(1, 0)).tolist()

# add to the data frame
dataset["responses"] = output_lst

# write to a new parquet
output_dir = os.path.dirname(config.data.output_path)
makedirs(output_dir, exist_ok=True)
dataset.to_parquet(config.data.output_path)


if __name__ == "__main__":
    main()


================================================
FILE: verl_rl/verl/trainer/main_ppo.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Note that we don't combine the main with ray_trainer as ray_trainer is used by other main entry points.
"""

import os
import socket

import hydra
import ray
from omegaconf import OmegaConf

from verl.experimental.dataset.sampler import AbstractSampler
from verl.trainer.constants_ppo import get_ppo_ray_runtime_env
from verl.trainer.ppo.ray_trainer import RayPPOTrainer
from verl.trainer.ppo.reward import load_reward_manager
from verl.utils.device import is_cuda_available
from verl.utils.import_utils import load_extern_type


@hydra.main(config_path="config", config_name="ppo_trainer", version_base=None)
def main(config):
    """Main entry point for PPO training with Hydra configuration management.

    Args:
        config: Hydra configuration dictionary containing training parameters.
    """
    run_ppo(config)


# Define a function to run the PPO-like training process
def run_ppo(config) -> None:
    """Initialize Ray cluster and run distributed PPO training process.

    Args:
        config: Training configuration object containing all necessary parameters
            for distributed PPO training including Ray initialization settings,
            model paths, and training hyperparameters.
""" # Check if Ray is not initialized if not ray.is_initialized(): # Initialize Ray with a local cluster configuration # Set environment variables in the runtime environment to control tokenizer parallelism, # NCCL debug level, VLLM logging level, and allow runtime LoRA updating # `num_cpus` specifies the number of CPU cores Ray can use, obtained from the configuration ray.init( runtime_env=get_ppo_ray_runtime_env(), num_cpus=config.ray_init.num_cpus, ) # Create a remote instance of the TaskRunner class, and # Execute the `run` method of the TaskRunner instance remotely and wait for it to complete if ( is_cuda_available and config.trainer.get("profile_steps") is not None and len(config.trainer.get("profile_steps", [])) > 0 ): nsight_options = OmegaConf.to_container(config.trainer.controller_nsight_options) runner = TaskRunner.options(runtime_env={"nsight": nsight_options}).remote() else: runner = TaskRunner.remote() ray.get(runner.run.remote(config)) # [Optional] get the path of the timeline trace file from the configuration, default to None # This file is used for performance analysis timeline_json_file = config.ray_init.get("timeline_json_file", None) if timeline_json_file: ray.timeline(filename=timeline_json_file) @ray.remote(num_cpus=1) # please make sure main_task is not scheduled on head class TaskRunner: """Ray remote class for executing distributed PPO training tasks. This class encapsulates the main training logic and runs as a Ray remote actor to enable distributed execution across multiple nodes and GPUs. """ def run(self, config): """Execute the main PPO training workflow. This method sets up the distributed training environment, initializes workers, datasets, and reward functions, then starts the training process. Args: config: Training configuration object containing all parameters needed for setting up and running the PPO training process. """ # Print the initial configuration. `resolve=True` will evaluate symbolic values. from pprint import pprint from omegaconf import OmegaConf from verl.utils.fs import copy_to_local print(f"TaskRunner hostname: {socket.gethostname()}, PID: {os.getpid()}") pprint(OmegaConf.to_container(config, resolve=True)) OmegaConf.resolve(config) # Download the checkpoint from HDFS to the local machine. # `use_shm` determines whether to use shared memory, which could lead to faster model loading if turned on local_path = copy_to_local( config.actor_rollout_ref.model.path, use_shm=config.actor_rollout_ref.model.get("use_shm", False) ) # Instantiate the tokenizer and processor. from verl.utils import hf_processor, hf_tokenizer trust_remote_code = config.data.get("trust_remote_code", False) tokenizer = hf_tokenizer(local_path, trust_remote_code=trust_remote_code) # Used for multimodal LLM, could be None processor = hf_processor(local_path, trust_remote_code=trust_remote_code, use_fast=True) # Define worker classes based on the actor strategy. if config.actor_rollout_ref.actor.strategy in {"fsdp", "fsdp2"}: assert config.critic.strategy in {"fsdp", "fsdp2"} from verl.single_controller.ray import RayWorkerGroup from verl.workers.fsdp_workers import ActorRolloutRefWorker, AsyncActorRolloutRefWorker use_legacy_worker_impl = config.trainer.get("use_legacy_worker_impl", "auto") if use_legacy_worker_impl in ["auto", "enable"]: # import warnings # warnings.warn(f"Legacy worker impl is going to be deprecated, will be removed in the future. 
\ # Please set trainer.use_legacy_worker_impl = false to switch to the new worker implementation.") from verl.workers.fsdp_workers import CriticWorker elif use_legacy_worker_impl == "disable": from verl.workers.roles import CriticWorker print("Using new worker implementation") else: raise ValueError(f"Invalid use_legacy_worker_impl: {use_legacy_worker_impl}") actor_rollout_cls = ( AsyncActorRolloutRefWorker if config.actor_rollout_ref.rollout.mode == "async" else ActorRolloutRefWorker ) ray_worker_group_cls = RayWorkerGroup elif config.actor_rollout_ref.actor.strategy == "megatron": assert config.actor_rollout_ref.actor.strategy == config.critic.strategy from verl.single_controller.ray.megatron import NVMegatronRayWorkerGroup from verl.workers.megatron_workers import ActorRolloutRefWorker, AsyncActorRolloutRefWorker, CriticWorker actor_rollout_cls = ( AsyncActorRolloutRefWorker if config.actor_rollout_ref.rollout.mode == "async" else ActorRolloutRefWorker ) ray_worker_group_cls = NVMegatronRayWorkerGroup else: raise NotImplementedError from verl.trainer.ppo.ray_trainer import ResourcePoolManager, Role # Map roles to their corresponding remote worker classes. role_worker_mapping = { Role.ActorRollout: ray.remote(actor_rollout_cls), Role.Critic: ray.remote(CriticWorker), } # Define the resource pool specification. # Map roles to the resource pool. global_pool_id = "global_pool" resource_pool_spec = { global_pool_id: [config.trainer.n_gpus_per_node] * config.trainer.nnodes, } mapping = { Role.ActorRollout: global_pool_id, Role.Critic: global_pool_id, } # We should adopt a multi-source reward function here: # - for rule-based rm, we directly call a reward score # - for model-based rm, we call a model # - for code related prompt, we send to a sandbox if there are test cases # finally, we combine all the rewards together # The reward type depends on the tag of the data if config.reward_model.enable: if config.reward_model.strategy in {"fsdp", "fsdp2"}: from verl.workers.fsdp_workers import RewardModelWorker elif config.reward_model.strategy == "megatron": from verl.workers.megatron_workers import RewardModelWorker else: raise NotImplementedError role_worker_mapping[Role.RewardModel] = ray.remote(RewardModelWorker) mapping[Role.RewardModel] = global_pool_id # Add a reference policy worker if KL loss or KL reward is used. if config.algorithm.use_kl_in_reward or config.actor_rollout_ref.actor.use_kl_loss: role_worker_mapping[Role.RefPolicy] = ray.remote(ActorRolloutRefWorker) mapping[Role.RefPolicy] = global_pool_id # Load the reward manager for training and validation. reward_fn = load_reward_manager( config, tokenizer, num_examine=0, **config.reward_model.get("reward_kwargs", {}) ) val_reward_fn = load_reward_manager( config, tokenizer, num_examine=1, **config.reward_model.get("reward_kwargs", {}) ) resource_pool_manager = ResourcePoolManager(resource_pool_spec=resource_pool_spec, mapping=mapping) from verl.utils.dataset.rl_dataset import collate_fn # Create training and validation datasets. train_dataset = create_rl_dataset(config.data.train_files, config.data, tokenizer, processor, is_train=True) val_dataset = create_rl_dataset(config.data.val_files, config.data, tokenizer, processor, is_train=False) train_sampler = create_rl_sampler(config.data, train_dataset) # Initialize the PPO trainer. 
trainer = RayPPOTrainer( config=config, tokenizer=tokenizer, processor=processor, role_worker_mapping=role_worker_mapping, resource_pool_manager=resource_pool_manager, ray_worker_group_cls=ray_worker_group_cls, reward_fn=reward_fn, val_reward_fn=val_reward_fn, train_dataset=train_dataset, val_dataset=val_dataset, collate_fn=collate_fn, train_sampler=train_sampler, ) # Initialize the workers of the trainer. trainer.init_workers() # Start the training process. trainer.fit() def create_rl_dataset(data_paths, data_config, tokenizer, processor, is_train=True): """Create a dataset. Arguments: data_paths: List of paths to data files. data_config: The data config. tokenizer (Tokenizer): The tokenizer. processor (Processor): The processor. Returns: dataset (Dataset): The dataset. """ from torch.utils.data import Dataset from verl.utils.dataset.rl_dataset import RLHFDataset # Check if a custom dataset class is specified in the data configuration # and if the path to the custom class is provided if "custom_cls" in data_config and data_config.custom_cls.get("path", None) is not None: # Dynamically load the custom dataset class dataset_cls = load_extern_type(data_config.custom_cls.path, data_config.custom_cls.name) # Verify that the custom dataset class inherits from torch.utils.data.Dataset if not issubclass(dataset_cls, Dataset): raise TypeError( f"The custom dataset class '{data_config.custom_cls.name}' from " f"'{data_config.custom_cls.path}' must inherit from torch.utils.data.Dataset" ) elif "datagen" in data_config and data_config.datagen.get("path", None) is not None and is_train: # If a data generation strategy is specified, use the DynamicGenDataset class from verl.utils.dataset.dynamicgen_dataset import DynamicGenDataset dataset_cls = DynamicGenDataset print("Using DynamicGenDataset for data generation.") else: # Use the default RLHFDataset class if no custom class is specified dataset_cls = RLHFDataset print(f"Using dataset class: {dataset_cls.__name__}") # Instantiate the dataset using the determined dataset class dataset = dataset_cls( data_files=data_paths, tokenizer=tokenizer, processor=processor, config=data_config, ) return dataset def create_rl_sampler(data_config, dataset): """Create a sampler for the dataset. Arguments: data_config: The data config. dataset (Dataset): The dataset. Returns: sampler (Sampler): The sampler. """ import torch from torch.utils.data import RandomSampler, SequentialSampler if data_config.sampler is not None and data_config.sampler.get("class_path", None) is not None: curriculum_class = load_extern_type( data_config.sampler.class_path, data_config.sampler.class_name, ) sampler = curriculum_class( data_source=dataset, data_config=data_config, ) assert isinstance(sampler, AbstractSampler) assert data_config.get("dataloader_num_workers", 8) == 0, ( "If using curriculum, num_workers must be 0 to prevent data caching. " "If the dataloader caches data before the batch is done the " "curriculum sampler won't have the opportunity to reorder it. " ) # Use a sampler to facilitate checkpoint resumption. # If shuffling is enabled in the data configuration, create a random sampler. elif data_config.shuffle: train_dataloader_generator = torch.Generator() train_dataloader_generator.manual_seed(data_config.get("seed", 1)) sampler = RandomSampler(data_source=dataset, generator=train_dataloader_generator) else: # If shuffling is disabled, use a sequential sampler to iterate through the dataset in order. 
sampler = SequentialSampler(data_source=dataset) return sampler if __name__ == "__main__": main() ================================================ FILE: verl_rl/verl/trainer/ppo/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: verl_rl/verl/trainer/ppo/core_algos.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Core functions to implement PPO algorithms. The function implemented in this file should be used by trainer with different distributed strategies to implement PPO-like algorithms. """ __all__ = ["register_adv_est", "get_adv_estimator_fn", "AdvantageEstimator"] from collections import defaultdict from enum import Enum from typing import Optional import numpy as np import torch import verl.utils.torch_functional as verl_F from verl.trainer.config import AlgoConfig POLICY_LOSS_REGISTRY = {} def register_policy_loss(name): """Register a policy loss function with the given name. Args: name (str): The name to register the policy loss function under. Returns: function: Decorator function that registers the policy loss function. """ def decorator(func): POLICY_LOSS_REGISTRY[name] = func return func return decorator def get_policy_loss_fn(name): """Get the policy loss with a given name. Args: name: `(str)` The name of the policy loss. Returns: `(callable)`: The policy loss function. """ loss_name = name if loss_name not in POLICY_LOSS_REGISTRY: raise ValueError( f"Unsupported loss mode: {loss_name}. Supported modes are: {list(POLICY_LOSS_REGISTRY.keys())}" ) return POLICY_LOSS_REGISTRY[loss_name] ADV_ESTIMATOR_REGISTRY = {} def register_adv_est(name_or_enum): """Decorator to register a advantage estimator function with a given name. Args: name_or_enum: `(str)` or `(AdvantageEstimator)` The name or enum of the advantage estimator. """ def decorator(fn): name = name_or_enum.value if isinstance(name_or_enum, Enum) else name_or_enum if name in ADV_ESTIMATOR_REGISTRY and ADV_ESTIMATOR_REGISTRY[name] != fn: raise ValueError( f"Adv estimator {name} has already been registered: {ADV_ESTIMATOR_REGISTRY[name]} vs {fn}" ) ADV_ESTIMATOR_REGISTRY[name] = fn return fn return decorator def get_adv_estimator_fn(name_or_enum): """Get the advantage estimator function with a given name. 
    Args:
        name_or_enum: `(str)` or `(AdvantageEstimator)`
            The name or enum of the advantage estimator.

    Returns:
        `(callable)`: The advantage estimator function.
    """
    name = name_or_enum.value if isinstance(name_or_enum, Enum) else name_or_enum
    if name not in ADV_ESTIMATOR_REGISTRY:
        raise ValueError(f"Unknown advantage estimator: {name}")
    return ADV_ESTIMATOR_REGISTRY[name]


class AdvantageEstimator(str, Enum):
    """Using an enumeration class to avoid spelling errors in adv_estimator.

    Note(haibin.lin): this enum class is immutable after creation. Extending this
    enum for new estimators may not be necessary since users can always just call
    `verl.trainer.ppo.core_algos.register` with a string name for a custom
    advantage estimator instead.
    """

    GAE = "gae"
    GRPO = "grpo"
    REINFORCE_PLUS_PLUS = "reinforce_plus_plus"
    REINFORCE_PLUS_PLUS_BASELINE = "reinforce_plus_plus_baseline"
    REMAX = "remax"
    RLOO = "rloo"
    OPO = "opo"
    GRPO_PASSK = "grpo_passk"
    GPG = "gpg"


class AdaptiveKLController:
    """
    Adaptive KL controller described in the paper:
    https://arxiv.org/pdf/1909.08593.pdf
    """

    def __init__(self, init_kl_coef, target_kl, horizon):
        self.value = init_kl_coef
        self.target = target_kl
        self.horizon = horizon

    def update(self, current_kl, n_steps):
        """Update the KL coefficient based on current KL divergence.

        Args:
            current_kl (float): Current KL divergence value.
            n_steps (int): Number of steps taken.
        """
        target = self.target
        proportional_error = np.clip(current_kl / target - 1, -0.2, 0.2)
        mult = 1 + proportional_error * n_steps / self.horizon
        self.value *= mult


class FixedKLController:
    """Fixed KL controller."""

    def __init__(self, kl_coef):
        self.value = kl_coef

    def update(self, current_kl, n_steps):
        """Update method for fixed KL controller (no-op).

        Args:
            current_kl (float): Current KL divergence value (unused).
            n_steps (int): Number of steps taken (unused).
        """
        pass


def get_kl_controller(kl_ctrl):
    """Factory function to create the appropriate KL controller based on configuration.

    Args:
        kl_ctrl: Configuration object containing KL controller settings.

    Returns:
        KL controller instance (FixedKLController or AdaptiveKLController).

    Raises:
        NotImplementedError: If controller type is not supported.
        AssertionError: If adaptive controller horizon is not positive.
    """
    if kl_ctrl.type == "fixed":
        return FixedKLController(kl_coef=kl_ctrl.kl_coef)
    elif kl_ctrl.type == "adaptive":
        assert kl_ctrl.horizon > 0, f"horizon must be larger than 0. Got {kl_ctrl.horizon}"
        return AdaptiveKLController(init_kl_coef=kl_ctrl.kl_coef, target_kl=kl_ctrl.target_kl, horizon=kl_ctrl.horizon)
    else:
        raise NotImplementedError


@register_adv_est(AdvantageEstimator.GAE)  # or simply: @register_adv_est("gae")
def compute_gae_advantage_return(
    token_level_rewards: torch.Tensor,
    values: torch.Tensor,
    response_mask: torch.Tensor,
    gamma: torch.Tensor,
    lam: torch.Tensor,
):
    """Adapted from https://github.com/huggingface/trl/blob/main/trl/trainer/ppo_trainer.py

    Args:
        token_level_rewards: `(torch.Tensor)`
            shape is (bs, response_length)
        values: `(torch.Tensor)`
            shape is (bs, response_length)
        response_mask: `(torch.Tensor)`
            shape is (bs, response_length). [EOS] mask. Tokens after [EOS] have mask zero.
gamma is `(float)` discounted factor used in RL lam: `(float)` lambda value when computing Generalized Advantage Estimation (https://arxiv.org/abs/1506.02438) Returns: advantages: `(torch.Tensor)` shape: (bs, response_length) Returns: `(torch.Tensor)` shape: (bs, response_length) """ with torch.no_grad(): nextvalues = 0 lastgaelam = 0 advantages_reversed = [] gen_len = token_level_rewards.shape[-1] for t in reversed(range(gen_len)): delta = token_level_rewards[:, t] + gamma * nextvalues - values[:, t] lastgaelam_ = delta + gamma * lam * lastgaelam # skip values and TD-error on observation tokens nextvalues = values[:, t] * response_mask[:, t] + (1 - response_mask[:, t]) * nextvalues lastgaelam = lastgaelam_ * response_mask[:, t] + (1 - response_mask[:, t]) * lastgaelam advantages_reversed.append(lastgaelam) advantages = torch.stack(advantages_reversed[::-1], dim=1) returns = advantages + values advantages = verl_F.masked_whiten(advantages, response_mask) return advantages, returns # NOTE(sgm): this implementation only consider outcome supervision, where the reward is a scalar. @register_adv_est(AdvantageEstimator.GRPO) # or simply: @register_adv_est("grpo") def compute_grpo_outcome_advantage( token_level_rewards: torch.Tensor, response_mask: torch.Tensor, index: np.ndarray, epsilon: float = 1e-6, norm_adv_by_std_in_grpo: bool = True, config: Optional[AlgoConfig] = None, ) -> tuple[torch.Tensor, torch.Tensor]: """ Compute advantage for GRPO, operating only on Outcome reward (with only one scalar reward for each response). Args: token_level_rewards: `(torch.Tensor)` shape is (bs, response_length) response_mask: `(torch.Tensor)` shape is (bs, response_length) index: `(np.ndarray)` index array for grouping epsilon: `(float)` small value to avoid division by zero norm_adv_by_std_in_grpo: `(bool)` whether to scale the GRPO advantage config: `(Optional[AlgoConfig])` algorithm configuration object Note: If norm_adv_by_std_in_grpo is True, the advantage is scaled by the std, as in the original GRPO. If False, the advantage is not scaled, as in Dr.GRPO (https://arxiv.org/abs/2503.20783). Returns: advantages: `(torch.Tensor)` shape is (bs, response_length) Returns: `(torch.Tensor)` shape is (bs, response_length) """ scores = token_level_rewards.sum(dim=-1) id2score = defaultdict(list) id2mean = {} id2std = {} with torch.no_grad(): bsz = scores.shape[0] for i in range(bsz): id2score[index[i]].append(scores[i]) for idx in id2score: if len(id2score[idx]) == 1: id2mean[idx] = torch.tensor(0.0) id2std[idx] = torch.tensor(1.0) elif len(id2score[idx]) > 1: id2mean[idx] = torch.mean(torch.tensor(id2score[idx])) id2std[idx] = torch.std(torch.tensor([id2score[idx]])) else: raise ValueError(f"no score in prompt index: {idx}") for i in range(bsz): if norm_adv_by_std_in_grpo: scores[i] = (scores[i] - id2mean[index[i]]) / (id2std[index[i]] + epsilon) else: scores[i] = scores[i] - id2mean[index[i]] scores = scores.unsqueeze(-1) * response_mask return scores, scores @register_adv_est(AdvantageEstimator.GRPO_PASSK) # or simply: @register_adv_est("grpo_passk") def compute_grpo_passk_outcome_advantage( token_level_rewards: torch.Tensor, response_mask: torch.Tensor, index: np.ndarray, epsilon: float = 1e-6, norm_adv_by_std_in_grpo: bool = True, config: Optional[AlgoConfig] = None, **kwargs, ) -> tuple[torch.Tensor, torch.Tensor]: """ Compute advantage for Pass@k using a GRPO-style outcome reward formulation. Only the best response per group gets a non-zero advantage: r_max - r_second_max. 
Implemented as described in https://arxiv.org/abs/2503.19595. Args: token_level_rewards: (bs, response_length) response_mask: (bs, response_length) index: (bs,) → group ID per sample epsilon: float for numerical stability config: (AlgoConfig) algorithm settings, which contains "norm_adv_by_std_in_grpo" Returns: advantages: (bs, response_length) returns: (bs, response_length) """ assert config is not None # if True, normalize advantage by std within group norm_adv_by_std_in_grpo = config.get("norm_adv_by_std_in_grpo", True) scores = token_level_rewards.sum(dim=-1) # (bs,) advantages = torch.zeros_like(scores) id2scores = defaultdict(list) id2indices = defaultdict(list) with torch.no_grad(): bsz = scores.shape[0] for i in range(bsz): idx = index[i] id2scores[idx].append(scores[i]) id2indices[idx].append(i) for idx in id2scores: rewards = torch.stack(id2scores[idx]) # (k,) if rewards.numel() < 2: raise ValueError( f"Pass@k requires at least 2 samples per group. Got {rewards.numel()} for group {idx}." ) topk, topk_idx = torch.topk(rewards, 2) r_max, r_second_max = topk[0], topk[1] i_max = id2indices[idx][topk_idx[0].item()] advantage = r_max - r_second_max if norm_adv_by_std_in_grpo: std = torch.std(rewards) advantage = advantage / (std + epsilon) advantages[i_max] = advantage advantages = advantages.unsqueeze(-1) * response_mask return advantages, advantages @register_adv_est( AdvantageEstimator.REINFORCE_PLUS_PLUS_BASELINE ) # or simply: @register_adv_est("reinforce_plus_plus_baseline") def compute_reinforce_plus_plus_baseline_outcome_advantage( token_level_rewards: torch.Tensor, response_mask: torch.Tensor, index: torch.Tensor, epsilon: float = 1e-6, config: Optional[AlgoConfig] = None, **kwargs, ) -> tuple[torch.Tensor, torch.Tensor]: """ Compute advantage for RF++-baseline (https://arxiv.org/abs/2501.03262), operating only on Outcome reward (with only one scalar reward for each response). 
Args: token_level_rewards: `(torch.Tensor)` shape: (bs, response_length) response_mask: `(torch.Tensor)` shape: (bs, response_length) config: (AlgoConfig) algorithm config Returns: advantages: `(torch.Tensor)` shape: (bs, response_length) Returns: `(torch.Tensor)` shape: (bs, response_length) """ response_length = token_level_rewards.shape[-1] scores = token_level_rewards.sum(dim=-1) id2score = defaultdict(list) id2mean = {} with torch.no_grad(): bsz = scores.shape[0] for i in range(bsz): id2score[index[i]].append(scores[i]) for idx in id2score: if len(id2score[idx]) == 1: id2mean[idx] = torch.tensor(0.0) elif len(id2score[idx]) > 1: id2mean[idx] = torch.mean(torch.tensor(id2score[idx])) else: raise ValueError(f"no score in prompt index: {idx}") for i in range(bsz): scores[i] = scores[i] - id2mean[index[i]] scores = scores.unsqueeze(-1).tile([1, response_length]) * response_mask scores = verl_F.masked_whiten(scores, response_mask) * response_mask return scores, scores @register_adv_est(AdvantageEstimator.RLOO) # or simply: @register_adv_est("rloo") def compute_rloo_outcome_advantage( token_level_rewards: torch.Tensor, response_mask: torch.Tensor, index: np.ndarray, epsilon: float = 1e-6, config: Optional[AlgoConfig] = None, **kwargs, ) -> tuple[torch.Tensor, torch.Tensor]: """ Compute advantage for RLOO based on https://arxiv.org/abs/2402.14740 Args: token_level_rewards: `(torch.Tensor)` shape: (bs, response_length) response_mask: `(torch.Tensor)` shape: (bs, response_length) config: (AlgoConfig) algorithm config Returns: advantages: `(torch.Tensor)` shape: (bs, response_length) Returns: `(torch.Tensor)` shape: (bs, response_length) """ scores = token_level_rewards.sum(dim=-1) id2score = defaultdict(list) id2mean = {} with torch.no_grad(): bsz = scores.shape[0] for i in range(bsz): id2score[index[i]].append(scores[i]) for idx in id2score: if len(id2score[idx]) == 1: id2mean[idx] = torch.tensor(0.0) elif len(id2score[idx]) > 1: id2mean[idx] = torch.mean(torch.tensor(id2score[idx])) else: raise ValueError(f"no score in prompt index: {idx}") for i in range(bsz): response_num = len(id2score[index[i]]) if response_num > 1: scores[i] = scores[i] * response_num / (response_num - 1) - id2mean[index[i]] * response_num / ( response_num - 1 ) scores = scores.unsqueeze(-1) * response_mask return scores, scores @register_adv_est(AdvantageEstimator.OPO) # or simply: @register_adv_est("opo") def compute_opo_outcome_advantage( token_level_rewards: torch.Tensor, response_mask: torch.Tensor, index: np.ndarray, epsilon: float = 1e-6, config: Optional[AlgoConfig] = None, **kwargs, ) -> tuple[torch.Tensor, torch.Tensor]: """ Compute advantage for OPO based on https://arxiv.org/pdf/2505.23585 Args: token_level_rewards: `(torch.Tensor)` shape: (bs, response_length) response_mask: `(torch.Tensor)` shape: (bs, response_length) config: (AlgoConfig) algorithm config Returns: advantages: `(torch.Tensor)` shape: (bs, response_length) Returns: `(torch.Tensor)` shape: (bs, response_length) """ response_length = response_mask.sum(dim=-1) scores = token_level_rewards.sum(dim=-1) id2score = defaultdict(list) id2len = defaultdict(list) id2bsl = {} with torch.no_grad(): bsz = scores.shape[0] for i in range(bsz): id2score[index[i]].append(scores[i]) id2len[index[i]].append(response_length[i]) for idx in id2score: if len(id2score[idx]) == 1: id2bsl[idx] = torch.tensor(0.0) elif len(id2score[idx]) > 1: score_tensor = torch.tensor(id2score[idx]) len_tensor = torch.tensor(id2len[idx]) id2bsl[idx] = (len_tensor * 
score_tensor).sum() / len_tensor.sum() else: raise ValueError(f"no score in prompt index: {idx}") for i in range(bsz): scores[i] = scores[i] - id2bsl[index[i]] scores = scores.unsqueeze(-1) * response_mask return scores, scores @register_adv_est(AdvantageEstimator.REINFORCE_PLUS_PLUS) # or simply: @register_adv_est("reinforce_plus_plus") def compute_reinforce_plus_plus_outcome_advantage( token_level_rewards: torch.Tensor, response_mask: torch.Tensor, config: Optional[AlgoConfig] = None, **kwargs ) -> tuple[torch.Tensor, torch.Tensor]: """ Compute advantage for REINFORCE++. This implementation is based on the paper: https://arxiv.org/abs/2501.03262 Args: token_level_rewards: `(torch.Tensor)` shape: (bs, response_length) response_mask: `(torch.Tensor)` shape: (bs, response_length) config: (AlgoConfig) algorithm config Returns: advantages: `(torch.Tensor)` shape: (bs, response_length) Returns: `(torch.Tensor)` shape: (bs, response_length) """ assert config is not None gamma = config.gamma with torch.no_grad(): returns = torch.zeros_like(token_level_rewards) running_return = 0 for t in reversed(range(token_level_rewards.shape[1])): running_return = token_level_rewards[:, t] + gamma * running_return returns[:, t] = running_return # Reset after EOS running_return = running_return * response_mask[:, t] advantages = verl_F.masked_whiten(returns, response_mask) advantages = advantages * response_mask return advantages, returns @register_adv_est(AdvantageEstimator.REMAX) # or simply: @register_adv_est("remax") def compute_remax_outcome_advantage( token_level_rewards: torch.Tensor, reward_baselines: torch.Tensor, response_mask: torch.Tensor, config: Optional[AlgoConfig] = None, **kwargs, ) -> tuple[torch.Tensor, torch.Tensor]: """ Compute advantage for ReMax, operating only on Outcome reward This implementation is based on the paper: https://arxiv.org/abs/2310.10505 (with only one scalar reward for each response). Args: token_level_rewards: `(torch.Tensor)` shape: (bs, response_length) reward_baselines: `(torch.Tensor)` shape: (bs,) response_mask: `(torch.Tensor)` shape: (bs, response_length) config: (AlgoConfig) algorithm config Returns: advantages: `(torch.Tensor)` shape: (bs, response_length) Returns: `(torch.Tensor)` shape: (bs, response_length) """ with torch.no_grad(): returns = (token_level_rewards * response_mask).flip(dims=[-1]).cumsum(dim=-1).flip(dims=[-1]) advantages = returns - reward_baselines.unsqueeze(-1) * response_mask return advantages, returns @register_adv_est(AdvantageEstimator.GPG) # or simply: @register_adv_est("gpg") def compute_gpg_outcome_advantage( token_level_rewards: torch.Tensor, response_mask: torch.Tensor, index: np.ndarray, epsilon: float = 1e-6, f_norm: float = 1.0, alpha: float = 1.0, config=None, **kwargs, ): """ Compute advantage for GPG, operating only on Outcome reward (with only one scalar reward for each response). 
    Args:
        token_level_rewards: `(torch.Tensor)`
            shape: (bs, response_length)
        response_mask: `(torch.Tensor)`
            shape: (bs, response_length)
        index: `(np.ndarray)`
            shape: (bs,)
        epsilon: (float)
        f_norm: (float)
        alpha: (float)
        config: (dict) algorithm config

    Returns:
        advantages: `(torch.Tensor)`
            shape: (bs, response_length)
        returns: `(torch.Tensor)`
            shape: (bs, response_length)
    """
    scores = token_level_rewards.sum(dim=-1)

    id2score = defaultdict(list)
    id2mean = {}
    id2std = {}

    with torch.no_grad():
        bsz = scores.shape[0]
        m = torch.count_nonzero(scores)
        alpha = bsz / m.clamp(min=1)

        for i in range(bsz):
            id2score[index[i]].append(scores[i])
        for idx in id2score:
            if len(id2score[idx]) == 1:
                id2mean[idx] = torch.tensor(0.0)
                id2std[idx] = torch.tensor(1.0)
            elif len(id2score[idx]) > 1:
                id2mean[idx] = torch.mean(torch.tensor(id2score[idx]))
                id2std[idx] = torch.std(torch.tensor(id2score[idx]))
            else:
                raise ValueError(f"no score in prompt index: {idx}")
        for i in range(bsz):
            scores[i] = alpha * (scores[i] - id2mean[index[i]]) / f_norm
        scores = scores.unsqueeze(-1) * response_mask

    return scores, scores


def compute_rewards(token_level_scores, old_log_prob, ref_log_prob, kl_ratio):
    """Compute token-level rewards with KL penalty.

    Args:
        token_level_scores (torch.Tensor): Token-level reward scores.
        old_log_prob (torch.Tensor): Log probabilities from current policy.
        ref_log_prob (torch.Tensor): Log probabilities from reference policy.
        kl_ratio (float): KL penalty coefficient.

    Returns:
        torch.Tensor: Token-level rewards with KL penalty applied.
    """
    kl = old_log_prob - ref_log_prob
    return token_level_scores - kl * kl_ratio


def agg_loss(loss_mat: torch.Tensor, loss_mask: torch.Tensor, loss_agg_mode: str):
    """
    Aggregate the loss matrix into a scalar.

    Args:
        loss_mat: `(torch.Tensor)`
            shape: (bs, response_length)
        loss_mask: `(torch.Tensor)`
            shape: (bs, response_length)
        loss_agg_mode: (str) the method used to aggregate the loss matrix into a scalar

    Returns:
        loss: `a scalar torch.Tensor`
            aggregated loss
    """
    if loss_agg_mode == "token-mean":
        loss = verl_F.masked_mean(loss_mat, loss_mask)
    elif loss_agg_mode == "seq-mean-token-sum":
        seq_losses = torch.sum(loss_mat * loss_mask, dim=-1)  # token-sum
        loss = torch.mean(seq_losses)  # seq-mean
    elif loss_agg_mode == "seq-mean-token-mean":
        seq_losses = torch.sum(loss_mat * loss_mask, dim=-1) / torch.sum(loss_mask, dim=-1)  # token-mean
        loss = torch.mean(seq_losses)  # seq-mean
    elif loss_agg_mode == "seq-mean-token-sum-norm":
        seq_losses = torch.sum(loss_mat * loss_mask, dim=-1)
        loss = torch.sum(seq_losses) / loss_mask.shape[-1]
        # The divisor (loss_mask.shape[-1]) should ideally be constant
        # throughout training to well-replicate the DrGRPO paper.
        # TODO: Perhaps add user-defined normalizer argument to
        # agg_loss to ensure divisor stays constant throughout.
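    # Illustrative comparison of the modes (numbers assumed, not from the codebase):
    # loss_mat = [[1, 2], [3, 0]], loss_mask = [[1, 1], [1, 0]]
    #   token-mean:              (1 + 2 + 3) / 3        = 2.0
    #   seq-mean-token-sum:      mean(1 + 2, 3)         = 3.0
    #   seq-mean-token-mean:     mean(3 / 2, 3 / 1)     = 2.25
    #   seq-mean-token-sum-norm: (3 + 3) / 2            = 3.0  (divides by response_length)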
else: raise ValueError(f"Invalid loss_agg_mode: {loss_agg_mode}") return loss @register_policy_loss("gspo") def compute_policy_loss_gspo( old_log_prob, log_prob, advantages, response_mask, cliprange=None, cliprange_low=None, cliprange_high=None, loss_agg_mode="seq-mean-token-mean" ): clip_ratio_low = cliprange_low if cliprange_low is not None else cliprange clip_ratio_high = cliprange_high if cliprange_high is not None else cliprange negative_approx_kl = log_prob - old_log_prob # compute sequence-level importance ratio: # si(θ) = (π_θ(yi|x)/π_θold(yi|x))^(1/|yi|) = # exp [(1/|y_i|) * Σ_t log(π_θ(y_i,t|x,y_i, 1.0, ( "The lower bound of the clip_ratio_c for dual-clip PPO should be greater than 1.0," + f" but get the value: {clip_ratio_c}." ) negative_approx_kl = log_prob - old_log_prob # Clamp negative_approx_kl for stability negative_approx_kl = torch.clamp(negative_approx_kl, min=-20.0, max=20.0) ratio = torch.exp(negative_approx_kl) ppo_kl = verl_F.masked_mean(-negative_approx_kl, response_mask) pg_losses1 = -advantages * ratio if cliprange_low is None: cliprange_low = cliprange if cliprange_high is None: cliprange_high = cliprange pg_losses2 = -advantages * torch.clamp( ratio, 1 - cliprange_low, 1 + cliprange_high ) # - clip(ratio, 1-cliprange, 1+cliprange) * A clip_pg_losses1 = torch.maximum( pg_losses1, pg_losses2 ) # max(-ratio * A, -clip(ratio, 1-cliprange, 1+cliprange) * A) pg_clipfrac = verl_F.masked_mean(torch.gt(pg_losses2, pg_losses1).float(), response_mask) pg_losses3 = -advantages * clip_ratio_c clip_pg_losses2 = torch.min(pg_losses3, clip_pg_losses1) pg_clipfrac_lower = verl_F.masked_mean( torch.gt(clip_pg_losses1, pg_losses3) * (advantages < 0).float(), response_mask ) pg_losses = torch.where(advantages < 0, clip_pg_losses2, clip_pg_losses1) pg_loss = agg_loss(loss_mat=pg_losses, loss_mask=response_mask, loss_agg_mode=loss_agg_mode) return pg_loss, pg_clipfrac, ppo_kl, pg_clipfrac_lower @register_policy_loss("gpg") def compute_policy_loss_gpg(old_log_prob, log_prob, advantages, response_mask, loss_agg_mode="token-mean", config=None): """Adapted from https://github.com/AMAP-ML/GPG/blob/main/VisualThinker-R1-Zero/src/open-r1-multimodal/src/open_r1/trainer/grpo_trainer.py#L495 Args: log_prob: `(torch.Tensor)` shape: (bs, response_length) advantages: `(torch.Tensor)` shape: (bs, response_length) response_mask: `(torch.Tensor)` shape: (bs, response_length) return: pg_loss: `a scalar torch.Tensor` policy gradient loss computed via GPG """ pg_losses = -log_prob * advantages pg_loss = agg_loss(loss_mat=pg_losses, loss_mask=response_mask, loss_agg_mode=loss_agg_mode) return pg_loss, torch.tensor(0.0), torch.tensor(0.0), torch.tensor(0.0) @register_policy_loss("clip_cov") def compute_policy_loss_clip_cov( old_log_prob: torch.Tensor, log_prob: torch.Tensor, advantages: torch.Tensor, response_mask: torch.Tensor, loss_agg_mode: str = "token-mean", config: Optional[AlgoConfig] = None, ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: """ Compute the clipped policy objective and related metrics for Clip-Cov. Adapted from https://github.com/PRIME-RL/Entropy-Mechanism-of-RL/blob/main/verl/trainer/ppo/core_algos.py Args: old_log_prob (torch.Tensor): Log-probabilities of actions under the old policy, shape (batch_size, response_length). log_prob (torch.Tensor): Log-probabilities of actions under the current policy, shape (batch_size, response_length). advantages (torch.Tensor): Advantage estimates for each action, shape (batch_size, response_length). 
        response_mask (torch.Tensor):
            Mask indicating which tokens to include in the loss, shape (batch_size, response_length).
        loss_agg_mode (str, optional):
            Aggregation mode for `agg_loss`. Defaults to "token-mean".
        config (AlgoConfig, optional):
            Algorithm config supplying the remaining hyperparameters:
            clip_ratio (float): Clipping parameter ε for standard PPO. See https://arxiv.org/abs/1707.06347.
            clip_ratio_low (float): Lower clip range for dual-clip PPO. Defaults to `clip_ratio`.
            clip_ratio_high (float): Upper clip range for dual-clip PPO. Defaults to `clip_ratio`.
            clip_cov_ratio (float): Ratio of tokens whose covariance is clipped. Defaults to 0.0002.
            clip_cov_lb (float): Lower bound for clipping covariance. Defaults to 1.0.
            clip_cov_ub (float): Upper bound for clipping covariance. Defaults to 5.0.
    """
    clip_cov_ratio = config.policy_loss.clip_cov_ratio if config.policy_loss.clip_cov_ratio is not None else 0.0002
    cliprange = config.clip_ratio
    cliprange_low = config.clip_ratio_low if config.clip_ratio_low is not None else cliprange
    cliprange_high = config.clip_ratio_high if config.clip_ratio_high is not None else cliprange
    clip_cov_ub = config.policy_loss.clip_cov_ub if config.policy_loss.clip_cov_ub is not None else 5.0
    clip_cov_lb = config.policy_loss.clip_cov_lb if config.policy_loss.clip_cov_lb is not None else 1.0

    assert clip_cov_ratio > 0, "clip_cov_ratio should be larger than 0."

    negative_approx_kl = log_prob - old_log_prob
    ratio = torch.exp(negative_approx_kl)
    ppo_kl = verl_F.masked_mean(-negative_approx_kl, response_mask)

    pg_losses1 = -advantages * ratio
    if cliprange_low is None:
        cliprange_low = cliprange
    if cliprange_high is None:
        cliprange_high = cliprange

    corr = torch.ones_like(advantages)
    pg_losses2 = -advantages * torch.clamp(ratio, 1 - cliprange_low, 1 + cliprange_high)
    clip_by_origin = (pg_losses2 > pg_losses1) & (response_mask > 0)

    cov_all = (advantages - verl_F.masked_mean(advantages, response_mask)) * (
        log_prob - verl_F.masked_mean(log_prob.detach(), response_mask)
    )
    cov_all[response_mask == 0] = -torch.inf
    cov_all[clip_by_origin] = -torch.inf

    clip_num = max(int(clip_cov_ratio * response_mask.sum().item()), 1)
    top_k_idx = (cov_all < clip_cov_ub) & (cov_all > clip_cov_lb) & (response_mask > 0)
    top_k_idx = torch.nonzero(top_k_idx)

    if len(top_k_idx) > 0:
        perm = torch.randperm(len(top_k_idx))
        top_k_idx = top_k_idx[perm[: min(clip_num, len(top_k_idx))]]
    else:
        top_k_idx = torch.empty((0, 2), device=cov_all.device, dtype=torch.long)

    corr[top_k_idx[:, 0], top_k_idx[:, 1]] = 0

    pg_clipfrac = verl_F.masked_mean((corr == 0).float(), response_mask)
    pg_losses = torch.maximum(pg_losses1, pg_losses2) * corr
    pg_loss = agg_loss(loss_mat=pg_losses, loss_mask=response_mask, loss_agg_mode=loss_agg_mode)

    return pg_loss, pg_clipfrac, ppo_kl, torch.tensor(0.0)


@register_policy_loss("kl_cov")
def compute_policy_loss_kl_cov(
    old_log_prob: torch.Tensor,
    log_prob: torch.Tensor,
    advantages: torch.Tensor,
    response_mask: torch.Tensor,
    loss_agg_mode: str = "token-mean",
    config: Optional[AlgoConfig] = None,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
    """
    Compute the clipped policy objective and related metrics for KL-Cov.
    Adapted from
    https://github.com/PRIME-RL/Entropy-Mechanism-of-RL/blob/main/verl/trainer/ppo/core_algos.py

    Args:
        old_log_prob (torch.Tensor):
            Log-probabilities of actions under the old policy, shape (batch_size, response_length).
        log_prob (torch.Tensor):
            Log-probabilities of actions under the current policy, shape (batch_size, response_length).
advantages (torch.Tensor): Advantage estimates for each action, shape (batch_size, response_length). response_mask (torch.Tensor): Mask indicating which tokens to include in the loss, shape (batch_size, response_length). loss_agg_mode (str, optional): Aggregation mode for `agg_loss`. Defaults to "token-mean". kl_cov_ratio (float, optional): Ratio for selecting the top-k covariance values. Defaults to 0.0002. ppo_kl_coef (float, optional): Coefficient for the KL penalty term in the loss. Defaults to 1. """ kl_cov_ratio = config.policy_loss.kl_cov_ratio if config.policy_loss.kl_cov_ratio is not None else 0.0002 ppo_kl_coef = config.policy_loss.ppo_kl_coef if config.policy_loss.ppo_kl_coef is not None else 1.0 assert kl_cov_ratio > 0, "kl_cov_ratio should be larger than 0." negative_approx_kl = log_prob - old_log_prob abs_kl = negative_approx_kl.abs() ratio = torch.exp(negative_approx_kl) ppo_kl_abs = verl_F.masked_mean(negative_approx_kl.abs(), response_mask) pg_losses1 = -advantages * ratio pg_losses_kl = -advantages * ratio + ppo_kl_coef * abs_kl pg_losses = pg_losses1 all_valid = response_mask > 0 all_valid_idx = torch.nonzero(all_valid.reshape(-1), as_tuple=True)[0] all_valid_adv = advantages[all_valid].detach().reshape(-1).cpu() all_valid_logp = log_prob[all_valid].detach().reshape(-1).cpu() k = min(kl_cov_ratio, len(all_valid_adv)) if k != 0: cov_lst_all = (all_valid_adv - all_valid_adv.mean()) * (all_valid_logp - all_valid_logp.mean()) k_percent_nums = max(1, int(len(cov_lst_all) * kl_cov_ratio)) large_cov_idxs = torch.topk(cov_lst_all, k_percent_nums, largest=True).indices if len(large_cov_idxs) != 0: large_cov_idxs = all_valid_idx[large_cov_idxs] pg_losses[large_cov_idxs // advantages.shape[1], large_cov_idxs % advantages.shape[1]] = pg_losses_kl[ large_cov_idxs // advantages.shape[1], large_cov_idxs % advantages.shape[1] ] pg_loss = agg_loss(loss_mat=pg_losses, loss_mask=response_mask, loss_agg_mode=loss_agg_mode) return pg_loss, torch.tensor(0.0), ppo_kl_abs, torch.tensor(0.0) def compute_entropy_loss(logits, response_mask, loss_agg_mode: str = "token-mean"): """Compute categorical entropy loss (For backward compatibility) Args: logits (torch.Tensor): shape is (bs, response_length, vocab_size) response_mask (torch.Tensor): shape is (bs, response_length) Returns: entropy: a scalar torch.Tensor """ # compute entropy token_entropy = verl_F.entropy_from_logits(logits) # (bs, response_len) entropy_loss = agg_loss(loss_mat=token_entropy, loss_mask=response_mask, loss_agg_mode=loss_agg_mode) return entropy_loss def compute_value_loss( vpreds: torch.Tensor, returns: torch.Tensor, values: torch.Tensor, response_mask: torch.Tensor, cliprange_value: float, loss_agg_mode: str = "token-mean", ): """ Compute the clipped value-function loss for PPO. Copied from https://github.com/huggingface/trl/blob/main/trl/trainer/ppo_trainer.py#L1151 Args: vpreds (torch.FloatTensor): Predicted values from the value head, shape (batch_size, response_length). values (torch.FloatTensor): Old (baseline) values from the value head, shape (batch_size, response_length). returns (torch.FloatTensor): Ground-truth returns, shape (batch_size, response_length). response_mask (torch.Tensor): Mask indicating which tokens to include in the value loss calculation. cliprange_value (float): Clip range for value prediction updates. loss_agg_mode (str, optional): Aggregation mode for `agg_loss`. Defaults to "token-mean". Returns: vf_loss (torch.FloatTensor): A scalar tensor containing the aggregated value-function loss. 
        vf_clipfrac (float):
            Fraction of elements where the clipped loss was used.
    """
    vpredclipped = verl_F.clip_by_value(vpreds, values - cliprange_value, values + cliprange_value)
    vf_losses1 = (vpreds - returns) ** 2
    vf_losses2 = (vpredclipped - returns) ** 2
    clipped_vf_losses = torch.max(vf_losses1, vf_losses2)
    vf_loss = 0.5 * agg_loss(loss_mat=clipped_vf_losses, loss_mask=response_mask, loss_agg_mode=loss_agg_mode)
    vf_clipfrac = verl_F.masked_mean(torch.gt(vf_losses2, vf_losses1).float(), response_mask)
    return vf_loss, vf_clipfrac


def kl_penalty(logprob: torch.FloatTensor, ref_logprob: torch.FloatTensor, kl_penalty) -> torch.FloatTensor:
    """Compute KL divergence given logprob and ref_logprob.
    Copied from https://github.com/huggingface/trl/blob/main/trl/trainer/ppo_trainer.py#L1104
    See more description in http://joschu.net/blog/kl-approx.html

    Args:
        logprob (torch.FloatTensor): Log-probabilities under the current policy.
        ref_logprob (torch.FloatTensor): Log-probabilities under the reference policy.
        kl_penalty (str): Estimator to use; one of "kl"/"k1", "abs", "mse"/"k2", "low_var_kl"/"k3", or "full".

    Returns:
        torch.FloatTensor: Token-level KL penalty values.
    """
    if kl_penalty in ("kl", "k1"):
        return logprob - ref_logprob

    if kl_penalty == "abs":
        return (logprob - ref_logprob).abs()

    if kl_penalty in ("mse", "k2"):
        return 0.5 * (logprob - ref_logprob).square()

    # J. Schulman. Approximating kl divergence, 2020.
    # URL http://joschu.net/blog/kl-approx.html.
    if kl_penalty in ("low_var_kl", "k3"):
        kl = ref_logprob - logprob
        # For numerical stability
        kl = torch.clamp(kl, min=-20, max=20)
        ratio = torch.exp(kl)
        kld = (ratio - kl - 1).contiguous()
        return torch.clamp(kld, min=-10, max=10)

    if kl_penalty == "full":
        # so, here logprob and ref_logprob should contain the logits for every token in vocabulary
        raise NotImplementedError

    raise NotImplementedError


def compute_pf_ppo_reweight_data(
    data,
    reweight_method: str = "pow",
    weight_pow: float = 2.0,
):
    """Reweight the data based on the token_level_scores.

    Args:
        data: DataProto object, containing batch, non_tensor_batch and meta_info
        reweight_method: str, choices: "pow", "max_min", "max_random"
        weight_pow: float, the power of the weight

    Returns:
        DataProto: the resampled data, with batch, non_tensor_batch and meta_info
        reindexed by importance-weighted sampling.
    """

    @torch.no_grad()
    def compute_weights(scores: torch.Tensor, reweight_method: str, weight_pow: float) -> torch.Tensor:
        """Compute importance weights for resampling based on scores.

        Args:
            scores (torch.Tensor): Tensor of scores to compute weights from.
            reweight_method (str): Method for computing weights ('pow', 'max_min', 'max_random').
            weight_pow (float): Power exponent for 'pow' method.

        Returns:
            torch.Tensor: Computed importance weights.

        Raises:
            ValueError: If reweight_method is not supported.
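
        Example (illustrative values only):
            >>> compute_weights(torch.tensor([1.0, -2.0, 0.5]), "pow", 2.0)
            tensor([1.0000, 4.0000, 0.2500])  # |scores| ** weight_pow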
""" if reweight_method == "pow": weights = torch.pow(torch.abs(scores), weight_pow) elif reweight_method == "max_min": max_score = torch.max(scores) min_score = torch.min(scores) weights = torch.where((scores == max_score) | (scores == min_score), 1.0, 0.0) elif reweight_method == "max_random": max_score = torch.max(scores) weights = torch.where(scores == max_score, 0.4, 0.1) else: raise ValueError(f"Unsupported reweight_method: {reweight_method}") return weights scores = data.batch["token_level_scores"].sum(dim=-1) weights = compute_weights(scores, reweight_method, weight_pow) weights = torch.clamp(weights + 1e-8, min=1e-8) batch_size = scores.shape[0] sample_indices = torch.multinomial(weights, batch_size, replacement=True) resampled_batch = {key: tensor[sample_indices] for key, tensor in data.batch.items()} sample_indices_np = sample_indices.numpy() resampled_non_tensor_batch = {} for key, array in data.non_tensor_batch.items(): if isinstance(array, np.ndarray): resampled_non_tensor_batch[key] = array[sample_indices_np] else: resampled_non_tensor_batch[key] = [array[i] for i in sample_indices_np] resampled_meta_info = {} for key, value in data.meta_info.items(): if isinstance(value, list) and len(value) == batch_size: resampled_meta_info[key] = [value[i] for i in sample_indices_np] else: resampled_meta_info[key] = value from copy import deepcopy resampled_data = deepcopy(data) resampled_data.batch = type(data.batch)(resampled_batch) resampled_data.batch.batch_size = data.batch.batch_size resampled_data.non_tensor_batch = resampled_non_tensor_batch resampled_data.meta_info = resampled_meta_info return resampled_data ================================================ FILE: verl_rl/verl/trainer/ppo/metric_utils.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Metrics related to the PPO trainer. """ from collections import defaultdict from functools import partial from typing import Any, Callable import numpy as np import torch from verl import DataProto from verl.utils.import_utils import deprecated @deprecated("verl.utils.metric.reduce_metrics") def reduce_metrics(metrics: dict[str, list[Any]]) -> dict[str, Any]: """ Reduces a dictionary of metric lists by computing the mean of each list. Args: metrics: A dictionary mapping metric names to lists of metric values. Returns: A dictionary with the same keys but with each list replaced by its mean value. Example: >>> metrics = {"loss": [1.0, 2.0, 3.0], "accuracy": [0.8, 0.9, 0.7]} >>> reduce_metrics(metrics) {"loss": 2.0, "accuracy": 0.8} """ from verl.utils.metric import reduce_metrics return reduce_metrics(metrics) def _compute_response_info(batch: DataProto) -> dict[str, Any]: """ Computes information about prompts and responses from a batch. This is an internal helper function that extracts masks and lengths for prompts and responses. Args: batch: A DataProto object containing batch data with responses and attention masks. 
Returns: A dictionary containing: - response_mask: Attention mask for the response tokens - prompt_length: Tensor of prompt lengths for each item in the batch - response_length: Tensor of response lengths for each item in the batch """ response_length = batch.batch["responses"].shape[-1] prompt_mask = batch.batch["attention_mask"][:, :-response_length] response_mask = batch.batch["attention_mask"][:, -response_length:] prompt_length = prompt_mask.sum(-1).float() response_length = response_mask.sum(-1).float() # (batch_size,) return dict( response_mask=response_mask, prompt_length=prompt_length, response_length=response_length, ) def compute_data_metrics(batch: DataProto, use_critic: bool = True) -> dict[str, Any]: """ Computes various metrics from a batch of data for PPO training. This function calculates metrics related to scores, rewards, advantages, returns, values, and sequence lengths from a batch of data. It provides statistical information (mean, max, min) for each metric category. Args: batch: A DataProto object containing batch data with token-level scores, rewards, advantages, etc. use_critic: Whether to include critic-specific metrics. Defaults to True. Returns: A dictionary of metrics including: - critic/score/mean, max, min: Statistics about sequence scores - critic/rewards/mean, max, min: Statistics about sequence rewards - critic/advantages/mean, max, min: Statistics about advantages - critic/returns/mean, max, min: Statistics about returns - critic/values/mean, max, min: Statistics about critic values (if use_critic=True) - critic/vf_explained_var: Explained variance of the value function (if use_critic=True) - response_length/mean, max, min, clip_ratio: Statistics about response lengths - prompt_length/mean, max, min, clip_ratio: Statistics about prompt lengths - num_turns/mean, max, min: Statistics about the number of multi-turn conversations """ sequence_score = batch.batch["token_level_scores"].sum(-1) sequence_reward = batch.batch["token_level_rewards"].sum(-1) advantages = batch.batch["advantages"] returns = batch.batch["returns"] max_response_length = batch.batch["responses"].shape[-1] prompt_mask = batch.batch["attention_mask"][:, :-max_response_length].bool() response_mask = batch.batch["response_mask"].bool() max_prompt_length = prompt_mask.size(-1) response_info = _compute_response_info(batch) prompt_length = response_info["prompt_length"] response_length = response_info["response_length"] valid_adv = torch.masked_select(advantages, response_mask) valid_returns = torch.masked_select(returns, response_mask) if use_critic: values = batch.batch["values"] valid_values = torch.masked_select(values, response_mask) return_diff_var = torch.var(valid_returns - valid_values) return_var = torch.var(valid_returns) metrics = { # score "critic/score/mean": torch.mean(sequence_score).detach().item(), "critic/score/max": torch.max(sequence_score).detach().item(), "critic/score/min": torch.min(sequence_score).detach().item(), # reward "critic/rewards/mean": torch.mean(sequence_reward).detach().item(), "critic/rewards/max": torch.max(sequence_reward).detach().item(), "critic/rewards/min": torch.min(sequence_reward).detach().item(), # adv "critic/advantages/mean": torch.mean(valid_adv).detach().item(), "critic/advantages/max": torch.max(valid_adv).detach().item(), "critic/advantages/min": torch.min(valid_adv).detach().item(), # returns "critic/returns/mean": torch.mean(valid_returns).detach().item(), "critic/returns/max": torch.max(valid_returns).detach().item(), 
"critic/returns/min": torch.min(valid_returns).detach().item(), **( { # values "critic/values/mean": torch.mean(valid_values).detach().item(), "critic/values/max": torch.max(valid_values).detach().item(), "critic/values/min": torch.min(valid_values).detach().item(), # vf explained var "critic/vf_explained_var": (1.0 - return_diff_var / (return_var + 1e-5)).detach().item(), } if use_critic else {} ), # response length "response_length/mean": torch.mean(response_length).detach().item(), "response_length/max": torch.max(response_length).detach().item(), "response_length/min": torch.min(response_length).detach().item(), "response_length/clip_ratio": torch.mean(torch.eq(response_length, max_response_length).float()) .detach() .item(), # prompt length "prompt_length/mean": torch.mean(prompt_length).detach().item(), "prompt_length/max": torch.max(prompt_length).detach().item(), "prompt_length/min": torch.min(prompt_length).detach().item(), "prompt_length/clip_ratio": torch.mean(torch.eq(prompt_length, max_prompt_length).float()).detach().item(), } # multi-turn conversation if "__num_turns__" in batch.non_tensor_batch: num_turns = batch.non_tensor_batch["__num_turns__"] metrics["num_turns/min"] = num_turns.min() metrics["num_turns/max"] = num_turns.max() metrics["num_turns/mean"] = num_turns.mean() return metrics def compute_timing_metrics(batch: DataProto, timing_raw: dict[str, float]) -> dict[str, Any]: """ Computes timing metrics for different processing stages in PPO training. This function calculates both raw timing metrics (in seconds) and per-token timing metrics (in milliseconds) for various processing stages like generation, reference computation, value computation, advantage computation, and model updates. Args: batch: A DataProto object containing batch data with responses and attention masks. timing_raw: A dictionary mapping stage names to their execution times in seconds. Returns: A dictionary containing: - timing_s/{name}: Raw timing in seconds for each stage - timing_per_token_ms/{name}: Per-token timing in milliseconds for each stage Note: Different stages use different token counts for normalization: - "gen" uses only response tokens - Other stages ("ref", "values", "adv", "update_critic", "update_actor") use all tokens (prompt + response) """ response_info = _compute_response_info(batch) num_prompt_tokens = torch.sum(response_info["prompt_length"]).item() num_response_tokens = torch.sum(response_info["response_length"]).item() num_overall_tokens = num_prompt_tokens + num_response_tokens num_tokens_of_section = { "gen": num_response_tokens, **{name: num_overall_tokens for name in ["ref", "values", "adv", "update_critic", "update_actor"]}, } return { **{f"timing_s/{name}": value for name, value in timing_raw.items()}, **{ f"timing_per_token_ms/{name}": timing_raw[name] * 1000 / num_tokens_of_section[name] for name in set(num_tokens_of_section.keys()) & set(timing_raw.keys()) }, } def compute_throughout_metrics(batch: DataProto, timing_raw: dict[str, float], n_gpus: int) -> dict[str, Any]: """ Computes throughput metrics for PPO training. This function calculates performance metrics related to token processing speed, including the total number of tokens processed, time per step, and throughput (tokens per second per GPU). Args: batch: A DataProto object containing batch data with meta information about token counts. timing_raw: A dictionary mapping stage names to their execution times in seconds. Must contain a "step" key with the total step time. 
n_gpus: Number of GPUs used for training. Returns: A dictionary containing: - perf/total_num_tokens: Total number of tokens processed in the batch - perf/time_per_step: Time taken for the step in seconds - perf/throughput: Tokens processed per second per GPU Note: The throughput is calculated as total_tokens / (time * n_gpus) to normalize across different GPU counts. """ total_num_tokens = sum(batch.meta_info["global_token_num"]) time = timing_raw["step"] # estimated_flops, promised_flops = flops_function.estimate_flops(num_tokens, time) # f'Actual TFLOPs/s/GPU​': estimated_flops/(n_gpus), # f'Theoretical TFLOPs/s/GPU​': promised_flops, return { "perf/total_num_tokens": total_num_tokens, "perf/time_per_step": time, "perf/throughput": total_num_tokens / (time * n_gpus), } def bootstrap_metric( data: list[Any], subset_size: int, reduce_fns: list[Callable[[np.ndarray], float]], n_bootstrap: int = 1000, seed: int = 42, ) -> list[tuple[float, float]]: """ Performs bootstrap resampling to estimate statistics of metrics. This function uses bootstrap resampling to estimate the mean and standard deviation of metrics computed by the provided reduction functions on random subsets of the data. Args: data: List of data points to bootstrap from. subset_size: Size of each bootstrap sample. reduce_fns: List of functions that compute a metric from a subset of data. n_bootstrap: Number of bootstrap iterations. Defaults to 1000. seed: Random seed for reproducibility. Defaults to 42. Returns: A list of tuples, where each tuple contains (mean, std) for a metric corresponding to each reduction function in reduce_fns. Example: >>> data = [1, 2, 3, 4, 5] >>> reduce_fns = [np.mean, np.max] >>> bootstrap_metric(data, 3, reduce_fns) [(3.0, 0.5), (4.5, 0.3)] # Example values """ np.random.seed(seed) bootstrap_metric_lsts = [[] for _ in range(len(reduce_fns))] for _ in range(n_bootstrap): bootstrap_idxs = np.random.choice(len(data), size=subset_size, replace=True) bootstrap_data = [data[i] for i in bootstrap_idxs] for i, reduce_fn in enumerate(reduce_fns): bootstrap_metric_lsts[i].append(reduce_fn(bootstrap_data)) return [(np.mean(lst), np.std(lst)) for lst in bootstrap_metric_lsts] def calc_maj_val(data: list[dict[str, Any]], vote_key: str, val_key: str) -> float: """ Calculate a value based on majority voting. This function identifies the most common value for a specified vote key in the data, then returns the corresponding value for that majority vote. Args: data: List of dictionaries, where each dictionary contains both vote_key and val_key. vote_key: The key in each dictionary used for voting/counting. val_key: The key in each dictionary whose value will be returned for the majority vote. Returns: The value associated with the most common vote. Example: >>> data = [ ... {"pred": "A", "val": 0.9}, ... {"pred": "B", "val": 0.8}, ... {"pred": "A", "val": 0.7} ... ] >>> calc_maj_val(data, vote_key="pred", val_key="val") 0.9 # Returns the first "val" for the majority vote "A" """ vote2vals = defaultdict(list) for d in data: vote2vals[d[vote_key]].append(d[val_key]) vote2cnt = {k: len(v) for k, v in vote2vals.items()} maj_vote = max(vote2cnt, key=vote2cnt.get) maj_val = vote2vals[maj_vote][0] return maj_val def process_validation_metrics( data_sources: list[str], sample_inputs: list[str], infos_dict: dict[str, list[Any]], seed: int = 42 ) -> dict[str, dict[str, dict[str, float]]]: """ Process validation metrics into a structured format with statistical analysis. 
This function organizes validation metrics by data source and prompt, then computes various statistical measures including means, standard deviations, best/worst values, and majority voting results. It also performs bootstrap sampling to estimate statistics for different sample sizes. Args: data_sources: List of data source identifiers for each sample. sample_inputs: List of input prompts corresponding to each sample. infos_dict: Dictionary mapping variable names to lists of values for each sample. seed: Random seed for bootstrap sampling. Defaults to 42. Returns: A nested dictionary with the structure: { data_source: { variable_name: { metric_name: value } } } Where metric_name includes: - "mean@N": Mean value across N samples - "std@N": Standard deviation across N samples - "best@N/mean": Mean of the best values in bootstrap samples of size N - "best@N/std": Standard deviation of the best values in bootstrap samples - "worst@N/mean": Mean of the worst values in bootstrap samples - "worst@N/std": Standard deviation of the worst values in bootstrap samples - "maj@N/mean": Mean of majority voting results in bootstrap samples (if "pred" exists) - "maj@N/std": Standard deviation of majority voting results (if "pred" exists) Example: >>> data_sources = ["source1", "source1", "source2"] >>> sample_inputs = ["prompt1", "prompt1", "prompt2"] >>> infos_dict = {"score": [0.8, 0.9, 0.7], "pred": ["A", "A", "B"]} >>> result = process_validation_metrics(data_sources, sample_inputs, infos_dict) >>> # result will contain statistics for each data source and variable """ # Group metrics by data source, prompt and variable data_src2prompt2var2vals = defaultdict(lambda: defaultdict(lambda: defaultdict(list))) for sample_idx, data_source in enumerate(data_sources): prompt = sample_inputs[sample_idx] var2vals = data_src2prompt2var2vals[data_source][prompt] for var_name, var_vals in infos_dict.items(): var2vals[var_name].append(var_vals[sample_idx]) # Calculate metrics for each group data_src2prompt2var2metric = defaultdict(lambda: defaultdict(lambda: defaultdict(dict))) for data_source, prompt2var2vals in data_src2prompt2var2vals.items(): for prompt, var2vals in prompt2var2vals.items(): for var_name, var_vals in var2vals.items(): if isinstance(var_vals[0], str): continue metric = {} n_resps = len(var_vals) metric[f"mean@{n_resps}"] = np.mean(var_vals) if n_resps > 1: metric[f"std@{n_resps}"] = np.std(var_vals) ns = [] n = 2 while n < n_resps: ns.append(n) n *= 2 ns.append(n_resps) for n in ns: [(bon_mean, bon_std), (won_mean, won_std)] = bootstrap_metric( data=var_vals, subset_size=n, reduce_fns=[np.max, np.min], seed=seed ) metric[f"best@{n}/mean"], metric[f"best@{n}/std"] = bon_mean, bon_std metric[f"worst@{n}/mean"], metric[f"worst@{n}/std"] = won_mean, won_std if var2vals.get("pred", None) is not None: vote_data = [ {"val": val, "pred": pred} for val, pred in zip(var_vals, var2vals["pred"], strict=True) ] [(maj_n_mean, maj_n_std)] = bootstrap_metric( data=vote_data, subset_size=n, reduce_fns=[partial(calc_maj_val, vote_key="pred", val_key="val")], seed=seed, ) metric[f"maj@{n}/mean"], metric[f"maj@{n}/std"] = maj_n_mean, maj_n_std data_src2prompt2var2metric[data_source][prompt][var_name] = metric # Aggregate metrics across prompts data_src2var2metric2prompt_vals = defaultdict(lambda: defaultdict(lambda: defaultdict(list))) for data_source, prompt2var2metric in data_src2prompt2var2metric.items(): for prompt, var2metric in prompt2var2metric.items(): for var_name, metric in var2metric.items(): for 
metric_name, metric_val in metric.items():
                    data_src2var2metric2prompt_vals[data_source][var_name][metric_name].append(metric_val)

    data_src2var2metric2val = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))
    for data_source, var2metric2prompt_vals in data_src2var2metric2prompt_vals.items():
        for var_name, metric2prompt_vals in var2metric2prompt_vals.items():
            for metric_name, prompt_vals in metric2prompt_vals.items():
                data_src2var2metric2val[data_source][var_name][metric_name] = np.mean(prompt_vals)

    return data_src2var2metric2val



================================================
FILE: verl_rl/verl/trainer/ppo/ray_trainer.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023-2024 SGLang Team
# Copyright 2025 ModelBest Inc. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
PPO Trainer with Ray-based single controller.
This trainer supports model-agnostic model initialization with HuggingFace.
"""

import json
import os
import uuid
from collections import defaultdict
from copy import deepcopy
from dataclasses import dataclass, field
from enum import Enum
from pprint import pprint
from typing import Optional

import numpy as np
import ray
import torch
import wandb
from omegaconf import OmegaConf, open_dict
from torch.utils.data import Dataset, Sampler
from torchdata.stateful_dataloader import StatefulDataLoader
from tqdm import tqdm

from verl import DataProto
from verl.experimental.dataset.sampler import AbstractCurriculumSampler
from verl.protocol import pad_dataproto_to_divisor, unpad_dataproto
from verl.single_controller.base import Worker
from verl.single_controller.ray import RayClassWithInitArgs, RayResourcePool, RayWorkerGroup
from verl.single_controller.ray.base import create_colocated_worker_cls
from verl.trainer.config import AlgoConfig
from verl.trainer.ppo import core_algos
from verl.trainer.ppo.core_algos import AdvantageEstimator, agg_loss
from verl.trainer.ppo.metric_utils import (
    compute_data_metrics,
    compute_throughout_metrics,
    compute_timing_metrics,
    process_validation_metrics,
)
from verl.trainer.ppo.reward import compute_reward, compute_reward_async
from verl.utils.checkpoint.checkpoint_manager import find_latest_ckpt_path, should_save_ckpt_esi
from verl.utils.debug import marked_timer
from verl.utils.metric import (
    reduce_metrics,
)
from verl.utils.seqlen_balancing import get_seqlen_balanced_partitions, log_seqlen_unbalance
from verl.utils.torch_functional import masked_mean
from verl.utils.tracking import ValidationGenerationsLogger

WorkerType = type[Worker]


class Role(Enum):
    """
    To create more roles dynamically, you can subclass Role and add new members
    """

    Actor = 0
    Rollout = 1
    ActorRollout = 2
    Critic = 3
    RefPolicy = 4
    RewardModel = 5
    ActorRolloutRef = 6


@dataclass
class ResourcePoolManager:
    """
    Define a resource pool specification. Resource pool will be initialized first.
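
    Example (illustrative spec; pool name and node sizes are assumed):
        resource_pool_spec = {"global_pool": [8, 8]}  # two nodes with 8 GPUs each
        mapping = {Role.ActorRollout: "global_pool", Role.Critic: "global_pool"}
        manager = ResourcePoolManager(resource_pool_spec=resource_pool_spec, mapping=mapping)
        manager.create_resource_pool()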
""" resource_pool_spec: dict[str, list[int]] mapping: dict[Role, str] resource_pool_dict: dict[str, RayResourcePool] = field(default_factory=dict) def create_resource_pool(self): """Create Ray resource pools for distributed training. Initializes resource pools based on the resource pool specification, with each pool managing GPU resources across multiple nodes. For FSDP backend, uses max_colocate_count=1 to merge WorkerGroups. For Megatron backend, uses max_colocate_count>1 for different models. """ for resource_pool_name, process_on_nodes in self.resource_pool_spec.items(): # max_colocate_count means the number of WorkerGroups (i.e. processes) in each RayResourcePool # For FSDP backend, we recommend using max_colocate_count=1 that merge all WorkerGroups into one. # For Megatron backend, we recommend using max_colocate_count>1 # that can utilize different WorkerGroup for differnt models resource_pool = RayResourcePool( process_on_nodes=process_on_nodes, use_gpu=True, max_colocate_count=1, name_prefix=resource_pool_name ) self.resource_pool_dict[resource_pool_name] = resource_pool self._check_resource_available() def get_resource_pool(self, role: Role) -> RayResourcePool: """Get the resource pool of the worker_cls""" return self.resource_pool_dict[self.mapping[role]] def get_n_gpus(self) -> int: """Get the number of gpus in this cluster.""" return sum([n_gpus for process_on_nodes in self.resource_pool_spec.values() for n_gpus in process_on_nodes]) def _check_resource_available(self): """Check if the resource pool can be satisfied in this ray cluster.""" node_available_resources = ray.state.available_resources_per_node() node_available_gpus = { node: node_info.get("GPU", 0) if "GPU" in node_info else node_info.get("NPU", 0) for node, node_info in node_available_resources.items() } # check total required gpus can be satisfied total_available_gpus = sum(node_available_gpus.values()) total_required_gpus = sum( [n_gpus for process_on_nodes in self.resource_pool_spec.values() for n_gpus in process_on_nodes] ) if total_available_gpus < total_required_gpus: raise ValueError( f"Total available GPUs {total_available_gpus} is less than total desired GPUs {total_required_gpus}" ) # check each resource pool can be satisfied, O(#resource_pools * #nodes) for resource_pool_name, process_on_nodes in self.resource_pool_spec.items(): num_gpus, num_nodes = process_on_nodes[0], len(process_on_nodes) for node, available_gpus in node_available_gpus.items(): if available_gpus >= num_gpus: node_available_gpus[node] -= num_gpus num_nodes -= 1 if num_nodes == 0: break if num_nodes > 0: raise ValueError( f"Resource pool {resource_pool_name}: {num_gpus}*{num_nodes}" + "cannot be satisfied in this ray cluster" ) def apply_kl_penalty(data: DataProto, kl_ctrl: core_algos.AdaptiveKLController, kl_penalty="kl"): """Apply KL penalty to the token-level rewards. This function computes the KL divergence between the reference policy and current policy, then applies a penalty to the token-level rewards based on this divergence. Args: data (DataProto): The data containing batched model outputs and inputs. kl_ctrl (core_algos.AdaptiveKLController): Controller for adaptive KL penalty. kl_penalty (str, optional): Type of KL penalty to apply. Defaults to "kl". multi_turn (bool, optional): Whether the data is from a multi-turn conversation. Defaults to False. 
Returns: tuple: A tuple containing: - The updated data with token-level rewards adjusted by KL penalty - A dictionary of metrics related to the KL penalty """ response_mask = data.batch["response_mask"] token_level_scores = data.batch["token_level_scores"] batch_size = data.batch.batch_size[0] # compute kl between ref_policy and current policy # When apply_kl_penalty, algorithm.use_kl_in_reward=True, so the reference model has been enabled. kld = core_algos.kl_penalty( data.batch["old_log_probs"], data.batch["ref_log_prob"], kl_penalty=kl_penalty ) # (batch_size, response_length) kld = kld * response_mask beta = kl_ctrl.value token_level_rewards = token_level_scores - beta * kld current_kl = masked_mean(kld, mask=response_mask, axis=-1) # average over sequence current_kl = torch.mean(current_kl, dim=0).item() # according to https://github.com/huggingface/trl/blob/951ca1841f29114b969b57b26c7d3e80a39f75a0/trl/trainer/ppo_trainer.py#L837 kl_ctrl.update(current_kl=current_kl, n_steps=batch_size) data.batch["token_level_rewards"] = token_level_rewards metrics = {"actor/reward_kl_penalty": current_kl, "actor/reward_kl_penalty_coeff": beta} return data, metrics def compute_response_mask(data: DataProto): """Compute the attention mask for the response part of the sequence. This function extracts the portion of the attention mask that corresponds to the model's response, which is used for masking computations that should only apply to response tokens. Args: data (DataProto): The data containing batched model outputs and inputs. Returns: torch.Tensor: The attention mask for the response tokens. """ responses = data.batch["responses"] response_length = responses.size(1) attention_mask = data.batch["attention_mask"] return attention_mask[:, -response_length:] def compute_advantage( data: DataProto, adv_estimator: AdvantageEstimator, gamma: float = 1.0, lam: float = 1.0, num_repeat: int = 1, norm_adv_by_std_in_grpo: bool = True, config: Optional[AlgoConfig] = None, ) -> DataProto: """Compute advantage estimates for policy optimization. This function computes advantage estimates using various estimators like GAE, GRPO, REINFORCE++, etc. The advantage estimates are used to guide policy optimization in RL algorithms. Args: data (DataProto): The data containing batched model outputs and inputs. adv_estimator (AdvantageEstimator): The advantage estimator to use (e.g., GAE, GRPO, REINFORCE++). gamma (float, optional): Discount factor for future rewards. Defaults to 1.0. lam (float, optional): Lambda parameter for GAE. Defaults to 1.0. num_repeat (int, optional): Number of times to repeat the computation. Defaults to 1. norm_adv_by_std_in_grpo (bool, optional): Whether to normalize advantages by standard deviation in GRPO. Defaults to True. config (dict, optional): Configuration dictionary for algorithm settings. Defaults to None. Returns: DataProto: The updated data with computed advantages and returns. 
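
    Example (illustrative call; assumes `data` already holds "token_level_rewards" and a
    "uid" field for group-based estimators, and `algo_config` is the trainer's AlgoConfig):
        data = compute_advantage(
            data, adv_estimator=AdvantageEstimator.GRPO, norm_adv_by_std_in_grpo=True, config=algo_config
        )
        advantages = data.batch["advantages"]  # (bs, response_length)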
""" # Back-compatible with trainers that do not compute response mask in fit if "response_mask" not in data.batch.keys(): data.batch["response_mask"] = compute_response_mask(data) # prepare response group if adv_estimator == AdvantageEstimator.GAE: # Compute advantages and returns using Generalized Advantage Estimation (GAE) advantages, returns = core_algos.compute_gae_advantage_return( token_level_rewards=data.batch["token_level_rewards"], values=data.batch["values"], response_mask=data.batch["response_mask"], gamma=gamma, lam=lam, ) data.batch["advantages"] = advantages data.batch["returns"] = returns if config.get("use_pf_ppo", False): data = core_algos.compute_pf_ppo_reweight_data( data, config.pf_ppo.reweight_method, config.pf_ppo.weight_pow, ) elif adv_estimator == AdvantageEstimator.GRPO: # Initialize the mask for GRPO calculation grpo_calculation_mask = data.batch["response_mask"] # Call compute_grpo_outcome_advantage with parameters matching its definition advantages, returns = core_algos.compute_grpo_outcome_advantage( token_level_rewards=data.batch["token_level_rewards"], response_mask=grpo_calculation_mask, index=data.non_tensor_batch["uid"], norm_adv_by_std_in_grpo=norm_adv_by_std_in_grpo, ) data.batch["advantages"] = advantages data.batch["returns"] = returns else: # handle all other adv estimator type other than GAE and GRPO adv_estimator_fn = core_algos.get_adv_estimator_fn(adv_estimator) adv_kwargs = { "token_level_rewards": data.batch["token_level_rewards"], "response_mask": data.batch["response_mask"], "config": config, } if "uid" in data.non_tensor_batch: # optional adv_kwargs["index"] = data.non_tensor_batch["uid"] if "reward_baselines" in data.batch: # optional adv_kwargs["reward_baselines"] = data.batch["reward_baselines"] # calculate advantage estimator advantages, returns = adv_estimator_fn(**adv_kwargs) data.batch["advantages"] = advantages data.batch["returns"] = returns return data class RayPPOTrainer: """Distributed PPO trainer using Ray for scalable reinforcement learning. This trainer orchestrates distributed PPO training across multiple nodes and GPUs, managing actor rollouts, critic training, and reward computation with Ray backend. Supports various model architectures including FSDP, Megatron, and vLLM integration. """ # TODO: support each role have individual ray_worker_group_cls, # i.e., support different backend of different role def __init__( self, config, tokenizer, role_worker_mapping: dict[Role, WorkerType], resource_pool_manager: ResourcePoolManager, ray_worker_group_cls: RayWorkerGroup = RayWorkerGroup, processor=None, reward_fn=None, val_reward_fn=None, train_dataset: Optional[Dataset] = None, val_dataset: Optional[Dataset] = None, collate_fn=None, train_sampler: Optional[Sampler] = None, device_name=None, ): """ Initialize distributed PPO trainer with Ray backend. Note that this trainer runs on the driver process on a single CPU/GPU node. Args: config: Configuration object containing training parameters. tokenizer: Tokenizer used for encoding and decoding text. role_worker_mapping (dict[Role, WorkerType]): Mapping from roles to worker classes. resource_pool_manager (ResourcePoolManager): Manager for Ray resource pools. ray_worker_group_cls (RayWorkerGroup, optional): Class for Ray worker groups. Defaults to RayWorkerGroup. processor: Optional data processor, used for multimodal data reward_fn: Function for computing rewards during training. val_reward_fn: Function for computing rewards during validation. 
        train_dataset (Optional[Dataset], optional): Training dataset. Defaults to None.
        val_dataset (Optional[Dataset], optional): Validation dataset. Defaults to None.
        collate_fn: Function to collate data samples into batches.
        train_sampler (Optional[Sampler], optional): Sampler for the training dataset. Defaults to None.
        device_name (str, optional): Device name for training (e.g., "cuda", "cpu"). Defaults to None.
        """

        # Store the tokenizer for text processing
        self.tokenizer = tokenizer
        self.processor = processor
        self.config = config
        self.reward_fn = reward_fn
        self.val_reward_fn = val_reward_fn

        self.hybrid_engine = config.actor_rollout_ref.hybrid_engine
        assert self.hybrid_engine, "Currently, only support hybrid engine"

        if self.hybrid_engine:
            assert Role.ActorRollout in role_worker_mapping, f"{role_worker_mapping.keys()=}"

        self.role_worker_mapping = role_worker_mapping
        self.resource_pool_manager = resource_pool_manager
        self.use_reference_policy = Role.RefPolicy in role_worker_mapping
        self.use_rm = Role.RewardModel in role_worker_mapping
        self.ray_worker_group_cls = ray_worker_group_cls
        self.device_name = device_name if device_name else self.config.trainer.device
        self.validation_generations_logger = ValidationGenerationsLogger(
            project_name=self.config.trainer.project_name,
            experiment_name=self.config.trainer.experiment_name,
        )

        # if ref_in_actor is True, the reference policy will be actor without lora applied
        self.ref_in_actor = config.actor_rollout_ref.model.get("lora_rank", 0) > 0

        # define in-reward KL control
        # kl loss control currently not supported
        if self.config.algorithm.use_kl_in_reward:
            self.kl_ctrl_in_reward = core_algos.get_kl_controller(self.config.algorithm.kl_ctrl)

        if self.config.algorithm.adv_estimator == AdvantageEstimator.GAE:
            self.use_critic = True
        elif self.config.algorithm.adv_estimator in [
            AdvantageEstimator.GRPO,
            AdvantageEstimator.GRPO_PASSK,
            AdvantageEstimator.REINFORCE_PLUS_PLUS,
            AdvantageEstimator.REMAX,
            AdvantageEstimator.RLOO,
            AdvantageEstimator.OPO,
            AdvantageEstimator.REINFORCE_PLUS_PLUS_BASELINE,
            AdvantageEstimator.GPG,
        ]:
            self.use_critic = False
        else:
            raise NotImplementedError

        self._validate_config()
        self._create_dataloader(train_dataset, val_dataset, collate_fn, train_sampler)

    def _validate_config(self):
        config = self.config
        # number of GPUs total
        n_gpus = config.trainer.n_gpus_per_node * config.trainer.nnodes
        if config.actor_rollout_ref.actor.strategy == "megatron":
            model_parallel_size = (
                config.actor_rollout_ref.actor.megatron.tensor_model_parallel_size
                * config.actor_rollout_ref.actor.megatron.pipeline_model_parallel_size
            )
            assert (
                n_gpus % (model_parallel_size * config.actor_rollout_ref.actor.megatron.context_parallel_size) == 0
            ), (
                f"n_gpus ({n_gpus}) must be divisible by model_parallel_size ({model_parallel_size}) times "
                f"context_parallel_size ({config.actor_rollout_ref.actor.megatron.context_parallel_size})"
            )
            megatron_dp = n_gpus // (
                model_parallel_size * config.actor_rollout_ref.actor.megatron.context_parallel_size
            )
            minimal_bsz = megatron_dp * config.actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu
        else:
            minimal_bsz = n_gpus

        # 1.
Check total batch size for data correctness real_train_batch_size = config.data.train_batch_size * config.actor_rollout_ref.rollout.n assert real_train_batch_size % minimal_bsz == 0, ( f"real_train_batch_size ({real_train_batch_size}) must be divisible by minimal possible batch size " f"({minimal_bsz})" ) # A helper function to check "micro_batch_size" vs "micro_batch_size_per_gpu" # We throw an error if the user sets both. The new convention is "..._micro_batch_size_per_gpu". def check_mutually_exclusive(mbs, mbs_per_gpu, name: str): """Validate mutually exclusive micro batch size configuration options. Ensures that users don't set both deprecated micro_batch_size and the new micro_batch_size_per_gpu parameters simultaneously. Args: mbs: Deprecated micro batch size parameter value. mbs_per_gpu: New micro batch size per GPU parameter value. name (str): Configuration section name for error messages. Raises: ValueError: If both parameters are set or neither is set. """ settings = { "actor_rollout_ref.actor": "micro_batch_size", "critic": "micro_batch_size", "reward_model": "micro_batch_size", "actor_rollout_ref.ref": "log_prob_micro_batch_size", "actor_rollout_ref.rollout": "log_prob_micro_batch_size", } if name in settings: param = settings[name] param_per_gpu = f"{param}_per_gpu" if mbs is None and mbs_per_gpu is None: raise ValueError( f"[{name}] Please set at least one of '{name}.{param}' or '{name}.{param_per_gpu}'." ) if mbs is not None and mbs_per_gpu is not None: raise ValueError( f"[{name}] You have set both '{name}.{param}' AND '{name}.{param_per_gpu}'. Please remove " f"'{name}.{param}' because only '*_{param_per_gpu}' is supported (the former is deprecated)." ) if not config.actor_rollout_ref.actor.use_dynamic_bsz: # actor: ppo_micro_batch_size vs. ppo_micro_batch_size_per_gpu check_mutually_exclusive( config.actor_rollout_ref.actor.ppo_micro_batch_size, config.actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu, "actor_rollout_ref.actor", ) if self.use_reference_policy: # reference: log_prob_micro_batch_size vs. log_prob_micro_batch_size_per_gpu check_mutually_exclusive( config.actor_rollout_ref.ref.log_prob_micro_batch_size, config.actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu, "actor_rollout_ref.ref", ) # The rollout section also has log_prob_micro_batch_size vs. 
log_prob_micro_batch_size_per_gpu check_mutually_exclusive( config.actor_rollout_ref.rollout.log_prob_micro_batch_size, config.actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu, "actor_rollout_ref.rollout", ) if self.use_critic and not config.critic.use_dynamic_bsz: # Check for critic micro-batch size conflicts check_mutually_exclusive( config.critic.ppo_micro_batch_size, config.critic.ppo_micro_batch_size_per_gpu, "critic" ) # Check for reward model micro-batch size conflicts if config.reward_model.enable and not config.reward_model.use_dynamic_bsz: check_mutually_exclusive( config.reward_model.micro_batch_size, config.reward_model.micro_batch_size_per_gpu, "reward_model" ) # Actor # check if train_batch_size is larger than ppo_mini_batch_size # if NOT dynamic_bsz, we must ensure: # ppo_mini_batch_size is divisible by ppo_micro_batch_size # ppo_micro_batch_size * sequence_parallel_size >= n_gpus if not config.actor_rollout_ref.actor.use_dynamic_bsz: assert config.data.train_batch_size >= config.actor_rollout_ref.actor.ppo_mini_batch_size sp_size = config.actor_rollout_ref.actor.get("ulysses_sequence_parallel_size", 1) if config.actor_rollout_ref.actor.ppo_micro_batch_size is not None: assert ( config.actor_rollout_ref.actor.ppo_mini_batch_size % config.actor_rollout_ref.actor.ppo_micro_batch_size == 0 ) assert config.actor_rollout_ref.actor.ppo_micro_batch_size * sp_size >= n_gpus assert config.actor_rollout_ref.actor.loss_agg_mode in [ "token-mean", "seq-mean-token-sum", "seq-mean-token-mean", "seq-mean-token-sum-norm", ], f"Invalid loss_agg_mode: {config.actor_rollout_ref.actor.loss_agg_mode}" if self.config.algorithm.use_kl_in_reward and config.actor_rollout_ref.actor.use_kl_loss: print("NOTICE: You have both enabled in-reward kl and kl loss.") # critic if self.use_critic and not config.critic.use_dynamic_bsz: assert config.data.train_batch_size >= config.critic.ppo_mini_batch_size sp_size = config.critic.get("ulysses_sequence_parallel_size", 1) if config.critic.ppo_micro_batch_size is not None: assert config.critic.ppo_mini_batch_size % config.critic.ppo_micro_batch_size == 0 assert config.critic.ppo_micro_batch_size * sp_size >= n_gpus # Check if use_remove_padding is enabled when using sequence parallelism for fsdp if config.actor_rollout_ref.actor.strategy in {"fsdp", "fsdp2"} and ( config.actor_rollout_ref.actor.get("ulysses_sequence_parallel_size", 1) > 1 or config.actor_rollout_ref.ref.get("ulysses_sequence_parallel_size", 1) > 1 ): assert config.actor_rollout_ref.model.use_remove_padding, ( "When using sequence parallelism for actor/ref policy, you must enable `use_remove_padding`." ) if self.use_critic and config.critic.strategy in {"fsdp", "fsdp2"}: if config.critic.get("ulysses_sequence_parallel_size", 1) > 1: assert config.critic.model.use_remove_padding, ( "When using sequence parallelism for critic, you must enable `use_remove_padding`." ) if config.data.get("val_batch_size", None) is not None: print( "WARNING: val_batch_size is deprecated." + " Validation datasets are sent to inference engines as a whole batch," + " which will schedule the memory themselves." 
) # check eval config if config.actor_rollout_ref.rollout.val_kwargs.do_sample: assert config.actor_rollout_ref.rollout.temperature > 0, ( "validation gen temperature should be greater than 0 when enabling do_sample" ) print("[validate_config] All configuration checks passed successfully!") def _create_dataloader(self, train_dataset, val_dataset, collate_fn, train_sampler: Optional[Sampler]): """ Creates the train and validation dataloaders. """ # TODO: we have to make sure the batch size is divisible by the dp size from verl.trainer.main_ppo import create_rl_dataset, create_rl_sampler if train_dataset is None: train_dataset = create_rl_dataset( self.config.data.train_files, self.config.data, self.tokenizer, self.processor ) if val_dataset is None: val_dataset = create_rl_dataset( self.config.data.val_files, self.config.data, self.tokenizer, self.processor ) self.train_dataset, self.val_dataset = train_dataset, val_dataset if train_sampler is None: train_sampler = create_rl_sampler(self.config.data, self.train_dataset) if collate_fn is None: from verl.utils.dataset.rl_dataset import collate_fn as default_collate_fn collate_fn = default_collate_fn num_workers = self.config.data["dataloader_num_workers"] self.train_dataloader = StatefulDataLoader( dataset=self.train_dataset, batch_size=self.config.data.get("gen_batch_size", self.config.data.train_batch_size), num_workers=num_workers, drop_last=True, collate_fn=collate_fn, sampler=train_sampler, ) val_batch_size = self.config.data.val_batch_size # Prefer config value if set if val_batch_size is None: val_batch_size = len(self.val_dataset) self.val_dataloader = StatefulDataLoader( dataset=self.val_dataset, batch_size=val_batch_size, num_workers=num_workers, shuffle=self.config.data.get("validation_shuffle", True), drop_last=False, collate_fn=collate_fn, ) assert len(self.train_dataloader) >= 1, "Train dataloader is empty!" assert len(self.val_dataloader) >= 1, "Validation dataloader is empty!" print( f"Size of train dataloader: {len(self.train_dataloader)}, Size of val dataloader: " f"{len(self.val_dataloader)}" ) total_training_steps = len(self.train_dataloader) * self.config.trainer.total_epochs if self.config.trainer.total_training_steps is not None: total_training_steps = self.config.trainer.total_training_steps self.total_training_steps = total_training_steps print(f"Total training steps: {self.total_training_steps}") try: OmegaConf.set_struct(self.config, True) with open_dict(self.config): if OmegaConf.select(self.config, "actor_rollout_ref.actor.optim"): self.config.actor_rollout_ref.actor.optim.total_training_steps = total_training_steps if OmegaConf.select(self.config, "critic.optim"): self.config.critic.optim.total_training_steps = total_training_steps except Exception as e: print(f"Warning: Could not set total_training_steps in config. Structure missing? 
Error: {e}") def _dump_generations(self, inputs, outputs, scores, reward_extra_infos_dict, dump_path, ground_truths=None): """Dump rollout/validation samples as JSONL.""" os.makedirs(dump_path, exist_ok=True) filename = os.path.join(dump_path, f"{self.global_steps}.jsonl") n = len(inputs) base_data = { "input": inputs, "output": outputs, "score": scores, "step": [self.global_steps] * n, } if ground_truths and len(ground_truths) == n: base_data["ground_truth"] = ground_truths for k, v in reward_extra_infos_dict.items(): if len(v) == n: base_data[k] = v lines = [] for i in range(n): entry = {k: v[i] for k, v in base_data.items()} lines.append(json.dumps(entry, ensure_ascii=False)) with open(filename, "w") as f: f.write("\n".join(lines) + "\n") print(f"Dumped generations to {filename}") def _maybe_log_val_generations(self, inputs, outputs, scores): """Log a table of validation samples to the configured logger (wandb or swanlab)""" generations_to_log = self.config.trainer.log_val_generations if generations_to_log == 0: return import numpy as np # Create tuples of (input, output, score) and sort by input text samples = list(zip(inputs, outputs, scores, strict=True)) samples.sort(key=lambda x: x[0]) # Sort by input text # Use fixed random seed for deterministic shuffling rng = np.random.RandomState(42) rng.shuffle(samples) # Take first N samples after shuffling samples = samples[:generations_to_log] # Log to each configured logger self.validation_generations_logger.log(self.config.trainer.logger, samples, self.global_steps) def _validate(self): data_source_lst = [] reward_extra_infos_dict: dict[str, list] = defaultdict(list) # Lists to collect samples for the table sample_inputs = [] sample_outputs = [] sample_scores = [] sample_turns = [] sample_ground_truths = [] for test_data in self.val_dataloader: test_batch = DataProto.from_single_dict(test_data) # repeat test batch test_batch = test_batch.repeat( repeat_times=self.config.actor_rollout_ref.rollout.val_kwargs.n, interleave=True ) # we only do validation on rule-based rm if self.config.reward_model.enable and test_batch[0].non_tensor_batch["reward_model"]["style"] == "model": return {} # Store original inputs input_ids = test_batch.batch["input_ids"] # TODO: Can we keep special tokens except for padding tokens? 
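# --- Editor's illustration (not part of verl): consuming the per-step JSONL
# --- files written by _dump_generations above. One JSON object per sample;
# --- the file is named after the global step. The path below is hypothetical.
import json as _json
def _load_dump(path):
    with open(path) as f:
        return [_json.loads(line) for line in f]
# e.g.: rows = _load_dump("val_dumps/100.jsonl")
#       worst = sorted(rows, key=lambda r: r["score"])[:5]  # lowest-scoring samples
# --- end editor's illustration ---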
input_texts = [self.tokenizer.decode(ids, skip_special_tokens=True) for ids in input_ids] sample_inputs.extend(input_texts) if "reward_model" in test_batch.non_tensor_batch: ground_truths = [item["ground_truth"] for item in test_batch.non_tensor_batch["reward_model"]] sample_ground_truths.extend(ground_truths) batch_keys_to_pop = ["input_ids", "attention_mask", "position_ids"] non_tensor_batch_keys_to_pop = ["raw_prompt_ids"] if "multi_modal_data" in test_batch.non_tensor_batch: non_tensor_batch_keys_to_pop.append("multi_modal_data") if "raw_prompt" in test_batch.non_tensor_batch: non_tensor_batch_keys_to_pop.append("raw_prompt") if "tools_kwargs" in test_batch.non_tensor_batch: non_tensor_batch_keys_to_pop.append("tools_kwargs") if "interaction_kwargs" in test_batch.non_tensor_batch: non_tensor_batch_keys_to_pop.append("interaction_kwargs") if "agent_name" in test_batch.non_tensor_batch: non_tensor_batch_keys_to_pop.append("agent_name") test_gen_batch = test_batch.pop( batch_keys=batch_keys_to_pop, non_tensor_batch_keys=non_tensor_batch_keys_to_pop, ) test_gen_batch.meta_info = { "eos_token_id": self.tokenizer.eos_token_id, "pad_token_id": self.tokenizer.pad_token_id, "recompute_log_prob": False, "do_sample": self.config.actor_rollout_ref.rollout.val_kwargs.do_sample, "validate": True, "global_steps": self.global_steps, } print(f"test_gen_batch meta info: {test_gen_batch.meta_info}") # pad to be divisible by dp_size size_divisor = ( self.actor_rollout_wg.world_size if not self.async_rollout_mode else self.config.actor_rollout_ref.rollout.agent.num_workers ) test_gen_batch_padded, pad_size = pad_dataproto_to_divisor(test_gen_batch, size_divisor) if not self.async_rollout_mode: test_output_gen_batch_padded = self.actor_rollout_wg.generate_sequences(test_gen_batch_padded) else: test_output_gen_batch_padded = self.async_rollout_manager.generate_sequences(test_gen_batch_padded) # unpad test_output_gen_batch = unpad_dataproto(test_output_gen_batch_padded, pad_size=pad_size) print("validation generation end") # Store generated outputs output_ids = test_output_gen_batch.batch["responses"] output_texts = [self.tokenizer.decode(ids, skip_special_tokens=True) for ids in output_ids] sample_outputs.extend(output_texts) test_batch = test_batch.union(test_output_gen_batch) test_batch.meta_info["validate"] = True # evaluate using reward_function result = self.val_reward_fn(test_batch, return_dict=True) reward_tensor = result["reward_tensor"] scores = reward_tensor.sum(-1).cpu().tolist() sample_scores.extend(scores) reward_extra_infos_dict["reward"].extend(scores) print(f"len reward_extra_infos_dict['reward']: {len(reward_extra_infos_dict['reward'])}") if "reward_extra_info" in result: for key, lst in result["reward_extra_info"].items(): reward_extra_infos_dict[key].extend(lst) print(f"len reward_extra_infos_dict['{key}']: {len(reward_extra_infos_dict[key])}") # collect num_turns of each prompt if "__num_turns__" in test_batch.non_tensor_batch: sample_turns.append(test_batch.non_tensor_batch["__num_turns__"]) data_source_lst.append(test_batch.non_tensor_batch.get("data_source", ["unknown"] * reward_tensor.shape[0])) self._maybe_log_val_generations(inputs=sample_inputs, outputs=sample_outputs, scores=sample_scores) # dump generations val_data_dir = self.config.trainer.get("validation_data_dir", None) if val_data_dir: self._dump_generations( inputs=sample_inputs, outputs=sample_outputs, scores=sample_scores, reward_extra_infos_dict=reward_extra_infos_dict, dump_path=val_data_dir, 
ground_truths=sample_ground_truths, ) for key_info, lst in reward_extra_infos_dict.items(): assert len(lst) == 0 or len(lst) == len(sample_scores), f"{key_info}: {len(lst)=}, {len(sample_scores)=}" data_sources = np.concatenate(data_source_lst, axis=0) data_src2var2metric2val = process_validation_metrics(data_sources, sample_inputs, reward_extra_infos_dict) metric_dict = {} for data_source, var2metric2val in data_src2var2metric2val.items(): core_var = "acc" if "acc" in var2metric2val else "reward" for var_name, metric2val in var2metric2val.items(): n_max = max([int(name.split("@")[-1].split("/")[0]) for name in metric2val.keys()]) for metric_name, metric_val in metric2val.items(): if ( (var_name == core_var) and any(metric_name.startswith(pfx) for pfx in ["mean", "maj", "best"]) and (f"@{n_max}" in metric_name) ): metric_sec = "val-core" else: metric_sec = "val-aux" pfx = f"{metric_sec}/{data_source}/{var_name}/{metric_name}" metric_dict[pfx] = metric_val if len(sample_turns) > 0: sample_turns = np.concatenate(sample_turns) metric_dict["val-aux/num_turns/min"] = sample_turns.min() metric_dict["val-aux/num_turns/max"] = sample_turns.max() metric_dict["val-aux/num_turns/mean"] = sample_turns.mean() return metric_dict def init_workers(self): """Initialize distributed training workers using Ray backend. Creates: 1. Ray resource pools from configuration 2. Worker groups for each role (actor, critic, etc.) """ self.resource_pool_manager.create_resource_pool() self.resource_pool_to_cls = {pool: {} for pool in self.resource_pool_manager.resource_pool_dict.values()} # create actor and rollout if self.hybrid_engine: resource_pool = self.resource_pool_manager.get_resource_pool(Role.ActorRollout) actor_rollout_cls = RayClassWithInitArgs( cls=self.role_worker_mapping[Role.ActorRollout], config=self.config.actor_rollout_ref, role="actor_rollout", profile_option=self.config.trainer.npu_profile.options, ) self.resource_pool_to_cls[resource_pool]["actor_rollout"] = actor_rollout_cls else: raise NotImplementedError # create critic if self.use_critic: resource_pool = self.resource_pool_manager.get_resource_pool(Role.Critic) critic_cls = RayClassWithInitArgs(cls=self.role_worker_mapping[Role.Critic], config=self.config.critic) self.resource_pool_to_cls[resource_pool]["critic"] = critic_cls # create reference policy if needed if self.use_reference_policy: resource_pool = self.resource_pool_manager.get_resource_pool(Role.RefPolicy) ref_policy_cls = RayClassWithInitArgs( self.role_worker_mapping[Role.RefPolicy], config=self.config.actor_rollout_ref, role="ref", profile_option=self.config.trainer.npu_profile.options, ) self.resource_pool_to_cls[resource_pool]["ref"] = ref_policy_cls # create a reward model if reward_fn is None if self.use_rm: # we create a RM here resource_pool = self.resource_pool_manager.get_resource_pool(Role.RewardModel) rm_cls = RayClassWithInitArgs(self.role_worker_mapping[Role.RewardModel], config=self.config.reward_model) self.resource_pool_to_cls[resource_pool]["rm"] = rm_cls # initialize WorkerGroup # NOTE: if you want to use a different resource pool for each role, which can support different parallel size, # you should not use `create_colocated_worker_cls`. # Instead, directly pass different resource pool to different worker groups. # See https://github.com/volcengine/verl/blob/master/examples/ray/tutorial.ipynb for more information. 
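# --- Editor's illustration: the wg_kwargs setup below relies on OmegaConf.select
# --- returning None for keys that are absent, so optional trainer fields can be
# --- probed without try/except. Self-contained sketch (config values made up):
from omegaconf import OmegaConf as _OC
_cfg = _OC.create({"trainer": {"profile_steps": [1, 5]}})
assert _OC.select(_cfg, "trainer.profile_steps") == [1, 5]
assert _OC.select(_cfg, "trainer.worker_nsight_options") is None
# --- end editor's illustration ---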
all_wg = {} wg_kwargs = {} # Setting up kwargs for RayWorkerGroup if OmegaConf.select(self.config.trainer, "ray_wait_register_center_timeout") is not None: wg_kwargs["ray_wait_register_center_timeout"] = self.config.trainer.ray_wait_register_center_timeout if OmegaConf.select(self.config.trainer, "profile_steps") is not None: wg_kwargs["profile_steps"] = OmegaConf.select(self.config.trainer, "profile_steps") assert OmegaConf.select(self.config.trainer, "worker_nsight_options") is not None, ( "worker_nsight_options must be set when profile_steps is set" ) wg_kwargs["worker_nsight_options"] = OmegaConf.to_container( OmegaConf.select(self.config.trainer, "worker_nsight_options") ) wg_kwargs["device_name"] = self.device_name for resource_pool, class_dict in self.resource_pool_to_cls.items(): worker_dict_cls = create_colocated_worker_cls(class_dict=class_dict) wg_dict = self.ray_worker_group_cls( resource_pool=resource_pool, ray_cls_with_init=worker_dict_cls, **wg_kwargs, ) spawn_wg = wg_dict.spawn(prefix_set=class_dict.keys()) all_wg.update(spawn_wg) if self.use_critic: self.critic_wg = all_wg["critic"] self.critic_wg.init_model() if self.use_reference_policy and not self.ref_in_actor: self.ref_policy_wg = all_wg["ref"] self.ref_policy_wg.init_model() if self.use_rm: self.rm_wg = all_wg["rm"] self.rm_wg.init_model() # we should create rollout at the end so that vllm can have a better estimation of kv cache memory self.actor_rollout_wg = all_wg["actor_rollout"] self.actor_rollout_wg.init_model() # create async rollout manager and request scheduler self.async_rollout_mode = False if self.config.actor_rollout_ref.rollout.mode == "async": from verl.experimental.agent_loop import AgentLoopManager self.async_rollout_mode = True self.async_rollout_manager = AgentLoopManager( config=self.config, worker_group=self.actor_rollout_wg, ) def _save_checkpoint(self): from verl.utils.fs import local_mkdir_safe # path: given_path + `/global_step_{global_steps}` + `/actor` local_global_step_folder = os.path.join( self.config.trainer.default_local_dir, f"global_step_{self.global_steps}" ) print(f"local_global_step_folder: {local_global_step_folder}") actor_local_path = os.path.join(local_global_step_folder, "actor") actor_remote_path = ( None if self.config.trainer.default_hdfs_dir is None else os.path.join(self.config.trainer.default_hdfs_dir, f"global_step_{self.global_steps}", "actor") ) remove_previous_ckpt_in_save = self.config.trainer.get("remove_previous_ckpt_in_save", False) if remove_previous_ckpt_in_save: print( "Warning: remove_previous_ckpt_in_save is deprecated," + " set max_actor_ckpt_to_keep=1 and max_critic_ckpt_to_keep=1 instead" ) max_actor_ckpt_to_keep = ( self.config.trainer.get("max_actor_ckpt_to_keep", None) if not remove_previous_ckpt_in_save else 1 ) max_critic_ckpt_to_keep = ( self.config.trainer.get("max_critic_ckpt_to_keep", None) if not remove_previous_ckpt_in_save else 1 ) self.actor_rollout_wg.save_checkpoint( actor_local_path, actor_remote_path, self.global_steps, max_ckpt_to_keep=max_actor_ckpt_to_keep ) if self.use_critic: critic_local_path = os.path.join(local_global_step_folder, "critic") critic_remote_path = ( None if self.config.trainer.default_hdfs_dir is None else os.path.join(self.config.trainer.default_hdfs_dir, f"global_step_{self.global_steps}", "critic") ) self.critic_wg.save_checkpoint( critic_local_path, critic_remote_path, self.global_steps, max_ckpt_to_keep=max_critic_ckpt_to_keep ) # save dataloader local_mkdir_safe(local_global_step_folder) dataloader_local_path 
= os.path.join(local_global_step_folder, "data.pt") dataloader_state_dict = self.train_dataloader.state_dict() torch.save(dataloader_state_dict, dataloader_local_path) # latest checkpointed iteration tracker (for atomic usage) local_latest_checkpointed_iteration = os.path.join( self.config.trainer.default_local_dir, "latest_checkpointed_iteration.txt" ) with open(local_latest_checkpointed_iteration, "w") as f: f.write(str(self.global_steps)) def _load_checkpoint(self): if self.config.trainer.resume_mode == "disable": return 0 # load from hdfs if self.config.trainer.default_hdfs_dir is not None: raise NotImplementedError("load from hdfs is not implemented yet") else: checkpoint_folder = self.config.trainer.default_local_dir # TODO: check path if not os.path.isabs(checkpoint_folder): working_dir = os.getcwd() checkpoint_folder = os.path.join(working_dir, checkpoint_folder) global_step_folder = find_latest_ckpt_path(checkpoint_folder) # None if no latest # find global_step_folder if self.config.trainer.resume_mode == "auto": if global_step_folder is None: print("Training from scratch") return 0 else: if self.config.trainer.resume_mode == "resume_path": assert isinstance(self.config.trainer.resume_from_path, str), "resume ckpt must be str type" assert "global_step_" in self.config.trainer.resume_from_path, ( "resume ckpt must specify the global_steps" ) global_step_folder = self.config.trainer.resume_from_path if not os.path.isabs(global_step_folder): working_dir = os.getcwd() global_step_folder = os.path.join(working_dir, global_step_folder) print(f"Load from checkpoint folder: {global_step_folder}") # set global step self.global_steps = int(global_step_folder.split("global_step_")[-1]) print(f"Setting global step to {self.global_steps}") print(f"Resuming from {global_step_folder}") actor_path = os.path.join(global_step_folder, "actor") critic_path = os.path.join(global_step_folder, "critic") # load actor self.actor_rollout_wg.load_checkpoint( actor_path, del_local_after_load=self.config.trainer.del_local_ckpt_after_load ) # load critic if self.use_critic: self.critic_wg.load_checkpoint( critic_path, del_local_after_load=self.config.trainer.del_local_ckpt_after_load ) # load dataloader, # TODO: from remote not implemented yet dataloader_local_path = os.path.join(global_step_folder, "data.pt") if os.path.exists(dataloader_local_path): dataloader_state_dict = torch.load(dataloader_local_path, weights_only=False) self.train_dataloader.load_state_dict(dataloader_state_dict) else: print(f"Warning: No dataloader state found at {dataloader_local_path}, will start from scratch") def _start_profiling(self, do_profile: bool) -> None: """Start profiling for all worker groups if profiling is enabled.""" if do_profile: self.actor_rollout_wg.start_profile(role="e2e", profile_step=self.global_steps) if self.use_reference_policy: self.ref_policy_wg.start_profile() if self.use_critic: self.critic_wg.start_profile() if self.use_rm: self.rm_wg.start_profile() def _stop_profiling(self, do_profile: bool) -> None: """Stop profiling for all worker groups if profiling is enabled.""" if do_profile: self.actor_rollout_wg.stop_profile() if self.use_reference_policy: self.ref_policy_wg.stop_profile() if self.use_critic: self.critic_wg.stop_profile() if self.use_rm: self.rm_wg.stop_profile() def _balance_batch(self, batch: DataProto, metrics, logging_prefix="global_seqlen"): """Reorder the data on single controller such that each dp rank gets similar total tokens""" attention_mask = batch.batch["attention_mask"] 
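# --- Editor's sketch of the goal of _balance_batch: split sequence lengths into
# --- k partitions with near-equal token totals. verl's get_seqlen_balanced_partitions
# --- (used below) additionally enforces equal partition sizes; this naive greedy
# --- version balances token totals only and is illustrative, not the real algorithm.
import heapq as _heapq
def _greedy_balance(seqlens, k):
    heap = [(0, rank, []) for rank in range(k)]  # (token_total, tie_breaker, indices)
    _heapq.heapify(heap)
    for i in sorted(range(len(seqlens)), key=lambda j: -seqlens[j]):
        total, rank, members = _heapq.heappop(heap)  # emptiest partition first
        members.append(i)
        _heapq.heappush(heap, (total + seqlens[i], rank, members))
    return [m for _, _, m in sorted(heap, key=lambda e: e[1])]
assert _greedy_balance([9, 1, 5, 5], 2) == [[0, 1], [2, 3]]  # totals: 10 vs 10
# --- end editor's sketch ---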
batch_size = attention_mask.shape[0] global_seqlen_lst = batch.batch["attention_mask"].view(batch_size, -1).sum(-1).tolist() # (train_batch_size,) world_size = self.actor_rollout_wg.world_size global_partition_lst = get_seqlen_balanced_partitions( global_seqlen_lst, k_partitions=world_size, equal_size=True ) # reorder based on index. The data will be automatically equally partitioned by dispatch function global_idx = torch.tensor([j for partition in global_partition_lst for j in partition]) batch.reorder(global_idx) global_balance_stats = log_seqlen_unbalance( seqlen_list=global_seqlen_lst, partitions=global_partition_lst, prefix=logging_prefix ) metrics.update(global_balance_stats) def fit(self): """ The training loop of PPO. The driver process only needs to call the compute functions of the worker group through RPC to construct the PPO dataflow. The light-weight advantage computation is done on the driver process. """ from omegaconf import OmegaConf from verl.utils.tracking import Tracking logger = Tracking( project_name=self.config.trainer.project_name, experiment_name=self.config.trainer.experiment_name, default_backend=self.config.trainer.logger, config=OmegaConf.to_container(self.config, resolve=True), ) self.global_steps = 0 # load checkpoint before doing anything self._load_checkpoint() # perform validation before training # currently, we only support validation using the reward_function. if self.val_reward_fn is not None and self.config.trainer.get("val_before_train", True): val_metrics = self._validate() assert val_metrics, f"{val_metrics=}" pprint(f"Initial validation metrics: {val_metrics}") logger.log(data=val_metrics, step=self.global_steps) if self.config.trainer.get("val_only", False): return # add tqdm progress_bar = tqdm(total=self.total_training_steps, initial=self.global_steps, desc="Training Progress") # we start from step 1 self.global_steps += 1 last_val_metrics = None self.max_steps_duration = 0 for epoch in range(self.config.trainer.total_epochs): for batch_dict in self.train_dataloader: metrics = {} timing_raw = {} do_profile = ( self.global_steps in self.config.trainer.profile_steps if self.config.trainer.profile_steps is not None else False ) with marked_timer("start_profile", timing_raw): self._start_profiling(do_profile) batch: DataProto = DataProto.from_single_dict(batch_dict) # pop those keys for generation batch_keys_to_pop = ["input_ids", "attention_mask", "position_ids"] non_tensor_batch_keys_to_pop = ["raw_prompt_ids"] if "multi_modal_data" in batch.non_tensor_batch: non_tensor_batch_keys_to_pop.append("multi_modal_data") if "raw_prompt" in batch.non_tensor_batch: non_tensor_batch_keys_to_pop.append("raw_prompt") if "tools_kwargs" in batch.non_tensor_batch: non_tensor_batch_keys_to_pop.append("tools_kwargs") if "interaction_kwargs" in batch.non_tensor_batch: non_tensor_batch_keys_to_pop.append("interaction_kwargs") if "index" in batch.non_tensor_batch: non_tensor_batch_keys_to_pop.append("index") if "agent_name" in batch.non_tensor_batch: non_tensor_batch_keys_to_pop.append("agent_name") gen_batch = batch.pop( batch_keys=batch_keys_to_pop, non_tensor_batch_keys=non_tensor_batch_keys_to_pop, ) # pass global_steps to trace gen_batch.meta_info["global_steps"] = self.global_steps gen_batch = gen_batch.repeat(repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True) is_last_step = self.global_steps >= self.total_training_steps with marked_timer("step", timing_raw): # generate a batch with marked_timer("gen", timing_raw, color="red"): if not
self.async_rollout_mode: gen_batch_output = self.actor_rollout_wg.generate_sequences(gen_batch) else: gen_batch_output = self.async_rollout_manager.generate_sequences(gen_batch) timing_raw.update(gen_batch_output.meta_info["timing"]) gen_batch_output.meta_info.pop("timing", None) if self.config.algorithm.adv_estimator == AdvantageEstimator.REMAX: with marked_timer("gen_max", timing_raw, color="purple"): gen_baseline_batch = deepcopy(gen_batch) gen_baseline_batch.meta_info["do_sample"] = False if not self.async_rollout_mode: gen_baseline_output = self.actor_rollout_wg.generate_sequences(gen_baseline_batch) else: gen_baseline_output = self.async_rollout_manager.generate_sequences(gen_baseline_batch) batch = batch.union(gen_baseline_output) reward_baseline_tensor = self.reward_fn(batch) reward_baseline_tensor = reward_baseline_tensor.sum(dim=-1) batch.pop(batch_keys=list(gen_baseline_output.batch.keys())) batch.batch["reward_baselines"] = reward_baseline_tensor del gen_baseline_batch, gen_baseline_output batch.non_tensor_batch["uid"] = np.array( [str(uuid.uuid4()) for _ in range(len(batch.batch))], dtype=object ) # repeat to align with repeated responses in rollout batch = batch.repeat(repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True) batch = batch.union(gen_batch_output) if "response_mask" not in batch.batch.keys(): batch.batch["response_mask"] = compute_response_mask(batch) # Balance the number of valid tokens across DP ranks. # NOTE: This usually changes the order of data in the `batch`, # which won't affect the advantage calculation (since it's based on uid), # but might affect the loss calculation (due to the change of mini-batching). # TODO: Decouple the DP balancing and mini-batching. if self.config.trainer.balance_batch: self._balance_batch(batch, metrics=metrics) # compute global_valid tokens batch.meta_info["global_token_num"] = torch.sum(batch.batch["attention_mask"], dim=-1).tolist() with marked_timer("reward", timing_raw, color="yellow"): # compute reward model score if self.use_rm: reward_tensor = self.rm_wg.compute_rm_score(batch) batch = batch.union(reward_tensor) if self.config.reward_model.launch_reward_fn_async: future_reward = compute_reward_async.remote(data=batch, reward_fn=self.reward_fn) else: reward_tensor, reward_extra_infos_dict = compute_reward(batch, self.reward_fn) # recompute old_log_probs with marked_timer("old_log_prob", timing_raw, color="blue"): old_log_prob = self.actor_rollout_wg.compute_log_prob(batch) entropys = old_log_prob.batch["entropys"] response_masks = batch.batch["response_mask"] loss_agg_mode = self.config.actor_rollout_ref.actor.loss_agg_mode entropy_agg = agg_loss(loss_mat=entropys, loss_mask=response_masks, loss_agg_mode=loss_agg_mode) old_log_prob_metrics = {"actor/entropy": entropy_agg.detach().item()} # per-position entropy plot masked_entropys = entropys * response_masks sum_entropy_per_position = torch.sum(masked_entropys, dim=0) num_tokens_per_position = torch.sum(response_masks, dim=0) mean_entropy_per_position = sum_entropy_per_position / torch.clamp( num_tokens_per_position, min=1 ) try: entropy_list = mean_entropy_per_position.cpu().tolist() table_data = [[i, ent] for i, ent in enumerate(entropy_list)] table = wandb.Table(data=table_data, columns=["position", "entropy"]) old_log_prob_metrics["actor/per_position_entropy_plot"] = wandb.plot.line( table, "position", "entropy", title="Per-Position Entropy" ) except Exception as e: print(f"Warning: Could not create wandb per-position entropy plot. 
Error: {e}") # token-type entropy try: responses = batch.batch["responses"] # mask for token type 1 (id >= 151669) type1_mask = (responses >= 151669) * response_masks # mask for token type 2 (id < 151669) type2_mask = (responses < 151669) * response_masks count_type1 = type1_mask.sum().item() count_type2 = type2_mask.sum().item() if count_type1 > 0: entropy_type1 = masked_mean(entropys, mask=type1_mask, axis=None).item() old_log_prob_metrics["actor/entropy_itemic_token"] = entropy_type1 if count_type2 > 0: entropy_type2 = masked_mean(entropys, mask=type2_mask, axis=None).item() old_log_prob_metrics["actor/entropy_lang_token"] = entropy_type2 old_log_prob_metrics["actor/token_count_itemic_token"] = count_type1 old_log_prob_metrics["actor/token_count_lang_token"] = count_type2 except Exception as e: print(f"Warning: Could not compute token-type entropy metrics. Error: {e}") metrics.update(old_log_prob_metrics) old_log_prob.batch.pop("entropys") batch = batch.union(old_log_prob) if "rollout_log_probs" in batch.batch.keys(): # TODO: we may want to add diff of probs too. rollout_old_log_probs = batch.batch["rollout_log_probs"] actor_old_log_probs = batch.batch["old_log_probs"] attention_mask = batch.batch["attention_mask"] responses = batch.batch["responses"] response_length = responses.size(1) response_mask = attention_mask[:, -response_length:] rollout_probs = torch.exp(rollout_old_log_probs) actor_probs = torch.exp(actor_old_log_probs) rollout_probs_diff = torch.abs(rollout_probs - actor_probs) rollout_probs_diff = torch.masked_select(rollout_probs_diff, response_mask.bool()) rollout_probs_diff_max = torch.max(rollout_probs_diff) rollout_probs_diff_mean = torch.mean(rollout_probs_diff) rollout_probs_diff_std = torch.std(rollout_probs_diff) metrics.update( { "training/rollout_probs_diff_max": rollout_probs_diff_max.detach().item(), "training/rollout_probs_diff_mean": rollout_probs_diff_mean.detach().item(), "training/rollout_probs_diff_std": rollout_probs_diff_std.detach().item(), } ) if self.use_reference_policy: # compute reference log_prob with marked_timer("ref", timing_raw, color="olive"): if not self.ref_in_actor: ref_log_prob = self.ref_policy_wg.compute_ref_log_prob(batch) else: ref_log_prob = self.actor_rollout_wg.compute_ref_log_prob(batch) batch = batch.union(ref_log_prob) # compute values if self.use_critic: with marked_timer("values", timing_raw, color="cyan"): values = self.critic_wg.compute_values(batch) batch = batch.union(values) with marked_timer("adv", timing_raw, color="brown"): # we combine with rule-based rm reward_extra_infos_dict: dict[str, list] if self.config.reward_model.launch_reward_fn_async: reward_tensor, reward_extra_infos_dict = ray.get(future_reward) batch.batch["token_level_scores"] = reward_tensor if reward_extra_infos_dict: batch.non_tensor_batch.update({k: np.array(v) for k, v in reward_extra_infos_dict.items()}) # Add summary statistics from reward_extra_infos_dict to the training metrics for key, values in reward_extra_infos_dict.items(): if values and len(values) > 0: values_array = np.array(values) # Only record numeric-typed metrics if np.issubdtype(values_array.dtype, np.number): metrics[f"reward/{key}/mean"] = float(np.mean(values_array)) metrics[f"reward/{key}/max"] = float(np.max(values_array)) metrics[f"reward/{key}/min"] = float(np.min(values_array)) # compute rewards.
apply_kl_penalty if available if self.config.algorithm.use_kl_in_reward: batch, kl_metrics = apply_kl_penalty( batch, kl_ctrl=self.kl_ctrl_in_reward, kl_penalty=self.config.algorithm.kl_penalty ) metrics.update(kl_metrics) else: batch.batch["token_level_rewards"] = batch.batch["token_level_scores"] # compute advantages, executed on the driver process norm_adv_by_std_in_grpo = self.config.algorithm.get( "norm_adv_by_std_in_grpo", True ) # GRPO adv normalization factor batch = compute_advantage( batch, adv_estimator=self.config.algorithm.adv_estimator, gamma=self.config.algorithm.gamma, lam=self.config.algorithm.lam, num_repeat=self.config.actor_rollout_ref.rollout.n, norm_adv_by_std_in_grpo=norm_adv_by_std_in_grpo, config=self.config.algorithm, ) # update critic if self.use_critic: with marked_timer("update_critic", timing_raw, color="pink"): critic_output = self.critic_wg.update_critic(batch) critic_output_metrics = reduce_metrics(critic_output.meta_info["metrics"]) metrics.update(critic_output_metrics) # implement critic warmup if self.config.trainer.critic_warmup <= self.global_steps: # update actor with marked_timer("update_actor", timing_raw, color="red"): batch.meta_info["multi_turn"] = self.config.actor_rollout_ref.rollout.multi_turn.enable actor_output = self.actor_rollout_wg.update_actor(batch) actor_output_metrics = reduce_metrics(actor_output.meta_info["metrics"]) metrics.update(actor_output_metrics) # Log rollout generations if enabled rollout_data_dir = self.config.trainer.get("rollout_data_dir", None) if rollout_data_dir: with marked_timer("dump_rollout_generations", timing_raw, color="green"): inputs = self.tokenizer.batch_decode(batch.batch["prompts"], skip_special_tokens=True) outputs = self.tokenizer.batch_decode(batch.batch["responses"], skip_special_tokens=True) scores = batch.batch["token_level_scores"].sum(-1).cpu().tolist() ground_truths = None if "reward_model" in batch.non_tensor_batch: ground_truths = [item["ground_truth"] for item in batch.non_tensor_batch["reward_model"]] if "request_id" in batch.non_tensor_batch: reward_extra_infos_dict.setdefault( "request_id", batch.non_tensor_batch["request_id"].tolist(), ) self._dump_generations( inputs=inputs, outputs=outputs, scores=scores, reward_extra_infos_dict=reward_extra_infos_dict, dump_path=rollout_data_dir, ground_truths=ground_truths, ) # validate if ( self.val_reward_fn is not None and self.config.trainer.test_freq > 0 and (is_last_step or self.global_steps % self.config.trainer.test_freq == 0) ): with marked_timer("testing", timing_raw, color="green"): val_metrics: dict = self._validate() if is_last_step: last_val_metrics = val_metrics metrics.update(val_metrics) # Check if the ESI (Elastic Server Instance)/training plan is close to expiration. esi_close_to_expiration = should_save_ckpt_esi( max_steps_duration=self.max_steps_duration, redundant_time=self.config.trainer.esi_redundant_time, ) # Check if the conditions for saving a checkpoint are met. # The conditions include a mandatory condition (1) and # one of the following optional conditions (2/3/4): # 1. The save frequency is set to a positive value. # 2. It's the last training step. # 3. The current step number is a multiple of the save frequency. # 4. The ESI(Elastic Server Instance)/training plan is close to expiration. 
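# --- Editor's restatement of the save conditions enumerated above as a standalone
# --- predicate; it mirrors the `if` that follows (illustrative only):
def _should_save(save_freq, step, is_last_step, esi_expiring):
    return save_freq > 0 and (is_last_step or step % save_freq == 0 or esi_expiring)
assert _should_save(save_freq=5, step=10, is_last_step=False, esi_expiring=False)
assert not _should_save(save_freq=-1, step=10, is_last_step=True, esi_expiring=True)  # condition (1) is mandatory
# --- end editor's restatement ---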
if self.config.trainer.save_freq > 0 and ( is_last_step or self.global_steps % self.config.trainer.save_freq == 0 or esi_close_to_expiration ): if esi_close_to_expiration: print("Force saving checkpoint: ESI instance expiration approaching.") with marked_timer("save_checkpoint", timing_raw, color="green"): self._save_checkpoint() with marked_timer("stop_profile", timing_raw): self._stop_profiling(do_profile) steps_duration = timing_raw["step"] self.max_steps_duration = max(self.max_steps_duration, steps_duration) # training metrics metrics.update( { "training/global_step": self.global_steps, "training/epoch": epoch, } ) # collect metrics metrics.update(compute_data_metrics(batch=batch, use_critic=self.use_critic)) metrics.update(compute_timing_metrics(batch=batch, timing_raw=timing_raw)) # TODO: implement actual tflpo and theoretical tflpo n_gpus = self.resource_pool_manager.get_n_gpus() metrics.update(compute_throughout_metrics(batch=batch, timing_raw=timing_raw, n_gpus=n_gpus)) # this is experimental and may be changed/removed in the future in favor of a general-purpose one if isinstance(self.train_dataloader.sampler, AbstractCurriculumSampler): self.train_dataloader.sampler.update(batch=batch) # TODO: make a canonical logger that supports various backend logger.log(data=metrics, step=self.global_steps) progress_bar.update(1) self.global_steps += 1 if is_last_step: pprint(f"Final validation metrics: {last_val_metrics}") progress_bar.close() return # this is experimental and may be changed/removed in the future # in favor of a general-purpose data buffer pool if hasattr(self.train_dataset, "on_batch_end"): # The dataset may be changed after each training batch self.train_dataset.on_batch_end(batch=batch) ================================================ FILE: verl_rl/verl/trainer/ppo/reward.py ================================================ # Copyright 2025 Individual Contributor: Thibaut Barroyer # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import multiprocessing import os from functools import partial import ray from verl import DataProto from verl.utils.reward_score import default_compute_score def _call_with_kwargs(raw_fn, extra_kwargs, *args, **kwargs): """Calls `raw_fn` by merging `extra_kwargs` into call-time `kwargs`, with `extra_kwargs` taking precedence. This function is used to merge additional keyword arguments with the original function's arguments. """ merged_kwargs = {**kwargs, **extra_kwargs} return raw_fn(*args, **merged_kwargs) def get_custom_reward_fn(config): """Load and return a custom reward function from external file. Dynamically imports a reward function from a specified file path and wraps it with additional keyword arguments from the configuration. Args: config (dict): Configuration dictionary containing custom_reward_function settings with 'path', 'name', and 'reward_kwargs' fields. Returns: callable or None: Wrapped reward function with merged kwargs, or None if no custom reward function is configured. 
Raises: FileNotFoundError: If the specified reward function file doesn't exist. RuntimeError: If there's an error loading the module from file. AttributeError: If the specified function name isn't found in the module. """ import importlib.util import sys reward_fn_config = config.get("custom_reward_function") or {} file_path = reward_fn_config.get("path") if not file_path: return None if not os.path.exists(file_path): raise FileNotFoundError(f"Reward function file '{file_path}' not found.") spec = importlib.util.spec_from_file_location("custom_module", file_path) module = importlib.util.module_from_spec(spec) try: sys.modules["custom_module"] = module spec.loader.exec_module(module) except Exception as e: raise RuntimeError(f"Error loading module from '{file_path}': {e}") from e function_name = reward_fn_config.get("name") if not hasattr(module, function_name): raise AttributeError(f"Reward function '{function_name}' not found in '{file_path}'.") print(f"using customized reward function '{function_name}' from '{file_path}'") raw_fn = getattr(module, function_name) reward_kwargs = dict(reward_fn_config.get("reward_kwargs", {})) return partial(_call_with_kwargs, raw_fn, reward_kwargs) def load_reward_manager(config, tokenizer, num_examine, **reward_kwargs): """ Load and initialize a reward manager based on the configuration. Args: config: PPO trainer configuration object containing reward_model fields. tokenizer: Tokenizer object used for processing text. num_examine: Number of samples to examine. **reward_kwargs: Additional keyword arguments for the reward manager. Returns: An instance of the specified reward manager class. """ from verl.workers.reward_manager import get_reward_manager_cls # The list of pre-defined reward managers is defined in `verl/workers/reward_manager/`: # naive: NaiveRewardManager # prime: PrimeRewardManager # batch: BatchRewardManager # dapo: DAPORewardManager # Note(haibin.lin): For custom reward managers, please make sure they are imported and # registered via `verl.workers.reward_manager.register` # By default reward_manager is set to naive (NaiveRewardManager) reward_manager_name = config.reward_model.get("reward_manager", "naive") reward_manager_cls = get_reward_manager_cls(reward_manager_name) # Try to get a custom reward function based on the configuration compute_score = get_custom_reward_fn(config) final_compute_score = compute_score if compute_score is None: sandbox_config = config.reward_model.get("sandbox_fusion") sandbox_url = sandbox_config.get("url") if sandbox_config else None memory_limit_mb = sandbox_config.get("memory_limit_mb", 1024) if sandbox_url: sandbox_manager = multiprocessing.Manager() # Create a semaphore to control concurrent access to the sandbox _concurrent_semaphore = sandbox_manager.Semaphore(sandbox_config.get("max_concurrent", 64)) final_compute_score = partial( default_compute_score, sandbox_fusion_url=sandbox_url, concurrent_semaphore=_concurrent_semaphore, memory_limit_mb=memory_limit_mb, ) else: final_compute_score = default_compute_score # Instantiate and return the reward manager with the specified parameters return reward_manager_cls( tokenizer=tokenizer, num_examine=num_examine, compute_score=final_compute_score, reward_fn_key=config.data.reward_fn_key, **reward_kwargs, ) def compute_reward(data: DataProto, reward_fn): """ Compute reward for a batch of data. Args: data: DataProto object containing the input data. reward_fn: Reward function to compute the reward. Returns: Tuple of reward tensor and extra info dictionary.
""" try: reward_result = reward_fn(data, return_dict=True) reward_tensor = reward_result["reward_tensor"] reward_extra_infos_dict = reward_result.get("reward_extra_info", {}) except Exception as e: print(f"Error in reward_fn: {e}") reward_tensor = reward_fn(data) reward_extra_infos_dict = {} return reward_tensor, reward_extra_infos_dict @ray.remote(num_cpus=1) def compute_reward_async(data: DataProto, config=None, tokenizer=None, reward_fn=None): """ Load the reward manager and compute the reward for a batch of data. This is meant to be run in a separate Ray worker. """ if reward_fn is None: assert config is not None and tokenizer is not None, ( "config and tokenizer must not be None when reward_fn is None" ) import warnings warnings.warn("using config and tokenizer with compute_reward_async is deprecated", stacklevel=2) reward_fn = load_reward_manager( config, tokenizer, num_examine=0, **config.reward_model.get("reward_kwargs", {}) ) return compute_reward(data, reward_fn) ================================================ FILE: verl_rl/verl/trainer/runtime_env.yaml ================================================ working_dir: ./ excludes: ["/.git/"] env_vars: TORCH_NCCL_AVOID_RECORD_STREAMS: "1" CUDA_DEVICE_MAX_CONNECTIONS: "1" ================================================ FILE: verl_rl/verl/utils/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from . import config, tokenizer from .config import omega_conf_to_dataclass from .tokenizer import hf_processor, hf_tokenizer __all__ = tokenizer.__all__ + config.__all__ + ["hf_processor", "hf_tokenizer", "omega_conf_to_dataclass"] ================================================ FILE: verl_rl/verl/utils/activation_offload.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # Copyright (c) 2022-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Functionality for CPU offloading of tensors saved for backward pass.""" from __future__ import annotations import functools import logging import os from typing import Any, Optional import torch from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from verl.utils.device import get_torch_device from verl.utils.fsdp_utils import FSDPModule as FSDP2 logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) def _get_unique_tensor_key(tensor): key = (tensor.untyped_storage().data_ptr() + tensor.storage_offset(), tensor.dtype) return key class FSDPParameterFilter: def __init__(self): self.model_parameters_storage = set() def __call__(self, tensor): return tensor.untyped_storage().data_ptr() not in self.model_parameters_storage def update_model_parameters(self, model): new_storage = set() for p in model.parameters(): new_storage.add(p.data.untyped_storage().data_ptr()) self.model_parameters_storage = new_storage class CpuOffloadHookWithOffloadHandler: """Context-manager that offloads/recovers tensors through an offload hander. The hook just offloads/recovers the tensor object to the handler through `tensor_push` and `tensor_pop` interface. How the offload-handler manages the offloading, recovering or prefetching timing is transparent to this hook. """ def __init__( self, offload_handler: OffloadHandler, handler_extra_kwargs: Optional[dict[str, Any]] = None, ) -> None: if handler_extra_kwargs is None: handler_extra_kwargs = {} self.offload_handler: OffloadHandler = offload_handler self.handler_extra_kwargs: dict[str, Any] = handler_extra_kwargs self.inside_context = False def __enter__(self): self.inside_context = True torch._C._autograd._push_saved_tensors_default_hooks(self.on_save_for_backward, self.on_get_saved_tensor) def __exit__(self, *args: Any): self.inside_context = False torch._C._autograd._pop_saved_tensors_default_hooks() def on_save_for_backward(self, tensor: torch.Tensor) -> Any: retrieve_identifier = self.offload_handler.tensor_push(tensor, **self.handler_extra_kwargs) return retrieve_identifier def on_get_saved_tensor(self, saved_state: Any) -> torch.Tensor: tensor = self.offload_handler.tensor_pop(saved_state, **self.handler_extra_kwargs) return tensor class OffloadHandler: """A base class for CPU offload-handler.""" def __init__(self) -> None: pass def tensor_push(self, tensor: torch.Tensor, **kwargs) -> Any: """Tensor push.""" raise NotImplementedError( "`tensor_push is not implented in OffloadHandler class. Inherit this class and implement your " "custom tensor_push." ) def tensor_pop(self, tensor_tag: Any, **kwargs): """Tensor pop.""" raise NotImplementedError( "`tensor_pop is not implented in OffloadHandler class. Inherit this class and implement your " "custom tensor_pop." ) class GroupCommitFunction(torch.autograd.Function): """this is a dummy op with output identical to input. However, it is necessary for marking a timepoint for offload handler to accomplish all synchronizations. Implementing it as a function is necessary because we need to actions in both forward and backward. 
""" @staticmethod def forward(ctx, tensor, cpu_offload_handler): # pylint: disable=missing-function-docstring cpu_offload_handler.on_group_commit_forward() ctx.cpu_offload_handler = cpu_offload_handler # return the identical tensor return tensor @staticmethod def backward(ctx, grad_output): # pylint: disable=missing-function-docstring cpu_offload_handler = ctx.cpu_offload_handler cpu_offload_handler.on_group_commit_backward() return grad_output, None group_prefetch_offload_commit = GroupCommitFunction.apply class SynchronizedGroupOffloadHandler(OffloadHandler): """Offload Handler that offloads/reloads in a synchronized way. The device-to-host and host-to-device copying happen in the same stream as the computation kernels, thus the copying will block computation. """ def __init__(self, num_offload_group, tensor_need_offloading_checker=(lambda _: True)) -> None: super().__init__() self.num_offload_group = num_offload_group self.tensor_need_offloading_checker = tensor_need_offloading_checker self.groupid_reset() def groupid_reset(self): """Groupid reset.""" # Data structures to label saved tensors and book-keep their cpu copies. # Currently, on push, create a new cpu tensor and copies; on pop, copies # the tensor back to gpu and deletes the cpu tensor. # These will increment whenever `group_commit()` is invoked self.current_group, self.tensor_count_current_group = (0, 0) self.torch_tensor_count = 0 self.tensor_tag_to_state = {} def on_group_commit_forward(self): """On group commit forward.""" # finishing up with updating current group and tensor count self.current_group += 1 # increment self.tensor_count_current_group = 0 # reset def on_group_commit_backward(self): """On group commit backward.""" self.current_group -= 1 assert self.current_group >= 0 @staticmethod def offload(src_tensor, pin_memory=True): """Offload.""" cpu_backup = torch.empty( src_tensor.size(), dtype=src_tensor.dtype, layout=src_tensor.layout, device="cpu", pin_memory=pin_memory, ) cpu_backup.copy_(src_tensor, non_blocking=True) state = (src_tensor.device, cpu_backup) return state @staticmethod def reload(state, non_blocking=None): """Reload.""" dev, cpu_backup = state if non_blocking is None: non_blocking = cpu_backup.is_pinned() return cpu_backup.to(dev, non_blocking=non_blocking) def tensor_push(self, tensor: torch.Tensor, **kwargs): """Tensor push.""" # obtain a unique tensor tag tensor_tag = (self.current_group, self.tensor_count_current_group) self.tensor_count_current_group += 1 assert tensor_tag not in self.tensor_tag_to_state if self.current_group < self.num_offload_group and self.tensor_need_offloading_checker(tensor): state = SynchronizedGroupOffloadHandler.offload(tensor) self.tensor_tag_to_state[tensor_tag] = state else: # will be offloaded together after group commit self.tensor_tag_to_state[tensor_tag] = tensor return tensor_tag def tensor_pop(self, tensor_tag, **kwargs): """Tensor pop.""" assert tensor_tag in self.tensor_tag_to_state state = self.tensor_tag_to_state.pop(tensor_tag) if isinstance(state, tuple): tensor = SynchronizedGroupOffloadHandler.reload(state) else: tensor = state return tensor class AsyncDoubleBufferGroupOffloadHandler(SynchronizedGroupOffloadHandler): """Compared to synchronize, this uses more memory because of the buffer but achieves better performance due to the overlapping. D2h and h2d copying are completely hidden behind computation if computation time of a layer is longer than host-device communication time. 
Bulk offloading with delay and bulk reloading with prefetch are implemented.""" def __init__( self, num_offload_group, # must be <= actual number of groups (number of commits) num_model_group, tensor_need_offloading_checker=(lambda t: True), ) -> None: super().__init__( num_offload_group=num_offload_group, tensor_need_offloading_checker=tensor_need_offloading_checker, ) # Number of layers in the model self.num_layers = num_model_group # Data Structure to maintain reference to activation tensors self.tensor_tag_to_buf = {} # Tracking the number of layers offloaded self.offloaded_group_count = 0 # Core data structure that decides the window for offloading self.layer_window_map = {} self.group_offload_mapping = {} # Logic to make offloading load balance across computation # for optimal CPU/GPU interconnect usage constant = 0 for i in range(self.num_offload_group): self.layer_window_map[i] = ((self.num_layers // self.num_offload_group) * (i + 1)) - 1 if i < (self.num_layers % self.num_offload_group): self.layer_window_map[i] += i + 1 constant = i + 1 else: self.layer_window_map[i] += constant # allocate streams and events for synchronization self.d2h_stream = get_torch_device().Stream() self.h2d_stream = get_torch_device().Stream() def tensor_push(self, tensor: torch.Tensor, **kwargs) -> Any: torch_stray_tensor = isinstance( tensor, torch._subclasses.fake_tensor.FakeTensor | torch._subclasses.functional_tensor.FunctionalTensor, ) need_offload = not torch_stray_tensor need_offload = need_offload and self.tensor_need_offloading_checker(tensor) if need_offload: # obtain a unique tensor tag tensor_tag = (self.current_group, self.tensor_count_current_group) self.tensor_count_current_group += 1 assert tensor_tag not in self.tensor_tag_to_state self.tensor_tag_to_state[tensor_tag] = tensor if self.current_group < self.num_offload_group: self.tensor_tag_to_buf[tensor_tag] = tensor else: tensor_tag = tensor return tensor_tag def tensor_pop(self, tensor_tag, **kwargs): """Tensor pop.""" if isinstance(tensor_tag, torch.Tensor): return tensor_tag assert tensor_tag in self.tensor_tag_to_state tensor = self.tensor_tag_to_state.pop(tensor_tag) self.tensor_tag_to_buf.pop(tensor_tag, None) # the tensor should have been copied back in on_group_commit_backward() # which invokes bulk_reload_group. 
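# --- Editor's note (illustrative): offload()/reload() above depend on pinned host
# --- memory so copy_(..., non_blocking=True) can overlap with compute on the
# --- dedicated d2h/h2d streams; a stream sync is required before reading the copy.
import torch as _torch
if _torch.cuda.is_available():
    _src = _torch.randn(1024, device="cuda")
    _cpu = _torch.empty(_src.size(), dtype=_src.dtype, device="cpu", pin_memory=True)
    _cpu.copy_(_src, non_blocking=True)  # asynchronous d2h copy
    _torch.cuda.current_stream().synchronize()  # wait before touching _cpu
# --- end editor's note ---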
assert not isinstance(tensor, tuple) return tensor def bulk_offload_group(self, group_to_offload): """Bulk offload group.""" offload_mapping = {} offload_size = 0 with get_torch_device().stream(self.d2h_stream): for tensor_tag, state in self.tensor_tag_to_state.items(): group_id, _ = tensor_tag if group_id == group_to_offload: assert not isinstance(state, tuple) key = _get_unique_tensor_key(state) if key not in offload_mapping: offload_mapping[key] = state # if offload, return the reference to cpu copy self.tensor_tag_to_state[tensor_tag] = (key, state.shape) for key, tensor in offload_mapping.items(): state = SynchronizedGroupOffloadHandler.offload(tensor) offload_size += tensor.numel() * tensor.element_size() offload_mapping[key] = state self.group_offload_mapping[group_to_offload] = offload_mapping def synchronize_on_group_commit_forward(self, current_group): """Synchronize on group commit forward.""" # For the first group, kickstart the offload after we have # the first compute completion if current_group == 0: self.d2h_stream.wait_stream(get_torch_device().current_stream()) self.bulk_offload_group(current_group) # Window map data structure helps us synchronize based on number # of layers offloaded if self.layer_window_map[self.offloaded_group_count] == current_group: # Stream synchronization both ways self.d2h_stream.wait_stream(get_torch_device().current_stream()) get_torch_device().current_stream().wait_stream(self.d2h_stream) # Time to free the activation memory after usage for tensor_tag, _ in self.tensor_tag_to_buf.items(): if tensor_tag[0] == self.offloaded_group_count: self.tensor_tag_to_buf[tensor_tag] = None # Time to offload the next group if self.offloaded_group_count < (self.num_offload_group - 1): self.bulk_offload_group(self.offloaded_group_count + 1) # Increment the offload group count to keep track self.offloaded_group_count += 1 def on_group_commit_forward(self): """This function will cause host device synchronization""" # handle synchronization events self.synchronize_on_group_commit_forward(self.current_group) super().on_group_commit_forward() @torch.no_grad def bulk_reload_group(self, group_to_reload): """Bulk reload group.""" assert group_to_reload < self.num_offload_group with get_torch_device().stream(self.h2d_stream): # move back tensors offload_mapping = self.group_offload_mapping.pop(group_to_reload) assert offload_mapping is not None for key, state in offload_mapping.items(): offload_mapping[key] = SynchronizedGroupOffloadHandler.reload(state) for tensor_label, state in self.tensor_tag_to_state.items(): group_id, _ = tensor_label if group_id == group_to_reload and not isinstance(state, torch.Tensor): assert isinstance(state, tuple), f"{group_id} {state}" key, shape = state recovered_tensor = offload_mapping[key].view(shape) self.tensor_tag_to_state[tensor_label] = recovered_tensor def on_group_commit_backward(self): # first decrement the current group. # after last commit in forward, the group will +1; in backward it -1. # Finally it should be decremented to 0. 
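# --- Editor's worked example of layer_window_map (built in __init__ above), which
# --- drives offload timing in forward and the reload timing used just below.
# --- Same arithmetic as the constructor, standalone:
def _window_map(num_layers, num_offload_group):
    mapping, constant = {}, 0
    for i in range(num_offload_group):
        mapping[i] = (num_layers // num_offload_group) * (i + 1) - 1
        if i < num_layers % num_offload_group:
            mapping[i] += i + 1
            constant = i + 1
        else:
            mapping[i] += constant
    return mapping
assert _window_map(5, 2) == {0: 2, 1: 4}  # group 0 commits after layer 2, group 1 after layer 4
# --- end editor's example ---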
self.current_group -= 1 assert self.current_group >= 0 # Layer window data structure helps us to reload at right times if self.layer_window_map[self.offloaded_group_count - 1] == self.current_group: # Stream synchronization both ways self.h2d_stream.wait_stream(get_torch_device().current_stream()) get_torch_device().current_stream().wait_stream(self.h2d_stream) # Time to reload the next group self.bulk_reload_group(self.offloaded_group_count - 1) # Decrease the offloading group counter self.offloaded_group_count -= 1 if self.offloaded_group_count > 1 else 0 # Last group computation needs to wait till all the reloads complete if self.current_group == 0: get_torch_device().current_stream().wait_stream(self.h2d_stream) self.offloaded_group_count = 0 def get_activation_offload_context( num_layers: int = 1, model_layers: int = 1, tensor_need_offloading_checker=(lambda t: True) ): cpu_offload_handler = AsyncDoubleBufferGroupOffloadHandler( num_offload_group=num_layers, num_model_group=model_layers, tensor_need_offloading_checker=tensor_need_offloading_checker, ) def group_prefetch_offload_commit_async(tensor): return group_prefetch_offload_commit(tensor, cpu_offload_handler) return ( CpuOffloadHookWithOffloadHandler(offload_handler=cpu_offload_handler), group_prefetch_offload_commit_async, ) class ActivationHandler: def __init__(self, offload_ctx, sync_func, tensor_filter, enable_ckpt): self._offload_ctx = offload_ctx self._sync_func = sync_func self._enable_ckpt = enable_ckpt self._tensor_filter = tensor_filter if enable_ckpt: self.checkpoint_fn = functools.partial( torch.utils.checkpoint.checkpoint, use_reentrant=True, ) def pre_forward(self, module): if module.training: self._offload_ctx.__enter__() self._tensor_filter.update_model_parameters(module) def post_forward(self, module): if module.training: self._offload_ctx.__exit__(None, None, None) def _pack_kwargs(self, *args, **kwargs): kwarg_keys = [] flat_args = list(args) for k, v in kwargs.items(): kwarg_keys.append(k) flat_args.append(v) return tuple(flat_args), tuple(kwarg_keys) def _unpack_kwargs(self, flat_args, kwarg_keys): assert len(kwarg_keys) <= len(flat_args), f"too many keys {len(kwarg_keys)} vs. 
{len(flat_args)}" if len(kwarg_keys) == 0: return flat_args, {} args = flat_args[: -len(kwarg_keys)] kwargs = dict(zip(kwarg_keys, flat_args[-len(kwarg_keys) :], strict=True)) return args, kwargs def _ckpt_forward(self, forward_method, *args, **kwargs): flat_args, kwarg_keys = self._pack_kwargs(*args, **kwargs) def my_function(*inputs): # unpack back into args and kwargs nonlocal forward_method, kwarg_keys unpacked_args, unpacked_kwargs = self._unpack_kwargs(inputs, kwarg_keys) # run original module return forward_method(*unpacked_args, **unpacked_kwargs) return self.checkpoint_fn( my_function, *flat_args, ) def forward(self, module, forward_method, *args, **kwargs): if not module.training: return forward_method(*args, **kwargs) if not self._enable_ckpt: ret = forward_method(*args, **kwargs) else: ret = self._ckpt_forward(forward_method, *args, **kwargs) binded_tensor = ret if isinstance(ret, tuple): binded_tensor = ret[0] binded_tensor = self._sync_func(binded_tensor) final_ret = binded_tensor if isinstance(ret, tuple): final_ret = (final_ret,) + ret[1:] return final_ret def wrap_module_forward_method(self, module): orig_method = module.forward handler = self @functools.wraps(orig_method) def wrapped_method(model_self, *args, **kwargs): nonlocal handler handler.pre_forward(model_self) out = handler.forward(model_self, orig_method, *args, **kwargs) handler.post_forward(model_self) return out module.forward = wrapped_method.__get__(module, type(module)) def enable_activation_offloading(model, strategy, enable_ckpt=False): """ Enable activation offloading for the model. It groups activations by TransformerLayer and offloads activation groups asynchronously. This means that the offloading of the i-th activation group and the computation of the i+1-th activation group happen at the same time, and there are at most two activation groups in GPU memory. Args: model: the model to enable activation offloading strategy: the training strategy of the model, such as "fsdp" enable_ckpt: whether activation checkpointing (also called gradient checkpointing) has been enabled for the model Note: For best efficiency, activation offloading is usually combined with activation checkpointing. However, this implementation of activation offloading conflicts with the implementation of activation checkpointing in some training strategies. This function resolves this conflict, and therefore requires the "strategy" and "enable_ckpt" arguments. Returns: """ assert strategy == "fsdp" or strategy == "fsdp2", "activation offloading only supports fsdp strategy" layers = [] def get_layers(module): for name, child in module.named_children(): if not isinstance(child, FSDP | FSDP2): get_layers(child) else: wrapped_module = child if isinstance(child, FSDP): wrapped_module = child._fsdp_wrapped_module # In some cases, torch.nn.Embedding is wrapped with FSDP alone. However, the activation # size of torch.nn.Embedding is small, so it's not necessary to offload it.
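# --- Editor's illustration of the _pack_kwargs/_unpack_kwargs round-trip used by
# --- _ckpt_forward above: the reentrant torch.utils.checkpoint path forwards only
# --- positional args, so kwargs are flattened going in and rebuilt inside. Same
# --- logic, standalone:
def _pack_kw(*args, **kwargs):
    return tuple(args) + tuple(kwargs.values()), tuple(kwargs.keys())
def _unpack_kw(flat_args, keys):
    if not keys:
        return flat_args, {}
    return flat_args[: -len(keys)], dict(zip(keys, flat_args[-len(keys):], strict=True))
_flat, _keys = _pack_kw(1, 2, scale=3.0)
assert _unpack_kw(_flat, _keys) == ((1, 2), {"scale": 3.0})
# --- end editor's illustration ---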
if not isinstance(wrapped_module, torch.nn.Embedding): layers.append(child) get_layers(model) if len(layers) < 3: logger.warning(f"Found only {len(layers)} fsdp layers, no need to enable async activation offloading") return tensor_filter = FSDPParameterFilter() context, sync_func = get_activation_offload_context(len(layers) - 1, len(layers), tensor_filter) if enable_ckpt: # The implementation of activation checkpointing in the transformers library is incompatible with # activation offloading, so it is disabled here; this module supplies its own version of activation # checkpointing, so that the two features can be enabled at the same time. for module in model.modules(): if hasattr(module, "gradient_checkpointing_disable"): module.gradient_checkpointing_disable() handler = ActivationHandler(context, sync_func, tensor_filter, enable_ckpt) for layer in layers: module = layer if isinstance(layer, FSDP): module = module._fsdp_wrapped_module handler.wrap_module_forward_method(module) ================================================ FILE: verl_rl/verl/utils/checkpoint/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: verl_rl/verl/utils/checkpoint/checkpoint_manager.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import random import shutil import numpy as np import torch import torch.distributed from omegaconf import DictConfig from transformers import PreTrainedTokenizer, ProcessorMixin from verl.utils.device import get_device_name, get_torch_device class BaseCheckpointManager: """ A checkpoint manager that saves and loads - model - optimizer - lr_scheduler - extra_states in a SPMD way.
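For example (illustrative world_size=4, not a fixed layout): a checkpoint directory written by the FSDP subclass of this manager contains model_world_size_4_rank_{0..3}.pt, optim_world_size_4_rank_{0..3}.pt and extra_state_world_size_4_rank_{0..3}.pt, plus a huggingface/ folder with the config and tokenizer written by rank 0.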
We save - sharded model states and optimizer states - full lr_scheduler states - huggingface tokenizer and config for ckpt merge """ def __init__( self, model, optimizer: torch.optim.Optimizer, lr_scheduler: torch.optim.lr_scheduler.LRScheduler = None, processing_class: PreTrainedTokenizer | ProcessorMixin = None, checkpoint_config: DictConfig = None, ): self.checkpoint_config = checkpoint_config checkpoint_load_contents = checkpoint_config.get("load_contents", None) if checkpoint_config else None checkpoint_save_contents = checkpoint_config.get("save_contents", None) if checkpoint_config else None if checkpoint_load_contents is None: checkpoint_load_contents = ["model", "optimizer", "extra"] if checkpoint_save_contents is None: checkpoint_save_contents = ["model", "optimizer", "extra"] self.previous_global_step = None self.previous_saved_paths = [] self.model = model self.optimizer = optimizer self.lr_scheduler = lr_scheduler self.processing_class = processing_class self.checkpoint_load_contents = checkpoint_load_contents self.checkpoint_save_contents = checkpoint_save_contents self.rank = torch.distributed.get_rank() self.world_size = torch.distributed.get_world_size() @property def should_save_model(self) -> bool: """ Returns True if 'model' is in checkpoint_save_contents, indicating the model state should be saved. """ return "model" in self.checkpoint_save_contents @property def should_save_optimizer(self) -> bool: """ Returns True if 'optimizer' is in checkpoint_save_contents, indicating the optimizer state should be saved. """ return "optimizer" in self.checkpoint_save_contents @property def should_save_extra(self) -> bool: """ Returns True if 'extra' is in checkpoint_save_contents, indicating the extra state should be saved. """ return "extra" in self.checkpoint_save_contents @property def should_save_hf_model(self) -> bool: """ Returns True if 'hf_model' is in checkpoint_save_contents, indicating the model should be converted to hf model and saved. """ return "hf_model" in self.checkpoint_save_contents @property def should_load_model(self) -> bool: """ Returns True if 'model' is in checkpoint_load_contents, indicating the model state should be loaded. """ return "model" in self.checkpoint_load_contents @property def should_load_optimizer(self) -> bool: """ Returns True if 'optimizer' is in checkpoint_load_contents, indicating the optimizer state should be loaded. """ return "optimizer" in self.checkpoint_load_contents @property def should_load_extra(self) -> bool: """ Returns True if 'extra' is in checkpoint_load_contents, indicating the extra state should be loaded. 
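Example (illustrative config values, not a fixed schema): with checkpoint_config = {"load_contents": ["model", "extra"], "save_contents": ["model", "optimizer", "extra", "hf_model"]}, should_load_optimizer is False while should_save_hf_model is True, i.e. a resume would skip the optimizer state but each save would also export an HF-format model.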
""" return "extra" in self.checkpoint_load_contents def load_checkpoint(self, local_path: str, hdfs_path: str = None, del_local_after_load: bool = False): raise NotImplementedError def save_checkpoint( self, local_path: str, hdfs_path: str = None, global_step: int = 0, max_ckpt_to_keep: int = None ): raise NotImplementedError @staticmethod def checkpath(local_path: str, hdfs_path: str): assert local_path is not None or hdfs_path is not None, "local_path and hdfs_path cannot be both None" return local_path is not None, local_path if local_path is not None else hdfs_path def remove_previous_save_local_path(self, path): if isinstance(path, str): path = [path] for p in path: abs_path = os.path.abspath(p) print(f"Checkpoint manager remove previous save local path: {abs_path}") if not os.path.exists(abs_path): continue shutil.rmtree(abs_path, ignore_errors=True) @staticmethod def get_rng_state(): rng_state = { "cpu": torch.get_rng_state(), "numpy": np.random.get_state(), "random": random.getstate(), } if get_device_name() != "cpu": rng_state[get_device_name()] = get_torch_device().get_rng_state() return rng_state @staticmethod def load_rng_state(rng_state): torch.set_rng_state(rng_state["cpu"]) np.random.set_state(rng_state["numpy"]) random.setstate(rng_state["random"]) if get_device_name() != "cpu": get_torch_device().set_rng_state(rng_state[get_device_name()]) def find_latest_ckpt_path(path, directory_format="global_step_{}"): """ Return the most recent checkpoint directory based on a tracker file. Args: path (str): Base directory containing the checkpoint tracker. directory_format (str): Template for checkpoint subfolders with one placeholder for the iteration number (default "global_step_{}"). Returns: str or None: Full path to the latest checkpoint directory, or None if the tracker or checkpoint folder is missing. """ if path is None: return None tracker_file = get_checkpoint_tracker_filename(path) if not os.path.exists(tracker_file): print(f"Checkpoint tracker file does not exist: {tracker_file}") return None with open(tracker_file, "rb") as f: iteration = int(f.read().decode()) ckpt_path = os.path.join(path, directory_format.format(iteration)) if not os.path.exists(ckpt_path): print("Checkpoint does not exist: %s", ckpt_path) return None print("Found checkpoint: %s", ckpt_path) return ckpt_path def get_checkpoint_tracker_filename(root_path: str): """ Tracker file rescords the latest chckpoint during training to restart from. """ return os.path.join(root_path, "latest_checkpointed_iteration.txt") def should_save_ckpt_esi(max_steps_duration: float, save_ckpt_duration: float = 60, redundant_time: float = 0) -> bool: """ Determine if checkpoint should be saved based on capacity esi expiration. 
Args: max_steps_duration: Max estimated time (seconds) required to complete one training step save_ckpt_duration: Estimated time (seconds) required to save checkpoint (default: 60) redundant_time: Additional buffer time (seconds) for unexpected delays (default: 0) """ exp_ts_mlp = os.getenv("MLP_CURRENT_CAPACITY_BLOCK_EXPIRATION_TIMESTAMP") # vemlp exp_ts_aws = os.getenv("SAGEMAKER_CURRENT_CAPACITY_BLOCK_EXPIRATION_TIMESTAMP") # aws if exp_ts_mlp: try: import time remaining = float(exp_ts_mlp) - time.time() except ValueError: return False return ( remaining > 0 and max_steps_duration > 0 and remaining <= save_ckpt_duration + max_steps_duration + redundant_time ) elif exp_ts_aws: from datetime import datetime, timedelta expiration_time = datetime.fromtimestamp(int(exp_ts_aws)) time_difference = expiration_time - datetime.now() threshold_minutes = (save_ckpt_duration + max_steps_duration + redundant_time) / 60 return time_difference < timedelta(minutes=threshold_minutes) else: return False ================================================ FILE: verl_rl/verl/utils/checkpoint/fsdp_checkpoint_manager.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import logging import os import warnings from dataclasses import asdict, dataclass from typing import Optional import torch import torch.distributed from accelerate import init_empty_weights from omegaconf import DictConfig from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp import ShardedOptimStateDictConfig, ShardedStateDictConfig, StateDictType from transformers import GenerationConfig, PreTrainedTokenizer, ProcessorMixin from verl.utils.device import is_cuda_available from verl.utils.fs import copy_to_local, is_non_local, local_mkdir_safe from verl.utils.fsdp_utils import fsdp_version, get_fsdp_full_state_dict, get_fsdp_state_ctx from verl.utils.logger import log_with_rank from .checkpoint_manager import BaseCheckpointManager # Setup logging logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "INFO")) @dataclass class FSDPConfig: """Configuration for FSDP checkpointing. Args: FSDP_version (int): Version of FSDP being used. world_size (int): Number of processes in the distributed training setup. """ FSDP_version: int world_size: int class FSDPCheckpointManager(BaseCheckpointManager): """ Manage FSDP checkpointing in SPMD training. - Saves/loads per-rank sharded model & optimizer states - Persists full lr_scheduler and RNG state - Stores HF tokenizer/processor and model/config for unified restore Args: model (FSDP): Wrapped model instance. optimizer (Optimizer): Training optimizer. lr_scheduler (LRScheduler): Learning-rate scheduler. processing_class (PreTrainedTokenizer or ProcessorMixin, optional): Pre-/post-processing artifact handler. checkpoint_contents DictConfig: Configuration for checkpoint contents. - 'load': Components to load; must contain 'model'. 
Defaults to ['model', 'optimizer', 'extra']. - 'save': Components to save; must contain 'model'. Defaults to ['model', 'optimizer', 'extra']. """ def __init__( self, model: FSDP, optimizer: Optional[torch.optim.Optimizer] = None, lr_scheduler: Optional[torch.optim.lr_scheduler.LRScheduler] = None, processing_class: PreTrainedTokenizer | ProcessorMixin = None, checkpoint_config: DictConfig = None, **kwargs, ): if processing_class is None: assert "tokenizer" in kwargs, "tokenizer or processor must be provided" warnings.warn( "`tokenizer` is deprecated. use `processing_class` instead.", DeprecationWarning, stacklevel=2 ) processing_class = kwargs.pop("tokenizer") super().__init__( model, optimizer, lr_scheduler=lr_scheduler, processing_class=processing_class, checkpoint_config=checkpoint_config, ) def load_checkpoint(self, local_path: str, hdfs_path: str = None, del_local_after_load=False): """ Load an FSDP checkpoint for this rank. Downloads and loads: - model and optimizer shards - extra state dict (scheduler + RNG) Args: local_path: Directory with per-rank checkpoint files. hdfs_path: Unused (for API compatibility). del_local_after_load: Remove local files after loading. """ if local_path is None: return # check if the checkpoint_load_contents is valid if self.should_load_model: assert self.model is not None, "model must be provided when checkpoint_contents.load includes ['model']" if self.should_load_optimizer: assert self.optimizer is not None, ( "optimizer must be provided when checkpoint_contents.load includes ['optimizer']" ) # every rank downloads its own checkpoint state_dict_cfg = ( ShardedStateDictConfig(offload_to_cpu=True if is_cuda_available else False) if self.should_load_model else None ) optim_cfg = ( ShardedOptimStateDictConfig(offload_to_cpu=True if is_cuda_available else False) if self.should_load_optimizer else None ) with get_fsdp_state_ctx(self.model, StateDictType.SHARDED_STATE_DICT, state_dict_cfg, optim_cfg): if self.should_load_model: remote_model_path = os.path.join(local_path, f"model_world_size_{self.world_size}_rank_{self.rank}.pt") local_model_path = copy_to_local(remote_model_path) model_state_dict = torch.load(local_model_path, weights_only=False) self.model.load_state_dict(model_state_dict) log_with_rank(f"Loaded model from {remote_model_path}", rank=self.rank, logger=logger) if self.should_load_optimizer: remote_optim_path = os.path.join(local_path, f"optim_world_size_{self.world_size}_rank_{self.rank}.pt") local_optim_path = copy_to_local(remote_optim_path) optimizer_state_dict = torch.load(local_optim_path, weights_only=False) self.optimizer.load_state_dict(optimizer_state_dict) log_with_rank(f"Loaded optimizer from {remote_optim_path}", rank=self.rank, logger=logger) if self.should_load_extra: remote_extra_state_path = os.path.join( local_path, f"extra_state_world_size_{self.world_size}_rank_{self.rank}.pt" ) local_extra_state_path = copy_to_local(remote_extra_state_path) extra_state_dict = torch.load(local_extra_state_path, weights_only=False) # recover random state if "rng" in extra_state_dict: # 'rng' may not exist for backward compatibility self.load_rng_state(extra_state_dict["rng"]) log_with_rank(f"Loaded rng from {remote_extra_state_path}", rank=self.rank, logger=logger) lr_scheduler_state_dict = extra_state_dict["lr_scheduler"] if lr_scheduler_state_dict is not None and self.lr_scheduler is not None: self.lr_scheduler.load_state_dict(lr_scheduler_state_dict) log_with_rank(f"Loaded lr_scheduler from {remote_extra_state_path}", rank=self.rank,
logger=logger) if self.rank == 0 and del_local_after_load: try: os.remove(local_model_path) if is_non_local(local_model_path) else None os.remove(local_optim_path) if is_non_local(local_optim_path) else None os.remove(local_extra_state_path) if is_non_local(local_extra_state_path) else None except Exception as e: log_with_rank( f"remove local resume ckpt file after loading failed, exception {e} will be ignored", rank=self.rank, logger=logger, ) # wait for everyone to load checkpoints torch.distributed.barrier() def save_checkpoint(self, local_path: str, hdfs_path: str = None, global_step: int = 0, max_ckpt_to_keep=None): """ Save an FSDP checkpoint for this rank. Writes: - model & optimizer shard files - extra state dict (scheduler + RNG) - HF tokenizer/processor and model/config on rank 0 - optional full HF model under 'huggingface/' if requested Rotates old checkpoints, keeping at most `max_ckpt_to_keep`. Args: local_path: Target directory for checkpoint files. hdfs_path: Unused (for API compatibility). global_step: Current training step (used for bookkeeping). max_ckpt_to_keep: Number of recent checkpoints to retain. """ if local_path is None: return # record the previous global step self.previous_global_step = global_step # remove previous local_path, only rank 0 should do this if ( self.rank == 0 and max_ckpt_to_keep and isinstance(max_ckpt_to_keep, int) and max_ckpt_to_keep > 0 and len(self.previous_saved_paths) >= max_ckpt_to_keep ): keep_start = len(self.previous_saved_paths) - max_ckpt_to_keep + 1 self.remove_previous_save_local_path(self.previous_saved_paths[:keep_start]) self.previous_saved_paths = self.previous_saved_paths[keep_start:] local_path = local_mkdir_safe(local_path) torch.distributed.barrier() # check if the checkpoint_save_contents is valid if self.should_save_model: assert self.model is not None, "model must be provided when checkpoint_contents.save includes ['model']" if self.should_save_optimizer: assert self.optimizer is not None, ( "optimizer must be provided when checkpoint_contents.save includes ['optimizer']" ) # every rank will save its own model and optim shard state_dict_cfg = ShardedStateDictConfig(offload_to_cpu=True if is_cuda_available else False) optim_cfg = ShardedOptimStateDictConfig(offload_to_cpu=True if is_cuda_available else False) with warnings.catch_warnings(): warnings.simplefilter("ignore") with get_fsdp_state_ctx(self.model, StateDictType.SHARDED_STATE_DICT, state_dict_cfg, optim_cfg): model_path = os.path.join(local_path, f"model_world_size_{self.world_size}_rank_{self.rank}.pt") optim_path = os.path.join(local_path, f"optim_world_size_{self.world_size}_rank_{self.rank}.pt") extra_path = os.path.join(local_path, f"extra_state_world_size_{self.world_size}_rank_{self.rank}.pt") if self.should_save_model: model_state_dict = self.model.state_dict() torch.save(model_state_dict, model_path) log_with_rank(f"Saved model to {os.path.abspath(model_path)}", rank=self.rank, logger=logger) if self.should_save_optimizer: optimizer_state_dict = self.optimizer.state_dict() torch.save(optimizer_state_dict, optim_path) log_with_rank(f"Saved optim to {os.path.abspath(optim_path)}", rank=self.rank, logger=logger) if self.should_save_extra: lr_scheduler_state_dict = self.lr_scheduler.state_dict() if self.lr_scheduler is not None else None extra_state_dict = { "lr_scheduler": lr_scheduler_state_dict, "rng": self.get_rng_state(), } torch.save(extra_state_dict, extra_path) log_with_rank(f"Saved extra_state to {os.path.abspath(extra_path)}", rank=self.rank, 
logger=logger) if self.rank == 0: # Save HF tokenizer/processor and model config on rank 0 to huggingface/ directory, no matter whether # huggingface model is requested to be saved or not. if fsdp_version(self.model) == 1: unwrap_model = self.model._fsdp_wrapped_module else: unwrap_model = self.model hf_config_tokenizer_path = os.path.join(local_path, "huggingface") local_mkdir_safe(hf_config_tokenizer_path) model_config = unwrap_model.config generation_config = None if unwrap_model.can_generate() and hasattr(model_config, "name_or_path") and model_config.name_or_path: try: # Some models' name_or_path is empty if not initialized from pretrained; # in this case, we don't save the generation config. generation_config = GenerationConfig.from_pretrained(model_config.name_or_path) generation_config.save_pretrained(hf_config_tokenizer_path) except Exception: # if the generation config isn't available, we don't save it pass model_config.save_pretrained(hf_config_tokenizer_path) self.processing_class.save_pretrained(hf_config_tokenizer_path) log_with_rank( f"Saved model config and tokenizer class to {os.path.abspath(hf_config_tokenizer_path)}", rank=self.rank, logger=logger, log_only_rank_0=True, ) # Also save runtime FSDP config fsdp_config_path = os.path.join(local_path, "fsdp_config.json") fsdp_config = FSDPConfig( FSDP_version=fsdp_version(self.model), world_size=self.world_size, ) with open(fsdp_config_path, "w") as f: json.dump(asdict(fsdp_config), f, indent=4) # wait for everyone to dump to local torch.distributed.barrier() if self.should_save_hf_model: # Only rank 0 will save the hf model, and # offload to cpu to save LLMs which may be too large to fit on one GPU state_dict = get_fsdp_full_state_dict(self.model, offload_to_cpu=True, rank0_only=True) if self.rank == 0: hf_local_path = os.path.join(local_path, "huggingface") os.makedirs(hf_local_path, exist_ok=True) if "ForTokenClassification" in model_config.architectures[0]: from transformers import AutoModelForTokenClassification auto_model_cls = AutoModelForTokenClassification elif "ForCausalLM" in model_config.architectures[0]: from transformers import AutoModelForCausalLM auto_model_cls = AutoModelForCausalLM elif "ForConditionalGeneration" in model_config.architectures[0]: from transformers import AutoModelForVision2Seq auto_model_cls = AutoModelForVision2Seq else: raise NotImplementedError(f"Unknown architecture {model_config.architectures}") with init_empty_weights(): save_model = auto_model_cls.from_config(model_config, torch_dtype=torch.bfloat16) save_model.to_empty(device="cpu") if save_model.can_generate(): if generation_config is not None: save_model.generation_config = generation_config else: print( f"Warning: {self.__class__.__name__}.save_checkpoint: Generation config file not found, " f"using a generation config created from the model config when saving hf_model." ) save_model.save_pretrained(hf_local_path, state_dict=state_dict) log_with_rank( f"Saved hf_model to {os.path.abspath(hf_local_path)}", rank=self.rank, logger=logger, log_only_rank_0=True, ) del state_dict del save_model # wait for rank0 to dump hf_model to local torch.distributed.barrier() self.previous_saved_paths.append(local_path) ================================================ FILE: verl_rl/verl/utils/checkpoint/megatron_checkpoint_manager.py ================================================ # Copyright 2024 Bytedance Ltd.
and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import logging import os import random from collections.abc import Callable from dataclasses import asdict import numpy as np import torch import torch.distributed from megatron.core import mpu, tensor_parallel from megatron.core.dist_checkpointing.mapping import ShardedObject from megatron.core.transformer.enums import AttnBackend from transformers import GenerationConfig from verl.models.weight_loader_registry import get_weight_saver from verl.utils.device import get_device_name, get_torch_device from verl.utils.fs import is_non_local, local_mkdir_safe from verl.utils.logger import log_with_rank from verl.utils.megatron.dist_checkpointing import load_dist_checkpointing, save_dist_checkpointing from verl.utils.megatron_utils import ( get_dist_checkpoint_path, get_hf_model_checkpoint_path, get_transformer_config_checkpoint_path, ) from .checkpoint_manager import BaseCheckpointManager # Setup logging logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "INFO")) class MegatronCheckpointManager(BaseCheckpointManager): """ Checkpoint manager for Megatron-LM distributed training. This class manages the saving and loading of model checkpoints in a Megatron-LM distributed training environment. It handles various aspects of checkpointing including model states, optimizer states, learning rate schedulers, and random number generator states, ensuring compatibility with HuggingFace formats. 
Key features: - Distributed checkpoint saving and loading using Megatron's dist_checkpointing - Support for tensor parallel, pipeline parallel, and data parallel configurations - Automatic handling of model state dictionaries across multiple pipeline stages - Integration with HuggingFace model configurations and tokenizers - Random number generator state management for reproducibility - Support for both synchronous and asynchronous checkpoint operations The manager automatically handles: - Directory structure creation based on global steps and process ranks - Model configuration and tokenizer saving in HuggingFace format - Optimizer and scheduler state persistence - CUDA RNG state management for deterministic training - Checkpoint cleanup and retention policies Args: model: The Megatron model instance to checkpoint optimizer: The optimizer instance (optional) lr_scheduler: The learning rate scheduler instance (optional) Attributes: model: Reference to the Megatron model being checkpointed optimizer: Reference to the optimizer (if provided) lr_scheduler: Reference to the learning rate scheduler (if provided) rank: Current process rank in the distributed setup Example: ```python checkpoint_manager = MegatronCheckpointManager( model=megatron_model, optimizer=optimizer, lr_scheduler=scheduler ) checkpoint_manager.save_checkpoint( local_path="checkpoints/step_1000", global_step=1000 ) checkpoint_manager.load_checkpoint( local_path="checkpoints/step_1000" ) ``` """ def __init__( self, config, checkpoint_config, model_config, transformer_config, role, model: torch.nn.ModuleList, arch: str, hf_config, param_dtype: torch.dtype, share_embeddings_and_output_weights: bool, processing_class, optimizer, optimizer_scheduler, use_distributed_optimizer: bool, use_checkpoint_opt_param_scheduler: bool = False, use_dist_checkpointing: bool = True, bridge=None, **kwargs, ): super().__init__( model, optimizer=optimizer, lr_scheduler=optimizer_scheduler, processing_class=processing_class, checkpoint_config=checkpoint_config, ) self.arch = arch self.config = config self.transformer_config = transformer_config self.role = role self.is_value_model = False if self.role in ["reward", "critic"]: self.is_value_model = True self.model_config = model_config self.hf_config = hf_config self.param_dtype = param_dtype self.share_embeddings_and_output_weights = share_embeddings_and_output_weights self.model_path = self.config.model.path self.use_distributed_optimizer = use_distributed_optimizer self.use_checkpoint_opt_param_scheduler = use_checkpoint_opt_param_scheduler self.bridge = bridge self.rank = torch.distributed.get_rank() self.use_dist_checkpointing = use_dist_checkpointing or not self.bridge or self.is_value_model self.use_hf_checkpoint = not self.use_dist_checkpointing self.weight_saver = get_weight_saver(self.arch) def get_rng_state(self, use_dist_ckpt: bool = True, data_parallel_random_init: bool = False): """collect rng state across data parallel ranks""" rng_state = { "random_rng_state": random.getstate(), "np_rng_state": np.random.get_state(), "torch_rng_state": torch.get_rng_state(), "rng_tracker_states": tensor_parallel.get_cuda_rng_tracker().get_states(), } if get_device_name() != "cpu": rng_state[f"{get_device_name()}_rng_state"] = get_torch_device().get_rng_state() rng_state_list = None if torch.distributed.is_initialized() and mpu.get_data_parallel_world_size() > 1 and data_parallel_random_init: rng_state_list = [None for i in range(mpu.get_data_parallel_world_size())] 
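# Illustrative layout (assuming pp=2, tp=2; not taken from this config): the rng state gathered below
# becomes a ShardedObject on a (2, 2) (pp_rank, tp_rank) grid, with data-parallel replicas deduplicated
# via replica_id, so dist-checkpointing persists one copy per model-parallel coordinate.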
torch.distributed.all_gather_object(rng_state_list, rng_state, group=mpu.get_data_parallel_group()) else: rng_state_list = [rng_state] if use_dist_ckpt: pp_rank = mpu.get_pipeline_model_parallel_rank() pp_size = mpu.get_pipeline_model_parallel_world_size() tp_rank = mpu.get_tensor_model_parallel_rank() tp_size = mpu.get_tensor_model_parallel_world_size() rng_state_list = ShardedObject( "rng_state", rng_state_list, (pp_size, tp_size), (pp_rank, tp_rank), replica_id=mpu.get_data_parallel_rank(with_context_parallel=True), ) return rng_state_list def get_checkpoint_name( self, checkpoints_path, pipeline_parallel=None, tensor_rank=None, pipeline_rank=None, cp_rank=None, expert_parallel=None, expert_rank=None, return_base_dir=True, basename="model.pt", ): """Determine the directory name for this rank's checkpoint.""" # Use both the tensor and pipeline MP rank. if pipeline_parallel is None: pipeline_parallel = mpu.get_pipeline_model_parallel_world_size() > 1 if tensor_rank is None: tensor_rank = mpu.get_tensor_model_parallel_rank() if pipeline_rank is None: pipeline_rank = mpu.get_pipeline_model_parallel_rank() if cp_rank is None: cp_rank = mpu.get_context_parallel_rank() if expert_parallel is None: expert_parallel = mpu.get_expert_model_parallel_world_size() > 1 if expert_rank is None: expert_rank = mpu.get_expert_model_parallel_rank() # Use both the tensor and pipeline MP rank. If using the distributed # optimizer, then the optimizer's path must additionally include the # data parallel rank. # due to the fact that models are identical across cp ranks, cp rank is not used in the checkpoint path if not pipeline_parallel: common_path = os.path.join(checkpoints_path, f"mp_rank_{tensor_rank:02d}") else: common_path = os.path.join(checkpoints_path, f"mp_rank_{tensor_rank:02d}_{pipeline_rank:03d}") if expert_parallel: common_path = common_path + f"_{expert_rank:03d}" os.makedirs(common_path, exist_ok=True) if return_base_dir: return common_path return os.path.join(common_path, basename) def generate_state_dict(self): # For save dist checkpointing state_dict = {} # All ranks Save Model to reduce memory pressure if self.should_save_model or self.should_load_model: # Get sharded state dict, notice that state_dict will collect among dp groups, causing memory pressure for vpp_rank, model in enumerate(self.model): if len(self.model) > 1: mpu.set_virtual_pipeline_model_parallel_rank(vpp_rank) key = f"model{vpp_rank}" if len(self.model) > 1 else "model" else: key = "model" if hasattr(model, "module"): model = model.module state_dict[key] = model.sharded_state_dict() # Optimizer State Dict if self.should_save_optimizer or self.should_load_optimizer: torch.distributed.barrier() optimizer_sharded_states = self.optimizer.sharded_state_dict(state_dict) state_dict["optimizer"] = optimizer_sharded_states if self.lr_scheduler is not None: lr_state_dict = self.lr_scheduler.state_dict() state_dict["lr_scheduler"] = lr_state_dict # RNG States State Dict if self.should_save_extra or self.should_load_extra: torch.distributed.barrier() rng_state = self.get_rng_state() state_dict["rng_state"] = rng_state return state_dict def load_rng_states(self, rng_states, data_parallel_random_init=False, use_dist_ckpt=True): # access rng_state for data parallel rank if data_parallel_random_init: rng_states = rng_states[mpu.get_data_parallel_rank()] else: rng_states = rng_states[0] random.setstate(rng_states["random_rng_state"]) np.random.set_state(rng_states["np_rng_state"]) torch.set_rng_state(rng_states["torch_rng_state"]) if 
get_device_name() != "cpu": get_torch_device().set_rng_state(rng_states[f"{get_device_name()}_rng_state"]) # Check for empty states array if not rng_states["rng_tracker_states"]: raise KeyError tensor_parallel.get_cuda_rng_tracker().set_states(rng_states["rng_tracker_states"]) def load_checkpoint(self, local_path: str, hdfs_path: str = None, del_local_after_load=False): if local_path is not None: assert os.path.exists(local_path), f"Checkpoint path {local_path} does not exist." dist_checkpoint_path = get_dist_checkpoint_path(local_path) # Get State Dict for loading sharded_state_dict = self.generate_state_dict() log_with_rank(f"Generated state dict for loading: {sharded_state_dict.keys()}", rank=self.rank, logger=logger) for vpp_rank, model in enumerate(self.model): if len(self.model) > 1: model_i_keys = sharded_state_dict[f"model{vpp_rank}"].keys() log_with_rank(f"Generated state dict for loading: {model_i_keys}", rank=self.rank, logger=logger) else: log_with_rank( f"Generated state dict for loading: {sharded_state_dict['model'].keys()}", rank=self.rank, logger=logger, ) # Load Dist Checkpointing state_dict = load_dist_checkpointing( sharded_state_dict=sharded_state_dict, ckpt_dir=dist_checkpoint_path, ) if self.should_load_model and self.use_dist_checkpointing: assert "model" in state_dict or any( f"model{vpp_rank}" in state_dict for vpp_rank in range(len(self.model)) ), f"Model state dict not found in {state_dict.keys()}. Please check the checkpoint file {local_path}." for vpp_rank, model in enumerate(self.model): if len(self.model) == 1: model_state_dict = state_dict["model"] else: assert f"model{vpp_rank}" in state_dict, f"model{vpp_rank} not found in state_dict" model_state_dict = state_dict[f"model{vpp_rank}"] mpu.set_virtual_pipeline_model_parallel_rank(vpp_rank) self.model[vpp_rank].load_state_dict(model_state_dict) log_with_rank(f"Loaded sharded model checkpoint from {local_path}", rank=self.rank, logger=logger) elif self.should_load_model and self.use_hf_checkpoint: hf_model_path = get_hf_model_checkpoint_path(local_path) self.bridge.load_weights(self.model, hf_model_path) log_with_rank(f"Loaded HF model checkpoint from {hf_model_path} with bridge", rank=self.rank, logger=logger) if self.should_load_optimizer: assert "optimizer" in state_dict, ( f"Optimizer state dict not found in {state_dict.keys()}. Please check the checkpoint file {local_path}." ) optimizer_state_dict = state_dict["optimizer"] self.optimizer.load_state_dict(optimizer_state_dict) log_with_rank(f"Loaded optimizer checkpoint from {local_path}", rank=self.rank, logger=logger) if self.use_checkpoint_opt_param_scheduler: assert "lr_scheduler" in state_dict, ( f"LR scheduler state dict not found in {state_dict.keys()}. Please check the checkpoint file " f"{local_path}." ) lr_scheduler_state_dict = state_dict["lr_scheduler"] if self.lr_scheduler is not None: self.lr_scheduler.load_state_dict(lr_scheduler_state_dict) log_with_rank(f"Loaded LR scheduler checkpoint from {local_path}", rank=self.rank, logger=logger) if self.should_load_extra: assert "rng_state" in state_dict, ( f"RNG state dict not found in {state_dict.keys()}. Please check the checkpoint file {local_path}."
) rng_state = state_dict["rng_state"] self.load_rng_states(rng_state) log_with_rank(f"Loaded RNG states from {local_path}", rank=self.rank, logger=logger) if del_local_after_load: try: os.remove(local_path) if is_non_local(local_path) else None except Exception as e: log_with_rank( f"remove local resume ckpt file after loading failed, exception {e} will be ignored", rank=self.rank, logger=logger, ) def save_checkpoint(self, local_path: str, hdfs_path: str = None, global_step: int = 0, max_ckpt_to_keep=None): # record the previous global step self.previous_global_step = global_step # remove previous local_path if ( max_ckpt_to_keep and isinstance(max_ckpt_to_keep, int) and max_ckpt_to_keep > 0 and len(self.previous_saved_paths) >= max_ckpt_to_keep ): keep_start = len(self.previous_saved_paths) - max_ckpt_to_keep + 1 self.remove_previous_save_local_path(self.previous_saved_paths[:keep_start]) self.previous_saved_paths = self.previous_saved_paths[keep_start:] local_path = local_mkdir_safe(local_path) dist_checkpoint_path = get_dist_checkpoint_path(local_path) if self.use_dist_checkpointing: # Generate state dict for saving state_dict = self.generate_state_dict() log_with_rank(f"Generated state dict for saving: {state_dict.keys()}", rank=self.rank, logger=logger) for vpp_rank, model in enumerate(self.model): if len(self.model) > 1: model_i_keys = state_dict[f"model{vpp_rank}"].keys() log_with_rank(f"Generated state dict for saving: {model_i_keys}", rank=self.rank, logger=logger) else: log_with_rank( f"Generated state dict for saving: {state_dict['model'].keys()}", rank=self.rank, logger=logger ) # Start Async save if enabled async_save_request = save_dist_checkpointing( sharded_state_dict=state_dict, ckpt_path=dist_checkpoint_path, async_save=self.checkpoint_config.async_save, ) # Synchronize all async save requests if not self.checkpoint_config.async_save: assert async_save_request is None, "Async save request should be None when not using async save." 
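# Illustrative flow when checkpoint_config.async_save is True instead: save_dist_checkpointing() returns a
# request handle immediately, training continues, and the finalize_save_fn registered further below
# (completion logging plus optional HDFS upload) runs only once the background write completes.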
torch.distributed.barrier() else: assert self.use_hf_checkpoint, "use_hf_checkpoint should be True when not using dist checkpointing" log_with_rank(f"Saving HF model checkpoint to {local_path} with bridge", rank=self.rank, logger=logger) hf_ckpt_path = get_hf_model_checkpoint_path(local_path) self.bridge.save_weights(self.model, hf_ckpt_path) log_with_rank(f"Saved bridge checkpoint to {hf_ckpt_path}", rank=self.rank, logger=logger) if self.should_save_model: # Only rank 0 saves the hf config and tokenizer to huggingface path # No matter whether we save hf model or not if self.rank == 0: # Save tokenizer hf_config_tokenizer_path = get_hf_model_checkpoint_path(local_path) self.processing_class.save_pretrained(hf_config_tokenizer_path) # Save huggingface config self.hf_config.save_pretrained(hf_config_tokenizer_path) if hasattr(self.hf_config, "name_or_path") and self.hf_config.name_or_path: try: generation_config = GenerationConfig.from_pretrained(self.hf_config.name_or_path) generation_config.save_pretrained(hf_config_tokenizer_path) except Exception: # if the generation config isn't available, we don't save it pass log_with_rank( f"Saved Huggingface config and tokenizer to {hf_config_tokenizer_path}", rank=self.rank, logger=logger, log_only_rank_0=True, ) if self.should_save_extra: if self.rank == 0: # Save transformer config print(self.transformer_config) transformer_config_dict = asdict(self.transformer_config) to_convert_types = {torch.dtype: str, AttnBackend: str} ignore_types = [Callable] pop_keys = [] for key, value in transformer_config_dict.items(): if type(value) in to_convert_types: transformer_config_dict[key] = to_convert_types[type(value)](value) if type(value) in ignore_types: pop_keys.append(key) if callable(value): pop_keys.append(key) for key in pop_keys: transformer_config_dict.pop(key) transformer_config_path = get_transformer_config_checkpoint_path(local_path) with open(transformer_config_path, "w") as f: json.dump(transformer_config_dict, f, indent=2) if self.should_save_hf_model: # wait for everyone to dump to local state_dict = self.weight_saver( self.model, self.hf_config, dtype=self.param_dtype, is_value_model=self.is_value_model, tie_word_embeddings=self.share_embeddings_and_output_weights, ) torch.distributed.barrier() if self.rank == 0: hf_model_ckpt_path = get_hf_model_checkpoint_path(local_path) import warnings from accelerate import init_empty_weights with init_empty_weights(), warnings.catch_warnings(): warnings.simplefilter("ignore") if "mistral7b-rm" in self.config.model.path: from transformers import MistralForSequenceClassification model = MistralForSequenceClassification.from_pretrained( self.config.model.path ) # use score head instead of lm_head state_dict["score.weight"] = state_dict["score.weight"] else: from transformers import AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained(self.config.model.path, torch_dtype="auto") model.save_pretrained(hf_model_ckpt_path, state_dict=state_dict) log_with_rank( f"Saved Huggingface config and tokenizer to {hf_model_ckpt_path}", rank=self.rank, logger=logger, log_only_rank_0=True, ) if hdfs_path is not None: log_with_rank( f"Uploading checkpoint to {hdfs_path}", rank=self.rank, logger=logger, log_only_rank_0=True ) from verl.utils import hdfs_io hdfs_io.makedirs(hdfs_path, exist_ok=True) hdfs_io.copy(src=hf_model_ckpt_path, dst=hdfs_path, dirs_exist_ok=True) log_with_rank( f"HDFS checkpoint uploaded to {hdfs_path}", rank=self.rank, logger=logger, log_only_rank_0=True ) def finalize_save_fn(): # 
Rank 0 uploads checkpoint to HDFS if hdfs_path is provided log_with_rank( f"Dist checkpointing save completed for {dist_checkpoint_path}", rank=self.rank, logger=logger ) if self.rank == 0: if hdfs_path is not None: log_with_rank(f"Uploading checkpoint to {hdfs_path}", rank=self.rank, logger=logger) from verl.utils import hdfs_io hdfs_io.makedirs(hdfs_path, exist_ok=True) hdfs_io.copy(src=dist_checkpoint_path, dst=hdfs_path, dirs_exist_ok=True) hdfs_io.copy(src=hf_config_tokenizer_path, dst=hdfs_path, dirs_exist_ok=True) if self.checkpoint_config.async_save: assert async_save_request is not None, "Async save request should not be None when using async save." async_save_request.add_finalize_fn(finalize_save_fn) else: finalize_save_fn() self.previous_saved_paths.append(local_path) ================================================ FILE: verl_rl/verl/utils/config.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import is_dataclass from typing import Any, Optional from omegaconf import DictConfig, ListConfig, OmegaConf __all__ = ["omega_conf_to_dataclass"] def omega_conf_to_dataclass(config: DictConfig | dict, dataclass_type: Optional[type[Any]] = None) -> Any: """ Convert an OmegaConf DictConfig to a dataclass. Args: config: The OmegaConf DictConfig or dict to convert. dataclass_type: The dataclass type to convert to. When dataclass_type is None, the DictConfig must contain _target_ to be instantiated via hydra.instantiate API. Returns: The dataclass instance. """ # Got an empty config if not config: return dataclass_type if dataclass_type is None else dataclass_type() # Got an object if not isinstance(config, DictConfig | ListConfig | dict | list): return config if dataclass_type is None: assert "_target_" in config, ( "When dataclass_type is not provided, config must contain _target_." "See trainer/config/ppo_trainer.yaml algorithm section for an example." ) from hydra.utils import instantiate return instantiate(config, _convert_="partial") if not is_dataclass(dataclass_type): raise ValueError(f"{dataclass_type} must be a dataclass") cfg = OmegaConf.create(config) # in case it's a dict cfg_from_dataclass = OmegaConf.structured(dataclass_type) # let cfg override the existing vals in `cfg_from_dataclass` cfg_merged = OmegaConf.merge(cfg_from_dataclass, cfg) # now convert to `dataclass_type` config_object = OmegaConf.to_object(cfg_merged) return config_object def update_dict_with_config(dictionary: dict, config: DictConfig): for key in dictionary: if hasattr(config, key): dictionary[key] = getattr(config, key) ================================================ FILE: verl_rl/verl/utils/dataset/README.md ================================================ # Dataset Format ## RLHF dataset We combine all the data sources into a single parquet file. We directly organize the prompt into the chat format so that multi-turn chats can be easily incorporated.
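A minimal sketch of producing such a file (the file name and row contents are illustrative, and pyarrow is assumed as the parquet engine):

```python
import pandas as pd

# One row per prompt; "prompt" holds a chat-format message list so that
# multi-turn conversations drop in without schema changes (illustrative row).
row = {
    "data_source": "openai/gsm8k",
    "prompt": [{"role": "user", "content": "Natalia sold clips to 48 of her friends..."}],
    "ability": "math",
    "reward_model": {"style": "rule", "ground_truth": ["72"]},
}
pd.DataFrame([row]).to_parquet("rlhf_train.parquet")
```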
In the prompt, we may add instruction-following text to guide the model to output the answers in a particular format so that we can extract the answers. Math problems: ```json { "data_source": "openai/gsm8k", "prompt": [{"role": "user", "content": "Natalia sold clips to 48 of her friends in April, and then she sold half as many clips in May. How many clips did Natalia sell altogether in April and May? Let's think step by step and output the final answer after \"####\""}], "ability": "math", "reward_model": { "style": "rule", "ground_truth": ["72"] } } ``` ================================================ FILE: verl_rl/verl/utils/dataset/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .rl_dataset import RLHFDataset from .rm_dataset import RMDataset from .sft_dataset import SFTDataset __all__ = ["RLHFDataset", "RMDataset", "SFTDataset"] ================================================ FILE: verl_rl/verl/utils/dataset/multiturn_sft_dataset.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2025 ModelBest Inc. and/or its affiliates # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Multi-turn SFT dataset that supports training on conversation data with multiple turns """ import logging from typing import Any, Optional import numpy as np import pandas as pd import torch from torch.utils.data import Dataset from transformers import PreTrainedTokenizer from verl.utils import hf_tokenizer from verl.utils.fs import copy_local_path_from_hdfs def convert_nested_value_to_list_recursive(data_item): if isinstance(data_item, dict): return {k: convert_nested_value_to_list_recursive(v) for k, v in data_item.items()} elif isinstance(data_item, list): return [convert_nested_value_to_list_recursive(elem) for elem in data_item] elif isinstance(data_item, np.ndarray): # Convert to list, then recursively process the elements of the new list return convert_nested_value_to_list_recursive(data_item.tolist()) else: # Base case: item is already a primitive type (int, str, float, bool, etc.)
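# Illustrative round-trip: convert_nested_value_to_list_recursive({"name": "f", "args": np.array([1, 2])})
# returns {"name": "f", "args": [1, 2]}; numpy arrays at any nesting depth come back as plain Python lists.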
return data_item class MultiTurnSFTDataset(Dataset): """ Dataset for multi-turn conversations where each assistant response should be trained """ def __init__(self, parquet_files: str | list[str], tokenizer, config=None): # Set defaults and extract parameters from config if provided config = config or {} self.truncation = config.get("truncation", "error") self.max_length = config.get("max_length", 1024) # Get messages_key from the new multiturn config structure multiturn_config = config.get("multiturn", {}) self.messages_key = multiturn_config.get("messages_key", "messages") self.tools_key = multiturn_config.get("tools_key", "tools") self.enable_thinking_key = multiturn_config.get("enable_thinking_key", "enable_thinking") assert self.truncation in ["error", "left", "right"] if not isinstance(parquet_files, list): parquet_files = [parquet_files] self.parquet_files = parquet_files if isinstance(tokenizer, str): tokenizer = hf_tokenizer(tokenizer) self.tokenizer: PreTrainedTokenizer = tokenizer self._download() self._read_files_and_process() def _download(self): for i, parquet_file in enumerate(self.parquet_files): self.parquet_files[i] = copy_local_path_from_hdfs(parquet_file, verbose=True) def _read_files_and_process(self): def series_to_item(ls): import numpy import pandas while isinstance(ls, pandas.core.series.Series | numpy.ndarray) and len(ls) == 1: ls = ls[0] return ls dataframes = [] for parquet_file in self.parquet_files: dataframe = pd.read_parquet(parquet_file) dataframes.append(dataframe) self.dataframe = pd.concat(dataframes) # Extract messages list from dataframe self.messages = self.dataframe[self.messages_key].apply(series_to_item).tolist() # Extract tools list from dataframe if self.tools_key in self.dataframe.columns: self.tools = self.dataframe[self.tools_key].apply(convert_nested_value_to_list_recursive).tolist() else: self.tools = None # Extract enable_thinking list from dataframe if self.enable_thinking_key in self.dataframe.columns: self.enable_thinking = self.dataframe[self.enable_thinking_key].tolist() else: self.enable_thinking = None def __len__(self): return len(self.messages) def _process_message_tokens( self, messages: list[dict[str, Any]], start_idx: int, end_idx: int, is_assistant: bool = False, enable_thinking: Optional[bool] = None, tools: Optional[list[dict[str, Any]]] = None, ) -> tuple[list[int], list[int], list[int]]: """ Process tokens for a single message or a group of messages. 
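Example (illustrative): for messages = [system, user, assistant] with start_idx=2, end_idx=3 and is_assistant=True, the chat template is rendered for messages[:2] both with and without the generation prompt; the generation-prompt suffix is tokenized with loss_mask 0 and the assistant completion itself with loss_mask 1, so only assistant tokens contribute to the loss.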
Args: messages: List of message dictionaries start_idx: Start index in messages list end_idx: End index in messages list is_assistant: Whether this is an assistant message enable_thinking: Whether to enable thinking mode Returns: Tuple of (tokens, loss_mask, attention_mask) """ if start_idx > 0: prev_applied_text = self.tokenizer.apply_chat_template( messages[:start_idx], tokenize=False, add_generation_prompt=False, enable_thinking=enable_thinking, tools=tools, ) if is_assistant: prev_applied_text_w_generation_prompt = self.tokenizer.apply_chat_template( messages[:start_idx], tokenize=False, add_generation_prompt=True, enable_thinking=enable_thinking, tools=tools, ) else: prev_applied_text = "" cur_applied_text = self.tokenizer.apply_chat_template( messages[:end_idx], tokenize=False, add_generation_prompt=False, enable_thinking=enable_thinking, tools=tools, ) # Get tokens for the current message only if is_assistant: generation_prompt_text = prev_applied_text_w_generation_prompt[len(prev_applied_text) :] generation_prompt_tokens = self.tokenizer.encode( generation_prompt_text, add_special_tokens=False, ) _message_tokens = self.tokenizer.encode( cur_applied_text[len(prev_applied_text_w_generation_prompt) :], add_special_tokens=False, ) message_tokens = generation_prompt_tokens + _message_tokens loss_mask = [0] * (len(generation_prompt_tokens)) + [1] * ( len(message_tokens) - len(generation_prompt_tokens) ) else: message_tokens = self.tokenizer.encode( cur_applied_text[len(prev_applied_text) :], add_special_tokens=False, ) loss_mask = [0] * len(message_tokens) attention_mask = [1] * len(message_tokens) return message_tokens, loss_mask, attention_mask def _validate_and_convert_tokens( self, full_tokens: torch.Tensor, concat_tokens: list[int], concat_loss_mask: list[int], concat_attention_mask: list[int], ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Validate tokenization and convert to tensors. Args: full_tokens: Full conversation tokens concat_tokens: Concatenated tokens concat_loss_mask: Concatenated loss mask concat_attention_mask: Concatenated attention mask Returns: Tuple of (input_ids, loss_mask, attention_mask) as tensors """ full_tokens_list = full_tokens.tolist() if len(concat_tokens) != len(full_tokens_list) or not all( a == b for a, b in zip(concat_tokens, full_tokens_list, strict=True) ): logging.warning( f"Token mismatch detected! Full tokenization length: {len(full_tokens_list)}, Concatenated tokens " f"length: {len(concat_tokens)}. Using concatenated version." 
# f"full tokens text: {self.tokenizer.decode(full_tokens_list)}" # f"concat tokens text: {self.tokenizer.decode(concat_tokens)}" ) return ( torch.tensor(concat_tokens, dtype=torch.long), torch.tensor(concat_loss_mask, dtype=torch.long), torch.tensor(concat_attention_mask, dtype=torch.long), ) return ( full_tokens, torch.tensor(concat_loss_mask, dtype=torch.long), torch.tensor(concat_attention_mask, dtype=torch.long), ) def __getitem__(self, item): tokenizer = self.tokenizer messages = self.messages[item] tools = self.tools[item] if self.tools is not None else None enable_thinking = self.enable_thinking[item] if self.enable_thinking is not None else None # First, get the full conversation tokens try: full_tokens = tokenizer.apply_chat_template( messages, tools=tools, tokenize=True, return_tensors="pt", add_generation_prompt=False, enable_thinking=enable_thinking, ) except Exception as e: logging.error( f"Error applying chat template: {e}\nMessages: {messages}\nTools: {tools}\nEnable thinking: " f"{enable_thinking}" ) raise # Track concatenated tokens for validation concat_tokens = [] concat_loss_mask = [] concat_attention_mask = [] i = 0 while i < len(messages): cur_messages = messages[i] if cur_messages["role"] == "assistant": # Process assistant message tokens, loss_mask, attention_mask = self._process_message_tokens( messages, i, i + 1, is_assistant=True, enable_thinking=enable_thinking, tools=tools ) concat_tokens.extend(tokens) concat_loss_mask.extend(loss_mask) concat_attention_mask.extend(attention_mask) i += 1 elif cur_messages["role"] == "tool": # Process consecutive tool messages st = i ed = i + 1 while ed < len(messages) and messages[ed]["role"] == "tool": ed += 1 tokens, loss_mask, attention_mask = self._process_message_tokens( messages, st, ed, enable_thinking=enable_thinking, tools=tools ) concat_tokens.extend(tokens) concat_loss_mask.extend(loss_mask) concat_attention_mask.extend(attention_mask) i = ed elif cur_messages["role"] in ["user", "system"]: # Process user or system message if cur_messages["role"] == "system" and i != 0: raise ValueError("System message should be the first message") tokens, loss_mask, attention_mask = self._process_message_tokens( messages, i, i + 1, enable_thinking=enable_thinking, tools=tools ) concat_tokens.extend(tokens) concat_loss_mask.extend(loss_mask) concat_attention_mask.extend(attention_mask) i += 1 else: raise ValueError(f"Unknown role: {cur_messages['role']}") # Validate and convert tokens input_ids, loss_mask, attention_mask = self._validate_and_convert_tokens( full_tokens[0], concat_tokens, concat_loss_mask, concat_attention_mask ) # Handle sequence length sequence_length = input_ids.shape[0] if sequence_length < self.max_length: # Pad sequences pad_token_id = self.tokenizer.pad_token_id if self.tokenizer.pad_token_id is not None else 0 padded_input_ids = torch.full((self.max_length - sequence_length,), pad_token_id, dtype=input_ids.dtype) padded_attention_mask = torch.zeros((self.max_length - sequence_length,), dtype=attention_mask.dtype) padded_loss_mask = torch.zeros((self.max_length - sequence_length,), dtype=loss_mask.dtype) input_ids = torch.cat((input_ids, padded_input_ids)) attention_mask = torch.cat((attention_mask, padded_attention_mask)) loss_mask = torch.cat((loss_mask, padded_loss_mask)) elif sequence_length > self.max_length: if self.truncation == "left": input_ids = input_ids[-self.max_length :] attention_mask = attention_mask[-self.max_length :] loss_mask = loss_mask[-self.max_length :] elif self.truncation == "right": 
input_ids = input_ids[: self.max_length] attention_mask = attention_mask[: self.max_length] loss_mask = loss_mask[: self.max_length] elif self.truncation == "error": raise ValueError(f"{sequence_length=} is larger than {self.max_length=}") else: raise ValueError(f"Unknown truncation method {self.truncation}") # Create position IDs position_ids = torch.arange(len(input_ids), dtype=torch.long) # Zero out position IDs for padding position_ids = position_ids * attention_mask return { "input_ids": input_ids, "attention_mask": attention_mask, "position_ids": position_ids, "loss_mask": loss_mask, } ================================================ FILE: verl_rl/verl/utils/dataset/rl_dataset.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import logging import os import re from collections import defaultdict from typing import Optional import datasets import numpy as np import torch from omegaconf import DictConfig, ListConfig from torch.utils.data import Dataset from transformers import PreTrainedTokenizer, ProcessorMixin import verl.utils.torch_functional as verl_F from verl.utils.model import compute_position_id_with_mask logger = logging.getLogger(__name__) def collate_fn(data_list: list[dict]) -> dict: """ Collate a batch of sample dicts into batched tensors and arrays. Args: data_list: List of dicts mapping feature names to torch.Tensor or other values. Returns: Dict where tensor entries are stacked into a torch.Tensor of shape (batch_size, \*dims) and non-tensor entries are converted to np.ndarray of dtype object with shape (batch_size,). """ tensors = defaultdict(list) non_tensors = defaultdict(list) for data in data_list: for key, val in data.items(): if isinstance(val, torch.Tensor): tensors[key].append(val) else: non_tensors[key].append(val) for key, val in tensors.items(): tensors[key] = torch.stack(val, dim=0) for key, val in non_tensors.items(): non_tensors[key] = np.array(val, dtype=object) return {**tensors, **non_tensors} class RLHFDataset(Dataset): """ Load and preprocess RLHF data from Parquet files. - Caches files locally. - Reads into a HuggingFace Dataset and tokenizes prompts. - Optionally handles images/videos via a ProcessorMixin. - Filters prompts over a max length. - Supports resuming from checkpoints. Args: data_files (str or list): Path(s) to Parquet file(s). tokenizer (PreTrainedTokenizer): For the tokenization of text to token IDs. config (DictConfig): Options like cache_dir, prompt_key, max_prompt_length, truncation, etc. processor (ProcessorMixin, optional): Multimodal preprocessor for images/videos. 
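Example (illustrative; assumes an already-constructed tokenizer): dataset = RLHFDataset(data_files="rlhf_train.parquet", tokenizer=tokenizer, config=OmegaConf.create({"prompt_key": "prompt", "max_prompt_length": 1024, "filter_overlong_prompts": True}))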
""" def __init__( self, data_files: str | list[str], tokenizer: PreTrainedTokenizer, config: DictConfig, processor: Optional[ProcessorMixin] = None, ): if not isinstance(data_files, list | ListConfig): data_files = [data_files] self.data_files = copy.deepcopy(data_files) self.original_data_files = copy.deepcopy(data_files) # use for resume self.tokenizer = tokenizer self.processor = processor self.config = config self.cache_dir = os.path.expanduser(config.get("cache_dir", "~/.cache/verl/rlhf")) self.prompt_key = config.get("prompt_key", "prompt") self.image_key = config.get("image_key", "images") self.video_key = config.get("video_key", "videos") self.max_prompt_length = config.get("max_prompt_length", 1024) self.return_raw_chat = config.get("return_raw_chat", False) self.return_full_prompt = config.get("return_full_prompt", False) self.truncation = config.get("truncation", "error") self.filter_overlong_prompts = config.get("filter_overlong_prompts", True) self.num_workers = config.get("filter_overlong_prompts_workers", max(1, os.cpu_count() // 4)) self.num_workers = min(self.num_workers, os.cpu_count()) self.use_shm = config.get("use_shm", False) self.chat_template_func = config.get("chat_template_func", None) self.need_tools_kwargs = config.get("need_tools_kwargs", False) self.filter_prompts = config.get("filter_prompts", True) self.serialize_dataset = False self.return_multi_modal_inputs = config.get("return_multi_modal_inputs", True) self._download() self._read_files_and_tokenize() def _download(self, use_origin_parquet=False): from verl.utils.fs import copy_to_local data_files = self.data_files if not use_origin_parquet else self.original_data_files for i, parquet_file in enumerate(data_files): self.data_files[i] = copy_to_local(src=parquet_file, cache_dir=self.cache_dir, use_shm=self.use_shm) def _read_files_and_tokenize(self): dataframes = [] for parquet_file in self.data_files: # read parquet files and cache dataframe = datasets.load_dataset("parquet", data_files=parquet_file)["train"] dataframes.append(dataframe) self.dataframe: datasets.Dataset = datasets.concatenate_datasets(dataframes) print(f"dataset len: {len(self.dataframe)}") self.dataframe = self.maybe_filter_out_long_prompts(self.dataframe) def maybe_filter_out_long_prompts(self, dataframe: datasets.Dataset = None): # filter out too long prompts if self.filter_overlong_prompts: tokenizer = self.tokenizer processor = self.processor prompt_key = self.prompt_key image_key = self.image_key video_key = self.video_key if processor is not None: from verl.utils.dataset.vision_utils import process_image, process_video def doc2len(doc) -> int: messages = self._build_messages(doc) raw_prompt = self.processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=False ) images = [process_image(image) for image in doc[image_key]] if image_key in doc else None videos = [process_video(video) for video in doc[video_key]] if video_key in doc else None return len(processor(text=[raw_prompt], images=images, videos=videos)["input_ids"][0]) else: def doc2len(doc) -> int: return len(tokenizer.apply_chat_template(doc[prompt_key], add_generation_prompt=True)) dataframe = dataframe.filter( lambda doc: doc2len(doc) <= self.max_prompt_length, num_proc=self.num_workers, desc=f"Filtering prompts longer than {self.max_prompt_length} tokens", ) print(f"filter dataset len: {len(dataframe)}") return dataframe def resume_dataset_state(self): self.serialize_dataset = not hasattr(self, "original_data_files") # resume dataframe if not it's 
serialized in data.pt
        if not self.serialize_dataset:
            self._download(use_origin_parquet=True)  # download and resume from original parquet files
            self._read_files_and_tokenize()
        else:
            print(r"old dataloader ckpt file is used, please train from scratch for better ckpt performance")

    def __len__(self):
        return len(self.dataframe)

    def _build_messages(self, example: dict):
        messages: list = example.pop(self.prompt_key)
        if self.image_key in example or self.video_key in example:
            for message in messages:
                content = message["content"]
                content_list = []
                segments = re.split("(<image>|<video>)", content)
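# A minimal sketch of how collate_fn above batches heterogeneous samples: tensor
# fields are stacked along a new batch dimension, everything else becomes an
# object-dtype numpy array (toy inputs, for illustration only).
import torch

toy_batch = collate_fn(
    [
        {"input_ids": torch.tensor([1, 2, 3]), "raw_prompt": "hello"},
        {"input_ids": torch.tensor([4, 5, 6]), "raw_prompt": "world"},
    ]
)
assert toy_batch["input_ids"].shape == (2, 3)
assert toy_batch["raw_prompt"].dtype == object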
score = score / 4 return score return score else: return format_score def compute_score_subem(solution_str, ground_truth, method="strict", format_score=0.0, score=1.0): """The scoring function for substring exact match (EM). Args: solution_str: the solution text ground_truth: the ground truth method: the method to extract the solution, choices are 'strict' and 'flexible' format_score: the score for the format score: the score for the correct answer """ answer = extract_solution(solution_str=solution_str) do_print = random.randint(1, 64) == 1 if do_print: print("--------------------------------") print(f"Golden answers: {ground_truth['target']}") print(f"Extracted answer: {answer}") print(f"Solution string: {solution_str}") if answer is None: return 0 else: if subem_check(answer, ground_truth["target"]): return score else: return format_score ================================================ FILE: verl_rl/verl/utils/rollout_trace.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import asyncio import contextlib import functools import inspect import os from typing import Optional class RolloutTraceConfig: """Configuration for rollout tracing with various backends. Singleton configuration class for managing rollout trace settings across different tracing backends like Weave and MLflow. Args: backend (Optional[str]): Tracing backend to use ('weave', 'mlflow', or None). client (Optional[object]): Client instance for the selected backend. token2text (bool): Whether to convert tokens to text in traces. Defaults to False. project_name (str): Name of the project for tracing. experiment_name (str): Name of the experiment for tracing. 
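    Example (illustrative; requires the chosen backend package to be installed)::

        RolloutTraceConfig.init(
            project_name="my_project",
            experiment_name="run_1",
            backend="mlflow",
            token2text=True,
        )
        assert RolloutTraceConfig.get_backend() == "mlflow"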
""" _instance: Optional["RolloutTraceConfig"] = None backend: Optional[str] = None client: Optional[object] = None token2text: bool = False _initialized: bool = False project_name: str = None experiment_name: str = None def __new__(cls, *args, **kwargs): if cls._instance is None: cls._instance = super().__new__(cls) cls._instance._initialized = False return cls._instance @classmethod def get_instance(cls) -> "RolloutTraceConfig": if cls._instance is None: cls._instance = cls() return cls._instance @classmethod def init(cls, project_name: str, experiment_name: str, backend: str, token2text: bool = False): config = cls.get_instance() if config._initialized: return config.backend = backend config.token2text = token2text config.project_name = project_name config.experiment_name = experiment_name if backend == "weave": import weave config.client = weave.init(project_name) elif backend == "mlflow": import mlflow mlflow.config.enable_async_logging() config.client = mlflow MLFLOW_TRACKING_URI = os.environ.get("MLFLOW_TRACKING_URI", "sqlite:////tmp/mlruns.db") mlflow.set_tracking_uri(MLFLOW_TRACKING_URI) mlflow.set_experiment(project_name) else: config.client = None config._initialized = True @classmethod def get_backend(cls) -> Optional[str]: return cls.get_instance().backend @classmethod def get_client(cls) -> Optional[object]: return cls.get_instance().client @classmethod def enable_token2text(cls) -> Optional[bool]: return cls.get_instance().token2text @classmethod def reset(cls): cls._instance = None @contextlib.contextmanager def rollout_trace_attr(sample_index=None, step=None, rollout_n=None, name="rollout_trace", validate=False): """A context manager to add attributes to a trace for the configured backend.""" backend = RolloutTraceConfig.get_backend() attributes = {} if backend: if sample_index is not None: attributes["sample_index"] = sample_index if step is not None: attributes["step"] = step if rollout_n is not None: attributes["rollout_n"] = rollout_n attributes["validate"] = validate attributes["experiment_name"] = RolloutTraceConfig.get_instance().experiment_name if not attributes or backend is None: yield return if backend == "weave": import weave with weave.attributes(attributes): yield elif backend == "mlflow": import mlflow with mlflow.start_span(name=name) as span: trace_id = span.trace_id for key, value in attributes.items(): mlflow.set_trace_tag(trace_id, str(key), str(value)) yield else: yield def rollout_trace_op(func): @functools.wraps(func) async def async_wrapper(self, *args, **kwargs): backend = RolloutTraceConfig.get_backend() enable_token2text = RolloutTraceConfig.enable_token2text() if backend is None: return await func(self, *args, **kwargs) sig = inspect.signature(func) bound_args = sig.bind(self, *args, **kwargs) bound_args.apply_defaults() inputs = dict(bound_args.arguments) del inputs["self"] async def add_token2text(self, result): if hasattr(result, "prompt_ids") and hasattr(self, "tokenizer") and hasattr(self.tokenizer, "decode"): _result = vars(result) loop = asyncio.get_running_loop() if hasattr(result, "prompt_ids"): prompt_text = await loop.run_in_executor(None, self.tokenizer.decode, result.prompt_ids) _result["prompt_text"] = prompt_text if hasattr(result, "response_ids"): response_text = await loop.run_in_executor(None, self.tokenizer.decode, result.response_ids) _result["response_text"] = response_text return _result return result if backend == "weave": tracer = RolloutTraceConfig.get_client() from weave.trace.context import call_context cur_attributes = 
{**call_context.call_attributes.get()} call = tracer.create_call(op=func.__qualname__, inputs=inputs, attributes=cur_attributes) try: result = await func(self, *args, **kwargs) if enable_token2text: _result = await add_token2text(self, result) tracer.finish_call(call, output=_result) else: tracer.finish_call(call, output=result) return result except Exception as e: tracer.finish_call(call, exception=e) raise e elif backend == "mlflow": import mlflow with mlflow.start_span(name=func.__qualname__) as span: span.set_inputs(inputs) result = await func(self, *args, **kwargs) if enable_token2text: _result = await add_token2text(self, result) span.set_outputs(_result) else: span.set_outputs(result) return result else: return await func(self, *args, **kwargs) @functools.wraps(func) def wrapper(self, *args, **kwargs): backend = RolloutTraceConfig.get_backend() if backend is None: return func(self, *args, **kwargs) sig = inspect.signature(func) bound_args = sig.bind(self, *args, **kwargs) bound_args.apply_defaults() inputs = dict(bound_args.arguments) del inputs["self"] if backend == "weave": tracer = RolloutTraceConfig.get_client() from weave.trace.context import call_context cur_attributes = {**call_context.call_attributes.get()} call = tracer.create_call(op=func.__qualname__, inputs=inputs, attributes=cur_attributes) try: result = func(self, *args, **kwargs) tracer.finish_call(call, output=result) return result except Exception as e: tracer.finish_call(call, exception=e) raise e elif backend == "mlflow": import mlflow return mlflow.trace(func)(self, *args, **kwargs) else: return func(self, *args, **kwargs) return async_wrapper if inspect.iscoroutinefunction(func) else wrapper ================================================ FILE: verl_rl/verl/utils/seqlen_balancing.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
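"""Sequence-length balancing utilities.

Partitions variable-length samples (via Karmarkar-Karp or greedy number
partitioning) so that each micro-batch carries a roughly equal total token count.
"""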
import copy import heapq from itertools import chain import torch from torch import distributed as dist from verl.protocol import DataProto from verl.utils.device import get_device_name def karmarkar_karp(seqlen_list: list[int], k_partitions: int, equal_size: bool): # see: https://en.wikipedia.org/wiki/Largest_differencing_method class Set: def __init__(self) -> None: self.sum = 0 self.items = [] def add(self, idx: int, val: int): self.items.append((idx, val)) self.sum += val def merge(self, other): for idx, val in other.items: self.items.append((idx, val)) self.sum += val def __lt__(self, other): if self.sum != other.sum: return self.sum < other.sum if len(self.items) != len(other.items): return len(self.items) < len(other.items) return self.items < other.items class State: def __init__(self, items: list[tuple[int, int]], k: int) -> None: self.k = k # sets should always be decreasing order self.sets = [Set() for _ in range(k)] assert len(items) in [1, k], f"{len(items)} not in [1, {k}]" for i, (idx, seqlen) in enumerate(items): self.sets[i].add(idx=idx, val=seqlen) self.sets = sorted(self.sets, reverse=True) def get_partitions(self): partitions = [] for i in range(len(self.sets)): cur_partition = [] for idx, _ in self.sets[i].items: cur_partition.append(idx) partitions.append(cur_partition) return partitions def merge(self, other): for i in range(self.k): self.sets[i].merge(other.sets[self.k - 1 - i]) self.sets = sorted(self.sets, reverse=True) @property def spread(self) -> int: return self.sets[0].sum - self.sets[-1].sum def __lt__(self, other): # least heap, let the state with largest spread to be popped first, # if the spread is the same, let the state who has the largest set # to be popped first. if self.spread != other.spread: return self.spread > other.spread return self.sets[0] > other.sets[0] def __repr__(self) -> str: repr_str = "[" for i in range(self.k): if i > 0: repr_str += "," repr_str += "{" for j, (_, seqlen) in enumerate(self.sets[i].items): if j > 0: repr_str += "," repr_str += str(seqlen) repr_str += "}" repr_str += "]" return repr_str sorted_seqlen_list = sorted([(seqlen, i) for i, seqlen in enumerate(seqlen_list)]) states_pq = [] if equal_size: assert len(seqlen_list) % k_partitions == 0, f"{len(seqlen_list)} % {k_partitions} != 0" for offset in range(0, len(sorted_seqlen_list), k_partitions): items = [] for i in range(k_partitions): seqlen, idx = sorted_seqlen_list[offset + i] items.append((idx, seqlen)) heapq.heappush(states_pq, State(items=items, k=k_partitions)) else: for seqlen, idx in sorted_seqlen_list: heapq.heappush(states_pq, State(items=[(idx, seqlen)], k=k_partitions)) while len(states_pq) > 1: state0 = heapq.heappop(states_pq) state1 = heapq.heappop(states_pq) # merge states state0.merge(state1) heapq.heappush(states_pq, state0) final_state = states_pq[0] partitions = final_state.get_partitions() if equal_size: for i, partition in enumerate(partitions): assert len(partition) * k_partitions == len(seqlen_list), ( f"{len(partition)} * {k_partitions} != {len(seqlen_list)}" ) return partitions def greedy_partition(seqlen_list: list[int], k_partitions: int, equal_size: bool): bias = sum(seqlen_list) + 1 if equal_size else 0 sorted_seqlen = [(seqlen + bias, i) for i, seqlen in enumerate(seqlen_list)] partitions = [[] for _ in range(k_partitions)] partition_sums = [0 for _ in range(k_partitions)] for seqlen, i in sorted_seqlen: min_idx = None for j in range(k_partitions): if min_idx is None or partition_sums[j] < partition_sums[min_idx]: min_idx = j 
partitions[min_idx].append(i) partition_sums[min_idx] += seqlen if equal_size: for i, partition in enumerate(partitions): assert len(partition) * k_partitions == len(seqlen_list), ( f"{len(partition)} * {k_partitions} != {len(seqlen_list)}" ) return partitions def get_seqlen_balanced_partitions(seqlen_list: list[int], k_partitions: int, equal_size: bool): """ Calculates partitions of indices from seqlen_list such that the sum of sequence lengths in each partition is balanced. Uses the Karmarkar-Karp differencing method. This is useful for balancing workload across devices or batches, especially when dealing with variable sequence lengths. Args: seqlen_list (List[int]): A list of sequence lengths for each item. k_partitions (int): The desired number of partitions. equal_size (bool): If True, ensures that each partition has the same number of items. Requires len(seqlen_list) to be divisible by k_partitions. If False, partitions can have varying numbers of items, focusing only on balancing the sum of sequence lengths. Returns: List[List[int]]: A list containing k_partitions lists. Each inner list contains the original indices of the items assigned to that partition. The indices within each partition list are sorted. Raises: AssertionError: If len(seqlen_list) < k_partitions. AssertionError: If equal_size is True and len(seqlen_list) is not divisible by k_partitions. AssertionError: If any resulting partition is empty. """ assert len(seqlen_list) >= k_partitions, f"number of items:[{len(seqlen_list)}] < k_partitions:[{k_partitions}]" def _check_and_sort_partitions(partitions): assert len(partitions) == k_partitions, f"{len(partitions)} != {k_partitions}" seen_idx = set() sorted_partitions = [None] * k_partitions for i, partition in enumerate(partitions): assert len(partition) > 0, f"the {i}-th partition is empty" for idx in partition: seen_idx.add(idx) sorted_partitions[i] = sorted(partition) assert seen_idx == set(range(len(seqlen_list))) return sorted_partitions partitions = karmarkar_karp(seqlen_list=seqlen_list, k_partitions=k_partitions, equal_size=equal_size) return _check_and_sort_partitions(partitions) def log_seqlen_unbalance(seqlen_list: list[int], partitions: list[list[int]], prefix): """ Calculate and log metrics related to sequence length imbalance before and after partitioning. Args: seqlen_list (List[int]): A list of sequence lengths for each item. partitions (List[List[int]]): A list of partitions, where each inner list contains indices from seqlen_list assigned to that partition. prefix (str): A prefix to be added to each metric key in the returned dictionary. Returns: dict: A dictionary containing metrics related to sequence length imbalance. 
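    Example (illustrative)::

        seqlens = [7, 1, 5, 3]
        parts = get_seqlen_balanced_partitions(seqlens, k_partitions=2, equal_size=True)
        # e.g. [[0, 1], [2, 3]]: both partitions sum to 8 tokens
        metrics = log_seqlen_unbalance(seqlens, parts, prefix="seqlen")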
""" # Get the number of partitions k_partition = len(partitions) # assert len(seqlen_list) % k_partition == 0 batch_size = len(seqlen_list) // k_partition min_sum_seqlen = None max_sum_seqlen = None total_sum_seqlen = 0 # Iterate over each batch of sequence lengths for offset in range(0, len(seqlen_list), batch_size): cur_sum_seqlen = sum(seqlen_list[offset : offset + batch_size]) if min_sum_seqlen is None or cur_sum_seqlen < min_sum_seqlen: min_sum_seqlen = cur_sum_seqlen if max_sum_seqlen is None or cur_sum_seqlen > max_sum_seqlen: max_sum_seqlen = cur_sum_seqlen total_sum_seqlen += cur_sum_seqlen balanced_sum_seqlen_list = [] for partition in partitions: cur_sum_seqlen_balanced = sum([seqlen_list[i] for i in partition]) balanced_sum_seqlen_list.append(cur_sum_seqlen_balanced) # print("balanced_sum_seqlen_list: ", balanced_sum_seqlen_list) min_sum_seqlen_balanced = min(balanced_sum_seqlen_list) max_sum_seqlen_balanced = max(balanced_sum_seqlen_list) return { f"{prefix}/min": min_sum_seqlen, f"{prefix}/max": max_sum_seqlen, f"{prefix}/minmax_diff": max_sum_seqlen - min_sum_seqlen, f"{prefix}/balanced_min": min_sum_seqlen_balanced, f"{prefix}/balanced_max": max_sum_seqlen_balanced, f"{prefix}/mean": total_sum_seqlen / len(partitions), } def ceildiv(a, b): return -(a // -b) def roundup_divisible(a, b): return ((a + b - 1) // b) * b def rearrange_micro_batches( batch, max_token_len, dp_group=None, num_batches_divided_by=None, same_micro_num_in_dp=True, min_num_micro_batch=None, use_dynamic_bsz_balance=True, ): """ Split a batch into micro-batches by total token count, with optional DP sync and padding. Args: batch (TensorDict): must include "attention_mask" (B*S); other fields are sliced similarly. max_token_len (int): max sum of attention_mask per micro-batch. dp_group (optional): torch.distributed group for data-parallel sync. num_batches_divided_by (optional): virtual pipeline parallel size, for megatron. same_micro_num_in_dp (bool): if True and dp_group set, pad all ranks to the same count. min_num_micro_batch (int, optional): force at least this many splits (pads empty ones). use_dynamic_bsz_balance (bool, optional): balance the computational workload between micro-batches Returns: List[TensorDict]: the micro-batches. List[List[int]]: index lists mapping each micro-batch back to original positions. """ # this is per local micro_bsz max_seq_len = batch["attention_mask"].shape[-1] assert max_token_len >= max_seq_len, ( f"max_token_len must be greater than the sequence length. Got {max_token_len=} and {max_seq_len=}" ) seq_len_effective: torch.Tensor = batch["attention_mask"].sum(dim=1) total_seqlen = seq_len_effective.sum().item() # NOTE: num_microbatches <= batch_size, so take the min of this two. 
num_micro_batches = min(len(seq_len_effective), ceildiv(total_seqlen, max_token_len)) if min_num_micro_batch is not None: # used to support pp num_micro_batches = max(min_num_micro_batch, num_micro_batches) if dist.is_initialized() and same_micro_num_in_dp: num_micro_batches = torch.tensor([num_micro_batches], device=get_device_name()) dist.all_reduce(num_micro_batches, op=dist.ReduceOp.MAX, group=dp_group) num_micro_batches = num_micro_batches.cpu().item() if num_batches_divided_by is not None: num_micro_batches = roundup_divisible(num_micro_batches, num_batches_divided_by) seq_len_effective = seq_len_effective.tolist() assert num_micro_batches <= len(seq_len_effective) micro_bsz_idx = get_seqlen_balanced_partitions(seq_len_effective, num_micro_batches, equal_size=False) if use_dynamic_bsz_balance: # Use the sum of squared sequence lengths to approximate attention computation workload micro_bsz_idx.sort( key=lambda partition: ( sum(seq_len_effective[idx] ** 2 for idx in partition), min(partition) if partition else 0, ), reverse=True, ) micro_batches = [] for partition in micro_bsz_idx: curr_micro_batch = [] for idx in partition: curr_micro_batch.append(batch[idx : idx + 1]) curr_micro_batch = torch.cat(curr_micro_batch) micro_batches.append(curr_micro_batch) return micro_batches, micro_bsz_idx def get_reverse_idx(idx_map): """ Build the inverse of an index mapping. Args: idx_map (Sequence[int]): Sequence where idx_map[i] = j. Returns: List[int]: Inverse mapping list such that output[j] = i for each i. """ reverse_idx_map = copy.deepcopy(idx_map) for i, idx in enumerate(idx_map): reverse_idx_map[idx] = i return reverse_idx_map def prepare_dynamic_batch(data: DataProto, max_token_len: int) -> tuple[list[DataProto], list[list[int]]]: """ Prepare a batch for dynamic batching. Args: data (DataProto): The input data. max_token_len (int): The maximum token length for dynamic batching. Returns: Tuple[List[DataProto], List[List[int]]]: A tuple containing a list of DataProto objects and a list of index lists. """ batch, batch_idx_list = rearrange_micro_batches(data.batch, max_token_len=max_token_len) micro_batches = [] for i, batch_idx in enumerate(batch_idx_list): tensors = dict(batch[i]) non_tensors = {key: value[batch_idx] for key, value in data.non_tensor_batch.items()} micro_batches.append(DataProto.from_dict(tensors, non_tensors)) return micro_batches, batch_idx_list def restore_dynamic_batch(data: torch.Tensor, batch_idx_list: list[list[int]]) -> torch.Tensor: """ Restore a batch from dynamic batching. Args: data (torch.Tensor): The input data. batch_idx_list (List[List[int]]): The list of index lists. Returns: torch.Tensor: The restored data. """ indices = list(chain.from_iterable(batch_idx_list)) revert_indices = torch.tensor(get_reverse_idx(indices), dtype=torch.long) return data[revert_indices] ================================================ FILE: verl_rl/verl/utils/tokenizer.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and
# limitations under the License.

"""Utils for tokenization."""

import warnings

__all__ = ["hf_tokenizer", "hf_processor"]


def set_pad_token_id(tokenizer):
    """Set pad_token_id to eos_token_id if it is None.

    Args:
        tokenizer (transformers.PreTrainedTokenizer): The tokenizer to be set.
    """
    if tokenizer.pad_token_id is None:
        tokenizer.pad_token_id = tokenizer.eos_token_id
        warnings.warn(f"tokenizer.pad_token_id is None. Now set to {tokenizer.eos_token_id}", stacklevel=1)
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token
        warnings.warn(f"tokenizer.pad_token is None. Now set to {tokenizer.eos_token}", stacklevel=1)


def hf_tokenizer(name_or_path, correct_pad_token=True, correct_gemma2=True, **kwargs):
    """Create a huggingface pretrained tokenizer which correctly handles eos and pad tokens.

    Args:
        name_or_path (str): The name or path of the tokenizer.
        correct_pad_token (bool): Whether to correct the pad token id.
        correct_gemma2 (bool): Whether to correct the gemma2 tokenizer.

    Returns:
        transformers.PreTrainedTokenizer: The pretrained tokenizer.
    """
    from transformers import AutoTokenizer

    if correct_gemma2 and isinstance(name_or_path, str) and "gemma-2-2b-it" in name_or_path:
        # the EOS token in gemma2 is ambiguous, which may worsen RL performance.
        # https://huggingface.co/google/gemma-2-2b-it/commit/17a01657f5c87135bcdd0ec7abb4b2dece04408a
        warnings.warn(
            "Found gemma-2-2b-it tokenizer. Set eos_token and eos_token_id to <end_of_turn> and 107.", stacklevel=1
        )
        kwargs["eos_token"] = "<end_of_turn>"
        kwargs["eos_token_id"] = 107
    tokenizer = AutoTokenizer.from_pretrained(name_or_path, **kwargs)
    if correct_pad_token:
        set_pad_token_id(tokenizer)
    return tokenizer


def hf_processor(name_or_path, **kwargs):
    """Create a huggingface processor to process multimodal data.

    Args:
        name_or_path (str): The name of the processor.

    Returns:
        transformers.ProcessorMixin: The pretrained processor.
    """
    from transformers import AutoProcessor

    try:
        processor = AutoProcessor.from_pretrained(name_or_path, **kwargs)
    except Exception as e:
        processor = None
        # TODO(haibin.lin): try-catch should be removed after adding transformer version req to setup.py to avoid
        # silent failure
        warnings.warn(f"Failed to create processor: {e}. This may affect multimodal processing", stacklevel=1)
    # Avoid loading the tokenizer, see:
    # https://github.com/huggingface/transformers/blob/v4.49.0/src/transformers/models/auto/processing_auto.py#L344
    if processor is not None and "Processor" not in processor.__class__.__name__:
        processor = None
    return processor


================================================
FILE: verl_rl/verl/utils/torch_dtypes.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Adapted from Cruise.
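Example (illustrative)::

    PrecisionType.to_dtype("bf16")       # torch.bfloat16
    PrecisionType.to_str(torch.float16)  # "fp16"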
""" import torch HALF_LIST = [16, "16", "fp16", "float16", torch.float16] FLOAT_LIST = [32, "32", "fp32", "float32", torch.float32] BFLOAT_LIST = ["bf16", "bfloat16", torch.bfloat16] class PrecisionType: """Type of precision used. >>> PrecisionType.HALF == 16 True >>> PrecisionType.HALF in (16, "16") True """ HALF = "16" FLOAT = "32" FULL = "64" BFLOAT = "bf16" MIXED = "mixed" @staticmethod def supported_type(precision: str | int) -> bool: return any(x == precision for x in PrecisionType) @staticmethod def supported_types() -> list[str]: return [x.value for x in PrecisionType] @staticmethod def is_fp16(precision): return precision in HALF_LIST @staticmethod def is_fp32(precision): return precision in FLOAT_LIST @staticmethod def is_bf16(precision): return precision in BFLOAT_LIST @staticmethod def to_dtype(precision): if precision in HALF_LIST: return torch.float16 elif precision in FLOAT_LIST: return torch.float32 elif precision in BFLOAT_LIST: return torch.bfloat16 else: raise RuntimeError(f"unexpected precision: {precision}") @staticmethod def to_str(precision): if precision == torch.float16: return "fp16" elif precision == torch.float32: return "fp32" elif precision == torch.bfloat16: return "bf16" else: raise RuntimeError(f"unexpected precision: {precision}") ================================================ FILE: verl_rl/verl/utils/torch_functional.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Contain small torch utilities """ import math from contextlib import contextmanager from typing import Optional import torch import torch.distributed import torch.nn.functional as F from tensordict import TensorDict from torch import nn from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR from transformers import PreTrainedTokenizer from verl.utils.device import get_device_name, get_torch_device try: from flash_attn.ops.triton.cross_entropy import cross_entropy_loss FLAH_ATTN_CROSS_ENTROPY_LOSS_AVAILABLE = True except ImportError: FLAH_ATTN_CROSS_ENTROPY_LOSS_AVAILABLE = False try: import torch_npu NPU_CROSS_ENTROPY_LOSS_AVAILABLE = hasattr(torch_npu, "npu_cross_entropy_loss") except ImportError: NPU_CROSS_ENTROPY_LOSS_AVAILABLE = False def gather_from_labels(data, label): """Gather the label from data. The value in label should be [0, vocab_size) Args: data: (..., vocab_size) label (torch.IntTensor) : (...,) Returns: """ output = torch.gather(data, -1, label.unsqueeze(-1)).squeeze(-1) return output def logprobs_from_logits(logits, labels, inplace_backward=True): """ Compute per-token log-probabilities for the given labels. Uses a Flash-Attention–based cross-entropy (if available) for efficient backward, otherwise falls back to a standard log-softmax+gather approach. See: https://github.com/pytorch/pytorch/issues/563#issuecomment-330103591 Args: logits (Tensor): Model outputs of shape (..., vocab_size). 
labels (LongTensor): True class indices, of shape logits.shape[:-1].
        inplace_backward (bool): If True and Flash-Attn is available, perform backward in-place.

    Returns:
        Tensor: Log-probabilities of the target labels, shape logits.shape[:-1].
    """
    if FLAH_ATTN_CROSS_ENTROPY_LOSS_AVAILABLE:
        batch_dim = logits.shape[:-1]
        last_dim = logits.shape[-1]
        logits = logits.reshape(-1, last_dim)
        labels = labels.reshape(-1)
        output = logprobs_from_logits_flash_attn(logits, labels, inplace_backward=inplace_backward)
        output = output.view(*batch_dim)
    elif NPU_CROSS_ENTROPY_LOSS_AVAILABLE:
        output = logprobs_from_logits_torch_npu(logits, labels)
    else:
        output = logprobs_from_logits_v2(logits, labels)
    return output


def logprobs_from_logits_flash_attn(logits, labels, inplace_backward=True):
    output = cross_entropy_loss(logits, labels, inplace_backward=inplace_backward)
    assert isinstance(output, tuple), (
        "please make sure flash-attn>=2.4.3 where cross_entropy_loss returns Tuple[losses, z_losses]."
    )
    return -output[0]


def logprobs_from_logits_torch_npu(logits, labels):
    batch_dim = logits.shape[:-1]
    logits = logits.reshape(-1, logits.shape[-1])
    loss, _, _, _ = torch_npu.npu_cross_entropy_loss(logits, labels.reshape(-1), reduction="none")
    return -loss.view(*batch_dim)


def logprobs_from_logits_naive(logits, labels):
    logp = F.log_softmax(logits, dim=-1)
    logpy = gather_from_labels(logp, labels)
    return logpy


def logprobs_from_logits_v2(logits: torch.FloatTensor, labels):
    """
    A memory efficient implementation of logprobs_from_logits
    """
    if logits.dtype in [torch.float32, torch.float64]:
        logits_labels = torch.gather(logits, dim=-1, index=labels.unsqueeze(-1)).squeeze(-1)
        # loop to reduce peak mem consumption
        logsumexp_values = torch.stack([torch.logsumexp(logit, dim=-1) for logit in logits])
        logprobs_labels = logits_labels - logsumexp_values  # log_softmax(x_i) = x_i - logsumexp(x)
    else:
        # logsumexp approach is unstable with bfloat16, fall back to a slightly less efficient approach
        logprobs_labels = []
        for row_logits, row_labels in zip(logits, labels, strict=True):  # loop to reduce peak mem consumption
            row_logprobs = F.log_softmax(row_logits, dim=-1)
            row_logprobs_labels = row_logprobs.gather(dim=-1, index=row_labels.unsqueeze(-1)).squeeze(-1)
            logprobs_labels.append(row_logprobs_labels)
        logprobs_labels = torch.stack(logprobs_labels)
    return logprobs_labels


def clip_by_value(x, tensor_min, tensor_max):
    """
    Tensor extension of torch.clamp
    https://github.com/pytorch/pytorch/issues/2793#issuecomment-428784713
    """
    clipped = torch.max(torch.min(x, tensor_max), tensor_min)
    return clipped


def entropy_from_logits(logits: torch.Tensor):
    """Calculate entropy from logits."""
    pd = torch.nn.functional.softmax(logits, dim=-1)
    entropy = torch.logsumexp(logits, dim=-1) - torch.sum(pd * logits, dim=-1)
    return entropy


def entropy_from_logits_with_chunking(logits: torch.Tensor, chunk_size: int = 2048):
    """Memory-efficient entropy calculation with chunking."""
    entropy = torch.zeros(logits.shape[0], device=logits.device)
    for i in range(0, logits.shape[0], chunk_size):
        logits_chunk = logits[i : i + chunk_size].float()
        pd_chunk = torch.nn.functional.softmax(logits_chunk, dim=-1)
        entropy_chunk = torch.logsumexp(logits_chunk, dim=-1) - torch.sum(pd_chunk * logits_chunk, dim=-1)
        entropy[i : i + chunk_size] = entropy_chunk
    return entropy


def masked_sum(values, mask, axis=None):
    """Compute the sum of `values` over elements selected by `mask`."""
    # If NaNs exist outside the mask, replace them with a value that
    # won't affect the sum (e.g., 0 for masked regions)
valid_values = torch.where(mask.bool(), values, 0.0) return (valid_values * mask).sum(axis=axis) def masked_mean(values, mask, axis=None): """ Compute the mean of `values` over elements selected by `mask`. Args: values (Tensor): Input tensor. mask (Tensor): Boolean or numeric mask of the same shape as `values`. axis (int or tuple of int, optional): Dimension(s) along which to compute the mean. Defaults to None (over all elements). Returns: Tensor: Masked mean, with shape equal to `values` reduced over `axis`. """ s = masked_sum(values, mask, axis) return s / (mask.sum(axis=axis) + 1e-8) def masked_var(values, mask, unbiased=True): """Compute variance of tensor with masked values.""" mean = masked_mean(values, mask) centered_values = values - mean variance = masked_mean(centered_values**2, mask) if unbiased: mask_sum = mask.sum() if mask_sum == 0: raise ValueError("At least one element in the mask has to be 1.") # note that if mask_sum == 1, then there is a division by zero issue # to avoid it you just need to use a larger minibatch_size if mask_sum == 1: raise ValueError("The sum of the mask is one, which can cause a division by zero.") bessel_correction = mask_sum / (mask_sum - 1) variance = variance * bessel_correction return variance def masked_whiten(values, mask, shift_mean=True): """ Whiten `values` by normalizing with mean and variance computed over `mask`. Args: values (torch.Tensor): Input tensor. mask (torch.Tensor): Boolean tensor of same shape, selects elements for stats. shift_mean (bool): If True (default), output is zero-mean; if False, the original mean is re-added after scaling. Returns: torch.Tensor: Whitened tensor of same shape as `values`. """ mean, var = masked_mean(values, mask), masked_var(values, mask) whitened = (values - mean) * torch.rsqrt(var + 1e-8) if not shift_mean: whitened += mean return whitened def get_response_mask(response_id: torch.Tensor, eos_token: int | list[int] = 2, dtype=torch.int64): """ end of sentence token can be int or list: 1 or [1, 2] e.g. response_id = torch.tensor([[20, 10, 34, 1, 0, 0, 0], [78, 0, 76, 2, 1, 0, 0], [23, 98, 1, 0, 0, 0, 0], [33, 3, 98, 45, 1, 0, 0]]) #eos_token=1 response_mask: tensor([[1, 1, 1, 1, 0, 0, 0], [1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 0, 0, 0, 0], [1, 1, 1, 1, 1, 0, 0]]) #eos_token=[1,2] response_mask: tensor([[1, 1, 1, 1, 0, 0, 0], [1, 1, 1, 1, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0], [1, 1, 1, 1, 1, 0, 0]]) """ eos_mask = torch.isin(response_id, torch.tensor(eos_token, device=response_id.device)).int() return (eos_mask.cumsum(dim=1) - eos_mask).eq(0).to(dtype) def compute_grad_norm(model: nn.Module): total_grad_square = 0 for param in model.parameters(): if param.grad is not None: total_grad_square += torch.sum(torch.square(param.grad.detach())).item() return total_grad_square def broadcast_dict_tensor(tensors: dict[str, torch.Tensor] | TensorDict, src, group): """ TODO: optimize this. Technically, we only need one broadcast """ for key in tensors.sorted_keys: torch.distributed.broadcast(tensors[key], src=src, group=group, async_op=False) def allgather_dict_tensors(tensors: dict[str, torch.Tensor] | TensorDict, size, group, dim=0): """ TODO: optimize this. 
- We can use async ops - We can use only one allgather Args: tensors: size: group: Returns: """ if isinstance(tensors, TensorDict): is_tensor_dict = True tensors_as_dict = tensors.to_dict() else: tensors_as_dict = tensors is_tensor_dict = False output = {} sorted_keys = sorted(tensors_as_dict.keys()) for key in sorted_keys: val = tensors_as_dict[key] output[key] = [torch.empty_like(val) for _ in range(size)] torch.distributed.all_gather(output[key], val, group=group, async_op=False) output[key] = torch.cat(output[key], dim=dim) if is_tensor_dict: output = TensorDict(source=output, batch_size=tensors.batch_size[0] * size) return output def split_dict_tensor_into_batches(tensors: TensorDict, batch_size) -> list[TensorDict]: assert tensors.batch_size[0] % batch_size == 0, ( f"input data batch size: {tensors.batch_size[0]}, split batch size: {batch_size}" ) return tensors.split(batch_size) def pad_2d_list_to_length(response, pad_token_id, max_length=None): """ pad a 2D list (e.g. responses, logprobs) to a 2D tensor. """ response_length = max(len(sub_list) for sub_list in response) target_length = max_length if max_length is not None and max_length > response_length else response_length padded_response = [tuple(sub_list) + (pad_token_id,) * (target_length - len(sub_list)) for sub_list in response] tensor = torch.tensor(padded_response) return tensor def pad_sequence_to_length(tensors, max_seq_len, pad_token_id, left_pad=False): """ pad a 2D tensors (e.g. responses, logprobs) in the last dim to max_seq_length. input shape: [bs, seq_length] output shape: [bs, max_seq_length] """ if tensors.shape[-1] >= max_seq_len: return tensors # (0, max_seq_len - tensors.shape[-1]) means right pad to max_seq_length and no left pad pad_tuple = (max_seq_len - tensors.shape[-1], 0) if left_pad else (0, max_seq_len - tensors.shape[-1]) return F.pad(tensors, pad_tuple, "constant", pad_token_id) def postprocess_data( input_ids: torch.Tensor, attention_mask: torch.Tensor, max_length: int, pad_token_id: int, left_pad=True, truncation="error", ): """Process tokenizer outputs to consistent shapes via padding/truncation. 
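    For example, a [2, 5] batch with max_length=8 comes back left-padded to [2, 8]
    by default, while a [2, 10] batch is truncated according to `truncation`
    ("left" keeps the last 8 tokens, "middle" keeps 4 from each end).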
Args:
        input_ids: Token indices [batch_size, seq_len]
        attention_mask: Mask [batch_size, seq_len]
        max_length: Target sequence length
        pad_token_id: Padding token ID
        left_pad: Pad left if True
        truncation: "left", "right", "middle" or "error"

    Returns:
        (input_ids, attention_mask) padded/truncated to max_length
    """
    assert truncation in ["left", "right", "middle", "error"]
    assert input_ids.ndim == 2

    sequence_length = input_ids.shape[-1]
    if sequence_length < max_length:
        input_ids = pad_sequence_to_length(
            input_ids, max_seq_len=max_length, pad_token_id=pad_token_id, left_pad=left_pad
        )
        attention_mask = pad_sequence_to_length(
            attention_mask, max_seq_len=max_length, pad_token_id=0, left_pad=left_pad
        )
    elif sequence_length > max_length:
        if truncation == "left":
            # actually, left truncation may not be reasonable
            input_ids = input_ids[:, -max_length:]
            attention_mask = attention_mask[:, -max_length:]
        elif truncation == "right":
            input_ids = input_ids[:, :max_length]
            attention_mask = attention_mask[:, :max_length]
        elif truncation == "middle":
            left_half = max_length // 2
            right_half = max_length - left_half
            input_ids = torch.cat([input_ids[:, :left_half], input_ids[:, -right_half:]], dim=-1)
            attention_mask = torch.cat([attention_mask[:, :left_half], attention_mask[:, -right_half:]], dim=-1)
        elif truncation == "error":
            raise NotImplementedError(f"{sequence_length=} is larger than {max_length=}")
        else:
            raise NotImplementedError(f"Unknown truncation method {truncation}")

    return input_ids, attention_mask


def tokenize_and_postprocess_data(
    prompt: str, tokenizer: PreTrainedTokenizer, max_length: int, pad_token_id: int, left_pad=True, truncation="error"
):
    """Tokenize text and process outputs to consistent tensor shapes.

    Args:
        prompt: Input text to tokenize
        tokenizer: HuggingFace tokenizer instance
        max_length: Target sequence length
        pad_token_id: Padding token ID
        left_pad: Pad left if True
        truncation: Truncation strategy ("left"/"right"/"error")

    Returns:
        Tuple of (input_ids, attention_mask) from postprocess_data
    """
    input_data = tokenizer(prompt, return_tensors="pt", add_special_tokens=False)
    input_ids = input_data["input_ids"]
    attention_mask = input_data["attention_mask"]

    return postprocess_data(input_ids, attention_mask, max_length, pad_token_id, left_pad, truncation)


def remove_pad_token(input_ids: torch.Tensor, attention_mask: torch.Tensor):
    """Remove the pad token.

    Args:
        input_ids shape: [bs, seq_length]
        attention_mask shape: [bs, seq_length]

    Returns:
        no_padding_batch(List[List[int]]): contains the rmpad token ids per query.
    """
    no_padding_batch = []
    for ids, mask in zip(input_ids, attention_mask, strict=True):
        no_padding_batch.append((ids[len(ids) - mask.sum() :]).cpu().numpy().tolist())
    return no_padding_batch


def log_probs_from_logits_response(input_ids, logits, response_length):
    """Compute the response log_probs from full logits. Note that logits = model(input_ids)

    Args:
        input_ids: [batch_size, seqlen]
        logits: [batch_size, seqlen, vocab_size]

    Returns:
        response_log_prob:
    """
    response_logits = logits[:, -response_length - 1 : -1]
    response = input_ids[:, -response_length:]
    response_log_prob = logprobs_from_logits(logits=response_logits, labels=response)
    return response_log_prob


def log_probs_from_logits_response_rmpad(input_ids, attention_mask, logits_rmpad, response_length):
    """Compute the log_probs from logits with rmpad logits and pad input. Note that
    logits_rmpad = model(input_ids_rmpad). For each sentence, there is a shift between logits and input_ids.
    The reason for this function is to compute logprobs_from_logits in rmpad mode, because it is memory-intensive
    for large vocab_size.

    Args:
        input_ids: [batch_size, seqlen]
        attention_mask: [batch_size, seqlen]
        logits_rmpad: [total_nnz, vocab_size]
        response_length: int
    """
    from flash_attn.bert_padding import pad_input, unpad_input

    batch_size, seqlen = input_ids.shape
    input_ids_rmpad, indices, *_ = unpad_input(input_ids.unsqueeze(-1), attention_mask=attention_mask)
    input_ids_rmpad = input_ids_rmpad.squeeze(-1)
    input_ids_rmpad_rolled = torch.roll(input_ids_rmpad, shifts=-1, dims=0)
    full_log_probs_rmpad = logprobs_from_logits(logits=logits_rmpad, labels=input_ids_rmpad_rolled)  # (total_nnz,)
    full_output = pad_input(
        hidden_states=full_log_probs_rmpad.unsqueeze(-1), indices=indices, batch=batch_size, seqlen=seqlen
    )
    output = full_output.squeeze(-1)[:, -response_length - 1 : -1]  # [batch_size, response_length]
    return output


def log_probs_from_logits_all_rmpad(input_ids_rmpad, logits_rmpad, indices, batch_size, seqlen, response_length):
    """Compute the log_probs from logits with rmpad input_ids and logits. Note that
    logits_rmpad = model(input_ids_rmpad). For each sentence, there is a shift between logits and input_ids.
    The reason for this function is to compute logprobs_from_logits in rmpad mode, because it is memory-intensive
    for large vocab_size.

    Args:
        input_ids_rmpad: [1, total_nnz]
        logits_rmpad: [total_nnz, vocab_size]
        indices: [total_nnz]
        batch_size: int
        seqlen: int
        response_length: int
    """
    from flash_attn.bert_padding import pad_input

    input_ids_rmpad = input_ids_rmpad.transpose(0, 1)  # transpose back to [total_nnz, 1]
    input_ids_rmpad = input_ids_rmpad.squeeze(-1)
    input_ids_rmpad_rolled = torch.roll(input_ids_rmpad, shifts=-1, dims=0)
    full_log_probs_rmpad = logprobs_from_logits(logits=logits_rmpad, labels=input_ids_rmpad_rolled)  # (total_nnz,)
    full_output = pad_input(
        hidden_states=full_log_probs_rmpad.unsqueeze(-1), indices=indices, batch=batch_size, seqlen=seqlen
    )
    output = full_output.squeeze(-1)[:, -response_length - 1 : -1]  # [batch_size, response_length]
    return output


def post_process_logits(input_ids, logits, temperature, top_k, top_p):
    if temperature != 1.0:
        logits = logits.div_(temperature)  # inplace operation to avoid OOM
    # TODO: add them back
    # if top_k is not None and top_k > 0:
    #     logits = TopKLogitsWarper(top_k=top_k)(input_ids, logits)
    # if top_p is not None and top_p < 1.0 and top_p > 0.0:
    #     logits = TopPLogitsWarper(top_p=top_p)(input_ids, logits)
    return logits


"""
Optimizer related
"""


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer,
    num_warmup_steps: int,
    num_training_steps: int,
    min_lr_ratio: float = 0.0,
    num_cycles: float = 0.5,
    last_epoch: int = -1,
):
    """
    Create a schedule with a learning rate that decreases following the values of the cosine function between the
    initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the
    initial lr set in the optimizer.

    Args:
        optimizer (:class:`~torch.optim.Optimizer`):
            The optimizer for which to schedule the learning rate.
        num_warmup_steps (:obj:`int`):
            The number of steps for the warmup phase.
        num_training_steps (:obj:`int`):
            The total number of training steps.
        min_lr_ratio (:obj:`float`, `optional`, defaults to 0.0):
            The minimum lr ratio w.r.t the maximum.
        num_cycles (:obj:`float`, `optional`, defaults to 0.5):
            The number of waves in the cosine schedule (the default is to just decrease from the max value to 0
            following a half-cosine).
last_epoch (:obj:`int`, `optional`, defaults to -1): The index of the last epoch when resuming training. Return: :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. """ min_lr_ratio = 0.0 if min_lr_ratio is None else min_lr_ratio assert min_lr_ratio >= 0 and min_lr_ratio <= 1.0 coef = (1 - min_lr_ratio) * 0.5 intercept = (1 + min_lr_ratio) * 0.5 def lr_lambda(current_step): if current_step < num_warmup_steps: return min_lr_ratio + (1.0 - min_lr_ratio) * (float(current_step) / float(max(1, num_warmup_steps))) progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps)) x = math.cos(math.pi * float(num_cycles) * 2.0 * progress) return max(min_lr_ratio, x * coef + intercept) return LambdaLR(optimizer, lr_lambda, last_epoch) def get_constant_schedule_with_warmup( optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1, ): """ Create a constant LR schedule with a linear warmup phase. Args: optimizer (Optimizer): Wrapped optimizer. num_warmup_steps (int): Number of steps to ramp up the LR from 0 to initial value. last_epoch (int, optional): The index of the last epoch when resuming training. Defaults to -1. Returns: LambdaLR: Scheduler that increases LR linearly during warmup, then holds it constant. """ def lr_lambda(current_step): if current_step < num_warmup_steps: return float(current_step) / float(max(1.0, num_warmup_steps)) return 1.0 return LambdaLR(optimizer, lr_lambda, last_epoch) def prepare_decoder_attention_mask(attention_mask, input_shape, inputs_embeds): # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] combined_attention_mask = None if input_shape[-1] > 1: combined_attention_mask = _make_causal_mask( input_shape, inputs_embeds.dtype, device=inputs_embeds.device, ) if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to( inputs_embeds.device ) combined_attention_mask = ( expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask ) return combined_attention_mask # Copied from transformers.models.bart.modeling_bart._make_causal_mask def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device): """ Make causal mask used for bi-directional self-attention. """ bsz, tgt_len = input_ids_shape mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device) mask_cond = torch.arange(mask.size(-1), device=device) mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) mask = mask.to(dtype) return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len) # Copied from transformers.models.bart.modeling_bart._expand_mask def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. 
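    For example, a mask row [1, 1, 0] becomes an additive bias row whose masked
    position holds the dtype's minimum value and whose visible positions are 0.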
""" bsz, src_len = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) def get_unpad_data(attention_mask): seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() max_seqlen_in_batch = seqlens_in_batch.max().item() cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0)) return ( indices, cu_seqlens, max_seqlen_in_batch, ) def get_wsd_schedule_with_warmup( optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, min_lr_ratio: float = 0.0, num_cycles: float = 0.5, last_epoch: int = -1, stable_ratio: float = 0.9, ): """ Create a Warmup-Stable-Decay learning rate scheduler. The schedule follows three phases: 1. Warmup: Learning rate increases linearly from 0 to the initial LR 2. Stable: Learning rate remains constant at the initial LR 3. Decay: Learning rate decreases following a cosine curve to min_lr_ratio * initial LR Args: optimizer (:class:`~torch.optim.Optimizer`): The optimizer for which to schedule the learning rate. num_warmup_steps (:obj:`int`): The number of steps for the warmup phase. num_training_steps (:obj:`int`): The total number of training steps. min_lr_ratio (:obj:`float`, `optional`, defaults to 0.0): The minimum learning rate ratio w.r.t the initial learning rate. num_cycles (:obj:`float`, `optional`, defaults to 0.5): The number of waves in the cosine schedule during decay phase. last_epoch (:obj:`int`, `optional`, defaults to -1): The index of the last epoch when resuming training. stable_ratio (:obj:`float`, `optional`, defaults to 0.0): The ratio of non-warmup steps that should maintain a constant learning rate. Set to 0.0 to behave exactly like cosine schedule. Return: :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. """ remaining_steps = max(0, num_training_steps - num_warmup_steps) num_stable_steps = int(remaining_steps * stable_ratio) num_decay_steps = remaining_steps - num_stable_steps def lr_lambda(current_step): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) if current_step < num_warmup_steps + num_stable_steps: return 1.0 if current_step < num_training_steps: progress = float(current_step - num_warmup_steps - num_stable_steps) / float(max(1, num_decay_steps)) value = max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress))) return (1.0 - min_lr_ratio) * value + min_lr_ratio return min_lr_ratio return LambdaLR(optimizer, lr_lambda, last_epoch) @contextmanager def check_device_is_available(): """ Some modules must be imported after CUDA is initialized. Such as sglang's sharding manager. This context manager checks if CUDA is available and raises an error if it is not. """ if not get_torch_device().is_available(): raise RuntimeError("Device {} must be initialized before importing this module.".format(get_device_name())) yield def distributed_mean_max_min_std(local_tensor, compute_max=True, compute_min=True, compute_std=True): """Compute distributed statistics across all processes. 
Args: local_tensor: Tensor containing local values compute_max: Include maximum value calculation compute_min: Include minimum value calculation compute_std: Include standard deviation calculation Returns: Tuple containing (mean, max, min, std) in this order. None for disabled metrics. """ # Sum the local tensor across all processes local_sum = torch.sum(local_tensor) local_num = torch.tensor(torch.numel(local_tensor), device=get_device_name()) torch.distributed.all_reduce(local_sum, op=torch.distributed.ReduceOp.SUM) torch.distributed.all_reduce(local_num, op=torch.distributed.ReduceOp.SUM) global_mean = local_sum / local_num if compute_max: local_max = torch.max(local_tensor) torch.distributed.all_reduce(local_max, op=torch.distributed.ReduceOp.MAX) else: local_max = None if compute_min: local_min = torch.min(local_tensor) torch.distributed.all_reduce(local_min, op=torch.distributed.ReduceOp.MIN) else: local_min = None if compute_std: square_diff = torch.sum(torch.pow(local_tensor - global_mean, 2)) torch.distributed.all_reduce(square_diff, op=torch.distributed.ReduceOp.SUM) global_std = torch.sqrt(square_diff / (local_num - 1)) else: global_std = None return global_mean, local_max, local_min, global_std def distributed_masked_mean(local_tensor, local_mask): """Compute global mean of non-masked elements across distributed processes. Args: local_tensor (torch.Tensor): Input tensor with local values local_mask (torch.Tensor): Binary mask (1=valid, 0=ignore) matching local_tensor shape Returns: torch.Tensor: Global mean of all valid elements across processes """ local_tensor = local_tensor * local_mask local_sum = torch.sum(local_tensor) local_num = torch.sum(local_mask) torch.distributed.all_reduce(local_sum, op=torch.distributed.ReduceOp.SUM) torch.distributed.all_reduce(local_num, op=torch.distributed.ReduceOp.SUM) global_mean = local_sum / local_num return global_mean ================================================ FILE: verl_rl/verl/utils/tracking.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ A unified tracking interface that supports logging data to different backend """ import dataclasses import os from enum import Enum from functools import partial from pathlib import Path from typing import Any class Tracking: """A unified tracking interface for logging experiment data to multiple backends. This class provides a centralized way to log experiment metrics, parameters, and artifacts to various tracking backends including WandB, MLflow, SwanLab, TensorBoard, and console. Attributes: supported_backend: List of supported tracking backends. logger: Dictionary of initialized logger instances for each backend. 
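    Example (illustrative; the chosen backends must be installed and configured)::

        tracker = Tracking("my_project", "run_1", default_backend=["console", "wandb"])
        tracker.log({"reward/mean": 0.42}, step=10)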
""" supported_backend = ["wandb", "mlflow", "swanlab", "vemlp_wandb", "tensorboard", "console", "clearml"] def __init__(self, project_name, experiment_name, default_backend: str | list[str] = "console", config=None): if isinstance(default_backend, str): default_backend = [default_backend] for backend in default_backend: if backend == "tracking": import warnings warnings.warn("`tracking` logger is deprecated. use `wandb` instead.", DeprecationWarning, stacklevel=2) else: assert backend in self.supported_backend, f"{backend} is not supported" self.logger = {} if "tracking" in default_backend or "wandb" in default_backend: import wandb settings = None if config and config["trainer"].get("wandb_proxy", None): settings = wandb.Settings(https_proxy=config["trainer"]["wandb_proxy"]) wandb.init(project=project_name, name=experiment_name, config=config, settings=settings, mode='offline') self.logger["wandb"] = wandb if "mlflow" in default_backend: import os import mlflow MLFLOW_TRACKING_URI = os.environ.get("MLFLOW_TRACKING_URI", "sqlite:////tmp/mlruns.db") mlflow.set_tracking_uri(MLFLOW_TRACKING_URI) # Project_name is actually experiment_name in MLFlow # If experiment does not exist, will create a new experiment experiment = mlflow.set_experiment(project_name) mlflow.start_run(experiment_id=experiment.experiment_id, run_name=experiment_name) mlflow.log_params(_compute_mlflow_params_from_objects(config)) self.logger["mlflow"] = _MlflowLoggingAdapter() if "swanlab" in default_backend: import os import swanlab SWANLAB_API_KEY = os.environ.get("SWANLAB_API_KEY", None) SWANLAB_LOG_DIR = os.environ.get("SWANLAB_LOG_DIR", "swanlog") SWANLAB_MODE = os.environ.get("SWANLAB_MODE", "cloud") if SWANLAB_API_KEY: swanlab.login(SWANLAB_API_KEY) # NOTE: previous login information will be overwritten if config is None: config = {} # make sure config is not None, otherwise **config will raise error swanlab.init( project=project_name, experiment_name=experiment_name, config={"FRAMEWORK": "verl", **config}, logdir=SWANLAB_LOG_DIR, mode=SWANLAB_MODE, ) self.logger["swanlab"] = swanlab if "vemlp_wandb" in default_backend: import os import volcengine_ml_platform from volcengine_ml_platform import wandb as vemlp_wandb volcengine_ml_platform.init( ak=os.environ["VOLC_ACCESS_KEY_ID"], sk=os.environ["VOLC_SECRET_ACCESS_KEY"], region=os.environ["MLP_TRACKING_REGION"], ) vemlp_wandb.init( project=project_name, name=experiment_name, config=config, sync_tensorboard=True, ) self.logger["vemlp_wandb"] = vemlp_wandb if "tensorboard" in default_backend: self.logger["tensorboard"] = _TensorboardAdapter(project_name, experiment_name) if "console" in default_backend: from verl.utils.logger import LocalLogger self.console_logger = LocalLogger(print_to_console=True) self.logger["console"] = self.console_logger if "clearml" in default_backend: self.logger["clearml"] = ClearMLLogger(project_name, experiment_name, config) def log(self, data, step, backend=None): for default_backend, logger_instance in self.logger.items(): if backend is None or default_backend in backend: logger_instance.log(data=data, step=step) def __del__(self): if "wandb" in self.logger: self.logger["wandb"].finish(exit_code=0) if "swanlab" in self.logger: self.logger["swanlab"].finish() if "vemlp_wandb" in self.logger: self.logger["vemlp_wandb"].finish(exit_code=0) if "tensorboard" in self.logger: self.logger["tensorboard"].finish() if "clearnml" in self.logger: self.logger["clearnml"].finish() class ClearMLLogger: def __init__(self, project_name: str, experiment_name: 
class ClearMLLogger: def __init__(self, project_name: str, experiment_name: str, config): self.project_name = project_name self.experiment_name = experiment_name import clearml self._task: clearml.Task = clearml.Task.init( task_name=experiment_name, project_name=project_name, continue_last_task=True, output_uri=False, ) self._task.connect_configuration(config, name="Hyperparameters") def _get_logger(self): return self._task.get_logger() def log(self, data, step): import numpy as np import pandas as pd logger = self._get_logger() for k, v in data.items(): title, series = k.split("/", 1) if isinstance(v, int | float | np.floating | np.integer): logger.report_scalar( title=title, series=series, value=v, iteration=step, ) elif isinstance(v, pd.DataFrame): logger.report_table( title=title, series=series, table_plot=v, iteration=step, ) else: logger.warning( f'Trainer is attempting to log a value of "{v}" of type {type(v)} for key "{k}". This ' f"invocation of ClearML logger's function is incorrect so this attribute was dropped. " ) def finish(self): self._task.mark_completed() class _TensorboardAdapter: def __init__(self, project_name, experiment_name): import os from torch.utils.tensorboard import SummaryWriter tensorboard_dir = os.environ.get("TENSORBOARD_DIR", f"tensorboard_log/{project_name}/{experiment_name}") os.makedirs(tensorboard_dir, exist_ok=True) print(f"Saving tensorboard log to {tensorboard_dir}.") self.writer = SummaryWriter(tensorboard_dir) def log(self, data, step): for key in data: self.writer.add_scalar(key, data[key], step) def finish(self): self.writer.close() class _MlflowLoggingAdapter: def log(self, data, step): import mlflow results = {k.replace("@", "_at_"): v for k, v in data.items()} mlflow.log_metrics(metrics=results, step=step) def _compute_mlflow_params_from_objects(params) -> dict[str, Any]: if params is None: return {} return _flatten_dict(_transform_params_to_json_serializable(params, convert_list_to_dict=True), sep="/") def _transform_params_to_json_serializable(x, convert_list_to_dict: bool): _transform = partial(_transform_params_to_json_serializable, convert_list_to_dict=convert_list_to_dict) if dataclasses.is_dataclass(x): return _transform(dataclasses.asdict(x)) if isinstance(x, dict): return {k: _transform(v) for k, v in x.items()} if isinstance(x, list): if convert_list_to_dict: return {"list_len": len(x)} | {f"{i}": _transform(v) for i, v in enumerate(x)} else: return [_transform(v) for v in x] if isinstance(x, Path): return str(x) if isinstance(x, Enum): return x.value return x def _flatten_dict(raw: dict[str, Any], *, sep: str) -> dict[str, Any]: import pandas as pd ans = pd.json_normalize(raw, sep=sep).to_dict(orient="records")[0] assert isinstance(ans, dict) return ans @dataclasses.dataclass class ValidationGenerationsLogger: project_name: str = None experiment_name: str = None def log(self, loggers, samples, step): if "wandb" in loggers: self.log_generations_to_wandb(samples, step) if "swanlab" in loggers: self.log_generations_to_swanlab(samples, step) if "mlflow" in loggers: self.log_generations_to_mlflow(samples, step) if "clearml" in loggers: self.log_generations_to_clearml(samples, step) if "tensorboard" in loggers: self.log_generations_to_tensorboard(samples, step) if "vemlp_wandb" in loggers: self.log_generations_to_vemlp_wandb(samples, step) def log_generations_to_vemlp_wandb(self, samples, step): from volcengine_ml_platform import wandb as vemlp_wandb self._log_generations_to_wandb(samples, step, vemlp_wandb) def log_generations_to_wandb(self, samples, step): import wandb
self._log_generations_to_wandb(samples, step, wandb) def _log_generations_to_wandb(self, samples, step, wandb): """Log samples to wandb as a table""" # Create column names for all samples columns = ["step"] + sum( [[f"input_{i + 1}", f"output_{i + 1}", f"score_{i + 1}"] for i in range(len(samples))], [] ) if not hasattr(self, "validation_table"): # Initialize the table on first call self.validation_table = wandb.Table(columns=columns) # Create a new table with same columns and existing data # Workaround for https://github.com/wandb/wandb/issues/2981#issuecomment-1997445737 new_table = wandb.Table(columns=columns, data=self.validation_table.data) # Add new row with all data row_data = [] row_data.append(step) for sample in samples: row_data.extend(sample) new_table.add_data(*row_data) # Update reference and log wandb.log({"val/generations": new_table}, step=step) self.validation_table = new_table def log_generations_to_swanlab(self, samples, step): """Log samples to swanlab as text""" import swanlab swanlab_table = swanlab.echarts.Table() # Create column names headers = ["step", "input", "output", "score"] swanlab_row_list = [[step, *sample] for sample in samples] swanlab_table.add(headers=headers, rows=swanlab_row_list) # Log to swanlab swanlab.log({"val/generations": swanlab_table}, step=step) def log_generations_to_mlflow(self, samples, step): """Log validation generation to mlflow as artifacts""" # https://mlflow.org/docs/latest/api_reference/python_api/mlflow.html?highlight=log_artifact#mlflow.log_artifact import json import tempfile import mlflow try: with tempfile.TemporaryDirectory() as tmp_dir: validation_gen_step_file = Path(tmp_dir, f"val_step{step}.json") row_data = [] for sample in samples: data = {"input": sample[0], "output": sample[1], "score": sample[2]} row_data.append(data) with open(validation_gen_step_file, "w") as file: json.dump(row_data, file) mlflow.log_artifact(validation_gen_step_file) except Exception as e: print(f"WARNING: save validation generation file to mlflow failed with error {e}") def log_generations_to_clearml(self, samples, step): """Log validation generation to clearml as table""" import clearml import pandas as pd task: clearml.Task | None = clearml.Task.current_task() if task is None: return table = [ { "step": step, "input": sample[0], "output": sample[1], "score": sample[2], } for sample in samples ] logger = task.get_logger() logger.report_table( series="Validation generations", title="Validation", table_plot=pd.DataFrame.from_records(table), iteration=step, ) def log_generations_to_tensorboard(self, samples, step): """Log samples to tensorboard as text""" # Initialize tensorboard writer if not exists if not hasattr(self, "writer"): from torch.utils.tensorboard import SummaryWriter # Use the same directory structure as _TensorboardAdapter if self.project_name and self.experiment_name: default_dir = os.path.join("tensorboard_log", self.project_name, self.experiment_name) else: default_dir = "tensorboard_log" tensorboard_dir = os.environ.get("TENSORBOARD_DIR", default_dir) os.makedirs(tensorboard_dir, exist_ok=True) self.writer = SummaryWriter(log_dir=tensorboard_dir) # Format the samples data into readable text text_content = f"**Generation Results - Step {step}**\n\n" for i, sample in enumerate(samples): text_content += f"### Sample {i + 1}\n" # Assuming sample contains [input, output, score] if len(sample) >= 3: input_text, output_text, score = sample[0], sample[1], sample[2] text_content += f"**Input:** {input_text}\n\n" text_content += 
f"**Output:** {output_text}\n\n" text_content += f"**Score:** {score}\n\n" else: # Handle cases where sample format might be different text_content += f"**Data:** {sample}\n\n" text_content += "---\n\n" # Log to tensorboard as text self.writer.add_text("val/generations", text_content, step) # Flush to ensure data is written self.writer.flush() ================================================ FILE: verl_rl/verl/utils/ulysses.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Utilities for DeepSpeed Ulysses Sequence Parallelism. DeepSpeed Ulysses Paper: https://arxiv.org/abs/2309.14509 Inspired from: https://github.com/deepspeedai/DeepSpeed/blob/master/deepspeed/sequence/layer.py """ from typing import Any, Optional import torch import torch.distributed as dist from torch import Tensor from torch.distributed import ProcessGroup _ULYSSES_SEQUENCE_PARALLEL_GROUP = None def set_ulysses_sequence_parallel_group(group: dist.ProcessGroup): """ Set ulysses sequence parallel process group. """ global _ULYSSES_SEQUENCE_PARALLEL_GROUP _ULYSSES_SEQUENCE_PARALLEL_GROUP = group def get_ulysses_sequence_parallel_group() -> Optional[dist.ProcessGroup]: """ Get ulysses sequence parallel process group. """ global _ULYSSES_SEQUENCE_PARALLEL_GROUP return _ULYSSES_SEQUENCE_PARALLEL_GROUP def get_ulysses_sequence_parallel_world_size(group: ProcessGroup = None) -> int: """ Get ulysses sequence parallel world size. """ group = get_ulysses_sequence_parallel_group() if group is None else group return dist.get_world_size(group) if group else 1 def get_ulysses_sequence_parallel_rank(group: ProcessGroup = None) -> int: """ Get ulysses sequence parallel rank. """ group = get_ulysses_sequence_parallel_group() if group is None else group return dist.get_rank(group) if group else 0 def gather_seq_scatter_heads( x: Tensor, seq_dim: int, head_dim: int, unpadded_dim_size: int = 0, group: ProcessGroup = None, ) -> Tensor: """ A func to sync embedding input with alltoall in sequence parallel gather sequence dimension and scatter head dim: e.g. seq_dim: 1, head_dim: 2 [bsz, seq/n, h, ...] -> [bsz, seq, h/n, ...] """ group = get_ulysses_sequence_parallel_group() if group is None else group if not group: return x sp_world = get_ulysses_sequence_parallel_world_size(group) x = SeqAllToAll.apply(group, x, head_dim, seq_dim) if unpadded_dim_size and unpadded_dim_size % sp_world != 0: padding_size = x.size(seq_dim) - unpadded_dim_size x = _unpad_tensor(x, seq_dim, padding_size) return x def gather_heads_scatter_seq(x: Tensor, head_dim: int, seq_dim: int, group: ProcessGroup = None) -> Tensor: """ A func to sync attention result with alltoall in sequence parallel gather head dimension and scatter seq dim: e.g. seq_dim: 1, head_dim: 2 [bsz, seq, h/n, ...] -> [bsz, seq/n, h, ...] 
""" group = get_ulysses_sequence_parallel_group() if group is None else group if not group: return x dim_size = x.size(seq_dim) sp_world = get_ulysses_sequence_parallel_world_size(group) if dim_size % sp_world != 0: padding_size = sp_world - (dim_size % sp_world) x = _pad_tensor(x, seq_dim, padding_size) return SeqAllToAll.apply(group, x, seq_dim, head_dim, False) def _pad_tensor(x: Tensor, dim: int, padding_size: int) -> Tensor: shape = list(x.shape) shape[dim] = padding_size pad = torch.zeros(shape, dtype=x.dtype, device=x.device) return torch.cat([x, pad], dim=dim) def _unpad_tensor(x: Tensor, dim: int, padding_size: int) -> Tensor: slc = [slice(None)] * len(x.shape) slc[dim] = slice(0, -padding_size) return x[slc] def slice_input_tensor(x: Tensor, dim: int, padding: bool = True, group: ProcessGroup = None) -> Tensor: group = get_ulysses_sequence_parallel_group() if group is None else group sp_world_size = dist.get_world_size(group) sp_rank = get_ulysses_sequence_parallel_rank() dim_size = x.size(dim) # pad before slice if padding and dim_size % sp_world_size: padding_size = sp_world_size - (dim_size % sp_world_size) x = _pad_tensor(x, dim, padding_size) # slice the input tensor parts = x.size(dim) // sp_world_size slc = [slice(None)] * len(x.shape) slc[dim] = slice(sp_rank * parts, (sp_rank + 1) * parts) return x[slc].contiguous() def all_to_all_tensor( local_input: Tensor, scatter_dim: int, gather_dim: int, group: Optional[dist.ProcessGroup] = None, async_op: bool = False, ): group = get_ulysses_sequence_parallel_group() if group is None else group seq_world_size = dist.get_world_size(group) input_list = [t.contiguous() for t in torch.tensor_split(local_input, seq_world_size, scatter_dim)] output_list = [torch.empty_like(input_list[0]) for _ in range(seq_world_size)] comm = dist.all_to_all(output_list, input_list, group=group, async_op=async_op) if async_op: def wait(): comm.wait() return torch.cat(output_list, dim=gather_dim).contiguous() return wait return torch.cat(output_list, dim=gather_dim).contiguous() def all_gather_tensor(local_tensor: Tensor, group: Optional[dist.ProcessGroup] = None, async_op: bool = False): group = get_ulysses_sequence_parallel_group() if group is None else group sp_world_size = dist.get_world_size(group=group) output_shape = list(local_tensor.shape) output_shape[0] = output_shape[0] * sp_world_size output = torch.empty(output_shape, dtype=local_tensor.dtype, device=local_tensor.device) dist.all_gather_into_tensor(output, local_tensor, group=group, async_op=async_op) return output class SeqAllToAll(torch.autograd.Function): @staticmethod def forward( ctx: Any, group: dist.ProcessGroup, local_input: Tensor, scatter_dim: int, gather_dim: int, async_op: bool = False, ) -> Tensor: ctx.group = group ctx.scatter_dim = scatter_dim ctx.gather_dim = gather_dim ctx.async_op = async_op return all_to_all_tensor(local_input, scatter_dim, gather_dim, group, async_op) @staticmethod def backward(ctx: Any, *grad_output: Tensor) -> tuple[None, Tensor, None, None]: input_t = torch.cat(grad_output[1:], dim=ctx.gather_dim).contiguous() if ctx.async_op else grad_output[0] return ( None, all_to_all_tensor(input_t, ctx.gather_dim, ctx.scatter_dim, ctx.group, False), None, None, None, None, ) class Gather(torch.autograd.Function): @staticmethod def forward( ctx: Any, group: dist.ProcessGroup, local_tensor: Tensor, gather_dim: int, grad_scaler: bool = True, async_op=False, ) -> Tensor: ctx.group = group ctx.gather_dim = gather_dim ctx.grad_scaler = grad_scaler ctx.async_op = 
async_op sp_world_size = dist.get_world_size(group=group) ctx.sp_world_size = sp_world_size sp_rank = dist.get_rank(group=group) ctx.sp_rank = sp_rank local_shape = list(local_tensor.size()) split_size = local_shape[0] part_size = local_shape[gather_dim] # store original size ctx.part_size = part_size output = all_gather_tensor(local_tensor, group, async_op) return torch.cat(output.split(split_size, dim=0), dim=gather_dim) @staticmethod def backward(ctx: Any, grad_output: Tensor) -> Any: if ctx.grad_scaler: grad_output = grad_output * ctx.sp_world_size return ( None, grad_output.split(ctx.part_size, dim=ctx.gather_dim)[ctx.sp_rank].contiguous(), None, None, None, None, ) def gather_outpus_and_unpad(*args, **kwargs): raise RuntimeError( "please use verl.utils.ulysses.gather_outputs_and_unpad instead of verl.utils.ulysses.gather_outpus_and_unpad" ) def gather_outputs_and_unpad( x: Tensor, gather_dim: int, unpad_dim: int = None, padding_size: int = 0, grad_scaler: bool = True, group: Optional[dist.ProcessGroup] = None, ): """ Gather a tensor across a process group and optionally unpad its padded elements. Args: x (Tensor): Input tensor to gather. gather_dim (int): Dimension along which to gather across ranks. unpad_dim (int, optional): Dimension from which to remove padding. If None, no unpadding. padding_size (int): Number of padding elements to remove on `unpad_dim`. Defaults to 0. grad_scaler (bool): Whether to apply gradient scaling during gather. Defaults to True. group (ProcessGroup, optional): Process group for gathering. If None, uses `get_ulysses_sequence_parallel_group()`. If still None, returns `x` unchanged. Returns: Tensor: The gathered tensor, with padding removed if requested. """ group = get_ulysses_sequence_parallel_group() if group is None else group if group is None: return x x = Gather.apply(group, x, gather_dim, grad_scaler) if unpad_dim is not None: assert isinstance(padding_size, int), "padding size is not given or is not an integer" if padding_size == 0: return x x = _unpad_tensor(x, unpad_dim, padding_size) return x def ulysses_pad(input_ids_rmpad: torch.Tensor, position_ids_rmpad: Optional[torch.Tensor] = None, sp_size: int = 1): if position_ids_rmpad is not None: assert position_ids_rmpad.size(-2) == 1 assert input_ids_rmpad.size(-1) == position_ids_rmpad.size(-1) if sp_size <= 1: return input_ids_rmpad, position_ids_rmpad, 0 _, total_seq_len = input_ids_rmpad.shape pad_size = (sp_size - total_seq_len % sp_size) % sp_size if pad_size > 0: input_ids_rmpad = torch.nn.functional.pad(input_ids_rmpad, (0, pad_size), value=0) if position_ids_rmpad is not None: pad_pos_ids = torch.arange(pad_size, device=position_ids_rmpad.device).unsqueeze(0) if position_ids_rmpad.dim() == 3: pad_pos_ids = pad_pos_ids.unsqueeze(0).repeat(3, 1, 1) position_ids_rmpad = torch.cat((position_ids_rmpad, pad_pos_ids), dim=-1) return input_ids_rmpad, position_ids_rmpad, pad_size def ulysses_pad_and_slice_inputs( input_ids_rmpad: torch.Tensor, position_ids_rmpad: Optional[torch.Tensor] = None, sp_size: int = 1 ): """ Pad and slice input_ids to be divisible by sp_size Pad position_ids to be divisible by sp_size. Note both input_ids_rmpad and position_ids_rmpad will be padded and sliced. 
This is the pre-forward utility for ulysses sequence parallelism. Args: input_ids_rmpad: shape of [bsz, seqlen] position_ids_rmpad: shape of [bsz, seqlen], where bsz must be 1 sp_size (int): ulysses sequence parallelism size Returns: torch.Tensor: padded and sliced input_ids torch.Tensor: padded and sliced position_ids int: pad size """ input_ids_rmpad, position_ids_rmpad, pad_size = ulysses_pad(input_ids_rmpad, position_ids_rmpad, sp_size) input_ids_rmpad = slice_input_tensor(input_ids_rmpad, dim=1, padding=False) if position_ids_rmpad is not None: position_ids_rmpad = slice_input_tensor(position_ids_rmpad, dim=1, padding=False) return input_ids_rmpad, position_ids_rmpad, pad_size def validate_ulysses_config(num_heads, ulysses_sequence_size): if ulysses_sequence_size > 1: assert num_heads % ulysses_sequence_size == 0, ( f"num_heads ({num_heads}) must be divisible by ulysses sequence size({ulysses_sequence_size})" )
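A small, pure-torch shape sketch of the padding step (ulysses_pad itself needs no process group; the sizes below are arbitrary):

import torch

input_ids = torch.arange(10).unsqueeze(0)      # (1, 10)
position_ids = torch.arange(10).unsqueeze(0)   # (1, 10); size(-2) == 1 as the assert requires
padded_ids, padded_pos, pad_size = ulysses_pad(input_ids, position_ids, sp_size=4)
# pad_size = (4 - 10 % 4) % 4 = 2, so both tensors grow from length 10 to 12
# and each of the 4 sp ranks can later own an equal 3-token slice
print(padded_ids.shape, padded_pos.shape, pad_size)  # torch.Size([1, 12]) torch.Size([1, 12]) 2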
================================================ FILE: verl_rl/verl/utils/vllm_utils.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from msgspec import field from packaging import version as vs from vllm.lora.models import LoRAModel from vllm.lora.request import LoRARequest from vllm.lora.utils import get_adapter_absolute_path from vllm.lora.worker_manager import LRUCacheWorkerLoRAManager from verl.third_party.vllm import get_version # To support different vLLM versions, we add each model into SUPPORTED_MOE_MODELS separately to avoid triggering # unsupported-model issues. SUPPORTED_MOE_MODELS = [] try: from vllm.model_executor.models.deepseek_v2 import DeepseekV2ForCausalLM, DeepseekV3ForCausalLM SUPPORTED_MOE_MODELS.append(DeepseekV2ForCausalLM) SUPPORTED_MOE_MODELS.append(DeepseekV3ForCausalLM) except ImportError: pass try: from vllm.model_executor.models.mixtral import MixtralForCausalLM SUPPORTED_MOE_MODELS.append(MixtralForCausalLM) except ImportError: pass try: from vllm.model_executor.models.qwen2_moe import Qwen2MoeForCausalLM SUPPORTED_MOE_MODELS.append(Qwen2MoeForCausalLM) except ImportError: pass try: from vllm.model_executor.models.qwen3_moe import Qwen3MoeForCausalLM SUPPORTED_MOE_MODELS.append(Qwen3MoeForCausalLM) except ImportError: pass try: from vllm.model_executor.models.kimi_vl import KimiVLForConditionalGeneration SUPPORTED_MOE_MODELS.append(KimiVLForConditionalGeneration) except ImportError: pass def patch_vllm_moe_model_weight_loader(model): # this is a workaround to load the weights of the vllm fused MoE model # it works around a bug in vllm 0.8.2 # all the weights are supposed to have a weight_loader, but the moe weights # do not have a weight_loader, so we need to patch it # (True, 'model.embed_tokens.weight') # (True, 'model.layers.0.self_attn.qkv_proj.weight') # (True, 'model.layers.0.self_attn.qkv_proj.bias') # (True, 'model.layers.0.self_attn.o_proj.weight') # (True, 'model.layers.0.mlp.gate.weight') # (True, 'model.layers.0.mlp.shared_expert.gate_up_proj.weight') # (True, 'model.layers.0.mlp.shared_expert.down_proj.weight') # (False, 'model.layers.0.mlp.shared_expert_gate.weight') use default # (False, 'model.layers.0.input_layernorm.weight') use default # (False, 'model.layers.0.post_attention_layernorm.weight') use default # (False, 'model.layers.0.mlp.experts.w13_weight') use mlp.experts.weight_loader # (False, 'model.layers.0.mlp.experts.w2_weight') use mlp.experts.weight_loader # Define MLP attribute mapping for different model types MLP_ATTR_MAPPING = { MixtralForCausalLM: "block_sparse_moe", } DEFAULT_MLP_ATTR = "mlp" if not isinstance(model, tuple(SUPPORTED_MOE_MODELS)): return model = getattr(model, "model", None) or getattr(model, "language_model", None) if model is None: raise ValueError("The provided model does not have a valid 'model' or 'language_model' attribute.") for layer in model.layers: mlp_attr = MLP_ATTR_MAPPING.get(type(model), DEFAULT_MLP_ATTR) mlp = getattr(layer, mlp_attr) param_dict = dict(mlp.named_parameters()) for name, param in param_dict.items(): if "w13_weight" in name or "w2_weight" in name: param.weight_loader = mlp.experts.weight_loader class TensorLoRARequest(LoRARequest): peft_config: dict = field(default=None) lora_tensors: dict = field(default=None) class VLLMHijack: @staticmethod def hijack(): def hijack__load_adapter(self, lora_request: TensorLoRARequest) -> LoRAModel: """ based on vllm.lora.worker_manager.WorkerLoRAManager._load_adapter, supports loading an adapter from LoRA tensors Reason: VLLM does not support adding LoRA from tensors directly. It only supports adding LoRA via file paths. To synchronize the LoRA tensors of the actor model, we need to find a workaround to enable VLLM to load memory-based LoRA tensors.
""" try: supported_lora_modules = self._adapter_manager.supported_lora_modules packed_modules_mapping = self._adapter_manager.packed_modules_mapping expected_lora_modules: list[str] = [] for module in supported_lora_modules: if module in packed_modules_mapping: expected_lora_modules.extend(packed_modules_mapping[module]) else: expected_lora_modules.append(module) expected_lora_modules = list(set(expected_lora_modules)) lora_tensors = None from vllm.lora.peft_helper import PEFTHelper if isinstance(lora_request, TensorLoRARequest): peft_config = lora_request.peft_config lora_tensors = lora_request.lora_tensors peft_helper = PEFTHelper.from_dict(peft_config) else: lora_path = get_adapter_absolute_path(lora_request.lora_path) peft_helper = PEFTHelper.from_local_dir(lora_path, self.max_position_embeddings) # Validates the LoRA configuration against requirements before # loading weights, throwing an exception if validation fails. peft_helper.validate_legal(self.lora_config) # For some models like Qwen2VL, we need to use hf_to_vllm_mapper # to ensure correct loading of lora weights. model = self._adapter_manager.model hf_to_vllm_mapper = None if hasattr(model, "hf_to_vllm_mapper") and model.hf_to_vllm_mapper is not None: hf_to_vllm_mapper = model.hf_to_vllm_mapper if isinstance(lora_request, TensorLoRARequest): lora = self._lora_model_cls.from_lora_tensors( lora_model_id=lora_request.lora_int_id, tensors=lora_tensors, peft_helper=peft_helper, device="cpu", dtype=self.lora_config.lora_dtype, embeddings=None, target_embedding_padding=self.vocab_size + self.lora_config.lora_extra_vocab_size, embedding_modules=self.embedding_modules, embedding_padding_modules=self.embedding_padding_modules, weights_mapper=hf_to_vllm_mapper, ) else: lora = self._lora_model_cls.from_local_checkpoint( lora_path, expected_lora_modules, peft_helper=peft_helper, lora_model_id=lora_request.lora_int_id, device="cpu", dtype=self.lora_config.lora_dtype, target_embedding_padding=self.vocab_size + self.lora_config.lora_extra_vocab_size, embedding_modules=self.embedding_modules, embedding_padding_modules=self.embedding_padding_modules, weights_mapper=hf_to_vllm_mapper, ) except Exception as e: raise e if lora.extra_vocab_size > self.lora_config.lora_extra_vocab_size: raise ValueError( f"LoRA added vocab size {lora.extra_vocab_size} is greater than lora_extra_vocab_size " f"{self.lora_config.lora_extra_vocab_size}." ) return lora def do_hijack(target_cls, target_method_name, hooking_method): setattr(target_cls, target_method_name, hooking_method) do_hijack(LRUCacheWorkerLoRAManager, "_load_adapter", hijack__load_adapter) def is_version_ge(pkg: str = "vllm", minver: str = "0.7.3"): """check if the package version is greater than or equal to the minimum version""" return vs.parse(get_version(pkg)) >= vs.parse(minver) ================================================ FILE: verl_rl/verl/version/version ================================================ 0.5.0 ================================================ FILE: verl_rl/verl/workers/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: verl_rl/verl/workers/actor/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .base import BasePPOActor from .dp_actor import DataParallelPPOActor __all__ = ["BasePPOActor", "DataParallelPPOActor"] ================================================ FILE: verl_rl/verl/workers/actor/base.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The base class for Actor """ from abc import ABC, abstractmethod import torch from verl import DataProto __all__ = ["BasePPOActor"] class BasePPOActor(ABC): def __init__(self, config): """The base class for PPO actor Args: config (DictConfig): a config passed to the PPOActor. We expect the type to be DictConfig (https://omegaconf.readthedocs.io/), but it can be any namedtuple in general. """ super().__init__() self.config = config @abstractmethod def compute_log_prob(self, data: DataProto) -> torch.Tensor: """Compute log probabilities given a batch of data. Args: data (DataProto): a batch of data represented by DataProto. It must contain the keys ```input_ids```, ```attention_mask``` and ```position_ids```. Returns: DataProto: a DataProto containing the key ```log_probs``` """ pass @abstractmethod def update_policy(self, data: DataProto) -> dict: """Update the policy with an iterator of DataProto Args: data (DataProto): an iterator over the DataProto returned by ```make_minibatch_iterator``` Returns: Dict: a dictionary containing anything. Typically, it contains statistics collected while updating the model, such as ```loss``` and ```grad_norm```. """ pass
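Since BasePPOActor only fixes this two-method contract, a toy subclass that satisfies the interface (dummy logic, purely illustrative) could look like:

import torch
from verl import DataProto
from verl.workers.actor import BasePPOActor

class NoopActor(BasePPOActor):
    """Toy subclass that satisfies the interface with dummy outputs."""

    def compute_log_prob(self, data: DataProto) -> torch.Tensor:
        # a real actor runs a forward pass over input_ids/attention_mask/position_ids
        return torch.zeros_like(data.batch["responses"], dtype=torch.float32)

    def update_policy(self, data: DataProto) -> dict:
        # a real actor iterates minibatches and applies PPO updates
        return {"loss": 0.0, "grad_norm": 0.0}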
================================================ FILE: verl_rl/verl/workers/actor/dp_actor.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Single Process Actor """ import logging import os import torch from torch import nn from torch.distributed.fsdp import FullyShardedDataParallel as FSDP import verl.utils.torch_functional as verl_F from verl import DataProto from verl.trainer.ppo.core_algos import agg_loss, compute_policy_loss, get_policy_loss_fn, kl_penalty from verl.utils.device import get_device_name, is_cuda_available, is_npu_available from verl.utils.fsdp_utils import FSDPModule, fsdp2_clip_grad_norm_ from verl.utils.profiler import GPUMemoryLogger from verl.utils.py_functional import append_to_dict from verl.utils.seqlen_balancing import prepare_dynamic_batch, restore_dynamic_batch from verl.utils.torch_functional import logprobs_from_logits from verl.utils.ulysses import gather_outputs_and_unpad, ulysses_pad, ulysses_pad_and_slice_inputs from verl.workers.actor import BasePPOActor if is_cuda_available: from flash_attn.bert_padding import index_first_axis, pad_input, rearrange, unpad_input elif is_npu_available: from transformers.integrations.npu_flash_attention import index_first_axis, pad_input, rearrange, unpad_input __all__ = ["DataParallelPPOActor"] logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) class DataParallelPPOActor(BasePPOActor): def __init__(self, config, actor_module: nn.Module, actor_optimizer: torch.optim.Optimizer = None): """When optimizer is None, it is Reference Policy""" super().__init__(config) self.actor_module = actor_module self.actor_optimizer = actor_optimizer self.use_remove_padding = self.config.get("use_remove_padding", False) if torch.distributed.get_rank() == 0: print(f"Actor use_remove_padding={self.use_remove_padding}") self.use_fused_kernels = self.config.get("use_fused_kernels", False) if torch.distributed.get_rank() == 0: print(f"Actor use_fused_kernels={self.use_fused_kernels}") self.ulysses_sequence_parallel_size = self.config.ulysses_sequence_parallel_size self.use_ulysses_sp = self.ulysses_sequence_parallel_size > 1 if self.config.entropy_from_logits_with_chunking: entropy_from_logits = verl_F.entropy_from_logits_with_chunking else: entropy_from_logits = verl_F.entropy_from_logits self.compute_entropy_from_logits = ( torch.compile(entropy_from_logits, dynamic=True) if self.config.get("use_torch_compile", True) # use torch compile by default else entropy_from_logits ) self.device_name = get_device_name() def _forward_micro_batch( self, micro_batch, temperature, calculate_entropy=False ) -> tuple[torch.Tensor, torch.Tensor]: """ Returns: entropy: # (bs, response_len) log_probs: # (bs, response_len) """ response_length = micro_batch["responses"].size(-1) multi_modal_inputs = {} if "multi_modal_inputs" in micro_batch.keys(): if "image_bound" in micro_batch["multi_modal_inputs"][0]: # minicpm-o logic for key in micro_batch["multi_modal_inputs"][0].keys(): multi_modal_inputs[key] = [inputs[key] for inputs in micro_batch["multi_modal_inputs"]] else: for key in
micro_batch["multi_modal_inputs"][0].keys(): multi_modal_inputs[key] = torch.cat( [inputs[key] for inputs in micro_batch["multi_modal_inputs"]], dim=0 ) with torch.autocast(device_type=self.device_name, dtype=torch.bfloat16): input_ids = micro_batch["input_ids"] batch_size, seqlen = input_ids.shape attention_mask = micro_batch["attention_mask"] position_ids = micro_batch["position_ids"] entropy = None if position_ids.dim() == 3: # qwen2vl mrope position_ids = position_ids.transpose(0, 1) # (bsz, 3, seqlen) -> (3, bsz, seqlen) if self.use_remove_padding: input_ids_rmpad, indices, cu_seqlens, *_ = unpad_input( input_ids.unsqueeze(-1), attention_mask ) # input_ids_rmpad (total_nnz, ...) input_ids_rmpad = input_ids_rmpad.transpose(0, 1) # (1, total_nnz) # unpad the position_ids to align the rotary if position_ids.dim() == 3: position_ids_rmpad = ( index_first_axis(rearrange(position_ids, "c b s ... -> (b s) c ..."), indices) .transpose(0, 1) .unsqueeze(1) ) # (3, bsz, seqlen) -> (3, 1, bsz * seqlen) else: position_ids_rmpad = index_first_axis( rearrange(position_ids.unsqueeze(-1), "b s ... -> (b s) ..."), indices ).transpose(0, 1) if "image_bound" in multi_modal_inputs: from verl.utils.dataset.vision_utils import process_multi_modal_inputs_for_minicpmo multi_modal_inputs = process_multi_modal_inputs_for_minicpmo( input_ids, attention_mask, position_ids, cu_seqlens, multi_modal_inputs ) # for compute the log_prob input_ids_rmpad_rolled = torch.roll(input_ids_rmpad, shifts=-1, dims=1) # (1, total_nnz) # pad and slice the inputs if sp > 1 if self.use_ulysses_sp: is_vlm_model = "multi_modal_inputs" in micro_batch.keys() if is_vlm_model: # vlm model's inputs will be sliced after embedding input_ids_rmpad, position_ids_rmpad, pad_size = ulysses_pad( input_ids_rmpad, position_ids_rmpad=position_ids_rmpad, sp_size=self.ulysses_sequence_parallel_size, ) else: input_ids_rmpad, position_ids_rmpad, pad_size = ulysses_pad_and_slice_inputs( input_ids_rmpad, position_ids_rmpad=position_ids_rmpad, sp_size=self.ulysses_sequence_parallel_size, ) input_ids_rmpad_rolled, _, _ = ulysses_pad_and_slice_inputs( input_ids_rmpad_rolled, position_ids_rmpad=None, sp_size=self.ulysses_sequence_parallel_size, ) input_ids_rmpad_rolled = input_ids_rmpad_rolled.squeeze(0) # ((total_nnz / sp) + pad) # only pass input_ids and position_ids to enable flash_attn_varlen extra_args = {} if self.use_fused_kernels: extra_args["temperature"] = temperature extra_args["return_dict"] = True output = self.actor_module( input_ids=input_ids_rmpad, attention_mask=None, position_ids=position_ids_rmpad, **multi_modal_inputs, use_cache=False, **extra_args, ) # prevent model thinks we are generating if self.use_fused_kernels: log_probs = output.log_probs.squeeze(0) # (total_nnz,) entropy_rmpad = output.entropy.squeeze(0) # (total_nnz,) else: logits_rmpad = output.logits.squeeze(0) # (total_nnz, vocab_size) logits_rmpad.div_(temperature) # if use_sp: ((total_nnz / sp) + pad) ; if not use_sp: (batch, seqlen) inplace_backward = True if calculate_entropy: inplace_backward = False log_probs = logprobs_from_logits( logits=logits_rmpad, labels=input_ids_rmpad_rolled, inplace_backward=inplace_backward, ) # compute entropy if calculate_entropy: if not self.config.entropy_checkpointing: entropy_rmpad = self.compute_entropy_from_logits(logits_rmpad) # ((total_nnz / sp) + pad) else: entropy_rmpad = torch.utils.checkpoint.checkpoint( self.compute_entropy_from_logits, logits_rmpad ) # gather log_prob if sp > 1 if self.use_ulysses_sp: # gather and unpad for the 
ulysses sp log_probs = gather_outputs_and_unpad( log_probs, gather_dim=0, unpad_dim=0, padding_size=pad_size, ) if calculate_entropy: entropy_rmpad = gather_outputs_and_unpad( entropy_rmpad, gather_dim=0, unpad_dim=0, padding_size=pad_size, ) # pad back to (bsz, seqlen) if calculate_entropy: full_entropy = pad_input( hidden_states=entropy_rmpad.unsqueeze(-1), indices=indices, batch=batch_size, seqlen=seqlen, ) full_log_probs = pad_input( hidden_states=log_probs.unsqueeze(-1), indices=indices, batch=batch_size, seqlen=seqlen, ) # only return response part: if calculate_entropy: entropy = full_entropy.squeeze(-1)[:, -response_length - 1 : -1] # (bsz, response_length) log_probs = full_log_probs.squeeze(-1)[:, -response_length - 1 : -1] # (bsz, response_length) else: # not using rmpad and no ulysses sp extra_args = {} if self.use_fused_kernels: extra_args["temperature"] = temperature extra_args["return_dict"] = True output = self.actor_module( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, **multi_modal_inputs, use_cache=False, **extra_args, ) # prevent model thinks we are generating if self.use_fused_kernels: log_probs = output.log_probs[:, -response_length - 1 : -1] entropy = output.entropy[:, -response_length - 1 : -1] # (bsz, response_length) else: logits = output.logits logits.div_(temperature) logits = logits[:, -response_length - 1 : -1, :] # (bsz, response_length, vocab_size) log_probs = logprobs_from_logits(logits, micro_batch["responses"]) if calculate_entropy: if not self.config.entropy_checkpointing: entropy = verl_F.entropy_from_logits(logits) # (bsz, response_length) else: entropy = torch.utils.checkpoint.checkpoint(verl_F.entropy_from_logits, logits) return entropy, log_probs def _optimizer_step(self): assert self.config.grad_clip is not None if isinstance(self.actor_module, FSDP): grad_norm = self.actor_module.clip_grad_norm_(max_norm=self.config.grad_clip) elif isinstance(self.actor_module, FSDPModule): grad_norm = fsdp2_clip_grad_norm_(self.actor_module.parameters(), max_norm=self.config.grad_clip) else: grad_norm = torch.nn.utils.clip_grad_norm_(self.actor_module.parameters(), max_norm=self.config.grad_clip) # if grad_norm is not finite, skip the update if not torch.isfinite(grad_norm): print(f"WARN: rank {torch.distributed.get_rank()} grad_norm is not finite: {grad_norm}") self.actor_optimizer.zero_grad() else: self.actor_optimizer.step() return grad_norm @GPUMemoryLogger(role="dp actor", logger=logger) def compute_log_prob(self, data: DataProto, calculate_entropy=False) -> torch.Tensor: """Compute the log probability of the responses given input_ids, attention_mask and position_ids Args: data (DataProto): a DataProto containing keys ``input_ids``: tensor of shape [batch_size, sequence_length]. torch.int64. Note that input_ids is the concatenation of prompt and response. Note that ``sequence_length = prompt_length + response_length``. ``attention_mask``: tensor of shape [batch_size, sequence_length]. torch.int64. ``position_ids``: tensor of shape [batch_size, sequence_length]. torch.int64. ``responses``: tensor of shape [batch_size, response_length]. torch.int64. 
Returns: torch.Tensor: the log_prob tensor """ # set to eval self.actor_module.eval() micro_batch_size = data.meta_info["micro_batch_size"] temperature = data.meta_info["temperature"] # temperature must be in the data.meta_info to avoid silent error use_dynamic_bsz = data.meta_info["use_dynamic_bsz"] has_multi_modal_inputs = "multi_modal_inputs" in data.non_tensor_batch.keys() select_keys = ["responses", "input_ids", "attention_mask", "position_ids"] non_tensor_select_keys = ["multi_modal_inputs"] if has_multi_modal_inputs else [] data = data.select(batch_keys=select_keys, non_tensor_batch_keys=non_tensor_select_keys) if use_dynamic_bsz: max_token_len = data.meta_info["max_token_len"] * self.ulysses_sequence_parallel_size micro_batches, batch_idx_list = prepare_dynamic_batch(data, max_token_len=max_token_len) else: micro_batches = data.split(micro_batch_size) log_probs_lst = [] entropy_lst = [] for micro_batch in micro_batches: model_inputs = {**micro_batch.batch, **micro_batch.non_tensor_batch} with torch.no_grad(): entropy, log_probs = self._forward_micro_batch( model_inputs, temperature=temperature, calculate_entropy=calculate_entropy ) log_probs_lst.append(log_probs) if calculate_entropy: entropy_lst.append(entropy) log_probs = torch.concat(log_probs_lst, dim=0) entropys = None if calculate_entropy: entropys = torch.concat(entropy_lst, dim=0) if use_dynamic_bsz: log_probs = restore_dynamic_batch(log_probs, batch_idx_list) if calculate_entropy: entropys = restore_dynamic_batch(entropys, batch_idx_list) return log_probs, entropys @GPUMemoryLogger(role="dp actor", logger=logger) def update_policy(self, data: DataProto): # make sure we are in training mode self.actor_module.train() temperature = data.meta_info["temperature"] # temperature must be in the data.meta_info to avoid silent error select_keys = [ "responses", "response_mask", "input_ids", "attention_mask", "position_ids", "old_log_probs", "advantages", ] if self.config.use_kl_loss: select_keys.append("ref_log_prob") has_multi_modal_inputs = "multi_modal_inputs" in data.non_tensor_batch.keys() non_tensor_select_keys = ["multi_modal_inputs"] if has_multi_modal_inputs else [] data = data.select(batch_keys=select_keys, non_tensor_batch_keys=non_tensor_select_keys) # Split to make minibatch iterator for updating the actor # See PPO paper for details. 
https://arxiv.org/abs/1707.06347 mini_batches = data.split(self.config.ppo_mini_batch_size) metrics = {} for _ in range(self.config.ppo_epochs): for batch_idx, mini_batch in enumerate(mini_batches): if self.config.use_dynamic_bsz: max_token_len = self.config.ppo_max_token_len_per_gpu * self.ulysses_sequence_parallel_size micro_batches, _ = prepare_dynamic_batch(mini_batch, max_token_len=max_token_len) else: self.gradient_accumulation = ( self.config.ppo_mini_batch_size // self.config.ppo_micro_batch_size_per_gpu ) micro_batches = mini_batch.split(self.config.ppo_micro_batch_size_per_gpu) self.actor_optimizer.zero_grad() for micro_batch in micro_batches: micro_batch_metrics = {} model_inputs = {**micro_batch.batch, **micro_batch.non_tensor_batch} response_mask = model_inputs["response_mask"] old_log_prob = model_inputs["old_log_probs"] advantages = model_inputs["advantages"] clip_ratio = self.config.clip_ratio clip_ratio_low = ( self.config.clip_ratio_low if self.config.clip_ratio_low is not None else clip_ratio ) clip_ratio_high = ( self.config.clip_ratio_high if self.config.clip_ratio_high is not None else clip_ratio ) clip_ratio_c = self.config.get("clip_ratio_c", 3.0) entropy_coeff = self.config.entropy_coeff loss_agg_mode = self.config.loss_agg_mode # all return: (bsz, response_length) calculate_entropy = False if entropy_coeff != 0: calculate_entropy = True entropy, log_prob = self._forward_micro_batch( model_inputs, temperature=temperature, calculate_entropy=calculate_entropy ) loss_mode = self.config.policy_loss.get("loss_mode", "vanilla") if loss_mode == "vanilla": pg_loss, pg_clipfrac, ppo_kl, pg_clipfrac_lower = compute_policy_loss( old_log_prob=old_log_prob, log_prob=log_prob, advantages=advantages, response_mask=response_mask, cliprange=clip_ratio, cliprange_low=clip_ratio_low, cliprange_high=clip_ratio_high, clip_ratio_c=clip_ratio_c, loss_agg_mode=loss_agg_mode, ) else: policy_loss_fn = get_policy_loss_fn(loss_mode) pg_loss, pg_clipfrac, ppo_kl, pg_clipfrac_lower = policy_loss_fn( old_log_prob=old_log_prob, log_prob=log_prob, advantages=advantages, response_mask=response_mask, loss_agg_mode=loss_agg_mode, config=self.config, ) if entropy_coeff != 0: entropy_loss = agg_loss(loss_mat=entropy, loss_mask=response_mask, loss_agg_mode=loss_agg_mode) # compute policy loss policy_loss = pg_loss - entropy_loss * entropy_coeff else: policy_loss = pg_loss if self.config.use_kl_loss: ref_log_prob = model_inputs["ref_log_prob"] # compute kl loss kld = kl_penalty( logprob=log_prob, ref_logprob=ref_log_prob, kl_penalty=self.config.kl_loss_type ) kl_loss = agg_loss(loss_mat=kld, loss_mask=response_mask, loss_agg_mode=loss_agg_mode) policy_loss = policy_loss + kl_loss * self.config.kl_loss_coef micro_batch_metrics["actor/kl_loss"] = kl_loss.detach().item() micro_batch_metrics["actor/kl_coef"] = self.config.kl_loss_coef if self.config.use_dynamic_bsz: # relative to the dynamic bsz loss = policy_loss * (response_mask.shape[0] / self.config.ppo_mini_batch_size) else: loss = policy_loss / self.gradient_accumulation loss.backward() micro_batch_metrics.update( { "actor/pg_loss": pg_loss.detach().item(), "actor/pg_clipfrac": pg_clipfrac.detach().item(), "actor/ppo_kl": ppo_kl.detach().item(), "actor/pg_clipfrac_lower": pg_clipfrac_lower.detach().item(), } ) append_to_dict(metrics, micro_batch_metrics) grad_norm = self._optimizer_step() mini_batch_metrics = {"actor/grad_norm": grad_norm.detach().item()} append_to_dict(metrics, mini_batch_metrics) self.actor_optimizer.zero_grad() return metrics
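To make the clipped objective concrete, a self-contained sketch of the vanilla PPO ratio/clip computation on dummy tensors; this mirrors what compute_policy_loss does in spirit, with the tensors and the token-mean aggregation being illustrative assumptions rather than the function's exact signature:

import torch

old_log_prob = torch.tensor([[-1.0, -2.0, -1.5]])
log_prob = torch.tensor([[-0.8, -2.5, -1.5]])
advantages = torch.tensor([[1.0, -1.0, 0.5]])
response_mask = torch.tensor([[1.0, 1.0, 0.0]])  # last token is padding

ratio = torch.exp(log_prob - old_log_prob)  # pi_new / pi_old per token
cliprange = 0.2
surr1 = ratio * advantages
surr2 = torch.clamp(ratio, 1.0 - cliprange, 1.0 + cliprange) * advantages
pg_loss_mat = -torch.min(surr1, surr2)  # pessimistic (clipped) objective per token
pg_loss = (pg_loss_mat * response_mask).sum() / response_mask.sum()  # "token-mean" aggregation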
================================================ FILE: verl_rl/verl/workers/actor/megatron_actor.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Megatron Actor. In the Megatron actor, the main difference is: 1. We only make minibatches. Note that our model doesn't have to be `MegatronModule` because we don't share embeddings in the last layer. """ import itertools import logging import os from functools import partial from typing import Iterable import torch import torch.distributed from megatron.core import parallel_state as mpu from megatron.core.distributed import finalize_model_grads from megatron.core.optimizer import DistributedOptimizer from megatron.core.pipeline_parallel import get_forward_backward_func from omegaconf import OmegaConf from torch import nn from verl import DataProto from verl.trainer.ppo.core_algos import agg_loss, compute_policy_loss, get_policy_loss_fn, kl_penalty from verl.utils.device import get_device_id, get_torch_device from verl.utils.megatron.pipeline_parallel import make_batch_generator from verl.utils.megatron.tensor_parallel import vocab_parallel_entropy, vocab_parallel_log_probs_from_logits from verl.utils.megatron_utils import get_model_config from verl.utils.profiler import GPUMemoryLogger from verl.utils.profiler.profile import Profiler from verl.utils.py_functional import append_to_dict from verl.utils.seqlen_balancing import get_reverse_idx, rearrange_micro_batches from verl.utils.torch_functional import broadcast_dict_tensor from verl.workers.actor import BasePPOActor __all__ = ["MegatronPPOActor"] logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) class MegatronPPOActor(BasePPOActor): def __init__( self, config, model_config, hf_config, tf_config, actor_module: nn.ModuleList, actor_optimizer: DistributedOptimizer, ): """MegatronPPOActor class. This class implements the simple PPO logic when the model is built with Megatron. Args: config (OmegaConf): the basic config that contains the hyper-parameters of PPO Actor. It must contain ``ppo_micro_batch_size_per_gpu``: micro batch size when updating ppo. ``ppo_mini_batch_size``: minibatch size when updating ppo using the batch data. ``ppo_epochs``: number of epochs to update the actor using the batch data. ``shuffle``: whether to shuffle the data after each ppo epoch. ``clip_ratio``: clip ratio of the ppo algorithm. See https://arxiv.org/abs/1707.06347. ``entropy_coeff``: entropy coefficient of the PPO loss. See https://arxiv.org/abs/1707.06347. model_config (OmegaConf): model configuration.
It must contain ``model_config.vocab_size`` and ``model_config.hidden_size`` hf_config (PretrainedConfig): huggingface config tf_config (TransformerConfig): mcore transformer config actor_module (nn.ModuleList): actor module is a ModuleList that contains a list of nn.Module in this pp stage. Each nn.Module in this rank holds a vpp module chunk. See https://arxiv.org/pdf/2104.04473.pdf for more details. The actor module has some constraints to follow in order to use the updating logic implemented here 1. It must implement unpad_input before any computation and pad_input after all the computation. Removing padding is an optimization that removes the padding tokens. See the unpad_input and pad_input functions in flash-attn (https://github.com/Dao-AILab/flash-attention/blob/main/flash_attn/bert_padding.py). 2. Each pp stage must return the hidden state with the same shape [total_nnz, 1, hidden_size], where total_nnz is the number of valid tokens in this batch. If sequence parallel is enabled, the size of the hidden state is [total_nnz // tp, 1, hidden_size]. actor_optimizer (DistributedOptimizer): currently, we only support DistributedOptimizer in Megatron. It implements a ZeRO-1 optimizer that shards the optimizer state across dp ranks. >>> from megatron.training import get_model >>> from megatron.optimizer import get_megatron_optimizer >>> actor_module = get_model(megatron_actor_model_provider, wrap_with_ddp=True) >>> actor_module = nn.ModuleList(actor_module) >>> actor_optimizer = get_megatron_optimizer(actor_module) >>> actor = MegatronPPOActor(config=config, >>> model_config=actor_model_config, >>> hf_config=hf_config, >>> tf_config=tf_config, >>> actor_module=actor_module, >>> actor_optimizer=actor_optimizer) """ super().__init__(config) self._validate_config(config) self.model_config = model_config self.hf_config = hf_config self.tf_config = tf_config self.actor_module = actor_module self.actor_optimizer: DistributedOptimizer = actor_optimizer self.prof = Profiler(self.config.profile) self.use_fused_kernels = self.config.get("use_fused_kernels", False) if self.use_fused_kernels: from verl.models.mcore.model_forward_fused import patch_fused_forward for model in self.actor_module: patch_fused_forward(model) self.optimizer_step_args = OmegaConf.create( { "skip_grad": None, "overlap_dp_param_comm": False, "overlap_dp_grad_comm": False, "gradient_accumulation_steps": 1, "sequence_parallel": self.tf_config.sequence_parallel, "DDP_impl": "local", "layernorm_allreduce_bucket_threshold": 0, "pipeline_model_parallel_split_rank": None, "reduce_grads_use_alltoall": False, } ) config = get_model_config(self.actor_module[0]) print(config) config.finalize_model_grads_func = finalize_model_grads def _validate_config(self, config) -> None: """Validate config options not implemented for Megatron backend""" assert config.get("ulysses_sequence_parallel_size", 1) == 1 if config.get("shuffle", False): assert config.data_loader_seed is not None, "If shuffle dataloader, seed must be manually set" if config.megatron.tensor_model_parallel_size == 1: print("[Warning] Because actor tp size == 1, set sp to False") config.megatron.sequence_parallel = False self.config = config @GPUMemoryLogger(role="megatron actor", logger=logger) def compute_log_prob(self, data: DataProto, calculate_entropy=False) -> torch.Tensor: """Compute the log probability of the responses given input_ids, attention_mask and position_ids Args: data (DataProto): a DataProto containing keys ``input_ids``: tensor of shape [batch_size, sequence_length].
torch.int64. Note that input_ids is the concatenation of prompt and response, so that ``sequence_length = prompt_length + response_length``. ``attention_mask``: tensor of shape [batch_size, sequence_length]. torch.int64. ``position_ids``: tensor of shape [batch_size, sequence_length]. torch.int64. ``responses``: tensor of shape [batch_size, response_length]. torch.int64. Returns: torch.Tensor: the log_prob tensor (together with an entropy tensor when ``calculate_entropy`` is True) """ data.to(get_device_id()) data.batch = data.batch.contiguous() use_dynamic_bsz = data.meta_info.get("use_dynamic_bsz", False) micro_batch_size = data.meta_info.get("micro_batch_size", None) max_token_len = data.meta_info.get("max_token_len", None) assert micro_batch_size is not None, "micro batch size is needed for forward compute" if use_dynamic_bsz: assert max_token_len is not None, "max_token_len must be set when use_dynamic_bsz is True" max_token_len = max_token_len * self.config.megatron.context_parallel_size def compute_logprobs_fn(output, data, use_dynamic_bsz=False, indices=None): response = data["responses"] response_length = response.size(1) log_probs = output["log_probs"][:, -response_length - 1 : -1].contiguous() return {"log_probs": log_probs} # We recompute old_log_prob by default here. # TODO (zhangchi.usc1992): actually, this function should only return log_prob and this logic should be # handled by the user outside recompute_old_log_prob = self.config.get("recompute_old_log_prob", True) entropys = torch.Tensor() if recompute_old_log_prob: select_keys = ["responses", "input_ids", "attention_mask", "position_ids"] batch = data.select(batch_keys=select_keys).batch input_ids = batch["input_ids"] batch_size = input_ids.size(0) response = batch["responses"] response_length = response.size(1) with torch.no_grad(): output = self.forward_backward_batch( data, forward_only=True, post_process_fn=compute_logprobs_fn, calculate_entropy=calculate_entropy, use_dynamic_bsz=use_dynamic_bsz, micro_batch_size=micro_batch_size, max_token_len=max_token_len, ) if mpu.is_pipeline_last_stage(ignore_virtual=True): # only on last rank. It should be on every tp rank if calculate_entropy: log_probs = [o[0]["log_probs"] for o in output["output"]] # (bs, seq_size) else: log_probs = [o["log_probs"] for o in output["output"]] # (bs, seq_size) log_probs = torch.cat(log_probs, dim=0).to(torch.float32) if use_dynamic_bsz: indices = output["indices"] indices = list(itertools.chain.from_iterable(indices)) assert len(indices) == log_probs.size(0), f"{len(indices)} vs. {log_probs.size()}" revert_indices = torch.tensor(get_reverse_idx(indices), dtype=torch.long) log_probs = log_probs[revert_indices] else: log_probs = torch.empty( size=(batch_size, response_length), dtype=torch.float32, device=input_ids.device ) # broadcast across pp ranks torch.distributed.broadcast( tensor=log_probs, src=mpu.get_pipeline_model_parallel_last_rank(), group=mpu.get_pipeline_model_parallel_group(), async_op=False, ) if calculate_entropy: # Note that o[0] is metrics, o[1] is entropy if mpu.is_pipeline_last_stage(ignore_virtual=True): entropys = torch.cat([o[1] for o in output["output"]], dim=0) entropys = entropys.to(torch.float32) if use_dynamic_bsz: indices = output["indices"] indices = list(itertools.chain.from_iterable(indices)) assert len(indices) == entropys.size(0), f"{len(indices)} vs. {entropys.size()}" revert_indices = torch.tensor(get_reverse_idx(indices), dtype=torch.long) entropys = entropys[revert_indices] else: entropys = torch.empty( size=(batch_size, response_length), dtype=torch.float32, device=input_ids.device ) # broadcast across pp ranks torch.distributed.broadcast( tensor=entropys, src=mpu.get_pipeline_model_parallel_last_rank(), group=mpu.get_pipeline_model_parallel_group(), async_op=False, ) # add empty cache after each compute get_torch_device().empty_cache() return log_probs, entropys def make_minibatch_iterator(self, data: DataProto) -> Iterable[DataProto]: """Make minibatch iterator for updating the actor Args: data (DataProto): a DataProto containing keys ``input_ids``: tensor of shape [batch_size, sequence_length]. torch.int64, where ``sequence_length = prompt_length + response_length`` ``attention_mask``: tensor of shape [batch_size, sequence_length]. torch.int64 ``position_ids``: tensor of shape [batch_size, sequence_length]. torch.int64 ``responses``: tensor of shape [batch_size, response_length]. torch.int64. Note that responses = input_ids[:, -response_length:] ``old_log_probs``: tensor of shape [batch_size, response_length]. torch.float32. The log probability of responses. ``advantages``: tensor of shape [batch_size, response_length]. torch.float32. The advantages of responses. See PPO paper for details. https://arxiv.org/abs/1707.06347 Returns: Iterable[DataProto]: an iterator that yields mini-batches of the selected keys """ select_keys = [ "responses", "input_ids", "attention_mask", "response_mask", "position_ids", "old_log_probs", "advantages", ] if self.config.use_kl_loss: select_keys.append("ref_log_prob") self.has_multi_modal_inputs = "multi_modal_inputs" in data.non_tensor_batch.keys() if self.has_multi_modal_inputs: data = data.select(select_keys, ["multi_modal_inputs"]) else: data = data.select(batch_keys=select_keys) return data.make_iterator( mini_batch_size=self.config.ppo_mini_batch_size, epochs=self.config.ppo_epochs, seed=self.config.data_loader_seed, dataloader_kwargs={"shuffle": self.config.shuffle}, ) def forward_backward_batch( self, data: DataProto, forward_only=False, post_process_fn=None, calculate_entropy=False, use_dynamic_bsz=False, micro_batch_size=None, max_token_len=None, mini_batch_size=None, ): """ We assume: - The model takes input: (input_ids, attention_mask, position_ids). No rmpad for the input - The communication shape is (total_nnz_pad_to_sp // tp_size, 1, hidden_size) if sequence parallel is enabled """ # broadcast from last pp rank to all other pp ranks # TODO: actually, we just need to control the sampling order.
mini_batch = data broadcast_dict_tensor( mini_batch.batch, src=mpu.get_pipeline_model_parallel_last_rank(), group=mpu.get_pipeline_model_parallel_group(), ) # split into micro-batches mini_batch.batch["attention_mask"] = mini_batch.batch["attention_mask"].to(bool) self.has_multi_modal_inputs = "multi_modal_inputs" in mini_batch.non_tensor_batch.keys() if self.has_multi_modal_inputs: mini_batch.batch["multi_modal_inputs"] = mini_batch.non_tensor_batch["multi_modal_inputs"] mini_batch.batch["multi_modal_inputs_idx"] = torch.Tensor( list(range(len(mini_batch.non_tensor_batch["multi_modal_inputs"]))) ).to(torch.int64) if mini_batch.batch["position_ids"].dim() == 3: # qwen2vl mrope [bs, 3, seq_len] mini_batch.batch["position_ids"] = mini_batch.batch["position_ids"][ :, 0 ] # mcore patch recomputes qwen2vl's pos ids during forward indices = None if use_dynamic_bsz: assert max_token_len is not None, "max_token_len must be set when use_dynamic_bsz is True" vpp_size = mpu.get_virtual_pipeline_model_parallel_world_size() if vpp_size is not None and vpp_size > 1: microbatch_group_size_per_vp_stage = self.tf_config.microbatch_group_size_per_vp_stage micro_batches, indices = rearrange_micro_batches( batch=mini_batch.batch, num_batches_divided_by=microbatch_group_size_per_vp_stage, max_token_len=max_token_len, ) assert len(micro_batches) % self.tf_config.microbatch_group_size_per_vp_stage == 0, ( f"the number of micro_batches ({len(micro_batches)}) must be divisible by microbatch_group_size_per_vp_stage " f"{microbatch_group_size_per_vp_stage} for the megatron backend" ) else: micro_batches, indices = rearrange_micro_batches(batch=mini_batch.batch, max_token_len=max_token_len) total_seqlen = max_token_len else: assert micro_batch_size is not None, ( "micro_batch_size must be passed in when not using dynamic batch size" ) micro_batches = mini_batch.batch.split(micro_batch_size) seq_len = micro_batches[0]["input_ids"].shape[1] total_seqlen = micro_batch_size * seq_len # compute input shapes for pp stages n_micro_batch = len(micro_batches) forward_backward_func = get_forward_backward_func() def loss_func(output, data, meta_info): # For memory efficiency, we move the entropy calculation to compute_log_prob (forward_only == True) device = output["log_probs"].device metrics = {} if forward_only: if post_process_fn is None: pass # metrics["logits"] = output else: stats = post_process_fn(output, data) metrics.update(stats) if not calculate_entropy: return torch.tensor(1.0, device=device), metrics responses = data["responses"] response_length = responses.size(1) response_mask = data["response_mask"].to(bool) loss_agg_mode = self.config.loss_agg_mode # compute policy loss log_prob = output["log_probs"][:, -response_length - 1 : -1].contiguous() ret_entropy = None stats = {} if not forward_only: old_log_prob = data["old_log_probs"] advantages = data["advantages"] clip_ratio = self.config.clip_ratio clip_ratio_low = self.config.clip_ratio_low if self.config.clip_ratio_low is not None else clip_ratio clip_ratio_high = self.config.clip_ratio_high if self.config.clip_ratio_high is not None else clip_ratio clip_ratio_c = self.config.get("clip_ratio_c", 3.0) entropy_coeff = self.config.entropy_coeff loss_agg_mode = self.config.loss_agg_mode loss_mode = self.config.policy_loss.get("loss_mode", "vanilla") if loss_mode == "vanilla": pg_loss, pg_clipfrac, ppo_kl, pg_clipfrac_lower = compute_policy_loss( old_log_prob=old_log_prob, log_prob=log_prob, advantages=advantages, response_mask=response_mask,
cliprange=clip_ratio, cliprange_low=clip_ratio_low, cliprange_high=clip_ratio_high, clip_ratio_c=clip_ratio_c, loss_agg_mode=loss_agg_mode, ) else: policy_loss_fn = get_policy_loss_fn(loss_mode) pg_loss, pg_clipfrac, ppo_kl, pg_clipfrac_lower = policy_loss_fn( old_log_prob=old_log_prob, log_prob=log_prob, advantages=advantages, response_mask=response_mask, loss_agg_mode=loss_agg_mode, config=self.config, ) stats.update( { "actor/pg_loss": pg_loss.detach().item(), "actor/pg_clipfrac": pg_clipfrac.detach().item(), "actor/ppo_kl": ppo_kl.detach().item(), "actor/pg_clipfrac_lower": pg_clipfrac_lower.detach().item(), } ) policy_loss = pg_loss if calculate_entropy: entropy = output["entropy"][:, -response_length - 1 : -1].contiguous() if not forward_only: entropy_loss = agg_loss(loss_mat=entropy, loss_mask=response_mask, loss_agg_mode=loss_agg_mode) entropy_coeff = meta_info["entropy_coeff"] policy_loss = pg_loss - entropy_coeff * entropy_loss else: ret_entropy = entropy if forward_only: policy_loss = torch.tensor(1.0, device=device) else: if self.config.use_kl_loss: ref_log_prob = data["ref_log_prob"] # compute kl loss kld = kl_penalty(logprob=log_prob, ref_logprob=ref_log_prob, kl_penalty=self.config.kl_loss_type) kl_loss = agg_loss(loss_mat=kld, loss_mask=response_mask, loss_agg_mode=self.config.loss_agg_mode) policy_loss = policy_loss + kl_loss * self.config.kl_loss_coef metrics["actor/kl_loss"] = kl_loss.detach().item() metrics["actor/kl_coef"] = self.config.kl_loss_coef # return loss and stats append_to_dict(metrics, stats) return policy_loss, [metrics, ret_entropy] def forward_step(batch_iter, model): batch = next(batch_iter) input_ids = batch["input_ids"] attention_mask = batch["attention_mask"].to(bool) position_ids = batch["position_ids"] multi_modal_inputs = {} if "multi_modal_inputs" in batch: for key in batch["multi_modal_inputs"][0].keys(): idxs = batch["multi_modal_inputs_idx"] mmi = batch["multi_modal_inputs"] multi_modal_inputs[key] = torch.cat( [mmi[idx].get(key) for idx in idxs if mmi[idx].get(key) is not None], dim=0 ) responses = batch["responses"] response_length = responses.size(1) label = position_ids.clone() label[:, -response_length - 1 : -1] = responses label_mask = attention_mask.clone() label_mask[:, : -response_length - 1] = False label_mask[:, -1] = False from verl.models.mcore import get_mcore_forward_fn, get_mcore_forward_fused_fn if self.use_fused_kernels: forward_fn = get_mcore_forward_fused_fn(self.hf_config) # returns a dict with log_probs and entropy output = forward_fn( model, input_ids, position_ids, attention_mask, sequence_parallel=self.tf_config.sequence_parallel, multi_modal_inputs=multi_modal_inputs, labels=label, labels_mask=label_mask, ) else: forward_fn = get_mcore_forward_fn(self.hf_config) def logits_processor(logits, label, label_mask): assert logits.shape[:2] == label.shape[:2] assert label.shape == label_mask.shape ret = {} if calculate_entropy: entropy = vocab_parallel_entropy(logits) ret["entropy"] = entropy log_probs = vocab_parallel_log_probs_from_logits(logits, label) log_probs = log_probs.masked_fill(~label_mask, 0.0) ret["log_probs"] = log_probs return ret logits_processor_args = {"label": label, "label_mask": label_mask} output = forward_fn( model, input_ids, attention_mask, position_ids, sequence_parallel=self.tf_config.sequence_parallel, multi_modal_inputs=multi_modal_inputs, logits_processor=logits_processor, logits_processor_args=logits_processor_args, ) if forward_only: meta_info = None else: clip_ratio_c = self.config.get("clip_ratio_c", 3.0) meta_info = { "clip_ratio": self.config.clip_ratio, "entropy_coeff": self.config.entropy_coeff, "clip_ratio_c": clip_ratio_c, } return output, partial(loss_func, data=batch, meta_info=meta_info) # batch should be a list of batches inside micro-batches batch_generator = make_batch_generator(micro_batches, vpp_size=len(self.actor_module)) # TODO: we may use the new schedule instead # for flash-attn: (seq_len, batch_size, hidden_size) = (mbs*seq_len, 1, hidden_size) if mpu.get_pipeline_model_parallel_world_size() > 1: losses_reduced = forward_backward_func( forward_step_func=forward_step, data_iterator=batch_generator, model=self.actor_module, num_microbatches=n_micro_batch, seq_length=total_seqlen, # no use when input_shapes was set micro_batch_size=1, # no use when input_shapes was set forward_only=forward_only, ) else: losses_reduced = forward_backward_func( forward_step_func=forward_step, data_iterator=batch_generator, model=self.actor_module, num_microbatches=n_micro_batch, seq_length=total_seqlen, # in use for pp = 1 micro_batch_size=1, # in use for pp = 1 forward_only=forward_only, ) # losses_reduced contains the stats returned from loss_func if self.has_multi_modal_inputs: data.batch.pop("multi_modal_inputs") data.batch.pop("multi_modal_inputs_idx") data.non_tensor_batch.pop("multi_modal_inputs") losses_reduced = {"output": losses_reduced} if use_dynamic_bsz: losses_reduced["indices"] = indices return losses_reduced @GPUMemoryLogger(role="megatron actor", logger=logger) def update_policy(self, dataloader: Iterable[DataProto]) -> dict: """Update the policy with an iterator of DataProto Args: dataloader (Iterable[DataProto]): an iterator over the DataProto returned by ``make_minibatch_iterator`` The keys of each data batch are described in ``make_minibatch_iterator``. Returns: Dict: a dictionary containing the statistics. Note that the statistics are only valid in the last pp stage and users have to combine the output in each dp rank manually. """ metrics = {} self.prof.start() for data in dataloader: data.to(get_device_id()) self.actor_optimizer.zero_grad() # use use_contiguous_buffers_in_local_ddp and no overlap_dp_param_comm for chunk in self.actor_module: # if using the distributed optimizer, zeroing the grad buffer is handled by the optimizer chunk.zero_grad_buffer() calculate_entropy = self.config.entropy_coeff != 0 if data.meta_info.get("micro_batch_size", None) is not None: micro_batch_size = data.meta_info["micro_batch_size"] else: micro_batch_size = self.config.ppo_micro_batch_size_per_gpu max_token_len = None if self.config.use_dynamic_bsz: max_token_len = self.config.ppo_max_token_len_per_gpu * self.config.megatron.context_parallel_size metric_micro_batch = self.forward_backward_batch( data, calculate_entropy=calculate_entropy, use_dynamic_bsz=self.config.use_dynamic_bsz, micro_batch_size=micro_batch_size, max_token_len=max_token_len, mini_batch_size=self.config.ppo_mini_batch_size, ) metric_micro_batch = metric_micro_batch["output"] for metric in metric_micro_batch: # Note that metric[0] is metrics and metric[1] is entropy append_to_dict(metrics, metric[0]) # append the metric from this micro-batch to global metrics.
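            # Note (assumption inferred from the unpacking below and Megatron-LM's
            # DistributedOptimizer API): step() is expected to return a
            # (update_successful, grad_norm, num_zeros_in_grad) triple, and recent
            # Megatron performs the post-update parameter allgather inside step()
            # itself, which is why no explicit allgather follows.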
update_successful, grad_norm, num_zeros_in_grad = self.actor_optimizer.step() data = {"actor/grad_norm": grad_norm} append_to_dict(metrics, data) if update_successful: # allgather already execute in optimizer.step in new megatron pass else: raise NotImplementedError self.prof.step() # add empty cache after each compute self.prof.stop_and_save() self.prof.stop_trace() get_torch_device().empty_cache() return metrics ================================================ FILE: verl_rl/verl/workers/critic/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .base import BasePPOCritic from .dp_critic import DataParallelPPOCritic __all__ = ["BasePPOCritic", "DataParallelPPOCritic"] ================================================ FILE: verl_rl/verl/workers/critic/base.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Base class for a critic """ from abc import ABC, abstractmethod import torch from verl import DataProto __all__ = ["BasePPOCritic"] class BasePPOCritic(ABC): def __init__(self, config): super().__init__() self.config = config @abstractmethod def compute_values(self, data: DataProto) -> torch.Tensor: """Compute values""" pass @abstractmethod def update_critic(self, data: DataProto): """Update the critic""" pass ================================================ FILE: verl_rl/verl/workers/critic/dp_critic.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
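# Usage sketch (hedged): the constructor signature below is from this file; the
# FSDP wrapping and the AdamW choice are illustrative assumptions, not prescribed here.
#
#     critic_module = FSDP(value_model, ...)  # value-head model wrapped for sharding
#     critic_optimizer = torch.optim.AdamW(critic_module.parameters(), lr=1e-5)
#     critic = DataParallelPPOCritic(config, critic_module, critic_optimizer)
#     values = critic.compute_values(data)   # per-token values, masked to response tokens
#     metrics = critic.update_critic(data)   # clipped value-loss update, returns stats dict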
""" Implement a multiprocess PPOCritic """ import logging import os import torch import torch.distributed from torch import nn, optim from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from verl import DataProto from verl.trainer.ppo import core_algos from verl.utils.device import get_device_name, is_cuda_available, is_npu_available from verl.utils.fsdp_utils import FSDPModule, fsdp2_clip_grad_norm_ from verl.utils.profiler import GPUMemoryLogger from verl.utils.py_functional import append_to_dict from verl.utils.seqlen_balancing import prepare_dynamic_batch, restore_dynamic_batch from verl.utils.torch_functional import masked_mean from verl.utils.ulysses import gather_outputs_and_unpad, ulysses_pad_and_slice_inputs from verl.workers.critic import BasePPOCritic if is_cuda_available: from flash_attn.bert_padding import index_first_axis, pad_input, rearrange, unpad_input elif is_npu_available: from transformers.integrations.npu_flash_attention import index_first_axis, pad_input, rearrange, unpad_input logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) class DataParallelPPOCritic(BasePPOCritic): def __init__(self, config, critic_module: nn.Module, critic_optimizer: optim.Optimizer): super().__init__(config=config) self.critic_module = critic_module self.critic_optimizer = critic_optimizer self.use_remove_padding = self.config.model.get("use_remove_padding", False) print(f"Critic use_remove_padding={self.use_remove_padding}") self.ulysses_sequence_parallel_size = self.config.get("ulysses_sequence_parallel_size", 1) self.device_name = get_device_name() def _forward_micro_batch(self, micro_batch): response_length = micro_batch["responses"].size(-1) multi_modal_inputs = {} if "multi_modal_inputs" in micro_batch.keys(): for key in micro_batch["multi_modal_inputs"][0].keys(): multi_modal_inputs[key] = torch.cat( [inputs[key] for inputs in micro_batch["multi_modal_inputs"]], dim=0 ) with torch.autocast(device_type=self.device_name, dtype=torch.bfloat16): input_ids = micro_batch["input_ids"] batch, seqlen = input_ids.shape attention_mask = micro_batch["attention_mask"] position_ids = micro_batch["position_ids"] if position_ids.dim() == 3: # qwen2vl mrope position_ids = position_ids.transpose(0, 1) if self.use_remove_padding: input_ids_rmpad, indices, *_ = unpad_input( input_ids.unsqueeze(-1), attention_mask ) # input_ids_rmpad (total_nnz, ...) input_ids_rmpad = input_ids_rmpad.transpose(0, 1) # (1, total_nnz) # unpad the position_ids to align the rotary if position_ids.dim() == 3: position_ids_rmpad = ( index_first_axis(rearrange(position_ids, "c b s ... -> (b s) c ..."), indices) .transpose(0, 1) .unsqueeze(1) ) # (3, bsz, seqlen) -> (3, 1, bsz * seqlen) else: position_ids_rmpad = index_first_axis( rearrange(position_ids.unsqueeze(-1), "b s ... 
-> (b s) ..."), indices ).transpose(0, 1) # pad and slice the inputs if sp > 1 if self.ulysses_sequence_parallel_size > 1: input_ids_rmpad, position_ids_rmpad, pad_size = ulysses_pad_and_slice_inputs( input_ids_rmpad, position_ids_rmpad, sp_size=self.ulysses_sequence_parallel_size ) # only pass input_ids and position_ids to enable flash_attn_varlen output = self.critic_module( input_ids=input_ids_rmpad, attention_mask=None, position_ids=position_ids_rmpad, **multi_modal_inputs, use_cache=False, ) # prevent model thinks we are generating if hasattr(self.critic_module, "v_head"): # For trl.AutoModelForCausalLMWithValueHead values_rmpad = output[2].squeeze(0).unsqueeze(-1) else: values_rmpad = output.logits values_rmpad = values_rmpad.squeeze(0) # (total_nnz) # gather output if sp > 1 if self.ulysses_sequence_parallel_size > 1: values_rmpad = gather_outputs_and_unpad( values_rmpad, gather_dim=0, unpad_dim=0, padding_size=pad_size ) # pad it back values = pad_input(values_rmpad, indices=indices, batch=batch, seqlen=seqlen).squeeze(-1) values = values[:, -response_length - 1 : -1] else: output = self.critic_module( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, **multi_modal_inputs, use_cache=False, ) # prevent model thinks we are generating if hasattr(self.critic_module, "v_head"): # For trl.AutoModelForCausalLMWithValueHead values = output[2] else: values = output.logits values = values[:, -response_length - 1 : -1].squeeze(-1) return values def _optimizer_step(self): assert self.config.grad_clip is not None if isinstance(self.critic_module, FSDP): grad_norm = self.critic_module.clip_grad_norm_(self.config.grad_clip) elif isinstance(self.critic_module, FSDPModule): grad_norm = fsdp2_clip_grad_norm_(self.critic_module.parameters(), max_norm=self.config.grad_clip) else: grad_norm = torch.nn.utils.clip_grad_norm_(self.critic_module.parameters(), max_norm=self.config.grad_clip) # if grad_norm is not finite, skip the update if not torch.isfinite(grad_norm): print(f"WARN: grad_norm is not finite: {grad_norm}") self.critic_optimizer.zero_grad() else: self.critic_optimizer.step() return grad_norm @GPUMemoryLogger(role="dp critic", logger=logger) def compute_values(self, data: DataProto) -> torch.Tensor: self.critic_module.eval() micro_batch_size = data.meta_info["micro_batch_size"] use_dynamic_bsz = data.meta_info["use_dynamic_bsz"] has_multi_modal_inputs = "multi_modal_inputs" in data.non_tensor_batch.keys() select_keys = ["responses", "input_ids", "response_mask", "attention_mask", "position_ids"] non_tensor_select_keys = ["multi_modal_inputs"] if has_multi_modal_inputs else [] data = data.select(batch_keys=select_keys, non_tensor_batch_keys=non_tensor_select_keys) if use_dynamic_bsz: max_token_len = data.meta_info["max_token_len"] * self.ulysses_sequence_parallel_size micro_batches, batch_idx_list = prepare_dynamic_batch(data, max_token_len=max_token_len) else: micro_batches = data.split(micro_batch_size) values_lst = [] for micro_batch in micro_batches: model_inputs = {**micro_batch.batch, **micro_batch.non_tensor_batch} with torch.no_grad(): values = self._forward_micro_batch(model_inputs) values_lst.append(values) values = torch.concat(values_lst, dim=0) if use_dynamic_bsz: values = restore_dynamic_batch(values, batch_idx_list) response_mask = data.batch["response_mask"] values = values * response_mask # Only action tokens have values return values @GPUMemoryLogger(role="dp critic", logger=logger) def update_critic(self, data: DataProto): # make sure we are in 
training mode self.critic_module.train() metrics = {} select_keys = ["input_ids", "responses", "response_mask", "attention_mask", "position_ids", "values", "returns"] has_multi_modal_inputs = "multi_modal_inputs" in data.non_tensor_batch.keys() non_tensor_select_keys = ["multi_modal_inputs"] if has_multi_modal_inputs else [] data = data.select(batch_keys=select_keys, non_tensor_batch_keys=non_tensor_select_keys) # Split to make a minibatch iterator for updating the critic # See PPO paper for details. https://arxiv.org/abs/1707.06347 mini_batches = data.split(self.config.ppo_mini_batch_size) for _ in range(self.config.ppo_epochs): for batch_idx, mini_batch in enumerate(mini_batches): if self.config.use_dynamic_bsz: max_token_len = self.config.ppo_max_token_len_per_gpu * self.ulysses_sequence_parallel_size micro_batches, _ = prepare_dynamic_batch(mini_batch, max_token_len=max_token_len) else: self.gradient_accumulation = ( self.config.ppo_mini_batch_size // self.config.ppo_micro_batch_size_per_gpu ) micro_batches = mini_batch.split(self.config.ppo_micro_batch_size_per_gpu) self.critic_optimizer.zero_grad() for micro_batch in micro_batches: micro_batch_metrics = {} model_inputs = {**micro_batch.batch, **micro_batch.non_tensor_batch} response_mask = model_inputs["response_mask"] values = model_inputs["values"] returns = model_inputs["returns"] vpreds = self._forward_micro_batch(model_inputs) vf_loss, vf_clipfrac = core_algos.compute_value_loss( vpreds=vpreds, values=values, returns=returns, response_mask=response_mask, cliprange_value=self.config.cliprange_value, loss_agg_mode=self.config.loss_agg_mode, ) if self.config.use_dynamic_bsz: # relative to the dynamic bsz loss = vf_loss * (response_mask.shape[0] / self.config.ppo_mini_batch_size) else: loss = vf_loss / self.gradient_accumulation loss.backward() micro_batch_metrics.update( { "critic/vf_loss": vf_loss.detach().item(), "critic/vf_clipfrac": vf_clipfrac.detach().item(), "critic/vpred_mean": masked_mean(vpreds, response_mask).detach().item(), } ) append_to_dict(metrics, micro_batch_metrics) grad_norm = self._optimizer_step() mini_batch_metrics = {"critic/grad_norm": grad_norm.detach().item()} append_to_dict(metrics, mini_batch_metrics) self.critic_optimizer.zero_grad() return metrics ================================================ FILE: verl_rl/verl/workers/critic/megatron_critic.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
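# Usage sketch (hedged): argument names follow the constructor below; the model
# provider and optimizer construction are illustrative assumptions, in the spirit
# of the MegatronPPOActor docstring earlier in this dump.
#
#     critic_module = nn.ModuleList(get_model(megatron_critic_model_provider, wrap_with_ddp=True))
#     critic = MegatronPPOCritic(config, model_config, hf_config, tf_config,
#                                critic_module, critic_optimizer, critic_optimizer_config)
#     values = critic.compute_values(data)  # computed on the last pp stage, broadcast to all
#     metrics = critic.update_critic(critic.make_minibatch_iterator(data))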
""" Implement a multiprocess PPOCritic """ import itertools import logging import os from functools import partial from typing import Iterable import torch import torch.distributed from megatron.core import parallel_state as mpu from megatron.core.optimizer import DistributedOptimizer, OptimizerConfig from megatron.core.pipeline_parallel import get_forward_backward_func from omegaconf import OmegaConf from torch import nn from verl import DataProto from verl.trainer.ppo import core_algos from verl.utils.device import get_device_id, get_torch_device from verl.utils.megatron.pipeline_parallel import make_batch_generator from verl.utils.profiler import GPUMemoryLogger from verl.utils.py_functional import append_to_dict from verl.utils.seqlen_balancing import get_reverse_idx, rearrange_micro_batches from verl.utils.torch_functional import broadcast_dict_tensor, masked_mean from verl.workers.critic import BasePPOCritic logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) class MegatronPPOCritic(BasePPOCritic): def __init__( self, config, model_config, hf_config, tf_config, critic_module: nn.ModuleList, critic_optimizer: DistributedOptimizer, critic_optimizer_config: OptimizerConfig, ): super().__init__(config=config) self._validate_config(config) self.model_config = model_config self.hf_config = hf_config # huggingface config self.tf_config = tf_config # mcore transformer config self.critic_module = critic_module self.critic_optimizer = critic_optimizer self.critic_optimizer_config = critic_optimizer_config # we create a separate nametuple for optimizer step so that global args won't affect it. self.optimizer_step_args = OmegaConf.create( { "skip_grad": None, "overlap_dp_param_comm": False, "overlap_dp_grad_comm": False, "gradient_accumulation_steps": 1, "sequence_parallel": self.tf_config.sequence_parallel, "DDP_impl": "local", "layernorm_allreduce_bucket_threshold": 0, "pipeline_model_parallel_split_rank": None, "reduce_grads_use_alltoall": False, } ) def _validate_config(self, config) -> None: """Validate config options not implemented for Megatron backend""" assert config.get("ulysses_sequence_parallel_size", 1) == 1 if config.shuffle: assert config.data_loader_seed is not None, "If shuffle dataloader, seed must be manually set" if config.megatron.tensor_model_parallel_size == 1: print("[Warining] Because critic tp size == 1, set sp to False") config.megatron.sequence_parallel = False self.config = config @GPUMemoryLogger("megatron critic", logger=logger) def compute_values(self, data: DataProto) -> DataProto: data.to(get_device_id()) responses = data.batch["responses"] attention_mask = data.batch["attention_mask"] use_dynamic_bsz = data.meta_info.get("use_dynamic_bsz", False) micro_batch_size = data.meta_info.get("micro_batch_size", None) max_token_len = data.meta_info.get("max_token_len", None) assert micro_batch_size is not None, "micro batch size is needed for forward compute" if use_dynamic_bsz: assert max_token_len is not None, "max_token_len must be set when use_dynamic_bsz is True" max_token_len = max_token_len * self.config.megatron.context_parallel_size response_length = responses.size(1) with torch.no_grad(): output = self.forward_backward_batch( data=data, forward_only=True, use_dynamic_bsz=use_dynamic_bsz, micro_batch_size=micro_batch_size, max_token_len=max_token_len, mini_batch_size=None, ) if mpu.is_pipeline_last_stage(ignore_virtual=True): # only on last rank. 
It should be on every tp rank values = [o["vpreds"] for o in output["output"]] # (bs, seq_size, vocab_size) values = torch.cat(values, dim=0).to(torch.float32) if use_dynamic_bsz: indices = output["indices"] indices = list(itertools.chain.from_iterable(indices)) assert len(indices) == values.size(0), f"{len(indices)} vs. {values.size()}" revert_indices = torch.tensor(get_reverse_idx(indices), dtype=torch.long) values = values[revert_indices] else: values = torch.empty_like(attention_mask, dtype=torch.float32) # each tp rank should contain the same value values = values[ :, -response_length - 1 : -1 ] # Values are predicted at the ends of prefixes, e.g., the last prompt token response_mask = attention_mask[:, -response_length:] values = values * response_mask # Only action tokens have values values = values.contiguous() # sync among pp ranks torch.distributed.broadcast( tensor=values, src=mpu.get_pipeline_model_parallel_last_rank(), group=mpu.get_pipeline_model_parallel_group(), ) # add empty cache after each compute get_torch_device().empty_cache() return values def make_minibatch_iterator(self, data: DataProto) -> Iterable[DataProto]: select_keys = ["input_ids", "responses", "attention_mask", "position_ids", "values", "returns"] data = data.select(batch_keys=select_keys) return data.make_iterator( mini_batch_size=self.config.ppo_mini_batch_size, epochs=self.config.ppo_epochs, seed=self.config.data_loader_seed, dataloader_kwargs={"shuffle": self.config.shuffle}, ) def forward_backward_batch( self, data: DataProto, forward_only=False, use_dynamic_bsz=False, micro_batch_size=None, max_token_len=None, mini_batch_size=None, ): # broadcast from last pp rank to all other pp ranks mini_batch = data mini_batch.to(get_device_id()) mini_batch.batch = mini_batch.batch.contiguous() broadcast_dict_tensor( mini_batch.batch, src=mpu.get_pipeline_model_parallel_last_rank(), group=mpu.get_pipeline_model_parallel_group(), ) # split into micro-batches mini_batch.batch["attention_mask"] = mini_batch.batch["attention_mask"].to(bool) indices = None if use_dynamic_bsz: assert max_token_len is not None, "max_token_len must be set when use_dynamic_bsz is True" vpp_size = mpu.get_virtual_pipeline_model_parallel_world_size() if vpp_size is not None and vpp_size > 1: microbatch_group_size_per_vp_stage = self.tf_config.microbatch_group_size_per_vp_stage micro_batches, indices = rearrange_micro_batches( batch=mini_batch.batch, num_batches_divided_by=microbatch_group_size_per_vp_stage, max_token_len=max_token_len, ) assert len(micro_batches) % self.tf_config.microbatch_group_size_per_vp_stage == 0, ( f"the number of micro_batches ({len(micro_batches)}) must be divisible by microbatch_group_size_per_vp_stage " f"{microbatch_group_size_per_vp_stage} for the megatron backend" ) else: micro_batches, indices = rearrange_micro_batches(batch=mini_batch.batch, max_token_len=max_token_len) total_seqlen = max_token_len else: assert micro_batch_size is not None, ( "micro_batch_size must be passed in when not using dynamic batch size" ) micro_batches = mini_batch.batch.split(micro_batch_size) seq_len = micro_batches[0]["input_ids"].shape[1] total_seqlen = micro_batch_size * seq_len n_micro_batch = len(micro_batches) forward_backward_func = get_forward_backward_func() def loss_func(output, data, meta_info): nonlocal use_dynamic_bsz if forward_only: return torch.tensor(1.0, device=output.device), {"vpreds": output} responses = data["responses"] attention_mask = data["attention_mask"] values = data["values"] returns = data["returns"] response_length
= responses.size(1) response_mask = attention_mask[:, -response_length:] cliprange_value = self.config.cliprange_value vpreds = output # (bs, sequence_length) vpreds = vpreds[:, -response_length - 1 : -1] vf_loss, vf_clipfrac = core_algos.compute_value_loss( vpreds=vpreds, values=values, returns=returns, response_mask=response_mask, cliprange_value=cliprange_value, loss_agg_mode=self.config.loss_agg_mode, ) stats = { "critic/vf_loss": vf_loss.detach().item(), "critic/vf_clipfrac": vf_clipfrac.detach().item(), "critic/vpred_mean": masked_mean(vpreds, response_mask).detach().item(), } return vf_loss, stats def forward_step(batch_iter, model): batch = next(batch_iter) input_ids = batch["input_ids"] attention_mask = batch["attention_mask"] position_ids = batch["position_ids"] from verl.models.mcore import get_mcore_forward_fn forward_fn = get_mcore_forward_fn(self.hf_config) output = forward_fn( model, input_ids, attention_mask, position_ids, sequence_parallel=self.tf_config.sequence_parallel, value_model=True, ) return output, partial(loss_func, data=batch, meta_info={}) # batch should be a list of batches inside micro-batches batch_generator = make_batch_generator(micro_batches, vpp_size=len(self.critic_module)) # TODO: we may use the new schedule instead # for flash-attn: (seq_len, batch_size, hidden_size) = (mbs*seq_len, 1, hidden_size) if mpu.get_pipeline_model_parallel_world_size() > 1: losses_reduced = forward_backward_func( forward_step_func=forward_step, data_iterator=batch_generator, model=self.critic_module, num_microbatches=n_micro_batch, seq_length=total_seqlen, # no use when input_shapes was set micro_batch_size=1, # no use when input_shapes was set forward_only=forward_only, ) else: losses_reduced = forward_backward_func( forward_step_func=forward_step, data_iterator=batch_generator, model=self.critic_module, num_microbatches=n_micro_batch, seq_length=total_seqlen, # in use for pp = 1 micro_batch_size=1, # in use for pp = 1 forward_only=forward_only, ) # losses_reduced contains the stats returned from loss_func losses_reduced = {"output": losses_reduced} if use_dynamic_bsz: losses_reduced["indices"] = indices return losses_reduced @GPUMemoryLogger("megatron critic", logger=logger) def update_critic(self, dataloader: Iterable[DataProto]): metrics = {} for data in dataloader: # data = data.batch.to(self.critic_module.device) self.critic_optimizer.zero_grad() # use use_contiguous_buffers_in_local_ddp and no overlap_dp_param_comm for chunk in self.critic_module: chunk.zero_grad_buffer() micro_batch_size = self.config.ppo_micro_batch_size_per_gpu max_token_len = None if self.config.use_dynamic_bsz: max_token_len = self.config.ppo_max_token_len_per_gpu * self.config.megatron.context_parallel_size metric_micro_batch = self.forward_backward_batch( data, forward_only=False, use_dynamic_bsz=self.config.use_dynamic_bsz, micro_batch_size=micro_batch_size, max_token_len=max_token_len, mini_batch_size=self.config.ppo_mini_batch_size, ) metric_micro_batch = metric_micro_batch["output"] update_successful, grad_norm, num_zeros_in_grad = self.critic_optimizer.step() learning_rate = self.critic_optimizer.param_groups[-1]["lr"] data = {"critic/grad_norm": grad_norm, "critic/lr": learning_rate} append_to_dict(metrics, data) if update_successful: # allgather already execute in optimizer.step in new megatron pass else: raise NotImplementedError for metric in metric_micro_batch: append_to_dict(metrics, metric) # append the metric from this micro-batch to global metrics.
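            # Note (assumed behavior of verl's append_to_dict, consistent with how the
            # stats dicts are accumulated here): each metric key accumulates a list
            # across micro-batches, e.g.
            #
            #     m = {}
            #     append_to_dict(m, {"critic/vf_loss": 0.1})
            #     append_to_dict(m, {"critic/vf_loss": 0.2})
            #     # m == {"critic/vf_loss": [0.1, 0.2]}
            #
            # so callers are expected to reduce these lists (e.g. take a mean) when reporting.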
# add empty cache after each compute get_torch_device().empty_cache() return metrics ================================================ FILE: verl_rl/verl/workers/engine/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .base import BaseEngine, EngineRegistry from .fsdp import FSDPEngine __all__ = ["BaseEngine", "EngineRegistry", "FSDPEngine"] ================================================ FILE: verl_rl/verl/workers/engine/base.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The abstract base class defining the interface for model training engines. """ from typing import Callable import torch from verl import DataProto class BaseEngine: """ Abstract base class defining the interface for model training engines. Engine implementations must subclass BaseEngine and provide concrete behavior for all methods. """ def __init__(self, config): """ Initialize the BaseEngine. Args: config: Configuration object containing parameters for engine setup. """ raise NotImplementedError def init_model(self): """ Instantiate or load the model, optimizer, and learning rate scheduler. Should prepare all components necessary for training or evaluation. """ raise NotImplementedError def train_mode(self): """ Context manager entry for switching the engine and model into training mode. Usage: with engine.train_mode(): # runs in training mode """ raise NotImplementedError def eval_mode(self): """ Context manager entry for switching the engine and model into evaluation mode. Usage: with engine.eval_mode(): # runs in evaluation mode """ raise NotImplementedError def infer_batch( self, data: DataProto, post_fn: Callable[[DataProto, torch.Tensor], tuple[torch.Tensor, dict[str, torch.Tensor]]], ) -> dict[str, torch.Tensor]: """ Perform inference on a mini batch of data. Args: data: The input data for inference, typically containing tensors and metadata. post_fn: A post-processing function that takes a micro-batch and predictions as input, and returns a tuple containing processed predictions and a dictionary of outputs. Returns: dict[str, torch.Tensor]: A dictionary containing the predictions for the entire batch. 
""" raise NotImplementedError def train_batch( self, data: DataProto, loss_fn: Callable[[DataProto, torch.Tensor], tuple[torch.Tensor, dict[str, torch.Tensor]]], ) -> dict[str, torch.Tensor]: """ Perform a training step on a mini-batch of data. Args: data (DataProto): The input data for training, typically containing tensors and metadata. loss_fn (Callable): A function that computes the loss and metrics given a micro-batch and predictions. Returns: dict[str, torch.Tensor]: A dictionary containing the aggregated training metrics for the mini-batch. """ raise NotImplementedError def optimizer_zero_grad(self): """ Zero out gradients of all parameters before starting a new backward pass. """ raise NotImplementedError def optimizer_step(self): """ Perform an optimization step to update model parameters based on accumulated gradients. Returns: grad_norm (float): The norm of the gradients before clipping or update. """ raise NotImplementedError def lr_scheduler_step(self): """ Advance the learning rate scheduler by one step. Returns: current_lr (float or list[float]): Updated learning rate(s). """ raise NotImplementedError def shard_data(self, data): """ Shard or partition data for distributed training or parallel execution. Args: data: Data structure to be sharded across devices/workers. Returns: Sharded data in the same format as input. """ raise NotImplementedError def unshard_data(self, data): """ Reconstruct or gather sharded data back to a unified format. Args: data: Sharded data structure to reconstruct. Returns: Unsharded, combined data. """ raise NotImplementedError def to(self, device: str, model: bool = True, optimizer: bool = True): """ Move model parameters, optimizer states, or both to the specified device. Args: device: Target device identifier. model: If True, move the model. optimizer: If True, move the optimizer states. """ raise NotImplementedError def save_checkpoint(self, local_path, hdfs_path=None, global_step=0, max_ckpt_to_keep=None): """ Save model, optimizer, and scheduler states to a checkpoint. Args: local_path: Local filesystem path to save checkpoint. hdfs_path: Optional HDFS path to copy checkpoint. global_step: Integer training step number for naming. max_ckpt_to_keep: Maximum number of recent checkpoints to retain. """ raise NotImplementedError def load_checkpoint(self, local_path, hdfs_path=None, del_local_after_load=True): """ Load model, optimizer, and scheduler states from a checkpoint. Args: local_path: Local filesystem path of the checkpoint. hdfs_path: Optional HDFS path where checkpoint is stored. del_local_after_load: Whether to delete local copy after loading. """ raise NotImplementedError class EngineRegistry: """ A registry for managing and instantiating different types of training engines. This class uses a dictionary to store engine classes, mapping a string key to each class. It provides a decorator `register` to add new engines to the registry and a `new` method to create an instance of a registered engine. """ _engines = {} @classmethod def register(cls, key): """ A class method decorator that registers an engine class with a given key. This allows for dynamic instantiation of engine classes by their registered key. Args: key (str): The identifier to associate with the engine class. Returns: A decorator function that takes an engine class and registers it. 
""" def decorator(engine_class): assert issubclass(engine_class, BaseEngine) cls._engines[key] = engine_class return engine_class return decorator @classmethod def new(cls, key, *args, **kwargs): """ Function to create a new training engine instance based on the provided config. Args: key: A configuration object containing the engine key and other settings. *args: Variable length argument list. **kwargs: Arbitrary keyword arguments. Returns: engine: An instance of the training engine corresponding to the config. Raises: NotImplementedError: If the engine key in the config does not match any known engines. """ if key in cls._engines: return cls._engines[key](*args, **kwargs) else: raise NotImplementedError(f"Unknown engine: {key}") ================================================ FILE: verl_rl/verl/workers/engine/fsdp/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .engine_impl import FSDPEngine __all__ = ["FSDPEngine"] ================================================ FILE: verl_rl/verl/workers/engine/fsdp/engine_impl.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" The concrete Engine implementation using PyTorch FullyShardedDataParallel (FSDP) """ import gc import itertools import logging import os import warnings from typing import Callable import torch import torch.distributed from omegaconf import OmegaConf from peft import LoraConfig, TaskType, get_peft_model from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from verl import DataProto from verl.models.transformers.monkey_patch import apply_monkey_patch from verl.utils import hf_processor, hf_tokenizer from verl.utils.activation_offload import enable_activation_offloading from verl.utils.checkpoint.fsdp_checkpoint_manager import FSDPCheckpointManager from verl.utils.debug import log_gpu_memory_usage from verl.utils.device import ( get_device_id, get_device_name, get_torch_device, is_cuda_available, is_npu_available, ) from verl.utils.flops_counter import FlopsCounter from verl.utils.fs import copy_to_local from verl.utils.fsdp_utils import ( CPUOffloadPolicy, FSDPModule, MixedPrecisionPolicy, apply_fsdp2, fsdp2_clip_grad_norm_, fsdp2_load_full_state_dict, get_fsdp_wrap_policy, get_init_weight_context_manager, init_fn, load_fsdp_model_to_gpu, load_fsdp_optimizer, offload_fsdp_model_to_cpu, offload_fsdp_optimizer, ) from verl.utils.import_utils import import_external_libs from verl.utils.py_functional import append_to_dict, convert_to_regular_types from verl.utils.seqlen_balancing import get_reverse_idx, rearrange_micro_batches from verl.utils.ulysses import gather_outpus_and_unpad, ulysses_pad_and_slice_inputs from verl.workers.sharding_manager.fsdp_ulysses import FSDPUlyssesShardingManager if is_cuda_available: from flash_attn.bert_padding import index_first_axis, pad_input, rearrange, unpad_input elif is_npu_available: from transformers.integrations.npu_flash_attention import index_first_axis, pad_input, rearrange, unpad_input from ..base import BaseEngine, EngineRegistry from .utils import create_device_mesh, get_sharding_strategy logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) device_name = get_device_name() @EngineRegistry.register("fsdp") class FSDPEngine(BaseEngine): """ Concrete Engine implementation using PyTorch FullyShardedDataParallel (FSDP). Supports model sharding, activation/optimizer offloading, LoRA, and sequence parallelism. """ def __init__(self, config): """ Initialize the FSDPEngine. Sets up distributed device meshes, LoRA, and offload policies based on config. Args: config: Configuration object with FSDP and model settings. 
""" self.config = config self.rank = torch.distributed.get_rank() # build device mesh for Ulysses Sequence Parallel world_size = torch.distributed.get_world_size() from torch.distributed.device_mesh import init_device_mesh fsdp_size = self.config.model.fsdp_config.fsdp_size self.device_mesh = create_device_mesh(world_size=world_size, fsdp_size=fsdp_size) self.use_remove_padding = config.model.get("use_remove_padding", False) self.ulysses_device_mesh = None self.ulysses_sequence_parallel_size = self.config.get("ulysses_sequence_parallel_size", 1) dp = world_size // self.ulysses_sequence_parallel_size if self.ulysses_sequence_parallel_size > 1: self.ulysses_device_mesh = init_device_mesh( device_name, mesh_shape=(dp, self.ulysses_sequence_parallel_size), mesh_dim_names=["dp", "sp"] ) self.ulysses_sharding_manager = FSDPUlyssesShardingManager(self.ulysses_device_mesh) # set FSDP offload params self._is_offload_param = self.config.model.fsdp_config.param_offload self._is_offload_optimizer = self.config.model.fsdp_config.optimizer_offload # normalize config self.config.ppo_mini_batch_size *= self.config.rollout_n self.config.ppo_mini_batch_size //= torch.distributed.get_world_size() // self.ulysses_sequence_parallel_size if self.config.ppo_micro_batch_size is not None: self.config.ppo_micro_batch_size //= ( torch.distributed.get_world_size() // self.ulysses_sequence_parallel_size ) self.config.forward_micro_batch_size //= ( torch.distributed.get_world_size() // self.ulysses_sequence_parallel_size ) self.config.ppo_micro_batch_size_per_gpu = self.config.ppo_micro_batch_size self.config.forward_micro_batch_size_per_gpu = self.config.forward_micro_batch_size if self.config.ppo_micro_batch_size_per_gpu is not None: assert self.config.ppo_mini_batch_size % self.config.ppo_micro_batch_size_per_gpu == 0, ( f"normalized ppo_mini_batch_size {self.config.ppo_mini_batch_size} should be divisible by " f"ppo_micro_batch_size_per_gpu {self.config.ppo_micro_batch_size_per_gpu}" ) assert self.config.ppo_mini_batch_size // self.config.ppo_micro_batch_size_per_gpu > 0, ( f"normalized ppo_mini_batch_size {self.config.ppo_mini_batch_size} should be larger than " f"ppo_micro_batch_size_per_gpu {self.config.ppo_micro_batch_size_per_gpu}" ) self._is_lora = self.config.model.get("lora_rank", 0) > 0 def init_model(self): """ Build the model, optimizer, and learning rate scheduler under FSDP. Applies device, dtype, and precision configurations, including mixed precision. Sets up checkpoint manager and FLOPs counter. 
""" # This is used to import external_lib into the huggingface systems import_external_libs(self.config.model.get("external_lib", None)) self.module, self.optimizer, self.lr_scheduler = self._build_model_optimizer(self.config) if self._is_offload_param: offload_fsdp_model_to_cpu(self.module) log_gpu_memory_usage("After offload model during init", logger=logger) if self._is_offload_optimizer: offload_fsdp_optimizer(optimizer=self.optimizer) log_gpu_memory_usage("After offload optimizer during init", logger=logger) self.flops_counter = FlopsCounter(self.model_config) self.checkpoint_manager = FSDPCheckpointManager( model=self.module, optimizer=self.optimizer, lr_scheduler=self.lr_scheduler, processing_class=self.processor if self.processor is not None else self.tokenizer, checkpoint_contents=self.config.checkpoint, ) def _build_model_optimizer(self, config): # the following line is necessary from torch import optim from torch.distributed.fsdp import MixedPrecision from verl.utils.model import load_valuehead_model, print_model_size from verl.utils.torch_dtypes import PrecisionType use_shm = config.model.get("use_shm", False) local_path = copy_to_local(config.model.path, use_shm=use_shm) # note that the tokenizer between actor and critic may be different. So override tokenizer info with actor info # using random initialized model from any architecture. May not be the same as Actor. tokenizer_path = copy_to_local(config.model.tokenizer_path, use_shm=use_shm) self.tokenizer = hf_tokenizer(tokenizer_path, trust_remote_code=config.model.get("trust_remote_code", False)) self.processor = hf_processor(tokenizer_path, trust_remote_code=config.model.get("trust_remote_code", False)) if self.config.model.get("custom_chat_template", None) is not None: if self.processor is not None: self.processor.chat_template = self.config.model.custom_chat_template else: self.tokenizer.chat_template = self.config.model.custom_chat_template override_config = OmegaConf.to_container(self.config.model.get("override_config", OmegaConf.create())) override_config_kwargs = { "bos_token_id": self.tokenizer.bos_token_id, "eos_token_id": self.tokenizer.eos_token_id, "pad_token_id": self.tokenizer.pad_token_id, } override_config_kwargs.update(override_config) if self.rank == 0: print(f"Engine overriding config {override_config_kwargs}") torch_dtype = self.config.model.fsdp_config.get("model_dtype", "fp32") torch_dtype = PrecisionType.to_dtype(torch_dtype) from transformers import AutoConfig model_config = AutoConfig.from_pretrained( local_path, attn_implementation="flash_attention_2", trust_remote_code=config.model.get("trust_remote_code", False), ) model_config.num_labels = 1 # patch for kimi-vl if getattr(model_config, "model_type", None) == "kimi_vl": model_config.text_config.topk_method = "greedy" init_context = get_init_weight_context_manager( use_meta_tensor=not model_config.tie_word_embeddings, mesh=self.device_mesh ) with init_context(), warnings.catch_warnings(): warnings.simplefilter("ignore") model_config.classifier_dropout = 0.0 model_config.hidden_dropout = "0" model_config.summary_dropout_prob = 0.0 module = load_valuehead_model( local_path, torch_dtype, model_config, config.model.get("trust_remote_code", False), ) apply_monkey_patch( model=module, use_remove_padding=self.use_remove_padding, ulysses_sp_size=self.ulysses_sequence_parallel_size, ) # some parameters may not in torch_dtype module.to(torch_dtype) if config.model.get("enable_gradient_checkpointing", False): 
module.gradient_checkpointing_enable(gradient_checkpointing_kwargs={"use_reentrant": False}) if self._is_lora: print("Applying LoRA to the module") module.enable_input_require_grads() # Convert config to regular Python types before creating PEFT model lora_config = { "task_type": TaskType.CAUSAL_LM, "r": self.config.model.lora_rank, "lora_alpha": self.config.model.lora_alpha, "target_modules": convert_to_regular_types(self.config.model.target_modules), "bias": "none", } module = get_peft_model(module, LoraConfig(**lora_config)) if self.rank == 0: print_model_size(module) self.model_config = model_config fsdp_config = self.config.model.fsdp_config mixed_precision_config = fsdp_config.get("mixed_precision", None) if mixed_precision_config is not None: param_dtype = PrecisionType.to_dtype(mixed_precision_config.get("param_dtype", "bf16")) reduce_dtype = PrecisionType.to_dtype(mixed_precision_config.get("reduce_dtype", "fp32")) buffer_dtype = PrecisionType.to_dtype(mixed_precision_config.get("buffer_dtype", "fp32")) else: param_dtype = torch.bfloat16 reduce_dtype = torch.float32 buffer_dtype = torch.float32 mixed_precision = MixedPrecision(param_dtype=param_dtype, reduce_dtype=reduce_dtype, buffer_dtype=buffer_dtype) auto_wrap_policy = get_fsdp_wrap_policy( module=module, config=self.config.model.fsdp_config.wrap_policy, is_lora=self.config.model.get("lora_rank", 0) > 0, ) log_gpu_memory_usage("Before FSDP", logger=None) fsdp_mesh = self.device_mesh sharding_strategy = get_sharding_strategy(fsdp_mesh) # Note: We force turn off CPUOffload because it causes incorrect results when using grad accumulation if config.strategy == "fsdp": module = FSDP( module, param_init_fn=init_fn, use_orig_params=False, auto_wrap_policy=auto_wrap_policy, device_id=get_device_id(), sharding_strategy=sharding_strategy, mixed_precision=mixed_precision, sync_module_states=True, forward_prefetch=self.config.model.fsdp_config.forward_prefetch, device_mesh=self.device_mesh, cpu_offload=None, ) elif config.strategy == "fsdp2": assert CPUOffloadPolicy is not None, "PyTorch version >= 2.4 is required for using fully_shard API (FSDP2)" mp_policy = MixedPrecisionPolicy( param_dtype=param_dtype, reduce_dtype=reduce_dtype, cast_forward_inputs=True ) offload_policy = None if fsdp_config.offload_policy: self._is_offload_param = False self._is_offload_optimizer = False offload_policy = CPUOffloadPolicy(pin_memory=True) fsdp_kwargs = { "mesh": fsdp_mesh, "mp_policy": mp_policy, "offload_policy": offload_policy, "reshard_after_forward": fsdp_config.reshard_after_forward, } full_state = module.state_dict() apply_fsdp2(module, fsdp_kwargs, fsdp_config) fsdp2_load_full_state_dict(module, full_state, fsdp_mesh, offload_policy) else: raise NotImplementedError(f"Unknown strategy {config.strategy}") if config.model.get("enable_activation_offload", False): enable_gradient_checkpointing = config.model.get("enable_gradient_checkpointing", False) enable_activation_offloading(module, config.strategy, enable_gradient_checkpointing) log_gpu_memory_usage("After FSDP", logger=None) optimizer = optim.AdamW( module.parameters(), lr=config.optim.lr, betas=config.optim.get("betas", (0.9, 0.999)), weight_decay=config.optim.get("weight_decay", 1e-2), ) total_steps = config.optim.get("total_training_steps", 0) num_warmup_steps = int(config.optim.get("lr_warmup_steps", -1)) warmup_style = config.optim.get("warmup_style", "constant") if num_warmup_steps < 0: num_warmup_steps_ratio = config.optim.get("lr_warmup_steps_ratio", 0.0) num_warmup_steps = 
int(num_warmup_steps_ratio * total_steps)

        if self.rank == 0:
            print(f"Total steps: {total_steps}, num_warmup_steps: {num_warmup_steps}")

        from verl.utils.torch_functional import get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup

        if warmup_style == "constant":
            lr_scheduler = get_constant_schedule_with_warmup(optimizer=optimizer, num_warmup_steps=num_warmup_steps)
        elif warmup_style == "cosine":
            lr_scheduler = get_cosine_schedule_with_warmup(
                optimizer=optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=total_steps
            )
        else:
            raise NotImplementedError(f"Warmup style {warmup_style} is not supported")

        return module, optimizer, lr_scheduler

    def train_mode(self):
        """
        Return a context manager that switches to training mode with FSDP-specific handling.
        Includes parameter and optimizer offload entry/exit.
        """
        return EngineTrainModeCtx(self)

    def eval_mode(self):
        """
        Return a context manager that switches to evaluation mode with FSDP-specific handling.
        Includes parameter offload entry/exit.
        """
        return EngineEvalModeCtx(self)

    def shard_data(self, data):
        """
        Preprocess data into sharded format via UlyssesShardingManager.
        """
        return self.ulysses_sharding_manager.preprocess_data(data)

    def unshard_data(self, data):
        """
        Postprocess data from sharded format back to full format.
        """
        return self.ulysses_sharding_manager.postprocess_data(data)

    def get_default_ctx(self):
        use_value_head_model = hasattr(self.module, "v_head")
        ctx = {
            "use_value_head_model": use_value_head_model,
            "ulysses_sequence_parallel_size": self.ulysses_sequence_parallel_size,
        }
        return ctx

    def _forward_micro_batch(self, micro_batch):
        multi_modal_inputs = {}
        if "multi_modal_inputs" in micro_batch.keys():
            for key in micro_batch["multi_modal_inputs"][0].keys():
                multi_modal_inputs[key] = torch.cat(
                    [inputs[key] for inputs in micro_batch["multi_modal_inputs"]], dim=0
                )

        with torch.autocast(device_type=device_name, dtype=torch.bfloat16):
            input_ids = micro_batch["input_ids"]
            batch, seqlen = input_ids.shape
            attention_mask = micro_batch["attention_mask"]
            position_ids = micro_batch["position_ids"]
            if position_ids.dim() == 3:  # qwen2vl mrope
                position_ids = position_ids.transpose(0, 1)

            if self.use_remove_padding:
                input_ids_rmpad, indices, *_ = unpad_input(
                    input_ids.unsqueeze(-1), attention_mask
                )  # input_ids_rmpad (total_nnz, ...)
                input_ids_rmpad = input_ids_rmpad.transpose(0, 1)  # (1, total_nnz)

                # unpad the position_ids to align the rotary
                if position_ids.dim() == 3:
                    position_ids_rmpad = (
                        index_first_axis(rearrange(position_ids, "c b s ... -> (b s) c ..."), indices)
                        .transpose(0, 1)
                        .unsqueeze(1)
                    )  # (3, bsz, seqlen) -> (3, 1, bsz * seqlen)
                else:
                    position_ids_rmpad = index_first_axis(
                        rearrange(position_ids.unsqueeze(-1), "b s ... -> (b s) ..."), indices
                    ).transpose(0, 1)

                # pad and slice the inputs if sp > 1
                if self.ulysses_sequence_parallel_size > 1:
                    input_ids_rmpad, position_ids_rmpad, pad_size = ulysses_pad_and_slice_inputs(
                        input_ids_rmpad, position_ids_rmpad, sp_size=self.ulysses_sequence_parallel_size
                    )

                # only pass input_ids and position_ids to enable flash_attn_varlen
                preds = self.module(
                    input_ids=input_ids_rmpad,
                    attention_mask=None,
                    position_ids=position_ids_rmpad,
                    **multi_modal_inputs,
                    use_cache=False,
                )  # prevent the model from thinking we are generating

                if hasattr(self.module, "v_head"):
                    # For trl.AutoModelForCausalLMWithValueHead
                    preds_rmpad = preds[2].squeeze(0).unsqueeze(-1)
                else:
                    preds_rmpad = preds.logits
                preds_rmpad = preds_rmpad.squeeze(0)  # (total_nnz)

                # gather output if sp > 1
                if self.ulysses_sequence_parallel_size > 1:
                    preds_rmpad = gather_outpus_and_unpad(preds_rmpad, gather_dim=0, unpad_dim=0, padding_size=pad_size)

                # pad it back
                preds = pad_input(preds_rmpad, indices=indices, batch=batch, seqlen=seqlen).squeeze(-1)
            else:
                preds = self.module(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    position_ids=position_ids,
                    **multi_modal_inputs,
                    use_cache=False,
                )  # prevent the model from thinking we are generating

                if hasattr(self.module, "v_head"):
                    # For trl.AutoModelForCausalLMWithValueHead
                    preds = preds[2]
                else:
                    preds = preds.logits
            return preds

    def infer_batch(
        self,
        data: DataProto,
        post_fn: Callable[[DataProto, torch.Tensor], tuple[torch.Tensor, dict[str, torch.Tensor]]],
    ) -> dict[str, torch.Tensor]:
        """
        Perform inference on a mini batch of data.

        Args:
            data: The input data for inference, typically containing tensors and metadata.
            post_fn: A post-processing function that takes a micro-batch and predictions as input,
                and returns a tuple containing processed predictions and a dictionary of outputs.

        Returns:
            dict[str, torch.Tensor]: A dictionary containing the predictions for the entire batch.
        """
        assert self.mode == "eval"
        micro_batch_size = data.meta_info["micro_batch_size"]
        select_keys = ["responses", "input_ids", "attention_mask", "position_ids"]
        batch = data.select(batch_keys=select_keys).batch
        use_dynamic_bsz = data.meta_info["use_dynamic_bsz"]
        has_multi_modal_inputs = "multi_modal_inputs" in data.non_tensor_batch.keys()

        if has_multi_modal_inputs:
            num_micro_batches = data.batch.batch_size[0] // micro_batch_size
            non_tensor_select_keys = ["multi_modal_inputs"]
            micro_batches = data.select(select_keys, non_tensor_select_keys).chunk(num_micro_batches)
        elif use_dynamic_bsz:
            # split using dynamic bsz
            max_token_len = data.meta_info["max_token_len"] * self.ulysses_sequence_parallel_size
            micro_batches, indices = rearrange_micro_batches(batch=batch, max_token_len=max_token_len)
        else:
            micro_batches = batch.split(micro_batch_size)

        preds_list = {}
        for micro_batch in micro_batches:
            if isinstance(micro_batch, DataProto):
                micro_batch = {**micro_batch.batch, **micro_batch.non_tensor_batch}
            with torch.no_grad():
                # micro_batch_preds would be a dict[str, torch.Tensor]
                preds = self._forward_micro_batch(micro_batch)
                _, outputs = post_fn(micro_batch, preds)
                assert isinstance(outputs, dict)
            # append micro batch preds to dict[str, List[torch.Tensor]]
            append_to_dict(preds_list, outputs)

        # reorganize mini batch preds from
        # dict[str, List[torch.Tensor]] to dict[str, torch.Tensor]
        mini_batch_preds = {}
        if use_dynamic_bsz:
            # flatten the micro-batch index lists once; reused for every output key below
            indices = list(itertools.chain.from_iterable(indices))
        for key, t_list in preds_list.items():
            t_concat = torch.concat(t_list, dim=0)
            if use_dynamic_bsz:
                assert len(indices) == t_concat.size(0), f"{len(indices)} vs. {t_concat.size()}"
                revert_indices = torch.tensor(get_reverse_idx(indices), dtype=torch.long)
                t_concat = t_concat[revert_indices]
            mini_batch_preds[key] = t_concat

        return mini_batch_preds
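    # Illustrative example of the dynamic-batch-size bookkeeping above
    # (hypothetical permutation): rearrange_micro_batches may pack samples
    # [0, 3] and [1, 2] into two token-balanced micro-batches, so the
    # concatenated outputs come back in order [0, 3, 1, 2]. Flattening
    # `indices` yields that permutation, and get_reverse_idx returns its
    # inverse [0, 2, 3, 1]; indexing t_concat with it restores the original
    # sample order.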
    def train_batch(
        self,
        data: DataProto,
        loss_fn: Callable[[DataProto, torch.Tensor], tuple[torch.Tensor, dict[str, torch.Tensor]]],
    ) -> dict[str, torch.Tensor]:
        """
        Perform a training step on a mini-batch of data.

        Args:
            data (DataProto): The input data for training, typically containing tensors and metadata.
            loss_fn (Callable): A function that computes the loss and metrics given a micro-batch and predictions.

        Returns:
            dict[str, torch.Tensor]: A dictionary containing the aggregated training metrics for the mini-batch.
        """
        assert self.mode == "train"
        # split batch into micro_batches
        mini_batch = data
        select_keys = ["input_ids", "responses", "response_mask", "attention_mask", "position_ids"]
        if "multi_modal_inputs" in mini_batch:
            non_tensor_select_keys = ["multi_modal_inputs"]
            num_micro_batches = mini_batch.batch.batch_size[0] // self.config.ppo_micro_batch_size_per_gpu
            micro_batches = mini_batch.select(select_keys, non_tensor_select_keys).chunk(num_micro_batches)
        elif self.config.use_dynamic_bsz:
            max_token_len = self.config.ppo_max_token_len_per_gpu * self.ulysses_sequence_parallel_size
            micro_batches, _ = rearrange_micro_batches(batch=mini_batch, max_token_len=max_token_len)
        else:
            micro_batches = mini_batch.split(self.config.ppo_micro_batch_size_per_gpu)

        mini_batch_metrics = {}
        for micro_batch in micro_batches:
            # Support all devices
            if isinstance(micro_batch, DataProto):
                micro_batch = {**micro_batch.batch.to(get_device_id()), **micro_batch.non_tensor_batch}
            else:
                micro_batch = micro_batch.to(get_device_id())  # critic device is cpu when using offload

            preds = self._forward_micro_batch(micro_batch)
            loss, micro_batch_metrics = loss_fn(micro_batch, preds)
            append_to_dict(mini_batch_metrics, micro_batch_metrics)
            loss.backward()

        return mini_batch_metrics

    def optimizer_zero_grad(self):
        """
        Zero all parameter gradients before accumulating a new mini-batch.
        """
        self.optimizer.zero_grad()

    def optimizer_step(self):
        """
        Clip gradients, skip the update if the gradient norm is non-finite, and step the optimizer.

        Returns:
            grad_norm (float): Norm of gradients before clipping.
        """
        assert self.config.grad_clip is not None

        if isinstance(self.module, FSDP):
            grad_norm = self.module.clip_grad_norm_(self.config.grad_clip)
        elif isinstance(self.module, FSDPModule):
            grad_norm = fsdp2_clip_grad_norm_(self.module.parameters(), max_norm=self.config.grad_clip)
        else:
            grad_norm = torch.nn.utils.clip_grad_norm_(self.module.parameters(), max_norm=self.config.grad_clip)

        # if grad_norm is not finite, skip the update
        if not torch.isfinite(grad_norm):
            print(f"WARN: grad_norm is not finite: {grad_norm}")
            self.optimizer.zero_grad()
        else:
            self.optimizer.step()
        return grad_norm

    def lr_scheduler_step(self):
        """
        Advance FSDP scheduler and return updated learning rate.
        """
        self.lr_scheduler.step()
        lr = self.lr_scheduler.get_last_lr()
        return lr
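    # Minimal usage sketch (hypothetical driver code; `engine`, `data`, and
    # `loss_fn` are placeholder names, not part of this module):
    #
    #     with engine.train_mode():
    #         engine.optimizer_zero_grad()
    #         metrics = engine.train_batch(data, loss_fn)  # backward per micro-batch
    #         grad_norm = engine.optimizer_step()          # clip, skip non-finite, step
    #         lr = engine.lr_scheduler_step()
    #
    # where loss_fn(micro_batch, preds) returns (loss, metrics_dict), as
    # consumed by train_batch above.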
""" assert device in ("cuda", "cpu") if device == "cuda": if not self.config.model.fsdp_config.param_offload: if model: load_fsdp_model_to_gpu(self.model_module) if optimizer and self.optimizer is not None: load_fsdp_optimizer(self.optimizer, device) gc.collect() elif device == "cpu": if not self.config.model.fsdp_config.param_offload: if model: offload_fsdp_model_to_cpu(self.model_module) if optimizer and self.optimizer is not None: offload_fsdp_optimizer(self.optimizer) else: raise ValueError(f"Invalid device type: {device}") def save_checkpoint(self, local_path, hdfs_path=None, global_step=0, max_ckpt_to_keep=None): """ Save FSDP checkpoint, handling parameter offload as needed. """ if self._is_offload_param: load_fsdp_model_to_gpu(self.module) self.checkpoint_manager.save_checkpoint( local_path=local_path, hdfs_path=hdfs_path, global_step=global_step, max_ckpt_to_keep=max_ckpt_to_keep ) torch.distributed.barrier() if self._is_offload_param: offload_fsdp_model_to_cpu(self.module) def load_checkpoint(self, local_path, hdfs_path=None, del_local_after_load=True): """ Load FSDP checkpoint, restoring parameters and optimizer state. """ import torch if self._is_offload_param: load_fsdp_model_to_gpu(self.module) self.checkpoint_manager.load_checkpoint( local_path=local_path, hdfs_path=hdfs_path, del_local_after_load=del_local_after_load ) torch.distributed.barrier() if self._is_offload_param: offload_fsdp_model_to_cpu(self.module) if self._is_offload_optimizer: offload_fsdp_optimizer(self.optimizer) class EngineEvalModeCtx: def __init__(self, engine): self.engine = engine def __enter__(self): self.engine.mode = "eval" if self.engine._is_offload_param: load_fsdp_model_to_gpu(self.engine.module) self.engine.ulysses_sharding_manager.__enter__() self.engine.module.eval() def __exit__(self, exc_type, exc_value, traceback): self.engine.ulysses_sharding_manager.__exit__(exc_type, exc_value, traceback) if self.engine._is_offload_param: offload_fsdp_model_to_cpu(self.engine.module) self.engine.mode = None class EngineTrainModeCtx: def __init__(self, engine): self.engine = engine def __enter__(self): self.engine.mode = "train" if self.engine._is_offload_param: load_fsdp_model_to_gpu(self.engine.module) if self.engine._is_offload_optimizer: load_fsdp_optimizer(optimizer=self.engine.optimizer, device_id=get_torch_device().current_device()) self.engine.ulysses_sharding_manager.__enter__() self.engine.module.train() def __exit__(self, exc_type, exc_value, traceback): self.engine.ulysses_sharding_manager.__exit__(exc_type, exc_value, traceback) if self.engine._is_offload_param: offload_fsdp_model_to_cpu(self.engine.module) if self.engine._is_offload_optimizer: offload_fsdp_optimizer(optimizer=self.optimizer) self.engine.mode = None ================================================ FILE: verl_rl/verl/workers/engine/fsdp/utils.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from torch.distributed.device_mesh import init_device_mesh from verl.utils.device import get_device_name def create_device_mesh(world_size, fsdp_size): """ Create a device mesh for distributed training based on the world size and FSDP size. Args: world_size (int): Total number of processes in the distributed training setup. fsdp_size (int): Size of the Fully Sharded Data Parallel (FSDP) group. Returns: torch.distributed.device_mesh.DeviceMesh: The initialized device mesh. """ device_name = get_device_name() if fsdp_size < 0 or fsdp_size >= world_size: device_mesh = init_device_mesh(device_name, mesh_shape=(world_size,), mesh_dim_names=["fsdp"]) else: device_mesh = init_device_mesh( device_name, mesh_shape=(world_size // fsdp_size, fsdp_size), mesh_dim_names=["ddp", "fsdp"] ) return device_mesh def get_sharding_strategy(device_mesh): """ Determine the appropriate sharding strategy based on the number of dimensions of the device mesh. Args: device_mesh (torch.distributed.device_mesh.DeviceMesh): The device mesh used for distributed training. Returns: torch.distributed.fsdp.ShardingStrategy: The sharding strategy to be used with FSDP. Raises: NotImplementedError: If the number of dimensions of the device mesh is neither 1 nor 2. """ from torch.distributed.fsdp import ShardingStrategy if device_mesh.ndim == 1: sharding_strategy = ShardingStrategy.FULL_SHARD elif device_mesh.ndim == 2: sharding_strategy = ShardingStrategy.HYBRID_SHARD else: raise NotImplementedError(f"Get device mesh ndim={device_mesh.ndim}, but only support 1 or 2") return sharding_strategy ================================================ FILE: verl_rl/verl/workers/engine/megatron/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: verl_rl/verl/workers/engine/megatron/engine_impl.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Callable import torch from verl import DataProto from ..base import BaseEngine, EngineRegistry @EngineRegistry.register("megatron") class MegatronEngine(BaseEngine): def __init__(self, config): raise NotImplementedError def init_model(self): raise NotImplementedError def train_mode(self): """ Context manager entry for switching the engine and model into training mode. 
Usage: with engine.train_mode(): # runs in training mode """ raise NotImplementedError def eval_mode(self): """ Context manager entry for switching the engine and model into evaluation mode. Usage: with engine.eval_mode(): # runs in evaluation mode """ raise NotImplementedError def infer_batch( self, data: DataProto, post_fn: Callable[[DataProto, torch.Tensor], tuple[torch.Tensor, dict[str, torch.Tensor]]], ) -> dict[str, torch.Tensor]: """ Perform inference on a mini batch of data. Args: data: The input data for inference, typically containing tensors and metadata. post_fn: A post-processing function that takes a micro-batch and predictions as input, and returns a tuple containing processed predictions and a dictionary of outputs. Returns: dict[str, torch.Tensor]: A dictionary containing the predictions for the entire batch. """ raise NotImplementedError def train_batch( self, data: DataProto, loss_fn: Callable[[DataProto, torch.Tensor], tuple[torch.Tensor, dict[str, torch.Tensor]]], ) -> dict[str, torch.Tensor]: """ Perform a training step on a mini-batch of data. Args: data (DataProto): The input data for training, typically containing tensors and metadata. loss_fn (Callable): A function that computes the loss and metrics given a micro-batch and predictions. Returns: dict[str, torch.Tensor]: A dictionary containing the aggregated training metrics for the mini-batch. """ raise NotImplementedError def optimizer_zero_grad(self): """ Zero out gradients of all parameters before starting a new backward pass. """ raise NotImplementedError def optimizer_step(self): """ Perform an optimization step to update model parameters based on accumulated gradients. Returns: grad_norm (float): The norm of the gradients before clipping or update. """ raise NotImplementedError def lr_scheduler_step(self): """ Advance the learning rate scheduler by one step. Returns: current_lr (float or list[float]): Updated learning rate(s). """ raise NotImplementedError def shard_data(self, data): """ Shard or partition data for distributed training or parallel execution. Args: data: Data structure to be sharded across devices/workers. Returns: Sharded data in the same format as input. """ raise NotImplementedError def unshard_data(self, data): """ Reconstruct or gather sharded data back to a unified format. Args: data: Sharded data structure to reconstruct. Returns: Unsharded, combined data. """ raise NotImplementedError def to(self, device: str, model: bool = True, optimizer: bool = True): """ Move model parameters, optimizer states, or both to the specified device. Args: device: Target device identifier. model: If True, move the model. optimizer: If True, move the optimizer states. """ raise NotImplementedError def save_checkpoint(self, local_path, hdfs_path=None, global_step=0, max_ckpt_to_keep=None): """ Save model, optimizer, and scheduler states to a checkpoint. Args: local_path: Local filesystem path to save checkpoint. hdfs_path: Optional HDFS path to copy checkpoint. global_step: Integer training step number for naming. max_ckpt_to_keep: Maximum number of recent checkpoints to retain. """ raise NotImplementedError def load_checkpoint(self, local_path, hdfs_path=None, del_local_after_load=True): """ Load model, optimizer, and scheduler states from a checkpoint. Args: local_path: Local filesystem path of the checkpoint. hdfs_path: Optional HDFS path where checkpoint is stored. del_local_after_load: Whether to delete local copy after loading. 
""" raise NotImplementedError ================================================ FILE: verl_rl/verl/workers/fsdp_workers.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The main entry point to run the PPO algorithm """ import json import logging import os import warnings from dataclasses import asdict from typing import Any import psutil import torch import torch.distributed import torch.distributed as dist from codetiming import Timer from omegaconf import DictConfig, OmegaConf, open_dict from peft import LoraConfig, TaskType, get_peft_model from safetensors.torch import save_file from torch.distributed.device_mesh import init_device_mesh from torch.distributed.fsdp import FullyShardedDataParallel as FSDP import verl.utils.torch_functional as verl_F from verl import DataProto from verl.models.transformers.monkey_patch import apply_monkey_patch from verl.single_controller.base import Worker from verl.single_controller.base.decorator import Dispatch, register from verl.utils import hf_processor, hf_tokenizer from verl.utils.activation_offload import enable_activation_offloading from verl.utils.checkpoint.fsdp_checkpoint_manager import FSDPCheckpointManager from verl.utils.config import omega_conf_to_dataclass from verl.utils.device import ( get_device_id, get_device_name, get_nccl_backend, get_torch_device, is_cuda_available, is_npu_available, ) from verl.utils.flops_counter import FlopsCounter from verl.utils.fs import copy_to_local from verl.utils.fsdp_utils import ( CPUOffloadPolicy, MixedPrecisionPolicy, apply_fsdp2, fsdp2_load_full_state_dict, fsdp_version, get_fsdp_wrap_policy, get_init_weight_context_manager, init_fn, layered_summon_lora_params, load_fsdp_model_to_gpu, load_fsdp_optimizer, offload_fsdp_model_to_cpu, offload_fsdp_optimizer, ) from verl.utils.import_utils import import_external_libs from verl.utils.model import compute_position_id_with_mask from verl.utils.profiler import DistProfiler, DistProfilerExtension, log_gpu_memory_usage, simple_timer from verl.utils.profiler.performance import reduce_timing from verl.utils.py_functional import convert_to_regular_types from verl.workers.sharding_manager.fsdp_ulysses import FSDPUlyssesShardingManager logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) device_name = get_device_name() def create_device_mesh(world_size, fsdp_size): if fsdp_size < 0 or fsdp_size >= world_size: device_mesh = init_device_mesh(device_name, mesh_shape=(world_size,), mesh_dim_names=["fsdp"]) else: device_mesh = init_device_mesh( device_name, mesh_shape=(world_size // fsdp_size, fsdp_size), mesh_dim_names=["ddp", "fsdp"] ) return device_mesh def get_sharding_strategy(device_mesh): from torch.distributed.fsdp import ShardingStrategy if device_mesh.ndim == 1: sharding_strategy = ShardingStrategy.FULL_SHARD elif device_mesh.ndim == 2: sharding_strategy = ShardingStrategy.HYBRID_SHARD else: raise NotImplementedError(f"Get 
device mesh ndim={device_mesh.ndim}, but only support 1 or 2") return sharding_strategy class ActorRolloutRefWorker(Worker, DistProfilerExtension): """ This worker can be instantiated as a standalone actor or a standalone rollout or a standalone reference policy or a hybrid engine based on the config.rollout """ def __init__(self, config: DictConfig, role: str, **kwargs): Worker.__init__(self) self.config = config self.profile_option = kwargs.get("profile_option", None) import torch.distributed if not torch.distributed.is_initialized(): rank = int(os.environ.get("RANK", 0)) world_size = int(os.environ.get("WORLD_SIZE", 1)) torch.distributed.init_process_group( backend=f"cpu:gloo,{get_device_name()}:{get_nccl_backend()}", rank=rank, world_size=world_size, init_method=os.environ.get("DIST_INIT_METHOD", None), ) # build device mesh for FSDP world_size = torch.distributed.get_world_size() # TODO(sgm): support FSDP hybrid shard for larger model self.device_mesh = create_device_mesh(world_size=world_size, fsdp_size=self.config.actor.fsdp_config.fsdp_size) # build device mesh for Ulysses Sequence Parallel self.ulysses_device_mesh = None self.ulysses_sequence_parallel_size = self.config.actor.get("ulysses_sequence_parallel_size", 1) dp = world_size // self.ulysses_sequence_parallel_size if self.ulysses_sequence_parallel_size > 1: self.ulysses_device_mesh = init_device_mesh( device_name, mesh_shape=(dp, self.ulysses_sequence_parallel_size), mesh_dim_names=["dp", "sp"] ) self.ulysses_sharding_manager = FSDPUlyssesShardingManager(self.ulysses_device_mesh) self._lora_rank = self.config.model.get("lora_rank", 0) self._is_lora = self._lora_rank > 0 self.role = role assert self.role in ["actor", "rollout", "ref", "actor_rollout", "actor_rollout_ref"] self._is_actor = self.role in ["actor", "actor_rollout", "actor_rollout_ref"] self._is_rollout = self.role in ["rollout", "actor_rollout", "actor_rollout_ref"] self._is_ref = self.role in ["ref", "actor_rollout_ref"] # TODO(haibin.lin): # As of now the type of config is DictConfig, if we assign config.profiler with ProfilerConfig, # it will actually convert the ProfilerConfig dataclass back to a DictConfig. 
# We can still use ProfilerConfig for testing purpose (tests/utils/test_nvtx_profile.py) # as they provides DictConfig-like interface # The benefit of creating the dataclass config is to perform validation during __post_init__ profiler_config = omega_conf_to_dataclass(config.get("profiler")) DistProfilerExtension.__init__( self, DistProfiler(rank=self.rank, config=profiler_config, option=self.profile_option) ) self._is_offload_param = False self._is_offload_optimizer = False if self._is_actor: self._is_offload_param = self.config.actor.fsdp_config.get("param_offload", False) self._is_offload_optimizer = self.config.actor.fsdp_config.get("optimizer_offload", False) elif self._is_ref: # TODO: it seems that manual offload is slowly than FSDP offload self._is_offload_param = self.config.ref.fsdp_config.get("param_offload", False) # normalize config if self._is_actor: self.config.actor.ppo_mini_batch_size *= self.config.rollout.n self.config.actor.ppo_mini_batch_size //= self.device_mesh.size() // self.ulysses_sequence_parallel_size assert self.config.actor.ppo_mini_batch_size > 0, ( f"ppo_mini_batch_size {self.config.actor.ppo_mini_batch_size} should be larger than 0 after " f"normalization" ) # micro bsz if self.config.actor.ppo_micro_batch_size is not None: self.config.actor.ppo_micro_batch_size //= ( self.device_mesh.size() // self.ulysses_sequence_parallel_size ) self.config.actor.ppo_micro_batch_size_per_gpu = self.config.actor.ppo_micro_batch_size if self.config.actor.ppo_micro_batch_size_per_gpu is not None: assert self.config.actor.ppo_mini_batch_size % self.config.actor.ppo_micro_batch_size_per_gpu == 0, ( f"normalized ppo_mini_batch_size {self.config.actor.ppo_mini_batch_size} should be divisible by " f"ppo_micro_batch_size_per_gpu {self.config.actor.ppo_micro_batch_size_per_gpu}" ) assert self.config.actor.ppo_mini_batch_size // self.config.actor.ppo_micro_batch_size_per_gpu > 0, ( f"normalized ppo_mini_batch_size {self.config.actor.ppo_mini_batch_size} should be larger than " f"ppo_micro_batch_size_per_gpu {self.config.actor.ppo_micro_batch_size_per_gpu}" ) # normalize rollout config if self._is_rollout and self.config.rollout.log_prob_micro_batch_size is not None: self.config.rollout.log_prob_micro_batch_size //= ( self.device_mesh.size() // self.ulysses_sequence_parallel_size ) self.config.rollout.log_prob_micro_batch_size_per_gpu = self.config.rollout.log_prob_micro_batch_size # normalize ref config if self._is_ref and self.config.ref.log_prob_micro_batch_size is not None: self.config.ref.log_prob_micro_batch_size //= self.device_mesh.size() // self.ulysses_sequence_parallel_size self.config.ref.log_prob_micro_batch_size_per_gpu = self.config.ref.log_prob_micro_batch_size def _build_model_optimizer( self, model_path, fsdp_config, optim_config, override_model_config, use_remove_padding=False, use_fused_kernels=False, enable_gradient_checkpointing=False, trust_remote_code=False, use_liger=False, role="actor", enable_activation_offload=False, ): from torch import optim from torch.distributed.fsdp import CPUOffload, MixedPrecision from transformers import AutoConfig, AutoModelForCausalLM, AutoModelForVision2Seq from verl.utils.model import get_generation_config, print_model_size, update_model_config from verl.utils.torch_dtypes import PrecisionType assert role in ["actor", "ref"] log_gpu_memory_usage(f"Before init {role} from HF AutoModel", logger=logger) local_path = model_path # note that we have to create model in fp32. 
Otherwise, the optimizer is in bf16, which is incorrect # TODO(zhangchi.usc1992): 1. support create from random initialized model. 2. Support init with FSDP directly self.tokenizer = hf_tokenizer(local_path, trust_remote_code=trust_remote_code) self.processor = hf_processor(local_path, trust_remote_code=trust_remote_code) if self.config.model.get("custom_chat_template", None) is not None: if self.processor is not None: self.processor.chat_template = self.config.model.custom_chat_template else: self.tokenizer.chat_template = self.config.model.custom_chat_template torch_dtype = fsdp_config.get("model_dtype", None) if torch_dtype is None: torch_dtype = torch.float32 if self._is_actor else torch.bfloat16 else: torch_dtype = PrecisionType.to_dtype(torch_dtype) # override model kwargs actor_model_config = AutoConfig.from_pretrained( local_path, trust_remote_code=trust_remote_code, attn_implementation="flash_attention_2" ) # patch for kimi-vl if getattr(actor_model_config, "model_type", None) == "kimi_vl": actor_model_config.text_config.topk_method = "greedy" self.generation_config = get_generation_config(local_path, trust_remote_code=trust_remote_code) override_config_kwargs = { "bos_token_id": self.tokenizer.bos_token_id, "eos_token_id": self.tokenizer.eos_token_id, "pad_token_id": self.tokenizer.pad_token_id, } override_config_kwargs.update(override_model_config) update_model_config(actor_model_config, override_config_kwargs=override_config_kwargs) if self.rank == 0: print(f"Model config after override: {actor_model_config}") # NOTE(fix me): tie_word_embedding causes meta_tensor init to hang init_context = get_init_weight_context_manager( use_meta_tensor=not actor_model_config.tie_word_embeddings, mesh=self.device_mesh ) with init_context(), warnings.catch_warnings(): warnings.simplefilter("ignore") if type(actor_model_config) in AutoModelForVision2Seq._model_mapping.keys(): actor_module_class = AutoModelForVision2Seq else: actor_module_class = AutoModelForCausalLM actor_module = actor_module_class.from_pretrained( pretrained_model_name_or_path=local_path, torch_dtype=torch_dtype, config=actor_model_config, trust_remote_code=trust_remote_code, ) # Apply Liger kernel to the model if use_liger is set to True if use_liger: from liger_kernel.transformers.monkey_patch import _apply_liger_kernel_to_instance _apply_liger_kernel_to_instance(model=actor_module) fused_kernel_options = self.config.model.get("fused_kernel_options", None) fused_kernels_backend = ( fused_kernel_options.get("impl_backend", None) if fused_kernel_options is not None else None ) apply_monkey_patch( model=actor_module, use_remove_padding=use_remove_padding, ulysses_sp_size=self.ulysses_sequence_parallel_size, use_fused_kernels=use_fused_kernels, fused_kernels_backend=fused_kernels_backend, ) # some parameters may not in torch_dtype. 
TODO(zhangchi.usc1992) remove this after we switch to fsdp2 actor_module.to(torch_dtype) if enable_gradient_checkpointing: actor_module.gradient_checkpointing_enable(gradient_checkpointing_kwargs={"use_reentrant": False}) if self._is_lora: print("Applying LoRA to actor module") actor_module.enable_input_require_grads() # Convert config to regular Python types before creating PEFT model lora_config = { "task_type": TaskType.CAUSAL_LM, "r": self.config.model.lora_rank, "lora_alpha": self.config.model.lora_alpha, "target_modules": convert_to_regular_types(self.config.model.target_modules), "exclude_modules": convert_to_regular_types(self.config.model.exclude_modules), "bias": "none", } actor_module = get_peft_model(actor_module, LoraConfig(**lora_config)) torch.distributed.barrier() if self.rank == 0: print_model_size(actor_module) log_gpu_memory_usage(f"After init {role} from HF AutoModel", logger=logger) # We wrap FSDP for rollout as well mixed_precision_config = fsdp_config.get("mixed_precision", None) if mixed_precision_config is not None: param_dtype = PrecisionType.to_dtype(mixed_precision_config.get("param_dtype", "bf16")) reduce_dtype = PrecisionType.to_dtype(mixed_precision_config.get("reduce_dtype", "fp32")) buffer_dtype = PrecisionType.to_dtype(mixed_precision_config.get("buffer_dtype", "fp32")) else: param_dtype = torch.bfloat16 reduce_dtype = torch.float32 buffer_dtype = torch.float32 mixed_precision = MixedPrecision(param_dtype=param_dtype, reduce_dtype=reduce_dtype, buffer_dtype=buffer_dtype) auto_wrap_policy = get_fsdp_wrap_policy( module=actor_module, config=fsdp_config.get("wrap_policy", None), is_lora=self.config.model.get("lora_rank", 0) > 0, ) if self._is_rollout and self.config.rollout.name == "hf": # TODO(zhangchi.usc1992, shengguangming) fix me. Current, auto_wrap_policy causes HFRollout to hang in Gemma auto_wrap_policy = None if self.rank == 0: print(f"wrap_policy: {auto_wrap_policy}") fsdp_mesh = self.device_mesh sharding_strategy = get_sharding_strategy(fsdp_mesh) # TODO: add transformer policy # We force reference policy to use CPUOffload to save memory. 
# We force turn off CPUOffload for actor because it causes incorrect results when using grad accumulation cpu_offload = None if role == "actor" else CPUOffload(offload_params=True) fsdp_strategy = self.config.actor.strategy if fsdp_strategy == "fsdp": actor_module_fsdp = FSDP( actor_module, cpu_offload=cpu_offload, param_init_fn=init_fn, auto_wrap_policy=auto_wrap_policy, device_id=get_device_id(), sharding_strategy=sharding_strategy, # zero3 mixed_precision=mixed_precision, sync_module_states=True, device_mesh=self.device_mesh, use_orig_params=self.config.actor.fsdp_config.get("use_orig_params", False), forward_prefetch=self.config.actor.fsdp_config.get("forward_prefetch", False), ) elif fsdp_strategy == "fsdp2": assert CPUOffloadPolicy is not None, "PyTorch version >= 2.4 is required for using fully_shard API (FSDP2)" mp_policy = MixedPrecisionPolicy( param_dtype=param_dtype, reduce_dtype=reduce_dtype, cast_forward_inputs=True ) if role == "actor" and fsdp_config.offload_policy: cpu_offload = CPUOffloadPolicy(pin_memory=True) self._is_offload_param = False self._is_offload_optimizer = False else: cpu_offload = None if role == "actor" else CPUOffloadPolicy(pin_memory=True) fsdp_kwargs = { "mesh": fsdp_mesh, "mp_policy": mp_policy, "offload_policy": cpu_offload, "reshard_after_forward": fsdp_config.reshard_after_forward, } full_state = actor_module.state_dict() apply_fsdp2(actor_module, fsdp_kwargs, fsdp_config) fsdp2_load_full_state_dict(actor_module, full_state, fsdp_mesh, cpu_offload) actor_module_fsdp = actor_module else: raise NotImplementedError(f"not implement {fsdp_strategy}") if enable_activation_offload: enable_activation_offloading(actor_module_fsdp, fsdp_strategy, enable_gradient_checkpointing) log_gpu_memory_usage(f"After {role} FSDP init", logger=logger) # TODO: add more optimizer args into config if role == "actor" and optim_config is not None: from verl.utils.torch_functional import get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup actor_optimizer = optim.AdamW( actor_module_fsdp.parameters(), lr=optim_config.lr, betas=optim_config.get("betas", (0.9, 0.999)), weight_decay=optim_config.get("weight_decay", 1e-2), ) total_steps = optim_config.get("total_training_steps", 0) num_warmup_steps = int(optim_config.get("lr_warmup_steps", -1)) warmup_style = optim_config.get("warmup_style", "constant") min_lr_ratio = optim_config.get("min_lr_ratio", 0.0) num_cycles = optim_config.get("num_cycles", 0.5) if num_warmup_steps < 0: num_warmup_steps_ratio = optim_config.get("lr_warmup_steps_ratio", 0.0) num_warmup_steps = int(num_warmup_steps_ratio * total_steps) if self.rank == 0: print(f"Total steps: {total_steps}, num_warmup_steps: {num_warmup_steps}") if warmup_style == "constant": actor_lr_scheduler = get_constant_schedule_with_warmup( optimizer=actor_optimizer, num_warmup_steps=num_warmup_steps ) elif warmup_style == "cosine": actor_lr_scheduler = get_cosine_schedule_with_warmup( optimizer=actor_optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=total_steps, min_lr_ratio=min_lr_ratio, num_cycles=num_cycles, ) else: raise NotImplementedError(f"Warmup style {warmup_style} is not supported") log_gpu_memory_usage(f"After {role} optimizer init", logger=logger) else: actor_optimizer = None actor_lr_scheduler = None return actor_module_fsdp, actor_optimizer, actor_lr_scheduler, actor_model_config def _build_rollout(self, trust_remote_code=False): from torch.distributed.device_mesh import init_device_mesh # TODO(sgm): support FSDP hybrid shard for larger model 
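        # Illustrative shapes (hypothetical sizes): with world_size=8 and
        # rollout.tensor_model_parallel_size=2, the mesh built below is
        # (dp=4, infer_tp=2), i.e. four data-parallel replicas of a 2-way
        # tensor-parallel inference engine; the assert below enforces that
        # world_size is divisible by infer_tp.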
infer_tp = self.config.rollout.tensor_model_parallel_size dp = self.world_size // infer_tp assert self.world_size % infer_tp == 0, ( f"rollout world_size: {self.world_size} is not divisible by infer_tp: {infer_tp}" ) rollout_device_mesh = init_device_mesh( device_name, mesh_shape=(dp, infer_tp), mesh_dim_names=["dp", "infer_tp"] ) rollout_name = self.config.rollout.name if rollout_name == "hf": from verl.workers.rollout import HFRollout from verl.workers.sharding_manager.base import BaseShardingManager rollout = HFRollout(module=self.actor_module_fsdp, config=self.config.rollout) rollout_sharding_manager = BaseShardingManager() # TODO: a sharding manager that do nothing? elif rollout_name == "vllm": from verl.workers.rollout.vllm_rollout import vLLMRollout from verl.workers.sharding_manager.fsdp_vllm import FSDPVLLMShardingManager log_gpu_memory_usage(f"Before building {rollout_name} rollout", logger=logger) local_path = copy_to_local(self.config.model.path, use_shm=self.config.model.get("use_shm", False)) lora_kwargs = ( {"lora_kwargs": {"enable_lora": True, "max_loras": 1, "max_lora_rank": self._lora_rank}} if self._is_lora else {} ) # lora_kwargs = {} from verl.workers.rollout.vllm_rollout import vLLMAsyncRollout vllm_rollout_cls = vLLMRollout if self.config.rollout.mode == "sync" else vLLMAsyncRollout rollout = vllm_rollout_cls( model_path=local_path, config=self.config.rollout, tokenizer=self.tokenizer, model_hf_config=self.actor_model_config, device_mesh=rollout_device_mesh, trust_remote_code=trust_remote_code, **lora_kwargs, ) log_gpu_memory_usage(f"After building {rollout_name} rollout", logger=logger) full_params = torch.distributed.get_world_size() == 1 rollout_sharding_manager = FSDPVLLMShardingManager( module=self.actor_module_fsdp, inference_engine=rollout.inference_engine, model_config=self.actor_model_config, rollout_config=self.config.rollout, full_params=full_params, device_mesh=rollout_device_mesh, offload_param=self._is_offload_param, load_format=self.config.rollout.load_format, layered_summon=self.config.rollout.get("layered_summon", False), ) log_gpu_memory_usage("After building sharding manager", logger=logger) elif rollout_name == "sglang": from verl.workers.rollout.sglang_rollout import SGLangRollout # NOTE(linjunrong): Due to recent fp8 support in SGLang. Now importing any symbol relate to # SGLang's model_runner would check CUDA device capability. However, due to verl's setting, # the main process of ray can not find any CUDA device, which would potentially lead to: # "RuntimeError: No CUDA GPUs are available". # For this reason, sharding_manager.__init__ should not import FSDPSGLangShardingManager and # we import it here use the abs path. 
# check: https://github.com/sgl-project/sglang/blob/00f42707eaddfc2c0528e5b1e0094025c640b7a0/python/sglang/srt/layers/quantization/fp8_utils.py#L76 from verl.workers.sharding_manager.fsdp_sglang import FSDPSGLangShardingManager local_path = copy_to_local(self.config.model.path) log_gpu_memory_usage(f"Before building {rollout_name} rollout", logger=logger) rollout = SGLangRollout( actor_module=local_path, config=self.config.rollout, processing_class=self.processor if self.processor is not None else self.tokenizer, model_hf_config=self.actor_model_config, trust_remote_code=trust_remote_code, ) log_gpu_memory_usage(f"After building {rollout_name} rollout", logger=logger) if torch.distributed.get_world_size() == 1: self.config.rollout.load_format = "dummy_hf" rollout_sharding_manager = FSDPSGLangShardingManager( module=self.actor_module_fsdp, inference_engine=rollout._engine, model_config=self.actor_model_config, rollout_config=self.config.rollout, full_params="hf" in self.config.rollout.load_format, device_mesh=rollout_device_mesh, offload_param=self._is_offload_param, multi_stage_wake_up=self.config.rollout.multi_stage_wake_up, ) log_gpu_memory_usage("After building sharding manager", logger=logger) else: raise NotImplementedError(f"Rollout name: {self.config.rollout.name} is not supported") return rollout, rollout_sharding_manager @register(dispatch_mode=Dispatch.ONE_TO_ALL) def init_model(self): from verl.workers.actor import DataParallelPPOActor # This is used to import external_lib into the huggingface systems import_external_libs(self.config.model.get("external_lib", None)) override_model_config = OmegaConf.to_container(self.config.model.get("override_config", OmegaConf.create())) use_remove_padding = self.config.model.get("use_remove_padding", False) use_shm = self.config.model.get("use_shm", False) use_fused_kernels = self.config.model.get("use_fused_kernels", False) if self._is_actor or self._is_rollout: # we need the model for actor and rollout if self._is_actor: optim_config = self.config.actor.optim fsdp_config = self.config.actor.fsdp_config else: optim_config = None fsdp_config = OmegaConf.create() local_path = copy_to_local(self.config.model.path, use_shm=use_shm) ( self.actor_module_fsdp, self.actor_optimizer, self.actor_lr_scheduler, self.actor_model_config, ) = self._build_model_optimizer( model_path=local_path, fsdp_config=fsdp_config, optim_config=optim_config, override_model_config=override_model_config, use_remove_padding=use_remove_padding, use_fused_kernels=use_fused_kernels, enable_gradient_checkpointing=self.config.model.get("enable_gradient_checkpointing", False), trust_remote_code=self.config.model.get("trust_remote_code", False), use_liger=self.config.model.get("use_liger", False), role="actor", enable_activation_offload=self.config.model.get("enable_activation_offload", False), ) # get the original unwrapped module if fsdp_version(self.actor_module_fsdp) == 1: self.actor_module = self.actor_module_fsdp._fsdp_wrapped_module if self._is_offload_param: offload_fsdp_model_to_cpu(self.actor_module_fsdp) log_gpu_memory_usage("After offload actor model during init", logger=logger) if self._is_offload_optimizer: offload_fsdp_optimizer(optimizer=self.actor_optimizer) log_gpu_memory_usage("After offload actor optimizer during init", logger=logger) if self._is_actor: OmegaConf.set_struct(self.config.actor, True) with open_dict(self.config.actor): self.config.actor.use_remove_padding = use_remove_padding self.config.actor.use_fused_kernels = use_fused_kernels self.actor = 
DataParallelPPOActor( config=self.config.actor, actor_module=self.actor_module_fsdp, actor_optimizer=self.actor_optimizer ) if self._is_rollout: self.rollout, self.rollout_sharding_manager = self._build_rollout( trust_remote_code=self.config.model.get("trust_remote_code", False) ) if self._is_ref: local_path = copy_to_local(self.config.model.path, use_shm=use_shm) self.ref_module_fsdp = self._build_model_optimizer( model_path=local_path, fsdp_config=self.config.ref.fsdp_config, optim_config=None, override_model_config=override_model_config, use_remove_padding=use_remove_padding, use_fused_kernels=use_fused_kernels, trust_remote_code=self.config.model.get("trust_remote_code", False), use_liger=self.config.model.get("use_liger", False), role="ref", )[0] OmegaConf.set_struct(self.config.ref, True) with open_dict(self.config.ref): self.config.ref.use_remove_padding = use_remove_padding self.config.ref.use_fused_kernels = use_fused_kernels self.ref_policy = DataParallelPPOActor(config=self.config.ref, actor_module=self.ref_module_fsdp) if self._is_actor: self.flops_counter = FlopsCounter(self.actor_model_config) self.checkpoint_manager = FSDPCheckpointManager( model=self.actor_module_fsdp, optimizer=self.actor.actor_optimizer, lr_scheduler=self.actor_lr_scheduler, processing_class=self.processor if self.processor is not None else self.tokenizer, checkpoint_config=self.config.actor.checkpoint, ) if not self._is_actor and self._is_rollout: # If ActorRolloutRefWorker is initialized as a standalone rollout, # create a checkpoint manager for FSDP model to allow loading FSDP checkpoints for rollout. checkpoint_contents = OmegaConf.create({"load_contents": ["model"], "save_contents": []}) self.checkpoint_manager = FSDPCheckpointManager( model=self.actor_module_fsdp, optimizer=None, lr_scheduler=None, processing_class=self.processor if self.processor is not None else self.tokenizer, checkpoint_config=checkpoint_contents, ) @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO) @DistProfiler.annotate(color="red", role="actor_update") def update_actor(self, data: DataProto): # Support all hardwares data = data.to(get_device_id()) assert self._is_actor if self._is_offload_param: load_fsdp_model_to_gpu(self.actor_module_fsdp) if self._is_offload_optimizer: load_fsdp_optimizer(optimizer=self.actor_optimizer, device_id=get_device_id()) with self.ulysses_sharding_manager: data = self.ulysses_sharding_manager.preprocess_data(data=data) # perform training with Timer(name="update_policy", logger=None) as timer: metrics = self.actor.update_policy(data=data) delta_time = timer.last global_num_tokens = data.meta_info["global_token_num"] estimated_flops, promised_flops = self.flops_counter.estimate_flops(global_num_tokens, delta_time) metrics["perf/mfu/actor"] = ( estimated_flops * self.config.actor.ppo_epochs / promised_flops / self.world_size ) metrics["perf/max_memory_allocated_gb"] = get_torch_device().max_memory_allocated() / (1024**3) metrics["perf/max_memory_reserved_gb"] = get_torch_device().max_memory_reserved() / (1024**3) metrics["perf/cpu_memory_used_gb"] = psutil.virtual_memory().used / (1024**3) lr = self.actor_lr_scheduler.get_last_lr()[0] metrics["actor/lr"] = lr self.actor_lr_scheduler.step() # TODO: here, we should return all metrics output = DataProto(meta_info={"metrics": metrics}) output = self.ulysses_sharding_manager.postprocess_data(data=output) output = output.to("cpu") if self._is_offload_param: offload_fsdp_model_to_cpu(self.actor_module_fsdp) log_gpu_memory_usage("After offload actor model 
during update_actor", logger=logger) if self._is_offload_optimizer: offload_fsdp_optimizer(optimizer=self.actor_optimizer) log_gpu_memory_usage("After offload actor optimizer during update_actor", logger=logger) return output @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO) @DistProfiler.annotate(color="red", role="rollout_generate") def generate_sequences(self, prompts: DataProto): # Support all hardwares prompts = prompts.to(get_device_id()) assert self._is_rollout meta_info = { "eos_token_id": self.generation_config.eos_token_id if self.generation_config is not None else self.tokenizer.eos_token_id, "pad_token_id": self.generation_config.pad_token_id if self.generation_config is not None else self.tokenizer.pad_token_id, } prompts.meta_info.update(meta_info) timing_generate = {} with self.rollout_sharding_manager: log_gpu_memory_usage("After entering rollout sharding manager", logger=logger) prompts = self.rollout_sharding_manager.preprocess_data(prompts) with simple_timer("generate_sequences", timing_generate): output = self.rollout.generate_sequences(prompts=prompts) log_gpu_memory_usage("After rollout generation", logger=logger) output = self.rollout_sharding_manager.postprocess_data(output) timing_generate.update(self.rollout_sharding_manager.timing) # We calculate the average timing across all ranks # to make sure meta_info["timing"] is the same timing_generate = reduce_timing(timing_generate) output.meta_info["timing"] = timing_generate output = output.to("cpu") # clear kv cache get_torch_device().empty_cache() return output @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO) @DistProfiler.annotate(color="blue", role="actor_compute_log_prob") def compute_log_prob(self, data: DataProto): # when is_lora is True, we use the actor without lora applied to calculate the log_prob # which is mostly used for ref log_prob calculation assert self._is_actor if self._is_offload_param: load_fsdp_model_to_gpu(self.actor_module_fsdp) # Support all hardwares from contextlib import nullcontext is_lora = data.meta_info.pop("is_lora", False) adapter_ctx = self.actor.actor_module.disable_adapter() if is_lora else nullcontext() data = data.to(get_device_id()) # we should always recompute old_log_probs when it is HybridEngine data.meta_info["micro_batch_size"] = self.config.rollout.log_prob_micro_batch_size_per_gpu data.meta_info["max_token_len"] = self.config.rollout.log_prob_max_token_len_per_gpu data.meta_info["use_dynamic_bsz"] = self.config.rollout.log_prob_use_dynamic_bsz data.meta_info["temperature"] = self.config.rollout.temperature # perform recompute log_prob with self.ulysses_sharding_manager: data = self.ulysses_sharding_manager.preprocess_data(data) with adapter_ctx: output, entropys = self.actor.compute_log_prob(data=data, calculate_entropy=True) output = DataProto.from_dict( tensors={"old_log_probs": output, "entropys": entropys}, meta_info={"temperature": self.config.rollout.temperature}, ) output = self.ulysses_sharding_manager.postprocess_data(output) output = output.to("cpu") # https://pytorch.org/docs/stable/notes/fsdp.html#fsdp-notes # unshard the root FSDP module if self.world_size > 1 and fsdp_version(self.actor.actor_module) == 1: self.actor.actor_module._handle.reshard(True) if self._is_offload_param: offload_fsdp_model_to_cpu(self.actor_module_fsdp) log_gpu_memory_usage("After offload actor model during compute_log_prob", logger=logger) return output @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO) @DistProfiler.annotate(color="olive", role="ref_compute_log_prob") def 
compute_ref_log_prob(self, data: DataProto): if self._is_lora: # if _is_lora, actor without lora applied is the ref data.meta_info["is_lora"] = True data = self.compute_log_prob(data) # this old_log_probs is in fact ref_log_prob data = DataProto.from_dict(tensors={"ref_log_prob": data.batch["old_log_probs"]}) return data assert self._is_ref # else: # otherwise, the class have a standalone ref model # Support all hardwares data = data.to(get_device_id()) micro_batch_size = self.config.ref.log_prob_micro_batch_size_per_gpu data.meta_info["micro_batch_size"] = micro_batch_size data.meta_info["temperature"] = self.config.rollout.temperature data.meta_info["max_token_len"] = self.config.ref.log_prob_max_token_len_per_gpu data.meta_info["use_dynamic_bsz"] = self.config.ref.log_prob_use_dynamic_bsz with self.ulysses_sharding_manager: data = self.ulysses_sharding_manager.preprocess_data(data) output, _ = self.ref_policy.compute_log_prob(data=data, calculate_entropy=False) output = DataProto.from_dict(tensors={"ref_log_prob": output}) output = self.ulysses_sharding_manager.postprocess_data(output) output = output.to("cpu") # https://pytorch.org/docs/stable/notes/fsdp.html#fsdp-notes # unshard the root FSDP module if self.world_size > 1 and fsdp_version(self.ref_policy.actor_module) == 1: self.ref_policy.actor_module._handle.reshard(True) return output @register(dispatch_mode=Dispatch.ONE_TO_ALL) def save_checkpoint(self, local_path, hdfs_path=None, global_step=0, max_ckpt_to_keep=None): from verl.utils.logger import log_with_rank # only support save and load ckpt for actor assert self._is_actor if self._is_offload_param: load_fsdp_model_to_gpu(self.actor_module_fsdp) self.checkpoint_manager.save_checkpoint( local_path=local_path, hdfs_path=hdfs_path, global_step=global_step, max_ckpt_to_keep=max_ckpt_to_keep ) dist.barrier() if self._is_lora and hasattr(getattr(self, "actor_module", self.actor_module_fsdp), "peft_config"): lora_save_path = os.path.join(local_path, "lora_adapter") peft_model = getattr(self, "actor_module", self.actor_module_fsdp) peft_config = {} if dist.get_rank() == 0: os.makedirs(lora_save_path, exist_ok=True) peft_config = asdict(peft_model.peft_config.get("default", {})) peft_config["task_type"] = peft_config["task_type"].value peft_config["peft_type"] = peft_config["peft_type"].value peft_config["target_modules"] = list(peft_config["target_modules"]) try: if fsdp_version(self.actor_module_fsdp) > 0: self.actor_module_fsdp = self.actor_module_fsdp.to(get_device_name()) lora_params = layered_summon_lora_params(self.actor_module_fsdp) if dist.get_rank() == 0: save_file(lora_params, os.path.join(lora_save_path, "adapter_model.safetensors")) with open(os.path.join(lora_save_path, "adapter_config.json"), "w", encoding="utf-8") as f: json.dump(peft_config, f, ensure_ascii=False, indent=4) except Exception as e: log_with_rank( f"Save LoRA Adapter Error ({e})", rank=dist.get_rank(), logger=logger, log_only_rank_0=True ) dist.barrier() log_with_rank( f"[rank-{self.rank}]: Saved LoRA adapter to: {lora_save_path}", rank=dist.get_rank(), logger=logger, log_only_rank_0=True, ) if self._is_offload_param: offload_fsdp_model_to_cpu(self.actor_module_fsdp) @register(dispatch_mode=Dispatch.ONE_TO_ALL) def load_checkpoint(self, local_path, hdfs_path=None, del_local_after_load=False): assert self._is_actor or (not self._is_actor and self._is_rollout), ( f"Checkpoint loading is only supported for Actor or standalone Rollout Workers, but got " f"{self._is_actor} and {self._is_rollout}" ) if 
self._is_offload_param: load_fsdp_model_to_gpu(self.actor_module_fsdp) self.checkpoint_manager.load_checkpoint( local_path=local_path, hdfs_path=hdfs_path, del_local_after_load=del_local_after_load ) if self._is_offload_param: offload_fsdp_model_to_cpu(self.actor_module_fsdp) if self._is_offload_optimizer: offload_fsdp_optimizer(self.actor_optimizer) @register(dispatch_mode=Dispatch.ONE_TO_ALL) def start_profile(self, **kwargs) -> None: """Start profiling for the current rank in the current training step.""" self.profiler.start(**kwargs) @register(dispatch_mode=Dispatch.ONE_TO_ALL) def stop_profile(self) -> None: """Stop profiling for the current rank in the current training step.""" self.profiler.stop() class CriticWorker(Worker, DistProfilerExtension): def __init__(self, config): Worker.__init__(self) DistProfilerExtension.__init__( self, DistProfiler(rank=self.rank, config=omega_conf_to_dataclass(config.get("profiler"))) ) import torch.distributed if not torch.distributed.is_initialized(): torch.distributed.init_process_group( backend=get_nccl_backend(), init_method=os.environ.get("DIST_INIT_METHOD", None) ) self.config = config # build device mesh for Ulysses Sequence Parallel world_size = torch.distributed.get_world_size() from torch.distributed.device_mesh import init_device_mesh fsdp_size = self.config.model.fsdp_config.fsdp_size self.device_mesh = create_device_mesh(world_size=world_size, fsdp_size=fsdp_size) self.ulysses_device_mesh = None self.ulysses_sequence_parallel_size = self.config.get("ulysses_sequence_parallel_size", 1) dp = world_size // self.ulysses_sequence_parallel_size if self.ulysses_sequence_parallel_size > 1: self.ulysses_device_mesh = init_device_mesh( device_name, mesh_shape=(dp, self.ulysses_sequence_parallel_size), mesh_dim_names=["dp", "sp"] ) self.ulysses_sharding_manager = FSDPUlyssesShardingManager(self.ulysses_device_mesh) # set FSDP offload params self._is_offload_param = self.config.model.fsdp_config.param_offload self._is_offload_optimizer = self.config.model.fsdp_config.optimizer_offload # normalize config self.config.ppo_mini_batch_size *= self.config.rollout_n self.config.ppo_mini_batch_size //= torch.distributed.get_world_size() // self.ulysses_sequence_parallel_size if self.config.ppo_micro_batch_size is not None: self.config.ppo_micro_batch_size //= ( torch.distributed.get_world_size() // self.ulysses_sequence_parallel_size ) self.config.forward_micro_batch_size //= ( torch.distributed.get_world_size() // self.ulysses_sequence_parallel_size ) self.config.ppo_micro_batch_size_per_gpu = self.config.ppo_micro_batch_size self.config.forward_micro_batch_size_per_gpu = self.config.forward_micro_batch_size if self.config.ppo_micro_batch_size_per_gpu is not None: assert self.config.ppo_mini_batch_size % self.config.ppo_micro_batch_size_per_gpu == 0, ( f"normalized ppo_mini_batch_size {self.config.ppo_mini_batch_size} should be divisible by " f"ppo_micro_batch_size_per_gpu {self.config.ppo_micro_batch_size_per_gpu}" ) assert self.config.ppo_mini_batch_size // self.config.ppo_micro_batch_size_per_gpu > 0, ( f"normalized ppo_mini_batch_size {self.config.ppo_mini_batch_size} should be larger than " f"ppo_micro_batch_size_per_gpu {self.config.ppo_micro_batch_size_per_gpu}" ) self._is_lora = self.config.model.get("lora_rank", 0) > 0 def _build_critic_model_optimizer(self, config): # the following line is necessary from torch import optim from torch.distributed.fsdp import MixedPrecision from verl.utils.model import load_valuehead_model, print_model_size from 
verl.utils.torch_dtypes import PrecisionType use_shm = config.model.get("use_shm", False) local_path = copy_to_local(config.model.path, use_shm=use_shm) # note that the tokenizer between actor and critic may be different. So override tokenizer info with actor info # using random initialized model from any architecture. May not be the same as Actor. tokenizer_path = copy_to_local(config.model.tokenizer_path, use_shm=use_shm) self.tokenizer = hf_tokenizer(tokenizer_path, trust_remote_code=config.model.get("trust_remote_code", False)) self.processor = hf_processor(tokenizer_path, trust_remote_code=config.model.get("trust_remote_code", False)) if self.config.model.get("custom_chat_template", None) is not None: if self.processor is not None: self.processor.chat_template = self.config.model.custom_chat_template else: self.tokenizer.chat_template = self.config.model.custom_chat_template override_config = OmegaConf.to_container(self.config.model.get("override_config", OmegaConf.create())) override_config_kwargs = { "bos_token_id": self.tokenizer.bos_token_id, "eos_token_id": self.tokenizer.eos_token_id, "pad_token_id": self.tokenizer.pad_token_id, } override_config_kwargs.update(override_config) if self.rank == 0: print(f"Critic overriding config {override_config_kwargs}") torch_dtype = self.config.model.fsdp_config.get("model_dtype", "fp32") torch_dtype = PrecisionType.to_dtype(torch_dtype) from transformers import AutoConfig critic_model_config = AutoConfig.from_pretrained( local_path, attn_implementation="flash_attention_2", trust_remote_code=config.model.get("trust_remote_code", False), ) critic_model_config.num_labels = 1 # patch for kimi-vl if getattr(critic_model_config, "model_type", None) == "kimi_vl": critic_model_config.text_config.topk_method = "greedy" init_context = get_init_weight_context_manager( use_meta_tensor=not critic_model_config.tie_word_embeddings, mesh=self.device_mesh ) with init_context(), warnings.catch_warnings(): warnings.simplefilter("ignore") critic_model_config.classifier_dropout = 0.0 critic_model_config.hidden_dropout = "0" critic_model_config.summary_dropout_prob = 0.0 critic_module = load_valuehead_model( local_path, torch_dtype, critic_model_config, config.model.get("trust_remote_code", False), ) use_remove_padding = config.model.get("use_remove_padding", False) apply_monkey_patch( model=critic_module, use_remove_padding=use_remove_padding, ulysses_sp_size=self.ulysses_sequence_parallel_size, ) # some parameters may not in torch_dtype critic_module.to(torch_dtype) if config.model.get("enable_gradient_checkpointing", False): critic_module.gradient_checkpointing_enable(gradient_checkpointing_kwargs={"use_reentrant": False}) if self._is_lora: print("Applying LoRA to critic module") critic_module.enable_input_require_grads() # Convert config to regular Python types before creating PEFT model lora_config = { "task_type": TaskType.CAUSAL_LM, "r": self.config.model.lora_rank, "lora_alpha": self.config.model.lora_alpha, "target_modules": convert_to_regular_types(self.config.model.target_modules), "bias": "none", } critic_module = get_peft_model(critic_module, LoraConfig(**lora_config)) if self.rank == 0: print_model_size(critic_module) self.critic_model_config = critic_model_config fsdp_config = self.config.model.fsdp_config mixed_precision_config = fsdp_config.get("mixed_precision", None) if mixed_precision_config is not None: param_dtype = PrecisionType.to_dtype(mixed_precision_config.get("param_dtype", "bf16")) reduce_dtype = 
PrecisionType.to_dtype(mixed_precision_config.get("reduce_dtype", "fp32")) buffer_dtype = PrecisionType.to_dtype(mixed_precision_config.get("buffer_dtype", "fp32")) else: param_dtype = torch.bfloat16 reduce_dtype = torch.float32 buffer_dtype = torch.float32 mixed_precision = MixedPrecision(param_dtype=param_dtype, reduce_dtype=reduce_dtype, buffer_dtype=buffer_dtype) auto_wrap_policy = get_fsdp_wrap_policy( module=critic_module, config=self.config.model.fsdp_config.wrap_policy, is_lora=self.config.model.get("lora_rank", 0) > 0, ) log_gpu_memory_usage("Before critic FSDP", logger=None) fsdp_mesh = self.device_mesh sharding_strategy = get_sharding_strategy(fsdp_mesh) # Note: We force turn off CPUOffload for critic because it causes incorrect results when using grad accumulation if config.strategy == "fsdp": critic_module = FSDP( critic_module, param_init_fn=init_fn, use_orig_params=False, auto_wrap_policy=auto_wrap_policy, device_id=get_device_id(), sharding_strategy=sharding_strategy, mixed_precision=mixed_precision, sync_module_states=True, forward_prefetch=self.config.model.fsdp_config.forward_prefetch, device_mesh=self.device_mesh, cpu_offload=None, ) elif config.strategy == "fsdp2": assert CPUOffloadPolicy is not None, "PyTorch version >= 2.4 is required for using fully_shard API (FSDP2)" mp_policy = MixedPrecisionPolicy( param_dtype=param_dtype, reduce_dtype=reduce_dtype, cast_forward_inputs=True ) offload_policy = None if fsdp_config.offload_policy: self._is_offload_param = False self._is_offload_optimizer = False offload_policy = CPUOffloadPolicy(pin_memory=True) fsdp_kwargs = { "mesh": fsdp_mesh, "mp_policy": mp_policy, "offload_policy": offload_policy, "reshard_after_forward": fsdp_config.reshard_after_forward, } full_state = critic_module.state_dict() apply_fsdp2(critic_module, fsdp_kwargs, fsdp_config) fsdp2_load_full_state_dict(critic_module, full_state, fsdp_mesh, offload_policy) else: raise NotImplementedError(f"Unknown strategy {config.strategy}") if config.model.get("enable_activation_offload", False): enable_gradient_checkpointing = config.model.get("enable_gradient_checkpointing", False) enable_activation_offloading(critic_module, config.strategy, enable_gradient_checkpointing) log_gpu_memory_usage("After critic FSDP", logger=None) critic_optimizer = optim.AdamW( critic_module.parameters(), lr=config.optim.lr, betas=config.optim.get("betas", (0.9, 0.999)), weight_decay=config.optim.get("weight_decay", 1e-2), ) total_steps = config.optim.get("total_training_steps", 0) num_warmup_steps = int(config.optim.get("lr_warmup_steps", -1)) warmup_style = config.optim.get("warmup_style", "constant") if num_warmup_steps < 0: num_warmup_steps_ratio = config.optim.get("lr_warmup_steps_ratio", 0.0) num_warmup_steps = int(num_warmup_steps_ratio * total_steps) if self.rank == 0: print(f"Total steps: {total_steps}, num_warmup_steps: {num_warmup_steps}") from verl.utils.torch_functional import get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup if warmup_style == "constant": critic_lr_scheduler = get_constant_schedule_with_warmup( optimizer=critic_optimizer, num_warmup_steps=num_warmup_steps ) elif warmup_style == "cosine": critic_lr_scheduler = get_cosine_schedule_with_warmup( optimizer=critic_optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=total_steps ) else: raise NotImplementedError(f"Warmup style {warmup_style} is not supported") return critic_module, critic_optimizer, critic_lr_scheduler @register(dispatch_mode=Dispatch.ONE_TO_ALL) def init_model(self): 
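        # NOTE: init_model is dispatched ONE_TO_ALL, so every worker rank runs it
        # once and builds its own shard of the critic, optimizer, and lr scheduler
        # via _build_critic_model_optimizer above.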
# This is used to import external_lib into the huggingface systems import_external_libs(self.config.model.get("external_lib", None)) from verl.workers.critic import DataParallelPPOCritic self.critic_module, self.critic_optimizer, self.critic_lr_scheduler = self._build_critic_model_optimizer( self.config ) if self._is_offload_param: offload_fsdp_model_to_cpu(self.critic_module) log_gpu_memory_usage("After offload critic model during init", logger=logger) if self._is_offload_optimizer: offload_fsdp_optimizer(optimizer=self.critic_optimizer) log_gpu_memory_usage("After offload critic optimizer during init", logger=logger) self.critic = DataParallelPPOCritic( config=self.config, critic_module=self.critic_module, critic_optimizer=self.critic_optimizer ) self.flops_counter = FlopsCounter(self.critic_model_config) self.checkpoint_manager = FSDPCheckpointManager( model=self.critic_module, optimizer=self.critic_optimizer, lr_scheduler=self.critic_lr_scheduler, processing_class=self.processor if self.processor is not None else self.tokenizer, checkpoint_config=self.config.checkpoint, ) @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO) @DistProfiler.annotate(color="cyan") def compute_values(self, data: DataProto): # Support all hardwares data = data.to(get_device_id()) if self._is_offload_param: load_fsdp_model_to_gpu(self.critic_module) micro_batch_size = self.config.forward_micro_batch_size_per_gpu data.meta_info["micro_batch_size"] = micro_batch_size data.meta_info["max_token_len"] = self.config.forward_max_token_len_per_gpu data.meta_info["use_dynamic_bsz"] = self.config.use_dynamic_bsz # perform forward computation with self.ulysses_sharding_manager: data = self.ulysses_sharding_manager.preprocess_data(data=data) values = self.critic.compute_values(data=data) output = DataProto.from_dict(tensors={"values": values}) output = self.ulysses_sharding_manager.postprocess_data(data=output) output = output.to("cpu") if self._is_offload_param: offload_fsdp_model_to_cpu(self.critic_module) return output @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO) @DistProfiler.annotate(color="pink") def update_critic(self, data: DataProto): # Support all hardwares data = data.to(get_device_id()) if self._is_offload_param: load_fsdp_model_to_gpu(self.critic_module) if self._is_offload_optimizer: load_fsdp_optimizer(optimizer=self.critic_optimizer, device_id=get_device_id()) # perform forward computation with self.ulysses_sharding_manager: data = self.ulysses_sharding_manager.preprocess_data(data=data) with Timer(name="update_critic", logger=None) as timer: metrics = self.critic.update_critic(data=data) delta_time = timer.last global_num_tokens = data.meta_info["global_token_num"] estimated_flops, promised_flops = self.flops_counter.estimate_flops(global_num_tokens, delta_time) metrics["perf/mfu/critic"] = estimated_flops * self.config.ppo_epochs / promised_flops / self.world_size lr = self.critic_lr_scheduler.get_last_lr()[0] metrics["critic/lr"] = lr self.critic_lr_scheduler.step() output = DataProto(batch=None, meta_info={"metrics": metrics}) output = self.ulysses_sharding_manager.postprocess_data(data=output) if self._is_offload_param: offload_fsdp_model_to_cpu(self.critic_module) if self._is_offload_optimizer: offload_fsdp_optimizer(optimizer=self.critic_optimizer) output = output.to("cpu") return output @register(dispatch_mode=Dispatch.ONE_TO_ALL) def save_checkpoint(self, local_path, hdfs_path=None, global_step=0, max_ckpt_to_keep=None): import torch if self._is_offload_param: 
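            # With param offload enabled, the critic weights live in host memory
            # between calls; they are paged onto the GPU here and offloaded again
            # after the forward pass, trading transfer time for free device memory.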
load_fsdp_model_to_gpu(self.critic_module) self.checkpoint_manager.save_checkpoint( local_path=local_path, hdfs_path=hdfs_path, global_step=global_step, max_ckpt_to_keep=max_ckpt_to_keep ) torch.distributed.barrier() if self._is_offload_param: offload_fsdp_model_to_cpu(self.critic_module) @register(dispatch_mode=Dispatch.ONE_TO_ALL) def load_checkpoint(self, local_path, hdfs_path=None, del_local_after_load=True): import torch if self._is_offload_param: load_fsdp_model_to_gpu(self.critic_module) self.checkpoint_manager.load_checkpoint( local_path=local_path, hdfs_path=hdfs_path, del_local_after_load=del_local_after_load ) torch.distributed.barrier() if self._is_offload_param: offload_fsdp_model_to_cpu(self.critic_module) if self._is_offload_optimizer: offload_fsdp_optimizer(self.critic_optimizer) # TODO(sgm): we may need to extract it to dp_reward_model.py class RewardModelWorker(Worker, DistProfilerExtension): """ Note that we only implement the reward model that is subclass of AutoModelForTokenClassification. """ def __init__(self, config): Worker.__init__(self) DistProfilerExtension.__init__( self, DistProfiler(rank=self.rank, config=omega_conf_to_dataclass(config.get("profiler"))) ) import torch.distributed if not torch.distributed.is_initialized(): torch.distributed.init_process_group( backend=get_nccl_backend(), init_method=os.environ.get("DIST_INIT_METHOD", None) ) self.config = config # build device mesh for Ulysses Sequence Parallel world_size = torch.distributed.get_world_size() from torch.distributed.device_mesh import init_device_mesh fsdp_size = self.config.model.fsdp_config.fsdp_size self.device_mesh = create_device_mesh(world_size=world_size, fsdp_size=fsdp_size) self.ulysses_device_mesh = None self.ulysses_sequence_parallel_size = self.config.get("ulysses_sequence_parallel_size", 1) dp = world_size // self.ulysses_sequence_parallel_size if self.ulysses_sequence_parallel_size > 1: self.ulysses_device_mesh = init_device_mesh( device_name, mesh_shape=(dp, self.ulysses_sequence_parallel_size), mesh_dim_names=["dp", "sp"] ) self.ulysses_sharding_manager = FSDPUlyssesShardingManager(self.ulysses_device_mesh) self.use_remove_padding = self.config.model.get("use_remove_padding", False) # normalize config if self.config.micro_batch_size is not None: self.config.micro_batch_size //= torch.distributed.get_world_size() self.config.micro_batch_size_per_gpu = self.config.micro_batch_size def _build_model(self, config): # the following line is necessary from torch.distributed.fsdp import CPUOffload from transformers import AutoConfig, AutoModelForTokenClassification use_shm = config.model.get("use_shm", False) # download the checkpoint from hdfs local_path = copy_to_local(config.model.path, use_shm=use_shm) if self.config.model.input_tokenizer is None: self._do_switch_chat_template = False else: self._do_switch_chat_template = True input_tokenizer_local_path = copy_to_local(config.model.input_tokenizer, use_shm=use_shm) self.input_tokenizer = hf_tokenizer( input_tokenizer_local_path, trust_remote_code=config.model.get("trust_remote_code", False) ) self.tokenizer = hf_tokenizer(local_path, trust_remote_code=config.model.get("trust_remote_code", False)) trust_remote_code = config.model.get("trust_remote_code", False) model_config = AutoConfig.from_pretrained(local_path, trust_remote_code=trust_remote_code) model_config.num_labels = 1 # note that we have to create model in fp32. 
Otherwise, the optimizer is in bf16, which is incorrect init_context = get_init_weight_context_manager( use_meta_tensor=not model_config.tie_word_embeddings, mesh=self.device_mesh ) with init_context(), warnings.catch_warnings(): warnings.simplefilter("ignore") model_config.classifier_dropout = 0.0 reward_module = AutoModelForTokenClassification.from_pretrained( pretrained_model_name_or_path=local_path, config=model_config, torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2", trust_remote_code=trust_remote_code, ) apply_monkey_patch( model=reward_module, use_remove_padding=config.model.get("use_remove_padding", False), ulysses_sp_size=self.ulysses_sequence_parallel_size, ) reward_module.to(torch.bfloat16) auto_wrap_policy = get_fsdp_wrap_policy(module=reward_module, config=self.config.model.fsdp_config) fsdp_mesh = self.device_mesh sharding_strategy = get_sharding_strategy(fsdp_mesh) if config.strategy == "fsdp": reward_module = FSDP( reward_module, param_init_fn=init_fn, use_orig_params=False, auto_wrap_policy=auto_wrap_policy, device_id=get_device_id(), sharding_strategy=sharding_strategy, # zero3 sync_module_states=True, cpu_offload=CPUOffload(offload_params=True), forward_prefetch=self.config.model.fsdp_config.forward_prefetch, device_mesh=self.device_mesh, ) elif config.strategy == "fsdp2": assert CPUOffloadPolicy is not None, "PyTorch version >= 2.4 is required for using fully_shard API (FSDP2)" cpu_offload = CPUOffloadPolicy(pin_memory=True) fsdp_kwargs = { "mesh": fsdp_mesh, "offload_policy": cpu_offload, "reshard_after_forward": config.model.fsdp_config.reshard_after_forward, } full_state = reward_module.state_dict() apply_fsdp2(reward_module, fsdp_kwargs, config.model.fsdp_config) fsdp2_load_full_state_dict(reward_module, full_state, fsdp_mesh, cpu_offload) else: raise NotImplementedError(f"Unknown strategy: {config.strategy}") return reward_module @register(dispatch_mode=Dispatch.ONE_TO_ALL) def init_model(self): # This is used to import external_lib into the huggingface systems import_external_libs(self.config.model.get("external_lib", None)) self.reward_module = self._build_model(config=self.config) def _forward_micro_batch(self, micro_batch): if is_cuda_available: from flash_attn.bert_padding import index_first_axis, pad_input, rearrange, unpad_input elif is_npu_available: from transformers.integrations.npu_flash_attention import ( index_first_axis, pad_input, rearrange, unpad_input, ) from verl.utils.ulysses import gather_outputs_and_unpad, ulysses_pad_and_slice_inputs with torch.no_grad(), torch.autocast(device_type=device_name, dtype=torch.bfloat16): input_ids = micro_batch["input_ids"] batch_size, seqlen = input_ids.shape attention_mask = micro_batch["attention_mask"] position_ids = micro_batch["position_ids"] if position_ids.dim() == 3: # qwen2vl mrope position_ids = position_ids.transpose(0, 1) # (bsz, 3, seqlen) -> (3, bsz, seqlen) if self.use_remove_padding: input_ids_rmpad, indices, *_ = unpad_input( input_ids.unsqueeze(-1), attention_mask ) # input_ids_rmpad (total_nnz, ...) input_ids_rmpad = input_ids_rmpad.transpose(0, 1) # (1, total_nnz) # unpad the position_ids to align the rotary if position_ids.dim() == 3: position_ids_rmpad = ( index_first_axis(rearrange(position_ids, "c b s ... -> (b s) c ..."), indices) .transpose(0, 1) .unsqueeze(1) ) # (3, bsz, seqlen) -> (3, 1, bsz * seqlen) else: position_ids_rmpad = index_first_axis( rearrange(position_ids.unsqueeze(-1), "b s ... 
-> (b s) ..."), indices ).transpose(0, 1) # pad and slice the inputs if sp > 1 if self.ulysses_sequence_parallel_size > 1: input_ids_rmpad, position_ids_rmpad, pad_size = ulysses_pad_and_slice_inputs( input_ids_rmpad, position_ids_rmpad, sp_size=self.ulysses_sequence_parallel_size ) # only pass input_ids and position_ids to enable flash_attn_varlen output = self.reward_module( input_ids=input_ids_rmpad, attention_mask=None, position_ids=position_ids_rmpad, use_cache=False ) reward_rmpad = output.logits reward_rmpad = reward_rmpad.squeeze(0) # (total_nnz) # gather output if sp > 1 if self.ulysses_sequence_parallel_size > 1: reward_rmpad = gather_outputs_and_unpad( reward_rmpad, gather_dim=0, unpad_dim=0, padding_size=pad_size ) # pad it back rm_score = pad_input(reward_rmpad, indices=indices, batch=batch_size, seqlen=seqlen).squeeze(-1) else: output = self.reward_module( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, use_cache=False ) rm_score = output.logits # (batch_size, seq_len, 1) rm_score = rm_score.squeeze(-1) # extract the result of the last valid token eos_mask_idx = torch.argmax(position_ids * attention_mask, dim=-1) # (bsz,) rm_score = rm_score[torch.arange(batch_size), eos_mask_idx] return rm_score def _expand_to_token_level(self, data: DataProto, scores: torch.Tensor): batch_size = data.batch.batch_size[0] # expand as token_level_reward attention_mask = data.batch["attention_mask"] position_ids = data.batch["position_ids"] response_length = data.batch["responses"].shape[-1] if position_ids.dim() == 3: # qwen2vl mrope [bs, 3, seq_len] position_ids = position_ids[:, 0, :] eos_mask_idx = torch.argmax(position_ids * attention_mask, dim=-1) # (bsz,) token_level_scores = torch.zeros_like(attention_mask, dtype=scores.dtype) # (bsz, seqlen) token_level_scores[torch.arange(batch_size), eos_mask_idx] = scores # select the response part token_level_scores = token_level_scores[:, -response_length:] return token_level_scores def _switch_chat_template(self, data: DataProto): src_max_length = data.batch["attention_mask"].shape[-1] src_tokenizer = self.input_tokenizer target_tokenizer = self.tokenizer rm_input_ids = [] rm_attention_mask = [] for i in range(data.batch.batch_size[0]): # extract raw prompt if isinstance(data.non_tensor_batch["raw_prompt"][i], list): chat: list = data.non_tensor_batch["raw_prompt"][i] else: chat: list = data.non_tensor_batch["raw_prompt"][i].tolist() # extract response response_ids = data.batch["responses"][i] response_length = response_ids.shape[-1] valid_response_length = data.batch["attention_mask"][i][-response_length:].sum() valid_response_ids = response_ids[:valid_response_length] # decode response = src_tokenizer.decode(valid_response_ids) # remove bos and eos response = response.replace(src_tokenizer.eos_token, "") chat.append({"role": "assistant", "content": response}) prompt_with_chat_template = target_tokenizer.apply_chat_template( chat, add_generation_prompt=False, tokenize=False ) if self.rank == 0 and i == 0: # for debugging purpose print(f"Switch template. 
chat: {prompt_with_chat_template}") # the maximum length is actually determined by the reward model itself max_length = self.config.get("max_length", src_max_length) if max_length is None: max_length = src_max_length model_inputs = target_tokenizer(prompt_with_chat_template, return_tensors="pt", add_special_tokens=False) input_ids, attention_mask = verl_F.postprocess_data( input_ids=model_inputs["input_ids"], attention_mask=model_inputs["attention_mask"], max_length=max_length, pad_token_id=target_tokenizer.pad_token_id, left_pad=False, # right padding truncation=self.config.get("truncation", "right"), ) # truncate from the right rm_input_ids.append(input_ids) rm_attention_mask.append(attention_mask) rm_input_ids = torch.cat(rm_input_ids, dim=0) rm_attention_mask = torch.cat(rm_attention_mask, dim=0) rm_position_ids = compute_position_id_with_mask(rm_attention_mask) rm_inputs = {"input_ids": rm_input_ids, "attention_mask": rm_attention_mask, "position_ids": rm_position_ids} return DataProto.from_dict(rm_inputs) @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO) @DistProfiler.annotate(color="brown") def compute_rm_score(self, data: DataProto): import itertools from verl.utils.seqlen_balancing import get_reverse_idx, rearrange_micro_batches # Support all hardwares data = data.to(get_device_id()) if self._do_switch_chat_template: rm_data = self._switch_chat_template(data) else: rm_input_ids = data.batch["input_ids"] rm_attention_mask = data.batch["attention_mask"] rm_position_ids = data.batch["position_ids"] rm_inputs = { "input_ids": rm_input_ids, "attention_mask": rm_attention_mask, "position_ids": rm_position_ids, } rm_data = DataProto.from_dict(rm_inputs) # Support all hardwares rm_data.batch = rm_data.batch.to(get_device_id()) # perform forward computation with self.ulysses_sharding_manager: rm_data = self.ulysses_sharding_manager.preprocess_data(data=rm_data) data = self.ulysses_sharding_manager.preprocess_data(data=data) use_dynamic_bsz = self.config.use_dynamic_bsz if use_dynamic_bsz: max_token_len = self.config.forward_max_token_len_per_gpu * self.ulysses_sequence_parallel_size micro_batches, indices = rearrange_micro_batches(batch=rm_data.batch, max_token_len=max_token_len) else: micro_batches = rm_data.batch.split(self.config.micro_batch_size_per_gpu) output = [] for micro_batch in micro_batches: rm_score = self._forward_micro_batch(micro_batch) output.append(rm_score) scores = torch.cat(output, dim=0) # (batch_size) if use_dynamic_bsz: indices = list(itertools.chain.from_iterable(indices)) assert len(indices) == scores.size(0), f"{len(indices)} vs. 
{scores.size()}" revert_indices = torch.tensor(get_reverse_idx(indices), dtype=torch.long) scores = scores[revert_indices] token_level_scores = self._expand_to_token_level(data, scores) # Note that this is only the scores, may not be the final rewards used to train RL output = DataProto.from_dict(tensors={"rm_scores": token_level_scores}) output = self.ulysses_sharding_manager.postprocess_data(data=output) # https://pytorch.org/docs/stable/notes/fsdp.html#fsdp-notes # unshard the root FSDP module if self.world_size > 1 and fsdp_version(self.reward_module) == 1: self.reward_module._handle.reshard(True) output = output.to("cpu") return output # ================================= Async related workers ================================= class AsyncActorRolloutRefWorker(ActorRolloutRefWorker): def _build_rollout(self, trust_remote_code=False): rollout, rollout_sharding_manager = super()._build_rollout(trust_remote_code) # NOTE: rollout is not actually initialized here, it's deferred # to be initialized by AsyncvLLMServer. self.vllm_tp_size = self.config.rollout.tensor_model_parallel_size self.vllm_dp_rank = int(os.environ["RANK"]) // self.vllm_tp_size self.vllm_tp_rank = int(os.environ["RANK"]) % self.vllm_tp_size # used for sleep/wake_up rollout.sharding_manager = rollout_sharding_manager return rollout, rollout_sharding_manager @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO) def generate_sequences(self, prompts: DataProto): raise NotImplementedError("AsyncActorRolloutRefWorker does not support generate_sequences") # ============================ vLLM related ============================ @register(dispatch_mode=Dispatch.DIRECT_ROLLOUT_METHOD) def execute_method(self, method: str | bytes, *args, **kwargs): """Called by ExternalRayDistributedExecutor collective_rpc.""" return self.rollout.execute_method(method, *args, **kwargs) @register(dispatch_mode=Dispatch.DIRECT_ROLLOUT_METHOD) def get_zeromq_address(self): return self.rollout.get_zeromq_address() # ============================ SGLang related ============================ @register(dispatch_mode=Dispatch.DIRECT_ROLLOUT_METHOD, blocking=False) async def chat_completion(self, json_request): ret = await self.rollout.chat_completion(json_request) return ret @register(dispatch_mode=Dispatch.DIRECT_ROLLOUT_METHOD, blocking=False) async def generate(self, prompt_ids: list[int], sampling_params: dict[str, Any], request_id: str) -> list[int]: ret = await self.rollout.generate(prompt_ids, sampling_params, request_id) return ret @register(dispatch_mode=Dispatch.DIRECT_ROLLOUT_METHOD) async def wake_up(self): if self.config.rollout.free_cache_engine: await self.rollout.wake_up() # return something to block the caller return True @register(dispatch_mode=Dispatch.DIRECT_ROLLOUT_METHOD) async def sleep(self): if self.config.rollout.free_cache_engine: await self.rollout.sleep() # return something to block the caller return True ================================================ FILE: verl_rl/verl/workers/megatron_workers.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. """ The main entry point to run the PPO algorithm """ import datetime import logging import os import time from typing import Any import psutil import torch import torch.distributed from codetiming import Timer from megatron.core import parallel_state as mpu from omegaconf import DictConfig, OmegaConf, open_dict from verl import DataProto from verl.single_controller.base.decorator import Dispatch, register from verl.single_controller.base.megatron.worker import MegatronWorker from verl.utils import hf_tokenizer from verl.utils.checkpoint.megatron_checkpoint_manager import MegatronCheckpointManager from verl.utils.config import omega_conf_to_dataclass from verl.utils.device import get_device_id, get_device_name, get_nccl_backend, get_torch_device from verl.utils.flops_counter import FlopsCounter from verl.utils.fs import copy_to_local from verl.utils.megatron_utils import ( load_megatron_model_to_gpu, load_megatron_optimizer, offload_megatron_model_to_cpu, offload_megatron_optimizer, ) from verl.utils.model import get_hf_model_path, load_mcore_dist_weights, load_megatron_gptmodel_weights from verl.utils.profiler import ( DistProfiler, DistProfilerExtension, GPUMemoryLogger, log_gpu_memory_usage, simple_timer, ) from verl.utils.profiler.performance import reduce_timing from verl.workers.actor.megatron_actor import MegatronPPOActor from verl.workers.critic.megatron_critic import MegatronPPOCritic from verl.workers.reward_model.megatron.reward_model import MegatronRewardModel logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) def set_random_seed(seed): import random import numpy as np import torch torch.manual_seed(seed) np.random.seed(seed) random.seed(seed) if get_torch_device().device_count() > 0: from megatron.core import tensor_parallel tensor_parallel.model_parallel_cuda_manual_seed(seed) # FIXME: torch cumsum not support deterministic (used in vllm sampler), # https://github.com/pytorch/pytorch/issues/89492 # torch.use_deterministic_algorithms(True, warn_only=True) # os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8' class ActorRolloutRefWorker(MegatronWorker, DistProfilerExtension): """ This worker can be instantiated as a standalone actor or a standalone rollout or a standalone reference policy or a hybrid engine based on the config.rollout """ def __init__(self, config: DictConfig, role: str, **kwargs): MegatronWorker.__init__(self) self.config = config # NOTE(sgm): We utilize colocate WorkerGroup by default. # As a result, Workers for different model share the same process. # Therefore, we only require one distribute initialization. # To utilize different parallel strategy in different models: # 1, users should disable WorkerDict; 2.assign different ResourcePool to different models, # 3. 
and apply the following patch in ray==2.10, https://github.com/ray-project/ray/pull/44385 if not torch.distributed.is_initialized(): rank = int(os.environ["LOCAL_RANK"]) torch.distributed.init_process_group( backend=get_nccl_backend(), timeout=datetime.timedelta(seconds=self.config.get("nccl_timeout", 600)), init_method=os.environ.get("DIST_INIT_METHOD", None), ) get_torch_device().set_device(rank) mpu.initialize_model_parallel( tensor_model_parallel_size=self.config.actor.megatron.tensor_model_parallel_size, pipeline_model_parallel_size=self.config.actor.megatron.pipeline_model_parallel_size, virtual_pipeline_model_parallel_size=self.config.actor.megatron.virtual_pipeline_model_parallel_size, pipeline_model_parallel_split_rank=None, use_sharp=False, context_parallel_size=self.config.actor.megatron.context_parallel_size, expert_model_parallel_size=self.config.actor.megatron.expert_model_parallel_size, expert_tensor_parallel_size=self.config.actor.megatron.expert_tensor_parallel_size, nccl_communicator_config_path=None, ) set_random_seed(seed=self.config.actor.megatron.seed) self.role = role assert self.role in ["actor", "rollout", "ref", "actor_rollout", "actor_rollout_ref"] self._is_actor = self.role in ["actor", "actor_rollout", "actor_rollout_ref"] self._is_rollout = self.role in ["rollout", "actor_rollout", "actor_rollout_ref"] self._is_ref = self.role in ["ref", "actor_rollout_ref"] profiler_config = omega_conf_to_dataclass(config.get("profiler")) DistProfilerExtension.__init__(self, DistProfiler(rank=self.rank, config=profiler_config)) # TODO(sgm): Currently, we only support reference model param offload # will support other offload later self._is_offload_param = False self._is_offload_grad = False self._is_offload_optimizer = False # normalize config if self._is_actor and self._is_rollout: self.config.actor.ppo_mini_batch_size *= self.config.rollout.n self.config.actor.ppo_mini_batch_size //= mpu.get_data_parallel_world_size() if self.config.actor.get("ppo_micro_batch_size", None): self.config.actor.ppo_micro_batch_size //= mpu.get_data_parallel_world_size() self.config.rollout.log_prob_micro_batch_size //= mpu.get_data_parallel_world_size() self.config.actor.ppo_micro_batch_size_per_gpu = self.config.actor.ppo_micro_batch_size self.config.rollout.log_prob_micro_batch_size_per_gpu = self.config.rollout.log_prob_micro_batch_size self._is_offload_param = self.config.actor.megatron.get("param_offload", False) self._is_offload_grad = self.config.actor.megatron.get("grad_offload", False) self._is_offload_optimizer = self.config.actor.megatron.get("optimizer_offload", False) elif self._is_ref: if self.config.ref.get("log_prob_micro_batch_size", None): self.config.ref.log_prob_micro_batch_size //= mpu.get_data_parallel_world_size() self.config.ref.log_prob_micro_batch_size_per_gpu = self.config.ref.log_prob_micro_batch_size else: assert self.config.ref.get("log_prob_micro_batch_size_per_gpu", None) is not None, ( "Please note that in the ref policy configuration, `log_prob_micro_batch_size_per_gpu` and " "`log_prob_micro_batch_size` should not be None at the same time." 
) self._ref_is_offload_param = self.config.ref.megatron.get("param_offload", False) def _build_model_optimizer(self, model_path, optim_config, override_model_config, override_transformer_config): from verl.utils.megatron.optimizer import get_megatron_optimizer, get_megatron_optimizer_param_scheduler from verl.utils.megatron_utils import get_model, init_megatron_optim_config from verl.utils.model import get_generation_config, print_model_size self._init_hf_config_and_tf_config( model_path, model_path, self.dtype, override_model_config, override_transformer_config, self.config.model.get("trust_remote_code", False), self.config.actor.megatron.use_mbridge, ) self.generation_config = get_generation_config(self.local_path) def make_model(wrap_with_ddp=False): if self.bridge is not None: from verl.models.mcore.mbridge import freeze_moe_router post_model_creation_callbacks = [] if override_model_config.get("moe_config", {}).get("freeze_moe_router", False): post_model_creation_callbacks.append(freeze_moe_router) return self.bridge.get_model( post_model_creation_callbacks=post_model_creation_callbacks, wrap_with_ddp=wrap_with_ddp ) else: def megatron_actor_model_provider(pre_process, post_process): from verl.models.mcore import init_mcore_model parallel_model = init_mcore_model( self.tf_config, self.hf_config, pre_process, post_process, share_embeddings_and_output_weights=self.share_embeddings_and_output_weights, value=False, freeze_moe_router=override_model_config.get("moe_config", {}).get("freeze_moe_router", False), ) parallel_model.to(get_device_name()) return parallel_model override_ddp_config = OmegaConf.to_container( self.config.actor.megatron.get("override_ddp_config", OmegaConf.create()), resolve=True ) return get_model( megatron_actor_model_provider, wrap_with_ddp=wrap_with_ddp, use_distributed_optimizer=self.config.actor.megatron.use_distributed_optimizer, override_ddp_config=override_ddp_config, ) if self._is_actor or self._is_rollout: actor_module = make_model(wrap_with_ddp=True) print(f"actor_module: {len(actor_module)}") if self.config.actor.load_weight: if self.config.actor.megatron.use_dist_checkpointing: load_mcore_dist_weights( actor_module, self.config.actor.megatron.dist_checkpointing_path, is_value_model=False ) else: if self.bridge is not None: local_model_path = get_hf_model_path(self.config) self.bridge.load_weights(actor_module, local_model_path) else: load_megatron_gptmodel_weights( self.config, self.hf_config, actor_module, params_dtype=self.dtype, is_value_model=False ) if self.rank == 0: print_model_size(actor_module[0]) log_gpu_memory_usage("After MegatronPPOActor init", logger=logger) elif self._is_ref: print(f"self.config.ref.load_weight: {self.config.ref.load_weight}") ref_module = make_model(wrap_with_ddp=False) if self.config.ref.load_weight: # should align with the actor: assert self.config.actor.load_weight == self.config.ref.load_weight print("load ref weight start") if self.config.ref.megatron.use_dist_checkpointing: load_mcore_dist_weights( ref_module, self.config.ref.megatron.dist_checkpointing_path, is_value_model=False ) else: if self.bridge is not None: local_model_path = get_hf_model_path(self.config) self.bridge.load_weights(ref_module, local_model_path) else: load_megatron_gptmodel_weights( self.config, self.hf_config, ref_module, params_dtype=self.dtype, is_value_model=False ) log_gpu_memory_usage("After ref module init", logger=logger) return ref_module, self.hf_config # TODO: add more optimizer args into config if self._is_actor: 
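            # Only the actor role owns an optimizer; standalone rollout and ref
            # workers keep their weights frozen, so the else branch below returns
            # None for both the optimizer and its scheduler.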
            optim_config_megatron = init_megatron_optim_config(optim_config)
            actor_optimizer = get_megatron_optimizer(model=actor_module, config=optim_config_megatron)
            actor_optimizer_scheduler = get_megatron_optimizer_param_scheduler(
                optimizer=actor_optimizer, config=optim_config
            )
        else:
            optim_config = None
            actor_optimizer = None
            actor_optimizer_scheduler = None

        log_gpu_memory_usage("After actor optimizer init", logger=logger)

        return actor_module, actor_optimizer, actor_optimizer_scheduler, self.hf_config, optim_config

    def _build_rollout(self, trust_remote_code=False):
        from torch.distributed.device_mesh import init_device_mesh

        layer_name_mapping = {
            "qkv_layer_name": "self_attention.linear_qkv.",
            "gate_proj_layer_name": "linear_fc1.",
        }
        if self.config.rollout.name == "vllm":
            from verl.workers.rollout.vllm_rollout import vLLMRollout
            from verl.workers.sharding_manager.megatron_vllm import MegatronVLLMShardingManager

            # NOTE(sgm): If the QKV and gate_up projection layers are concatenated together in the actor,
            # we will reorganize their weight format when resharding from actor to rollout.
            infer_tp = self.config.rollout.tensor_model_parallel_size
            dp = self.world_size // infer_tp
            assert self.world_size % infer_tp == 0, (
                f"rollout world_size: {self.world_size} is not divisible by infer_tp: {infer_tp}"
            )
            rollout_device_mesh = init_device_mesh(
                get_device_name(), mesh_shape=(dp, infer_tp), mesh_dim_names=["dp", "infer_tp"]
            )

            log_gpu_memory_usage("Before building vllm rollout", logger=None)

            local_path = copy_to_local(self.config.model.path, use_shm=self.config.model.get("use_shm", False))
            from verl.workers.rollout.vllm_rollout import vLLMAsyncRollout

            vllm_rollout_cls = vLLMRollout if self.config.rollout.mode == "sync" else vLLMAsyncRollout
            rollout = vllm_rollout_cls(
                model_path=local_path,
                config=self.config.rollout,
                tokenizer=self.tokenizer,
                model_hf_config=self.actor_model_config,
                device_mesh=rollout_device_mesh,
                trust_remote_code=trust_remote_code,
            )
            log_gpu_memory_usage("After building vllm rollout", logger=logger)

            # perform weight resharding between actor and rollout
            from verl.models.mcore import get_mcore_weight_converter

            weight_converter = get_mcore_weight_converter(self.actor_model_config, self.dtype)
            sharding_manager = MegatronVLLMShardingManager(
                inference_engine=rollout.inference_engine,
                model_config=self.actor_model_config,
                transformer_config=self.tf_config,
                rollout_config=self.config.rollout,
                layer_name_mapping=layer_name_mapping,
                actor_module=self.actor.actor_module,
                weight_converter=weight_converter,
                device_mesh=rollout_device_mesh,
                offload_param=self._is_offload_param,
                bridge=self.bridge,
            )
            log_gpu_memory_usage("After building sharding manager", logger=logger)

        elif self.config.rollout.name == "sglang":
            from verl.workers.rollout.sglang_rollout import SGLangRollout

            # NOTE(linjunrong): Due to recent fp8 support in SGLang, importing any symbol related to SGLang's
            # model_runner now checks CUDA device capability.
            # However, due to verl's setting, the main process of ray cannot find any CUDA device, which would
            # potentially lead to: "RuntimeError: No CUDA GPUs are available".
            # For this reason, sharding_manager.__init__ should not import FSDPSGLangShardingManager, so we
            # import it here using the absolute path.
            # check: https://github.com/sgl-project/sglang/blob/00f42707eaddfc2c0528e5b1e0094025c640b7a0/python/sglang/srt/layers/quantization/fp8_utils.py#L76
            from verl.workers.sharding_manager.megatron_sglang import MegatronSGLangShardingManager

            infer_tp = self.config.rollout.tensor_model_parallel_size
            dp = self.world_size // infer_tp
            assert self.world_size % infer_tp == 0, (
                f"rollout world_size: {self.world_size} is not divisible by infer_tp: {infer_tp}"
            )
            rollout_device_mesh = init_device_mesh(
                "cpu", mesh_shape=(dp, infer_tp, 1), mesh_dim_names=("dp", "tp", "pp")
            )
            local_path = copy_to_local(self.config.model.path)
            log_gpu_memory_usage(f"Before building {self.config.rollout.name} rollout", logger=None)
            rollout = SGLangRollout(
                actor_module=local_path,
                config=self.config.rollout,
                processing_class=self.processor if self.processor is not None else self.tokenizer,
                model_hf_config=self.actor_model_config,
                trust_remote_code=trust_remote_code,
                device_mesh=rollout_device_mesh,
            )
            log_gpu_memory_usage(f"After building {self.config.rollout.name} rollout", logger=None)

            from verl.models.mcore import get_mcore_weight_converter

            weight_converter = get_mcore_weight_converter(self.actor_model_config, self.dtype)
            sharding_manager = MegatronSGLangShardingManager(
                actor_module=self.actor.actor_module,
                inference_engine=rollout._engine,
                model_config=self.actor_model_config,
                rollout_config=self.config.rollout,
                transformer_config=self.tf_config,
                layer_name_mapping=layer_name_mapping,
                weight_converter=weight_converter,
                bridge=self.bridge,
                device_mesh=rollout_device_mesh,
                offload_param=self._is_offload_param,
            )
            log_gpu_memory_usage("After building sharding manager", logger=logger)
        else:
            raise NotImplementedError(
                f"Rollout backend {self.config.rollout.name} is not supported with Megatron; use vllm or sglang"
            )

        print(f"rollout and sharding manager init done, sharding_manager: {sharding_manager}")
        return rollout, sharding_manager

    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def init_model(self):
        if self.config.model.get("external_lib", None) is not None:
            # This is used to import external_lib into the huggingface systems
            import importlib

            importlib.import_module(self.config.model.external_lib)

        from verl.utils.torch_dtypes import PrecisionType

        override_model_config = OmegaConf.to_container(self.config.model.get("override_config", OmegaConf.create()))
        if self._is_actor:
            override_transformer_config = OmegaConf.to_container(
                self.config.actor.megatron.get("override_transformer_config", OmegaConf.create()), resolve=True
            )
        elif self._is_ref:
            override_transformer_config = OmegaConf.to_container(
                self.config.ref.megatron.get("override_transformer_config", OmegaConf.create()), resolve=True
            )
        else:
            override_transformer_config = {}
        self.param_dtype = torch.bfloat16
        log_gpu_memory_usage("Before init actor model and optimizer", logger=logger)
        self.dtype = PrecisionType.to_dtype(self.param_dtype)
        if self._is_actor or self._is_rollout:
            # we need the model for actor and rollout
            optim_config = self.config.actor.optim if self._is_actor else None
            (
                self.actor_module,
                self.actor_optimizer,
                self.actor_optimizer_scheduler,
                self.actor_model_config,
                self.actor_optim_config,
            ) = self._build_model_optimizer(
                model_path=self.config.model.path,
                optim_config=optim_config,
                override_model_config=override_model_config,
                override_transformer_config=override_transformer_config,
            )
            if self._is_offload_param:
                offload_megatron_model_to_cpu(self.actor_module)
                log_gpu_memory_usage("After offload actor params and grad during init", logger=logger)
            if self._is_offload_optimizer:
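                # Offloading optimizer state right after init frees device memory
                # for the rollout engine; the state is loaded back on demand in
                # update_actor before the policy update runs.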
offload_megatron_optimizer(self.actor_optimizer) log_gpu_memory_usage("After offload actor optimizer during init", logger=logger) if self._is_actor: OmegaConf.set_struct(self.config.actor, True) with open_dict(self.config.actor): use_fused_kernels = self.config.model.get("use_fused_kernels", False) self.config.actor.use_fused_kernels = use_fused_kernels self.actor = MegatronPPOActor( config=self.config.actor, model_config=self.actor_model_config, hf_config=self.hf_config, tf_config=self.tf_config, actor_module=self.actor_module, actor_optimizer=self.actor_optimizer, ) log_gpu_memory_usage("After MegatronPPOActor init", logger=logger) if self._is_rollout: self.rollout, self.sharding_manager = self._build_rollout( trust_remote_code=self.config.model.get("trust_remote_code", False) ) # used for sleep/wake_up self.rollout.sharding_manager = self.sharding_manager log_gpu_memory_usage("After rollout init", logger=logger) if self._is_ref: self.ref_module, self.ref_model_config = self._build_model_optimizer( model_path=self.config.model.path, optim_config=None, override_model_config=override_model_config, override_transformer_config=override_transformer_config, ) log_gpu_memory_usage("After ref model init", logger=logger) self.ref_policy = MegatronPPOActor( config=self.config.ref, model_config=self.ref_model_config, hf_config=self.hf_config, tf_config=self.tf_config, actor_module=self.ref_module, actor_optimizer=None, ) if self._ref_is_offload_param: offload_megatron_model_to_cpu(self.ref_module) log_gpu_memory_usage("After offload ref params during init", logger=logger) if self._is_actor: self.flops_counter = FlopsCounter(self.actor_model_config) self.checkpoint_mananager = MegatronCheckpointManager( config=self.config, checkpoint_config=self.config.actor.checkpoint, model_config=self.actor_model_config, transformer_config=self.tf_config, role="actor", model=self.actor_module, arch=self.architectures[0], hf_config=self.hf_config, param_dtype=self.param_dtype, share_embeddings_and_output_weights=self.share_embeddings_and_output_weights, processing_class=self.processor if self.processor is not None else self.tokenizer, optimizer=self.actor_optimizer, optimizer_scheduler=self.actor_optimizer_scheduler, use_distributed_optimizer=self.config.actor.megatron.use_distributed_optimizer, use_checkpoint_opt_param_scheduler=self.config.actor.optim.use_checkpoint_opt_param_scheduler, bridge=self.bridge, use_dist_checkpointing=self.config.actor.megatron.use_dist_checkpointing, ) get_torch_device().empty_cache() log_gpu_memory_usage("After init_model finish", logger=logger) @register(dispatch_mode=Dispatch.MEGATRON_COMPUTE_PROTO) @GPUMemoryLogger(role="update_actor", logger=logger) @DistProfiler.annotate(color="red") def update_actor(self, data: DataProto): assert self._is_actor if self._is_offload_param: load_megatron_model_to_gpu(self.actor_module) log_gpu_memory_usage("After load actor params and grad during update_actor", logger=logger) if self._is_offload_optimizer: load_megatron_optimizer(self.actor_optimizer) log_gpu_memory_usage("After load actor optimizer during update_actor", logger=logger) data.batch = data.batch.to(get_device_name()) micro_batch_size = self.config.actor.ppo_micro_batch_size_per_gpu data.meta_info["micro_batch_size"] = micro_batch_size dataloader = self.actor.make_minibatch_iterator(data=data) with Timer(name="update_policy", logger=None) as timer: metrics = self.actor.update_policy(dataloader=dataloader) delta_time = timer.last global_num_tokens = data.meta_info["global_token_num"] 
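        # MFU accounting: estimate_flops converts the global token count and the
        # measured update time into achieved vs. promised FLOPs. Illustrative
        # numbers (not measured): 300 achieved vs. 989 promised TFLOPs (H100 bf16
        # peak) on 8 ranks with ppo_epochs=1 yields
        # perf/mfu/actor ~= 300 * 1 / 989 / 8 ~= 0.038.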
estimated_flops, promised_flops = self.flops_counter.estimate_flops(global_num_tokens, delta_time) metrics["perf/mfu/actor"] = estimated_flops * self.config.actor.ppo_epochs / promised_flops / self.world_size metrics["perf/max_memory_allocated_gb"] = get_torch_device().max_memory_allocated() / (1024**3) metrics["perf/max_memory_reserved_gb"] = get_torch_device().max_memory_reserved() / (1024**3) metrics["perf/cpu_memory_used_gb"] = psutil.virtual_memory().used / (1024**3) from verl.utils.megatron.optimizer import get_megatron_last_lr metrics["actor/lr"] = get_megatron_last_lr(self.actor_optimizer) self.actor_optimizer_scheduler.step(1) # TODO: here, we should return all metrics output = DataProto(meta_info={"metrics": metrics}) output = output.to("cpu") if self._is_offload_param: offload_megatron_model_to_cpu(self.actor_module) log_gpu_memory_usage("After offload actor params and grad during update_actor", logger=logger) if self._is_offload_optimizer: offload_megatron_optimizer(self.actor_optimizer) log_gpu_memory_usage("After offload actor optimizer during update_actor", logger=logger) get_torch_device().empty_cache() return output @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO) @GPUMemoryLogger(role="generate_sequences", logger=logger) @DistProfiler.annotate(color="red") def generate_sequences(self, prompts: DataProto): assert self._is_rollout prompts.batch = prompts.batch.to(get_device_name()) meta_info = { "eos_token_id": self.generation_config.eos_token_id if self.generation_config is not None else self.tokenizer.eos_token_id, "pad_token_id": self.generation_config.pad_token_id if self.generation_config is not None else self.tokenizer.pad_token_id, } prompts.meta_info.update(meta_info) if self._is_offload_optimizer: offload_megatron_optimizer(self.actor_optimizer) timing_generate = {} with self.sharding_manager: log_gpu_memory_usage("After entering sharding manager", logger=logger) prompts = self.sharding_manager.preprocess_data(prompts) with simple_timer("generate_sequences", timing_generate): output = self.rollout.generate_sequences(prompts=prompts) output = self.sharding_manager.postprocess_data(output) log_gpu_memory_usage("After rollout generation", logger=logger) timing_generate.update(self.sharding_manager.timing) # We calculate the average timing across all ranks # to make sure meta_info["timing"] is the same timing_generate = reduce_timing(timing_generate) output.meta_info["timing"] = timing_generate output = output.to("cpu") # clear kv cache get_torch_device().empty_cache() return output @register(dispatch_mode=Dispatch.MEGATRON_COMPUTE_PROTO) @GPUMemoryLogger(role="compute_ref_log_prob", logger=logger) @DistProfiler.annotate(color="olive") def compute_ref_log_prob(self, data: DataProto): assert self._is_ref if self._ref_is_offload_param: load_megatron_model_to_gpu(self.ref_module, load_grad=False) log_gpu_memory_usage("After load ref params and grad during compute_ref_log_prob", logger=logger) micro_batch_size = self.config.ref.log_prob_micro_batch_size_per_gpu data.meta_info["micro_batch_size"] = micro_batch_size data.meta_info["max_token_len"] = self.config.ref.log_prob_max_token_len_per_gpu data.meta_info["use_dynamic_bsz"] = self.config.ref.log_prob_use_dynamic_bsz data.meta_info["temperature"] = self.config.rollout.temperature data = data.to(get_device_id()) output, _ = self.ref_policy.compute_log_prob(data=data, calculate_entropy=False) output = DataProto.from_dict(tensors={"ref_log_prob": output}) output = output.to("cpu") if self._ref_is_offload_param: 
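            # The reference policy is frozen, so its parameters only visit the GPU
            # for this single log-prob forward; they are moved back to CPU below
            # before the allocator cache is emptied.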
offload_megatron_model_to_cpu(self.ref_module) log_gpu_memory_usage("After offload ref params and grad during compute_ref_log_prob", logger=logger) get_torch_device().empty_cache() return output @register(dispatch_mode=Dispatch.MEGATRON_COMPUTE_PROTO) @GPUMemoryLogger(role="compute_log_prob", logger=logger) @DistProfiler.annotate(color="blue") def compute_log_prob(self, data: DataProto): assert self._is_actor if self._is_offload_param: load_megatron_model_to_gpu(self.actor_module, load_grad=False) log_gpu_memory_usage("After load actor params and grad during compute_log_prob", logger=logger) # we should always recompute old_log_probs when it is HybridEngine data.meta_info["micro_batch_size"] = self.config.rollout.log_prob_micro_batch_size_per_gpu data.meta_info["max_token_len"] = self.config.rollout.log_prob_max_token_len_per_gpu data.meta_info["use_dynamic_bsz"] = self.config.rollout.log_prob_use_dynamic_bsz data.meta_info["temperature"] = self.config.rollout.temperature data = data.to(get_device_id()) output, entropys = self.actor.compute_log_prob(data=data, calculate_entropy=True) output = DataProto.from_dict( tensors={"old_log_probs": output, "entropys": entropys}, meta_info={"temperature": self.config.rollout.temperature}, ) output = output.to("cpu") # clear kv cache if self._is_offload_param: offload_megatron_model_to_cpu(self.actor_module) log_gpu_memory_usage("After offload actor params and grad during compute_log_prob", logger=logger) get_torch_device().empty_cache() return output @register(dispatch_mode=Dispatch.ONE_TO_ALL) def load_checkpoint(self, checkpoint_path, hdfs_path=None, del_local_after_load=True): if self._is_offload_param: load_megatron_model_to_gpu(self.actor_module) self.checkpoint_mananager.load_checkpoint( local_path=checkpoint_path, hdfs_path=hdfs_path, del_local_after_load=del_local_after_load ) if self._is_offload_param: offload_megatron_model_to_cpu(self.actor_module) if self._is_offload_optimizer: offload_megatron_optimizer(self.actor_optimizer) @register(dispatch_mode=Dispatch.ONE_TO_ALL) def load_pretrained_model(self, checkpoint_path, del_local_after_load=True): pass @register(dispatch_mode=Dispatch.ONE_TO_ALL) def save_checkpoint(self, checkpoint_path, hdfs_path=None, global_step=0, max_ckpt_to_keep=None): if self._is_offload_param: load_megatron_model_to_gpu(self.actor_module) self.checkpoint_mananager.save_checkpoint( local_path=checkpoint_path, hdfs_path=hdfs_path, global_step=global_step, max_ckpt_to_keep=max_ckpt_to_keep ) torch.distributed.barrier() if self._is_offload_param: offload_megatron_model_to_cpu(self.actor_module) class AsyncActorRolloutRefWorker(ActorRolloutRefWorker): def _build_rollout(self, trust_remote_code=False): rollout, rollout_sharding_manager = super()._build_rollout(trust_remote_code) # NOTE: rollout is not actually initialized here, it's deferred # to be initialized by AsyncvLLMServer. 
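        # Rank layout (illustrative arithmetic): with RANK=5 and
        # tensor_model_parallel_size=2, this worker becomes vllm_dp_rank=5//2=2
        # and vllm_tp_rank=5%2=1, i.e. the second TP shard of the third DP replica.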
self.vllm_tp_size = self.config.rollout.tensor_model_parallel_size self.vllm_dp_rank = int(os.environ["RANK"]) // self.vllm_tp_size self.vllm_tp_rank = int(os.environ["RANK"]) % self.vllm_tp_size # used for sleep/wake_up rollout.sharding_manager = rollout_sharding_manager return rollout, rollout_sharding_manager # ============================ vLLM related ============================ @register(dispatch_mode=Dispatch.DIRECT_ROLLOUT_METHOD) def execute_method(self, method: str | bytes, *args, **kwargs): """Called by ExternalRayDistributedExecutor collective_rpc.""" if self.vllm_tp_rank == 0 and method != "execute_model": print( f"[DP={self.vllm_dp_rank},TP={self.vllm_tp_rank}] execute_method: " f"{method if isinstance(method, str) else 'Callable'}" ) return self.rollout.execute_method(method, *args, **kwargs) @register(dispatch_mode=Dispatch.DIRECT_ROLLOUT_METHOD) def get_zeromq_address(self): return self.rollout.get_zeromq_address() # ============================ SGLang related ============================ @register(dispatch_mode=Dispatch.DIRECT_ROLLOUT_METHOD, blocking=False) async def chat_completion(self, json_request): ret = await self.rollout.chat_completion(json_request) return ret @register(dispatch_mode=Dispatch.DIRECT_ROLLOUT_METHOD, blocking=False) async def generate(self, prompt_ids: list[int], sampling_params: dict[str, Any], request_id: str) -> list[int]: ret = await self.rollout.generate(prompt_ids, sampling_params, request_id) return ret @register(dispatch_mode=Dispatch.DIRECT_ROLLOUT_METHOD) async def wake_up(self): if self.config.rollout.free_cache_engine: await self.rollout.wake_up() # return something to block the caller return True @register(dispatch_mode=Dispatch.DIRECT_ROLLOUT_METHOD) async def sleep(self): if self.config.rollout.free_cache_engine: await self.rollout.sleep() # return something to block the caller return True class CriticWorker(MegatronWorker, DistProfilerExtension): def __init__(self, config): MegatronWorker.__init__(self) DistProfilerExtension.__init__( self, DistProfiler(rank=self.rank, config=omega_conf_to_dataclass(config.get("profiler"))) ) self.config = config # NOTE(sgm): We utilize colocate WorkerGroup by default. # As a result, Workers for different model share the same process. # Therefore, we only require one distribute initialization. # To utilize different parallel strategy in different models: # 1, users should disable WorkerDict; 2.assign different ResourcePool to different models, # 3. 
and apply the following patch in ray==2.10, https://github.com/ray-project/ray/pull/44385 if not torch.distributed.is_initialized(): rank = int(os.environ["LOCAL_RANK"]) torch.distributed.init_process_group( backend=get_nccl_backend(), timeout=datetime.timedelta(seconds=self.config.get("nccl_timeout", 600)), init_method=os.environ.get("DIST_INIT_METHOD", None), ) get_torch_device().set_device(rank) mpu.initialize_model_parallel( tensor_model_parallel_size=self.config.megatron.tensor_model_parallel_size, pipeline_model_parallel_size=self.config.megatron.pipeline_model_parallel_size, virtual_pipeline_model_parallel_size=self.config.megatron.virtual_pipeline_model_parallel_size, pipeline_model_parallel_split_rank=None, use_sharp=False, context_parallel_size=self.config.megatron.context_parallel_size, expert_model_parallel_size=self.config.megatron.expert_model_parallel_size, expert_tensor_parallel_size=self.config.megatron.expert_tensor_parallel_size, nccl_communicator_config_path=None, ) set_random_seed(seed=self.config.megatron.seed) # set FSDP offload params self._is_offload_param = self.config.megatron.param_offload self._is_offload_optimizer = self.config.megatron.optimizer_offload # normalize config self.config.ppo_mini_batch_size *= self.config.rollout_n self.config.ppo_mini_batch_size //= mpu.get_data_parallel_world_size() if self.config.get("ppo_micro_batch_size", None): self.config.ppo_micro_batch_size //= mpu.get_data_parallel_world_size() self.config.ppo_micro_batch_size_per_gpu = self.config.ppo_micro_batch_size # TODO(sgm): support critic model offload def _build_critic_model_optimizer( self, model_path, optim_config, override_model_config, override_transformer_config ): from megatron.core.models.gpt.gpt_model import ModelType from verl.utils.megatron.optimizer import get_megatron_optimizer, get_megatron_optimizer_param_scheduler from verl.utils.megatron_utils import get_model, init_megatron_optim_config from verl.utils.model import print_model_size self._init_hf_config_and_tf_config( model_path, self.config.model.tokenizer_path, self.dtype, override_model_config, override_transformer_config, self.config.model.get("trust_remote_code", False), self.config.megatron.use_mbridge, ) if self.bridge is not None: from verl.models.mcore.mbridge import freeze_moe_router, make_value_model post_model_creation_callbacks = [make_value_model] if override_model_config.get("moe_config", {}).get("freeze_moe_router", False): post_model_creation_callbacks.append(freeze_moe_router) critic_module = self.bridge.get_model( post_model_creation_callbacks=post_model_creation_callbacks, wrap_with_ddp=True ) else: def megatron_critic_model_provider(pre_process, post_process): from verl.models.mcore import init_mcore_model parallel_model = init_mcore_model( self.tf_config, self.hf_config, pre_process, post_process, share_embeddings_and_output_weights=False, value=True, freeze_moe_router=override_model_config.get("moe_config", {}).get("freeze_moe_router", False), ) parallel_model.to(get_device_name()) return parallel_model override_ddp_config = OmegaConf.to_container( self.config.megatron.get("override_ddp_config", OmegaConf.create()), resolve=True ) # Step 3: initialize the megatron model critic_module = get_model( model_provider_func=megatron_critic_model_provider, model_type=ModelType.encoder_or_decoder, wrap_with_ddp=True, use_distributed_optimizer=self.config.megatron.use_distributed_optimizer, override_ddp_config=override_ddp_config, ) # note that here critic_module will be a list to be compatible with 
the construction of interleaved pp (vpp). # but here, we do not use pp (vpp) yet. For simplicity, we remove the list # critic_module = nn.ModuleList(critic_module) if self.config.load_weight: t0 = time.time() if self.config.megatron.use_dist_checkpointing: load_mcore_dist_weights( critic_module, self.config.megatron.dist_checkpointing_path, is_value_model=True ) else: if self.bridge is not None: local_model_path = get_hf_model_path(self.config) self.bridge.load_weights(critic_module, local_model_path) else: load_megatron_gptmodel_weights( self.config, self.hf_config, critic_module, params_dtype=self.dtype, is_value_model=True ) t1 = time.time() if torch.distributed.get_rank() == 0: print(f"critic load_weight time: {t1 - t0}") if self.rank == 0: print_model_size(critic_module[0]) # TODO: add more optimizer args into config optim_config_megatron = init_megatron_optim_config(optim_config) critic_optimizer = get_megatron_optimizer(model=critic_module, config=optim_config_megatron) critic_optimizer_scheduler = get_megatron_optimizer_param_scheduler( optimizer=critic_optimizer, config=optim_config ) get_torch_device().empty_cache() return critic_module, critic_optimizer, critic_optimizer_scheduler, self.hf_config, optim_config @register(dispatch_mode=Dispatch.ONE_TO_ALL) def init_model(self): # create critic from verl.utils.torch_dtypes import PrecisionType if self.config.model.get("external_lib", None) is not None: # This is used to import external_lib into the huggingface systems import importlib importlib.import_module(self.config.model.external_lib) override_model_config = OmegaConf.to_container(self.config.model.get("override_config", OmegaConf.create())) override_transformer_config = OmegaConf.to_container( self.config.megatron.get("override_transformer_config", OmegaConf.create()), resolve=True ) self.param_dtype = torch.bfloat16 self.dtype = PrecisionType.to_dtype(self.param_dtype) ( self.critic_module, self.critic_optimizer, self.critic_optimizer_scheduler, self.critic_model_config, critic_optimizer_config, ) = self._build_critic_model_optimizer( model_path=self.config.model.path, optim_config=self.config.optim, override_model_config=override_model_config, override_transformer_config=override_transformer_config, ) if self._is_offload_param: offload_megatron_model_to_cpu(self.critic_module) if self._is_offload_optimizer: offload_megatron_optimizer(self.critic_optimizer) self.critic = MegatronPPOCritic( config=self.config, model_config=self.critic_model_config, hf_config=self.hf_config, tf_config=self.tf_config, critic_module=self.critic_module, critic_optimizer=self.critic_optimizer, critic_optimizer_config=critic_optimizer_config, ) self.flops_counter = FlopsCounter(self.critic_model_config) self.checkpoint_mananager = MegatronCheckpointManager( config=self.config, checkpoint_config=self.config.checkpoint, model_config=self.critic_model_config, transformer_config=self.tf_config, role="critic", model=self.critic_module, arch=self.architectures[0], hf_config=self.hf_config, param_dtype=self.param_dtype, share_embeddings_and_output_weights=False, processing_class=self.processor if self.processor is not None else self.tokenizer, optimizer=self.critic_optimizer, optimizer_scheduler=self.critic_optimizer_scheduler, use_distributed_optimizer=self.config.megatron.use_distributed_optimizer, use_checkpoint_opt_param_scheduler=self.config.optim.use_checkpoint_opt_param_scheduler, bridge=self.bridge, use_dist_checkpointing=self.config.megatron.use_dist_checkpointing, ) 
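# ----------------------------------------------------------------------
# Worked example (hypothetical numbers) of the "normalize config"
# arithmetic in this worker's setup above: the PPO mini-batch is scaled
# by rollout_n and then split across data-parallel ranks, so each rank
# optimizes over ppo_mini_batch_size * rollout_n // dp_world_size samples.
rollout_n, ppo_mini_batch_size, dp_world_size = 4, 64, 8
per_rank_mini_batch = ppo_mini_batch_size * rollout_n // dp_world_size
assert per_rank_mini_batch == 32  # 64 * 4 // 8
# ----------------------------------------------------------------------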
    @register(dispatch_mode=Dispatch.MEGATRON_COMPUTE_PROTO)
    @DistProfiler.annotate(color="cyan")
    def compute_values(self, data: DataProto):
        micro_batch_size = self.config.ppo_micro_batch_size_per_gpu
        data.meta_info["micro_batch_size"] = micro_batch_size
        data.meta_info["max_token_len"] = self.config.forward_max_token_len_per_gpu
        data.meta_info["use_dynamic_bsz"] = self.config.use_dynamic_bsz
        data = data.to(get_device_id())
        if self._is_offload_param:
            load_megatron_model_to_gpu(self.critic_module)
        values = self.critic.compute_values(data=data)
        output = DataProto.from_dict(tensors={"values": values})
        output = output.to("cpu")
        if self._is_offload_param:
            offload_megatron_model_to_cpu(self.critic_module)
        return output

    @register(dispatch_mode=Dispatch.MEGATRON_COMPUTE_PROTO)
    @DistProfiler.annotate(color="pink")
    def update_critic(self, data: DataProto):
        data = data.to(get_device_id())
        if self._is_offload_param:
            load_megatron_model_to_gpu(self.critic_module)
        if self._is_offload_optimizer:
            load_megatron_optimizer(self.critic_optimizer)
        dataloader = self.critic.make_minibatch_iterator(data)
        with Timer(name="update_critic", logger=None) as timer:
            metrics = self.critic.update_critic(dataloader=dataloader)
        delta_time = timer.last
        global_num_tokens = data.meta_info["global_token_num"]
        estimated_flops, promised_flops = self.flops_counter.estimate_flops(global_num_tokens, delta_time)
        metrics["perf/mfu/critic"] = estimated_flops * self.config.ppo_epochs / promised_flops / self.world_size
        from verl.utils.megatron.optimizer import get_megatron_last_lr

        metrics["critic/lr"] = get_megatron_last_lr(self.critic_optimizer)
        self.critic_optimizer_scheduler.step(1)
        output = DataProto(batch=None, meta_info={"metrics": metrics})
        if self._is_offload_param:
            offload_megatron_model_to_cpu(self.critic_module)
        if self._is_offload_optimizer:
            offload_megatron_optimizer(self.critic_optimizer)
        output = output.to("cpu")
        return output

    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def load_checkpoint(self, checkpoint_path, hdfs_path=None, del_local_after_load=True):
        if self._is_offload_param:
            load_megatron_model_to_gpu(self.critic_module)
        self.checkpoint_mananager.load_checkpoint(
            local_path=checkpoint_path, hdfs_path=hdfs_path, del_local_after_load=del_local_after_load
        )
        if self._is_offload_param:
            offload_megatron_model_to_cpu(self.critic_module)
        if self._is_offload_optimizer:
            offload_megatron_optimizer(self.critic_optimizer)

    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def save_checkpoint(self, checkpoint_path, hdfs_path=None, global_steps=0, max_ckpt_to_keep=None):
        if self._is_offload_param:
            load_megatron_model_to_gpu(self.critic_module)
        self.checkpoint_mananager.save_checkpoint(
            local_path=checkpoint_path, hdfs_path=hdfs_path, global_step=global_steps, max_ckpt_to_keep=max_ckpt_to_keep
        )
        if self._is_offload_param:
            offload_megatron_model_to_cpu(self.critic_module)


class RewardModelWorker(MegatronWorker, DistProfilerExtension):
    """
    Note that we only implement the reward model that is a subclass of AutoModelForSequenceClassification.
    """

    def __init__(self, config):
        MegatronWorker.__init__(self)
        DistProfilerExtension.__init__(
            self, DistProfiler(rank=self.rank, config=omega_conf_to_dataclass(config.get("profiler")))
        )
        self.config = config

        # NOTE(sgm): We use a colocated WorkerGroup by default.
        # As a result, workers for different models share the same process.
        # Therefore, we only require one distributed initialization.
# To utilize different parallel strategy in different models: # 1, users should disable WorkerDict; 2.assign different ResourcePool to different models, # 3. and apply the following patch in ray==2.10, https://github.com/ray-project/ray/pull/44385 if not torch.distributed.is_initialized(): rank = int(os.environ["LOCAL_RANK"]) torch.distributed.init_process_group( backend=get_nccl_backend(), timeout=datetime.timedelta(seconds=self.config.get("nccl_timeout", 600)), init_method=os.environ.get("DIST_INIT_METHOD", None), ) get_torch_device().set_device(rank) mpu.initialize_model_parallel( tensor_model_parallel_size=self.config.megatron.tensor_model_parallel_size, pipeline_model_parallel_size=self.config.megatron.pipeline_model_parallel_size, virtual_pipeline_model_parallel_size=self.config.megatron.virtual_pipeline_model_parallel_size, pipeline_model_parallel_split_rank=None, use_sharp=False, context_parallel_size=self.config.megatron.context_parallel_size, expert_model_parallel_size=self.config.megatron.expert_model_parallel_size, expert_tensor_parallel_size=self.config.megatron.expert_tensor_parallel_size, nccl_communicator_config_path=None, ) set_random_seed(seed=self.config.megatron.seed) # normalize config if self.config.micro_batch_size is not None: self.config.micro_batch_size //= mpu.get_data_parallel_world_size() self.config.micro_batch_size_per_gpu = self.config.micro_batch_size def _build_rm_model(self, model_path, tokenizer, override_model_config, override_transformer_config): from megatron.core.models.gpt.gpt_model import ModelType from verl.utils.megatron_utils import get_model self._init_hf_config_and_tf_config( model_path, tokenizer, self.dtype, override_model_config, override_transformer_config, self.config.model.get("trust_remote_code", False), self.config.megatron.use_mbridge, ) if self.bridge is not None: from verl.models.mcore.mbridge import freeze_moe_router, make_value_model post_model_creation_callbacks = [make_value_model] if override_model_config.get("moe_config", {}).get("freeze_moe_router", False): post_model_creation_callbacks.append(freeze_moe_router) reward_model = self.bridge.get_model( post_model_creation_callbacks=post_model_creation_callbacks, wrap_with_ddp=False ) else: def megatron_rm_model_provider(pre_process, post_process): from verl.models.mcore import init_mcore_model parallel_model = init_mcore_model( self.tf_config, self.hf_config, pre_process, post_process, share_embeddings_and_output_weights=False, value=True, ) parallel_model.to(get_device_name()) return parallel_model # Step 3: initialize the megatron model reward_model = get_model( model_provider_func=megatron_rm_model_provider, model_type=ModelType.encoder_or_decoder, wrap_with_ddp=False, use_distributed_optimizer=self.config.megatron.use_distributed_optimizer, ) # note that here reward_model will be a list to be compatible with the construction of interleaved pp (vpp) # but here, we do not use pp (vpp) yet. 
For simplicity, we remove the list # reward_model = nn.ModuleList(reward_model) if self.config.load_weight: if self.config.megatron.use_dist_checkpointing: load_mcore_dist_weights(reward_model, self.config.megatron.dist_checkpointing_path, is_value_model=True) else: if self.bridge is not None: local_model_path = get_hf_model_path(self.config) self.bridge.load_weights(reward_model, local_model_path) else: load_megatron_gptmodel_weights( self.config, self.hf_config, reward_model, params_dtype=self.dtype, is_value_model=True ) # TODO: add more optimizer args into config get_torch_device().empty_cache() return reward_model, self.hf_config @register(dispatch_mode=Dispatch.ONE_TO_ALL) def init_model(self): # create critic from verl.utils.torch_dtypes import PrecisionType if self.config.model.get("external_lib", None) is not None: # This is used to import external_lib into the huggingface systems import importlib importlib.import_module(self.config.model.external_lib) override_model_config = OmegaConf.to_container(self.config.model.get("override_config", OmegaConf.create())) override_transformer_config = OmegaConf.to_container( self.config.megatron.get("override_transformer_config", OmegaConf.create()), resolve=True ) use_shm = self.config.model.get("use_shm", False) sft_tokenizer_local_path = copy_to_local(self.config.model.input_tokenizer, use_shm=use_shm) sft_tokenizer = hf_tokenizer(sft_tokenizer_local_path) rm_tokenizer_path = self.config.model.get("rm_tokenizer", None) rm_tokenizer = None if rm_tokenizer_path is not None: rm_tokenizer_local_path = copy_to_local(rm_tokenizer_path, use_shm=use_shm) rm_tokenizer = hf_tokenizer( rm_tokenizer_local_path, trust_remote_code=self.config.model.get("trust_remote_code", False) ) self.param_dtype = torch.bfloat16 self.dtype = PrecisionType.to_dtype(self.param_dtype) reward_model_module, reward_model_config = self._build_rm_model( model_path=self.config.model.path, tokenizer=rm_tokenizer, override_model_config=override_model_config, override_transformer_config=override_transformer_config, ) # FIXME(sgm): reward model param offload is implemented in MegatronRewardModel # should be implemented in workers self.rm = MegatronRewardModel( config=self.config, reward_model_module=reward_model_module, model_config=reward_model_config, hf_config=self.hf_config, tf_config=self.tf_config, sft_tokenizer=sft_tokenizer, rm_tokenizer=rm_tokenizer, ) # TODO: reward model use itself tokenizer instead of sft tokenizer # the input_ids, responses, attention_mask and position_ids may be different! @register(dispatch_mode=Dispatch.MEGATRON_COMPUTE_PROTO) @DistProfiler.annotate(color="brown") def compute_rm_score(self, data: DataProto): data.meta_info["micro_batch_size"] = self.config.micro_batch_size_per_gpu data.meta_info["max_token_len"] = self.config.forward_max_token_len_per_gpu data.meta_info["use_dynamic_bsz"] = self.config.use_dynamic_bsz data = data.to(get_device_id()) output = self.rm.compute_reward(data) output = output.to("cpu") return output ================================================ FILE: verl_rl/verl/workers/reward_manager/__init__.py ================================================ # Copyright 2024 PRIME team and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .registry import get_reward_manager_cls, register # noqa: I001 from .batch import BatchRewardManager from .dapo import DAPORewardManager from .naive import NaiveRewardManager from .prime import PrimeRewardManager # Note(haibin.lin): no need to include all reward managers here in case of complicated dependencies __all__ = [ "BatchRewardManager", "DAPORewardManager", "NaiveRewardManager", "PrimeRewardManager", "register", "get_reward_manager_cls", ] ================================================ FILE: verl_rl/verl/workers/reward_manager/batch.py ================================================ # Copyright 2025 Individual Contributor: Mert Unsal # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections import defaultdict import torch from verl import DataProto from verl.workers.reward_manager import register @register("batch") class BatchRewardManager: """ A batch reward manager that computes rewards for a batch of data. Args: tokenizer (Tokenizer): The tokenizer to use for decoding the responses. num_examine (int): The number of responses to examine. compute_score (callable): The function to compute the rewards. reward_fn_key (str): The key to use for the reward function. reward_kwargs (dict): The keyword arguments to pass to the reward function. """ def __init__(self, tokenizer, num_examine, compute_score, reward_fn_key="data_source", **reward_kwargs): self.tokenizer = tokenizer self.num_examine = num_examine self.compute_score = compute_score self.reward_fn_key = reward_fn_key self.reward_kwargs = reward_kwargs def verify(self, data): prompt_ids = data.batch["prompts"] response_ids = data.batch["responses"] attention_mask = data.batch["attention_mask"] prompt_len = prompt_ids.shape[-1] valid_response_lengths = attention_mask[:, prompt_len:].sum(dim=-1) responses_str = [] for i in range(len(data)): valid_len = valid_response_lengths[i] valid_response_ids = response_ids[i][:valid_len] response_str = self.tokenizer.decode(valid_response_ids, skip_special_tokens=True) responses_str.append(response_str) ground_truths = [item.non_tensor_batch["reward_model"].get("ground_truth", None) for item in data] data_sources = data.non_tensor_batch[self.reward_fn_key] extras = data.non_tensor_batch.get("extra_info", [None] * len(data)) scores = self.compute_score( data_sources=data_sources, solution_strs=responses_str, ground_truths=ground_truths, extra_infos=extras, **self.reward_kwargs, ) return scores def __call__(self, data: DataProto, return_dict=False): # If there is rm score, we directly return rm score. 
Otherwise, we compute via rm_score_fn if "rm_scores" in data.batch.keys(): if return_dict: return {"reward_tensor": data.batch["rm_scores"]} else: return data.batch["rm_scores"] reward_tensor = torch.zeros_like(data.batch["responses"], dtype=torch.float32) reward_extra_info = defaultdict(list) prompt_ids = data.batch["prompts"] prompt_len = prompt_ids.shape[-1] attention_mask = data.batch["attention_mask"] valid_response_lengths = attention_mask[:, prompt_len:].sum(dim=-1) data_sources = data.non_tensor_batch[self.reward_fn_key] scores = self.verify(data) rewards = [] already_printed = {} for i in range(len(data)): length = valid_response_lengths[i].item() score = scores[i] if isinstance(score, dict): reward = score["score"] for key, value in score.items(): reward_extra_info[key].append(value) else: reward = score rewards.append(reward) reward_tensor[i, length - 1] = reward data_source = data_sources[i] if already_printed.get(data_source, 0) < self.num_examine: response_str = self.tokenizer.decode(data.batch["responses"][i][:length], skip_special_tokens=True) prompt_str = self.tokenizer.decode(data.batch["prompts"][i], skip_special_tokens=True) ground_truth = data[i].non_tensor_batch["reward_model"].get("ground_truth", None) print("[prompt]", prompt_str) print("[response]", response_str) print("[ground_truth]", ground_truth) print("[score]", scores[i]) already_printed[data_source] = already_printed.get(data_source, 0) + 1 data.batch["acc"] = torch.tensor(rewards, dtype=torch.float32, device=prompt_ids.device) if return_dict: return {"reward_tensor": reward_tensor, "reward_extra_info": reward_extra_info} else: return reward_tensor ================================================ FILE: verl_rl/verl/workers/reward_manager/dapo.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
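# ----------------------------------------------------------------------
# Worked example (hypothetical settings) of the overlong-buffer penalty
# applied below in DAPORewardManager.__call__: responses longer than
# max_resp_len - overlong_buffer.len pick up a linear penalty, while
# responses within budget are untouched (the penalty is capped at 0).
max_resp_len, overlong_buffer_len, penalty_factor = 1024, 256, 1.0
expected_len = max_resp_len - overlong_buffer_len  # 768 tokens "for free"
valid_response_length = 900                        # hypothetical sample
exceed_len = valid_response_length - expected_len  # 132 tokens over budget
overlong_reward = min(-exceed_len / overlong_buffer_len * penalty_factor, 0)
assert abs(overlong_reward - (-0.515625)) < 1e-9
# ----------------------------------------------------------------------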
from collections import defaultdict import torch from verl import DataProto from verl.utils.reward_score import default_compute_score from verl.workers.reward_manager import register @register("dapo") class DAPORewardManager: """The reward manager.""" def __init__( self, tokenizer, num_examine, compute_score=None, reward_fn_key="data_source", max_resp_len=None, overlong_buffer_cfg=None, ) -> None: self.tokenizer = tokenizer self.num_examine = num_examine # the number of batches of decoded responses to print to the console self.compute_score = compute_score or default_compute_score self.reward_fn_key = reward_fn_key self.overlong_buffer_cfg = overlong_buffer_cfg self.max_resp_len = max_resp_len if self.overlong_buffer_cfg is not None: assert self.max_resp_len is not None, ( f"max_resp_len must be provided if {overlong_buffer_cfg=}, but got None" ) assert self.max_resp_len >= self.overlong_buffer_cfg.len, ( "max_resp_len must be larger than overlong_buffer.len" ) def __call__(self, data: DataProto, return_dict: bool = False): """We will expand this function gradually based on the available datasets""" # If there is rm score, we directly return rm score. Otherwise, we compute via rm_score_fn if "rm_scores" in data.batch.keys(): if return_dict: return {"reward_tensor": data.batch["rm_scores"]} else: return data.batch["rm_scores"] reward_tensor = torch.zeros_like(data.batch["responses"], dtype=torch.float32) reward_extra_info = defaultdict(list) already_print_data_sources = {} for i in range(len(data)): data_item = data[i] # DataProtoItem prompt_ids = data_item.batch["prompts"] prompt_length = prompt_ids.shape[-1] valid_prompt_length = data_item.batch["attention_mask"][:prompt_length].sum() valid_prompt_ids = prompt_ids[-valid_prompt_length:] response_ids = data_item.batch["responses"] valid_response_length = data_item.batch["attention_mask"][prompt_length:].sum() valid_response_ids = response_ids[:valid_response_length] # decode prompt_str = self.tokenizer.decode(valid_prompt_ids, skip_special_tokens=True) response_str = self.tokenizer.decode(valid_response_ids, skip_special_tokens=True) eos_token = self.tokenizer.eos_token if response_str.endswith(eos_token): response_str = response_str[: -len(eos_token)] ground_truth = data_item.non_tensor_batch["reward_model"]["ground_truth"] data_source = data_item.non_tensor_batch[self.reward_fn_key] extra_info = data_item.non_tensor_batch.get("extra_info", None) result = self.compute_score( data_source=data_source, solution_str=response_str, ground_truth=ground_truth, extra_info=extra_info, ) score: float if isinstance(result, dict): score = result["score"] # Store the information including original reward for key, value in result.items(): reward_extra_info[key].append(value) else: score = result reward_extra_info["acc"].append(score) reward = score if self.overlong_buffer_cfg.enable: overlong_buffer_len = self.overlong_buffer_cfg.len expected_len = self.max_resp_len - overlong_buffer_len exceed_len = valid_response_length - expected_len overlong_penalty_factor = self.overlong_buffer_cfg.penalty_factor overlong_reward = min(-exceed_len / overlong_buffer_len * overlong_penalty_factor, 0) reward += overlong_reward if self.overlong_buffer_cfg.log: reward_extra_info["overlong_reward"].append(overlong_reward) reward_extra_info["overlong"].append(overlong_reward < 0) reward_tensor[i, valid_response_length - 1] = reward if data_source not in already_print_data_sources: already_print_data_sources[data_source] = 0 if already_print_data_sources[data_source] < 
self.num_examine: already_print_data_sources[data_source] += 1 print("[prompt]", prompt_str) print("[response]", response_str) print("[ground_truth]", ground_truth) if isinstance(result, dict): for key, value in result.items(): print(f"[{key}]", value) else: print("[score]", score) if return_dict: return { "reward_tensor": reward_tensor, "reward_extra_info": reward_extra_info, } else: return reward_tensor ================================================ FILE: verl_rl/verl/workers/reward_manager/naive.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections import defaultdict import torch from verl import DataProto from verl.utils.reward_score import default_compute_score from verl.workers.reward_manager import register @register("naive") class NaiveRewardManager: """The reward manager.""" def __init__(self, tokenizer, num_examine, compute_score=None, reward_fn_key="data_source") -> None: """ Initialize the NaiveRewardManager instance. Args: tokenizer: The tokenizer used to decode token IDs into text. num_examine: The number of batches of decoded responses to print to the console for debugging purpose. compute_score: A function to compute the reward score. If None, `default_compute_score` will be used. reward_fn_key: The key used to access the data source in the non-tensor batch data. Defaults to "data_source". """ self.tokenizer = tokenizer # Store the tokenizer for decoding token IDs self.num_examine = num_examine # the number of batches of decoded responses to print to the console self.compute_score = compute_score or default_compute_score self.reward_fn_key = reward_fn_key # Store the key for accessing the data source def __call__(self, data: DataProto, return_dict=False): """We will expand this function gradually based on the available datasets""" # If there is rm score, we directly return rm score. 
Otherwise, we compute via rm_score_fn if "rm_scores" in data.batch.keys(): if return_dict: return {"reward_tensor": data.batch["rm_scores"]} else: return data.batch["rm_scores"] reward_tensor = torch.zeros_like(data.batch["responses"], dtype=torch.float32) reward_extra_info = defaultdict(list) already_print_data_sources = {} for i in range(len(data)): data_item = data[i] # DataProtoItem prompt_ids = data_item.batch["prompts"] prompt_length = prompt_ids.shape[-1] valid_prompt_length = data_item.batch["attention_mask"][:prompt_length].sum() valid_prompt_ids = prompt_ids[-valid_prompt_length:] response_ids = data_item.batch["responses"] valid_response_length = data_item.batch["attention_mask"][prompt_length:].sum() valid_response_ids = response_ids[:valid_response_length] # decode prompt_str = self.tokenizer.decode(valid_prompt_ids, skip_special_tokens=True) response_str = self.tokenizer.decode(valid_response_ids, skip_special_tokens=True) ground_truth = data_item.non_tensor_batch["reward_model"]["ground_truth"] data_source = data_item.non_tensor_batch[self.reward_fn_key] extra_info = data_item.non_tensor_batch.get("extra_info", {}) num_turns = data_item.non_tensor_batch.get("__num_turns__", None) extra_info["num_turns"] = num_turns score = self.compute_score( data_source=data_source, solution_str=response_str, ground_truth=ground_truth, extra_info=extra_info, ) if isinstance(score, dict): reward = score["score"] # Store the information including original reward for key, value in score.items(): reward_extra_info[key].append(value) else: reward = score reward_tensor[i, valid_response_length - 1] = reward if data_source not in already_print_data_sources: already_print_data_sources[data_source] = 0 if already_print_data_sources[data_source] < self.num_examine: already_print_data_sources[data_source] += 1 print("[prompt]", prompt_str) print("[response]", response_str) print("[ground_truth]", ground_truth) if isinstance(score, dict): for key, value in score.items(): print(f"[{key}]", value) else: print("[score]", score) if return_dict: return { "reward_tensor": reward_tensor, "reward_extra_info": reward_extra_info, } else: return reward_tensor ================================================ FILE: verl_rl/verl/workers/reward_manager/prime.py ================================================ # Copyright 2024 PRIME team and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
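# ----------------------------------------------------------------------
# The reward-placement convention shared by the managers above: the
# scalar score for each sample lands on its last valid response token,
# and every other position stays zero. Torch-only sketch, fake lengths:
import torch

responses = torch.zeros(2, 5)                 # (bs=2, response_len=5)
valid_response_length = torch.tensor([3, 5])  # hypothetical per-sample lengths
scores = [1.0, 0.5]
reward_tensor = torch.zeros_like(responses)
for i in range(len(scores)):
    reward_tensor[i, valid_response_length[i] - 1] = scores[i]
assert reward_tensor[0, 2].item() == 1.0 and reward_tensor[1, 4].item() == 0.5
# ----------------------------------------------------------------------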
import asyncio from concurrent.futures import ProcessPoolExecutor from functools import partial from typing import Callable, Optional import psutil import torch from transformers import PreTrainedTokenizer from verl import DataProto from verl.utils.reward_score import default_compute_score from verl.workers.reward_manager import register async def single_compute_score(evaluation_func, completion, reference, task, task_extra_info, executor, timeout=300.0): loop = asyncio.get_running_loop() try: # Ensure process_completion is called properly future = loop.run_in_executor(executor, partial(evaluation_func, task, completion, reference, task_extra_info)) return await asyncio.wait_for(future, timeout=timeout) except asyncio.TimeoutError: print(f"[Timeout] Task timeout: {completion}") return None # Default value for timed-out rows except Exception as e: print(f"[Error] Task failed: {e}, completion: {completion[:80]}") return None # Default value for failed rows async def parallel_compute_score_async( evaluation_func, completions, references, tasks, extra_info=None, num_processes=64 ): if extra_info is None: extra_info = [None] * len(tasks) scores = [] with ProcessPoolExecutor(max_workers=num_processes) as executor: # to prevent very occasional starvation caused by some anomalous programs ( like infinite loop ), the # exceptions in async programs will instantly halt the evaluation, and all summoned processes will be killed. try: # Create tasks for all rows tasks_async = [ single_compute_score(evaluation_func, c, r, t, ei, executor, timeout=300.0) for c, r, t, ei in zip(completions, references, tasks, extra_info, strict=True) ] results = await asyncio.gather(*tasks_async, return_exceptions=False) except Exception as e: print(f"[Exception] async gather failed: {e}") raise finally: terminated_count = 0 for pid, proc in executor._processes.items(): try: p = psutil.Process(pid) p.terminate() try: p.wait(timeout=5) except psutil.TimeoutExpired: p.kill() terminated_count += 1 except Exception: pass print(f"[Shutdown] {terminated_count} subprocess(es) terminated.") # Process results for result, completion, reference, task in zip(results, completions, references, tasks, strict=True): if isinstance(result, Exception) or result is None: # Handle failed or timed-out tasks scores.append(0.0) elif isinstance(result, int | float | bool): scores.append(float(result)) else: scores.append(float(result[0])) return scores def run_reward_scoring(evaluation_func, completions, references, tasks, extra_info=None, num_processes=64): loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) try: return loop.run_until_complete( parallel_compute_score_async(evaluation_func, completions, references, tasks, extra_info, num_processes) ) finally: loop.close() @register("prime") class PrimeRewardManager: """ The Reward Manager used in https://github.com/PRIME-RL/PRIME """ def __init__( self, tokenizer: PreTrainedTokenizer, num_examine: int, compute_score: Optional[Callable] = None, reward_fn_key: str = "data_source", ) -> None: self.tokenizer = tokenizer self.num_examine = num_examine # the number of batches of decoded responses to print to the console self.compute_score = compute_score or default_compute_score self.reward_fn_key = reward_fn_key def verify(self, data): """ verify the batch and save as ``acc`` tensor """ # batched scoring prompt_ids = data.batch["prompts"] response_ids = data.batch["responses"] sequences_str = self.tokenizer.batch_decode(response_ids, skip_special_tokens=True) ground_truth = 
[data_item.non_tensor_batch["reward_model"]["ground_truth"] for data_item in data] data_sources = data.non_tensor_batch[self.reward_fn_key] extra_info = data.non_tensor_batch.get("extra_info", None) assert len(sequences_str) == len(ground_truth) == len(data_sources) try: scores = run_reward_scoring( self.compute_score, completions=sequences_str, references=ground_truth, tasks=data_sources, extra_info=extra_info, num_processes=64, ) except asyncio.TimeoutError: print("[Timeout] Global reward scoring timed out. Setting all as 0.") scores = [0.0 for _ in range(len(sequences_str))] except Exception as e: print(f"[Error] Unexpected error during scoring. Setting all as 0. {e}") scores = [0.0 for _ in range(len(sequences_str))] data.batch["acc"] = torch.tensor(scores, dtype=torch.float32, device=prompt_ids.device) return scores def __call__(self, data: DataProto, return_dict: bool = False): """We will expand this function gradually based on the available datasets""" # If there is rm score, we directly return rm score. Otherwise, we compute via rm_score_fn if "rm_scores" in data.batch.keys(): return data.batch["rm_scores"] reward_tensor = torch.zeros_like(data.batch["responses"], dtype=torch.float32) already_print_data_sources = {} # batched scoring prompt_ids = data.batch["prompts"] prompt_length = prompt_ids.shape[-1] response_ids = data.batch["responses"] valid_response_length = data.batch["attention_mask"][:, prompt_length:].sum(dim=-1) sequences_str = self.tokenizer.batch_decode(response_ids, skip_special_tokens=True) data_sources = data.non_tensor_batch["data_source"] scores = self.verify(data) for i in range(len(data)): data_source = data_sources[i] reward_tensor[i, valid_response_length[i].item() - 1] = scores[i] if data_source not in already_print_data_sources: already_print_data_sources[data_source] = 0 if already_print_data_sources[data_source] < self.num_examine: already_print_data_sources[data_source] += 1 print(sequences_str) if return_dict: return {"reward_tensor": reward_tensor} else: return reward_tensor ================================================ FILE: verl_rl/verl/workers/reward_manager/registry.py ================================================ # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __all__ = ["register", "get_reward_manager_cls"] REWARD_MANAGER_REGISTRY = {} def register(name): """Decorator to register a reward manager class with a given name. Args: name: `(str)` The name of the reward manager. """ def decorator(cls): if name in REWARD_MANAGER_REGISTRY and REWARD_MANAGER_REGISTRY[name] != cls: raise ValueError( f"Reward manager {name} has already been registered: {REWARD_MANAGER_REGISTRY[name]} vs {cls}" ) REWARD_MANAGER_REGISTRY[name] = cls return cls return decorator def get_reward_manager_cls(name): """Get the reward manager class with a given name. Args: name: `(str)` The name of the reward manager. Returns: `(type)`: The reward manager class. 
""" if name not in REWARD_MANAGER_REGISTRY: raise ValueError(f"Unknown reward manager: {name}") return REWARD_MANAGER_REGISTRY[name] ================================================ FILE: verl_rl/verl/workers/reward_model/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .base import BasePPORewardModel __all__ = ["BasePPORewardModel"] ================================================ FILE: verl_rl/verl/workers/reward_model/base.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The base class for reward model """ from abc import ABC, abstractmethod from verl import DataProto class BasePPORewardModel(ABC): def __init__(self, config): self.config = config @abstractmethod def compute_reward(self, data: DataProto) -> DataProto: """Computing reward given input_ids. The transformers should output a tensor with shape [batch_size, sequence_length], and the value at [EOS] mask should be gathered. Args: data: must contain keys "input_ids", "attention_mask" and "position_ids". - input_ids: [batch_size, sequence_length] - attention_mask: [batch_size, sequence_length] - position_ids: [batch_size, sequence_length] Returns: a data pass protocol containing "reward". Only the [EOS] position contains the reward. Other position should have zero reward. Note that this may change in the future if we use dense reward. So, we leave the interface for general case. - reward: [batch_size, sequence_length]. """ pass ================================================ FILE: verl_rl/verl/workers/reward_model/megatron/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from .reward_model import MegatronRewardModel __all__ = ["MegatronRewardModel"] ================================================ FILE: verl_rl/verl/workers/reward_model/megatron/reward_model.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Megatron Reward Model. """ import itertools import torch import torch.distributed from megatron.core import parallel_state as mpu from megatron.core.pipeline_parallel import get_forward_backward_func from tensordict import TensorDict from verl import DataProto from verl.utils.device import get_device_id, get_device_name, get_torch_device from verl.utils.megatron.pipeline_parallel import make_batch_generator from verl.utils.seqlen_balancing import get_reverse_idx, rearrange_micro_batches from verl.utils.torch_functional import broadcast_dict_tensor, pad_sequence_to_length from verl.workers.reward_model.base import BasePPORewardModel class MegatronRewardModel(BasePPORewardModel): def __init__( self, config, model_config, reward_model_module: torch.nn.ModuleList, hf_config, tf_config, sft_tokenizer=None, rm_tokenizer=None, ): self.config = config self.reward_model_module = reward_model_module self.hf_config = hf_config self.tf_config = tf_config self.model_config = model_config self.device = "cuda" self.sft_tokenizer = sft_tokenizer self.rm_tokenizer = rm_tokenizer self.use_different_tokenizer = rm_tokenizer is not None print(f"MegatronRewardModel.config: {self.config}") if self.config.megatron.param_offload: self.offload_params_to_cpu() def re_encode_by_rm_tokenizer(self, data: DataProto) -> DataProto: assert self.use_different_tokenizer, "re-encode need rm tokenizer not be None!" # need to use rm tokenizer to re-generate input_ids, attention_mask and position_ids # 1. remove pad for each sequence # 2. decode by sft_tokenizer, remove sft system prompts # 3. encode by rm_tokenizer with rm system prompts, get rm_input_ids # 4. generate attention_mask and position_ids input_ids = data.batch["input_ids"] # (bs, seq_len) attention_mask = data.batch["attention_mask"] position_ids = data.batch["position_ids"] ori_values = {"input_ids": input_ids, "attention_mask": attention_mask, "position_ids": position_ids} _, ori_seqlen = input_ids.size(0), input_ids.size(1) input_ids_for_rm = [] attention_mask_for_rm = [] position_ids_for_rm = [] print_decode = True ori_seqlen = ori_seqlen + 128 for id, mask in zip(input_ids, attention_mask, strict=True): # 1. remove pad for each sequence non_zero_indices = torch.nonzero(mask).view(-1) begin_pos, end_pos = non_zero_indices[0].item(), non_zero_indices[-1].item() valid_id = id[begin_pos : end_pos + 1] # 2. 
decode by sft_tokenizer, remove sft system prompts decode_result = self.sft_tokenizer.decode(valid_id) # workaround decode_with_rm_chat = ( decode_result.replace("<|user|>\n", "[INST] ") .replace("\n<|assistant|>\n", " [/INST]") .replace(" \n<|assistant|>\n", " [/INST]") + "" ) if print_decode and torch.distributed.get_rank() == 0: # only print first decode result print( f"device {get_device_id()}: sft decode result:\n{decode_result}\n \ \ndevice {get_device_id()}: sft decode result with \ rm chat template:\n{decode_with_rm_chat}\n\n" ) print_decode = False # 3. encode by rm_tokenizer rm_input_ids = self.rm_tokenizer(decode_with_rm_chat, return_tensors="pt")["input_ids"][0].to( input_ids.device ) # 4. generate attention_mask and position_ids rm_attention_mask = torch.ones_like(rm_input_ids, device=input_ids.device) cur_seqlen = rm_input_ids.shape[-1] # NOTE(gh): the later reward compute will process the shape (bs, seqlen_pad_128) if cur_seqlen > ori_seqlen: print(f"warninig: rm encode seqlen {cur_seqlen} > sft encode seqlen {ori_seqlen}") rm_input_ids = rm_input_ids[:ori_seqlen] rm_attention_mask = rm_attention_mask[:ori_seqlen] else: # right padding rm_input_ids = pad_sequence_to_length(rm_input_ids, ori_seqlen, self.rm_tokenizer.pad_token_id) rm_attention_mask = pad_sequence_to_length(rm_attention_mask, ori_seqlen, 0) rm_position_ids = torch.arange(0, ori_seqlen, device=input_ids.device) input_ids_for_rm.append(torch.unsqueeze(rm_input_ids, dim=0)) attention_mask_for_rm.append(torch.unsqueeze(rm_attention_mask, dim=0)) position_ids_for_rm.append(torch.unsqueeze(rm_position_ids, dim=0)) input_ids_for_rm = torch.cat(input_ids_for_rm, dim=0) attention_mask_for_rm = torch.cat(attention_mask_for_rm, dim=0) position_ids_for_rm = torch.cat(position_ids_for_rm, dim=0) # (bs, seqlen) will not change, but input_ids, attention_mask and position_ids will change # NOTE(gh): need to replace into origin values after compute reward! data.batch["input_ids"] = input_ids_for_rm data.batch["attention_mask"] = attention_mask_for_rm data.batch["position_ids"] = position_ids_for_rm return data, ori_values @torch.no_grad() def compute_reward(self, data: DataProto) -> DataProto: if self.config.megatron.param_offload: self.load_params_to_cuda() if self.use_different_tokenizer: data, ori_values = self.re_encode_by_rm_tokenizer(data) input_ids = data.batch["input_ids"] # (bs, seq_len') attention_mask = data.batch["attention_mask"] position_ids = data.batch["position_ids"] use_dynamic_bsz = data.meta_info.get("use_dynamic_bsz", False) micro_batch_size = data.meta_info.get("micro_batch_size", None) max_token_len = data.meta_info.get("max_token_len", None) assert micro_batch_size is not None, "micro batch size is needed for forward compute" if use_dynamic_bsz: assert max_token_len is not None, "use_dynamic_bsz is True, but max_token_len is None!" max_token_len = max_token_len * self.config.megatron.context_parallel_size responses = data.batch["responses"] batch_size = responses.size(0) response_length = responses.size(1) with torch.no_grad(): output = self.forward_batch( data, use_dynamic_bsz=use_dynamic_bsz, micro_batch_size=micro_batch_size, max_token_len=max_token_len ) if mpu.is_pipeline_last_stage(ignore_virtual=True): logits = torch.cat(output["output"], dim=0) if use_dynamic_bsz: indices = output["indices"] indices = list(itertools.chain.from_iterable(indices)) assert len(indices) == logits.size(0), f"{len(indices)} vs. 
{logits.size()}" revert_indices = torch.tensor(get_reverse_idx(indices), dtype=torch.long) logits = logits[revert_indices] else: logits = torch.empty( (input_ids.shape[0], input_ids.shape[1]), device=input_ids.device, ) logits = logits.to(torch.float32) # broadcast across pp ranks torch.distributed.broadcast( tensor=logits, src=mpu.get_pipeline_model_parallel_last_rank(), group=mpu.get_pipeline_model_parallel_group(), async_op=False, ) # (bs, seqlen', hidden_size) -> (bs, seqlen', 1) -> (bs, seqlen') token_level_rewards = logits # find the last token reward ends = attention_mask.cumsum(dim=-1).argmax(dim=-1).view(-1, 1) # (bs, 1) rewards = torch.gather(token_level_rewards, dim=1, index=ends) # (bs, 1) if self.use_different_tokenizer: data.batch.update(ori_values) input_ids = ori_values["input_ids"] attention_mask = ori_values["attention_mask"] position_ids = ori_values["position_ids"] token_level_rewards = rewards.expand(attention_mask.shape[0], attention_mask.shape[1]) # (bs, ori_seqlen) # assign last valid token reward to ori position if position_ids.dim() == 3: # qwen2vl mrope [bs, 3, seq_len] position_ids = position_ids[:, 0, :] eos_mask_idx = torch.argmax(position_ids * attention_mask, dim=-1) # (bs,) eos_mask = torch.zeros_like(attention_mask) eos_mask[torch.arange(batch_size), eos_mask_idx] = 1.0 token_level_rewards = token_level_rewards * eos_mask token_level_rewards = token_level_rewards[:, -response_length:] if self.config.megatron.param_offload: self.offload_params_to_cpu() else: # add empty cache after each compute get_torch_device().empty_cache() batch = TensorDict({"rm_scores": token_level_rewards}, batch_size=input_ids.shape[0]) return DataProto(batch=batch) def forward_batch(self, data: DataProto, use_dynamic_bsz=False, micro_batch_size=None, max_token_len=None): """ We assume: - The model takes input: (input_ids, attention_mask, position_ids). No rmpad for the input - The communication shape is (total_nnz_pad_to_sp // tp_size, 1, hidden_size) if sequence parallel is enabled """ # broadcast from last pp rank to all other pp ranks # TODO: actually, we just need to control the sampling order. 
mini_batch = data mini_batch.batch = mini_batch.batch.contiguous() broadcast_dict_tensor( mini_batch.batch, src=mpu.get_pipeline_model_parallel_last_rank(), group=mpu.get_pipeline_model_parallel_group(), ) mini_batch.batch["attention_mask"] = mini_batch.batch["attention_mask"].to(bool) self.has_multi_modal_inputs = "multi_modal_inputs" in mini_batch.non_tensor_batch.keys() if self.has_multi_modal_inputs: mini_batch.batch["multi_modal_inputs"] = mini_batch.non_tensor_batch["multi_modal_inputs"] mini_batch.batch["multi_modal_inputs_idx"] = torch.Tensor( list(range(len(mini_batch.non_tensor_batch["multi_modal_inputs"]))) ).to(torch.int64) indices = None if use_dynamic_bsz: assert max_token_len is not None, "max_token_len must be set when use_dynamic_bsz is True" vpp_size = mpu.get_virtual_pipeline_model_parallel_world_size() if vpp_size is not None and vpp_size > 1: microbatch_group_size_per_vp_stage = self.tf_config.microbatch_group_size_per_vp_stage micro_batches, indices = rearrange_micro_batches( batch=mini_batch.batch, num_batches_divided_by=microbatch_group_size_per_vp_stage, max_token_len=max_token_len, ) assert len(micro_batches) % self.tf_config.microbatch_group_size_per_vp_stage == 0, ( f"micro_batches {micro_batches} must be divisible by microbatch_group_size_per_vp_stage " f"{microbatch_group_size_per_vp_stage} for megatron backend" ) else: micro_batches, indices = rearrange_micro_batches(batch=mini_batch.batch, max_token_len=max_token_len) total_seqlen = max_token_len else: assert micro_batch_size is not None, ( "micro_batch_size is needed to be passed in when not using dynamic batch size" ) micro_batches = mini_batch.batch.split(micro_batch_size) seq_len = micro_batches[0]["input_ids"].shape[1] total_seqlen = micro_batch_size * seq_len n_micro_batch = len(micro_batches) # compute input shapes for pp stages forward_backward_func = get_forward_backward_func() def loss_func(output): return torch.tensor(1.0, device=output.device), output def forward_step(batch_iter, model): batch = next(batch_iter) input_ids = batch["input_ids"] attention_mask = batch["attention_mask"] position_ids = batch["position_ids"] from verl.models.mcore import get_mcore_forward_fn forward_fn = get_mcore_forward_fn(self.hf_config) multi_modal_inputs = {} if "multi_modal_inputs" in batch: for key in batch["multi_modal_inputs"][0].keys(): multi_modal_inputs[key] = torch.cat( [batch["multi_modal_inputs"][i][key] for i in batch["multi_modal_inputs_idx"]], dim=0 ) output = forward_fn( model, input_ids, attention_mask, position_ids, sequence_parallel=self.tf_config.sequence_parallel, value_model=True, multi_modal_inputs=multi_modal_inputs, ) return output, loss_func # batch should be a list of batches inside micro-batches batch_generator = make_batch_generator(micro_batches, vpp_size=len(self.reward_model_module)) # TODO: we may use the new schedule instead # for flash-attn: (seq_len, batch_size, hidden_size) = (mbs*seq_len, 1, hidden_size) if mpu.get_pipeline_model_parallel_world_size() > 1: losses_reduced = forward_backward_func( forward_step_func=forward_step, data_iterator=batch_generator, model=self.reward_model_module, num_microbatches=n_micro_batch, seq_length=total_seqlen, # no use when input_shapes was set micro_batch_size=1, # no use when input_shapes was set forward_only=True, ) else: losses_reduced = forward_backward_func( forward_step_func=forward_step, data_iterator=batch_generator, model=self.reward_model_module, num_microbatches=n_micro_batch, seq_length=total_seqlen, # in use for pp = 1 
micro_batch_size=1, # in use for pp = 1 forward_only=True, ) if self.has_multi_modal_inputs: data.batch.pop("multi_modal_inputs") data.batch.pop("multi_modal_inputs_idx") data.non_tensor_batch.pop("multi_modal_inputs") # loss_reduces contains the stats returned from loss_func losses_reduced = {"output": losses_reduced} if use_dynamic_bsz: losses_reduced["indices"] = indices return losses_reduced def offload_params_to_cpu(self): if self.device in ["cuda", "npu"]: for reward_model_module in self.reward_model_module: for name, param in reward_model_module.named_parameters(): param.data = param.data.to("cpu", non_blocking=True) self.device = "cpu" get_torch_device().empty_cache() def load_params_to_cuda(self): if self.device == "cpu": for reward_model_module in self.reward_model_module: for name, param in reward_model_module.named_parameters(): param.data = param.data.to(get_device_id(), non_blocking=True) self.device = get_device_name() ================================================ FILE: verl_rl/verl/workers/roles/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .critic import CriticWorker __all__ = ["CriticWorker"] ================================================ FILE: verl_rl/verl/workers/roles/actor.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
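# ----------------------------------------------------------------------
# Sketch of the last-valid-token gather used by compute_reward above:
# attention_mask.cumsum(-1).argmax(-1) yields the index of the final
# attended token per row, since argmax returns the first occurrence of
# the running maximum.
import torch

attention_mask = torch.tensor([[1, 1, 1, 0], [1, 1, 1, 1]])
token_level_rewards = torch.tensor([[0.1, 0.2, 0.9, 0.0], [0.3, 0.1, 0.2, 0.7]])
ends = attention_mask.cumsum(dim=-1).argmax(dim=-1).view(-1, 1)
assert ends.squeeze(-1).tolist() == [2, 3]
rewards = torch.gather(token_level_rewards, dim=1, index=ends)
assert torch.allclose(rewards.squeeze(-1), torch.tensor([0.9, 0.7]))
# ----------------------------------------------------------------------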
from verl import DataProto
from verl.single_controller.base import Worker
from verl.single_controller.base.decorator import Dispatch, register


class ActorWorker(Worker):
    """
    This worker can be instantiated as a standalone actor, a standalone rollout,
    a standalone reference policy, or a hybrid engine based on the config.rollout
    """

    def __init__(self, config):
        raise NotImplementedError

    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def init_model(self):
        raise NotImplementedError

    @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO)
    def update_actor(self, data: DataProto):
        raise NotImplementedError

    @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO)
    def compute_log_prob(self, data: DataProto):
        raise NotImplementedError

    @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO)
    def compute_ref_log_prob(self, data: DataProto):
        raise NotImplementedError

    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def save_checkpoint(self, local_path, hdfs_path=None, global_step=0, max_ckpt_to_keep=None):
        raise NotImplementedError

    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def load_checkpoint(self, local_path, hdfs_path=None, del_local_after_load=False):
        raise NotImplementedError


================================================
FILE: verl_rl/verl/workers/roles/critic.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
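# ----------------------------------------------------------------------
# Illustrative sketch (not verl's actual implementation) of what the
# @register(dispatch_mode=...) pattern above amounts to: the decorator
# tags a method with dispatch metadata, and the single-controller worker
# group reads that tag to decide how a driver-side call fans out
# (ONE_TO_ALL broadcasts the same args to every worker; DP_COMPUTE_PROTO
# shards a DataProto across data-parallel ranks and gathers the outputs).
from enum import Enum, auto

class DemoDispatch(Enum):
    ONE_TO_ALL = auto()
    DP_COMPUTE_PROTO = auto()

def demo_register(dispatch_mode):
    def decorator(fn):
        fn.dispatch_mode = dispatch_mode  # inspected by the worker group
        return fn
    return decorator

class DemoWorker:
    @demo_register(dispatch_mode=DemoDispatch.ONE_TO_ALL)
    def init_model(self):
        pass

assert DemoWorker.init_model.dispatch_mode is DemoDispatch.ONE_TO_ALL
# ----------------------------------------------------------------------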
""" The main entry point to run the PPO algorithm """ import logging import os import torch from codetiming import Timer from verl import DataProto from verl.single_controller.base import Worker from verl.single_controller.base.decorator import Dispatch, register from verl.trainer.ppo import core_algos from verl.utils.config import omega_conf_to_dataclass from verl.utils.device import ( get_device_id, get_nccl_backend, ) from verl.utils.profiler import DistProfiler, DistProfilerExtension from verl.utils.py_functional import append_to_dict from verl.utils.torch_functional import masked_mean from verl.workers.engine import EngineRegistry logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) class CriticWorker(Worker, DistProfilerExtension): def __init__(self, config): Worker.__init__(self) DistProfilerExtension.__init__( self, DistProfiler(rank=self.rank, config=omega_conf_to_dataclass(config.get("profiler"))) ) import torch.distributed if not torch.distributed.is_initialized(): torch.distributed.init_process_group(backend=get_nccl_backend()) self.config = config self.engine = EngineRegistry.new(self.config.strategy, self.config) @register(dispatch_mode=Dispatch.ONE_TO_ALL) def init_model(self): self.engine.init_model() def _post_fn_values(self, micro_batch, preds): response_length = micro_batch["responses"].size(-1) values = preds[:, -response_length - 1 : -1] use_remove_padding = self.config.model.get("use_remove_padding", False) if not use_remove_padding: values = values.squeeze(-1) return values, {"values": values.clone().detach()} @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO) @DistProfiler.annotate(color="cyan") def compute_values(self, data: DataProto): # Support all hardwares data = data.to(get_device_id()) micro_batch_size = self.config.forward_micro_batch_size_per_gpu data.meta_info["micro_batch_size"] = micro_batch_size data.meta_info["max_token_len"] = self.config.forward_max_token_len_per_gpu data.meta_info["use_dynamic_bsz"] = self.config.use_dynamic_bsz with self.engine.eval_mode(): data = self.engine.shard_data(data=data) output = self.engine.infer_batch(data, post_fn=self._post_fn_values) response_mask = data.batch["response_mask"] values = output["values"] * response_mask # Only action tokens have values output = DataProto.from_dict(tensors={"values": values}) output = self.engine.unshard_data(data=output) output = output.to("cpu") return output def loss_fn( self, batch: DataProto, vpreds: dict[str, torch.Tensor] ) -> tuple[torch.Tensor, dict[str, torch.Tensor]]: old_values = batch["values"] returns = batch["returns"] response_mask = batch["response_mask"] micro_batch_metrics = {} values, _ = self._post_fn_values(batch, vpreds) vf_loss, vf_clipfrac = core_algos.compute_value_loss( vpreds=values, values=old_values, returns=returns, response_mask=response_mask, cliprange_value=self.config.cliprange_value, loss_agg_mode=self.config.loss_agg_mode, ) if self.config.use_dynamic_bsz: # relative to the dynamic bsz loss = vf_loss * (len(batch) / self.config.ppo_mini_batch_size) else: gradient_accumulation = self.config.ppo_mini_batch_size // self.config.ppo_micro_batch_size_per_gpu loss = vf_loss / gradient_accumulation micro_batch_metrics = { "critic/vf_loss": vf_loss.detach().item(), "critic/vf_clipfrac": vf_clipfrac.detach().item(), "critic/vpred_mean": masked_mean(values, response_mask).detach().item(), } return loss, micro_batch_metrics @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO) @DistProfiler.annotate(color="pink") def 
update_critic(self, data: DataProto): metrics = {} # Support all hardware data = data.to(get_device_id()) # perform forward computation with self.engine.train_mode(): data = self.engine.shard_data(data=data) with Timer(name="update_critic", logger=None) as timer: select_keys = [ "input_ids", "responses", "response_mask", "attention_mask", "position_ids", "values", "returns", ] batch = data.select(batch_keys=select_keys).batch has_multi_modal_inputs = "multi_modal_inputs" in data.non_tensor_batch.keys() # Split to make minibatch iterator for updating the critic # See PPO paper for details. https://arxiv.org/abs/1707.06347 if has_multi_modal_inputs: num_mini_batches = data.batch.batch_size[0] // self.config.ppo_mini_batch_size non_tensor_select_keys = ["multi_modal_inputs"] dataloader = data.select(select_keys, non_tensor_select_keys).chunk(num_mini_batches) else: dataloader = batch.split(self.config.ppo_mini_batch_size) for epoch in range(self.config.ppo_epochs): for batch_idx, mini_batch in enumerate(dataloader): self.engine.optimizer_zero_grad() mini_batch_metrics = self.engine.train_batch(mini_batch, self.loss_fn) grad_norm = self.engine.optimizer_step() mini_batch_metrics["critic/grad_norm"] = grad_norm.detach().item() append_to_dict(metrics, mini_batch_metrics) self.engine.optimizer_zero_grad() delta_time = timer.last # TODO: should not access engine's flops_counter global_num_tokens = data.meta_info["global_token_num"] estimated_flops, promised_flops = self.engine.flops_counter.estimate_flops(global_num_tokens, delta_time) metrics["perf/mfu/critic"] = estimated_flops * self.config.ppo_epochs / promised_flops / self.world_size metrics["critic/lr"] = self.engine.lr_scheduler_step()[0] output = DataProto(batch=None, meta_info={"metrics": metrics}) output = self.engine.unshard_data(data=output) output = output.to("cpu") return output @register(dispatch_mode=Dispatch.ONE_TO_ALL) def save_checkpoint(self, local_path, hdfs_path=None, global_step=0, max_ckpt_to_keep=None): self.engine.save_checkpoint(local_path, hdfs_path, global_step, max_ckpt_to_keep) @register(dispatch_mode=Dispatch.ONE_TO_ALL) def load_checkpoint(self, local_path, hdfs_path=None, del_local_after_load=True): self.engine.load_checkpoint(local_path, hdfs_path, del_local_after_load) ================================================ FILE: verl_rl/verl/workers/rollout/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .base import BaseRollout from .hf_rollout import HFRollout from .naive import NaiveRollout __all__ = ["BaseRollout", "NaiveRollout", "HFRollout"] ================================================ FILE: verl_rl/verl/workers/rollout/async_server.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import asyncio import logging import os import socket import threading from abc import ABC, abstractmethod from contextlib import asynccontextmanager from typing import Any, Optional import fastapi import ray import uvicorn from omegaconf import DictConfig from starlette.requests import Request from starlette.responses import JSONResponse from verl.protocol import DataProto from verl.single_controller.ray.base import RayWorkerGroup from verl.workers.rollout.chat_scheduler import ChatCompletionScheduler logger = logging.getLogger(__file__) def _get_free_port(): with socket.socket() as sock: sock.bind(("", 0)) return sock.getsockname()[1] class AsyncServerBase(ABC): """Base class for AsyncServer.""" def __init__(self): self.address = ray.util.get_node_ip_address() self.port = None self.server_ready = asyncio.Event() asyncio.create_task(self._start_fastapi_server()) async def _start_fastapi_server(self): @asynccontextmanager async def lifespan(app: fastapi.FastAPI): print(f"FastAPI listening on {self.address}:{self.port}") self.server_ready.set() yield # There's no way to gracefully restart a uvicorn server if the port is already in use, # so we exit the process directly and let AsyncLLMServerManager restart it. print("FastAPI shutdown, maybe address already in use, exit process immediately.") os._exit(-1) app = fastapi.FastAPI(lifespan=lifespan) app.router.add_api_route("/v1/chat/completions", self.chat_completion, methods=["POST"]) self.port = _get_free_port() config = uvicorn.Config(app, host=["::", "0.0.0.0"], port=self.port, log_level="warning") server = uvicorn.Server(config) await server.serve() async def get_server_address(self) -> str: """Get FastAPI server address.""" await self.server_ready.wait() return f"{self.address}:{self.port}" @abstractmethod async def chat_completion(self, raw_request: Request) -> JSONResponse: """OpenAI chat completion API. Args: raw_request (Request): raw json request Returns: JSONResponse: json response API reference: https://platform.openai.com/docs/api-reference/chat/create """ raise NotImplementedError @abstractmethod async def generate(self, prompt_ids: list[int], sampling_params: dict[str, Any], request_id: str) -> list[int]: """Generate response ids given prompt ids. Args: prompt_ids (List[int]): prompt ids sampling_params (Dict[str, Any]): sampling params request_id (str): request id Returns: List[int]: response ids """ raise NotImplementedError @abstractmethod async def init_engine(self): """Init async LLM engine.""" raise NotImplementedError @abstractmethod async def wake_up(self): """Wake up engine to load model weights and build kv cache.""" raise NotImplementedError @abstractmethod async def sleep(self): """Sleep engine to offload model weights and discard kv cache.""" raise NotImplementedError class AsyncLLMServerManager: """AsyncLLMServerManager manages a group of vLLM instances, i.e. AsyncvLLMServer.""" def __init__(self, config: DictConfig, worker_group: RayWorkerGroup): """Initialize AsyncLLMServerManager. Args: config: DictConfig, actor_rollout_ref config. worker_group: RayWorkerGroup, worker group of AsyncActorRolloutRefWorker.
""" self.full_config = config self.config = config.actor_rollout_ref self.worker_group = worker_group self.rollout_tp_size = self.config.rollout.tensor_model_parallel_size self.rollout_dp_size = self.worker_group.world_size // self.rollout_tp_size register_center = ray.get_actor(f"{self.worker_group.name_prefix}_register_center") workers_info = ray.get(register_center.get_worker_info.remote()) assert len(workers_info) == self.worker_group.world_size self.async_llm_servers = [None] * self.rollout_dp_size self.server_addresses = [None] * self.rollout_dp_size if self.config.rollout.agent.custom_async_server: server_class = async_server_class( rollout_backend=self.config.rollout.name, rollout_backend_module=self.config.rollout.agent.custom_async_server.path, rollout_backend_class=self.config.rollout.agent.custom_async_server.name, ) else: server_class = async_server_class(rollout_backend=self.config.rollout.name) # Start all server instances, restart if address already in use. unready_dp_ranks = set(range(self.rollout_dp_size)) while len(unready_dp_ranks) > 0: servers = { rollout_dp_rank: server_class.options( # make sure AsyncvLLMServer colocates with its corresponding workers scheduling_strategy=ray.util.scheduling_strategies.NodeAffinitySchedulingStrategy( node_id=workers_info[rollout_dp_rank * self.rollout_tp_size], soft=False, ), name=f"async_llm_server_{rollout_dp_rank}", ).remote(config, self.rollout_dp_size, rollout_dp_rank, self.worker_group.name_prefix) for rollout_dp_rank in unready_dp_ranks } for rollout_dp_rank, server in servers.items(): try: address = ray.get(server.get_server_address.remote()) self.server_addresses[rollout_dp_rank] = address self.async_llm_servers[rollout_dp_rank] = server unready_dp_ranks.remove(rollout_dp_rank) except Exception: ray.kill(server) print(f"rollout server {rollout_dp_rank} failed, maybe address already in use, restarting...") # All server instances are ready, init AsyncLLM engine. ray.get([server.init_engine.remote() for server in self.async_llm_servers]) # Init user provided chat scheduler in sperate thread. self.chat_scheduler: ChatCompletionScheduler = None self.chat_scheduler_exception: Exception = None self.chat_scheduler_loop = None self.chat_scheduler_ready = threading.Event() self.chat_scheduler_thread = threading.Thread(target=self._init_chat_scheduler, daemon=True) self.chat_scheduler_thread.start() self.chat_scheduler_ready.wait() def _init_chat_scheduler(self): self.chat_scheduler_loop = asyncio.new_event_loop() asyncio.set_event_loop(self.chat_scheduler_loop) try: self.chat_scheduler = ChatCompletionScheduler( config=self.full_config, server_addresses=self.server_addresses, ) except Exception as e: logger.exception(f"chat_scheduler init error: {e}") self.chat_scheduler_exception = e finally: self.chat_scheduler_ready.set() self.chat_scheduler_loop.run_forever() def wake_up(self): """Wake up all vllm instances.""" if self.config.rollout.free_cache_engine: ray.get([server.wake_up.remote() for server in self.async_llm_servers]) def sleep(self): """Sleep all vllm instances.""" if self.config.rollout.free_cache_engine: ray.get([server.sleep.remote() for server in self.async_llm_servers]) def submit_chat_completions( self, messages: list[dict[str, str]], sampling_params: dict[str, Any], ): """Submit a chat completion request to chat scheduler and wait until it is done. To submit multiple requests in parallel, please use `generate_sequences` instead. Args: same as ChatCompletionScheduler.submit_chat_completions. 
""" assert self.chat_scheduler is not None, "chat scheduler is not initialized." future = asyncio.run_coroutine_threadsafe( self.chat_scheduler._submit_chat_completions_semaphore( messages=messages, request_id=None, sampling_params=sampling_params, ), self.chat_scheduler_loop, ) future.result() def generate_sequences(self, prompts: DataProto, **sampling_params) -> DataProto: """Generate multiple sequences in parallel via chat scheduler.""" assert self.chat_scheduler is not None, "chat scheduler is not initialized." future = asyncio.run_coroutine_threadsafe( self.chat_scheduler.generate_sequences(prompts, **sampling_params), self.chat_scheduler_loop ) return future.result() def async_server_class( rollout_backend: str, rollout_backend_module: Optional[str] = None, rollout_backend_class: Optional[str] = None ) -> type[AsyncServerBase]: """Get async server class. Args: rollout_backend: str, rollout backend type (alias), should be "vllm" or "sglang". rollout_backend_module: Optional[str], import path of the rollout backend. rollout_backend_class: Optional[str], class name of the rollout backend. Returns: Type[AsyncServerBase]: async server class. """ if rollout_backend_class is None and rollout_backend_module is None: # If both are None, use the default backend class # Do not change the original import behavior # importlib.import_module and from ... import ... have subtle differences in ray if rollout_backend == "vllm": from verl.workers.rollout.vllm_rollout.vllm_async_server import AsyncvLLMServer return AsyncvLLMServer elif rollout_backend == "sglang": from verl.workers.rollout.sglang_rollout.async_sglang_server import AsyncSGLangServer return AsyncSGLangServer else: raise NotImplementedError(f"rollout backend {rollout_backend} is not supported") if rollout_backend_module is None or rollout_backend_class is None: raise ValueError("rollout_backend_module and rollout_backend_class must be both provided for customization") from verl.utils.import_utils import load_extern_type return load_extern_type(rollout_backend_module, rollout_backend_class) ================================================ FILE: verl_rl/verl/workers/rollout/base.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from abc import ABC, abstractmethod from verl import DataProto __all__ = ["BaseRollout"] class BaseRollout(ABC): """Base class for rollout.""" @abstractmethod def generate_sequences(self, prompts: DataProto) -> DataProto: """Generate sequences""" pass ================================================ FILE: verl_rl/verl/workers/rollout/chat_scheduler.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import asyncio import heapq import importlib import itertools import json import logging import time from abc import ABC, abstractmethod from typing import Any from uuid import uuid4 import aiohttp import numpy as np import torch from cachetools import LRUCache from omegaconf import DictConfig from openai import AsyncOpenAI from openai.types.chat.chat_completion import ChatCompletion from tensordict import TensorDict from verl.protocol import DataProto from verl.tools.utils.tool_registry import initialize_tools_from_config from verl.utils import hf_tokenizer from verl.utils.fs import copy_to_local from verl.utils.import_utils import deprecated logger = logging.getLogger(__file__) class CompletionCallback(ABC): def __init__(self, config: DictConfig, scheduler: "ChatCompletionScheduler"): self.config = config self.scheduler = scheduler # Initialize tools from config file self.max_assistant_turns = config.actor_rollout_ref.rollout.multi_turn.max_assistant_turns tool_config_path = config.actor_rollout_ref.rollout.multi_turn.tool_config_path tool_list = initialize_tools_from_config(tool_config_path) if tool_config_path else [] self.tools = {tool.name: tool for tool in tool_list} self._tool_schemas = [tool.tool_schema.model_dump(exclude_unset=True, exclude_none=True) for tool in tool_list] print(f"Initialized tools: {self.tools}", flush=True) local_path = copy_to_local(config.actor_rollout_ref.model.path) self.tokenizer = hf_tokenizer(local_path, trust_remote_code=True) @property def tool_schemas(self): """OpenAI JSON tool schemas.""" return self._tool_schemas @property def extra_body(self) -> dict[str, Any]: """Extra body passed to the OpenAI API.""" return None @abstractmethod async def __call__(self, messages: list[dict[str, str]], completions: ChatCompletion, info: dict[str, Any]): """Callback function to process completions. Args: messages: List of messages including raw prompt and assistant, tool response generated so far. completions: Chat completions from OpenAI compatible server. info: Any other auxiliary information passed across multi-turn. """ raise NotImplementedError @abstractmethod def postprocess(self, batch: DataProto, batch_conversations: list[list[dict[str, str]]], n: int) -> DataProto: """Post process batch data. Args: batch: Batch input messages from RLHFDataset. batch_conversations: List of messages including raw prompt, assistant response, tool response. Note that `len(batch_conversations) == len(batch) * n`, e.g. n=2, batch_conversations=[messages_0_0, messages_0_1, messages_1_0, messages_1_1, ...] n: How many chat completion choices to generate for each input message. Returns: Batch data, should include ["prompts", "responses", "response_mask", "input_ids", "attention_mask", "position_ids"].
""" raise NotImplementedError class ToolCompletionCallback(CompletionCallback): def __init__(self, config: DictConfig, scheduler: "ChatCompletionScheduler"): super().__init__(config, scheduler) # TODO: add reward manager to calculate reward score once a sample finish async def __call__(self, messages: list[dict[str, str]], completions: ChatCompletion, info: dict[str, Any]): message = completions.choices[0].message.model_dump(exclude_unset=True, exclude_none=True) if "content" not in message: message["content"] = "" messages.append(message) finish_reason = completions.choices[0].finish_reason # STEP 0: check if we reach max turns if self.max_assistant_turns and len(messages) >= self.max_assistant_turns: print(f"[id={completions.id},turn={len(messages)},finish_reason={finish_reason}] Reach max turns, done!") return # STEP 1: check if the model called tools if finish_reason != "tool_calls": print(f"[id={completions.id},turn={len(messages)},finish_reason={finish_reason}] No tool called, done!") return # STEP 2: call tools tool_calls = completions.choices[0].message.tool_calls print(f"[id={completions.id},turn={len(messages)},finish_reason={finish_reason}] Call {len(tool_calls)} tools") tasks = [] for tool_call in tool_calls: tasks.append(self._call_tool(tool_call)) tool_responses = await asyncio.gather(*tasks) if any(isinstance(item, Exception) for item in tool_responses): print( f"[id={completions.id},turn={len(messages)},finish_reason={finish_reason}] Error when calling tools, " f"done!" ) return messages.extend(tool_responses) # STEP 3: resubmit completion request with tool responses self.scheduler.submit_chat_completions(messages=messages, request_id=completions.id, info=info) async def _call_tool(self, tool_call) -> dict[str, str]: """Call tool and return tool response.""" tool_name = tool_call.function.name tool_args = json.loads(tool_call.function.arguments) tool = self.tools[tool_name] instance_id = await tool.create() try: tool_response, tool_reward_score, tool_metrics = await tool.execute(instance_id, tool_args) except Exception as e: logger.exception(f"Error when executing tool: {e}") return e finally: await tool.release(instance_id) return { "role": "tool", "content": tool_response, "tool_call_id": tool_call.id, } def postprocess(self, batch: DataProto, batch_conversations: list[list[dict[str, str]]], n: int) -> DataProto: # NOTE: consistent with batch version of generate_sequences in vllm_rollout_spmd.py # prompts: left pad # responses: right pad # input_ids: prompt + response # attention_mask: [0,0,0,0,1,1,1,1, | 1,1,1,0,0,0,0,0] # position_ids: [0,0,0,0,0,1,2,3, | 4,5,6,7,8,9,10,11] # prompts: [prompt] from input dataset prompts = [ self.tokenizer.apply_chat_template( prompt, tools=self.tool_schemas, add_generation_prompt=True, tokenize=False ) for prompt in batch.non_tensor_batch["raw_prompt"] ] assert len(batch_conversations) == len(prompts) * n # sequences: [prompt + response] sequences = [ self.tokenizer.apply_chat_template( conversation, tools=self.tool_schemas, add_generation_prompt=False, tokenize=False ) for conversation in batch_conversations ] # responses: [response] responses = [sequence[len(prompts[i // n]) :] for i, sequence in enumerate(sequences)] prompts = self.tokenizer(prompts, return_tensors="pt", padding="longest", padding_side="left") responses = self.tokenizer(responses, return_tensors="pt", padding="longest", padding_side="right") if n > 1: prompts["input_ids"] = prompts["input_ids"].repeat_interleave(n, dim=0) prompts["attention_mask"] = 
prompts["attention_mask"].repeat_interleave(n, dim=0) # response_mask: response mask with tools calling masked out response_mask = self._mask_out_tools_calling_tokens( batch.non_tensor_batch["raw_prompt"].repeat(n, axis=0), batch_conversations, responses["input_ids"], responses["attention_mask"], ) input_ids = torch.cat([prompts["input_ids"], responses["input_ids"]], dim=1) attention_mask = torch.cat([prompts["attention_mask"], responses["attention_mask"]], dim=1) position_ids = (attention_mask.cumsum(dim=1) - 1) * attention_mask batch = TensorDict( { "prompts": prompts["input_ids"], # [bsz, prompt_length] "responses": responses["input_ids"], # [bsz, response_length] "response_mask": response_mask, # [bsz, response_length] "input_ids": input_ids, # [bsz, prompt_length + response_length] "attention_mask": attention_mask, # [bsz, prompt_length + response_length] "position_ids": position_ids, # [bsz, prompt_length + response_length] }, batch_size=len(input_ids), ) num_turns = np.array([len(conversation) for conversation in batch_conversations], dtype=np.int32) return DataProto(batch=batch, non_tensor_batch={"__num_turns__": num_turns}) def _mask_out_tools_calling_tokens( self, raw_prompts: list[list[dict[str, str]]], batch_conversations: list[list[dict[str, str]]], input_ids: torch.Tensor, attention_mask: torch.Tensor, ) -> torch.Tensor: """Mask out tools calling tokens in the responses. Args: raw_prompts: [prompt] from input dataset batch_conversations: [prompt + response] input_ids: responses tokens attention_mask: responses attention mask Returns: mask: (batch_size, response_length) """ batch_size = input_ids.size(0) assert len(raw_prompts) == batch_size, f"{len(raw_prompts)} != {batch_size}" assert len(batch_conversations) == batch_size, f"{len(batch_conversations)} != {batch_size}" # Deduplicate adjacent tool calls, since they're merged into one turn. # [user, assistant, tool, tool, assistant] -> [user, assistant, tool, assistant] # TODO: it's chat_template specific, find a more generic way to do this. def deduplicate_adjacent_tool_calls(roles): result = [] for role, group in itertools.groupby(roles): if role == "tool": result.append(role) else: result.extend(group) return result loss_mask = attention_mask.clone() for i in range(batch_size): responses = batch_conversations[i][len(raw_prompts[i]) :] assert len(responses) > 0, f"responses is empty: {responses}" roles = deduplicate_adjacent_tool_calls([response["role"] for response in responses]) # Each turn should be: [BOS]...[EOS] eos_indices = input_ids[i].eq(self.tokenizer.eos_token_id).nonzero().squeeze(1)[: len(roles)] for j in range(len(roles)): if roles[j] == "tool": bos = eos_indices[j - 1] + 1 if j > 0 else 0 eos = eos_indices[j] loss_mask[i, bos : eos + 1] = 0 return loss_mask @deprecated("verl.experimental.agent_loop.AgentLoopManager") class ChatCompletionScheduler: def __init__( self, config: DictConfig, server_addresses: list[str], max_cache_size: int = 10000, ): """ Args: config: DictConfig. server_addresses: List[str], OpenAI compatible server addresses. max_cache_size: int, max cache size of request_id to address mapping. 
""" self.config = config.actor_rollout_ref.rollout model_path = config.actor_rollout_ref.model.path self.model_name = "/".join(model_path.split("/")[-2:]) # Least requests load balancing self.weighted_addresses = [[0, address] for address in server_addresses] heapq.heapify(self.weighted_addresses) # LRU cache to map request_id to address self.request_id_to_address = LRUCache(maxsize=max_cache_size) self.background_tasks = set() if self.config.multi_turn.completion_callback is None: self.completion_callback = ToolCompletionCallback(config, self) logger.warning("completion_callback is None, use ToolCompletionCallback") else: module_path, class_name = self.config.multi_turn.completion_callback.rsplit(".", 1) module = importlib.import_module(module_path) self.completion_callback = getattr(module, class_name)(config, self) def submit_chat_completions(self, *, messages: list[dict[str, str]], request_id: str, info: dict[str, Any]): """Submit chat completion request without wait, completion_callback will be called when the request is done. Args: messages: List of messages. request_id: Request id. info: Any other auxiliary information pass across multi-turn. """ info["__depth__"] += 1 task = asyncio.create_task(self._submit_chat_completions_and_callback(messages, request_id, info)) # “fire-and-forget” background tasks self.background_tasks.add(task) task.add_done_callback(self.background_tasks.discard) async def _submit_chat_completions_and_callback( self, messages: list[dict[str, str]], request_id: str, info: dict[str, Any], ): """Submit chat completion request, wait request finish and do callback.""" if request_id: request_id = request_id.removeprefix("chatcmpl-") assert request_id in self.request_id_to_address address = self.request_id_to_address.pop(request_id) else: address = self.weighted_addresses[0][1] self.weighted_addresses[0][0] += 1 heapq.heapreplace(self.weighted_addresses, self.weighted_addresses[0]) # use new request_id to avoid duplicate request_id problem request_id = uuid4().hex self.request_id_to_address[request_id] = address completions, exception = None, None try: # NOTE: OpenAI client uses httpx, seems to have performance issue in high concurrency requests. 
completions = await self._chat_completions_aiohttp( address, messages=messages, tools=self.completion_callback.tool_schemas, extra_body=self.completion_callback.extra_body, extra_headers={"x-request-id": request_id}, **info["__sampling_params__"], ) except Exception as e: # Let user handle the exception exception = e info["__depth__"] -= 1 if exception is not None: logger.exception(f"chat completion failed with exception: {exception}") else: try: await self.completion_callback(messages, completions, info) except Exception as e: logger.exception(f"completion callback failed with exception: {e}") # No more ongoing completion requests if info["__depth__"] == 0: info["__done__"].set() async def _chat_completions_openai(self, address: str, **chat_complete_request) -> ChatCompletion: client = AsyncOpenAI(base_url=f"http://{address}/v1", api_key="token-abc123", timeout=None, max_retries=0) return await client.chat.completions.create(**chat_complete_request) async def _chat_completions_aiohttp(self, address: str, **chat_complete_request) -> ChatCompletion: extra_body = chat_complete_request.pop("extra_body", {}) chat_complete_request.update(extra_body or {}) extra_headers = chat_complete_request.pop("extra_headers") timeout = aiohttp.ClientTimeout(total=None) # open the session via async with so it is always closed, on success or error async with aiohttp.ClientSession(timeout=timeout) as session: async with session.post( url=f"http://{address}/v1/chat/completions", headers={"Authorization": "Bearer token-abc123", **extra_headers}, json=chat_complete_request, ) as resp: data = await resp.json() return ChatCompletion(**data) async def generate_sequences(self, batch: DataProto) -> DataProto: t_start = time.time() kwargs = dict( model=self.model_name, temperature=self.config.temperature, top_p=self.config.top_p, ) # override sampling params for validation if batch.meta_info.get("validate", False): kwargs["top_p"] = self.config.val_kwargs.top_p kwargs["temperature"] = self.config.val_kwargs.temperature print(f"[ChatCompletionScheduler] generate_sequences sampling params: {kwargs}") # NOTE: For multi-turn rollout, repeat raw_prompt n times and process each prompt independently, # validation dataset has already been repeated in `PPOTrainer._validate`. n = 1 if batch.meta_info.get("validate", False) else self.config.n tasks, batch_conversations = [], [None] * len(batch) * n for batch_index, conversation in enumerate(batch.non_tensor_batch["raw_prompt"].repeat(n, axis=0)): # raw_prompt: [{"role": "user", "content": "..."}, {"role": "assistant", "content": "..."}, ...]
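# With n > 1, np.repeat duplicates each prompt n times consecutively, e.g. n=2:
# [p0, p1] -> [p0, p0, p1, p1], matching the prompts[i // n] indexing in
# CompletionCallback.postprocess.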
batch_conversations[batch_index] = conversation.tolist() tasks.append( asyncio.create_task( self._submit_chat_completions_semaphore( messages=batch_conversations[batch_index], request_id=None, sampling_params=kwargs, ) ) ) await asyncio.gather(*tasks) output_batch = self.completion_callback.postprocess(batch, batch_conversations, n=n) output_batch.meta_info["timing"] = {"generate_sequences": time.time() - t_start} print("[ChatCompletionScheduler] generate_sequences done") return output_batch async def _submit_chat_completions_semaphore( self, messages: list[dict[str, str]], request_id: str, sampling_params: dict[str, Any] ): done = asyncio.Event() info = { "__done__": done, "__depth__": 0, # indicate how many ongoing completion requests "__sampling_params__": sampling_params, } self.submit_chat_completions(messages=messages, request_id=request_id, info=info) # Wait until all completion requests are done await done.wait() ================================================ FILE: verl_rl/verl/workers/rollout/hf_rollout.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Rollout with huggingface models. TODO: refactor this class. Currently, it will hang when using FSDP HybridShard. We should actually create a single GPU model. Then, get full state_dict and bind the state_dict to the single GPU model. Then, use the single GPU model to perform generation. 
""" import contextlib import torch import torch.distributed from tensordict import TensorDict from torch import nn from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from transformers import GenerationConfig from verl import DataProto from verl.utils.device import get_device_name, get_torch_device from verl.utils.torch_functional import get_response_mask from .base import BaseRollout __all__ = ["HFRollout"] class HFRollout(BaseRollout): def __init__(self, module: nn.Module, config): super().__init__() self.config = config self.module = module def generate_sequences(self, prompts: DataProto) -> DataProto: batch_size = prompts.batch.batch_size[0] num_chunks = max(batch_size // self.config.get("micro_batch_size", batch_size), 1) batch_prompts = prompts.chunk(chunks=num_chunks) output = [self._generate_minibatch(p) for p in batch_prompts] output = DataProto.concat(output) return output @torch.no_grad() def _generate_minibatch(self, prompts: DataProto) -> DataProto: # make sampling args can be overridden by inputs do_sample = prompts.meta_info.get("do_sample", self.config.do_sample) is_validate = prompts.meta_info.get("validate", False) temperature = prompts.meta_info.get("temperature", self.config.temperature) response_length = prompts.meta_info.get("response_length", self.config.response_length) top_p = prompts.meta_info.get("top_p", self.config.get("top_p", 1.0)) top_k = max(0, prompts.meta_info.get("top_k", self.config.get("top_k", 0))) # to be compatible with vllm if not do_sample: # do_sample==False -> greedy decoding kwargs = { "do_sample": False, "num_beams": 1, } elif is_validate: # do validate and do sample -> use val_kwargs kwargs = { "do_sample": True, "num_beams": 1, "top_k": max(0, self.config.val_kwargs.top_k), # to be compatible with vllm "top_p": self.config.val_kwargs.top_p, "temperature": self.config.val_kwargs.temperature, "num_return_sequences": 1, # if validate, already repeat in ray_trainer } else: # do_sample -> use rollout config kwargs = { "do_sample": True, "num_beams": 1, "top_p": top_p, "top_k": top_k, "temperature": temperature, "num_return_sequences": self.config.n, } # make config according to generate mode generation_config = GenerationConfig(**kwargs) idx = prompts.batch["input_ids"] # (bs, prompt_length) prompt_length = idx.size(1) attention_mask = prompts.batch["attention_mask"] # left-padded attention_mask position_ids = prompts.batch["position_ids"] # used to construct attention_mask eos_token_id = prompts.meta_info["eos_token_id"] pad_token_id = prompts.meta_info["pad_token_id"] self.module.eval() param_ctx = contextlib.nullcontext() if isinstance(self.module, FSDP): # recurse need to set to False according to https://github.com/pytorch/pytorch/issues/100069 param_ctx = FSDP.summon_full_params(self.module, writeback=False, recurse=False) with param_ctx, torch.autocast(device_type=get_device_name(), dtype=torch.bfloat16): output = self.module.generate( input_ids=idx, attention_mask=attention_mask, position_ids=position_ids, do_sample=do_sample, max_new_tokens=response_length, eos_token_id=eos_token_id, pad_token_id=pad_token_id, generation_config=generation_config, output_scores=False, # this is potentially very large return_dict_in_generate=True, use_cache=True, ) # TODO: filter out the seq with no answers like ds-chat seq = output.sequences generated_batch_size = seq.size(0) # bs * num_return_sequences # huggingface generate will stop generating when all the batch reaches [EOS]. 
# We have to pad to response_length sequence_length = prompt_length + self.config.response_length delta_length = sequence_length - seq.shape[1] if delta_length > 0: delta_tokens = torch.ones(size=(generated_batch_size, delta_length), device=seq.device, dtype=seq.dtype) delta_tokens = pad_token_id * delta_tokens seq = torch.cat((seq, delta_tokens), dim=1) assert seq.shape[1] == sequence_length # make necessary repetitions if num_return_sequences > 1 num_return_sequences = kwargs.get("num_return_sequences", 1) if num_return_sequences > 1: position_ids = position_ids.repeat_interleave(num_return_sequences, dim=0) attention_mask = attention_mask.repeat_interleave(num_return_sequences, dim=0) prompt = seq[:, :prompt_length] # (generated_batch_size, prompt_length) response = seq[:, prompt_length:] # (generated_batch_size, response_length) response_length = response.size(1) delta_position_id = torch.arange(1, response_length + 1, device=position_ids.device) delta_position_id = delta_position_id.unsqueeze(0).repeat(generated_batch_size, 1) response_position_ids = position_ids[:, -1:] + delta_position_id position_ids = torch.cat([position_ids, response_position_ids], dim=-1) response_attention_mask = get_response_mask( response_id=response, eos_token=eos_token_id, dtype=attention_mask.dtype ) attention_mask = torch.cat((attention_mask, response_attention_mask), dim=-1) batch = TensorDict( { "prompts": prompt, "responses": response, "input_ids": seq, "attention_mask": attention_mask, "position_ids": position_ids, }, batch_size=generated_batch_size, ) # empty cache before computing old_log_prob get_torch_device().empty_cache() self.module.train() return DataProto(batch=batch) ================================================ FILE: verl_rl/verl/workers/rollout/naive/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .naive_rollout import NaiveRollout __all__ = ["NaiveRollout"] ================================================ FILE: verl_rl/verl/workers/rollout/naive/naive_rollout.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ In single GPU rollout, the sequences are generated directly by sampling from the model. The output will contain 1. output_ids 2. attention_masks (left padding) 3. eos_masks 4.
log_probs """ import torch import torch.nn.functional as F from tensordict import TensorDict from torch import nn from verl import DataProto from verl.utils.torch_functional import logprobs_from_logits from ..base import BaseRollout __all__ = ["NaiveRollout"] class NaiveRollout(BaseRollout): def __init__(self, module: nn.Module, config): """A naive rollout. It requires the module to be compatible with huggingface APIs. That is: The module should define __call__ to receive input_ids, attention_mask and position_ids. It outputs a structure that contains logits field. Args: module: module here follows huggingface APIs config: DictConfig """ super().__init__() self.config = config self.module = module @torch.no_grad() def generate_sequences(self, prompts: DataProto) -> DataProto: """Generate sequences""" idx = prompts.batch["input_ids"] # (bs, prompt_length) attention_mask = prompts.batch["attention_mask"] # left-padded attention_mask position_ids = prompts.batch["position_ids"] # used to construct attention_mask eos_token_id = prompts.meta_info["eos_token_id"] batch_size = idx.size(0) prompt_length = idx.size(1) self.module.eval() prev_attention_mask = torch.ones(size=(batch_size, 1), dtype=attention_mask.dtype, device=attention_mask.device) logits_lst = [] for _ in range(self.config.response_length): # if the sequence context is growing too long we must crop it at block_size # idx_cond = idx if idx.size(1) <= self.config.block_size else idx[:, -self.config.block_size:] idx_cond = idx # forward the model to get the logits for the index in the sequence # we use huggingface APIs here output = self.module(input_ids=idx_cond, attention_mask=attention_mask, position_ids=position_ids) logits = output.logits # pluck the logits at the final step and scale by desired temperature logits = logits[:, -1, :] / self.config.temperature # (bs, vocab_size) # optionally crop the logits to only the top k options if self.config.top_k is not None: v, _ = torch.topk(logits, min(self.config.top_k, logits.size(-1))) logits[logits < v[:, [-1]]] = -float("Inf") # apply softmax to convert logits to (normalized) probabilities probs = F.softmax(logits, dim=-1) # sample from the distribution if self.config.do_sample: idx_next = torch.multinomial(probs, num_samples=1) else: idx_next = torch.argmax(probs, dim=-1, keepdim=True) attention_mask = torch.cat((attention_mask, prev_attention_mask), dim=-1) for token_id in eos_token_id: prev_attention_mask = torch.logical_and(idx_next != token_id, prev_attention_mask.bool()) prev_attention_mask.to(attention_mask.dtype) position_ids = torch.cat((position_ids, position_ids[:, -1:] + 1), dim=-1) # append sampled index to the running sequence and continue idx = torch.cat((idx, idx_next), dim=1) logits_lst.append(logits) logits = torch.stack(logits_lst, dim=1) # (bs, response_length, vocab_size) prompts = idx[:, :prompt_length] # (bs, prompt_length) response = idx[:, prompt_length:] # (bs, response_length) log_probs = logprobs_from_logits(logits=logits, labels=response) batch = TensorDict( { "input_ids": prompts, "responses": response, "sequences": idx, "old_log_probs": log_probs, "attention_mask": attention_mask, "position_ids": position_ids, }, batch_size=batch_size, ) self.module.train() return DataProto(batch=batch) ================================================ FILE: verl_rl/verl/workers/rollout/schemas.py ================================================ # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. 
and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import difflib import logging import os from enum import Enum from typing import Any, Optional import torch from pydantic import BaseModel, ConfigDict, model_validator from transformers import PreTrainedTokenizer, PreTrainedTokenizerFast, ProcessorMixin from verl.tools.schemas import OpenAIFunctionToolCall, OpenAIFunctionToolSchema from verl.utils.model import compute_position_id_with_mask logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) BASE_CHAT_HISTORY = [ {"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "I am a user."}, ] class FinishReasonTypeEnum(str, Enum): """The enum for finish reason type.""" LENGTH = "length" STOP = "stop" TOOL_CALL = "tool_calls" @classmethod def from_str(cls, value: str) -> "FinishReasonTypeEnum": if value == "stop": return cls.STOP elif value == "length": return cls.LENGTH elif value == "tool_calls": return cls.TOOL_CALL else: raise ValueError(f"Unsupported finish reason type: {value}") class Message(BaseModel): role: str content: str | dict[str, Any] | list[dict[str, Any]] tool_calls: Optional[list[OpenAIFunctionToolCall]] = None class AsyncRolloutRequestStateEnum(str, Enum): """The enum for async rollout request state.""" PENDING = "pending" RUNNING = "running" COMPLETED = "completed" FAILED = "failed" TOOL_CALLING = "tool_calling" INTERACTING = "interacting" class TokenizationSanityCheckModeEnum(str, Enum): """The enum for tokenization sanity check mode.""" DISABLE = "disable" STRICT = "strict" IGNORE_STRIPPABLE = "ignore_strippable" class AsyncRolloutRequest(BaseModel): """The data model for async rollout.""" model_config = ConfigDict(arbitrary_types_allowed=True) batch_data_id: int = 0 rollout_offset: int = 0 request_id: str state: AsyncRolloutRequestStateEnum messages: list[Message] multi_modal_keys: Optional[list[str]] = None multi_modal_data: Optional[dict[str, Any]] = None multi_modal_inputs: Optional[dict[str, torch.Tensor]] = None tool_schemas: Optional[list[OpenAIFunctionToolSchema]] = None tools_kwargs: dict[str, Any] = {} interaction_kwargs: dict[str, Any] = {} input_ids: Optional[torch.Tensor] = None prompt_ids: Optional[torch.Tensor] = None response_ids: Optional[torch.Tensor] = None attention_mask: Optional[torch.Tensor] = None prompt_attention_mask: Optional[torch.Tensor] = None response_attention_mask: Optional[torch.Tensor] = None position_ids: Optional[torch.Tensor] = None prompt_position_ids: Optional[torch.Tensor] = None response_position_ids: Optional[torch.Tensor] = None loss_mask: Optional[torch.Tensor] = None prompt_loss_mask: Optional[torch.Tensor] = None response_loss_mask: Optional[torch.Tensor] = None reward_scores: dict[str, float] max_prompt_len: int max_response_len: int = 8192 max_model_len: int = 32768 metrics: dict[str, list[Any]] = {} use_inference_chat_template: bool tokenization_sanity_check_mode: TokenizationSanityCheckModeEnum generation_prompt_ids: Optional[torch.Tensor] = None 
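# base_conv_wo_gen_prompt_end_pos / base_conv_with_gen_prompt_end_pos record how many
# tokens BASE_CHAT_HISTORY occupies when rendered without / with the generation prompt;
# the incremental tokenization below slices new messages past these offsets so the shared
# chat-template prefix is never appended twice.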
base_conv_wo_gen_prompt_end_pos: int base_conv_with_gen_prompt_end_pos: int @model_validator(mode="before") @classmethod def initialize_request(cls, values): if not (messages := values.get("messages")): raise ValueError("messages is required for AsyncRolloutRequest initialization") if not (max_prompt_len := values.get("max_prompt_len")): raise ValueError("max_prompt_len is required for AsyncRolloutRequest initialization") if not (processing_class := values.pop("processing_class", None)): raise ValueError("processing_class is required for AsyncRolloutRequest initialization") values["messages"] = [Message.model_validate(msg) for msg in messages] # If there is no multi_modal_keys, we assume the multi-modal data is image and video. if not values.get("multi_modal_keys"): values["multi_modal_keys"] = ["image", "video"] if not values.get("multi_modal_data"): values["multi_modal_data"] = {key: [] for key in values["multi_modal_keys"]} else: # check if all multi_modal_keys are in multi_modal_data for key in values["multi_modal_keys"]: if key not in values["multi_modal_data"]: values["multi_modal_data"][key] = [] if not values.get("multi_modal_inputs"): values["multi_modal_inputs"] = {} tools = ( [tool.model_dump() for tool in tool_schemas] if (tool_schemas := values.get("tool_schemas", [])) else None ) multi_modal_data = values["multi_modal_data"] tokens_without_prompt = cls._handle_apply_chat_template( processing_class, messages, multi_modal_data=multi_modal_data, tools=tools, add_generation_prompt=False, tokenize=True, ) if ( values.get("input_ids") is None or values.get("attention_mask") is None or values.get("position_ids") is None ): tokenization_dict_with_prompt = cls._handle_apply_chat_template( processing_class, messages, multi_modal_data=multi_modal_data, tools=tools, add_generation_prompt=True, tokenize=True, return_dict=True, ) values["input_ids"], values["attention_mask"] = ( tokenization_dict_with_prompt["input_ids"], tokenization_dict_with_prompt["attention_mask"], ) if values["input_ids"].shape[-1] > max_prompt_len: # Only log the warning to avoid truncating in the middle of generation prompt. Consider raising an # error for this case in the future. logger.warning( f"Prompt {values['batch_data_id']} has length {values['input_ids'].shape[-1]} " f"which is greater than max_prompt_len {max_prompt_len} after applied chat template with tools." 
) # Process multi_modal_inputs multi_modal_inputs = tokenization_dict_with_prompt.copy() multi_modal_inputs.pop("input_ids", None) multi_modal_inputs.pop("attention_mask", None) values["multi_modal_inputs"] = multi_modal_inputs values["position_ids"] = values["prompt_position_ids"] = cls._get_position_ids( processing_class, values["input_ids"], values["attention_mask"], multi_modal_inputs ) values["prompt_ids"], values["prompt_attention_mask"] = values["input_ids"], values["attention_mask"] values["loss_mask"] = values["prompt_loss_mask"] = torch.zeros_like(values["input_ids"], dtype=torch.bool) values["generation_prompt_ids"] = values["input_ids"][..., tokens_without_prompt.shape[-1] :] values["base_conv_wo_gen_prompt_end_pos"] = cls._handle_apply_chat_template( processing_class, BASE_CHAT_HISTORY, multi_modal_data=multi_modal_data, tools=tools, add_generation_prompt=False, tokenize=True, ).shape[-1] values["base_conv_with_gen_prompt_end_pos"] = cls._handle_apply_chat_template( processing_class, BASE_CHAT_HISTORY, multi_modal_data=multi_modal_data, tools=tools, add_generation_prompt=True, tokenize=True, ).shape[-1] return values @staticmethod def _handle_apply_chat_template( processing_class: PreTrainedTokenizer | PreTrainedTokenizerFast | ProcessorMixin, messages: list[Message], multi_modal_data: dict[str, Any], tools: Optional[list[OpenAIFunctionToolSchema]] = None, add_generation_prompt: bool = False, tokenize: bool = False, return_dict: bool = False, ): raw_prompt = processing_class.apply_chat_template( messages, tools=tools, add_generation_prompt=add_generation_prompt, tokenize=False ) if not tokenize: return raw_prompt if isinstance(processing_class, PreTrainedTokenizer) or isinstance(processing_class, PreTrainedTokenizerFast): if any(len(values) > 0 for values in multi_modal_data.values()): logger.warning( "There is multi_modal_data but you are not using a processor. Multi-modal data will be ignored." 
) model_inputs = processing_class(text=[raw_prompt], return_tensors="pt") elif isinstance(processing_class, ProcessorMixin): # When we update multi_modal_keys, we also need to update this logic images = images if len(images := multi_modal_data.get("image", [])) > 0 else None videos = videos if len(videos := multi_modal_data.get("video", [])) > 0 else None model_inputs = processing_class(text=[raw_prompt], images=images, videos=videos, return_tensors="pt") else: raise ValueError(f"Unsupported processing class type: {type(processing_class)}") model_inputs = dict(model_inputs) if return_dict: return model_inputs else: return model_inputs["input_ids"] @staticmethod def _get_position_ids( processing_class: PreTrainedTokenizer | PreTrainedTokenizerFast | ProcessorMixin, input_ids: torch.Tensor, attention_mask: torch.Tensor, multi_modal_inputs: Optional[dict[str, torch.Tensor]] = None, ) -> torch.Tensor: # special case for qwen2vl is_qwen2vl = ( hasattr(processing_class, "image_processor") and "Qwen2VLImageProcessor" in processing_class.image_processor.__class__.__name__ ) if is_qwen2vl: from verl.models.transformers.qwen2_vl import get_rope_index image_grid_thw = video_grid_thw = second_per_grid_ts = None if multi_modal_inputs: image_grid_thw = multi_modal_inputs.get("image_grid_thw") video_grid_thw = multi_modal_inputs.get("video_grid_thw") second_per_grid_ts = multi_modal_inputs.get("second_per_grid_ts") assert input_ids.dim() == 2 and input_ids.shape[0] == 1, ( f"input_ids should be 2D with batch size 1, but got shape {input_ids.shape}" ) assert attention_mask.dim() == 2 and attention_mask.shape[0] == 1, ( f"attention_mask should be 2D with batch size 1, but got shape {attention_mask.shape}" ) new_position_ids = get_rope_index( processing_class, input_ids=input_ids.squeeze(0), image_grid_thw=image_grid_thw, video_grid_thw=video_grid_thw, second_per_grid_ts=second_per_grid_ts, attention_mask=attention_mask.squeeze(0), ) return new_position_ids # (3, seq_len) else: return compute_position_id_with_mask(attention_mask) # (1, seq_len) def _update_input_ids( self, processing_class: PreTrainedTokenizer | PreTrainedTokenizerFast | ProcessorMixin, new_input_ids: torch.Tensor, attention_mask: bool, loss_mask: bool, new_multi_modal_inputs: Optional[dict[str, torch.Tensor]] = None, ) -> None: """ Update the input_ids, attention_mask, position_ids, and loss_mask of the request in an additive manner.
""" self.input_ids = torch.cat([self.input_ids, new_input_ids], dim=-1) attention_mask = torch.ones_like(new_input_ids) * int(attention_mask) self.attention_mask = torch.cat([self.attention_mask, attention_mask], dim=-1) loss_mask = torch.ones_like(new_input_ids) * int(loss_mask) self.loss_mask = torch.cat([self.loss_mask, loss_mask], dim=-1) if new_multi_modal_inputs: self._update_multi_modal_inputs(new_multi_modal_inputs) new_position_ids = self._get_position_ids( processing_class, new_input_ids, attention_mask, new_multi_modal_inputs ) last_pos = self.position_ids[..., -1:] new_position_ids = new_position_ids + (last_pos + 1) self.position_ids = torch.cat([self.position_ids, new_position_ids], dim=-1) assert ( self.input_ids.shape[-1] == self.attention_mask.shape[-1] == self.position_ids.shape[-1] == self.loss_mask.shape[-1] ), f"""Request {self.request_id} has different length of {self.input_ids.shape[-1]=}, {self.attention_mask.shape[-1]=}, {self.position_ids.shape[-1]=}, {self.loss_mask.shape[-1]=}""" def _update_multi_modal_inputs(self, new_multi_modal_inputs: dict[str, torch.Tensor]) -> None: """ Update the multi_modal_inputs of the request in additive manner. """ for key in new_multi_modal_inputs: input_tensor = new_multi_modal_inputs[key] self.multi_modal_inputs[key] = ( torch.cat([self.multi_modal_inputs[key], input_tensor], dim=0) if key in self.multi_modal_inputs else input_tensor ) def get_generation_prompt_ids( self, processing_class: PreTrainedTokenizer | PreTrainedTokenizerFast | ProcessorMixin ) -> list[int]: """ Get the generation prompt ids for rollout engine. Because rollout engine(SGLang) requires the ids to be a list, we need to convert the tensor to a list. """ generation_prompt_ids = ( None if self.input_ids[..., -self.generation_prompt_ids.shape[-1] :].eq(self.generation_prompt_ids).all() else self.generation_prompt_ids ) if generation_prompt_ids is not None: self._update_input_ids(processing_class, generation_prompt_ids, attention_mask=True, loss_mask=False) if self.use_inference_chat_template: messages = [msg.model_dump() for msg in self.messages] tools = [tool.model_dump() for tool in self.tool_schemas] if self.tool_schemas else None generation_prompt_ids = self._handle_apply_chat_template( processing_class, messages, multi_modal_data=self.multi_modal_data, tools=tools, add_generation_prompt=True, tokenize=True, ) return generation_prompt_ids.squeeze(0).tolist() else: return self.input_ids.squeeze(0).tolist() def add_user_message( self, processing_class: PreTrainedTokenizer | PreTrainedTokenizerFast | ProcessorMixin, content: str, ) -> None: self.messages.append(Message(role="user", content=content)) messages = [*BASE_CHAT_HISTORY, self.messages[-1]] tools = [tool.model_dump() for tool in self.tool_schemas] if self.tool_schemas else None # We don't need to pass multi_modal_data here because we don't have any multi-modal data from Engine # Inference, it is pure text. 
content_ids = self._handle_apply_chat_template( processing_class, messages, multi_modal_data={}, tools=tools, add_generation_prompt=False, tokenize=True )[..., self.base_conv_wo_gen_prompt_end_pos :] self._update_input_ids(processing_class, content_ids, attention_mask=True, loss_mask=False) def add_assistant_message( self, processing_class: PreTrainedTokenizer | PreTrainedTokenizerFast | ProcessorMixin, content: str, tool_calls: Optional[list[OpenAIFunctionToolCall]] = None, ) -> None: self.messages.append(Message(role="assistant", content=content, tool_calls=tool_calls)) messages = [*BASE_CHAT_HISTORY, self.messages[-1]] tools = [tool.model_dump() for tool in self.tool_schemas] if self.tool_schemas else None # We don't need to pass multi_modal_data here because we don't have any multi-modal data from Engine # Inference, it is pure text. content_ids = self._handle_apply_chat_template( processing_class, messages, multi_modal_data={}, tools=tools, add_generation_prompt=False, tokenize=True )[..., self.base_conv_with_gen_prompt_end_pos :] self._update_input_ids(processing_class, content_ids, attention_mask=True, loss_mask=True) def add_tool_response_messages( self, processing_class: PreTrainedTokenizer | PreTrainedTokenizerFast | ProcessorMixin, contents: list[str | dict[str, Any]], ) -> None: if not contents: return # We also handle the case when a tool returns an image # We require the processing of the image and video to be done at the tool.execute() level delta_multi_modal_data = {key: [] for key in self.multi_modal_keys} for content in contents: if isinstance(content, dict): content_list = [] # When we update multi_modal_keys, we also need to update this logic if "image" in content: if not isinstance(content["image"], list): raise ValueError( f"Image must be a list, but got {type(content['image'])}. Please check the tool.execute(). " f"For single images, wrap in a list: [image]. " f"Example: {{'image': [img1]}} or {{'image': [img1, img2, ...]}}." ) content_list.extend([{"type": "image"} for _ in content["image"]]) delta_multi_modal_data["image"].extend(content["image"]) if "video" in content: if not isinstance(content["video"], list): raise ValueError( f"Video must be a list, but got {type(content['video'])}. Please check the tool.execute(). " f"For single videos, wrap in a list: [video]. " f"Example: {{'video': [video1]}} or {{'video': [video1, video2, ...]}}." ) content_list.extend([{"type": "video"} for _ in content["video"]]) delta_multi_modal_data["video"].extend(content["video"]) if "text" in content: content_list.append({"type": "text", "text": content["text"]}) for key in content: if key not in ["image", "video", "text"]: logger.warning( f"Tool response message contains unexpected key: {key} " f"while we only support `image`, `video`, and `text`." ) self.messages.append(Message(role="tool", content=content_list)) else: self.messages.append(Message(role="tool", content=content)) messages = [*BASE_CHAT_HISTORY, *self.messages[-len(contents) :]] tools = [tool.model_dump() for tool in self.tool_schemas] if self.tool_schemas else None for key in self.multi_modal_keys: if len(delta_multi_modal_data[key]) > 0: self.multi_modal_data[key].extend(delta_multi_modal_data[key]) # We now pass the new multi-modal data to the chat template to update the input_ids.
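# Tool responses are appended with attention_mask=True but loss_mask=False below, so the
# model attends to tool output without being trained to emit it.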
content_info = self._handle_apply_chat_template( processing_class, messages, multi_modal_data=delta_multi_modal_data, tools=tools, add_generation_prompt=False, tokenize=True, return_dict=True, ) content_ids = content_info["input_ids"][..., self.base_conv_wo_gen_prompt_end_pos :] # process multi_modal_inputs multi_modal_inputs = content_info.copy() multi_modal_inputs.pop("input_ids", None) multi_modal_inputs.pop("attention_mask", None) self._update_input_ids( processing_class, content_ids, attention_mask=True, loss_mask=False, new_multi_modal_inputs=multi_modal_inputs, ) def update_metrics(self, metrics: Any, tool_id: str) -> None: """ metrics: should be a dict of tools_name -> Any """ if self.metrics.get(tool_id) is None: self.metrics[tool_id] = [] self.metrics[tool_id].append(metrics) def _get_prompt_diffs( self, processing_class: PreTrainedTokenizer | PreTrainedTokenizerFast | ProcessorMixin, full_prompt_ids: torch.Tensor, current_prompt_ids: torch.Tensor, diff_surrounding_chars: int = 10, ) -> list[dict[str, Any]]: """Get differences between full prompt and current prompt with surrounding context. This function helps debug tokenization mismatches by showing the differences between full prompt and current prompt with surrounding context. Instead of just showing the exact diff, it includes additional tokens before and after to help locate the issue in the chat template. For example, if the actual diff is a newline change from "\n\n" to "\n", with diff_surrounding_chars the output might look like: full_prompt_chunk: "<|im_start|>assistant\n\nI think..." current_prompt_chunk: "<|im_start|>assistant\nI think..." This context makes it much easier to identify where in the chat template the mismatch occurs. Args: processing_class: The processing class to use for decoding the token IDs full_prompt_ids: Token IDs from applying chat template to all messages at once current_prompt_ids: Token IDs from incremental chat template application diff_surrounding_chars: Number of surrounding characters to include for context (default: 10) Returns: List of dicts containing the differing chunks with context and their indices """ full_prompt_ids = full_prompt_ids.squeeze(0) current_prompt_ids = current_prompt_ids.squeeze(0) full_prompt = processing_class.decode(full_prompt_ids, skip_special_tokens=False) current_prompt = processing_class.decode(current_prompt_ids, skip_special_tokens=False) s = difflib.SequenceMatcher(None, full_prompt, current_prompt, autojunk=False) diffs = [] for tag, i1, i2, j1, j2 in s.get_opcodes(): if tag == "equal": continue # Get the surrounding context for better readability start_i = max(0, i1 - diff_surrounding_chars) end_i = min(len(full_prompt), i2 + diff_surrounding_chars) start_j = max(0, j1 - diff_surrounding_chars) end_j = min(len(current_prompt), j2 + diff_surrounding_chars) diffs.append( { "full_prompt_chunk": full_prompt[start_i:end_i], "current_prompt_chunk": current_prompt[start_j:end_j], "indices": (start_i, end_i, start_j, end_j), } ) return diffs def finalize( self, processing_class: PreTrainedTokenizer | PreTrainedTokenizerFast | ProcessorMixin, reward_scores: dict[str, list[float]], finish_reason_type: FinishReasonTypeEnum = FinishReasonTypeEnum.STOP, ) -> None: self.state = AsyncRolloutRequestStateEnum.COMPLETED self.reward_scores = reward_scores # In case we failed to generate the assistant message and the generation prompt ids were already added to # input_ids, remove them from the end of input_ids if self.input_ids[..., -self.generation_prompt_ids.shape[-1] 
:].eq(self.generation_prompt_ids).all(): self.input_ids = self.input_ids[..., : -self.generation_prompt_ids.shape[-1]] self.attention_mask = self.attention_mask[..., : -self.generation_prompt_ids.shape[-1]] self.position_ids = self.position_ids[..., : -self.generation_prompt_ids.shape[-1]] self.loss_mask = self.loss_mask[..., : -self.generation_prompt_ids.shape[-1]] self.response_ids = self.input_ids[..., self.prompt_ids.shape[-1] :] if self.tokenization_sanity_check_mode != TokenizationSanityCheckModeEnum.DISABLE: # When there is a diff, we log the diffs with diff_surrounding_chars context diff_surrounding_chars = 10 messages = [msg.model_dump() for msg in self.messages] tools = [tool.model_dump() for tool in self.tool_schemas] if self.tool_schemas else None full_prompt_info = self._handle_apply_chat_template( processing_class, messages, multi_modal_data=self.multi_modal_data, tools=tools, add_generation_prompt=False, tokenize=True, return_dict=True, ) full_prompt_ids = full_prompt_info["input_ids"] # We must use dict(full_prompt_info) to convert BatchFeature values to a new dict # because np.array() only keeps the keys for BatchFeature. full_prompt_multi_modal_inputs = full_prompt_info.copy() full_prompt_multi_modal_inputs.pop("input_ids", None) full_prompt_multi_modal_inputs.pop("attention_mask", None) for multi_modal_inputs_key in self.multi_modal_inputs: if multi_modal_inputs_key in full_prompt_multi_modal_inputs: if ( not self.multi_modal_inputs[multi_modal_inputs_key] .eq(full_prompt_multi_modal_inputs[multi_modal_inputs_key]) .all() ): logger.warning( f"Multi-modal data {multi_modal_inputs_key} is not consistent. " f"This may lead to unexpected behavior during training. " f"Please review your multi_modal_inputs logic." ) else: logger.warning( f"Multi-modal inputs key {multi_modal_inputs_key} is not found in the multi_modal_inputs. " f"This may lead to unexpected behavior during training. " f"Please review your multi_modal_inputs logic." ) if diffs := self._get_prompt_diffs( processing_class, full_prompt_ids, self.input_ids, diff_surrounding_chars=diff_surrounding_chars ): log_warning = False if self.tokenization_sanity_check_mode == TokenizationSanityCheckModeEnum.STRICT: log_warning = True elif self.tokenization_sanity_check_mode == TokenizationSanityCheckModeEnum.IGNORE_STRIPPABLE: non_strippable_diffs_exist = any( d["full_prompt_chunk"].strip() or d["current_prompt_chunk"].strip() for d in diffs ) if non_strippable_diffs_exist: log_warning = True if log_warning: mode_str = f" ({self.tokenization_sanity_check_mode.value})" logger.warning( f"Inconsistent training and inference tokenization detected{mode_str}. This may lead to " f"unexpected behavior during training. Please review your chat template to determine if this " f"is intentional. For more information, refer to the multiturn README.md." ) logger.warning( f"Showing {diff_surrounding_chars} characters before and after the diffs for context and " f"better readability."
) diff_details_list = [] for d in diffs: i1, i2, j1, j2 = d["indices"] diff_details_list.append( f"idx {i1}:{i2} -> {j1}:{j2} | full_prompt_chunk: {repr(d['full_prompt_chunk'])} | " f"current_prompt_chunk: {repr(d['current_prompt_chunk'])}" ) diff_details = "\n".join(diff_details_list) logger.warning(f"Found differences:\n{diff_details}") if finish_reason_type == FinishReasonTypeEnum.STOP: pass elif finish_reason_type == FinishReasonTypeEnum.LENGTH: pass else: raise ValueError(f"Unsupported finalize finish reason type: {finish_reason_type}") self.truncate_output_ids(processing_class) assert ( self.input_ids.shape[-1] == self.attention_mask.shape[-1] == self.position_ids.shape[-1] == self.loss_mask.shape[-1] ), f"""Request {self.request_id} has different length of {self.input_ids.shape[-1]=}, {self.attention_mask.shape[-1]=}, {self.position_ids.shape[-1]=}, {self.loss_mask.shape[-1]=}""" def truncate_output_ids( self, processing_class: PreTrainedTokenizer | PreTrainedTokenizerFast | ProcessorMixin ) -> None: self.input_ids = self.input_ids[..., : self.max_model_len] self.attention_mask = self.attention_mask[..., : self.max_model_len] self.position_ids = self.position_ids[..., : self.max_model_len] self.loss_mask = self.loss_mask[..., : self.max_model_len] self.response_ids = self.input_ids[..., self.prompt_ids.shape[-1] :][..., : self.max_response_len] self.response_attention_mask = self.attention_mask[..., self.prompt_attention_mask.shape[-1] :][ ..., : self.max_response_len ] self.response_position_ids = self.position_ids[..., self.prompt_position_ids.shape[-1] :][ ..., : self.max_response_len ] self.response_loss_mask = self.loss_mask[..., self.prompt_loss_mask.shape[-1] :][..., : self.max_response_len] ================================================ FILE: verl_rl/verl/workers/rollout/sglang_rollout/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .sglang_rollout import SGLangRollout __all__ = ["SGLangRollout"] ================================================ FILE: verl_rl/verl/workers/rollout/sglang_rollout/async_sglang_server.py ================================================ # Copyright 2023-2024 SGLang Team # Copyright 2025 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
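# Worked example (hypothetical values) of the rank bookkeeping done in
# init_engine() below: workers are named f"{wg_prefix}WorkerDict_{pg_index}:{local_rank}",
# so with 8 GPUs per node and tensor_model_parallel_size=2, an actor "..._1:3" gives
#   current_global_rank = 8 * 1 + 3 = 11
#   worker_dp_rank      = 11 // 2  = 5
#   worker_tp_rank      = 11 %  2  = 1
# Only workers whose dp_rank matches this server are kept, and the tp_rank == 0
# worker becomes master_worker, which serves chat_completion/generate.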
import asyncio import logging from typing import Any import ray from omegaconf import DictConfig from starlette.requests import Request from starlette.responses import JSONResponse from verl.workers.rollout.async_server import AsyncServerBase logger = logging.getLogger(__file__) @ray.remote(num_cpus=1) class AsyncSGLangServer(AsyncServerBase): def __init__(self, config: DictConfig, dp_size: int, dp_rank: int, wg_prefix: str): super().__init__() self.config = config.actor_rollout_ref self._tp_size = self.config.rollout.get("tensor_model_parallel_size", 1) self._dp_size = dp_size self._dp_rank = dp_rank self.wg_prefix = wg_prefix self.workers = [] self.master_worker = None async def init_engine(self): if self.workers: # avoid init twice return all_actors = ray.util.list_named_actors(all_namespaces=True) matched_actors = [ actor for actor in all_actors if actor.get("name", None).startswith(self.wg_prefix + "WorkerDict_") ] gpu_per_node = len(set([actor["name"].split(":")[1] for actor in matched_actors])) # total gpu num assert len(matched_actors) == self._dp_size * self._tp_size for matched_actor in matched_actors: fields = matched_actor["name"].split(":") assert len(fields) == 2, f"invalid actor name: {matched_actor['name']}" pg_index, local_rank = int(fields[0].split("_")[-1]), int(fields[1]) current_global_rank = gpu_per_node * pg_index + local_rank worker_dp_rank = current_global_rank // self._tp_size worker_tp_rank = current_global_rank % self._tp_size if worker_dp_rank == self._dp_rank: worker = ray.get_actor(**matched_actor) self.workers.append(worker) if worker_tp_rank == 0: self.master_worker = worker async def chat_completion(self, raw_request: Request): request = await raw_request.json() # only send request to master worker in tp rank 0 output_future = self.master_worker.chat_completion.remote(request) [outputs] = await asyncio.gather(output_future) return JSONResponse(outputs) async def generate(self, prompt_ids: list[int], sampling_params: dict[str, Any], request_id: str) -> list[int]: return await self.master_worker.generate.remote(prompt_ids, sampling_params, request_id) async def wake_up(self): if not self.config.rollout.free_cache_engine: return tasks = [worker.wake_up.remote() for worker in self.workers] if tasks: await asyncio.gather(*tasks) async def sleep(self): if not self.config.rollout.free_cache_engine: return tasks = [worker.sleep.remote() for worker in self.workers] if tasks: await asyncio.gather(*tasks) ================================================ FILE: verl_rl/verl/workers/rollout/sglang_rollout/sglang_rollout.py ================================================ # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
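# Illustrative usage sketch (hypothetical driver code, not from this repo) of
# the AsyncEngine defined below, which lets a hybrid trainer park the rollout
# engine between generation phases:
#   await engine.release_memory_occupation()    # free weights + KV cache
#   ... run a training step on the same GPUs ...
#   await engine.resume_memory_occupation()     # re-occupy GPU memory
#   await engine.update_weights_from_tensor(named_tensors, flush_cache=True)
# Note the first resume also reloads weights, since the engine starts from the
# "dummy" load format (see AsyncEngine._need_reload).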
from __future__ import annotations import asyncio import logging import multiprocessing as mp import os import time from copy import deepcopy from json import JSONDecodeError from typing import Any, List, Optional, Tuple from uuid import uuid4 import numpy as np import sglang.srt.entrypoints.engine import torch import torch.distributed as dist from omegaconf import DictConfig from sglang.srt.managers.tokenizer_manager import ( ReleaseMemoryOccupationReqInput, ResumeMemoryOccupationReqInput, UpdateWeightsFromTensorReqInput, ) from sglang.srt.sampling.sampling_params import SamplingParams from sglang.srt.server_args import ServerArgs from sglang.srt.utils import ( MultiprocessingSerializer, assert_pkg_version, get_ip, get_open_port, is_cuda, maybe_set_triton_cache_manager, set_prometheus_multiproc_dir, set_ulimit, ) from tensordict import TensorDict from torch.distributed.device_mesh import DeviceMesh, init_device_mesh from torch.nn.utils.rnn import pad_sequence from transformers import PreTrainedTokenizer, PreTrainedTokenizerFast, ProcessorMixin from verl import DataProto from verl.interactions.base import BaseInteraction from verl.interactions.utils.interaction_registry import initialize_interactions_from_config from verl.third_party.sglang import parallel_state as sglang_ps from verl.tools.base_tool import BaseTool from verl.tools.schemas import OpenAIFunctionCallSchema, OpenAIFunctionParsedSchema, OpenAIFunctionToolCall from verl.tools.utils.tool_registry import initialize_tools_from_config from verl.utils.net_utils import is_ipv6 from verl.utils.profiler import GPUMemoryLogger from verl.utils.torch_functional import get_response_mask, pad_sequence_to_length from verl.workers.rollout.base import BaseRollout from verl.workers.rollout.schemas import ( AsyncRolloutRequest, AsyncRolloutRequestStateEnum, FinishReasonTypeEnum, Message, ) from verl.workers.rollout.sglang_rollout.utils import broadcast_pyobj try: from sglang.srt.function_call.function_call_parser import FunctionCallParser except ImportError: from sglang.srt.function_call_parser import FunctionCallParser try: from sglang.srt.entrypoints.openai.protocol import Tool except ImportError: from sglang.srt.openai_api.protocol import Tool logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) # patch to avoid issue https://github.com/sgl-project/sglang/issues/6723 def _set_envs_and_config(server_args: ServerArgs): # Set global environments os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" os.environ["NCCL_CUMEM_ENABLE"] = "0" os.environ["NCCL_NVLS_ENABLE"] = str(int(server_args.enable_nccl_nvls)) os.environ["TORCH_NCCL_AVOID_RECORD_STREAMS"] = "1" os.environ["CUDA_DEVICE_MAX_CONNECTIONS"] = "4" os.environ["CUDA_MODULE_LOADING"] = "AUTO" # Set prometheus env vars if server_args.enable_metrics: set_prometheus_multiproc_dir() # Set ulimit set_ulimit() # Fix triton bugs if server_args.tp_size * server_args.dp_size > 1: # FIXME: remove this after https://github.com/triton-lang/triton/pull/4295 is used as a dependency. 
maybe_set_triton_cache_manager() # Check flashinfer version if server_args.attention_backend == "flashinfer": assert_pkg_version( "flashinfer_python", "0.2.5", "Please uninstall the old version and reinstall the latest version by following the instructions at https://docs.flashinfer.ai/installation.html.", ) if is_cuda(): assert_pkg_version( "sgl-kernel", "0.1.1", "Please reinstall the latest version with `pip install sgl-kernel --force-reinstall`", ) # Set mp start method mp.set_start_method("spawn", force=True) sglang.srt.entrypoints.engine._set_envs_and_config = _set_envs_and_config # because chatCompletion is an async method, it makes the whole ray actor be an async actor # which can not call loop.run_until_complete. So we need to make the engine to be an async class class AsyncEngine(sglang.srt.entrypoints.engine.Engine): def __init__(self, **kwargs): super().__init__(**kwargs) # default to use dummy load format, which need to reload weights in first time self._need_reload = True async def release_memory_occupation(self, tags: Optional[list[str]] = None): """Release GPU occupation temporarily.""" if tags is None: obj = ReleaseMemoryOccupationReqInput() else: obj = ReleaseMemoryOccupationReqInput(tags=tags) return await self.tokenizer_manager.release_memory_occupation(obj, None) async def resume_memory_occupation(self, tags: Optional[list[str]] = None): """Resume GPU occupation.""" # because __init__ is a sync method, it can not call the async release_memory_occupation # have to move release_memory_occupation from __init__ to here # For multi-stage awake, we run release weight and kv_cache when we resume weights for the first time. if self._need_reload: await self.release_memory_occupation() self._need_reload = False if tags is None: obj = ResumeMemoryOccupationReqInput() else: obj = ResumeMemoryOccupationReqInput(tags=tags) return await self.tokenizer_manager.resume_memory_occupation(obj, None) async def update_weights_from_tensor( self, named_tensors: List[Tuple[str, torch.Tensor]], # noqa: UP006 load_format: Optional[str] = None, flush_cache: bool = True, ): """Update weights from distributed source. If there are going to be more updates, set `flush_cache` to be false to avoid duplicated cache cleaning operation.""" obj = UpdateWeightsFromTensorReqInput( serialized_named_tensors=[ MultiprocessingSerializer.serialize(named_tensors) for _ in range(self.server_args.tp_size) ], load_format=load_format, flush_cache=flush_cache, ) return await self.tokenizer_manager.update_weights_from_tensor(obj, None) async def flush_cache(self): return await self.tokenizer_manager.flush_cache() # NOTE(sgm): add for verl. We can optimize it by making # the dataloader yield List[int] without padding. 
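# Worked example (illustrative values) for _pre_process_inputs below: with
# pad_token_id = 0 and a left-padded prompt tensor([0, 0, 0, 15, 27, 9]),
# torch.nonzero(...)[0][0] finds the first non-pad index (3), so the function
# returns tensor([15, 27, 9]).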
def _pre_process_inputs( pad_token_id, prompt_token_ids: torch.Tensor, ) -> torch.Tensor: # remove the left padding in the prompt token_id non_pad_index = torch.nonzero(prompt_token_ids != pad_token_id, as_tuple=False)[0][0] return prompt_token_ids[non_pad_index:] # NOTE(linjunrong): adhoc def _post_process_outputs(processing_class, output): try: # This is when processing_class is a processor tokenizer = processing_class.tokenizer except AttributeError: try: # This is when processing_class is a tokenizer tokenizer = processing_class except AttributeError as e: raise ValueError(f"Cannot get tokenizer from processing_class {processing_class}") from e def _map_each_response(resp): output_token_logprobs = resp["meta_info"]["output_token_logprobs"] log_probs, output_token_ids = zip( *[(log_prob, token_ids) for log_prob, token_ids, _ in output_token_logprobs], strict=True ) return torch.tensor(output_token_ids), torch.tensor(log_probs) out_map = map(lambda x: _map_each_response(x), output) batched_output_token_ids = [] batched_logprobs = [] for output_token_ids, log_probs in out_map: batched_output_token_ids.append(output_token_ids) batched_logprobs.append(log_probs) pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id batched_output_token_ids = pad_sequence(batched_output_token_ids, batch_first=True, padding_value=pad_token_id) if len(batched_logprobs) > 0: batched_logprobs = pad_sequence(batched_logprobs, batch_first=True, padding_value=pad_token_id) return batched_output_token_ids, batched_logprobs def get_tool_call_parser_type( processing_class: PreTrainedTokenizer | PreTrainedTokenizerFast | ProcessorMixin, ) -> str: items = FunctionCallParser.ToolCallParserEnum.items() for parser_type, parser_cls in items: parser = parser_cls() try: # This is when processing_class is a tokenizer tokenizer_vocab = processing_class.get_vocab() except AttributeError: try: # This is when processing_class is a processor tokenizer_vocab = processing_class.tokenizer.get_vocab() except AttributeError as e: raise ValueError(f"Cannot get vocab from processing_class {processing_class}") from e if parser.bot_token.strip() in tokenizer_vocab and ( parser.eot_token == "" or parser.eot_token.strip() in tokenizer_vocab ): return parser_type else: raise ValueError(f"No tool call parser found for processing_class {processing_class}") class SGLangRollout(BaseRollout): def __init__( self, actor_module: str, config: DictConfig, processing_class: PreTrainedTokenizer | PreTrainedTokenizerFast | ProcessorMixin, model_hf_config, port=None, trust_remote_code: bool = False, device_mesh: DeviceMesh | None = None, **kwargs, ): """Synchronized SGLang rollout engine. Args: actor_module: Huggingface model name or path to the model. The model should be supported by SGLang. config: A DictConfig object containing SGLang-specific operational parameters and rollout settings. Refer to https://docs.sglang.ai/backend/server_arguments.html processing_class: The tokenizer or processor instance compatible with the actor_module. model_hf_config: The Hugging Face model's configuration (e.g., `transformers.PretrainedConfig`). It provides architectural details and hyperparameters like `max_position_embeddings`, used by SGLang for correct model initialization. This is the model's inherent design, not SGLang's runtime behavior. port: Optional port for multi-node initialization when nnodes > 1. trust_remote_code: Whether or not to allow for custom models defined on the Hub in their own modeling files. 
device_mesh: Optional `DeviceMesh` object for distributed setup. **kwargs: Additional keyword arguments, primarily `train_tp` for Megatron Backend integration to initialize hybrid engine process groups. """ super().__init__() self.config = config self._device_mesh_cpu = device_mesh os.environ.setdefault("SGL_DISABLE_TP_MEMORY_INBALANCE_CHECK", "true") ( self._tool_schemas, self._tool_map, self._tool_call_parser_type, self._sgl_tools, self._function_call_parser, ) = self._initialize_tools(config, processing_class) self.interaction_map: dict[str, BaseInteraction] = self._initialize_interactions(config) # If turn on `free_cache_engine`, SGLang engine's KV cache # will be freed after each `generate_sequences` call. logger.info( f"tool_schemas: {self._tool_schemas}, tool_map: {self._tool_map}, tool_call_parser_type: " f"{self._tool_call_parser_type}, sgl_tools: {self._sgl_tools}, function_call_parser: " f"{self._function_call_parser}" ) self._init_distributed_env(device_mesh_cpu=device_mesh, **kwargs) self._verify_config(model_hf_config=model_hf_config) # initialize the inference engine self._init_inference_engine(trust_remote_code, actor_module, port) self._init_sampling_params(**kwargs) self.processing_class = processing_class try: # This is when processing_class is a tokenizer self.pad_token_id = self.processing_class.pad_token_id except AttributeError: try: # This is when processing_class is a processor self.pad_token_id = self.processing_class.tokenizer.pad_token_id except AttributeError as e: raise ValueError(f"Cannot get pad_token_id from processing_class {self.processing_class}") from e def _init_distributed_env(self, device_mesh_cpu, **kwargs): self._device_mesh_cpu = device_mesh_cpu os.environ.setdefault("SGL_DISABLE_TP_MEMORY_INBALANCE_CHECK", "true") self.tensor_parallel_size = self.config.get("tensor_model_parallel_size", 1) assert self.tensor_parallel_size <= dist.get_world_size(), ( "tensor parallel size should be less than or equal to the world size" ) self.train_tp = kwargs.get("train_tp", None) if self.train_tp is not None: # deployed with megatron os.environ["CUDA_TIMER_STREAM_KAFKA_ENABLE"] = "0" os.environ["MEGATRON_IMPORT_TIMERS"] = "0" train_tp = kwargs.get("train_tp", None) num_tp_per_train_tp = train_tp // self.tensor_parallel_size sglang_ps.initialize_parallel_state( tensor_model_parallel_size=self.tensor_parallel_size, num_tp_per_train_tp=num_tp_per_train_tp, ) tp_size = self.tensor_parallel_size world_size = int(os.getenv("WORLD_SIZE", "-1")) # init device mesh if self._device_mesh_cpu is None: device_mesh_kwargs = dict( mesh_shape=(world_size // tp_size, tp_size, 1), mesh_dim_names=["dp", "tp", "pp"], ) self._device_mesh_cpu = init_device_mesh("cpu", **device_mesh_kwargs) self._rank = self._device_mesh_cpu.get_rank() self._tp_rank = self._device_mesh_cpu["tp"].get_local_rank() self._tp_size = self._device_mesh_cpu["tp"].size() if self._rank == 0: logger.info(f"_init_distributed_env: :tp_world: {self._tp_size}, global_world: {world_size}") # get tp_rank of this process in this tp group visible_devices = [None] * self._device_mesh_cpu.size(1) torch.distributed.all_gather_object( visible_devices, os.environ["CUDA_VISIBLE_DEVICES"], self._device_mesh_cpu.get_group("tp") ) self.visible_devices_set = set(",".join(visible_devices).split(",")) os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(sorted(list(self.visible_devices_set))) def _verify_config(self, model_hf_config): if not self.config.get("max_model_len", None): self.config.max_model_len = self.config.prompt_length + 
self.config.response_length assert ( self.config.max_model_len >= self.config.prompt_length + self.config.response_length ), f"""max_model_len should be greater than total sequence length (prompt_length + response_length): {self.config.max_model_len} >= {self.config.prompt_length} + {self.config.response_length}""" max_position_embeddings = None if hasattr(model_hf_config, "max_position_embeddings"): max_position_embeddings = model_hf_config.max_position_embeddings elif hasattr(model_hf_config, "llm_config") and hasattr(model_hf_config.llm_config, "max_position_embeddings"): max_position_embeddings = model_hf_config.llm_config.max_position_embeddings elif hasattr(model_hf_config, "text_config") and hasattr( model_hf_config.text_config, "max_position_embeddings" ): max_position_embeddings = model_hf_config.text_config.max_position_embeddings if max_position_embeddings is None: raise ValueError("max_position_embeddings not found in model_hf_config") rope_scaling_config = getattr(model_hf_config, "rope_scaling", None) if not rope_scaling_config: assert max_position_embeddings >= self.config.prompt_length + self.config.response_length, ( "model context length should be greater than total sequence length" ) else: # handle type where there's a length extend factor # see https://qwen.readthedocs.io/en/latest/deployment/vllm.html#extended-context-support # for using yarn as an example rope_scaling_factor = rope_scaling_config.get("factor", 1.0) assert ( model_hf_config.max_position_embeddings * rope_scaling_factor >= self.config.prompt_length + self.config.response_length ), ( f"model context length should be greater than total sequence length, " f"got rope_scaling_factor={rope_scaling_factor} and " f"max_position_embeddings={model_hf_config.max_position_embeddings}" ) # currently max_assistant_turns stand for max number of tool calls if self.config.multi_turn.max_assistant_turns is None: self.config.multi_turn.max_assistant_turns = self.config.max_model_len // 3 if self.config.multi_turn.max_user_turns is None: self.config.multi_turn.max_user_turns = self.config.max_model_len // 3 def _init_inference_engine(self, trust_remote_code, actor_module, port): # initialize the inference engine nnodes = -(-self._tp_size // len(self.visible_devices_set)) if nnodes > 1: ip = get_ip() port = get_open_port() if port is None else port [ip, port] = broadcast_pyobj( [ip, port], rank=self._rank, dist_group=self._device_mesh_cpu.get_group("tp"), src=self._device_mesh_cpu["tp"].mesh[0].item(), force_cpu_device=False, ) dist_init_addr = f"[{ip}]:{port}" if is_ipv6(ip) else f"{ip}:{port}" else: dist_init_addr = None load_format = "dummy" if self.config.load_format.startswith("dummy") else self.config.load_format tp_size_per_node = self._tp_size // nnodes node_rank = self._tp_rank // tp_size_per_node first_rank_in_node = self._tp_rank % tp_size_per_node == 0 if first_rank_in_node: rank = dist.get_rank() os.environ["SGLANG_BLOCK_NONZERO_RANK_CHILDREN"] = "0" self._engine = AsyncEngine( model_path=actor_module, dtype=self.config.dtype, mem_fraction_static=self.config.gpu_memory_utilization, enable_memory_saver=True, base_gpu_id=0, gpu_id_step=1, tp_size=self._tp_size, node_rank=node_rank, load_format=load_format, dist_init_addr=dist_init_addr, nnodes=nnodes, trust_remote_code=trust_remote_code, # NOTE(linjunrong): add rank to prevent SGLang generate same port inside PortArgs.init_new # when random.seed is being set during training port=30000 + rank, # NOTE(Chenyang): if you want to debug the SGLang engine output # 
please set the following parameters # Otherwise, it will make the engine run too slow # log_level="INFO", # log_requests=True, # log_requests_level=2, # max_running_requests=1, mm_attention_backend="fa3", attention_backend="fa3", # In async mode, we want token in token out. skip_tokenizer_init=self.config.mode == "async", ) else: self._engine = None self.sharding_manager = None self.is_sleep = True def _init_sampling_params(self, **kwargs): kwargs = dict( n=1, max_new_tokens=self.config.response_length, presence_penalty=0.0, frequency_penalty=0.0, repetition_penalty=1.0, ) # support adding any sampling params from the config file for k in self.config.keys(): if hasattr(SamplingParams(), str(k)) or "stop" in str(k): kwargs[k] = self.config.get(k) kwargs["n"] = 1 # already repeat in ray_trainer self.sampling_params = kwargs def _initialize_tools(self, config, processing_class): """Initialize tools from configuration. Args: config: Configuration object containing tool-related settings, specifically `config.multi_turn.tool_config_path`. processing_class: The tokenizer or processor instance used for parsing tool calls from the model's generated text. Returns: tuple: A tuple containing: - tool_schemas (list[dict]): OpenAI-formatted JSON schemas defining each tool's capabilities. - tool_map (dict[str, BaseTool]): A dictionary mapping tool names to their executable `BaseTool` objects. - tool_call_parser_type (str): The identifier for the specific parser type (e.g., 'json_mode', 'tool_code') used to extract tool calls. - sgl_tools (list[sglang.srt.openai_api.protocol.Tool]): Tool definitions optimized for SGLang's internal engine. - function_call_parser (sglang.srt.function_call_parser.FunctionCallParser): The active parser instance responsible for extracting structured tool calls from model outputs. """ if config.multi_turn.tool_config_path is None: return [], {}, None, [], None tools_config_file = config.multi_turn.tool_config_path tool_list = initialize_tools_from_config(tools_config_file) logger.info(f"Initialize tools from configuration: tool_list: {tool_list}") tool_schemas = [tool.get_openai_tool_schema().model_dump() for tool in tool_list] tool_map = {tool.name: tool for tool in tool_list} tool_call_parser_type = get_tool_call_parser_type(processing_class) sgl_tools = [Tool.model_validate(tool_schema) for tool_schema in tool_schemas] function_call_parser = FunctionCallParser( sgl_tools, tool_call_parser_type, ) return ( tool_schemas, tool_map, tool_call_parser_type, sgl_tools, function_call_parser, ) def _initialize_interactions(self, config): """Initialize interactions from configuration. Returns: dict[str, BaseInteraction]: A dictionary mapping interaction names to interaction instances. """ if config.multi_turn.interaction_config_path is None: return {} interaction_config_file = config.multi_turn.interaction_config_path interaction_map = initialize_interactions_from_config(interaction_config_file) logger.info(f"Initialize interactions from configuration: interaction_map: {list(interaction_map.keys())}") return interaction_map @GPUMemoryLogger(role="sglang rollout", logger=logger) @torch.no_grad() def generate_sequences(self, prompts: DataProto, **kwargs) -> DataProto: """Generate sequences for a batch of prompts. Args: prompts (DataProto): Input batch. Returns: DataProto: Output batch. - prompts: [bsz, prompt_length], prompt token ids from dataset. - responses: [bsz, response_length], output token ids include response tokens from LLM generation and observation tokens from tool_calls.
- response_mask: [bsz, response_length], 1 for LLM generated tokens, 0 for observation/padding tokens. - input_ids: [bsz, prompt_length + response_length], whole sequence token ids, including prompt tokens and response tokens. - attention_mask: [bsz, prompt_length + response_length], 0 for padding tokens, 1 for other tokens. - position_ids: [bsz, prompt_length + response_length], incremental position ids. For multi-turn conversations: responses: |<- LLM generation ->|<- tool_calls ->|<- LLM generation ->|<- padding ->| response_mask: | 1, 1, 1, ..., 1, 1 | 0, 0, .., 0, 0 | 1, 1, 1, ..., 1, 1 | 0, 0, ..., 0| """ if self.config.multi_turn.enable: return self._req_level_generate_sequences(prompts, **kwargs) return self._batch_level_generate_sequences(prompts, **kwargs) @GPUMemoryLogger(role="sglang rollout", logger=logger) @torch.no_grad() def _batch_level_generate_sequences(self, prompts: DataProto, **kwargs) -> DataProto: """Generates single-turn sequences for a batch of prompts. For single-turn generation, all prompts are processed in one request. `_batch_level_generate_sequences` involves: 1. Extracting and pre-processing prompt token IDs from the input `prompts`. This includes handling padding and preparing raw token ID lists. 2. Preparing inputs for the SGLang engine, including multi-modal data if present. 3. Invoking the SGLang engine (`self._engine.async_generate`, an async coroutine) with the batch of processed inputs and specified sampling parameters on the master TP rank. 4. Broadcasting the results from the master TP rank to all other TP ranks. 5. Post-processing the engine's output to format the generated token IDs and (if applicable) log probabilities. 6. Constructing the final sequences by concatenating original prompts with the generated responses. 7. Updating attention masks and position IDs to reflect the full concatenated sequences. 8. If `self.config.free_cache_engine` is true, the SGLang engine's KV cache is flushed after generation on the master TP rank. Args: prompts: A `DataProto` object containing the batch of input prompts, including tensor data (like `input_ids`, `attention_mask`) and meta-information (like `eos_token_id`, `do_sample`). **kwargs: Additional keyword arguments that can override the default sampling parameters (e.g., `temperature`, `top_p`, `max_new_tokens`). These are temporarily applied using `update_sampling_params`. Returns: DataProto: A `DataProto` object containing the batch of generated sequences. This includes tensors for `prompts` (original input IDs), `responses` (generated token IDs), `input_ids` (concatenated prompt and response), `attention_mask`, and `position_ids` for the full sequences. Note that in GRPO, if the prompts are validated, we repeat the prompts for rollout.n times in ray_trainer. Thus we do not need to repeat the prompts here and set the sampling parameter n to 1. 
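Example (illustrative call, hypothetical values):
    output = rollout._batch_level_generate_sequences(prompts, temperature=0.7)
Here `temperature=0.7` flows through **kwargs into request_sampling_params and
overrides the default sampling temperature for this call only.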
""" # input ids: (bs, prompt_length), left-padded idx = prompts.batch["input_ids"] # attention_mask: (bs, seq_length), left-padded attention_mask = prompts.batch["attention_mask"] position_ids = prompts.batch["position_ids"] # used to generate attention mask for the # response based on EOS token position eos_token_id = prompts.meta_info["eos_token_id"] batch_size = idx.size(0) # Extract non-tensor data non_tensor_batch = prompts.non_tensor_batch if "raw_prompt_ids" not in non_tensor_batch: non_tensor_batch["raw_prompt_ids"] = np.array( [_pre_process_inputs(self.pad_token_id, idx[i]).tolist() for i in range(batch_size)], dtype=object, ) if "multi_modal_data" in non_tensor_batch: sglang_inputs = [] for raw_prompt_ids, multi_modal_data in zip( non_tensor_batch.pop("raw_prompt_ids"), non_tensor_batch.pop("multi_modal_data"), strict=True, ): sglang_inputs.append( { "prompt_token_ids": raw_prompt_ids, "multi_modal_data": multi_modal_data, "image_data": ( multi_modal_data.get("image", None) if isinstance(multi_modal_data, dict) else None ), } ) else: sglang_inputs = [ {"prompt_token_ids": raw_prompt_ids} for raw_prompt_ids in non_tensor_batch.pop("raw_prompt_ids") ] # Ensure token IDs are lists or numpy arrays for input_data in sglang_inputs: if isinstance(input_data["prompt_token_ids"], np.ndarray): input_data["prompt_token_ids"] = input_data["prompt_token_ids"].tolist() elif not isinstance(input_data["prompt_token_ids"], list): raise TypeError( f"prompt_token_ids must be a list or numpy array, got {type(input_data['prompt_token_ids'])}" ) # Extract token IDs and image data for SGLang Engine idx_list = [input_data["prompt_token_ids"] for input_data in sglang_inputs] image_list = [input_data.get("image_data", None) for input_data in sglang_inputs] do_sample = prompts.meta_info.get("do_sample", True) is_validate = prompts.meta_info.get("validate", False) # Create request-level sampling parameters request_sampling_params = self.sampling_params.copy() if not do_sample: request_sampling_params.update( { "n": 1, "presence_penalty": 0.0, "frequency_penalty": 0.0, "repetition_penalty": 1.0, "temperature": 0, "top_p": 1, "top_k": -1, "ignore_eos": False, "min_new_tokens": 0, "max_new_tokens": self.config.response_length, "skip_special_tokens": True, "spaces_between_special_tokens": True, } ) elif is_validate: request_sampling_params.update( { "top_k": self.config.val_kwargs.top_k, "top_p": self.config.val_kwargs.top_p, "temperature": self.config.val_kwargs.temperature, "n": 1, # if validate, already repeat in ray_trainer } ) # Update with any additional kwargs request_sampling_params.update(kwargs) if self._tp_rank == 0: loop = asyncio.get_event_loop() output = loop.run_until_complete( self._engine.async_generate( prompt=None, # because we have already convert it to prompt token id sampling_params=request_sampling_params, return_logprob=True, input_ids=idx_list, image_data=image_list, ) ) else: output = None # Most naive implementation, can extract tensor and send via gloo if too slow dist.barrier() [output] = broadcast_pyobj( data=[output], rank=self._rank, dist_group=self._device_mesh_cpu["tp"].get_group(), src=self._device_mesh_cpu["tp"].mesh[0].item(), force_cpu_device=False, ) out = _post_process_outputs(self.processing_class, output) response = out[0].to(idx.device) rollout_log_probs = None if self.config.calculate_log_probs: rollout_log_probs = out[1].to(idx.device) if response.shape[1] < self.config.response_length: response = pad_sequence_to_length(response, self.config.response_length, 
self.pad_token_id) if self.config.calculate_log_probs: rollout_log_probs = pad_sequence_to_length( rollout_log_probs, self.config.response_length, self.pad_token_id ) seq = torch.cat([idx, response], dim=-1) response_length = response.size(1) delta_position_id = torch.arange(1, response_length + 1, device=position_ids.device) delta_position_id = delta_position_id.unsqueeze(0).repeat(batch_size, 1) if position_ids.dim() == 3: # qwen2vl mrope delta_position_id = delta_position_id.view(batch_size, 1, -1).expand(batch_size, 3, -1) # TODO(sgm): fix position_ids on right_pad # prompt: left pad + response: right pad # attention_mask: [0,0,0,0,1,1,1,1, | 1,1,1,0,0,0,0,0] # position_ids: [0,0,0,0,0,1,2,3, | 4,5,6,7,8,9,10,11] response_position_ids = position_ids[..., -1:] + delta_position_id position_ids = torch.cat([position_ids, response_position_ids], dim=-1) response_attention_mask = get_response_mask( response_id=response, eos_token=eos_token_id, dtype=attention_mask.dtype ) attention_mask = torch.cat((attention_mask, response_attention_mask), dim=-1) # all the tp ranks should contain the same data here. data in all ranks are valid batch = TensorDict( { "prompts": idx, "responses": response, "input_ids": seq, # here input_ids become the whole sentences "attention_mask": attention_mask, "position_ids": position_ids, }, batch_size=batch_size, ) if self.config.calculate_log_probs: # we will recompute old log prob with actor batch["rollout_log_probs"] = rollout_log_probs # free cache engine if self._engine is not None and self._tp_rank == 0: loop = asyncio.get_event_loop() loop.run_until_complete(self._engine.flush_cache()) return DataProto(batch=batch, non_tensor_batch=non_tensor_batch) async def _async_rollout_a_request( self, req: AsyncRolloutRequest, do_sample: bool = True, is_validate: bool = False, **kwargs, ) -> AsyncRolloutRequest: assert self._tp_rank == 0, "only the master process can call this function" _req = deepcopy(req) finish_reason_type = None output = None current_turns = 0 user_turns = 0 user_turn_rewards = [] # Create request-level sampling parameters request_sampling_params = self.sampling_params.copy() if not do_sample: request_sampling_params.update( { "n": 1, "presence_penalty": 0.0, "frequency_penalty": 0.0, "repetition_penalty": 1.0, "temperature": 0, "top_p": 1, "top_k": -1, "ignore_eos": False, "min_new_tokens": 0, "max_new_tokens": self.config.response_length, "skip_special_tokens": True, "spaces_between_special_tokens": True, } ) elif is_validate: request_sampling_params.update( { "top_k": self.config.val_kwargs.top_k, "top_p": self.config.val_kwargs.top_p, "temperature": self.config.val_kwargs.temperature, "n": 1, # if validate, already repeat in ray_trainer } ) # Update with any additional kwargs request_sampling_params.update(kwargs) while current_turns < self.config.multi_turn.max_assistant_turns: if _req.state == AsyncRolloutRequestStateEnum.PENDING: await self._handle_pending_state(_req) _req.state = AsyncRolloutRequestStateEnum.RUNNING elif _req.state == AsyncRolloutRequestStateEnum.TOOL_CALLING: if _req.messages[-1].tool_calls is not None: parsed_tool_calls = _req.messages[-1].tool_calls tool_call_results = await asyncio.gather( *[ self._tool_map[tool_call.function.name].execute( _req.request_id, tool_call.function.arguments, **_req.tools_kwargs[tool_call.function.name].get("execute_kwargs", {}), ) for tool_call in parsed_tool_calls ] ) _req.add_tool_response_messages(self.processing_class, [resp for resp, _, _ in tool_call_results]) for tool_call, (resp, 
reward, metrics) in zip(parsed_tool_calls, tool_call_results, strict=True): _req.update_metrics(metrics, tool_call.function.name) if len(_req.input_ids) >= self.config.max_model_len: finish_reason_type = FinishReasonTypeEnum.STOP break _req.state = AsyncRolloutRequestStateEnum.RUNNING else: raise ValueError(f"Unexpected tool calling last message state: {_req.messages[-1]}") elif _req.state == AsyncRolloutRequestStateEnum.RUNNING: # Only continue the conversation if the prompt length is not greater than max_model_len - 1, # since SGLang raises an error when max_new_tokens + 1 is greater to max_model_len (the extra # token accounts for the EOS token). if len(_req.get_generation_prompt_ids(self.processing_class)) + 1 >= self.config.max_model_len: finish_reason_type = FinishReasonTypeEnum.LENGTH break # Video support is not implemented yet image_data = ( _req.multi_modal_data["image"] if _req.multi_modal_data and "image" in _req.multi_modal_data else None ) video_data = ( _req.multi_modal_data["video"] if _req.multi_modal_data and "video" in _req.multi_modal_data else None ) if video_data: logger.warning( "video support is not implemented yet, current length of video data is %d", len(video_data) ) output = await self._handle_engine_call(_req, request_sampling_params, image_data=image_data) content = output["text"] finish_reason_type = FinishReasonTypeEnum.from_str(output["meta_info"]["finish_reason"]["type"]) current_turns += 1 if finish_reason_type == FinishReasonTypeEnum.LENGTH: _req.add_assistant_message(self.processing_class, content) break else: if self._function_call_parser and self._function_call_parser.has_tool_call(content): finish_reason_type = FinishReasonTypeEnum.TOOL_CALL _req.state = AsyncRolloutRequestStateEnum.TOOL_CALLING try: normed_content, tool_calls = self._function_call_parser.parse_non_stream(content) except JSONDecodeError: normed_content = content tool_calls = [] except AttributeError: normed_content = content tool_calls = [] parsed_tool_calls = [] for tool_call in tool_calls: function, has_decode_error = OpenAIFunctionCallSchema.from_openai_function_parsed_schema( OpenAIFunctionParsedSchema( name=tool_call.name, arguments=tool_call.parameters, ) ) # Drop the tool call if its arguments has decode error if has_decode_error: continue parsed_tool_calls.append( OpenAIFunctionToolCall( id=str(tool_call.tool_index), function=function, ) ) if len(parsed_tool_calls) > 0: _req.add_assistant_message( self.processing_class, normed_content, tool_calls=parsed_tool_calls ) else: _req.add_assistant_message(self.processing_class, content) finish_reason_type = FinishReasonTypeEnum.STOP _req.state = AsyncRolloutRequestStateEnum.COMPLETED break else: _req.add_assistant_message( self.processing_class, content, ) if ( _req.interaction_kwargs and self.interaction_map and user_turns < self.config.multi_turn.max_user_turns and current_turns < self.config.multi_turn.max_assistant_turns ): _req.state = AsyncRolloutRequestStateEnum.INTERACTING else: break elif _req.state == AsyncRolloutRequestStateEnum.INTERACTING: user_turns += 1 messages = [{"role": x.role, "content": x.content} for x in _req.messages] # Get interaction by name from interaction_kwargs interaction_name = _req.interaction_kwargs.get( "name", "gsm8k" ) # Default to gsm8k for backward compatibility if interaction_name not in self.interaction_map: raise ValueError( f"Interaction '{interaction_name}' not found in interaction_map. 
Available interactions: " f"{list(self.interaction_map.keys())}" ) interaction = self.interaction_map[interaction_name] should_terminate_sequence, content, reward, metrics = await interaction.generate_response( _req.request_id, messages, **_req.interaction_kwargs ) user_turn_rewards.append(reward) if should_terminate_sequence: finish_reason_type = FinishReasonTypeEnum.STOP _req.state = AsyncRolloutRequestStateEnum.COMPLETED break else: _req.add_user_message(self.processing_class, content) if len(_req.input_ids) >= self.config.max_model_len: finish_reason_type = FinishReasonTypeEnum.STOP break else: _req.state = AsyncRolloutRequestStateEnum.RUNNING if current_turns >= self.config.multi_turn.max_assistant_turns: finish_reason_type = FinishReasonTypeEnum.STOP # Calculate the reward for each tool async def calc_reward_and_release_fn(name: str, tool: BaseTool): reward = await tool.calc_reward(_req.request_id, **_req.tools_kwargs[name].get("calc_reward_kwargs", {})) await tool.release(_req.request_id, **_req.tools_kwargs[name].get("release_kwargs", {})) return name, reward tool_reward_tasks = [] for name in _req.tools_kwargs.keys(): tool = self._tool_map[name] tool_reward_tasks.append(calc_reward_and_release_fn(name, tool)) tool_reward_scores = await asyncio.gather(*tool_reward_tasks) tool_reward_scores = dict(tool_reward_scores) all_rewards = {**tool_reward_scores, **{"user_turn_rewards": user_turn_rewards}} _req.finalize(self.processing_class, all_rewards, finish_reason_type) return _req async def _handle_engine_call( self, _req: AsyncRolloutRequest, sampling_params: dict, image_data: Optional[list[Any]] = None ) -> dict: generation_prompt_ids = _req.get_generation_prompt_ids(self.processing_class) return await self._handle_engine_generate(generation_prompt_ids, sampling_params, image_data) async def _handle_engine_generate( self, generation_prompt_ids: list[int], sampling_params: dict, image_data: Optional[list[Any]] = None ) -> dict: max_new_tokens = min(self.config.response_length, self.config.max_model_len - len(generation_prompt_ids) - 1) kwargs = sampling_params.copy() kwargs["max_new_tokens"] = max_new_tokens kwargs["n"] = 1 # group size is supported in preprocess output = await self._engine.async_generate( input_ids=generation_prompt_ids, sampling_params=kwargs, return_logprob=False, image_data=image_data, ) return output async def _handle_pending_state(self, _req: AsyncRolloutRequest) -> AsyncRolloutRequest: if _req.tool_schemas is not None: tool_creation_coroutines = [] for tool_schema in _req.tool_schemas: tool = self._tool_map[tool_schema.function.name] create_kwargs = _req.tools_kwargs[tool.name].get("create_kwargs", {}) tool_creation_coroutines.append(tool.create(_req.request_id, **create_kwargs)) await asyncio.gather(*tool_creation_coroutines) if _req.interaction_kwargs and self.interaction_map: interaction_kwargs = _req.interaction_kwargs # Get interaction by name from interaction_kwargs interaction_name = interaction_kwargs.get("name", "gsm8k") # Default to gsm8k for backward compatibility if interaction_name not in self.interaction_map: raise ValueError( f"Interaction '{interaction_name}' not found in interaction_map. 
Available interactions: " f"{list(self.interaction_map.keys())}" ) interaction = self.interaction_map[interaction_name] await interaction.start_interaction(_req.request_id, **interaction_kwargs) @GPUMemoryLogger(role="sglang rollout", logger=logger) @torch.no_grad() def generate_sequences_with_tools(self, prompts: DataProto, **kwargs) -> DataProto: logger.warning( "`generate_sequences_with_tools` is deprecated, please use `generate_sequences(...)`", DeprecationWarning, stacklevel=2, ) return self._req_level_generate_sequences(prompts, **kwargs) @GPUMemoryLogger(role="sglang rollout", logger=logger) @torch.no_grad() def _req_level_generate_sequences(self, prompts: DataProto, **kwargs) -> DataProto: """Generates multi-turn sequences for a batch of prompts. For multi-turn generation, each prompt is processed separately via `_req_level_generate_sequences` for better tool calling control. Note that in multi-turn generation, we repeat the prompts for rollout.n times in ray_trainer. Thus we do not need to repeat the prompts here and set the sampling parameter n to 1. """ # Async rollout with tools support do_sample = prompts.meta_info.get("do_sample", True) is_validate = prompts.meta_info.get("validate", False) tgt_device = prompts.batch["input_ids"].device if self._tp_rank == 0: req_list = self._preprocess_prompt_to_async_rollout_requests( prompts, ) loop = asyncio.get_event_loop() output_req_list = loop.run_until_complete( asyncio.gather( *[self._async_rollout_a_request(req, do_sample, is_validate, **kwargs) for req in req_list], ) ) sorted_output_req_list = sorted(output_req_list, key=lambda x: (x.batch_data_id, x.rollout_offset)) else: sorted_output_req_list = None dist.barrier() [sorted_output_req_list] = broadcast_pyobj( data=[sorted_output_req_list], rank=self._rank, dist_group=self._device_mesh_cpu["tp"].get_group(), src=self._device_mesh_cpu["tp"].mesh[0].item(), force_cpu_device=False, ) # Construct the batch data prompt_ids, response_ids = [], [] prompt_attention_mask, response_attention_mask = [], [] prompt_position_ids, response_position_ids = [], [] prompt_loss_mask, response_loss_mask = [], [] messages = [] reward_scores = [] multi_modal_inputs = [] request_ids = [] for req in sorted_output_req_list: assert req.state == AsyncRolloutRequestStateEnum.COMPLETED, f"Request {req.request_id} is not completed" assert ( req.input_ids.shape[-1] == req.attention_mask.shape[-1] == req.position_ids.shape[-1] == req.loss_mask.shape[-1] ), f"""Request {req.request_id} has different length of {req.input_ids.shape[-1]=}, {req.attention_mask.shape[-1]=}, {req.position_ids.shape[-1]=}, {req.loss_mask.shape[-1]=}""" error_message_lines = [ f"""Request {req.request_id} has input_ids length {req.input_ids.shape[-1]} greater than max_model_len {self.config.max_model_len}""", f"Decoded input_ids: {self.processing_class.decode(req.input_ids.squeeze(0))}", f"Decoded prompt_ids: {self.processing_class.decode(req.prompt_ids.squeeze(0))}", f"Decoded response_ids: {self.processing_class.decode(req.response_ids.squeeze(0))}", f"Messages: {req.messages}", f"Max model length: {req.max_model_len}", ] error_message = "\n".join(error_message_lines) assert req.input_ids.shape[-1] <= self.config.max_model_len, error_message prompt_ids.append(req.prompt_ids.to(tgt_device).squeeze(0)) response_ids.append(req.response_ids.to(tgt_device).squeeze(0)) if req.response_ids.shape[-1] > self.config.response_length: logger.warning( f"""{req.request_id=} has response_ids length {req.response_ids.shape[-1]} greater than 
max_response_len {self.config.response_length},\n{req=}""" ) prompt_attention_mask.append(req.prompt_attention_mask.to(tgt_device).squeeze(0)) response_attention_mask.append(req.response_attention_mask.to(tgt_device).squeeze(0)) prompt_position_ids.append(req.prompt_position_ids.to(tgt_device).squeeze(0)) response_position_ids.append(req.response_position_ids.to(tgt_device).squeeze(0)) prompt_loss_mask.append(req.prompt_loss_mask.to(tgt_device).squeeze(0)) response_loss_mask.append(req.response_loss_mask.to(tgt_device).squeeze(0)) messages.append({"messages": req.messages}) reward_scores.append(req.reward_scores) multi_modal_inputs.append(req.multi_modal_inputs) request_ids.append(req.request_id) prompt_ids = pad_sequence( prompt_ids, batch_first=True, padding_value=self.pad_token_id, padding_side="left", ) if prompt_ids.shape[-1] < self.config.prompt_length: prompt_ids = pad_sequence_to_length(prompt_ids, self.config.prompt_length, self.pad_token_id, left_pad=True) response_ids = pad_sequence(response_ids, batch_first=True, padding_value=self.pad_token_id) if response_ids.shape[-1] < self.config.response_length: response_ids = pad_sequence_to_length(response_ids, self.config.response_length, self.pad_token_id) prompt_attention_mask = pad_sequence( prompt_attention_mask, batch_first=True, padding_value=0, padding_side="left", ) if prompt_attention_mask.shape[-1] < self.config.prompt_length: prompt_attention_mask = pad_sequence_to_length( prompt_attention_mask, self.config.prompt_length, 0, left_pad=True ) response_attention_mask = pad_sequence(response_attention_mask, batch_first=True, padding_value=0) if response_attention_mask.shape[-1] < self.config.response_length: response_attention_mask = pad_sequence_to_length(response_attention_mask, self.config.response_length, 0) # padding prompt_position_ids if prompt_position_ids[0].dim() == 2: # if prompt_position_ids is a 2D tensor # e.g. from qwen2vl, prompt_position_ids.shape = (3, seq_len) transposed_prompt_position_ids = [p.transpose(0, 1) for p in prompt_position_ids] prompt_position_ids = pad_sequence( transposed_prompt_position_ids, batch_first=True, padding_value=0, padding_side="left" ) prompt_position_ids = prompt_position_ids.transpose(1, 2) else: prompt_position_ids = pad_sequence( prompt_position_ids, batch_first=True, padding_value=0, padding_side="left" ) if prompt_position_ids.shape[-1] < self.config.prompt_length: prompt_position_ids = pad_sequence_to_length( prompt_position_ids, self.config.prompt_length, 0, left_pad=True ) # padding response_position_ids if response_position_ids[0].dim() == 2: # if response_position_ids is a 2D tensor # e.g. 
from qwen2vl, response_position_ids.shape = (3, seq_len) transposed_response_position_ids = [p.transpose(0, 1) for p in response_position_ids] response_position_ids = pad_sequence( transposed_response_position_ids, batch_first=True, padding_value=0, padding_side="left" ) response_position_ids = response_position_ids.transpose(1, 2) else: response_position_ids = pad_sequence(response_position_ids, batch_first=True, padding_value=0) if response_position_ids.shape[-1] < self.config.response_length: response_position_ids = pad_sequence_to_length(response_position_ids, self.config.response_length, 0) prompt_loss_mask = pad_sequence(prompt_loss_mask, batch_first=True, padding_value=0, padding_side="left") if prompt_loss_mask.shape[1] < self.config.prompt_length: prompt_loss_mask = pad_sequence_to_length(prompt_loss_mask, self.config.prompt_length, 0, left_pad=True) response_loss_mask = pad_sequence(response_loss_mask, batch_first=True, padding_value=0) if response_loss_mask.shape[1] < self.config.response_length: response_loss_mask = pad_sequence_to_length(response_loss_mask, self.config.response_length, 0) input_ids = torch.cat((prompt_ids, response_ids), dim=-1) attention_mask = torch.cat((prompt_attention_mask, response_attention_mask), dim=-1) position_ids = torch.cat((prompt_position_ids, response_position_ids), dim=-1) # Construct the batch data batch = TensorDict( { "prompts": prompt_ids, "responses": response_ids, "response_mask": response_loss_mask, "input_ids": input_ids, # here input_ids become the whole sentences "attention_mask": attention_mask, "position_ids": position_ids, }, batch_size=len(sorted_output_req_list), ) # free cache engine if self._engine is not None and self._tp_rank == 0: loop = asyncio.get_event_loop() loop.run_until_complete(self._engine.flush_cache()) non_tensor_batch = { "messages": np.array(messages), "reward_scores": np.array(reward_scores), "request_id": np.array(request_ids), } is_multimodal = isinstance(self.processing_class, ProcessorMixin) and ( hasattr(self.processing_class, "image_processor") or hasattr(self.model_hf_config, "vision_config") ) if is_multimodal: non_tensor_batch["multi_modal_inputs"] = np.array(multi_modal_inputs, dtype=object) return DataProto( batch=batch, non_tensor_batch=non_tensor_batch, ) def _preprocess_prompt_to_async_rollout_requests(self, prompts: DataProto, n: int = 1) -> list[AsyncRolloutRequest]: assert "raw_prompt" in prompts.non_tensor_batch, ( "need data.return_raw_chat=True, due to no official way do parse_messages" ) logger.info( "n is deprecated for SGLang rollout since ray ppo trainer will repeat the prompts for rollout.n times" ) req_list = [] multi_modal_data_list = prompts.non_tensor_batch.get( "multi_modal_data", [None] * len(prompts.non_tensor_batch["raw_prompt"]) ) for data_idx, (raw_prompt, multi_modal_data) in enumerate( zip(prompts.non_tensor_batch["raw_prompt"], multi_modal_data_list, strict=True) ): if self._tool_schemas: _tools_kwargs = prompts.non_tensor_batch["tools_kwargs"][data_idx] _tool_schemas = [self._tool_map[k].get_openai_tool_schema() for k in _tools_kwargs.keys()] _input_ids = None _attention_mask = None else: _input_ids = _pre_process_inputs(self.pad_token_id, prompts.batch["input_ids"][data_idx]) _attention_mask = _pre_process_inputs(0, prompts.batch["attention_mask"][data_idx]) _tools_kwargs = {} _tool_schemas = None if self.interaction_map: _interaction_kwargs = prompts.non_tensor_batch["interaction_kwargs"][data_idx] else: _interaction_kwargs = {} req = AsyncRolloutRequest( 
batch_data_id=data_idx, rollout_offset=0, request_id=str(uuid4()), state=AsyncRolloutRequestStateEnum.PENDING, messages=raw_prompt.tolist(), multi_modal_data=multi_modal_data, tool_schemas=_tool_schemas, tools_kwargs=_tools_kwargs, interaction_kwargs=_interaction_kwargs, input_ids=_input_ids, response_ids=None, attention_mask=_attention_mask, response_attention_mask=None, response_position_ids=None, response_loss_mask=None, reward_scores={}, max_prompt_len=self.config.prompt_length, max_response_len=self.config.response_length, max_model_len=min(self.config.max_model_len, self.config.prompt_length + self.config.response_length), use_inference_chat_template=self.config.multi_turn.use_inference_chat_template, tokenization_sanity_check_mode=self.config.multi_turn.tokenization_sanity_check_mode, processing_class=self.processing_class, ) error_message = f"""Request {req.request_id} has mismatched lengths: input_ids={req.input_ids.shape[-1]}, attention_mask={req.attention_mask.shape[-1]}, position_ids={req.position_ids.shape[-1]}, loss_mask={req.loss_mask.shape[-1]}""" assert ( req.input_ids.shape[-1] == req.attention_mask.shape[-1] == req.position_ids.shape[-1] == req.loss_mask.shape[-1] ), error_message req_list.append(req) return req_list async def chat_completion(self, json_request): assert self._tp_rank == 0, "only called in tp rank 0" _input_ids = None _attention_mask = None _position_ids = None _tool_schemas = [] _tools_kwargs = {} req = AsyncRolloutRequest( request_id=str(uuid4()), state=AsyncRolloutRequestStateEnum.PENDING, messages=[Message.model_validate(msg) for msg in json_request["messages"]], tool_schemas=_tool_schemas, tools_kwargs=_tools_kwargs, input_ids=_input_ids, prompt_ids=_input_ids, response_ids=None, attention_mask=_attention_mask, prompt_attention_mask=_attention_mask, response_attention_mask=None, position_ids=_position_ids, prompt_position_ids=_position_ids, response_position_ids=None, loss_mask=None, prompt_loss_mask=None, response_loss_mask=None, reward_scores={}, max_prompt_len=self.config.prompt_length, max_response_len=self.config.response_length, max_model_len=min(self.config.max_model_len, self.config.prompt_length + self.config.response_length), use_inference_chat_template=self.config.multi_turn.use_inference_chat_template, tokenization_sanity_check_mode=self.config.multi_turn.tokenization_sanity_check_mode, processing_class=self.processing_class, ) # json_request already contains sampling_params # Filter only valid SamplingParams arguments valid_sampling_params = {} temp_sampling_params = SamplingParams() # Create temporary instance to check valid attributes for k, v in json_request.items(): if k not in ["messages", "model", "tools"] and hasattr(temp_sampling_params, k): valid_sampling_params[k] = v output = await self._handle_engine_call(req, valid_sampling_params) # it can be Dict or AsyncIterator[Dict] if isinstance(output, dict): outputs = [output] else: outputs = output # build openai chat completion format choices = [] id = None for i, content in enumerate(outputs): choices.append( { "index": i, "message": { "role": "assistant", "content": content["text"], }, "finish_reason": content["meta_info"]["finish_reason"]["type"], } ) id = content["meta_info"]["id"] return { "id": "chatcmpl-" + id, "object": "chat.completion", "created": int(time.time()), "model": json_request.get("model", "sglang_model"), "choices": choices, } # this function is left for uniform train-inference resharding async def generate( self, prompt_ids: torch.Tensor, sampling_params: 
dict[str, Any], request_id: str ) -> torch.Tensor: request_sampling_params = self.sampling_params.copy() request_sampling_params.update(sampling_params) output = await self._handle_engine_generate(prompt_ids, request_sampling_params) return output["output_ids"] async def wake_up(self): if not self.is_sleep: return await self.sharding_manager.wake_up() # pylint: disable=C2801 self.is_sleep = False # this function is left for uniform train-inference resharding async def sleep(self): if self.is_sleep: return await self.sharding_manager.sleep() self.is_sleep = True ================================================ FILE: verl_rl/verl/workers/rollout/sglang_rollout/utils.py ================================================ # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pickle from typing import Any, Iterator, Optional import numpy as np import torch import torch.distributed as dist from verl.utils.device import get_device_name def broadcast_pyobj( data: list[Any], rank: int, dist_group: Optional[torch.distributed.ProcessGroup] = None, src: int = 0, force_cpu_device: bool = False, ): """from https://github.com/sgl-project/sglang/blob/844e2f227ab0cce6ef818a719170ce37b9eb1e1b/python/sglang/srt/utils.py#L905 Broadcast inputs from src rank to all other ranks with torch.dist backend. The `rank` here refers to the source rank on the global process group (regardless of the dist_group argument). """ device = torch.device(get_device_name() if not force_cpu_device else "cpu") if rank == src: if len(data) == 0: tensor_size = torch.tensor([0], dtype=torch.long, device=device) dist.broadcast(tensor_size, src=src, group=dist_group) else: serialized_data = pickle.dumps(data) size = len(serialized_data) tensor_data = torch.ByteTensor(np.frombuffer(serialized_data, dtype=np.uint8)).to(device) tensor_size = torch.tensor([size], dtype=torch.long, device=device) dist.broadcast(tensor_size, src=src, group=dist_group) dist.broadcast(tensor_data, src=src, group=dist_group) return data else: tensor_size = torch.tensor([0], dtype=torch.long, device=device) dist.broadcast(tensor_size, src=src, group=dist_group) size = tensor_size.item() if size == 0: return [] tensor_data = torch.empty(size, dtype=torch.uint8, device=device) dist.broadcast(tensor_data, src=src, group=dist_group) serialized_data = bytes(tensor_data.cpu().numpy()) data = pickle.loads(serialized_data) return data def get_named_tensor_buckets( iterable: Iterator[tuple[str, torch.Tensor]], bucket_bytes: int ) -> Iterator[list[tuple[str, torch.Tensor]]]: """ Group tensors into buckets based on a specified size in bytes. Args: iterable: An iterator of tuples containing tensor names and tensors. bucket_bytes: The maximum size of each bucket in bytes. Yields: Lists of tuples, where each tuple contains a tensor name and its corresponding tensor.
Example: >>> tensors = [('tensor1', torch.randn(1000, 1000)), ('tensor2', torch.randn(2000, 2000))] >>> for bucket in get_named_tensor_buckets(tensors, bucket_bytes=10 * 1024 * 1024): ... print(bucket) [('tensor1', tensor(...)), ('tensor2', tensor(...))] """ if bucket_bytes <= 0: raise ValueError(f"bucket_bytes must be greater than 0, got {bucket_bytes}") current_bucket = [] current_size = 0 for name, tensor in iterable: tensor_size = tensor.element_size() * tensor.numel() if current_size + tensor_size > bucket_bytes: if current_bucket: yield current_bucket current_bucket = [(name, tensor)] current_size = tensor_size else: current_bucket.append((name, tensor)) current_size += tensor_size if current_bucket: yield current_bucket ================================================ FILE: verl_rl/verl/workers/rollout/tokenizer.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The base tokenizer class, required for any hybrid engine based rollout or inference with vLLM. """ from abc import ABC, abstractmethod import numpy as np import torch __all__ = ["HybridEngineBaseTokenizer"] class HybridEngineBaseTokenizer(ABC): """the tokenizer properties and function names should align with HF's to meet vLLM requirements""" @property @abstractmethod def vocab_size(self): """ `int`: Size of the base vocabulary (without the added tokens). """ pass @property @abstractmethod def pad_token_id(self): """ `Optional[int]`: Id of the padding token in the vocabulary. Returns `None` if the token has not been set. """ pass @property @abstractmethod def eos_token_id(self): """ `Optional[int]`: Id of the end of sentence token in the vocabulary. Returns `None` if the token has not been set. """ pass @property @abstractmethod def all_special_ids(self) -> list[int]: """ `List[int]`: List the ids of the special tokens (`'<unk>'`, `'<cls>'`, etc.) mapped to class attributes. """ pass @property @abstractmethod def all_special_tokens(self) -> list[str]: """ `List[str]`: A list of the unique special tokens (`'<unk>'`, `'<cls>'`, ..., etc.). Convert tokens of `tokenizers.AddedToken` type to string. """ pass @abstractmethod def encode(self, text): """ Converts a string to a sequence of ids (integer), using the tokenizer and vocabulary. Args: text (`str`, `List[str]` or `List[int]`): The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the `tokenize` method) or a list of integers. text_pair (`str`, `List[str]` or `List[int]`, *optional*): Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using the `tokenize` method) or a list of integers. """ pass @abstractmethod def decode( self, token_ids: int | list[int] | np.ndarray | torch.Tensor, skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, **kwargs, ) -> str: """ Converts a sequence of ids into a string, using the tokenizer and vocabulary with options to remove special tokens and clean up tokenization spaces.
Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`. Args: token_ids (`Union[int, List[int], np.ndarray, torch.Tensor]`): List of tokenized input ids. Can be obtained using the `__call__` method. skip_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not to remove special tokens in the decoding. clean_up_tokenization_spaces (`bool`, *optional*): Whether or not to clean up the tokenization spaces. If `None`, will default to `self.clean_up_tokenization_spaces`. kwargs (additional keyword arguments, *optional*): Will be passed to the underlying model specific decode method. Returns: `str`: The decoded sentence. """ pass @abstractmethod def convert_ids_to_tokens(self, ids: int | list[int], skip_special_tokens: bool = False) -> str | list[str]: """ Converts a single index or a sequence of indices in a token or a sequence of tokens, using the vocabulary and added tokens. Args: ids (`int` or `List[int]`): The token id (or token ids) to convert to tokens. skip_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not to remove special tokens in the decoding. Returns: `str` or `List[str]`: The decoded token(s). """ pass @abstractmethod def get_added_vocab(self) -> dict[str, int]: """ Returns the added tokens in the vocabulary as a dictionary of token to index. Results might be different from the fast call because for now we always add the tokens even if they are already in the vocabulary. This is something we should change. Returns: `Dict[str, int]`: The added tokens. """ pass @abstractmethod def convert_tokens_to_string(self, tokens: list[str]) -> str: """ Converts a sequence of tokens in a single string. The most simple way to do it is `" ".join(tokens)` but we often want to remove sub-word tokenization artifacts at the same time. Args: tokens (`List[str]`): The token to join in a string. Returns: `str`: The joined tokens. """ pass @property def is_fast(self): return False ================================================ FILE: verl_rl/verl/workers/rollout/vllm_rollout/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from importlib.metadata import PackageNotFoundError, version from .vllm_rollout_spmd import vLLMAsyncRollout, vLLMRollout # noqa: F401 def get_version(pkg): try: return version(pkg) except PackageNotFoundError: return None vllm_package_name = "vllm" vllm_package_version = get_version(vllm_package_name) if vllm_package_version is None: raise PackageNotFoundError( "To use vllm rollout, please ensure the 'vllm' package is properly installed. 
See " "https://verl.readthedocs.io/en/latest/start/install.html for more details" ) if "ROCM_PATH" in os.environ: import re match = re.match(r"(\d+\.\d+\.?\d*)", vllm_package_version) if match: vllm_package_version = match.group(1) else: raise ValueError(f"Warning: Could not parse version format: {vllm_package_version}") ================================================ FILE: verl_rl/verl/workers/rollout/vllm_rollout/vllm_async_server.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os import pickle from typing import Any, Callable, Optional import ray import zmq from omegaconf import DictConfig from starlette.requests import Request from starlette.responses import JSONResponse, StreamingResponse from vllm import SamplingParams from vllm.engine.arg_utils import AsyncEngineArgs from vllm.entrypoints.logger import RequestLogger from vllm.entrypoints.openai.protocol import ChatCompletionRequest, ChatCompletionResponse, ErrorResponse from vllm.entrypoints.openai.serving_chat import OpenAIServingChat from vllm.entrypoints.openai.serving_models import BaseModelPath, OpenAIServingModels from vllm.inputs import TokensPrompt from vllm.outputs import RequestOutput from vllm.v1.engine.async_llm import AsyncLLM from vllm.v1.executor.abstract import Executor from vllm.worker.worker_base import WorkerWrapperBase from verl.utils.fs import copy_to_local from verl.workers.rollout.async_server import AsyncServerBase logger = logging.getLogger(__file__) def _get_model_runner_workers(vllm_config, init_ray: bool = True): assert vllm_config.instance_id is not None, "instance_id must be set for external ray actors." fields = vllm_config.instance_id.split(":") assert len(fields) == 4, ( f"instance_id: {vllm_config.instance_id} must be in the format of " f":::." ) namespace, wg_prefix, vllm_dp_size, vllm_dp_rank = fields[0], fields[1], int(fields[2]), int(fields[3]) # Make sure subprocess in same namespace as parent actor. # actor name format: {name_prefix}WorkerDict_{pg_idx}:{local_rank} if init_ray: ray.init(namespace=namespace) actor_names = [ actor_name for actor_name in ray.util.list_named_actors() if actor_name.startswith(f"{wg_prefix}WorkerDict") ] vllm_tp_size = vllm_config.parallel_config.tensor_parallel_size assert len(actor_names) == vllm_dp_size * vllm_tp_size, ( f"instance_id: {vllm_config.instance_id} has {len(actor_names)} actors, but vllm_dp_size: " f"{vllm_dp_size} * vllm_tp_size: {vllm_tp_size} = {vllm_dp_size * vllm_tp_size} is expected." 
) def get_pg_index_and_local_rank(actor_name) -> tuple[int, int]: fields = actor_name.split(":") assert len(fields) == 2, f"invalid actor name: {actor_name}" pg_index, local_rank = int(fields[0].split("_")[-1]), int(fields[1]) return pg_index, local_rank # sort actor names by pg_index and local_rank actor_names = sorted(actor_names, key=get_pg_index_and_local_rank) actor_names = actor_names[vllm_dp_rank * vllm_tp_size : (vllm_dp_rank + 1) * vllm_tp_size] workers: list[WorkerWrapperBase] = [ray.get_actor(actor_name) for actor_name in actor_names] print(f"instance_id: {vllm_config.instance_id} initializes with external actors: {actor_names}") return workers class ExternalRayDistributedExecutor(Executor): """An executor that engines are launched by external ray actors.""" uses_ray: bool = False def _init_executor(self) -> None: self.workers = _get_model_runner_workers(vllm_config=self.vllm_config, init_ray=True) kwargs = dict( vllm_config=self.vllm_config, local_rank=None, rank=None, distributed_init_method="env://", is_driver_worker=True, ) self.collective_rpc("init_worker", args=([kwargs],)) self.collective_rpc("init_device") self.collective_rpc("load_model") print(f"instance_id: {self.vllm_config.instance_id} initializes finished.") def collective_rpc( self, method: str | Callable, timeout: Optional[float] = None, args: tuple = (), kwargs: Optional[dict[str, Any]] = None, ) -> list[Any]: # TODO(wuxibin): support ray compiled graph if isinstance(method, str): sent_method = method else: sent_method = pickle.dumps(method) del method # ~3ms overhead per schedule step due to SchedulerOutput/ModelRunnerOutput serialization/deserialization. outputs = ray.get( [worker.execute_method.remote(sent_method, *args, **(kwargs or {})) for worker in self.workers] ) return outputs def check_health(self): return class ExternalZeroMQDistributedExecutor(Executor): """An executor that engines are launched by external ray actors.""" uses_ray: bool = False def _init_executor(self) -> None: addresses = os.environ["VERL_VLLM_ZMQ_ADDRESSES"].split(",") self.context = zmq.Context() self.sockets = [] for address in addresses: socket = self.context.socket(zmq.REQ) socket.connect(address) self.sockets.append(socket) kwargs = dict( vllm_config=self.vllm_config, local_rank=None, rank=None, distributed_init_method="env://", is_driver_worker=True, ) self.collective_rpc("init_worker", args=([kwargs],)) self.collective_rpc("init_device") self.collective_rpc("load_model") def collective_rpc( self, method: str | Callable, timeout: Optional[float] = None, args: tuple = (), kwargs: Optional[dict[str, Any]] = None, ) -> list[Any]: if isinstance(method, str): sent_method = method else: sent_method = pickle.dumps(method) del method message = pickle.dumps((sent_method, args, kwargs or {})) for socket in self.sockets: socket.send(message, zmq.DONTWAIT) outputs = [] for socket in self.sockets: outputs.append(pickle.loads(socket.recv())) return outputs def check_health(self): return @ray.remote(num_cpus=1) class AsyncvLLMServer(AsyncServerBase): """ AsyncvLLMServer is a wrapper for AsyncLLM, it uses ExternalRayDistributedExecutor to launch engines in hybrid rollout workers, i.e AsyncActorRolloutRefWorker. AsyncvLLMServer works as follows: 1. Start FastAPI server first. 2. Initialize AsyncLLM with ExternalRayDistributedExecutor. 3. AsyncLLM spawn EngineCore in subprocess. 4. EngineCore initialize ExternalRayDistributedExecutor. 5. ExternalRayDistributedExecutor lookup its corresponding actors by name. 6. 
ExternalRayDistributedExecutor init executor: init_worker, init_device, load_model. For vLLM AsyncLLM design, see: https://github.com/vllm-project/vllm/pull/9826 """ def __init__(self, config: DictConfig, vllm_dp_size: int, vllm_dp_rank: int, wg_prefix: str): """ Args: config: DictConfig. vllm_dp_size: int, vllm data parallel size. vllm_dp_rank: int, vllm data parallel rank. wg_prefix: str, worker group prefix, used to lookup actors. """ super().__init__() self.config = config.actor_rollout_ref self.vllm_dp_size = vllm_dp_size self.vllm_dp_rank = vllm_dp_rank self.wg_prefix = wg_prefix self.engine: AsyncLLM = None async def init_engine(self): """Init vLLM AsyncLLM engine.""" config = self.config model_path = config.model.path model_name = "/".join(model_path.split("/")[-2:]) local_path = copy_to_local(model_path) trust_remote_code = config.model.get("trust_remote_code", False) config = config.rollout tensor_parallel_size = config.get("tensor_model_parallel_size", 1) max_num_batched_tokens = config.get("max_num_batched_tokens", 8192) max_model_len = config.max_model_len if config.max_model_len else config.prompt_length + config.response_length self.max_model_len = int(max_model_len) # Override default generation config from hugging face model config, # user can still override them by passing kwargs in each request. kwargs = dict( n=1, logprobs=0, repetition_penalty=1.0, max_new_tokens=config.response_length, ) for k in config.keys(): if hasattr(SamplingParams(), str(k)): kwargs[k] = config.get(k) print(f"override_generation_config: {kwargs}") backend = os.environ.get("VERL_VLLM_DISTRIBUTED_BACKEND", "zeromq") if backend == "zeromq": distributed_executor_backend = ExternalZeroMQDistributedExecutor elif backend == "ray": distributed_executor_backend = ExternalRayDistributedExecutor else: distributed_executor_backend = None engine_args = AsyncEngineArgs( model=local_path, enable_sleep_mode=config.free_cache_engine, override_generation_config=kwargs, tensor_parallel_size=tensor_parallel_size, distributed_executor_backend=distributed_executor_backend, dtype=config.dtype, enforce_eager=config.enforce_eager, gpu_memory_utilization=config.gpu_memory_utilization, disable_custom_all_reduce=True, skip_tokenizer_init=False, max_model_len=self.max_model_len, load_format="auto", disable_log_stats=config.disable_log_stats, max_num_batched_tokens=max_num_batched_tokens, enable_chunked_prefill=config.enable_chunked_prefill, enable_prefix_caching=True, trust_remote_code=trust_remote_code, seed=config.get("seed", 0), ) # init async llm engine vllm_config = self._create_engine_config(engine_args) self.engine = AsyncLLM.from_vllm_config(vllm_config) # build serving chat model_config = self.engine.model_config BASE_MODEL_PATHS = [BaseModelPath(name=model_name, model_path=model_path)] models = OpenAIServingModels(self.engine, model_config, BASE_MODEL_PATHS) self.openai_serving_chat = OpenAIServingChat( self.engine, model_config, models, "assistant", request_logger=RequestLogger(max_log_len=4096), chat_template=None, chat_template_content_format="auto", enable_auto_tools=config.multi_turn.tool_config_path is not None, tool_parser=config.multi_turn.format, # hermes, llama3_json, ... 
) def _create_engine_config(self, engine_args: AsyncEngineArgs): vllm_config = engine_args.create_engine_config() namespace = ray.get_runtime_context().namespace vllm_config.instance_id = f"{namespace}:{self.wg_prefix}:{self.vllm_dp_size}:{self.vllm_dp_rank}" # VERL_VLLM_ZMQ_ADDRESSES if engine_args.distributed_executor_backend == ExternalZeroMQDistributedExecutor: workers = _get_model_runner_workers(vllm_config=vllm_config, init_ray=False) zmq_addresses = ray.get([worker.get_zeromq_address.remote() for worker in workers]) print(f"VERL_VLLM_ZMQ_ADDRESSES: {zmq_addresses}") os.environ["VERL_VLLM_ZMQ_ADDRESSES"] = ",".join(zmq_addresses) return vllm_config async def chat_completion(self, raw_request: Request): """OpenAI-compatible HTTP endpoint. API reference: https://docs.vllm.ai/en/latest/serving/openai_compatible_server.html """ request_json = await raw_request.json() request = ChatCompletionRequest(**request_json) generator = await self.openai_serving_chat.create_chat_completion(request, raw_request) if isinstance(generator, ErrorResponse): return JSONResponse(content=generator.model_dump(), status_code=generator.code) if request.stream: return StreamingResponse(content=generator, media_type="text/event-stream") else: assert isinstance(generator, ChatCompletionResponse) return JSONResponse(content=generator.model_dump()) async def generate(self, prompt_ids: list[int], sampling_params: dict[str, Any], request_id: str) -> list[int]: max_tokens = self.max_model_len - len(prompt_ids) sampling_params = SamplingParams(max_tokens=max_tokens, **sampling_params) prompt = TokensPrompt(prompt_token_ids=prompt_ids) generator = self.engine.generate(prompt=prompt, sampling_params=sampling_params, request_id=request_id) # Get final response final_res: Optional[RequestOutput] = None async for output in generator: final_res = output assert final_res is not None return final_res.outputs[0].token_ids async def wake_up(self): if self.config.rollout.free_cache_engine: await self.engine.wake_up() async def sleep(self): # TODO: https://github.com/vllm-project/vllm/issues/17103 await self.engine.reset_prefix_cache() if self.config.rollout.free_cache_engine: await self.engine.sleep() ================================================ FILE: verl_rl/verl/workers/rollout/vllm_rollout/vllm_rollout_spmd.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The vllm_rollout that can be applied in different backend When working with FSDP: - Use DTensor weight loader (recommended) or HF weight loader - Utilize state_dict from the FSDP to synchronize the weights among tp ranks in vLLM When working with Megatron: - Use Megatron weight loader - During training, only the current pp stage holds the parameters - Before inference, broadcast the parameters of the current pp rank to all other pp ranks (all pp ranks holds all the parameters) - Bind the parameters to the inference engine - Do inference in tp. 
pp is treated as additional dp - After inference, all the parameters that don't belong to this pp rank are freed. """ import logging import os import pickle import socket import threading from contextlib import contextmanager from copy import deepcopy from types import MethodType from typing import Any import numpy as np import ray import torch import torch.distributed import zmq from filelock import FileLock from omegaconf import DictConfig, OmegaConf from tensordict import TensorDict from vllm import LLM, SamplingParams from vllm.distributed import parallel_state as vllm_ps from vllm.lora.request import LoRARequest from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.worker.worker_base import WorkerWrapperBase from verl import DataProto from verl.utils.profiler import GPUMemoryLogger from verl.utils.torch_functional import get_response_mask, pad_2d_list_to_length from verl.workers.rollout.base import BaseRollout logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) # TODO # 1. support pp in vllm # 2. passing tokenizer is not necessary? no encoding/decoding is happening here # 3. simplify init logics # NOTE(sgm): add for verl. We can optimize it by making the dataloader yield List[int] without padding. def _pre_process_inputs(pad_token_id, prompt_token_ids: torch.Tensor) -> list[int]: # remove the left padding in the prompt token_id # pad_token_id = self.llm_engine.tokenizer.pad_token_id if self.llm_engine.tokenizer.pad_token_id # is not None else self.llm_engine.tokenizer.eos_token_id non_pad_index = torch.nonzero(prompt_token_ids != pad_token_id, as_tuple=False)[0][0] token_ids = prompt_token_ids[non_pad_index:].tolist() return token_ids class vLLMRollout(BaseRollout): def __init__(self, model_path: str, config: DictConfig, tokenizer, model_hf_config, **kwargs): """A vLLM rollout. It requires that the module is supported by vLLM.
Args: module: module here follows huggingface APIs config: DictConfig tokenizer: the task/model tokenizer model_hf_config: the huggingface config to initialize the generating model in vllm **kwargs: train_tp, for Megatron Backend to initialize hybrid engine (zero redundancy) process group """ super().__init__() self.config = config tensor_parallel_size = self.config.get("tensor_model_parallel_size", 1) assert tensor_parallel_size <= torch.distributed.get_world_size(), ( "tensor parallel size should be less than or equal to the world size" ) max_num_batched_tokens = self.config.get("max_num_batched_tokens", 8192) if kwargs.get("train_tp") is not None: # deployed with megatron import os os.environ["CUDA_TIMER_STREAM_KAFKA_ENABLE"] = "0" os.environ["MEGATRON_IMPORT_TIMERS"] = "0" vllm_ps.initialize_model_parallel(tensor_model_parallel_size=tensor_parallel_size) rope_scaling_config = getattr(model_hf_config, "rope_scaling", None) if not rope_scaling_config: max_position_embeddings = None if hasattr(model_hf_config, "max_position_embeddings"): max_position_embeddings = model_hf_config.max_position_embeddings elif hasattr(model_hf_config, "llm_config") and hasattr( model_hf_config.llm_config, "max_position_embeddings" ): max_position_embeddings = model_hf_config.llm_config.max_position_embeddings elif hasattr(model_hf_config, "text_config") and hasattr( model_hf_config.text_config, "max_position_embeddings" ): max_position_embeddings = model_hf_config.text_config.max_position_embeddings if max_position_embeddings is None: raise ValueError("max_position_embeddings not found in model_hf_config") assert max_position_embeddings >= config.prompt_length + config.response_length, ( "model context length should be greater than total sequence length" ) else: # handle the case where there's a length extension factor # see https://qwen.readthedocs.io/en/latest/deployment/vllm.html#extended-context-support # for using yarn as an example rope_scaling_factor = rope_scaling_config.get("factor", 1.0) assert ( model_hf_config.max_position_embeddings * rope_scaling_factor >= config.prompt_length + config.response_length ), ( "model context length should be greater than total sequence length, " + f"got rope_scaling_factor={rope_scaling_factor} and " + f"max_position_embeddings={model_hf_config.max_position_embeddings}" ) max_model_len = int(config.max_model_len or config.prompt_length + config.response_length) if max_num_batched_tokens < max_model_len and self.config.enable_chunked_prefill: raise ValueError( "Chunked prefill is enabled but max_num_batched_tokens is smaller than max_model_len, \ please increase max_num_batched_tokens or disable chunked prefill" ) trust_remote_code = kwargs.get("trust_remote_code", False) load_format = "dummy" if config.load_format.startswith("dummy") else config.load_format lora_kwargs = kwargs.pop("lora_kwargs", {}) self.lora_kwargs = lora_kwargs # copy it to avoid secretly modifying the engine config engine_kwargs = ( {} if "engine_kwargs" not in config or "vllm" not in config.engine_kwargs else OmegaConf.to_container(deepcopy(config.engine_kwargs.vllm)) ) # For each vLLM engine parameter, # - `None` means not setting it, so we pop it, and leave it to vLLM default value # (which can vary across different vLLM versions); # - Otherwise it's the desired value we want to explicitly set.
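To make that convention concrete, here is a tiny, hypothetical illustration (the key names are made up for the sketch; the actual filtering is the comprehension that follows):

```python
from omegaconf import OmegaConf

# Hypothetical engine_kwargs section of a rollout config: a key left as null in
# YAML arrives here as None and is dropped, so vLLM's own default applies.
cfg = OmegaConf.create({"swap_space": None, "max_seq_len_to_capture": 8192})
engine_kwargs = {k: v for k, v in OmegaConf.to_container(cfg).items() if v is not None}
assert engine_kwargs == {"max_seq_len_to_capture": 8192}
```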
engine_kwargs = {key: val for key, val in engine_kwargs.items() if val is not None} if config.get("limit_images", None): # support for multi-image data engine_kwargs["limit_mm_per_prompt"] = {"image": config.get("limit_images")} self.inference_engine = LLM( model=model_path, enable_sleep_mode=config.free_cache_engine, tensor_parallel_size=tensor_parallel_size, distributed_executor_backend="external_launcher", dtype=config.dtype, enforce_eager=config.enforce_eager, gpu_memory_utilization=config.gpu_memory_utilization, disable_custom_all_reduce=True, skip_tokenizer_init=False, max_model_len=max_model_len, load_format=load_format, disable_log_stats=config.disable_log_stats, max_num_batched_tokens=max_num_batched_tokens, enable_chunked_prefill=config.enable_chunked_prefill, enable_prefix_caching=True, trust_remote_code=trust_remote_code, seed=config.get("seed", 0), **lora_kwargs, **engine_kwargs, ) # Offload vllm model to reduce peak memory usage if config.free_cache_engine: self.inference_engine.sleep(level=1) kwargs = dict( n=1, logprobs=0, # can be set to 0 and let actor to recompute max_tokens=config.response_length, ) kwargs["detokenize"] = False # supporting adding any sampling params from the config file for k in config.keys(): if hasattr(SamplingParams(), str(k)) and k != "seed": kwargs[k] = config.get(k) kwargs["n"] = 1 # already repeat in ray_trainer print(f"kwargs: {kwargs}") self.sampling_params = SamplingParams(**kwargs) self.pad_token_id = tokenizer.pad_token_id @contextmanager def update_sampling_params(self, **kwargs): # update sampling params old_sampling_params_args = {} if kwargs: for key, value in kwargs.items(): if hasattr(self.sampling_params, key): old_value = getattr(self.sampling_params, key) old_sampling_params_args[key] = old_value setattr(self.sampling_params, key, value) yield # roll back to previous sampling params # if len(old_sampling_params_args): for key, value in old_sampling_params_args.items(): setattr(self.sampling_params, key, value) @GPUMemoryLogger(role="vllm rollout spmd", logger=logger) @torch.no_grad() def generate_sequences(self, prompts: DataProto, **kwargs) -> DataProto: """Generate sequences for a batch of prompts. Args: batch (DataProto): Input batch. Returns: DataProto: Output batch. - prompts: [bsz, prompt_length], prompt token ids from dataset. - responses: [bsz, response_length], output token ids include response tokens from LLM generation and observation tokens from tool_calls. - response_mask: [bsz, response_length], 1 for LLM generated tokens, 0 for observation/padding tokens. - input_ids: [bsz, prompt_length + response_length], whole sequence token ids, including prompt tokens and response tokens. - attention_mask: [bsz, prompt_length + response_length], 0 for padding tokens, 1 for other tokens. - position_ids: [bsz, prompt_length + response_length], incremental position ids. 
For multi-turn conversations: responses: |<- LLM generation ->|<- tool_calls ->|<- LLM generation ->|<- padding ->| response_mask: | 1, 1, 1, ..., 1, 1 | 0, 0, .., 0, 0 | 1, 1, 1, ..., 1, 1 | 0, 0, ..., 0| """ idx = prompts.batch["input_ids"] # (bs, prompt_length) # left-padded attention_mask attention_mask = prompts.batch["attention_mask"] position_ids = prompts.batch["position_ids"] # used to construct attention_mask eos_token_id = prompts.meta_info["eos_token_id"] batch_size = idx.size(0) non_tensor_batch = prompts.non_tensor_batch if "raw_prompt_ids" not in non_tensor_batch: non_tensor_batch["raw_prompt_ids"] = np.array( [_pre_process_inputs(self.pad_token_id, idx[i]) for i in range(batch_size)], dtype=object ) if batch_size != len(non_tensor_batch["raw_prompt_ids"]): raise RuntimeError("vllm sharding manager is not working properly.") if "multi_modal_data" in non_tensor_batch: vllm_inputs = [] for raw_prompt_ids, multi_modal_data in zip( non_tensor_batch.pop("raw_prompt_ids"), non_tensor_batch.pop("multi_modal_data"), strict=True ): vllm_inputs.append({"prompt_token_ids": raw_prompt_ids, "multi_modal_data": multi_modal_data}) else: vllm_inputs = [ {"prompt_token_ids": raw_prompt_ids} for raw_prompt_ids in non_tensor_batch.pop("raw_prompt_ids") ] # ensure the type of `prompt_token_ids` passed to vllm is list[int] # https://github.com/volcengine/verl/pull/772 for input_data in vllm_inputs: if isinstance(input_data["prompt_token_ids"], np.ndarray): input_data["prompt_token_ids"] = input_data["prompt_token_ids"].tolist() elif not isinstance(input_data["prompt_token_ids"], list): raise TypeError( f"prompt_token_ids must be a list or numpy array, got {type(input_data['prompt_token_ids'])}" ) do_sample = prompts.meta_info.get("do_sample", True) is_validate = prompts.meta_info.get("validate", False) if not do_sample: kwargs = { "best_of": 1, "top_p": 1.0, "top_k": -1, "min_p": 0.0, "temperature": 0, "n": 1, # if greedy, only 1 response } elif is_validate: # TODO: try ** kwargs = { "top_k": self.config.val_kwargs.top_k, "top_p": self.config.val_kwargs.top_p, "temperature": self.config.val_kwargs.temperature, "n": 1, # if validate, already repeat in ray_trainer } lora_requests = None if self.lora_kwargs: lora_int_ids = list(self.inference_engine.llm_engine.list_loras()) if len(lora_int_ids) > 0: lora_int_id = lora_int_ids[0] lora_requests = [ LoRARequest(lora_name=f"{lora_int_id}", lora_int_id=lora_int_id, lora_path="/simon-stub-path") ] * batch_size # users can customize different sampling_params at different runs with self.update_sampling_params(**kwargs): outputs = self.inference_engine.generate( prompts=vllm_inputs, # because we have already converted it to prompt token ids sampling_params=self.sampling_params, lora_request=lora_requests, use_tqdm=False, ) # TODO(sgm): disable logprob when recompute_log_prob is enabled # if n = 1: (bs, response_length) ; if n > 1: (bs * n, response_length) response = [] rollout_log_probs = [] for output in outputs: for sample_id in range(len(output.outputs)): response_ids = output.outputs[sample_id].token_ids response.append(response_ids) if self.config.calculate_log_probs: curr_log_prob = [] for i, logprob in enumerate(output.outputs[sample_id].logprobs): curr_log_prob.append(logprob[response_ids[i]].logprob) rollout_log_probs.append(curr_log_prob) response = pad_2d_list_to_length(response, self.pad_token_id, max_length=self.config.response_length).to( idx.device ) if self.config.calculate_log_probs: rollout_log_probs = pad_2d_list_to_length( rollout_log_probs,
-1, max_length=self.config.response_length ).to(idx.device) rollout_log_probs = rollout_log_probs.to(torch.float32) seq = torch.cat([idx, response], dim=-1) response_length = response.size(1) delta_position_id = torch.arange(1, response_length + 1, device=position_ids.device) delta_position_id = delta_position_id.unsqueeze(0).expand(batch_size, -1) if position_ids.dim() == 3: # qwen2vl mrope delta_position_id = delta_position_id.view(batch_size, 1, -1).expand(batch_size, 3, -1) # TODO(sgm): fix position_ids on right_pad # prompt: left pad + response: right pad # attention_mask: [0,0,0,0,1,1,1,1, | 1,1,1,0,0,0,0,0] # position_ids: [0,0,0,0,0,1,2,3, | 4,5,6,7,8,9,10,11] response_position_ids = position_ids[..., -1:] + delta_position_id position_ids = torch.cat([position_ids, response_position_ids], dim=-1) response_attention_mask = get_response_mask( response_id=response, eos_token=eos_token_id, dtype=attention_mask.dtype ) attention_mask = torch.cat((attention_mask, response_attention_mask), dim=-1) # all the tp ranks should contain the same data here. data in all ranks are valid batch = TensorDict( { "prompts": idx, "responses": response, "input_ids": seq, # here input_ids become the whole sentences "attention_mask": attention_mask, "position_ids": position_ids, }, batch_size=batch_size, ) if self.config.calculate_log_probs: # we will recompute old log prob with actor batch["rollout_log_probs"] = rollout_log_probs return DataProto(batch=batch, non_tensor_batch=non_tensor_batch) # https://github.com/vllm-project/vllm/issues/13175 def _monkey_patch_compute_logits(model, vocab_size: int): original_compute_logits = model.compute_logits def compute_logits( self, hidden_states: torch.Tensor, sampling_metadata: SamplingMetadata, ) -> torch.Tensor: logits = original_compute_logits(hidden_states, sampling_metadata) logits[..., vocab_size:] = float("-inf") return logits model.compute_logits = MethodType(compute_logits, model) class vLLMAsyncRollout: """vLLMAsyncRollout is a thin wrapper of WorkerWrapperBase, which is engine in single worker process. 
""" def __init__(self, model_path: str, config: DictConfig, tokenizer, model_hf_config, **kwargs): self.tokenizer = tokenizer # Engine is deferred to be initialized in init_worker self.config = config self.inference_engine: WorkerWrapperBase = None self.sharding_manager = None self.is_sleep = False self.address = self._init_zeromq() def _init_zeromq(self) -> str: tensor_parallel_size = self.config.tensor_model_parallel_size # single node: ipc, multi nodes: tcp local_world_size = int(os.environ["RAY_LOCAL_WORLD_SIZE"]) socket_type = "ipc" if tensor_parallel_size <= local_world_size else "tcp" # File lock to prevent multiple workers listen to same port with FileLock("/tmp/verl_vllm_zmq.lock"): if socket_type == "ipc": pid = os.getpid() address = f"ipc:///tmp/verl_vllm_zmq_{pid}.ipc" else: ip, port = self._get_free_port() address = f"tcp://{ip}:{port}" context = zmq.Context() self.socket = context.socket(zmq.REP) self.socket.bind(address) self.loop_thread = threading.Thread(target=self._loop_forever) self.loop_thread.start() return address def _get_free_port(self): ip = ray.util.get_node_ip_address() with socket.socket() as sock: sock.bind(("", 0)) port = sock.getsockname()[1] return ip, port def _loop_forever(self): while True: message = self.socket.recv() method, args, kwargs = pickle.loads(message) result = self.execute_method(method, *args, **kwargs) self.socket.send(pickle.dumps(result)) def get_zeromq_address(self): return self.address def init_worker(self, all_kwargs: list[dict[str, Any]]): """Initialize worker engine.""" all_kwargs[0]["rank"] = int(os.environ["RANK"]) all_kwargs[0]["local_rank"] = 0 self.vllm_config = all_kwargs[0]["vllm_config"] self.inference_engine = WorkerWrapperBase(vllm_config=self.vllm_config) self.inference_engine.init_worker(all_kwargs) def load_model(self, *args, **kwargs): self.inference_engine.load_model(*args, **kwargs) # inference engine is initialized now, update sharding manager self.sharding_manager.inference_engine = self.inference_engine self.sharding_manager.model_runner = self.inference_engine.worker.model_runner _monkey_patch_compute_logits(self.inference_engine.worker.model_runner.model, len(self.tokenizer)) def sleep(self, *args, **kwargs): """Offload model weights and discard kv cache.""" if self.is_sleep: return self.sharding_manager.__exit__(None, None, None) self.is_sleep = True def wake_up(self, *args, **kwargs): """Load model weights and build kv cache.""" if not self.is_sleep: return self.sharding_manager.__enter__() # pylint: disable=C2801 self.is_sleep = False def execute_method(self, method: str | bytes, *args, **kwargs): if method == "init_worker": return self.init_worker(*args, **kwargs) elif method == "load_model": return self.load_model(*args, **kwargs) elif method == "sleep": return self.sleep(*args, **kwargs) elif method == "wake_up": return self.wake_up(*args, **kwargs) else: return self.inference_engine.execute_method(method, *args, **kwargs) ================================================ FILE: verl_rl/verl/workers/sharding_manager/__init__.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: verl_rl/verl/workers/sharding_manager/base.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Sharding manager to implement HybridEngine """ from verl import DataProto class BaseShardingManager: def __init__(self): self.timing = {} def __enter__(self): pass def __exit__(self, exc_type, exc_value, traceback): pass def preprocess_data(self, data: DataProto) -> DataProto: return data def postprocess_data(self, data: DataProto) -> DataProto: return data ================================================ FILE: verl_rl/verl/workers/sharding_manager/fsdp_sglang.py ================================================ # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
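A side note on the ZeroMQ plumbing above: ExternalZeroMQDistributedExecutor.collective_rpc and vLLMAsyncRollout._loop_forever together form a pickle-over-REQ/REP remote procedure call. A minimal, self-contained sketch of that pattern, with toy names and an inproc transport standing in for the real ipc/tcp addresses:

```python
import pickle
import threading

import zmq

def execute_method(method, *args, **kwargs):
    # Toy stand-in for the worker-side execute_method dispatch.
    if method == "add":
        return sum(args)
    raise ValueError(f"unknown method: {method}")

context = zmq.Context()
rep = context.socket(zmq.REP)
rep.bind("inproc://toy_rpc")  # the real code binds ipc:///tmp/... or tcp://ip:port

def serve_once():
    # One REQ/REP round trip: receive a pickled (method, args, kwargs) tuple,
    # execute it, and reply with the pickled result.
    method, args, kwargs = pickle.loads(rep.recv())
    rep.send(pickle.dumps(execute_method(method, *args, **kwargs)))

server = threading.Thread(target=serve_once)
server.start()

req = context.socket(zmq.REQ)
req.connect("inproc://toy_rpc")
req.send(pickle.dumps(("add", (1, 2, 3), {})))
print(pickle.loads(req.recv()))  # -> 6
server.join()
```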
import asyncio import logging import os import torch import torch.distributed as dist from sglang.srt.entrypoints.engine import Engine from sglang.srt.model_executor.model_runner import LocalSerializedTensor try: from sglang.srt.utils import TorchPatchMultiprocessingSerializer as MultiprocessingSerializer except ImportError: from sglang.srt.utils import MultiprocessingSerializer from torch.distributed.device_mesh import DeviceMesh from torch.distributed.fsdp.api import FullStateDictConfig, ShardedStateDictConfig, StateDictType from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.tensor import DTensor from verl import DataProto from verl.protocol import all_gather_data_proto from verl.utils.device import get_device_id, get_torch_device from verl.utils.fsdp_utils import fsdp_version, load_fsdp_model_to_gpu, offload_fsdp_model_to_cpu from verl.utils.model import convert_weight_keys from verl.utils.profiler import GPUMemoryLogger, log_gpu_memory_usage, simple_timer from verl.utils.torch_functional import check_device_is_available from verl.workers.rollout.sglang_rollout.utils import get_named_tensor_buckets from .base import BaseShardingManager # from vllm.distributed import parallel_state as sglang_ps logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) def _preprocess_tensor_for_update_weights(tensor: torch.Tensor): if isinstance(tensor, DTensor): return tensor.full_tensor() return tensor class FSDPSGLangShardingManager(BaseShardingManager): @check_device_is_available() def __init__( self, module: FSDP, inference_engine: Engine, model_config, rollout_config, full_params: bool = False, device_mesh: DeviceMesh = None, offload_param: bool = False, multi_stage_wake_up: bool = False, ): self.module = module self.inference_engine = inference_engine self.model_config = model_config self.rollout_config = rollout_config self.device_mesh = device_mesh self.offload_param = offload_param self.multi_stage_wake_up = multi_stage_wake_up # Full params self.full_params = full_params if full_params and fsdp_version(self.module) == 1: FSDP.set_state_dict_type( self.module, state_dict_type=StateDictType.FULL_STATE_DICT, state_dict_config=FullStateDictConfig() ) elif fsdp_version(self.module) == 1: FSDP.set_state_dict_type( self.module, state_dict_type=StateDictType.SHARDED_STATE_DICT, state_dict_config=ShardedStateDictConfig(), ) self.tp_size = self.device_mesh["infer_tp"].size() self.tp_rank = self.device_mesh["infer_tp"].get_local_rank() # Note that torch_random_states may be different on each dp rank self.torch_random_states = get_torch_device().get_rng_state() # get a random rng states if self.device_mesh is not None: gen_dp_rank = self.device_mesh["dp"].get_local_rank() get_torch_device().manual_seed(gen_dp_rank + 1000) # make sure all tp ranks have the same random states self.gen_random_states = get_torch_device().get_rng_state() get_torch_device().set_rng_state(self.torch_random_states) else: self.gen_random_states = None @GPUMemoryLogger(role="FSDPSGLangShardingManager enter", logger=logger) def __enter__(self): self.timing = {} with simple_timer("reshard", self.timing): loop = asyncio.get_event_loop() loop.run_until_complete(self.wake_up()) @GPUMemoryLogger(role="FSDPSGLangShardingManager exit", logger=logger) def __exit__(self, exc_type, exc_value, traceback): loop = asyncio.get_event_loop() loop.run_until_complete(self.sleep()) async def update_weights(self, params): # Most naive implementation, 
can optimize a lot if it is bottleneck from sglang Engine weight update named_tensors = [(k, v) for k, v in params.items()] load_format = None # convert megabytes to bytes update_weights_bucket_bytes = int(self.rollout_config.update_weights_bucket_megabytes) << 20 for batch in get_named_tensor_buckets(named_tensors, update_weights_bucket_bytes): # On each rank, serialize a batch of (name, tensor) tuples. # named_tensors_batch will be a list like: # [(name0, serialized_tensor0_tp0), (name1, serialized_tensor1_tp0), ...] named_tensors_batch = [ (name, MultiprocessingSerializer.serialize(_preprocess_tensor_for_update_weights(tensor))) for name, tensor in batch ] if self.device_mesh["infer_tp"].get_local_rank() == 0: # On rank 0, prepare a list to hold the gathered batches from all ranks. gathered_serialized_batches = [None for _ in range(self.device_mesh["infer_tp"].mesh.size()[0])] else: gathered_serialized_batches = None # Gather the named_tensors_batch from all ranks to rank 0. # After this, on rank 0, gathered_serialized_batches will be a list of lists: # [ [ (name0, s_t0_tp0), (name1, s_t1_tp0), ... ], # batch from TP rank 0 # [ (name0, s_t0_tp1), (name1, s_t1_tp1), ... ], # batch from TP rank 1 # ... ] # On other ranks, gathered_serialized_batches will be None. dist.gather_object( obj=named_tensors_batch, object_gather_list=gathered_serialized_batches, dst=self.device_mesh["infer_tp"].mesh.tolist()[0], group=self.device_mesh["infer_tp"].get_group(), ) if self.device_mesh["infer_tp"].get_local_rank() == 0: # Use zip(*) to "transpose" the data structure. # This groups the serialized parts for each individual tensor across all TP ranks. # Example: from [[(n0, t0_tp0), (n1, t1_tp0)], [(n0, t0_tp1), (n1, t1_tp1)]] # to [ ( (n0, t0_tp0), (n0, t0_tp1) ), ( (n1, t1_tp0), (n1, t1_tp1) ) ] logical_tensors = zip(*gathered_serialized_batches, strict=True) await self.inference_engine.update_weights_from_tensor( named_tensors=[ # 'tensor_group' represents a single logical tensor's data from all ranks. ( tensor_group[0][0], # Get the name from the first rank's data. LocalSerializedTensor( # 'rank_part' is the (name, serialized_tensor) tuple from one specific rank. 
values=[rank_part[1] for rank_part in tensor_group] ), ) for tensor_group in logical_tensors # each tensor_group is like ( (n0, t0_tp0), (n0, t0_tp1) ) ], load_format=load_format, flush_cache=False, ) if self.device_mesh["infer_tp"].get_local_rank() == 0: await self.inference_engine.flush_cache() async def release_memory(self): if self.device_mesh["infer_tp"].get_local_rank() == 0 and self.rollout_config.free_cache_engine: await self.inference_engine.release_memory_occupation() @GPUMemoryLogger(role="FSDPSGLangShardingManager enter", logger=logger) async def wake_up(self): get_torch_device().empty_cache() if self.device_mesh["infer_tp"].get_local_rank() == 0 and self.rollout_config.free_cache_engine: if self.multi_stage_wake_up: await self.inference_engine.resume_memory_occupation(tags=["weights"]) log_gpu_memory_usage("Before resume SGLang weights in sharding manager", logger=logger) else: await self.inference_engine.resume_memory_occupation() log_gpu_memory_usage("Before resume SGLang weights + kv_cache in sharding manager", logger=logger) log_gpu_memory_usage("Before state_dict() in sharding manager memory", logger=logger) if self.offload_param: load_fsdp_model_to_gpu(self.module) params = self.module.state_dict() log_gpu_memory_usage("After state_dict() in sharding manager memory", logger=logger) device = get_device_id() # used when fsdp2 set cpu_offload_policy params = { k: v.to(device, non_blocking=True) if fsdp_version(self.module) == 2 else v for k, v in params.items() } # convert weight keys to match the model config params = convert_weight_keys(params, getattr(self.module, "_fsdp_wrapped_module", self.module)) # Copy, not share memory await self.update_weights(params) log_gpu_memory_usage("After sync model weights in sharding manager", logger=logger) del params if self.offload_param: offload_fsdp_model_to_cpu(self.module) get_torch_device().empty_cache() log_gpu_memory_usage("After del state_dict and empty_cache in sharding manager", logger=logger) if ( self.multi_stage_wake_up and self.rollout_config.free_cache_engine and self.device_mesh["infer_tp"].get_local_rank() == 0 ): await self.inference_engine.resume_memory_occupation(tags=["kv_cache"]) log_gpu_memory_usage("After resume SGLang kv_cache in sharding manager", logger=logger) # important: need to manually set the random states of each tp to be identical. 
if self.device_mesh is not None: self.torch_random_states = get_torch_device().get_rng_state() get_torch_device().set_rng_state(self.gen_random_states) @GPUMemoryLogger(role="FSDPSGLangShardingManager exit", logger=logger) async def sleep(self): if self.rollout_config.free_cache_engine: log_gpu_memory_usage("Before SGLang offload in sharding manager", logger=logger) await self.release_memory() log_gpu_memory_usage("After SGLang offload in sharding manager", logger=logger) self.module.train() # add empty cache after each compute get_torch_device().empty_cache() # restore random states if self.device_mesh is not None: self.gen_random_states = get_torch_device().get_rng_state() get_torch_device().set_rng_state(self.torch_random_states) def preprocess_data(self, data: DataProto) -> DataProto: """All gather across tp group to make each rank has identical input.""" if self.tp_size == 1: return data # TODO: Current impl doesn't consider FSDP with torch micro-dp group = self.device_mesh["infer_tp"].get_group() all_gather_data_proto(data=data, process_group=group) return data def postprocess_data(self, data: DataProto) -> DataProto: """Get chunk data of this tp rank since we do all gather in preprocess.""" if self.tp_size == 1: return data return data.chunk(chunks=self.tp_size)[self.tp_rank] ================================================ FILE: verl_rl/verl/workers/sharding_manager/fsdp_ulysses.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
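The SGLang manager above and the Ulysses manager below share the same preprocess/postprocess symmetry: all-gather so every rank in the group holds the identical batch, then chunk so each rank keeps only its own slice. A toy, single-process sketch with a plain tensor standing in for DataProto (tp_size=4 is an assumed value):

```python
import torch

tp_size = 4
batch = torch.arange(8)  # pretend this is the all-gathered batch on every rank

# postprocess_data: each rank keeps chunk `tp_rank` of the gathered batch.
for tp_rank in range(tp_size):
    print(tp_rank, batch.chunk(chunks=tp_size)[tp_rank].tolist())
# 0 [0, 1]
# 1 [2, 3]
# 2 [4, 5]
# 3 [6, 7]
```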
""" Contains a resharding manager that binds weights from FSDP zero3 to XPerfGPT """ from torch.distributed.device_mesh import DeviceMesh from verl import DataProto from verl.protocol import all_gather_data_proto from verl.utils.ulysses import get_ulysses_sequence_parallel_group, set_ulysses_sequence_parallel_group from .base import BaseShardingManager class FSDPUlyssesShardingManager(BaseShardingManager): """ Sharding manager to support data resharding when using FSDP + Ulysses """ def __init__(self, device_mesh: DeviceMesh): super().__init__() self.device_mesh = device_mesh self.seed_offset = 12345 def __enter__(self): if self.device_mesh is not None: # We have a global SP group # so we have to change to use model-specific sp group self.prev_sp_group = get_ulysses_sequence_parallel_group() set_ulysses_sequence_parallel_group(self.device_mesh["sp"].get_group()) # TODO: check how to set seed for each model def __exit__(self, exc_type, exc_value, traceback): # restore random states if self.device_mesh is not None: # revert to previous sp group set_ulysses_sequence_parallel_group(self.prev_sp_group) # TODO: check how to set seed for each model def preprocess_data(self, data: DataProto) -> DataProto: """ AllGather data from sp region This is because the data is first sharded along the FSDP dimension as we utilize the DP_COMPUTE In Ulysses, we need to make sure the same data is used across a SP group """ if self.device_mesh is not None: group = self.device_mesh["sp"].get_group() all_gather_data_proto(data=data, process_group=group) return data def postprocess_data(self, data: DataProto) -> DataProto: """ Split the data to follow FSDP partition """ if self.device_mesh is not None: sp_size = self.device_mesh["sp"].size() sp_rank = self.device_mesh["sp"].get_local_rank() data = data.chunk(chunks=sp_size)[sp_rank] return data ================================================ FILE: verl_rl/verl/workers/sharding_manager/fsdp_vllm.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import inspect
import logging
import os
import time
from collections import OrderedDict

from torch.distributed.device_mesh import DeviceMesh
from torch.distributed.fsdp.api import FullStateDictConfig, ShardedStateDictConfig, StateDictType
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP

try:
    # for torch 2.5+
    from torch.distributed.tensor import DTensor
except ImportError:
    from torch.distributed._tensor import DTensor

from dataclasses import asdict

from verl import DataProto
from verl.protocol import all_gather_data_proto
from verl.third_party.vllm import LLM
from verl.third_party.vllm import parallel_state as vllm_ps
from verl.utils.device import get_device_id, get_device_name, get_torch_device
from verl.utils.fsdp_utils import (
    fsdp_version,
    layered_summon_lora_params,
    load_fsdp_model_to_gpu,
    offload_fsdp_model_to_cpu,
)
from verl.utils.model import check_exclude_modules, check_target_modules, convert_weight_keys
from verl.utils.profiler import GPUMemoryLogger, log_gpu_memory_usage, simple_timer
from verl.utils.torch_functional import check_device_is_available
from verl.utils.vllm_utils import TensorLoRARequest, VLLMHijack, is_version_ge, patch_vllm_moe_model_weight_loader

from .base import BaseShardingManager

logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))


class FSDPVLLMShardingManager(BaseShardingManager):
    """Sharding manager for FSDP models with vLLM inference engine integration.

    Manages parameter synchronization between FSDP training models and vLLM
    inference engines, handling both full parameters and LoRA adapters with
    efficient memory management and device placement.
    """

    @check_device_is_available()
    def __init__(
        self,
        module: FSDP,
        inference_engine: LLM,
        model_config,
        rollout_config,
        full_params: bool = False,
        device_mesh: DeviceMesh = None,
        offload_param: bool = False,
        load_format: str = "dummy_hf",
        layered_summon: bool = True,
    ):
        self.module = module

        # For AsyncLLM, inference_engine and model_runner are initialized lazily
        # in vLLMAsyncRollout.load_model.
        self.inference_engine = inference_engine
        self.model_runner = (
            self.inference_engine.llm_engine.model_executor.driver_worker.worker.model_runner
            if self.inference_engine
            else None
        )

        self.model_config = model_config
        self.rollout_config = rollout_config
        self.device_mesh = device_mesh
        self.offload_param = offload_param
        self.load_format = load_format
        self.layered_summon = layered_summon

        # Full params
        self.full_params = full_params
        if full_params and fsdp_version(self.module) == 1:
            FSDP.set_state_dict_type(
                self.module,
                state_dict_type=StateDictType.FULL_STATE_DICT,
                state_dict_config=FullStateDictConfig(),
            )
        elif fsdp_version(self.module) == 1:
            FSDP.set_state_dict_type(
                self.module,
                state_dict_type=StateDictType.SHARDED_STATE_DICT,
                state_dict_config=ShardedStateDictConfig(),
            )

        self.tp_size = self.device_mesh["infer_tp"].size()
        self.tp_rank = self.device_mesh["infer_tp"].get_local_rank()

        # Note that torch_random_states may be different on each dp rank
        self.torch_random_states = get_torch_device().get_rng_state()
        # get a random rng state for generation
        if self.device_mesh is not None:
            gen_dp_rank = self.device_mesh["dp"].get_local_rank()
            get_torch_device().manual_seed(gen_dp_rank + 1000)  # make sure all tp ranks have the same random states
            self.gen_random_states = get_torch_device().get_rng_state()
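            # Snapshot the generation RNG state seeded per dp rank above, then
            # restore the original training RNG state; __enter__/__exit__ swap
            # between these two snapshots so rollout-time sampling is
            # reproducible without perturbing the training RNG stream.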
            get_torch_device().set_rng_state(self.torch_random_states)
        else:
            self.gen_random_states = None

        self.base_sync_done: bool = "dummy" not in load_format
        if is_version_ge(pkg="vllm", minver="0.7.3"):
            VLLMHijack.hijack()

    @GPUMemoryLogger(role="fsdp vllm sharding_manager", logger=logger)
    def __enter__(self):
        def __collect_lora_params() -> OrderedDict:
            """
            Collect LoRA params, or full params if the base model is not yet loaded
            in vLLM. Works with `isinstance(self.module._fsdp_wrapped_module, PeftModel)`.
            """
            from peft.utils.save_and_load import get_peft_model_state_dict

            lora_params = OrderedDict()
            peft_model = getattr(self.module, "_fsdp_wrapped_module", self.module)
            if fsdp_version(self.module) > 0:
                if self.layered_summon:
                    if not self.base_sync_done:
                        raise ValueError(
                            "To use layered_summon, you must make sure the base model is preloaded in vllm, "
                            "e.g. set rollout.load_format=safetensors"
                        )
                    lora_params = layered_summon_lora_params(self.module)
                else:
                    with FSDP.summon_full_params(self.module, writeback=False):
                        if self.base_sync_done:
                            lora_params = get_peft_model_state_dict(peft_model)
                            lora_params = {
                                name: param.full_tensor().detach().cpu()
                                if hasattr(param, "full_tensor")
                                else param.detach().cpu()
                                for name, param in lora_params.items()
                            }
                        else:
                            model = peft_model.base_model.model
                            orig_dev = "cpu" if "cpu" in str(next(model.parameters()).device) else get_device_name()
                            model = model.to("cpu")
                            for name, param in model.state_dict().items():
                                if any(x in name for x in ["_flat_param", "lora_"]):
                                    continue
                                name = name.replace("_fsdp_wrapped_module.", "").replace(".base_layer", "")
                                lora_params[name] = (
                                    param.full_tensor().detach().cpu()
                                    if hasattr(param, "full_tensor")
                                    else param.detach().cpu()
                                )
                            model = model.to(orig_dev)
                get_torch_device().empty_cache()
            else:
                if self.base_sync_done:
                    lora_params = get_peft_model_state_dict(peft_model)
                else:
                    model = peft_model.base_model.model
                    orig_dev = "cpu" if "cpu" in str(next(model.parameters()).device) else get_device_name()
                    model = model.to("cpu")
                    for name, param in model.state_dict().items():
                        if any(x in name for x in ["_flat_param", "lora_"]):
                            continue
                        name = name.replace("_fsdp_wrapped_module.", "").replace(".base_layer", "")
                        lora_params[name] = param.detach().cpu()
                    model = model.to(orig_dev)
            return lora_params

        # NOTE: Basically, we only need `get_torch_device().empty_cache()` before vllm wake_up and
        # after vllm sleep, since vllm has its own caching memory allocator CuMemAllocator.
        # Out of vllm scope, we should avoid empty cache to let pytorch use caching memory
        # to speed up memory allocations.
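        # [Editor's note] Concretely, the flow below is: empty_cache() ->
        # collect state_dict -> wake_up(tags=["weights"]) -> load weights into
        # vLLM -> offload FSDP params -> empty_cache() -> wake_up(tags=["kv_cache"]);
        # __exit__ then calls sleep(level=1) before training resumes. References: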
        #
        # pytorch: https://pytorch.org/docs/stable/notes/cuda.html#memory-management
        # vllm: https://github.com/vllm-project/vllm/blob/v0.7.3/vllm/device_allocator/cumem.py#L103
        self.timing = {}
        with simple_timer("reshard", self.timing):
            get_torch_device().empty_cache()

            log_gpu_memory_usage("Before state_dict() in sharding manager memory", logger=logger)
            if self.offload_param:
                load_fsdp_model_to_gpu(self.module)

            peft_config = None
            peft_model = getattr(self.module, "_fsdp_wrapped_module", self.module)
            if hasattr(peft_model, "peft_config"):
                peft_config = peft_model.peft_config.get("default", None)
                params = __collect_lora_params()
            else:
                params = self.module.state_dict()
                params = convert_weight_keys(params, getattr(self.module, "_fsdp_wrapped_module", self.module))
            log_gpu_memory_usage("After state_dict() in sharding manager memory", logger=logger)

            if self.rollout_config.free_cache_engine:
                if "tags" in inspect.signature(self.inference_engine.wake_up).parameters:
                    self.inference_engine.wake_up(tags=["weights"])
                else:
                    self.inference_engine.wake_up()

            # update model params
            self.update_params(params, peft_config=peft_config)
            log_gpu_memory_usage("After sync model weights in sharding manager", logger=logger)
            del params
            if self.offload_param:
                offload_fsdp_model_to_cpu(self.module)
            get_torch_device().empty_cache()

            if (
                self.rollout_config.free_cache_engine
                and "tags" in inspect.signature(self.inference_engine.wake_up).parameters
            ):
                self.inference_engine.wake_up(tags=["kv_cache"])

            log_gpu_memory_usage("After del state_dict and empty_cache in sharding manager", logger=logger)

        # important: need to manually set the random states of each tp to be identical.
        if self.device_mesh is not None:
            self.torch_random_states = get_torch_device().get_rng_state()
            get_torch_device().set_rng_state(self.gen_random_states)

    @GPUMemoryLogger(role="fsdp vllm sharding_manager", logger=logger)
    def __exit__(self, exc_type, exc_value, traceback):
        if self.rollout_config.free_cache_engine:
            self.inference_engine.sleep(level=1)

        self.module.train()

        # add empty cache after each compute
        get_torch_device().empty_cache()

        # restore random states
        if self.device_mesh is not None:
            self.gen_random_states = get_torch_device().get_rng_state()
            get_torch_device().set_rng_state(self.torch_random_states)

    @GPUMemoryLogger(role="fsdp vllm sharding_manager", logger=logger)
    def preprocess_data(self, data: DataProto) -> DataProto:
        """All-gather across the tp group so that each rank has identical input."""
        if self.tp_size == 1:
            return data

        # TODO: Current impl doesn't consider FSDP with torch micro-dp
        group = vllm_ps.get_tensor_model_parallel_group().device_group

        all_gather_data_proto(data=data, process_group=group)
        return data

    @GPUMemoryLogger(role="fsdp vllm sharding_manager", logger=logger)
    def postprocess_data(self, data: DataProto) -> DataProto:
        """Get this tp rank's chunk of the data, since we all-gathered in preprocess."""
        if self.tp_size == 1:
            return data
        return data.chunk(chunks=self.tp_size)[self.tp_rank]

    def update_params(self, updated_params, peft_config=None):
        """Update model parameters in the vLLM inference engine.

        Synchronizes parameters from the FSDP training model to the vLLM
        inference engine, handling both full model parameters and LoRA adapters
        with proper device placement and memory management.

        Args:
            updated_params (dict): Dictionary of parameter names to tensor values.
            peft_config (optional): PEFT configuration for LoRA adapters.
""" model = self.model_runner.model if peft_config: if self.base_sync_done: lora_int_id = int(time.time_ns() % 0x7FFFFFFF) lora_reqest = TensorLoRARequest( lora_name=f"{lora_int_id}", lora_int_id=lora_int_id, lora_path="simon_lora_path", peft_config=asdict(peft_config), lora_tensors=updated_params, ) self.inference_engine.llm_engine.add_lora(lora_reqest) logger.info(f"vLLM load weights, loaded_params: {len(updated_params)}") return else: def replace_lora_wrapper(k): """Replace LoRA parameter keys with base layer equivalents. Transforms LoRA parameter names to their corresponding base layer names for proper weight loading in vLLM when base model sync is not done. Args: k (str): Original parameter key name. Returns: str: Transformed parameter key for base layer. """ stacked_params = ["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"] if k.endswith(".weight"): module_k = k[: -len(".weight")] if check_exclude_modules(peft_config, module_k): return k elif any([module_k.endswith(s) for s in stacked_params]) or check_target_modules( peft_config, module_k ): return f"{module_k}.base_layer.weight" if k.endswith(".bias"): module_k = k[: -len(".bias")] if check_exclude_modules(peft_config, module_k): return k elif any([module_k.endswith(s) for s in stacked_params]) or check_target_modules( peft_config, module_k ): return f"{module_k}.base_layer.bias" return k updated_params = {replace_lora_wrapper(k): v for k, v in updated_params.items()} patch_vllm_moe_model_weight_loader(model) device = get_device_id() # used when fsdp2 set cpu_offload_policy loaded_params = model.load_weights( ( (name, param.to(device, non_blocking=True).full_tensor() if isinstance(param, DTensor) else param) for name, param in updated_params.items() ) ) self.base_sync_done = True logger.info(f"vLLM load weights, loaded_params: {len(loaded_params) if loaded_params else -1}") ================================================ FILE: verl_rl/verl/workers/sharding_manager/megatron_sglang.py ================================================ # Copyright 2024 Bytedance Ltd. and/or its affiliates # Copyright 2023-2024 SGLang Team # Copyright 2025 ModelBest Inc. and/or its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This file contains a Megatron style Hybrid Engine that shares the weights of the actor with the inference engine. 
""" import asyncio import logging import os import torch.distributed as dist from omegaconf import DictConfig from sglang.srt.entrypoints.engine import Engine from sglang.srt.model_executor.model_runner import LocalSerializedTensor try: from sglang.srt.utils import TorchPatchMultiprocessingSerializer as MultiprocessingSerializer except ImportError: from sglang.srt.utils import MultiprocessingSerializer from torch import nn from torch.distributed.device_mesh import DeviceMesh from verl.protocol import DataProto, all_gather_data_proto from verl.utils.device import get_torch_device from verl.utils.megatron_utils import ( load_megatron_model_to_gpu, offload_megatron_model_to_cpu, per_tensor_generator, ) from verl.utils.profiler import GPUMemoryLogger, log_gpu_memory_usage, simple_timer from verl.workers.rollout.sglang_rollout.utils import get_named_tensor_buckets from .base import BaseShardingManager logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_PPO_LOGGING_LEVEL", "WARN")) """ Megatron Hybrid Engine: - During training, only the current pp stage holds the parameters - Before inference, broadcast the parameters of the current pp rank to all other pp ranks (all pp ranks holds all the parameters) - Bind the parameters to the inference engine - Do inference in tp. pp is treated as additional dp - After inference, all the parameters that doesn't belong to this pp rank is freed. """ class MegatronSGLangShardingManager(BaseShardingManager): """A sharding manager for Megatron-style training & inference with SGLang. This class manages the sharding of model parameters between training and inference phases in a Megatron-style parallel setup. It handles: - Loading/offloading parameters between CPU/GPU - Updating inference engine weights - Managing random states for reproducibility - Data preprocessing for distributed inference Args: actor_module (nn.ModuleList): The actor model modules inference_engine (Engine): The SGLang inference engine model_config: Configuration for the actor's model rollout_config: Configuration for rollout generation transformer_config: Transformer-specific configuration layer_name_mapping: Mapping between layer names and parameters weight_converter: Utility for converting weights between formats device_mesh (DeviceMesh | None): PyTorch device mesh for distributed training offload_param (bool): Whether to offload parameters to CPU when not in use """ def __init__( self, actor_module: nn.ModuleList, inference_engine: Engine, model_config: DictConfig, rollout_config: DictConfig, transformer_config, layer_name_mapping, weight_converter, device_mesh: DeviceMesh | None = None, offload_param: bool = False, bridge=None, ): self.actor_module = actor_module self.inference_engine = inference_engine self.model_config = model_config self.rollout_config = rollout_config self.transformer_config = transformer_config self.layer_name_mapping = layer_name_mapping self.weight_converter = weight_converter self.device_mesh = device_mesh self.bridge = bridge self.offload_param = offload_param if self.device_mesh is not None: self.infer_tp_size = self.device_mesh["tp"].mesh.size()[0] else: self.infer_tp_size = self.inference_engine._tp_size # Note that torch_random_states may be different on each dp rank self.torch_random_states = get_torch_device().get_rng_state() # get a random rng states if self.device_mesh is not None: gen_dp_rank = self.device_mesh["dp"].get_local_rank() get_torch_device().manual_seed(gen_dp_rank + 1000) # make sure all tp ranks have the same random states 
            self.gen_random_states = get_torch_device().get_rng_state()
            get_torch_device().set_rng_state(self.torch_random_states)
        else:
            self.gen_random_states = None

    @GPUMemoryLogger(role="MegatronSGLangShardingManager enter", logger=logger)
    def __enter__(self):
        self.timing = {}
        with simple_timer("reshard", self.timing):
            loop = asyncio.get_event_loop()
            loop.run_until_complete(self.wake_up())

    @GPUMemoryLogger(role="MegatronSGLangShardingManager exit", logger=logger)
    def __exit__(self, exc_type, exc_value, traceback):
        loop = asyncio.get_event_loop()
        loop.run_until_complete(self.sleep())

    async def update_weights(self, params):
        """
        Update model weights using tensor buckets, similar to THUDM/slime's implementation.

        Notes:
            - For the best performance of `rebuild_cuda_tensor`, it is recommended to:
                1. Enable `RAY_EXPERIMENTAL_NOSET_CUDA_VISIBLE_DEVICES`.
                2. Manually set `CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7` when using Tensor Parallelism (TP >= 8).
            - See reference implementations in SLIME:
                - Main logic: https://github.com/THUDM/slime/blob/fb7605cc5fb09af0f9369d37f7192f12bddee577/slime/ray/ppo_actor.py#L452
                - Runtime envs: https://github.com/THUDM/slime/blob/fb7605cc5fb09af0f9369d37f7192f12bddee577/slime/ray/ppo_actor.py#L39
        """
        if self.device_mesh["tp"].get_local_rank() == 0 and self.rollout_config.free_cache_engine:
            await self.inference_engine.resume_memory_occupation()
        named_tensors = params
        load_format = None

        update_weights_bucket_bytes = int(self.rollout_config.update_weights_bucket_megabytes) << 20
        for batch in get_named_tensor_buckets(named_tensors, update_weights_bucket_bytes):
            # On each rank, serialize a batch of (name, tensor) tuples.
            # named_tensors_batch will be a list like:
            # [(name0, serialized_tensor0_tp0), (name1, serialized_tensor1_tp0), ...]
            named_tensors_batch = [
                (name, MultiprocessingSerializer.serialize(tensor.detach())) for name, tensor in batch
            ]

            if self.device_mesh["tp"].get_local_rank() == 0:
                # On rank 0, prepare a list to hold the gathered batches from all ranks.
                gathered_serialized_batches = [None for _ in range(self.device_mesh["tp"].mesh.size()[0])]
            else:
                gathered_serialized_batches = None

            # Gather the named_tensors_batch from all ranks to rank 0.
            # After this, on rank 0, gathered_serialized_batches will be a list of lists:
            # [ [ (name0, s_t0_tp0), (name1, s_t1_tp0), ... ],  # batch from TP rank 0
            #   [ (name0, s_t0_tp1), (name1, s_t1_tp1), ... ],  # batch from TP rank 1
            #   ... ]
            # On other ranks, gathered_serialized_batches will be None.
            dist.gather_object(
                obj=named_tensors_batch,
                object_gather_list=gathered_serialized_batches,
                dst=self.device_mesh["tp"].mesh.tolist()[0],
                group=self.device_mesh["tp"].get_group(),
            )

            if self.device_mesh["tp"].get_local_rank() == 0:
                # Use zip(*) to "transpose" the data structure.
                # This groups the serialized parts for each individual tensor across all TP ranks.
                # Example: from [[(n0, t0_tp0), (n1, t1_tp0)], [(n0, t0_tp1), (n1, t1_tp1)]]
                # to [ ( (n0, t0_tp0), (n0, t0_tp1) ), ( (n1, t1_tp0), (n1, t1_tp1) ) ]
                logical_tensors = zip(*gathered_serialized_batches, strict=False)

                await self.inference_engine.update_weights_from_tensor(
                    named_tensors=[
                        # 'tensor_group' represents a single logical tensor's data from all ranks.
                        (
                            tensor_group[0][0],  # Get the name from the first rank's data.
                            LocalSerializedTensor(
                                # 'rank_part' is the (name, serialized_tensor) tuple from one specific rank.
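                                # values holds one serialized shard per TP rank; the
                                # receiving engine selects its own rank's entry when
                                # rebuilding the logical tensor.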
                                values=[rank_part[1] for rank_part in tensor_group]
                            ),
                        )
                        for tensor_group in logical_tensors
                        # each tensor_group is like ( (n0, t0_tp0), (n0, t0_tp1) )
                    ],
                    load_format=load_format,
                    flush_cache=False,
                )

        if self.device_mesh["tp"].get_local_rank() == 0:
            await self.inference_engine.flush_cache()

    async def release_memory(self):
        if self.device_mesh["tp"].get_local_rank() == 0 and self.rollout_config.free_cache_engine:
            await self.inference_engine.release_memory_occupation()

    @GPUMemoryLogger(role="MegatronSGLangShardingManager enter", logger=logger)
    async def wake_up(self):
        if self.offload_param:
            load_megatron_model_to_gpu(self.actor_module)
        if self.bridge is not None:
            per_tensor_param = self.bridge.export_weights(self.actor_module)
        else:
            per_tensor_param = per_tensor_generator(
                self.actor_module,
                self.model_config,
                self.weight_converter,
                self.transformer_config,
                self.layer_name_mapping,
            )
        await self.update_weights(per_tensor_param)
        if self.offload_param:
            offload_megatron_model_to_cpu(self.actor_module)
        get_torch_device().empty_cache()

        # important: need to manually set the random states of each tp to be identical.
        if self.device_mesh is not None:
            self.torch_random_states = get_torch_device().get_rng_state()
            get_torch_device().set_rng_state(self.gen_random_states)

    @GPUMemoryLogger(role="MegatronSGLangShardingManager exit", logger=logger)
    async def sleep(self):
        if self.rollout_config.free_cache_engine:
            log_gpu_memory_usage("Before SGLang offload in sharding manager", logger=logger)
            await self.release_memory()
            log_gpu_memory_usage("After SGLang offload in sharding manager", logger=logger)

        for model in self.actor_module:
            model.train()

        # add empty cache after each compute
        get_torch_device().empty_cache()

        # restore random states
        if self.device_mesh is not None:
            self.gen_random_states = get_torch_device().get_rng_state()
            get_torch_device().set_rng_state(self.torch_random_states)

    @GPUMemoryLogger(role="megatron sglang sharding_manager", logger=logger)
    def preprocess_data(self, data: DataProto) -> DataProto:
        # DP_COMPUTE_PROTO: all training ranks are dp, the same as fsdp
        if self.infer_tp_size == 1:
            return data
        all_gather_data_proto(data, self.device_mesh["tp"].get_group())
        return data

    @GPUMemoryLogger(role="megatron sglang sharding_manager", logger=logger)
    def postprocess_data(self, data: DataProto) -> DataProto:
        # DP_COMPUTE_PROTO: all training ranks are dp, the same as fsdp
        if self.infer_tp_size == 1:
            return data
        return data.chunk(chunks=self.infer_tp_size)[self.device_mesh["tp"].get_local_rank()]


================================================
FILE: verl_rl/verl/workers/sharding_manager/megatron_vllm.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file contains a Megatron style Hybrid Engine that shares the weights of the actor with the inference engine.
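Unlike the SGLang variant above, weights are handed to vLLM directly through model.load_weights on each rank.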
""" import inspect import logging import os import torch import torch.distributed from megatron.core import parallel_state as mpu from omegaconf import DictConfig from torch import nn from verl import DataProto from verl.models.mcore.weight_converter import McoreToHFWeightConverterBase from verl.protocol import all_gather_data_proto from verl.third_party.vllm import LLM from verl.third_party.vllm import parallel_state as vllm_ps from verl.utils.device import get_torch_device from verl.utils.megatron_utils import load_megatron_model_to_gpu, offload_megatron_model_to_cpu, per_tensor_generator from verl.utils.profiler import GPUMemoryLogger, log_gpu_memory_usage from verl.utils.profiler.performance import simple_timer from verl.utils.torch_functional import check_device_is_available from verl.utils.vllm_utils import patch_vllm_moe_model_weight_loader from .base import BaseShardingManager logger = logging.getLogger(__file__) logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN")) """ Megatron Hybrid Engine: - During training, only the current pp stage holds the parameters - Before inference, broadcast the parameters of the current pp rank to all other pp ranks (all pp ranks holds all the parameters) - Bind the parameters to the inference engine - Do inference in tp. pp is treated as additional dp - After inference, all the parameters that doesn't belong to this pp rank is freed. """ class MegatronVLLMShardingManager(BaseShardingManager): """A sharding manager that bridges Megatron-LM training with vLLM inference. This class handles the parameter sharding and communication between: - Megatron-LM's tensor/expert parallel training setup - vLLM's tensor parallel inference setup Key responsibilities: - Manages parameter broadcasting between training and inference configurations - Handles weight conversion between Megatron and HuggingFace formats - Coordinates memory management between training and inference phases - Maintains random state consistency across different parallel groups Args: actor_module (nn.ModuleList): The Megatron-LM model being trained inference_engine (LLM): The vLLM inference engine model_config: Configuration for the actor's model transformer_config: Transformer-specific configuration for the model rollout_config: Configuration for rollout layer_name_mapping: Mapping between Megatron and HF layer names weight_converter (McoreToHFWeightConverterBase): Converts weights between formats device_mesh: Device mesh for parallel operations offload_param (bool): Whether to offload parameters when not in use """ @check_device_is_available() def __init__( self, actor_module: nn.ModuleList, inference_engine: LLM, model_config: DictConfig, transformer_config, rollout_config: DictConfig, layer_name_mapping, weight_converter: McoreToHFWeightConverterBase, device_mesh, offload_param: bool = True, bridge=None, ): self.actor_module = actor_module self.inference_engine = inference_engine self.offload_param = offload_param # For AsyncLLM, inference_engine and model_runner are defer initialized in vLLMAsyncRollout.load_model self.model_runner = ( self.inference_engine.llm_engine.model_executor.driver_worker.worker.model_runner if self.inference_engine else None ) self.model_config = model_config self.transformer_config = transformer_config self.rollout_config = rollout_config self.layer_name_mapping = layer_name_mapping self.weight_converter = weight_converter self.bridge = bridge # initialize groups for vllm inference self.rank = torch.distributed.get_rank() self.world_size = 
torch.distributed.get_world_size() self.device_mesh = device_mesh self.infer_tp_size = self.device_mesh["infer_tp"].size() self.infer_tp_rank = self.device_mesh["infer_tp"].get_local_rank() self.train_tp_size = mpu.get_tensor_model_parallel_world_size() self.train_tp_rank = mpu.get_tensor_model_parallel_rank() self.train_tp_group = mpu.get_tensor_model_parallel_group() self.train_ep_size = mpu.get_expert_model_parallel_world_size() self.train_ep_rank = mpu.get_expert_model_parallel_rank() self.train_ep_group = mpu.get_expert_model_parallel_group() self.train_etp_size = mpu.get_expert_tensor_parallel_world_size() self.train_etp_rank = mpu.get_expert_tensor_parallel_rank() self.train_etp_group = mpu.get_expert_tensor_parallel_group() self.need_tp_reshard = self.train_tp_size != self.infer_tp_size self.train_tp_larger = self.train_tp_size > self.infer_tp_size self.torch_random_states = get_torch_device().get_rng_state() if self.device_mesh is not None: gen_dp_rank = self.device_mesh["dp"].get_local_rank() get_torch_device().manual_seed(gen_dp_rank + 1000) # make sure all tp ranks have the same random states self.gen_random_states = get_torch_device().get_rng_state() get_torch_device().set_rng_state(self.torch_random_states) else: self.gen_random_states = None @GPUMemoryLogger(role="megatron vllm sharding_manager", logger=logger) def __enter__(self): self.timing = {} with simple_timer("reshard", self.timing): get_torch_device().empty_cache() log_gpu_memory_usage("Before state_dict() in sharding manager memory", logger=logger) if self.offload_param: load_megatron_model_to_gpu(self.actor_module) if self.rollout_config.free_cache_engine: if "tags" in inspect.signature(self.inference_engine.wake_up).parameters: self.inference_engine.wake_up(tags=["weights"]) else: self.inference_engine.wake_up() if self.bridge is not None: per_tensor_param = self.bridge.export_weights(self.actor_module) else: per_tensor_param = per_tensor_generator( self.actor_module, self.model_config, self.weight_converter, self.transformer_config, self.layer_name_mapping, ) model = self.model_runner.model patch_vllm_moe_model_weight_loader(model) loaded_params = model.load_weights(per_tensor_param) info = f"vLLM load weights, loaded_params: {len(loaded_params)}" logger.info(info) if self.offload_param: offload_megatron_model_to_cpu(self.actor_module) get_torch_device().empty_cache() if ( self.rollout_config.free_cache_engine and "tags" in inspect.signature(self.inference_engine.wake_up).parameters ): self.inference_engine.wake_up(tags=["kv_cache"]) # important: need to manually set the random states of each tp to be identical. 
        if self.device_mesh is not None:
            self.torch_random_states = get_torch_device().get_rng_state()
            get_torch_device().set_rng_state(self.gen_random_states)

    @GPUMemoryLogger(role="megatron vllm sharding_manager", logger=logger)
    def __exit__(self, exc_type, exc_value, traceback):
        if self.rollout_config.free_cache_engine:
            self.inference_engine.sleep(level=1)
        for model in self.actor_module:
            model.train()

        get_torch_device().empty_cache()

        # restore random states
        if self.device_mesh is not None:
            self.gen_random_states = get_torch_device().get_rng_state()
            get_torch_device().set_rng_state(self.torch_random_states)

    @GPUMemoryLogger(role="megatron vllm sharding_manager", logger=logger)
    def preprocess_data(self, data: DataProto) -> DataProto:
        # DP_COMPUTE_PROTO: all training ranks are dp, the same as fsdp
        if self.infer_tp_size == 1:
            return data

        # TODO: Current impl doesn't consider FSDP with torch micro-dp
        group = vllm_ps.get_tensor_model_parallel_group().device_group

        all_gather_data_proto(data=data, process_group=group)
        return data

    @GPUMemoryLogger(role="megatron vllm sharding_manager", logger=logger)
    def postprocess_data(self, data: DataProto) -> DataProto:
        # DP_COMPUTE_PROTO: all training ranks are dp, the same as fsdp
        if self.infer_tp_size == 1:
            return data
        return data.chunk(chunks=self.infer_tp_size)[self.infer_tp_rank]
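# [Editor's note] The RNG bookkeeping repeated across all the managers above
# can be exercised in isolation. A minimal, CPU-only sketch (hypothetical
# helper, not part of the original file):
def _editor_demo_rng_swap():
    import torch

    train_states = torch.get_rng_state()  # torch_random_states
    torch.manual_seed(1000)               # per-dp-rank generation seed
    gen_states = torch.get_rng_state()    # gen_random_states
    torch.set_rng_state(train_states)     # training continues undisturbed

    # __enter__: stash training RNG, install generation RNG
    train_states = torch.get_rng_state()
    torch.set_rng_state(gen_states)
    sample = torch.rand(1)                # rollout-time sampling

    # __exit__: stash the (advanced) generation RNG, restore training RNG
    gen_states = torch.get_rng_state()
    torch.set_rng_state(train_states)
    return sample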